diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersCmd.java index 17575076444d..974a4533ff97 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersCmd.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupProvider; +import org.apache.cloudstack.backup.BackupProviderNameUtils; import com.cloud.user.Account; @@ -70,11 +71,12 @@ private void setupResponse(final List providers) { final ListResponse response = new ListResponse<>(); final List responses = new ArrayList<>(); for (final BackupProvider provider : providers) { - if (provider == null || (getName() != null && !provider.getName().equals(getName()))) { + final String displayName = provider == null ? 
null : BackupProviderNameUtils.toDisplayName(provider.getName()); + if (provider == null || (getName() != null && !displayName.equalsIgnoreCase(getName()))) { continue; } final BackupProviderResponse backupProviderResponse = new BackupProviderResponse(); - backupProviderResponse.setName(provider.getName()); + backupProviderResponse.setName(displayName); backupProviderResponse.setDescription(provider.getDescription()); backupProviderResponse.setObjectName("providers"); responses.add(backupProviderResponse); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersForZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersForZoneCmd.java index 8d4fa8eba502..7f16fa42bdc3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersForZoneCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersForZoneCmd.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupProvider; +import org.apache.cloudstack.backup.BackupProviderNameUtils; import com.cloud.user.Account; @@ -75,7 +76,7 @@ private void setupResponse(final List providers) { continue; } final BackupProviderResponse backupProviderResponse = new BackupProviderResponse(); - backupProviderResponse.setName(provider.getName()); + backupProviderResponse.setName(BackupProviderNameUtils.toDisplayName(provider.getName())); backupProviderResponse.setDescription(provider.getDescription()); backupProviderResponse.setObjectName("providers"); responses.add(backupProviderResponse); diff --git a/api/src/main/java/org/apache/cloudstack/backup/AblestackBackupFrameworkUtils.java b/api/src/main/java/org/apache/cloudstack/backup/AblestackBackupFrameworkUtils.java new file mode 100644 index 000000000000..612335da03a7 --- /dev/null +++ 
b/api/src/main/java/org/apache/cloudstack/backup/AblestackBackupFrameworkUtils.java @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import com.cloud.vm.VirtualMachine; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +public final class AblestackBackupFrameworkUtils { + + private AblestackBackupFrameworkUtils() { + } + + public static int getEffectiveIncrementalLimit(final int defaultLimit, final List scheduleMaxBackups) { + int effectiveLimit = defaultLimit; + if (scheduleMaxBackups == null) { + return effectiveLimit; + } + for (Integer maxBackups : scheduleMaxBackups) { + if (maxBackups != null && maxBackups > 0) { + effectiveLimit = Math.min(effectiveLimit, maxBackups); + } + } + return effectiveLimit; + } + + public static int getBackupChainSize(final T latestBackup, final Map backupsByUuid, + final Function parentBackupUuidResolver) { + if (latestBackup == null) { + return 0; + } + int chainSize = 1; + T current = latestBackup; + while (current != null) { + final String parentBackupUuid = 
parentBackupUuidResolver.apply(current); + if (parentBackupUuid == null) { + break; + } + current = backupsByUuid.get(parentBackupUuid); + if (current != null) { + chainSize++; + } + } + return chainSize; + } + + public static boolean requiresRunningVmAttach(final VirtualMachine.State vmState) { + return VirtualMachine.State.Running.equals(vmState); + } + + public static boolean shouldExecuteRestoreOnSourceHost(final VirtualMachine.State vmState) { + return !requiresRunningVmAttach(vmState); + } + + public static BackupRestorePlan createRestorePlan(final boolean attachRequired, final boolean cleanupRequired) { + final List stages = new ArrayList<>(); + stages.add(BackupRestoreStage.PREPARE_SOURCE); + stages.add(BackupRestoreStage.VALIDATE_CHAIN); + stages.add(BackupRestoreStage.RESTORE_DATA); + if (attachRequired) { + stages.add(BackupRestoreStage.ATTACH_VOLUME); + } + if (cleanupRequired) { + stages.add(BackupRestoreStage.CLEANUP_SOURCE); + } + return new BackupRestorePlan(stages); + } + + public static boolean hasRestoreStage(final BackupRestorePlan restorePlan, final BackupRestoreStage stage) { + return restorePlan == null || restorePlan.hasStage(stage); + } + + public static List sanitizeChainFiles(final List chainFiles) { + final LinkedHashSet sanitized = new LinkedHashSet<>(); + if (chainFiles == null) { + return new ArrayList<>(); + } + for (final String chainFile : chainFiles) { + if (StringUtils.isNotBlank(chainFile)) { + sanitized.add(chainFile.trim()); + } + } + return new ArrayList<>(sanitized); + } + + public static void validateVolumeChainStates(final List volumeChainStates) { + if (volumeChainStates == null || volumeChainStates.isEmpty()) { + throw new IllegalArgumentException("Backup volume chain states cannot be empty"); + } + for (final BackupVolumeChainState volumeChainState : volumeChainStates) { + if (volumeChainState == null) { + throw new IllegalArgumentException("Backup volume chain state cannot be null"); + } + if 
(StringUtils.isBlank(volumeChainState.getVolumeUuid())) { + throw new IllegalArgumentException("Backup volume chain state must include a volume UUID"); + } + if (sanitizeChainFiles(volumeChainState.getChainFiles()).isEmpty()) { + throw new IllegalArgumentException(String.format("Backup volume chain state for volume [%s] must include at least one chain file", + volumeChainState.getVolumeUuid())); + } + } + } + + public static boolean hasUsableVolumeChainStates(final List volumeChainStates) { + try { + validateVolumeChainStates(volumeChainStates); + return true; + } catch (IllegalArgumentException e) { + return false; + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java index 8fcd0de86e26..6f510e769f6f 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.command.user.backup.ListBackupsCmd; import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.ValidatedConfigKey; import org.apache.cloudstack.framework.config.Configurable; import com.cloud.exception.ResourceUnavailableException; @@ -54,11 +53,10 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer "false", "Is backup and recovery framework enabled.", false, ConfigKey.Scope.Zone); - ConfigKey BackupProviderPlugin = new ValidatedConfigKey<>("Advanced", String.class, + ConfigKey BackupProviderPlugin = new ConfigKey<>("Advanced", String.class, "backup.framework.provider.plugin", "dummy", - "The backup and recovery provider plugin. 
Valid plugin values: dummy, veeam, networker, bx and nas", - true, ConfigKey.Scope.Zone, BackupFrameworkEnabled.key(), value -> validateBackupProviderConfig((String)value)); + "The backup and recovery provider plugin.", true, ConfigKey.Scope.Zone, BackupFrameworkEnabled.key()); ConfigKey BackupSyncPollingInterval = new ConfigKey<>("Advanced", Long.class, "backup.framework.sync.interval", @@ -70,6 +68,23 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer "false", "Enable volume attach/detach operations for VMs that are assigned to Backup Offerings.", true); + ConfigKey KvmIncrementalBackup = new ConfigKey<>("Advanced", Boolean.class, + "kvm.incremental.backup", + "false", + "Enable KVM incremental backups for supported backup providers.", + false, + ConfigKey.Scope.Cluster, + null); + + ConfigKey BackupChainSize = new ConfigKey<>(Integer.class, + "backup.chain.size", + "Advanced", + "10", + "Max incremental backup chain size before switching back to a full backup for KVM backup providers.", + true, + ConfigKey.Scope.Global, + null); + ConfigKey DefaultMaxAccountBackups = new ConfigKey("Account Defaults", Long.class, "max.account.backups", "20", @@ -253,13 +268,4 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer void checkAndRemoveBackupOfferingBeforeExpunge(VirtualMachine vm); - static void validateBackupProviderConfig(String value) { - if (value != null && (value.contains(",") || value.trim().contains(" "))) { - throw new IllegalArgumentException("Multiple backup provider plugins are not supported. Please provide a single plugin value."); - } - List validPlugins = List.of("dummy", "veeam", "networker", "nas", "bx"); - if (value != null && !validPlugins.contains(value)) { - throw new IllegalArgumentException("Invalid backup provider plugin: " + value + ". 
Valid plugin values are: " + String.join(", ", validPlugins)); - } - } } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java index feeac18a68f2..1cdbe75b169e 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java @@ -79,6 +79,10 @@ public interface BackupProvider { */ Pair takeBackup(VirtualMachine vm, Boolean quiesceVM); + default Pair takeBackup(VirtualMachine vm, Boolean quiesceVM, Long backupScheduleId) { + return takeBackup(vm, quiesceVM); + } + /** * Delete an existing backup * @param backup The backup to exclude @@ -170,4 +174,50 @@ default boolean supportsMemoryVmSnapshot() { * update commvault backup plan */ boolean updateBackupPlan(Long zoneId, String retentionPeriod, String externalId); + + default boolean supportsBackgroundSync() { + return true; + } + + default boolean supportsBackupMetricsSync() { + return true; + } + + default boolean supportsOutOfBandBackupSync() { + return true; + } + + default boolean supportsProviderManagedBackupAgents() { + return false; + } + + default boolean supportsRetentionPlanUpdate() { + return false; + } + + default boolean supportsVolumeLevelChainState() { + return false; + } + + default boolean supportsRestorePlan() { + return false; + } + + default boolean supportsRestoreChainValidation() { + return false; + } + + default boolean supportsPostRestoreMaintenance() { + return false; + } + + default void runPostRestoreMaintenance(VirtualMachine vm, Backup backup, boolean volumeOnly) { + } + + default boolean supportsBackgroundChainValidation() { + return false; + } + + default void validateChains(Long zoneId) { + } } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupProviderNameUtils.java b/api/src/main/java/org/apache/cloudstack/backup/BackupProviderNameUtils.java new file mode 100644 index 
000000000000..9788398cc3d4 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupProviderNameUtils.java @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import org.apache.commons.lang3.StringUtils; + +public final class BackupProviderNameUtils { + public static final String NAS = "nas"; + public static final String COMMVAULT = "commvault"; + public static final String ABLESTACK_NAS = "ablestack-nas"; + public static final String ABLESTACK_COMMVAULT = "ablestack-commvault"; + + private BackupProviderNameUtils() { + } + + public static String canonicalize(final String providerName) { + if (StringUtils.isBlank(providerName)) { + return providerName; + } + if (NAS.equalsIgnoreCase(providerName) || ABLESTACK_NAS.equalsIgnoreCase(providerName)) { + return ABLESTACK_NAS; + } + if (COMMVAULT.equalsIgnoreCase(providerName) || ABLESTACK_COMMVAULT.equalsIgnoreCase(providerName)) { + return ABLESTACK_COMMVAULT; + } + return providerName; + } + + public static String toDisplayName(final String providerName) { + if (StringUtils.isBlank(providerName)) { + return providerName; + } + if (ABLESTACK_NAS.equalsIgnoreCase(providerName) || 
NAS.equalsIgnoreCase(providerName)) { + return NAS; + } + if (ABLESTACK_COMMVAULT.equalsIgnoreCase(providerName) || COMMVAULT.equalsIgnoreCase(providerName)) { + return COMMVAULT; + } + return providerName; + } + + public static boolean isNasFamily(final String providerName) { + return ABLESTACK_NAS.equalsIgnoreCase(canonicalize(providerName)); + } + + public static boolean isCommvaultFamily(final String providerName) { + return ABLESTACK_COMMVAULT.equalsIgnoreCase(canonicalize(providerName)); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupRestorePlan.java b/api/src/main/java/org/apache/cloudstack/backup/BackupRestorePlan.java new file mode 100644 index 000000000000..5306bd7600ca --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupRestorePlan.java @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class BackupRestorePlan { + private List stages = new ArrayList<>(); + + public BackupRestorePlan() { + } + + public BackupRestorePlan(List stages) { + if (stages != null) { + this.stages = new ArrayList<>(stages); + } + } + + public List getStages() { + return stages == null ? Collections.emptyList() : stages; + } + + public void setStages(List stages) { + this.stages = stages; + } + + public boolean hasStage(BackupRestoreStage stage) { + return getStages().contains(stage); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupRestoreStage.java b/api/src/main/java/org/apache/cloudstack/backup/BackupRestoreStage.java new file mode 100644 index 000000000000..f5fe11ba5d4e --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupRestoreStage.java @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +public enum BackupRestoreStage { + PREPARE_SOURCE, + VALIDATE_CHAIN, + RESTORE_DATA, + ATTACH_VOLUME, + CLEANUP_SOURCE +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupVolumeChainState.java b/api/src/main/java/org/apache/cloudstack/backup/BackupVolumeChainState.java new file mode 100644 index 000000000000..eb8196bdaab8 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupVolumeChainState.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class BackupVolumeChainState { + private String volumeUuid; + private String backupEngine; + private List chainFiles = new ArrayList<>(); + + public BackupVolumeChainState() { + } + + public BackupVolumeChainState(String volumeUuid, String backupEngine, List chainFiles) { + this.volumeUuid = volumeUuid; + this.backupEngine = backupEngine; + if (chainFiles != null) { + this.chainFiles = new ArrayList<>(chainFiles); + } + } + + public String getVolumeUuid() { + return volumeUuid; + } + + public void setVolumeUuid(String volumeUuid) { + this.volumeUuid = volumeUuid; + } + + public String getBackupEngine() { + return backupEngine; + } + + public void setBackupEngine(String backupEngine) { + this.backupEngine = backupEngine; + } + + public List getChainFiles() { + return chainFiles == null ? Collections.emptyList() : chainFiles; + } + + public void setChainFiles(List chainFiles) { + this.chainFiles = chainFiles; + } +} diff --git a/client/pom.xml b/client/pom.xml index 0b2ae1bb50b1..9f05050af42d 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -614,7 +614,12 @@ org.apache.cloudstack - cloud-plugin-backup-commvault + cloud-plugin-backup-ablestack-nas + ${project.version} + + + org.apache.cloudstack + cloud-plugin-backup-ablestack-commvault ${project.version} diff --git a/core/src/main/java/org/apache/cloudstack/backup/CommvaultRestoreBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultRestoreBackupCommand.java similarity index 73% rename from core/src/main/java/org/apache/cloudstack/backup/CommvaultRestoreBackupCommand.java rename to core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultRestoreBackupCommand.java index fbcff2070801..e6795eaae539 100644 --- a/core/src/main/java/org/apache/cloudstack/backup/CommvaultRestoreBackupCommand.java +++ 
b/core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultRestoreBackupCommand.java @@ -26,10 +26,14 @@ import java.util.List; -public class CommvaultRestoreBackupCommand extends Command { +public class AblestackCommvaultRestoreBackupCommand extends Command { private String vmName; private String backupPath; private List backupVolumesUUIDs; + private List backupFiles; + private List backupFileChains; + private List volumeChainStates; + private BackupRestorePlan restorePlan; private List restoreVolumePools; private List restoreVolumePaths; private String diskType; @@ -39,8 +43,9 @@ public class CommvaultRestoreBackupCommand extends Command { private Integer timeout; private String cacheMode; private String hostName; + private List backupSourceHosts; - protected CommvaultRestoreBackupCommand() { + protected AblestackCommvaultRestoreBackupCommand() { super(); } @@ -124,6 +129,38 @@ public void setBackupVolumesUUIDs(List backupVolumesUUIDs) { this.backupVolumesUUIDs = backupVolumesUUIDs; } + public List getBackupFiles() { + return backupFiles; + } + + public void setBackupFiles(List backupFiles) { + this.backupFiles = backupFiles; + } + + public List getBackupFileChains() { + return backupFileChains; + } + + public void setBackupFileChains(List backupFileChains) { + this.backupFileChains = backupFileChains; + } + + public List getVolumeChainStates() { + return volumeChainStates; + } + + public void setVolumeChainStates(List volumeChainStates) { + this.volumeChainStates = volumeChainStates; + } + + public BackupRestorePlan getRestorePlan() { + return restorePlan; + } + + public void setRestorePlan(BackupRestorePlan restorePlan) { + this.restorePlan = restorePlan; + } + public Integer getTimeout() { return this.timeout == null ? 
0 : this.timeout; } @@ -147,4 +184,12 @@ public String getHostName() { public void setHostName(String hostName) { this.hostName = hostName; } + + public List getBackupSourceHosts() { + return backupSourceHosts; + } + + public void setBackupSourceHosts(List backupSourceHosts) { + this.backupSourceHosts = backupSourceHosts; + } } diff --git a/core/src/main/java/org/apache/cloudstack/backup/CommvaultTakeBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultTakeBackupCommand.java similarity index 58% rename from core/src/main/java/org/apache/cloudstack/backup/CommvaultTakeBackupCommand.java rename to core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultTakeBackupCommand.java index f24f41d98675..5c22a8d8cfc7 100644 --- a/core/src/main/java/org/apache/cloudstack/backup/CommvaultTakeBackupCommand.java +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultTakeBackupCommand.java @@ -24,14 +24,20 @@ import java.util.List; -public class CommvaultTakeBackupCommand extends Command { +public class AblestackCommvaultTakeBackupCommand extends Command { private String vmName; private String backupPath; private List volumePools; private List volumePaths; private Boolean quiesce; - - public CommvaultTakeBackupCommand(String vmName, String backupPath) { + private String backupType; + private String checkpointName; + private String parentBackupPath; + private String parentCheckpointName; + private String parentCheckpointPath; + private List backupFiles; + + public AblestackCommvaultTakeBackupCommand(String vmName, String backupPath) { super(); this.vmName = vmName; this.backupPath = backupPath; @@ -77,6 +83,54 @@ public void setQuiesce(Boolean quiesce) { this.quiesce = quiesce; } + public String getBackupType() { + return backupType; + } + + public void setBackupType(String backupType) { + this.backupType = backupType; + } + + public String getCheckpointName() { + return checkpointName; + } + + public void 
setCheckpointName(String checkpointName) { + this.checkpointName = checkpointName; + } + + public String getParentBackupPath() { + return parentBackupPath; + } + + public void setParentBackupPath(String parentBackupPath) { + this.parentBackupPath = parentBackupPath; + } + + public String getParentCheckpointName() { + return parentCheckpointName; + } + + public void setParentCheckpointName(String parentCheckpointName) { + this.parentCheckpointName = parentCheckpointName; + } + + public String getParentCheckpointPath() { + return parentCheckpointPath; + } + + public void setParentCheckpointPath(String parentCheckpointPath) { + this.parentCheckpointPath = parentCheckpointPath; + } + + public List getBackupFiles() { + return backupFiles; + } + + public void setBackupFiles(List backupFiles) { + this.backupFiles = backupFiles; + } + @Override public boolean executeInSequence() { return true; diff --git a/core/src/main/java/org/apache/cloudstack/backup/AblestackDeleteBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackDeleteBackupCommand.java new file mode 100644 index 000000000000..7d892b22a12c --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackDeleteBackupCommand.java @@ -0,0 +1,113 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; + +public class AblestackDeleteBackupCommand extends Command { + private String backupPath; + private String backupRepoType; + private String backupRepoAddress; + private String backupProvider; + private String checkpointName; + private String diskPaths; + private boolean forced; + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + + public AblestackDeleteBackupCommand(String backupPath, String backupRepoType, String backupRepoAddress, String mountOptions, boolean forced) { + super(); + this.backupPath = backupPath; + this.backupRepoType = backupRepoType; + this.backupRepoAddress = backupRepoAddress; + this.mountOptions = mountOptions; + this.forced = forced; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public String getBackupProvider() { + return backupProvider; + } + + public void setBackupProvider(String backupProvider) { + this.backupProvider = backupProvider; + } + + public String getCheckpointName() { + return checkpointName; + } + + public void setCheckpointName(String checkpointName) { + this.checkpointName = checkpointName; + } + + public String getDiskPaths() { + return diskPaths; + } + + public void setDiskPaths(String diskPaths) { + this.diskPaths = diskPaths; + } + + public String getMountOptions() { + return mountOptions == null ? 
"" : mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public boolean isForced() { + return forced; + } + + public void setForced(boolean forced) { + this.forced = forced; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/AblestackNasRestoreBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackNasRestoreBackupCommand.java new file mode 100644 index 000000000000..9ff75545caca --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackNasRestoreBackupCommand.java @@ -0,0 +1,203 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; + +import java.util.List; + +public class AblestackNasRestoreBackupCommand extends Command { + private String vmName; + private String backupPath; + private String backupRepoType; + private String backupRepoAddress; + private List backupVolumesUUIDs; + private List restoreVolumePools; + private List restoreVolumePaths; + private List volumePaths; + private List backupFiles; + private List backupFileChains; + private List volumeChainStates; + private BackupRestorePlan restorePlan; + private String diskType; + private Boolean vmExists; + private VirtualMachine.State vmState; + private Integer mountTimeout; + private String cacheMode; + + protected AblestackNasRestoreBackupCommand() { + super(); + } + + public String getVmName() { + return vmName; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public List getRestoreVolumePools() { + return restoreVolumePools; + } + + public void setRestoreVolumePools(List restoreVolumePools) { + this.restoreVolumePools = restoreVolumePools; + } + + public List getRestoreVolumePaths() { + return restoreVolumePaths; + } + + public void setRestoreVolumePaths(List restoreVolumePaths) { + this.restoreVolumePaths = restoreVolumePaths; + } + + public List getVolumePaths() { + return 
volumePaths; + } + + public void setVolumePaths(List volumePaths) { + this.volumePaths = volumePaths; + } + + public List getBackupFiles() { + return backupFiles; + } + + public void setBackupFiles(List backupFiles) { + this.backupFiles = backupFiles; + } + + public List getBackupFileChains() { + return backupFileChains; + } + + public void setBackupFileChains(List backupFileChains) { + this.backupFileChains = backupFileChains; + } + + public List getVolumeChainStates() { + return volumeChainStates; + } + + public void setVolumeChainStates(List volumeChainStates) { + this.volumeChainStates = volumeChainStates; + } + + public BackupRestorePlan getRestorePlan() { + return restorePlan; + } + + public void setRestorePlan(BackupRestorePlan restorePlan) { + this.restorePlan = restorePlan; + } + + public Boolean isVmExists() { + return vmExists; + } + + public void setVmExists(Boolean vmExists) { + this.vmExists = vmExists; + } + + public String getDiskType() { + return diskType; + } + + public void setDiskType(String diskType) { + this.diskType = diskType; + } + + public String getMountOptions() { + return mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public VirtualMachine.State getVmState() { + return vmState; + } + + public void setVmState(VirtualMachine.State vmState) { + this.vmState = vmState; + } + + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + @Override + + public boolean executeInSequence() { + return true; + } + + public List getBackupVolumesUUIDs() { + return backupVolumesUUIDs; + } + + public void setBackupVolumesUUIDs(List backupVolumesUUIDs) { + this.backupVolumesUUIDs = backupVolumesUUIDs; + } + + public Integer getMountTimeout() { + return this.mountTimeout == null ? 
0 : this.mountTimeout; + } + + public void setMountTimeout(Integer mountTimeout) { + this.mountTimeout = mountTimeout; + } + + public String getCacheMode() { + return cacheMode; + } + + public void setCacheMode(String cacheMode) { + this.cacheMode = cacheMode; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/AblestackNasTakeBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackNasTakeBackupCommand.java new file mode 100644 index 000000000000..56b132176913 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackNasTakeBackupCommand.java @@ -0,0 +1,167 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; + +import java.util.List; + +public class AblestackNasTakeBackupCommand extends Command { + private String vmName; + private String backupPath; + private String backupType; + private String checkpointName; + private String parentBackupPath; + private String parentCheckpointName; + private String parentCheckpointPath; + private String backupRepoType; + private String backupRepoAddress; + private List volumePools; + private List volumePaths; + private List backupFiles; + private Boolean quiesce; + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + + public AblestackNasTakeBackupCommand(String vmName, String backupPath) { + super(); + this.vmName = vmName; + this.backupPath = backupPath; + } + + public String getVmName() { + return vmName; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupType() { + return backupType; + } + + public void setBackupType(String backupType) { + this.backupType = backupType; + } + + public String getCheckpointName() { + return checkpointName; + } + + public void setCheckpointName(String checkpointName) { + this.checkpointName = checkpointName; + } + + public String getParentBackupPath() { + return parentBackupPath; + } + + public void setParentBackupPath(String parentBackupPath) { + this.parentBackupPath = parentBackupPath; + } + + public String getParentCheckpointName() { + return parentCheckpointName; + } + + public void setParentCheckpointName(String parentCheckpointName) { + this.parentCheckpointName = parentCheckpointName; + } + + public String getParentCheckpointPath() { + return parentCheckpointPath; + } + + public void 
setParentCheckpointPath(String parentCheckpointPath) { + this.parentCheckpointPath = parentCheckpointPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public String getMountOptions() { + return mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public List getVolumePools() { + return volumePools; + } + + public void setVolumePools(List volumePools) { + this.volumePools = volumePools; + } + + public List getVolumePaths() { + return volumePaths; + } + + public void setVolumePaths(List volumePaths) { + this.volumePaths = volumePaths; + } + + public List getBackupFiles() { + return backupFiles; + } + + public void setBackupFiles(List backupFiles) { + this.backupFiles = backupFiles; + } + + public Boolean getQuiesce() { + return quiesce; + } + + public void setQuiesce(Boolean quiesce) { + this.quiesce = quiesce; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java index de79e8c18e1f..4ba41f9e3569 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java @@ -24,6 +24,7 @@ import com.cloud.domain.dao.DomainDao; import org.apache.cloudstack.api.response.BackupOfferingResponse; import org.apache.cloudstack.backup.BackupOffering; +import org.apache.cloudstack.backup.BackupProviderNameUtils; import org.apache.cloudstack.backup.BackupOfferingVO; 
import com.cloud.dc.DataCenterVO; @@ -67,7 +68,7 @@ public BackupOfferingResponse newBackupOfferingResponse(BackupOffering offering, response.setName(offering.getName()); response.setDescription(offering.getDescription()); response.setExternalId(offering.getExternalId()); - response.setProvider(offering.getProvider()); + response.setProvider(BackupProviderNameUtils.toDisplayName(offering.getProvider())); response.setUserDrivenBackups(offering.isUserDrivenBackupAllowed()); if (zone != null) { response.setZoneId(zone.getUuid()); @@ -91,7 +92,7 @@ public BackupOfferingResponse newBackupOfferingResponse(BackupOffering offering, if (offering.getRetentionPeriod() != null) { response.setRetentionPeriod(offering.getRetentionPeriod()); } - response.setProvider(offering.getProvider()); + response.setProvider(BackupProviderNameUtils.toDisplayName(offering.getProvider())); response.setCreated(offering.getCreated()); response.setObjectName("backupoffering"); return response; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql index ff9b1f7ed02e..8750f5e1faab 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql @@ -41,6 +41,13 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.ldap_configuration', 'uuid', 'VARCHA -- Populate uuid for existing rows where uuid is NULL or empty UPDATE `cloud`.`ldap_configuration` SET uuid = UUID() WHERE uuid IS NULL OR uuid = ''; +-- Add vm_id column to usage_event table for volume usage events +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.usage_event','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with volume usage events"'); +CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_event','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with volume usage events"'); + +-- Add vm_id column to cloud_usage.usage_volume table +CALL 
`cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_volume','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with the volume usage"'); + -- Add the column cross_zone_instance_creation to cloud.backup_repository. if enabled it means that new Instance can be created on all Zones from Backups on this Repository. CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_repository', 'cross_zone_instance_creation', 'TINYINT(1) DEFAULT NULL COMMENT ''Backup Repository can be used for disaster recovery on another zone'''); diff --git a/plugins/backup/ablestack-commvault/pom.xml b/plugins/backup/ablestack-commvault/pom.xml new file mode 100644 index 000000000000..6b622e7a5a6e --- /dev/null +++ b/plugins/backup/ablestack-commvault/pom.xml @@ -0,0 +1,54 @@ + + + 4.0.0 + cloud-plugin-backup-ablestack-commvault + Ablestack Plugin - KVM Commvault Backup and Recovery Plugin + + cloudstack-plugins + org.apache.cloudstack + 4.22.0.0-SNAPSHOT + ../../pom.xml + + + + org.apache.cloudstack + cloud-plugin-hypervisor-kvm + ${project.version} + + + org.apache.commons + commons-lang3 + ${cs.commons-lang3.version} + + + com.fasterxml.jackson.core + jackson-databind + ${cs.jackson.version} + + + com.github.tomakehurst + wiremock-standalone + ${cs.wiremock.version} + test + + + diff --git a/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultBackupProvider.java b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultBackupProvider.java new file mode 100644 index 000000000000..14b138c83185 --- /dev/null +++ b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultBackupProvider.java @@ -0,0 +1,2017 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import com.cloud.agent.AgentManager; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.domain.Domain; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.offering.DiskOffering; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.Volume; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeApiServiceImpl; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.User; +import com.cloud.user.Account; +import com.cloud.user.AccountService; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; +import com.cloud.utils.ssh.SshHelper; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.exception.CloudRuntimeException; +import 
com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.backup.commvault.AblestackCommvaultClient; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupDetailsDao; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.backup.dao.BackupOfferingDaoImpl; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.xml.utils.URI; +import org.json.JSONObject; +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import 
java.util.HashMap; +import java.util.Date; +import java.util.Objects; +import java.util.UUID; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.Collections; +import java.util.Comparator; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import javax.inject.Inject; + +import static org.apache.cloudstack.backup.BackupManager.BackupChainSize; +import static org.apache.cloudstack.backup.BackupManager.BackupFrameworkEnabled; +import static org.apache.cloudstack.backup.BackupManager.KvmIncrementalBackup; + +public class AblestackCommvaultBackupProvider extends AdapterBase implements BackupProvider, Configurable { + + private static final Logger LOG = LogManager.getLogger(AblestackCommvaultBackupProvider.class); + private static final String BACKUP_TYPE_FULL = "FULL"; + private static final String BACKUP_TYPE_INCREMENTAL = "INCREMENTAL"; + private static final String BACKUP_ENGINE_QCOW2 = "QCOW2"; + private static final String BACKUP_ENGINE_RBD_DIFF = "RBD_DIFF"; + private static final String DETAIL_CHECKPOINT_NAME = "commvault.checkpoint.name"; + private static final String DETAIL_CHECKPOINT_PATH = "commvault.checkpoint.path"; + private static final String DETAIL_PARENT_BACKUP_UUID = "commvault.parent.backup.uuid"; + private static final String DETAIL_PARENT_BACKUP_PATH = "commvault.parent.backup.path"; + private static final String DETAIL_PARENT_CHECKPOINT_NAME = "commvault.parent.checkpoint.name"; + private static final String DETAIL_PARENT_CHECKPOINT_PATH = "commvault.parent.checkpoint.path"; + private static final String DETAIL_BACKUP_ENGINE = "commvault.backup.engine"; + private static final String DETAIL_RBD_DISK_PATHS = "commvault.rbd.disk.paths"; + private static final String MISSING_PARENT_RBD_SNAPSHOT_ERROR = "Parent RBD snapshot"; + private static final String DETAIL_STAGE_HOST = "commvault.stage.host"; + private static final String DETAIL_CHAIN_SEALED = "commvault.chain.sealed"; + private static final String 
DETAIL_CHAIN_SEAL_REASON = "commvault.chain.seal.reason"; + private static final String DETAIL_FALLBACK_VOLUME_UUIDS = "commvault.fallback.volume.uuids"; + private static final String RM_COMMAND = "rm -rf %s"; + private static final String DF_AVAILABLE_COMMAND = "df -B1 --output=avail %s | tail -n 1"; + private static final int BASE_MAJOR = 11; + private static final int BASE_FR = 32; + private static final int BASE_MT = 89; + private static final Pattern VERSION_PATTERN = Pattern.compile("^(\\d+)\\s*SP\\s*(\\d+)(?:\\.(\\d+))?$", Pattern.CASE_INSENSITIVE); + private static final String COMMVAULT_DIRECTORY = "/tmp/mold/backup"; + private static final long STAGE_SPACE_BUFFER_BYTES = 10L * 1024L * 1024L * 1024L; + + public ConfigKey CommvaultUrl = new ConfigKey<>("Advanced", String.class, + "backup.plugin.commvault.url", "https://localhost/commandcenter/api", + "Commvault Command Center API URL.", true, ConfigKey.Scope.Zone); + + private ConfigKey CommvaultUsername = new ConfigKey<>("Advanced", String.class, + "backup.plugin.commvault.username", "admin", + "Commvault Command Center API username.", true, ConfigKey.Scope.Zone); + + private ConfigKey CommvaultPassword = new ConfigKey<>("Secure", String.class, + "backup.plugin.commvault.password", "password", + "Commvault Command Center API password.", true, ConfigKey.Scope.Zone); + + private ConfigKey CommvaultValidateSSLSecurity = new ConfigKey<>("Advanced", Boolean.class, + "backup.plugin.commvault.validate.ssl", "false", + "Validate the SSL certificate when connecting to Commvault Command Center API service.", true, ConfigKey.Scope.Zone); + + private ConfigKey CommvaultApiRequestTimeout = new ConfigKey<>("Advanced", Integer.class, + "backup.plugin.commvault.request.timeout", "300", + "Commvault Command Center API request timeout in seconds.", true, ConfigKey.Scope.Zone); + + private static ConfigKey CommvaultRestoreTimeout = new ConfigKey<>("Advanced", Integer.class, + "backup.plugin.commvault.restore.timeout", "600", 
+ "Commvault B&R API restore backup timeout in seconds.", true, ConfigKey.Scope.Zone); + + private static ConfigKey CommvaultTaskPollInterval = new ConfigKey<>("Advanced", Integer.class, + "backup.plugin.commvault.task.poll.interval", "5", + "The time interval in seconds when the management server polls for Commvault task status.", true, ConfigKey.Scope.Zone); + + private static ConfigKey CommvaultTaskPollMaxRetry = new ConfigKey<>("Advanced", Integer.class, + "backup.plugin.commvault.task.poll.max.retry", "120", + "The max number of retrying times when the management server polls for Commvault task status.", true, ConfigKey.Scope.Zone); + + private ConfigKey CommvaultClientVerboseLogs = new ConfigKey<>("Advanced", Boolean.class, + "backup.plugin.commvault.client.verbosity", "false", + "Produce Verbose logs in Hypervisor", true, ConfigKey.Scope.Zone); + + private ConfigKey CommvaultBackupRestoreTimeout = new ConfigKey<>("Advanced", Integer.class, + "commvault.backup.restore.timeout", + "1800", + "Timeout in seconds after which Commvault backup restore operations fail.", + true, + BackupFrameworkEnabled.key()); + + @Inject + private BackupDao backupDao; + + @Inject + private BackupDetailsDao backupDetailsDao; + + @Inject + private BackupOfferingDao backupOfferingDao; + + @Inject + private HostDao hostDao; + + @Inject + private ClusterDao clusterDao; + + @Inject + private VolumeDao volumeDao; + + @Inject + private SnapshotDao snapshotDao; + + @Inject + private SnapshotDataStoreDao snapshotStoreDao; + + @Inject + private StoragePoolHostDao storagePoolHostDao; + + @Inject + private VMInstanceDao vmInstanceDao; + + @Inject + private AccountService accountService; + + @Inject + DataStoreManager dataStoreMgr; + + @Inject + private AgentManager agentManager; + + @Inject + private VMSnapshotDao vmSnapshotDao; + + @Inject + private VMSnapshotDetailsDao vmSnapshotDetailsDao; + + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + + @Inject + private ConfigurationDao 
configDao; + + @Inject + private BackupManager backupManager; + + @Inject + ResourceManager resourceManager; + + @Inject + private DiskOfferingDao diskOfferingDao; + + + private Long getClusterIdFromRootVolume(VirtualMachine vm) { + VolumeVO rootVolume = volumeDao.getInstanceRootVolume(vm.getId()); + StoragePoolVO rootDiskPool = primaryDataStoreDao.findById(rootVolume.getPoolId()); + if (rootDiskPool == null) { + return null; + } + return rootDiskPool.getClusterId(); + } + + protected Host getVMHypervisorHost(VirtualMachine vm) { + Long hostId = vm.getLastHostId(); + Long clusterId = null; + + if (hostId != null) { + Host host = hostDao.findById(hostId); + if (host.getStatus() == Status.Up) { + return host; + } + // Try to find any Up host in the same cluster + clusterId = host.getClusterId(); + } else { + // Try to find any Up host in the same cluster as the root volume + clusterId = getClusterIdFromRootVolume(vm); + } + + if (clusterId != null) { + for (final Host hostInCluster : hostDao.findHypervisorHostInCluster(clusterId)) { + if (hostInCluster.getStatus() == Status.Up) { + LOG.debug("Found Host {} in cluster {}", hostInCluster, clusterId); + return hostInCluster; + } + } + } + + // Try to find any Host in the zone + return resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, vm.getDataCenterId()); + } + + protected Host getVMHypervisorHostForBackup(VirtualMachine vm) { + Long hostId = vm.getHostId(); + if (hostId == null && VirtualMachine.State.Running.equals(vm.getState())) { + throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for %s. 
Make sure the virtual machine is running", vm.getName())); + } + if (VirtualMachine.State.Stopped.equals(vm.getState())) { + hostId = vm.getLastHostId(); + } + if (hostId == null) { + throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for stopped VM: %s", vm)); + } + final Host host = hostDao.findById(hostId); + if (host == null || !Status.Up.equals(host.getStatus()) || !Hypervisor.HypervisorType.KVM.equals(host.getHypervisorType())) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } + return host; + } + + @Override + public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { + return takeBackup(vm, quiesceVM, null); + } + + @Override + public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM, Long backupScheduleId) { + final Host vmHost = getVMHypervisorHostForBackup(vm); + final HostVO vmHostVO = hostDao.findById(vmHost.getId()); + validateNoKvmFileBasedVmSnapshots(vm); + + try { + String commvaultServer = getUrlDomain(CommvaultUrl.value()); + } catch (URISyntaxException e) { + throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); + } + // 백업 중인 작업 조회 + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); + boolean activeJob = client.getActiveJob(vm.getInstanceName()); + if (activeJob) { + throw new CloudRuntimeException("There are backup jobs running on the virtual machine. 
Please try again later."); + } + + BackupOfferingVO vmBackupOffering = new BackupOfferingDaoImpl().findById(vm.getBackupOfferingId()); + String planId = vmBackupOffering.getExternalId(); + + // 클라이언트의 백업세트 조회하여 호스트 정의 + String checkVm = client.getVmBackupSetId(vmHost.getName(), vm.getInstanceName()); + if (checkVm == null) { + String clientId = client.getClientId(vmHost.getName()); + String applicationId = client.getApplicationId(clientId); + boolean result = client.createBackupSet(vm.getInstanceName(), applicationId, clientId, planId); + if (!result) { + throw new CloudRuntimeException("Execution of the API that creates a backup set of a virtual machine on the host failed."); + } + } + + final String backupPath = buildBackupPath(vm); + final String backupContentPath = buildBackupContentPath(vm); + List vmVolumes = volumeDao.findByInstance(vm.getId()); + vmVolumes.sort(Comparator.comparing(Volume::getDeviceId)); + Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(vmVolumes); + validateVolumePoolTypes(volumePoolsAndPaths.first()); + final Backup latestBackup = getLatestBackedUpBackup(vm); + final boolean incrementalBackup = shouldUseIncrementalBackup(vm, latestBackup, vmHost, vmVolumes, backupScheduleId); + BackupExecutionResult result = executeBackup(vm, quiesceVM, vmHost, vmHostVO, client, planId, backupPath, backupContentPath, vmVolumes, volumePoolsAndPaths, + latestBackup, incrementalBackup, incrementalBackup && vmVolumes.size() > 1); + if (!result.success && incrementalBackup && shouldRetryAsFullAfterIncrementalFailure(result, vmVolumes)) { + cleanupFailedBackupForFullRetry(result.backup); + LOG.warn("Incremental backup failed for VM [{}] due to [{}]. 
Retrying as full backup.", vm, result.details); + String fallbackBackupPath = buildBackupPath(vm); + result = executeBackup(vm, quiesceVM, vmHost, vmHostVO, client, planId, fallbackBackupPath, backupContentPath, vmVolumes, volumePoolsAndPaths, + null, false, false); + } + return new Pair<>(result.success, result.backup); + } + + private Backup getLatestBackedUpBackup(VirtualMachine vm) { + List backups = backupDao.listByVmId(null, vm.getId()); + return backups.stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(b -> Backup.Status.BackedUp.equals(b.getStatus())) + .peek(backupDao::loadDetails) + .max(Comparator.comparing(BackupVO::getDate)) + .orElse(null); + } + + private boolean shouldUseIncrementalBackup(VirtualMachine vm, Backup latestBackup, Host vmHost, List vmVolumes, Long backupScheduleId) { + if (latestBackup == null) { + return false; + } + loadBackupDetailsIfNeeded(latestBackup); + + if (backupScheduleId != null && !hasBackedUpBackupForSchedule(backupScheduleId)) { + return false; + } + + Long clusterId = getClusterIdFromRootVolume(vm); + if (clusterId == null) { + return false; + } + + if (!Boolean.TRUE.equals(KvmIncrementalBackup.valueIn(clusterId))) { + return false; + } + + if (!hasHealthyIncrementalSource(latestBackup)) { + markVolumeFallbackAndSeal(latestBackup, "unhealthy-chain"); + return false; + } + if (!canContinueIncrementalChain(vm, latestBackup, vmHost)) { + sealBackupChain(latestBackup, "stage-host-mismatch"); + return false; + } + if (getBackupChainSize(vm, latestBackup) >= BackupChainSize.value()) { + sealBackupChain(latestBackup, "chain-size-limit"); + return false; + } + return true; + } + + private boolean hasBackedUpBackupForSchedule(Long backupScheduleId) { + return backupDao.listBySchedule(backupScheduleId).stream() + .anyMatch(backup -> Backup.Status.BackedUp.equals(backup.getStatus())); + } + + private boolean canContinueIncrementalChain(VirtualMachine vm, Backup latestBackup, Host vmHost) { + 
final String backupEngine = getBackupDetail(latestBackup, DETAIL_BACKUP_ENGINE); + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + LOG.debug("Allowing Commvault incremental backup for VM [{}] on host [{}] using RBD chain from previous stage host [{}]", + vm.getInstanceName(), vmHost.getName(), getBackupDetail(latestBackup, DETAIL_STAGE_HOST)); + return true; + } + + String stageHost = getBackupDetail(latestBackup, DETAIL_STAGE_HOST); + return Objects.equals(stageHost, vmHost.getName()); + } + + private int getBackupChainSize(VirtualMachine vm, Backup latestBackup) { + List backups = backupDao.listByVmId(null, vm.getId()).stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(backup -> Backup.Status.BackedUp.equals(backup.getStatus())) + .peek(backupDao::loadDetails) + .collect(Collectors.toList()); + Map backupsByUuid = backups.stream().collect(Collectors.toMap(BackupVO::getUuid, backup -> (Backup) backup, (left, right) -> left)); + return AblestackBackupFrameworkUtils.getBackupChainSize(latestBackup, backupsByUuid, + current -> getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID)); + } + + private boolean hasHealthyIncrementalSource(Backup latestBackup) { + try { + return AblestackBackupFrameworkUtils.hasUsableVolumeChainStates(getVolumeChainStates(latestBackup.getBackedUpVolumes(), latestBackup)); + } catch (Exception e) { + LOG.warn("Latest Commvault backup chain [{}] is not healthy enough for incremental reuse: {}", latestBackup.getUuid(), e.getMessage()); + return false; + } + } + + private void markVolumeFallbackAndSeal(Backup latestBackup, String reason) { + List unhealthyVolumeUuids = listUnhealthyVolumeUuids(latestBackup); + if (!unhealthyVolumeUuids.isEmpty()) { + updateBackupDetail(latestBackup, DETAIL_FALLBACK_VOLUME_UUIDS, String.join(",", unhealthyVolumeUuids)); + } + sealBackupChain(latestBackup, reason); + } + + private List listUnhealthyVolumeUuids(Backup backup) { + List unhealthy = new ArrayList<>(); + if 
(backup == null || CollectionUtils.isEmpty(backup.getBackedUpVolumes())) { + return unhealthy; + } + for (Backup.VolumeInfo volumeInfo : backup.getBackedUpVolumes()) { + List chainFiles = AblestackBackupFrameworkUtils.sanitizeChainFiles(getBackupChain(volumeInfo, backup)); + if (chainFiles.isEmpty()) { + unhealthy.add(volumeInfo.getUuid()); + } + } + return unhealthy; + } + + private void sealBackupChain(Backup backup, String reason) { + updateBackupDetail(backup, DETAIL_CHAIN_SEALED, "true"); + updateBackupDetail(backup, DETAIL_CHAIN_SEAL_REASON, reason); + } + + private void updateBackupDetail(Backup backup, String key, String value) { + if (backup == null || StringUtils.isBlank(key)) { + return; + } + backupDetailsDao.removeDetail(backup.getId(), key); + backupDetailsDao.addDetail(backup.getId(), key, value, false); + if (backup instanceof BackupVO) { + backupDao.loadDetails((BackupVO) backup); + } + } + + @Override + public boolean supportsProviderManagedBackupAgents() { + return true; + } + + @Override + public boolean supportsRetentionPlanUpdate() { + return true; + } + + private boolean hasDependentBackups(Backup backup) { + List backups = backupDao.listByVmId(null, backup.getVmId()); + return backups.stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(candidate -> !Objects.equals(candidate.getId(), backup.getId())) + .peek(backupDao::loadDetails) + .anyMatch(candidate -> Objects.equals(getBackupDetail(candidate, DETAIL_PARENT_BACKUP_UUID), backup.getUuid())); + } + + private BackupVO createBackupObject(VirtualMachine vm, String backupPath, String backupType, Map details) { + BackupVO backup = new BackupVO(); + backup.setVmId(vm.getId()); + backup.setExternalId(backupPath); + backup.setType(backupType); + backup.setDate(new Date()); + long virtualSize = 0L; + for (final Volume volume: volumeDao.findByInstance(vm.getId())) { + if (Volume.State.Ready.equals(volume.getState())) { + virtualSize += volume.getSize(); + } + } + 
backup.setProtectedSize(virtualSize); + backup.setStatus(Backup.Status.BackingUp); + backup.setBackupOfferingId(vm.getBackupOfferingId()); + backup.setAccountId(vm.getAccountId()); + backup.setDomainId(vm.getDomainId()); + backup.setZoneId(vm.getDataCenterId()); + backup.setName(backupManager.getBackupNameFromVM(vm)); + backup.setDetails(details); + + return backupDao.persist(backup); + } + + private Map getBackupDetails(VirtualMachine vm, String backupPath, String checkpointName, String backupEngine, Backup latestBackup, + boolean incrementalBackup, String stageHost) { + Map details = backupManager.getBackupDetailsFromVM(vm); + details.put(DETAIL_BACKUP_ENGINE, backupEngine); + details.put(DETAIL_STAGE_HOST, stageHost); + details.put(DETAIL_CHECKPOINT_NAME, checkpointName); + details.put(DETAIL_CHECKPOINT_PATH, getCheckpointPath(backupPath, checkpointName, backupEngine)); + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + details.put(DETAIL_RBD_DISK_PATHS, String.join(",", getVolumePoolsAndPaths(volumeDao.findByInstance(vm.getId())).second())); + } + if (!incrementalBackup) { + return details; + } + + details.put(DETAIL_PARENT_BACKUP_UUID, latestBackup.getUuid()); + details.put(DETAIL_PARENT_BACKUP_PATH, latestBackup.getExternalId().substring(0, latestBackup.getExternalId().lastIndexOf(','))); + details.put(DETAIL_PARENT_CHECKPOINT_NAME, getBackupDetail(latestBackup, DETAIL_CHECKPOINT_NAME)); + details.put(DETAIL_PARENT_CHECKPOINT_PATH, getBackupDetail(latestBackup, DETAIL_CHECKPOINT_PATH)); + return details; + } + + private String getCheckpointPath(String backupPath, String checkpointName, String backupEngine) { + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + return String.format("%s/checkpoints/%s.meta", backupPath, checkpointName); + } + return String.format("%s/checkpoints/%s.xml", backupPath, checkpointName); + } + + private String getBackupDetail(Backup backup, String key) { + return backup == null ? 
null : backup.getDetail(key); + } + + private String getBackupDetail(Backup backup, String key, String defaultValue) { + String value = getBackupDetail(backup, key); + return value == null ? defaultValue : value; + } + + private Pair parseExternalId(String externalId) { + if (StringUtils.isBlank(externalId)) { + throw new CloudRuntimeException("Backup externalId is empty"); + } + + final int separatorIndex = externalId.lastIndexOf(','); + if (separatorIndex < 0) { + throw new CloudRuntimeException(String.format("Invalid Commvault backup externalId format: [%s]", externalId)); + } + + final String path = externalId.substring(0, separatorIndex); + final String jobId = externalId.substring(separatorIndex + 1).trim(); + if (StringUtils.isAnyBlank(path, jobId)) { + throw new CloudRuntimeException(String.format("Invalid Commvault backup externalId format: [%s]", externalId)); + } + return new Pair<>(path, jobId); + } + + private BackupExecutionResult executeBackup(VirtualMachine vm, Boolean quiesceVM, Host vmHost, HostVO vmHostVO, AblestackCommvaultClient client, + String planId, String backupPath, String backupContentPath, List vmVolumes, + Pair, List> volumePoolsAndPaths, Backup latestBackup, + boolean incrementalBackup, boolean retryAsFullOnFailure) { + final String checkpointName = backupPath.substring(backupPath.lastIndexOf("/") + 1); + final String backupEngine = areAllVolumesOnRbdPool(volumePoolsAndPaths.first()) ? BACKUP_ENGINE_RBD_DIFF : BACKUP_ENGINE_QCOW2; + final String requestedBackupType = incrementalBackup ? 
BACKUP_TYPE_INCREMENTAL : BACKUP_TYPE_FULL; + final List backupFiles = buildBackupFileNames(vmVolumes, backupEngine, incrementalBackup); + final Map backupDetails = getBackupDetails(vm, backupPath, checkpointName, backupEngine, latestBackup, + BACKUP_TYPE_INCREMENTAL.equalsIgnoreCase(requestedBackupType), vmHost.getName()); + + BackupVO backupVO = createBackupObject(vm, backupPath, requestedBackupType, backupDetails); + AblestackCommvaultTakeBackupCommand command = new AblestackCommvaultTakeBackupCommand(vm.getInstanceName(), backupPath); + command.setQuiesce(quiesceVM); + command.setVolumePools(volumePoolsAndPaths.first()); + command.setVolumePaths(volumePoolsAndPaths.second()); + command.setBackupType(requestedBackupType); + command.setCheckpointName(checkpointName); + command.setBackupFiles(backupFiles); + if (incrementalBackup && latestBackup != null) { + command.setParentBackupPath(getBackupDetail(latestBackup, DETAIL_PARENT_BACKUP_PATH, + latestBackup.getExternalId().substring(0, latestBackup.getExternalId().lastIndexOf(',')))); + command.setParentCheckpointName(getBackupDetail(latestBackup, DETAIL_CHECKPOINT_NAME)); + command.setParentCheckpointPath(getBackupDetail(latestBackup, DETAIL_CHECKPOINT_PATH)); + } + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(vmHost.getId(), command); + } catch (AgentUnavailableException e) { + LOG.error("Unable to contact backend control plane to initiate backup for VM {}", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + LOG.error("Operation to initiate backup timed out for VM {}", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + } + + if (answer != null 
&& answer.getResult()) { + int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); + Ternary credentials = getKVMHyperisorCredentials(vmHostVO); + String cmd = String.format(RM_COMMAND, backupPath); + String clientId = client.getClientId(vmHost.getName()); + String subClientEntity = client.getSubclient(clientId, vm.getInstanceName()); + if (subClientEntity == null) { + LOG.error("Failed to take backup for VM {} to get subclient info commvault api", vm.getInstanceName()); + } else { + JSONObject jsonObject = new JSONObject(subClientEntity); + String subclientId = String.valueOf(jsonObject.get("subclientId")); + String applicationId = String.valueOf(jsonObject.get("applicationId")); + String backupsetId = String.valueOf(jsonObject.get("backupsetId")); + String instanceId = String.valueOf(jsonObject.get("instanceId")); + String backupsetName = String.valueOf(jsonObject.get("backupsetName")); + String displayName = String.valueOf(jsonObject.get("displayName")); + String commCellName = String.valueOf(jsonObject.get("commCellName")); + String companyId = String.valueOf(jsonObject.getJSONObject("entityInfo").get("companyId")); + String companyName = String.valueOf(jsonObject.getJSONObject("entityInfo").get("companyName")); + String instanceName = String.valueOf(jsonObject.get("instanceName")); + String appName = String.valueOf(jsonObject.get("appName")); + String clientName = String.valueOf(jsonObject.get("clientName")); + String subclientGUID = String.valueOf(jsonObject.get("subclientGUID")); + String subclientName = String.valueOf(jsonObject.get("subclientName")); + String csGUID = String.valueOf(jsonObject.get("csGUID")); + boolean upResult = client.updateBackupSet(backupContentPath, subclientId, clientId, planId, applicationId, backupsetId, instanceId, subclientName, backupsetName); + if (upResult) { + String planName = client.getPlanName(planId); + String storagePolicyId = client.getStoragePolicyId(planName); + if (planName == null || 
storagePolicyId == null) { + LOG.error("Failed to take backup for VM {} to get storage policy id commvault api", vm.getInstanceName()); + } else { + String jobId = client.createBackup(subclientId, storagePolicyId, displayName, commCellName, clientId, companyId, companyName, instanceName, appName, + applicationId, clientName, backupsetId, instanceId, subclientGUID, subclientName, csGUID, backupsetName, requestedBackupType); + if (jobId != null) { + String jobStatus = client.getJobStatus(jobId); + String externalId = backupPath + "," + jobId; + if (jobStatus.equalsIgnoreCase("Completed")) { + String jobDetails = client.getJobDetails(jobId); + if (jobDetails != null) { + JSONObject jsonObject2 = new JSONObject(jobDetails); + String endTime = String.valueOf(jsonObject2.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("endTime")); + long timestamp = Long.parseLong(endTime) * 1000L; + Date endDate = new Date(timestamp); + SimpleDateFormat formatterDateTime = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); + String formattedString = formatterDateTime.format(endDate); + String size = String.valueOf(jsonObject2.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("sizeOfApplication")); + String type = String.valueOf(jsonObject2.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").get("backupType")); + backupVO.setExternalId(externalId); + backupVO.setType(type.toUpperCase()); + try { + backupVO.setDate(formatterDateTime.parse(formattedString)); + } catch (ParseException e) { + String msg = String.format("Unable to parse date [%s].", endTime); + LOG.error(msg, e); + throw new CloudRuntimeException(msg, e); + } + backupVO.setSize(Long.parseLong(size)); + backupVO.setStatus(Backup.Status.BackedUp); + backupVO.setDetails(backupDetails); + backupVO.setBackedUpVolumes(createVolumeInfoFromVolumes(vmVolumes, backupFiles)); + if (backupDao.update(backupVO.getId(), backupVO)) { + return 
BackupExecutionResult.success(backupVO); + } + throw new CloudRuntimeException("Failed to update backup"); + } + backupVO.setExternalId(externalId); + LOG.error("Failed to take backup for VM {} to get details job commvault api", vm.getInstanceName()); + } else { + backupVO.setExternalId(externalId); + LOG.error("Failed to take backup for VM {} to create backup job status is {}", vm.getInstanceName(), jobStatus); + } + } else { + LOG.error("Failed to take backup for VM {} to create backup job commvault api", vm.getInstanceName()); + } + } + } else { + LOG.error("Failed to take backup for VM {} to update backupset content path commvault api", vm.getInstanceName()); + } + } + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, cmd); + return BackupExecutionResult.failure("Failed to complete Commvault backup job", backupVO); + } + + final String details = answer != null ? answer.getDetails() : "No answer received"; + LOG.error("Failed to take backup for VM {}: {}", vm.getInstanceName(), details); + if (retryAsFullOnFailure) { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } else if (answer != null && answer.getNeedsCleanup()) { + LOG.error("Backup cleanup failed for VM {}. 
Leaving the backup in Error state.", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Error); + backupDao.update(backupVO.getId(), backupVO); + } else { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } + return BackupExecutionResult.failure(details, backupVO); + } + + private boolean shouldRetryAsFullAfterIncrementalFailure(BackupExecutionResult result, List vmVolumes) { + if (result == null || result.success) { + return false; + } + if (StringUtils.contains(result.details, MISSING_PARENT_RBD_SNAPSHOT_ERROR)) { + return true; + } + return vmVolumes.size() > 1; + } + + private void cleanupFailedBackupForFullRetry(Backup backup) { + if (backup == null) { + return; + } + backupDao.remove(backup.getId()); + } + + private static final class BackupExecutionResult { + private final boolean success; + private final Backup backup; + private final String details; + + private BackupExecutionResult(boolean success, Backup backup, String details) { + this.success = success; + this.backup = backup; + this.details = details; + } + + private static BackupExecutionResult success(Backup backup) { + return new BackupExecutionResult(true, backup, null); + } + + private static BackupExecutionResult failure(String details, Backup backup) { + return new BackupExecutionResult(false, backup, details); + } + } + + private String buildBackupPath(VirtualMachine vm) { + return String.format("%s/%s/%s", COMMVAULT_DIRECTORY, vm.getInstanceName(), + new SimpleDateFormat("yyyy.MM.dd.HH.mm.ss.SSS").format(new Date())); + } + + private String buildBackupContentPath(VirtualMachine vm) { + return String.format("%s/%s", COMMVAULT_DIRECTORY, vm.getInstanceName()); + } + + private void validateVolumePoolTypes(List volumePools) { + boolean hasRbd = volumePools.stream().anyMatch(pool -> pool.getPoolType() == Storage.StoragePoolType.RBD); + boolean hasNonRbd = volumePools.stream().anyMatch(pool -> pool.getPoolType() != Storage.StoragePoolType.RBD); + if (hasRbd 
&& hasNonRbd) { + throw new CloudRuntimeException("Commvault incremental backup does not support VMs with mixed RBD and non-RBD volumes"); + } + } + + private boolean areAllVolumesOnRbdPool(List volumePools) { + return volumePools.stream().allMatch(pool -> pool.getPoolType() == Storage.StoragePoolType.RBD); + } + + private List buildBackupFileNames(List volumes, String backupEngine, boolean incrementalBackup) { + List backupFiles = new ArrayList<>(); + for (VolumeVO volume : volumes) { + String suffix; + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + suffix = incrementalBackup ? ".rbdiff" : ".raw"; + } else { + suffix = ".qcow2"; + } + backupFiles.add(String.format("volume-%s%s", volume.getUuid(), suffix)); + } + return backupFiles; + } + + private String createVolumeInfoFromVolumes(List volumes, List backupFiles) { + List infoList = new ArrayList<>(); + for (int i = 0; i < volumes.size(); i++) { + VolumeVO vol = volumes.get(i); + DiskOffering diskOffering = diskOfferingDao.findById(vol.getDiskOfferingId()); + String diskOfferingUuid = diskOffering != null ? 
diskOffering.getUuid() : null; + infoList.add(new Backup.VolumeInfo(vol.getUuid(), backupFiles.get(i), vol.getVolumeType(), vol.getSize(), + vol.getDeviceId(), diskOfferingUuid, vol.getMinIops(), vol.getMaxIops())); + } + return new com.google.gson.Gson().toJson(infoList.toArray(), Backup.VolumeInfo[].class); + } + + private List getBackupFileChains(List backupVolumes, Backup backup) { + return backupVolumes.stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(volume -> getBackupFileChain(volume, backup)) + .collect(Collectors.toList()); + } + + private String getBackupFileChain(Backup.VolumeInfo backupVolume, Backup backup) { + loadBackupDetailsIfNeeded(backup); + List chain = getBackupChain(backupVolume, backup); + return String.join(";", chain); + } + + private List getVolumeChainStates(List backupVolumes, Backup backup) { + String backupEngine = getBackupDetail(backup, DETAIL_BACKUP_ENGINE); + List volumeChainStates = backupVolumes.stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(volume -> new BackupVolumeChainState(volume.getUuid(), backupEngine, + AblestackBackupFrameworkUtils.sanitizeChainFiles(getBackupChain(volume, backup)))) + .collect(Collectors.toList()); + AblestackBackupFrameworkUtils.validateVolumeChainStates(volumeChainStates); + return volumeChainStates; + } + + private BackupRestorePlan createRestorePlan(boolean attachRequired) { + return AblestackBackupFrameworkUtils.createRestorePlan(attachRequired, true); + } + + @Override + public boolean supportsVolumeLevelChainState() { + return true; + } + + @Override + public boolean supportsRestorePlan() { + return true; + } + + @Override + public boolean supportsRestoreChainValidation() { + return true; + } + + @Override + public boolean supportsPostRestoreMaintenance() { + return true; + } + + @Override + public void runPostRestoreMaintenance(VirtualMachine vm, Backup backup, boolean volumeOnly) { + if (backup == null || 
CollectionUtils.isEmpty(backup.getBackedUpVolumes())) { + return; + } + loadBackupDetailsIfNeeded(backup); + final List chainStates = getVolumeChainStates(backup.getBackedUpVolumes(), backup); + AblestackBackupFrameworkUtils.validateVolumeChainStates(chainStates); + LOG.debug("Completed Commvault post-restore maintenance for VM [{}], backup [{}], volumeOnly=[{}]", vm != null ? vm.getInstanceName() : null, + backup.getUuid(), volumeOnly); + } + + @Override + public boolean supportsBackgroundChainValidation() { + return true; + } + + @Override + public void validateChains(Long zoneId) { + final List vmIdsWithBackups = backupDao.listVmIdsWithBackupsInZone(zoneId); + if (CollectionUtils.isEmpty(vmIdsWithBackups)) { + return; + } + for (final Long vmId : vmIdsWithBackups) { + final Backup latestBackup = getLatestBackedUpBackupForProvider(zoneId, vmId); + if (latestBackup == null) { + continue; + } + loadBackupDetailsIfNeeded(latestBackup); + if (Boolean.parseBoolean(getBackupDetail(latestBackup, DETAIL_CHAIN_SEALED))) { + continue; + } + if (!hasHealthyIncrementalSource(latestBackup)) { + markVolumeFallbackAndSeal(latestBackup, "background-chain-validation"); + LOG.warn("Sealed Commvault backup chain [{}] during background validation in zone [{}]", latestBackup.getUuid(), zoneId); + } + } + } + + private Backup getLatestBackedUpBackupForProvider(Long zoneId, Long vmId) { + return backupDao.listByVmId(zoneId, vmId).stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(backup -> Backup.Status.BackedUp.equals(backup.getStatus())) + .filter(backup -> { + BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); + return offering != null && Objects.equals(getName(), offering.getProvider()); + }) + .peek(backupDao::loadDetails) + .max(Comparator.comparing(BackupVO::getDate)) + .orElse(null); + } + + private List getBackupChain(Backup.VolumeInfo backupVolume, Backup backup) { + 
loadBackupDetailsIfNeeded(backup); + List chain = new ArrayList<>(); + Backup current = backup; + while (current != null) { + loadBackupDetailsIfNeeded(current); + Backup.VolumeInfo currentVolumeInfo = current.getBackedUpVolumes().stream() + .filter(volume -> Objects.equals(volume.getUuid(), backupVolume.getUuid())) + .findFirst() + .orElse(null); + if (currentVolumeInfo == null) { + break; + } + chain.add(0, getRestoreBackupFilePath(current, currentVolumeInfo)); + if (StringUtils.endsWith(currentVolumeInfo.getPath(), ".raw")) { + break; + } + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (parentBackupUuid == null) { + break; + } + current = backupDao.findByUuid(parentBackupUuid); + } + if (chain.isEmpty()) { + chain.add(backupVolume.getPath()); + } + return chain; + } + + private LinkedHashMap getBackupChainStageHosts(Backup backup) { + LinkedHashMap stageHosts = new LinkedHashMap<>(); + Backup current = backup; + while (current != null) { + loadBackupDetailsIfNeeded(current); + String stageHost = getBackupDetail(current, DETAIL_STAGE_HOST); + if (StringUtils.isNotBlank(stageHost)) { + stageHosts.putIfAbsent(stageHost, current); + } + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (parentBackupUuid == null) { + break; + } + current = backupDao.findByUuid(parentBackupUuid); + } + return stageHosts; + } + + private List getRestoreSourcePathsForStageHost(Backup backup, String stageHost) { + if (!BACKUP_ENGINE_RBD_DIFF.equals(getBackupDetail(backup, DETAIL_BACKUP_ENGINE))) { + return Collections.singletonList(getRestoreBackupRootPath(backup)); + } + + List restoreSourcePaths = new ArrayList<>(); + Backup current = backup; + while (current != null) { + loadBackupDetailsIfNeeded(current); + String currentStageHost = getBackupDetail(current, DETAIL_STAGE_HOST); + if (Objects.equals(currentStageHost, stageHost)) { + String backupPath = parseExternalId(current.getExternalId()).first(); + if 
(!restoreSourcePaths.contains(backupPath)) { + restoreSourcePaths.add(0, backupPath); + } + } + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (parentBackupUuid == null) { + break; + } + current = backupDao.findByUuid(parentBackupUuid); + } + + if (restoreSourcePaths.isEmpty()) { + restoreSourcePaths.add(getRestoreBackupRootPath(backup)); + } + return restoreSourcePaths; + } + + private void loadBackupDetailsIfNeeded(Backup backup) { + if (backup instanceof BackupVO && backup.getDetails() == null) { + backupDao.loadDetails((BackupVO) backup); + } + } + + private String getRestoreBackupRootPath(Backup backup) { + final String backupPath = parseExternalId(backup.getExternalId()).first(); + if (BACKUP_ENGINE_RBD_DIFF.equals(getBackupDetail(backup, DETAIL_BACKUP_ENGINE))) { + return java.nio.file.Path.of(backupPath).getParent().toString(); + } + return backupPath; + } + + private String getRestoreBackupFilePath(Backup backup, Backup.VolumeInfo volumeInfo) { + final String backupPath = parseExternalId(backup.getExternalId()).first(); + final String filePath = volumeInfo.getPath(); + if (BACKUP_ENGINE_RBD_DIFF.equals(getBackupDetail(backup, DETAIL_BACKUP_ENGINE))) { + return java.nio.file.Path.of(backupPath).getFileName().resolve(filePath).toString(); + } + return filePath; + } + + private boolean isLegacyBackup(Backup backup) { + return getBackupDetail(backup, DETAIL_BACKUP_ENGINE) == null; + } + + private List restoreBackupSourcesOnAdditionalHosts(AblestackCommvaultClient client, Backup backup, String executionHostName) { + if (!BACKUP_ENGINE_RBD_DIFF.equals(getBackupDetail(backup, DETAIL_BACKUP_ENGINE))) { + return Collections.emptyList(); + } + + List additionalHosts = new ArrayList<>(); + for (Map.Entry entry : getBackupChainStageHosts(backup).entrySet()) { + String stageHost = entry.getKey(); + if (StringUtils.isBlank(stageHost) || Objects.equals(stageHost, executionHostName)) { + continue; + } + 
restoreBackupPathsOnStageHost(client, entry.getValue(), getRestoreSourcePathsForStageHost(backup, stageHost)); + additionalHosts.add(stageHost); + } + return additionalHosts; + } + + private void restoreBackupPathsOnStageHost(AblestackCommvaultClient client, Backup backup, List restoreSourcePaths) { + final Pair externalIdParts = parseExternalId(backup.getExternalId()); + final String jobId = externalIdParts.second(); + String jobDetails = client.getJobDetails(jobId); + if (jobDetails == null) { + throw new CloudRuntimeException("Failed to get job details commvault api"); + } + + JSONObject jsonObject = new JSONObject(jobDetails); + String endTime = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("endTime")); + String subclientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); + String displayName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("displayName")); + String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); + String companyId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyId")); + String companyName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyName")); + String instanceName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceName")); + String appName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("appName")); + String applicationId = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); + String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); + String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); + String instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); + String backupsetName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetName")); + String commCellId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("commcell").get("commCellId")); + String backupsetGUID = client.getVmBackupSetGuid(clientName, backupsetName); + if (backupsetGUID == null) { + throw new CloudRuntimeException("Failed to get vm backup set guid commvault api"); + } + + String restoreJobId = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, + appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, restoreSourcePaths); + if (restoreJobId == null) { + throw new CloudRuntimeException("Failed to restore Full VM commvault api"); + } + + String jobStatus = client.getJobStatus(restoreJobId); + if (!jobStatus.equalsIgnoreCase("Completed")) { + throw new CloudRuntimeException("Failed to restore Full VM commvault api resulted in " + jobStatus); + } + } + + private void cleanupBackupPathOnAdditionalHosts(List hostNames, String backupPath) { + if (hostNames == null || hostNames.isEmpty()) { + return; + } + int sshPort = 
NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); + String command = String.format(RM_COMMAND, backupPath); + for (String hostName : hostNames) { + if (StringUtils.isBlank(hostName)) { + continue; + } + HostVO host = hostDao.findByName(hostName); + if (host == null) { + continue; + } + try { + Ternary credentials = getKVMHyperisorCredentials(host); + executeDeleteBackupPathCommand(host, credentials.first(), credentials.second(), sshPort, command); + } catch (Exception e) { + LOG.warn("Failed to cleanup Commvault restore source path [{}] on host [{}]", backupPath, hostName, e); + } + } + } + + private String getLegacyBackupFileName(Backup.VolumeInfo backupVolumeInfo) { + String diskType = Volume.Type.ROOT.equals(backupVolumeInfo.getType()) ? "root" : "datadisk"; + return String.format("%s.%s.qcow2", diskType, backupVolumeInfo.getUuid()); + } + + // 백업에서 새 인스턴스 생성 + @Override + public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + return restoreVMBackup(vm, backup); + } + + // 가상머신 백업 복원 + @Override + public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { + return restoreVMBackup(vm, backup).first(); + } + + private Pair restoreVMBackup(VirtualMachine vm, Backup backup) { + validateNoKvmFileBasedVmSnapshots(vm); + loadBackupDetailsIfNeeded(backup); + try { + String commvaultServer = getUrlDomain(CommvaultUrl.value()); + } catch (URISyntaxException e) { + throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); + } + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); + final String externalId = backup.getExternalId(); + final Pair externalIdParts = parseExternalId(externalId); + final String path = externalIdParts.first(); + final String restoreSourcePath = getRestoreBackupRootPath(backup); + final String jobId = externalIdParts.second(); + String jobDetails = client.getJobDetails(jobId); + if (jobDetails == null) { + throw new 
CloudRuntimeException("Failed to get job details commvault api"); + } + JSONObject jsonObject = new JSONObject(jobDetails); + String endTime = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("endTime")); + String subclientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); + String displayName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("displayName")); + String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); + String companyId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyId")); + String companyName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyName")); + String instanceName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceName")); + String appName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("appName")); + String applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); + String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); + String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); + String 
instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); + String backupsetName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetName")); + String commCellId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("commcell").get("commCellId")); + String backupsetGUID = client.getVmBackupSetGuid(clientName, backupsetName); + if (backupsetGUID == null) { + throw new CloudRuntimeException("Failed to get vm backup set guid commvault api"); + } + // 복원된 호스트 정의 + final HostVO restoreHost = hostDao.findByName(clientName); + final HostVO restoreHostVO = hostDao.findById(restoreHost.getId()); + final List additionalSourceHosts = restoreBackupSourcesOnAdditionalHosts(client, backup, clientName); + final List restoreSourcePaths = getRestoreSourcePathsForStageHost(backup, clientName); + LOG.info(String.format("Restoring vm %s from backup %s on the Commvault Backup Provider", vm, backup)); + try { + String jobId2 = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, restoreSourcePaths); + if (jobId2 != null) { + String jobStatus = client.getJobStatus(jobId2); + if (jobStatus.equalsIgnoreCase("Completed")) { + List backedVolumesUUIDs = backup.getBackedUpVolumes().stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(Backup.VolumeInfo::getUuid) + .collect(Collectors.toList()); + + List restoreVolumes = volumeDao.findByInstance(vm.getId()).stream() + .sorted(Comparator.comparingLong(VolumeVO::getDeviceId)) + .collect(Collectors.toList()); + + LOG.debug("Restoring vm {} from backup {} on the Commvault Backup Provider", vm, backup); + 
AblestackCommvaultRestoreBackupCommand restoreCommand = new AblestackCommvaultRestoreBackupCommand(); + LOG.info(restoreSourcePath); + restoreCommand.setBackupPath(restoreSourcePath); + restoreCommand.setVmName(vm.getName()); + restoreCommand.setBackupVolumesUUIDs(backedVolumesUUIDs); + if (isLegacyBackup(backup)) { + restoreCommand.setBackupFiles(backup.getBackedUpVolumes().stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(this::getLegacyBackupFileName) + .collect(Collectors.toList())); + } else { + restoreCommand.setBackupFiles(backup.getBackedUpVolumes().stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(volume -> getRestoreBackupFilePath(backup, volume)) + .collect(Collectors.toList())); + restoreCommand.setBackupFileChains(getBackupFileChains(backup.getBackedUpVolumes(), backup)); + } + restoreCommand.setVolumeChainStates(getVolumeChainStates(backup.getBackedUpVolumes(), backup)); + Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(restoreVolumes); + restoreCommand.setRestoreVolumePools(volumePoolsAndPaths.first()); + restoreCommand.setRestoreVolumePaths(volumePoolsAndPaths.second()); + restoreCommand.setVmExists(vm.getRemoved() == null); + restoreCommand.setVmState(vm.getState()); + restoreCommand.setRestorePlan(createRestorePlan(false)); + restoreCommand.setTimeout(CommvaultBackupRestoreTimeout.value()); + restoreCommand.setHostName(null); + restoreCommand.setBackupSourceHosts(additionalSourceHosts); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(restoreHost.getId(), restoreCommand); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to restore backup timed out, please try again"); + } + if (!answer.getResult()) { + int sshPort = 
NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); + Ternary credentials = getKVMHyperisorCredentials(restoreHostVO); + String command = String.format(RM_COMMAND, restoreSourcePath); + executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); + } + return new Pair<>(answer.getResult(), answer.getDetails()); + } else { + throw new CloudRuntimeException("Failed to restore Full VM commvault api resulted in " + jobStatus); + } + } else { + throw new CloudRuntimeException("Failed to restore Full VM commvault api"); + } + } finally { + cleanupBackupPathOnAdditionalHosts(additionalSourceHosts, restoreSourcePath); + } + } + + private Pair, List> getVolumePoolsAndPaths(List volumes) { + List volumePools = new ArrayList<>(); + List volumePaths = new ArrayList<>(); + for (VolumeVO volume : volumes) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(volume.getPoolId()); + if (Objects.isNull(storagePool)) { + throw new CloudRuntimeException("Unable to find storage pool associated to the volume"); + } + + DataStore dataStore = dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary); + volumePools.add(dataStore != null ? 
(PrimaryDataStoreTO)dataStore.getTO() : null); + + String volumePathPrefix = getVolumePathPrefix(storagePool); + volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath())); + } + return new Pair<>(volumePools, volumePaths); + } + + private String getVolumePathPrefix(StoragePoolVO storagePool) { + String volumePathPrefix; + if (ScopeType.HOST.equals(storagePool.getScope()) || + Storage.StoragePoolType.SharedMountPoint.equals(storagePool.getPoolType()) || + Storage.StoragePoolType.RBD.equals(storagePool.getPoolType())) { + volumePathPrefix = storagePool.getPath(); + } else { + // Should be Storage.StoragePoolType.NetworkFilesystem + volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid()); + } + return volumePathPrefix; + } + + // 백업 볼륨 복원 및 연결 + @Override + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + loadBackupDetailsIfNeeded(backup); + try { + String commvaultServer = getUrlDomain(CommvaultUrl.value()); + } catch (URISyntaxException e) { + throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); + } + final String externalId = backup.getExternalId(); + final Long zoneId = backup.getZoneId(); + final AblestackCommvaultClient client = getClient(zoneId); + final Pair externalIdParts = parseExternalId(externalId); + final String path = externalIdParts.first(); + final String restoreSourcePath = getRestoreBackupRootPath(backup); + final String jobId = externalIdParts.second(); + String jobDetails = client.getJobDetails(jobId); + if (jobDetails == null) { + throw new CloudRuntimeException("Failed to get job details commvault api"); + } + JSONObject jsonObject = new JSONObject(jobDetails); + String endTime = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("endTime")); + String subclientId = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); + String displayName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("displayName")); + String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); + String companyId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyId")); + String companyName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyName")); + String instanceName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceName")); + String appName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("appName")); + String applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); + String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); + String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); + String instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); + String backupsetName = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetName")); + String commCellId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("commcell").get("commCellId")); + String backupsetGUID = client.getVmBackupSetGuid(clientName, backupsetName); + if (backupsetGUID == null) { + throw new CloudRuntimeException("Failed to get vm backup set guid commvault api"); + } + final List restoreSourcePaths = getRestoreSourcePathsForStageHost(backup, clientName); + final List additionalSourceHosts = restoreBackupSourcesOnAdditionalHosts(client, backup, clientName); + try { + ensureStageHostHasCapacityForRestore(backup, clientName, restoreSourcePaths); + String jobId2 = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, restoreSourcePaths); + if (jobId2 != null) { + String jobStatus = client.getJobStatus(jobId2); + if (jobStatus.equalsIgnoreCase("Completed")) { + final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); + final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); + if (diskOffering == null) { + throw new CloudRuntimeException(String.format("Unable to find disk offering [%s] for backed up volume [%s]", + backupVolumeInfo.getDiskOfferingId(), backupVolumeInfo.getUuid())); + } + final Backup.VolumeInfo matchingVolume = getBackedUpVolumeInfo(backup.getBackedUpVolumes(), backupVolumeInfo.getUuid()) + .orElseThrow(() -> new CloudRuntimeException(String.format( + "Unable to find volume %s in the list of backed up volumes for backup %s, cannot proceed with restore", + backupVolumeInfo.getUuid(), backup))); + String cacheMode = null; + final VMInstanceVO vm = 
vmInstanceDao.findVMByInstanceName(vmNameAndState.first()); + List listVolumes = volumeDao.findByInstanceAndType(vm.getId(), Type.ROOT); + if(CollectionUtils.isNotEmpty(listVolumes)) { + VolumeVO rootDisk = listVolumes.get(0); + DiskOffering baseDiskOffering = diskOfferingDao.findById(rootDisk.getDiskOfferingId()); + if (baseDiskOffering.getCacheMode() != null) { + cacheMode = baseDiskOffering.getCacheMode().toString(); + } + } + StoragePoolVO pool = primaryDataStoreDao.findByUuid(dataStoreUuid); + if (pool == null) { + List pools = primaryDataStoreDao.findPoolByName(dataStoreUuid); + if (CollectionUtils.isNotEmpty(pools)) { + pool = pools.get(0); + } + } + if (pool == null) { + throw new CloudRuntimeException(String.format("Unable to find primary storage pool for restore target [%s]", dataStoreUuid)); + } + HostVO vmHost = hostDao.findByIp(hostIp); + if (vmHost == null) { + vmHost = hostDao.findByName(hostIp); + } + if (vmHost == null) { + throw new CloudRuntimeException(String.format("Unable to find VM host [%s] for Commvault volume restore", hostIp)); + } + // 복원된 호스트 정의 + HostVO restoreHost = hostDao.findByName(clientName); + if (restoreHost == null) { + restoreHost = hostDao.findByIp(clientName); + } + if (restoreHost == null) { + throw new CloudRuntimeException(String.format("Unable to find restore host [%s] for Commvault volume restore", clientName)); + } + final HostVO restoreHostVO = hostDao.findById(restoreHost.getId()); + LOG.info(String.format("Restoring volume %s from backup %s on the Commvault Backup Provider", backupVolumeInfo.getUuid(), backup)); + LOG.debug("Restoring vm volume {} from backup {} on the Commvault Backup Provider", backupVolumeInfo, backup); + VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), + backup.getDomainId(), backup.getAccountId(), 0, null, + backup.getSize(), null, null, null); + String volumeUUID = UUID.randomUUID().toString(); + String volumeName = volume != null ? 
volume.getName() : backupVolumeInfo.getUuid(); + restoredVolume.setName("RestoredVol-" + volumeName); + restoredVolume.setProvisioningType(diskOffering.getProvisioningType()); + restoredVolume.setUpdated(new Date()); + restoredVolume.setUuid(volumeUUID); + restoredVolume.setRemoved(null); + restoredVolume.setDisplayVolume(true); + restoredVolume.setPoolId(pool.getId()); + restoredVolume.setPoolType(pool.getPoolType()); + restoredVolume.setPath(restoredVolume.getUuid()); + restoredVolume.setState(Volume.State.Copying); + restoredVolume.setSize(backupVolumeInfo.getSize()); + restoredVolume.setDiskOfferingId(diskOffering.getId()); + if (pool.getPoolType() != Storage.StoragePoolType.RBD) { + restoredVolume.setFormat(Storage.ImageFormat.QCOW2); + } else { + restoredVolume.setFormat(Storage.ImageFormat.RAW); + } + + AblestackCommvaultRestoreBackupCommand restoreCommand = new AblestackCommvaultRestoreBackupCommand(); + restoreCommand.setBackupPath(restoreSourcePath); + restoreCommand.setVmName(vmNameAndState.first()); + restoreCommand.setBackupFiles(Collections.singletonList(isLegacyBackup(backup) ? 
getLegacyBackupFileName(matchingVolume) : getRestoreBackupFilePath(backup, matchingVolume))); + if (!isLegacyBackup(backup)) { + restoreCommand.setBackupFileChains(Collections.singletonList(getBackupFileChain(matchingVolume, backup))); + } + restoreCommand.setVolumeChainStates(getVolumeChainStates(Collections.singletonList(matchingVolume), backup)); + restoreCommand.setRestoreVolumePaths(Collections.singletonList(String.format("%s/%s", getVolumePathPrefix(pool), volumeUUID))); + DataStore dataStore = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + if (dataStore == null) { + throw new CloudRuntimeException(String.format("Unable to get primary datastore TO for pool [%s] while restoring volume [%s]", + pool.getUuid(), backupVolumeInfo.getUuid())); + } + restoreCommand.setRestoreVolumePools(Collections.singletonList((PrimaryDataStoreTO) dataStore.getTO())); + restoreCommand.setDiskType(matchingVolume.getType().name().toLowerCase(Locale.ROOT)); + restoreCommand.setVmExists(null); + restoreCommand.setVmState(vmNameAndState.second()); + restoreCommand.setRestoreVolumeUUID(backupVolumeInfo.getUuid()); + restoreCommand.setRestorePlan(createRestorePlan(AblestackBackupFrameworkUtils.requiresRunningVmAttach(vmNameAndState.second()))); + restoreCommand.setTimeout(CommvaultBackupRestoreTimeout.value()); + restoreCommand.setCacheMode(cacheMode); + restoreCommand.setHostName(restoreHost.getName()); + restoreCommand.setBackupSourceHosts(additionalSourceHosts); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(vmHost.getId(), restoreCommand); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to restore backed up volume timed out, please try again"); + } + + if (answer.getResult()) { + try { + volumeDao.persist(restoredVolume); + } catch (Exception e) { + throw new 
CloudRuntimeException("Unable to create restored volume due to: " + e); + } + LOG.info("Successfully restored volume {} from backup {} on the Commvault Backup Provider. Restored volume UUID: {}", + backupVolumeInfo.getUuid(), backup, restoredVolume.getUuid()); + return new Pair<>(answer.getResult(), answer.getDetails()); + } else { + final int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); + Ternary credentials = getKVMHyperisorCredentials(restoreHostVO); + String command = String.format(RM_COMMAND, restoreSourcePath); + executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); + return new Pair<>(false, StringUtils.defaultIfBlank(answer.getDetails(), + String.format("Restore agent returned failure for volume [%s] on host [%s]", backupVolumeInfo.getUuid(), restoreHost.getName()))); + } + } else { + String errorMessage = "Failed to restore backup for VM " + vmNameAndState.first() + " to restore backup job status is " + jobStatus; + LOG.error(errorMessage); + return new Pair<>(false, errorMessage); + } + } else { + String errorMessage = "Failed to restore backup for VM " + vmNameAndState.first() + " to restore backup job commvault api"; + LOG.error(errorMessage); + return new Pair<>(false, errorMessage); + } + } finally { + cleanupBackupPathOnAdditionalHosts(additionalSourceHosts, restoreSourcePath); + } + } + + private Optional getBackedUpVolumeInfo(List backedUpVolumes, String volumeUuid) { + return backedUpVolumes.stream() + .filter(v -> v.getUuid().equals(volumeUuid)) + .findFirst(); + } + + private void ensureStageHostHasCapacityForRestore(Backup backup, String clientName, List restoreSourcePaths) { + HostVO stageHost = hostDao.findByName(clientName); + if (stageHost == null) { + stageHost = hostDao.findByIp(clientName); + } + if (stageHost == null) { + throw new CloudRuntimeException(String.format("Unable to find stage host [%s] for Commvault restore capacity check", clientName)); + } + 
long requiredBytes = estimateRequiredStageBytesForRestore(backup, restoreSourcePaths); + long bufferBytes = Math.max(STAGE_SPACE_BUFFER_BYTES, requiredBytes / 5L); + long minimumAvailableBytes = requiredBytes + bufferBytes; + long availableBytes = getAvailableBytesOnHostPath(stageHost, COMMVAULT_DIRECTORY); + LOG.info("Checking Commvault restore stage capacity on host [{}]: required={} bytes, buffer={} bytes, minimumAvailable={} bytes, available={} bytes, sourcePaths={}", + stageHost.getName(), requiredBytes, bufferBytes, minimumAvailableBytes, availableBytes, restoreSourcePaths); + if (availableBytes < minimumAvailableBytes) { + throw new CloudRuntimeException(String.format( + "Insufficient stage space on host [%s] for Commvault restore. Required at least [%d] bytes including buffer, but only [%d] bytes are available under [%s].", + stageHost.getName(), minimumAvailableBytes, availableBytes, COMMVAULT_DIRECTORY)); + } + } + + private long estimateRequiredStageBytesForRestore(Backup backup, List restoreSourcePaths) { + if (CollectionUtils.isEmpty(restoreSourcePaths)) { + return Math.max(backup.getSize(), 0L); + } + long totalBytes = 0L; + Backup current = backup; + while (current != null) { + loadBackupDetailsIfNeeded(current); + String currentPath = parseExternalId(current.getExternalId()).first(); + if (restoreSourcePaths.contains(currentPath)) { + totalBytes += Math.max(current.getSize(), 0L); + } + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (StringUtils.isBlank(parentBackupUuid)) { + break; + } + current = backupDao.findByUuid(parentBackupUuid); + } + return totalBytes > 0 ? 
totalBytes : Math.max(backup.getSize(), 0L); + } + + private long getAvailableBytesOnHostPath(HostVO host, String path) { + final int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); + Ternary credentials = getKVMHyperisorCredentials(host); + String command = String.format(DF_AVAILABLE_COMMAND, path); + try { + Pair response = SshHelper.sshExecute(host.getPrivateIpAddress(), sshPort, + credentials.first(), null, credentials.second(), command, 120000, 120000, 3600000); + if (!response.first()) { + throw new CloudRuntimeException(String.format("Failed to query available stage space on host %s due to: %s", + host.getName(), response.second())); + } + String output = StringUtils.trimToEmpty(response.second()); + return Long.parseLong(output); + } catch (NumberFormatException e) { + throw new CloudRuntimeException(String.format("Failed to parse available stage space on host %s for path %s", host.getName(), path), e); + } catch (Exception e) { + throw new CloudRuntimeException(String.format("Failed to query available stage space on host %s due to: %s", host.getName(), e.getMessage()), e); + } + } + + @Override + public boolean deleteBackup(Backup backup, boolean forced) { + loadBackupDetailsIfNeeded(backup); + if (!forced && hasDependentBackups(backup)) { + throw new CloudRuntimeException(String.format("Backup [%s] cannot be deleted because one or more incremental backups depend on it.", backup.getUuid())); + } + final Long zoneId = backup.getZoneId(); + final String externalId = backup.getExternalId(); + final Pair externalIdParts = parseExternalId(externalId); + final String path = externalIdParts.first(); + final String jobId = externalIdParts.second(); + final AblestackCommvaultClient client = getClient(zoneId); + String jobDetails = client.getJobDetails(jobId); + if (jobDetails != null) { + JSONObject jsonObject = new JSONObject(jobDetails); + String subclientId = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); + String applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); + String instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); + String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); + String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); + String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); + // FIX(review): applicationId was passed twice while the extracted instanceId was never used — pass instanceId as the third argument. TODO confirm against AblestackCommvaultClient.deleteBackup's signature. + boolean result = client.deleteBackup(subclientId, applicationId, instanceId, clientId, clientName, backupsetId, path); + if (result) { + // Only remove the staged backup artifacts on the stage host once the Commvault-side delete succeeded. + cleanupBackupPathOnStageHost(clientName, path, forced, getBackupDetail(backup, DETAIL_CHECKPOINT_NAME), getBackupDetail(backup, DETAIL_RBD_DISK_PATHS)); + } + return result; + } else { + throw new CloudRuntimeException("Failed to request backup job detail commvault api"); + } + } + + public void syncBackupMetrics(Long zoneId) { + } + + @Override + public List listRestorePoints(VirtualMachine vm) { + return null; + } + + @Override + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm) { + return null; + } + + @Override + public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { + if (hasKvmFileBasedVmSnapshots(vm)) { + logger.warn("VM [{}] has VM snapshots using the KvmFileBasedStorageVmSnapshot Strategy; this provider does not support backups on 
VMs with these snapshots!", vm); + return false; + } + if (hasVolumeSnapshots(vm)) { + logger.warn("VM [{}] has volume snapshots; this provider does not support backups on VMs with volume snapshots!", vm); + return false; + } + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); + final Host host = getVMHypervisorHostForBackup(vm); + String clientId = client.getClientId(host.getName()); + String applicationId = client.getApplicationId(clientId); + return client.createBackupSet(vm.getInstanceName(), applicationId, clientId, backupOffering.getExternalId()); + } + + private void validateNoKvmFileBasedVmSnapshots(VirtualMachine vm) { + if (hasKvmFileBasedVmSnapshots(vm)) { + logger.warn("VM [{}] has VM snapshots using the KvmFileBasedStorageVmSnapshot Strategy; backup cannot be started.", vm); + throw new CloudRuntimeException(String.format("Cannot take backup of VM [%s] as it has KVM file-based VM snapshots.", vm.getUuid())); + } + if (hasVolumeSnapshots(vm)) { + logger.warn("VM [{}] has volume snapshots; backup cannot be started.", vm); + throw new CloudRuntimeException(String.format("Cannot take backup of VM [%s] as it has volume snapshots.", vm.getUuid())); + } + } + + private boolean hasKvmFileBasedVmSnapshots(VirtualMachine vm) { + for (VMSnapshotVO vmSnapshotVO : vmSnapshotDao.findByVmAndByType(vm.getId(), VMSnapshot.Type.Disk)) { + List vmSnapshotDetails = vmSnapshotDetailsDao.listDetails(vmSnapshotVO.getId()); + if (vmSnapshotDetails.stream().anyMatch(vmSnapshotDetailsVO -> VolumeApiServiceImpl.KVM_FILE_BASED_STORAGE_SNAPSHOT.equals(vmSnapshotDetailsVO.getName()))) { + return true; + } + } + return false; + } + + private boolean hasVolumeSnapshots(VirtualMachine vm) { + for (VolumeVO volume : volumeDao.findByInstance(vm.getId())) { + List snapshots = snapshotDao.listByVolumeId(volume.getId()); + if (snapshots.stream().anyMatch(snapshot -> !Snapshot.State.Destroyed.equals(snapshot.getState()))) { + return true; + } + } + return false; + } + 
+ @Override + public boolean removeVMFromBackupOffering(VirtualMachine vm) { + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); + List Hosts = hostDao.findByDataCenterId(vm.getDataCenterId()); + boolean allDeleted = true; + for (final HostVO host : Hosts) { + if (host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { + String backupSetId = client.getVmBackupSetId(host.getName(), vm.getInstanceName()); + if (backupSetId != null) { + boolean deleted = client.deleteBackupSet(backupSetId); + if (!deleted) { + allDeleted = false; + LOG.error("Failed to delete backupSetId: " + backupSetId +" for VM: " + vm.getInstanceName()); + } + } + } + } + return allDeleted; + } + + // 하위 클라이언트 삭제 시 백업본 데이터는 그대로 남아있지만, 해당 하위 클라이언트가 삭제되었기 때문에 스케줄도 삭제시켜야하며 + // 남아있는 백업본 데이터는 mold에서 관리하지 않고, commvault 의 plan 보존기간에 따라 데이터 에이징 됨. + @Override + public boolean willDeleteBackupsOnOfferingRemoval() { + return false; + } + + @Override + public boolean supportsInstanceFromBackup() { + return true; + } + + @Override + public boolean supportsMemoryVmSnapshot() { + return false; + } + + @Override + public Pair getBackupStorageStats(Long zoneId) { + return new Pair<>(0L, 0L); + } + + @Override + public void syncBackupStorageStats(Long zoneId) { + } + + @Override + public List listBackupOfferings(Long zoneId) { + return getClient(zoneId).listPlans(); + } + + @Override + public boolean isValidProviderOffering(Long zoneId, String uuid) { + List policies = listBackupOfferings(zoneId); + if (CollectionUtils.isEmpty(policies)) { + return false; + } + for (final BackupOffering policy : policies) { + if (policy.getExternalId().equals(uuid)) { + return true; + } + } + return false; + } + + @Override + public Boolean crossZoneInstanceCreationEnabled(BackupOffering backupOffering) { + return false; + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + CommvaultUrl, + CommvaultUsername, + CommvaultPassword, + CommvaultValidateSSLSecurity, + 
CommvaultApiRequestTimeout, + CommvaultClientVerboseLogs + }; + } + + @Override + public String getName() { + return "ablestack-commvault"; + } + + @Override + public String getDescription() { + return "Commvault Backup Plugin"; + } + + @Override + public String getConfigComponentName() { + return BackupService.class.getSimpleName(); + } + + @Override + public void syncBackups(VirtualMachine vm) { + try { + String commvaultServer = getUrlDomain(CommvaultUrl.value()); + } catch (URISyntaxException e) { + return; + } + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); + for (final Backup backup: backupDao.listByVmId(vm.getDataCenterId(), vm.getId())) { + loadBackupDetailsIfNeeded(backup); + final String externalId = backup.getExternalId(); + final Pair externalIdParts; + try { + externalIdParts = parseExternalId(externalId); + } catch (CloudRuntimeException e) { + LOG.warn("Skipping Commvault backup sync for backup [{}] due to invalid externalId [{}]", backup.getUuid(), externalId); + continue; + } + final String jobId = externalIdParts.second(); + final String path = externalIdParts.first(); + String jobDetails = client.getJobDetails(jobId); + if (jobDetails != null) { + JSONObject jsonObject = new JSONObject(jobDetails); + String retainedUntil = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").get("retainedUntil")); + String storagePolicyId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("storagePolicy").get("storagePolicyId")); + // FIX(review): use the injected backupOfferingDao — "new BackupOfferingDaoImpl()" creates an un-managed DAO with no persistence context, so findById can never hit the database. + BackupOfferingVO vmBackupOffering = backupOfferingDao.findById(vm.getBackupOfferingId()); + BackupOfferingVO offering = backupOfferingDao.createForUpdate(vmBackupOffering.getId()); + String retentionDay = client.getRetentionPeriod(storagePolicyId); + offering.setRetentionPeriod(retentionDay); + backupOfferingDao.update(offering.getId(), offering); + long timestamp = 
Long.parseLong(retainedUntil) * 1000L; + boolean isExpired = isRetentionExpired(retainedUntil); + if (isExpired) { + String subclientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); + String applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); + String instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); + String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); + String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); + String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); + boolean result = client.deleteBackup(subclientId, applicationId, applicationId, clientId, clientName, backupsetId, path); + if (result) { + cleanupBackupPathOnStageHost(clientName, path, false, getBackupDetail(backup, DETAIL_CHECKPOINT_NAME), getBackupDetail(backup, DETAIL_RBD_DISK_PATHS)); + backupDao.remove(backup.getId()); + } + } + } + } + return; + } + + @Override + public boolean checkBackupAgent(final Long zoneId) { + Map checkResult = new HashMap<>(); + final AblestackCommvaultClient client = getClient(zoneId); + String csVersionInfo = client.getCvtVersion(); + boolean version = versionCheck(csVersionInfo); + if (version) { + List Hosts = hostDao.findByDataCenterId(zoneId); + for (final HostVO host : Hosts) { + if (host.getStatus() == Status.Up && host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { + String 
checkHost = client.getClientId(host.getName()); + if (checkHost == null) { + return false; + } else { + boolean installJob = client.getInstallActiveJob(host.getPrivateIpAddress()); + boolean checkInstall = client.getClientProps(checkHost); + if (installJob || !checkInstall) { + if (!checkInstall) { + LOG.error("The host is registered with the client, but the readiness status is not normal and you must manually check the client status."); + } + return false; + } + } + } + } + return true; + } + return false; + } + + @Override + public boolean installBackupAgent(final Long zoneId) { + Map failResult = new HashMap<>(); + final AblestackCommvaultClient client = getClient(zoneId); + List Hosts = hostDao.findByDataCenterId(zoneId); + for (final HostVO host : Hosts) { + if (host.getStatus() == Status.Up && host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { + String commCell = client.getCommcell(); + JSONObject jsonObject = new JSONObject(commCell); + String commCellId = String.valueOf(jsonObject.get("commCellId")); + String commServeHostName = String.valueOf(jsonObject.get("commCellName")); + Ternary credentials = getKVMHyperisorCredentials(host); + boolean installJob = true; + LOG.info("checking for install agent on the Commvault Backup Provider in host " + host.getPrivateIpAddress()); + // 설치가 진행중인 호스트가 있는지 확인 + while (installJob) { + installJob = client.getInstallActiveJob(host.getName()); + try { + Thread.sleep(30000); + } catch (InterruptedException e) { + LOG.error("checkBackupAgent get install active job result sleep interrupted error"); + } + } + String checkHost = client.getClientId(host.getName()); + // 호스트가 클라이언트에 등록되지 않은 경우 + if (checkHost == null) { + String jobId = client.installAgent(host.getPrivateIpAddress(), commCellId, commServeHostName, credentials.first(), credentials.second()); + if (jobId != null) { + String jobStatus = client.getJobStatus(jobId); + if (!jobStatus.equalsIgnoreCase("Completed")) { + LOG.error("installing agent on the 
Commvault Backup Provider failed jogId : " + jobId + " , jobStatus : " + jobStatus); + ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_HOST_AGENT_INSTALL, + "Failed install the commvault client agent on the host : " + host.getPrivateIpAddress(), User.UID_SYSTEM, ApiCommandResourceType.Host.toString()); + failResult.put(host.getPrivateIpAddress(), jobId); + } + } else { + return false; + } + } else { + // 호스트가 클라이언트에는 등록되었지만 구성이 정상적으로 되지 않은 경우 준비 상태 체크 + boolean checkInstall = client.getClientCheckReadiness(checkHost); + if (!checkInstall) { + LOG.error("The host is registered with the client, but the readiness status is not normal and you must manually check the client status."); + ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_HOST_AGENT_INSTALL, + "Failed check readiness the commvault client agent on the host : " + host.getPrivateIpAddress(), User.UID_SYSTEM, ApiCommandResourceType.Host.toString()); + return false; + } + } + } + } + if (!failResult.isEmpty()) { + return false; + } + return true; + } + + @Override + public boolean importBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { + final AblestackCommvaultClient client = getClient(zoneId); + // 선택한 백업 정책의 RPO 편집 Commvault API 호출 + String type = "deleteRpo"; + String taskId = client.getScheduleTaskId(type, externalId); + if (taskId != null) { + String subTaskId = client.getSubTaskId(taskId); + if (subTaskId != null) { + boolean result = client.deleteSchedulePolicy(taskId, subTaskId); + if (!result) { + throw new CloudRuntimeException("Failed to delete schedule policy commvault api"); + } + } + } else { + throw new CloudRuntimeException("Failed to get plan details schedule task id commvault api"); + } + // 선택한 백업 정책의 보존 기간 변경 Commvault API 호출 + type = "updateRpo"; + String planEntity = client.getScheduleTaskId(type, externalId); + JSONObject 
jsonObject = new JSONObject(planEntity); + String planType = String.valueOf(jsonObject.get("planType")); + String planName = String.valueOf(jsonObject.get("planName")); + String planSubtype = String.valueOf(jsonObject.get("planSubtype")); + String planId = String.valueOf(jsonObject.get("planId")); + JSONObject entityInfo = jsonObject.getJSONObject("entityInfo"); + String companyId = String.valueOf(entityInfo.get("companyId")); + String storagePolicyId = client.getStoragePolicyId(planName); + if (storagePolicyId == null) { + throw new CloudRuntimeException("Failed to get plan storage policy id commvault api"); + } + boolean result = client.getStoragePolicyDetails(planId, storagePolicyId, retentionPeriod); + if (result) { + // 호스트에 선택한 백업 정책 설정 Commvault API 호출 + String path = "/"; + List Hosts = hostDao.findByDataCenterId(zoneId); + for (final HostVO host : Hosts) { + String backupSetId = client.getDefaultBackupSetId(host.getName()); + if (backupSetId != null) { + if (!client.setBackupSet(path, planType, planName, planSubtype, planId, companyId, backupSetId)) { + throw new CloudRuntimeException("Failed to setting backup plan for client commvault api"); + } + } + } + return true; + } else { + throw new CloudRuntimeException("Failed to edit plan schedule retention period commvault api"); + } + } + + @Override + public boolean updateBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { + final AblestackCommvaultClient client = getClient(zoneId); + String type = "updateRpo"; + String planEntity = client.getScheduleTaskId(type, externalId); + JSONObject jsonObject = new JSONObject(planEntity); + String planType = String.valueOf(jsonObject.get("planType")); + String planName = String.valueOf(jsonObject.get("planName")); + String planSubtype = String.valueOf(jsonObject.get("planSubtype")); + String planId = String.valueOf(jsonObject.get("planId")); + JSONObject entityInfo = jsonObject.getJSONObject("entityInfo"); + String companyId = 
String.valueOf(entityInfo.get("companyId")); + String storagePolicyId = client.getStoragePolicyId(planName); + if (storagePolicyId == null) { + throw new CloudRuntimeException("Failed to get plan storage policy id commvault api"); + } + return client.getStoragePolicyDetails(planId, storagePolicyId, retentionPeriod); + } + + private static String getUrlDomain(String url) throws URISyntaxException { + URI uri; + try { + uri = new URI(url); + } catch (URI.MalformedURIException e) { + throw new CloudRuntimeException("Failed to cast URI"); + } + + return uri.getHost(); + } + + private AblestackCommvaultClient getClient(final Long zoneId) { + try { + return new AblestackCommvaultClient(CommvaultUrl.valueIn(zoneId), CommvaultUsername.valueIn(zoneId), CommvaultPassword.valueIn(zoneId), + CommvaultValidateSSLSecurity.valueIn(zoneId), CommvaultApiRequestTimeout.valueIn(zoneId)); + } catch (URISyntaxException e) { + throw new CloudRuntimeException("Failed to parse Commvault API URL: " + e.getMessage()); + } catch (NoSuchAlgorithmException | KeyManagementException e) { + LOG.error("Failed to build Commvault API client due to: ", e); + } + throw new CloudRuntimeException("Failed to build Commvault API client"); + } + + protected Ternary getKVMHyperisorCredentials(HostVO host) { + + String username = null; + String password = null; + + if (host != null && host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { + hostDao.loadDetails(host); + password = host.getDetail("password"); + username = host.getDetail("username"); + } + if ( password == null || username == null) { + throw new CloudRuntimeException("Cannot find login credentials for HYPERVISOR " + Objects.requireNonNull(host).getUuid()); + } + + return new Ternary<>(username, password, null); + } + + private boolean executeDeleteBackupPathCommand(HostVO host, String username, String password, int port, String command) { + try { + Pair response = SshHelper.sshExecute(host.getPrivateIpAddress(), port, + username, null, 
password, command, 120000, 120000, 3600000); + + if (!response.first()) { + LOG.error(String.format("failed on HYPERVISOR %s due to: %s", host, response.second())); + } else { + return true; + } + } catch (final Exception e) { + throw new CloudRuntimeException(String.format("Failed to delete backup path on host %s due to: %s", host.getName(), e.getMessage())); + } + return false; + } + + private void cleanupBackupPathOnStageHost(String clientName, String path, boolean forced, String checkpointName, String diskPaths) { + HostVO stageHost = hostDao.findByName(clientName); + if (stageHost == null) { + throw new CloudRuntimeException(String.format("Unable to find stage host [%s] for backup cleanup", clientName)); + } + AblestackDeleteBackupCommand command = new AblestackDeleteBackupCommand(path, null, null, null, forced); + command.setBackupProvider("ablestack-commvault"); + command.setCheckpointName(checkpointName); + command.setDiskPaths(diskPaths); + try { + BackupAnswer answer = (BackupAnswer) agentManager.send(stageHost.getId(), command); + if (answer == null || !answer.getResult()) { + throw new CloudRuntimeException(String.format("Failed to delete Commvault backup path on host %s due to: %s", + stageHost.getName(), answer != null ? 
answer.getDetails() : "no answer received")); + } + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to delete Commvault backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to delete Commvault backup timed out, please try again"); + } + } + + public static boolean isRetentionExpired(String retainedUntil) { + if (retainedUntil == null || retainedUntil.trim().isEmpty() || "null".equals(retainedUntil)) { + return false; + } + try { + long timestamp = Long.parseLong(retainedUntil) * 1000L; + Date retainedDate = new Date(timestamp); + Date currentDate = new Date(); + return currentDate.after(retainedDate); + } catch (Exception e) { + LOG.info("parsing error: " + e.getMessage()); + return false; + } + } + + public static boolean versionCheck(String csVersionInfo) { + // 버전 체크 기준 : 11 SP32.89 + if (csVersionInfo == null) { + throw new CloudRuntimeException("commvault version must not be null."); + } + String v = csVersionInfo.trim(); + if (v.startsWith("\"") && v.endsWith("\"") && v.length() > 1) { + v = v.substring(1, v.length() - 1); + } + Matcher m = VERSION_PATTERN.matcher(v); + if (!m.matches()) { + throw new CloudRuntimeException("Unexpected commvault version format: " + csVersionInfo); + } + int major = Integer.parseInt(m.group(1)); + int fr = Integer.parseInt(m.group(2)); + int mt = Integer.parseInt(m.group(3)); + if (major < BASE_MAJOR) { + throw new CloudRuntimeException("The major version of the commvault you are trying to connect to is low. Supports versions 11.32.89 and higher."); + } else if (major == BASE_MAJOR && fr < BASE_FR) { + throw new CloudRuntimeException("The feature release version of the commvault you are trying to connect to is low. 
Supports versions 11.32.89 and higher."); + } else if (major == BASE_MAJOR && fr == BASE_FR && mt < BASE_MT) { + throw new CloudRuntimeException("The maintenance version of the commvault you are trying to connect to is low. Supports versions 11.32.89 and higher."); + } + return true; + } + + @Override + public Pair restoreBackupToVM(Long backupId, String vmName) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'restoreBackupToVM'"); + } +} diff --git a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultBackupOffering.java b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultBackupOffering.java similarity index 90% rename from plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultBackupOffering.java rename to plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultBackupOffering.java index c72cd6cd2aa3..9565f9961ff6 100644 --- a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultBackupOffering.java +++ b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultBackupOffering.java @@ -19,12 +19,12 @@ import org.apache.cloudstack.backup.BackupOffering; import java.util.Date; -public class CommvaultBackupOffering implements BackupOffering { +public class AblestackCommvaultBackupOffering implements BackupOffering { private String name; private String uid; - public CommvaultBackupOffering(String name, String uid) { + public AblestackCommvaultBackupOffering(String name, String uid) { this.name = name; this.uid = uid; } @@ -56,7 +56,7 @@ public boolean isUserDrivenBackupAllowed() { @Override public String getProvider() { - return "commvault"; + return "ablestack-commvault"; } @Override diff --git 
a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultClient.java b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultClient.java similarity index 97% rename from plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultClient.java rename to plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultClient.java index 9623b2e34049..66c75b17ca4c 100644 --- a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultClient.java +++ b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultClient.java @@ -66,8 +66,8 @@ import java.util.List; import java.util.Set; -public class CommvaultClient { - private static final Logger LOG = LogManager.getLogger(CommvaultClient.class); +public class AblestackCommvaultClient { + private static final Logger LOG = LogManager.getLogger(AblestackCommvaultClient.class); private final URI apiURI; private final String apiName; private final String apiPassword; @@ -78,7 +78,7 @@ public class CommvaultClient { private String cvtServerPassword; private final int cvtServerPort = 22; - public CommvaultClient(final String url, final String username, final String password, final boolean validateCertificate, final int timeout) throws URISyntaxException, NoSuchAlgorithmException, KeyManagementException { + public AblestackCommvaultClient(final String url, final String username, final String password, final boolean validateCertificate, final int timeout) throws URISyntaxException, NoSuchAlgorithmException, KeyManagementException { apiName = username; apiPassword = password; @@ -251,7 +251,7 @@ public List listPlans() { if (!planDetails.isMissingNode()) { String planId = planDetails.path("planId").asText(); String planName = planDetails.path("planName").asText(); - offerings.add(new 
CommvaultBackupOffering(planName, planId)); + offerings.add(new AblestackCommvaultBackupOffering(planName, planId)); } } } @@ -964,8 +964,14 @@ public boolean updateBackupSet(String path, String subclientId, String clientId, // POST https:///commandcenter/api/subclient//action/backup 테스트 시 Incremental 백업으로 반환되어 사용 x // POST https:///commandcenter/api/createtask // 백업 실행 API - public String createBackup(String subclientId, String storagePolicyId, String displayName, String commCellName, String clientId, String companyId, String companyName, String instanceName, String appName, String applicationId, String clientName, String backupsetId, String instanceId, String subclientGUID, String subclientName, String csGUID, String backupsetName) { + public String createBackup(String subclientId, String storagePolicyId, String displayName, String commCellName, String clientId, String companyId, String companyName, String instanceName, + String appName, String applicationId, String clientName, String backupsetId, String instanceId, String subclientGUID, String subclientName, String csGUID, + String backupsetName, String backupType) { HttpURLConnection connection = null; + final boolean incrementalBackup = "INCREMENTAL".equalsIgnoreCase(backupType); + final String backupLevel = incrementalBackup ? "INCREMENTAL" : "FULL"; + final String runIncrementalBackup = incrementalBackup ? "true" : "false"; + final String forceFullBackup = incrementalBackup ? 
"false" : "true"; String postUrl = apiURI.toString() + "/createtask"; try { URL url = new URL(postUrl); @@ -1012,9 +1018,9 @@ public String createBackup(String subclientId, String storagePolicyId, String di "}," + "\"options\":{" + "\"backupOpts\":{" + - "\"backupLevel\":\"FULL\"," + - "\"runIncrementalBackup\":false," + - "\"forceFullBackup\":true" + + "\"backupLevel\":\"%s\"," + + "\"runIncrementalBackup\":%s," + + "\"forceFullBackup\":%s" + "}," + "\"commonOpts\":{" + "\"overrideStoragePolicySettings\":true," + @@ -1027,7 +1033,8 @@ public String createBackup(String subclientId, String storagePolicyId, String di Integer.parseInt(subclientId), Integer.parseInt(storagePolicyId), displayName, commCellName, Integer.parseInt(clientId), Integer.parseInt(companyId), companyName, instanceName, appName, Integer.parseInt(applicationId), clientName, Integer.parseInt(backupsetId), - Integer.parseInt(instanceId), subclientGUID, subclientName, csGUID, backupsetName + Integer.parseInt(instanceId), subclientGUID, subclientName, csGUID, backupsetName, + backupLevel, runIncrementalBackup, forceFullBackup ); try (OutputStream os = connection.getOutputStream()) { byte[] input = jsonBody.getBytes(StandardCharsets.UTF_8); @@ -1379,9 +1386,9 @@ public String restoreFullVM(String subclientId, String displayName, String backu + "}," + "\"commonOptions\":{" + "\"overwriteFiles\":true," - + "\"unconditionalOverwrite\":false," + + "\"unconditionalOverwrite\":true," + "\"stripLevelType\":\"PRESERVE_LEVEL\"," - + "\"preserveLevel\":1," + + "\"preserveLevel\":0," + "\"isFromBrowseBackup\":true" + "}" + "}" @@ -1423,6 +1430,13 @@ public String restoreFullVM(String subclientId, String displayName, String backu return null; } + public String restoreFullVM(String subclientId, String displayName, String backupsetGUID, String clientId, String companyId, String companyName, String instanceName, + String appName, String applicationId, String clientName, String backupsetId, String instanceId, String 
backupsetName, + String commCellId, String endTime, List paths) { + return restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, + applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, String.join(",", paths)); + } + // GET https:///commandcenter/api/commcell/properties // 에이전트 설치를 위한 commcell 정보 조회 API public String getCommcell() { @@ -1727,4 +1741,4 @@ private String convertPathToJsonArray(String path) { jsonArray.append("]"); return jsonArray.toString(); } -} \ No newline at end of file +} diff --git a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultObject.java b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultObject.java similarity index 91% rename from plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultObject.java rename to plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultObject.java index a0fe576786d3..def6234db775 100644 --- a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultObject.java +++ b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultObject.java @@ -18,10 +18,10 @@ import java.util.List; -public interface CommvaultObject { +public interface AblestackCommvaultObject { String getUuid(); String getName(); String getHref(); String getType(); - List getLinks(); + List getLinks(); } diff --git a/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/module.properties b/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/module.properties new file mode 100644 index 000000000000..de14814cfa68 --- /dev/null +++ 
b/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=ablestack-commvault +parent=backup diff --git a/plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/spring-backup-commvault-context.xml b/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/spring-backup-commvault-context.xml similarity index 85% rename from plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/spring-backup-commvault-context.xml rename to plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/spring-backup-commvault-context.xml index 11b0848c8577..ca2b2cd38cbe 100644 --- a/plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/spring-backup-commvault-context.xml +++ b/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/spring-backup-commvault-context.xml @@ -20,7 +20,7 @@ http://www.springframework.org/schema/beans/spring-beans-3.0.xsd" > - - + + diff --git a/plugins/backup/commvault/pom.xml b/plugins/backup/ablestack-nas/pom.xml similarity index 93% 
rename from plugins/backup/commvault/pom.xml rename to plugins/backup/ablestack-nas/pom.xml index f824d49eb423..97ed28f479aa 100644 --- a/plugins/backup/commvault/pom.xml +++ b/plugins/backup/ablestack-nas/pom.xml @@ -20,8 +20,8 @@ 4.0.0 - cloud-plugin-backup-commvault - Apache CloudStack Plugin - KVM Commvault Backup and Recovery Plugin + cloud-plugin-backup-ablestack-nas + Ablestack Plugin - KVM NAS Backup and Recovery Plugin cloudstack-plugins org.apache.cloudstack diff --git a/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupOffering.java b/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupOffering.java new file mode 100644 index 000000000000..b7b6beab6ce9 --- /dev/null +++ b/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupOffering.java @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import java.util.Date; + +public class AblestackNasBackupOffering implements BackupOffering { + + private String name; + private String uid; + + public AblestackNasBackupOffering(String name, String uid) { + this.name = name; + this.uid = uid; + } + + @Override + public String getExternalId() { + return uid; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getDescription() { + return "NAS Backup Offering (Repository)"; + } + + @Override + public long getZoneId() { + return -1; + } + + @Override + public boolean isUserDrivenBackupAllowed() { + return true; + } + + @Override + public String getProvider() { + return "ablestack-nas"; + } + + @Override + public Date getCreated() { + return null; + } + + @Override + public String getUuid() { + return uid; + } + + @Override + public long getId() { + return -1; + } + + @Override + public String getRetentionPeriod() { + return null; + } +} diff --git a/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupProvider.java b/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupProvider.java new file mode 100644 index 000000000000..1811137d7bf9 --- /dev/null +++ b/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupProvider.java @@ -0,0 +1,1261 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import com.cloud.agent.AgentManager; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.offering.DiskOffering; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.Volume; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeApiServiceImpl; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; + + +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupDetailsDao; 
+import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.backup.dao.BackupRepositoryDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import javax.inject.Inject; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.apache.cloudstack.backup.BackupManager.BackupChainSize; +import static org.apache.cloudstack.backup.BackupManager.BackupFrameworkEnabled; +import static org.apache.cloudstack.backup.BackupManager.KvmIncrementalBackup; + +public class AblestackNasBackupProvider extends AdapterBase implements BackupProvider, Configurable { + private static final Logger LOG = LogManager.getLogger(AblestackNasBackupProvider.class); + private static final String BACKUP_TYPE_FULL = "FULL"; + private static final String BACKUP_TYPE_INCREMENTAL = "INCREMENTAL"; + private static final String BACKUP_ENGINE_QCOW2 = "QCOW2"; + private static final String BACKUP_ENGINE_RBD_DIFF = "RBD_DIFF"; + private static final String DETAIL_CHECKPOINT_NAME = "nas.checkpoint.name"; + private static final String DETAIL_CHECKPOINT_PATH = "nas.checkpoint.path"; + 
private static final String DETAIL_PARENT_BACKUP_UUID = "nas.parent.backup.uuid"; + private static final String DETAIL_PARENT_BACKUP_PATH = "nas.parent.backup.path"; + private static final String DETAIL_PARENT_CHECKPOINT_NAME = "nas.parent.checkpoint.name"; + private static final String DETAIL_PARENT_CHECKPOINT_PATH = "nas.parent.checkpoint.path"; + private static final String DETAIL_BACKUP_ENGINE = "nas.backup.engine"; + private static final String DETAIL_RBD_DISK_PATHS = "nas.rbd.disk.paths"; + private static final String DETAIL_CHAIN_SEALED = "nas.chain.sealed"; + private static final String DETAIL_CHAIN_SEAL_REASON = "nas.chain.seal.reason"; + private static final String DETAIL_FALLBACK_VOLUME_UUIDS = "nas.fallback.volume.uuids"; + private static final String MISSING_PARENT_RBD_SNAPSHOT_ERROR = "Parent RBD snapshot"; + + ConfigKey NASBackupRestoreMountTimeout = new ConfigKey<>("Advanced", Integer.class, + "nas.backup.restore.mount.timeout", + "30", + "Timeout in seconds after which backup repository mount for restore fails.", + true, + BackupFrameworkEnabled.key()); + + ConfigKey NASBackupRestoreTimeout = new ConfigKey<>("Advanced", Integer.class, + "nas.backup.restore.timeout", + "1800", + "Timeout in seconds after which NAS backup restore operations fail.", + true, + BackupFrameworkEnabled.key()); + + @Inject + private BackupDao backupDao; + + @Inject + private BackupDetailsDao backupDetailsDao; + + @Inject + private BackupRepositoryDao backupRepositoryDao; + + @Inject + private BackupOfferingDao backupOfferingDao; + + @Inject + private BackupRepositoryService backupRepositoryService; + + @Inject + private HostDao hostDao; + + @Inject + private VolumeDao volumeDao; + + @Inject + private SnapshotDao snapshotDao; + + @Inject + private StoragePoolHostDao storagePoolHostDao; + + @Inject + private VMInstanceDao vmInstanceDao; + + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + + @Inject + DataStoreManager dataStoreMgr; + + @Inject + private 
AgentManager agentManager; + + @Inject + private VMSnapshotDao vmSnapshotDao; + + @Inject + private VMSnapshotDetailsDao vmSnapshotDetailsDao; + + @Inject + BackupManager backupManager; + + @Inject + ResourceManager resourceManager; + + @Inject + private DiskOfferingDao diskOfferingDao; + + + private Long getClusterIdFromRootVolume(VirtualMachine vm) { + VolumeVO rootVolume = volumeDao.getInstanceRootVolume(vm.getId()); + StoragePoolVO rootDiskPool = primaryDataStoreDao.findById(rootVolume.getPoolId()); + if (rootDiskPool == null) { + return null; + } + return rootDiskPool.getClusterId(); + } + + protected Host getVMHypervisorHost(VirtualMachine vm) { + Long hostId = vm.getLastHostId(); + Long clusterId = null; + + if (hostId != null) { + Host host = hostDao.findById(hostId); + if (host.getStatus() == Status.Up) { + return host; + } + // Try to find any Up host in the same cluster + clusterId = host.getClusterId(); + } else { + // Try to find any Up host in the same cluster as the root volume + clusterId = getClusterIdFromRootVolume(vm); + } + + if (clusterId != null) { + for (final Host hostInCluster : hostDao.findHypervisorHostInCluster(clusterId)) { + if (hostInCluster.getStatus() == Status.Up) { + LOG.debug("Found Host {} in cluster {}", hostInCluster, clusterId); + return hostInCluster; + } + } + } + + // Try to find any Host in the zone + return resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, vm.getDataCenterId()); + } + + protected Host getVMHypervisorHostForBackup(VirtualMachine vm) { + Long hostId = vm.getHostId(); + if (hostId == null && VirtualMachine.State.Running.equals(vm.getState())) { + throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for %s. 
Make sure the virtual machine is running", vm.getName())); + } + if (VirtualMachine.State.Stopped.equals(vm.getState())) { + hostId = vm.getLastHostId(); + } + if (hostId == null) { + throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for stopped VM: %s", vm)); + } + final Host host = hostDao.findById(hostId); + if (host == null || !Status.Up.equals(host.getStatus()) || !Hypervisor.HypervisorType.KVM.equals(host.getHypervisorType())) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } + return host; + } + + @Override + public Pair takeBackup(final VirtualMachine vm, Boolean quiesceVM) { + return takeBackup(vm, quiesceVM, null); + } + + @Override + public Pair takeBackup(final VirtualMachine vm, Boolean quiesceVM, Long backupScheduleId) { + final Host host = getVMHypervisorHostForBackup(vm); + + final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException("No valid backup repository found for the VM, please check the attached backup offering"); + } + + validateNoKvmFileBasedVmSnapshots(vm); + List vmVolumes = volumeDao.findByInstance(vm.getId()); + vmVolumes.sort(Comparator.comparing(Volume::getDeviceId)); + Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(vmVolumes); + validateVolumePoolTypes(volumePoolsAndPaths.first()); + final BackupVO latestBackup = getLatestBackedUpBackup(vm); + final boolean incrementalBackup = shouldUseIncrementalBackup(vm, latestBackup, vmVolumes, backupScheduleId); + BackupExecutionResult result = executeBackup(vm, quiesceVM, host, backupRepository, vmVolumes, volumePoolsAndPaths, latestBackup, incrementalBackup, + incrementalBackup && vmVolumes.size() > 1); + if (!result.success && incrementalBackup && shouldRetryAsFullAfterIncrementalFailure(result, vmVolumes)) { + cleanupFailedBackupForFullRetry(result.backup); + 
LOG.warn("Incremental backup failed for VM [{}] due to [{}]. Retrying as full backup.", vm, result.details); + result = executeBackup(vm, quiesceVM, host, backupRepository, vmVolumes, volumePoolsAndPaths, null, false, false); + } + return new Pair<>(result.success, result.backup); + } + + private BackupExecutionResult executeBackup(VirtualMachine vm, Boolean quiesceVM, Host host, BackupRepository backupRepository, + List vmVolumes, Pair, List> volumePoolsAndPaths, + Backup parentBackup, boolean incrementalBackup, boolean retryAsFullOnFailure) { + final String backupPath = buildBackupPath(vm); + final String checkpointName = backupPath.substring(backupPath.lastIndexOf("/") + 1); + final String backupEngine = areAllVolumesOnRbdPool(volumePoolsAndPaths.first()) ? BACKUP_ENGINE_RBD_DIFF : BACKUP_ENGINE_QCOW2; + final List backupFiles = buildBackupFileNames(vmVolumes, backupEngine, incrementalBackup); + final String requestedBackupType = incrementalBackup ? BACKUP_TYPE_INCREMENTAL : BACKUP_TYPE_FULL; + + BackupVO backupVO = createBackupObject(vm, backupPath, requestedBackupType, + checkpointName, backupEngine, incrementalBackup ? 
parentBackup : null, volumePoolsAndPaths.second()); + AblestackNasTakeBackupCommand command = new AblestackNasTakeBackupCommand(vm.getInstanceName(), backupPath); + command.setBackupType(requestedBackupType); + command.setCheckpointName(checkpointName); + command.setBackupFiles(backupFiles); + command.setVolumePools(volumePoolsAndPaths.first()); + command.setVolumePaths(volumePoolsAndPaths.second()); + if (incrementalBackup && parentBackup != null) { + command.setParentBackupPath(parentBackup.getExternalId()); + command.setParentCheckpointName(getBackupDetail(parentBackup, DETAIL_CHECKPOINT_NAME)); + command.setParentCheckpointPath(getBackupDetail(parentBackup, DETAIL_CHECKPOINT_PATH)); + } + command.setBackupRepoType(backupRepository.getType()); + command.setBackupRepoAddress(backupRepository.getAddress()); + command.setMountOptions(backupRepository.getMountOptions()); + command.setQuiesce(quiesceVM); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), command); + } catch (AgentUnavailableException e) { + logger.error("Unable to contact backend control plane to initiate backup for VM {}", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + logger.error("Operation to initiate backup timed out for VM {}", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + } + + if (answer != null && answer.getResult()) { + backupVO.setDate(new Date()); + backupVO.setSize(answer.getSize()); + backupVO.setStatus(Backup.Status.BackedUp); + backupVO.setBackedUpVolumes(createVolumeInfoFromVolumes(vmVolumes, backupFiles)); + if (backupDao.update(backupVO.getId(), backupVO)) { + return 
BackupExecutionResult.success(backupVO); + } + throw new CloudRuntimeException("Failed to update backup"); + } + + final String details = answer != null ? answer.getDetails() : "No answer received"; + logger.error("Failed to take backup for VM {}: {}", vm.getInstanceName(), details); + if (retryAsFullOnFailure) { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } else if (answer != null && answer.getNeedsCleanup()) { + logger.error("Backup cleanup failed for VM {}. Leaving the backup in Error state.", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Error); + backupDao.update(backupVO.getId(), backupVO); + } else { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } + return BackupExecutionResult.failure(details, backupVO); + } + + private boolean shouldRetryAsFullAfterIncrementalFailure(BackupExecutionResult result, List vmVolumes) { + if (result == null || result.success) { + return false; + } + if (StringUtils.contains(result.details, MISSING_PARENT_RBD_SNAPSHOT_ERROR)) { + return true; + } + return vmVolumes.size() > 1; + } + + private void cleanupFailedBackupForFullRetry(Backup backup) { + if (backup == null) { + return; + } + backupDao.remove(backup.getId()); + } + + private static final class BackupExecutionResult { + private final boolean success; + private final Backup backup; + private final String details; + + private BackupExecutionResult(boolean success, Backup backup, String details) { + this.success = success; + this.backup = backup; + this.details = details; + } + + private static BackupExecutionResult success(Backup backup) { + return new BackupExecutionResult(true, backup, null); + } + + private static BackupExecutionResult failure(String details, Backup backup) { + return new BackupExecutionResult(false, backup, details); + } + } + + private String buildBackupPath(VirtualMachine vm) { + return String.format("%s/%s", vm.getInstanceName(), + new 
SimpleDateFormat("yyyy.MM.dd.HH.mm.ss.SSS").format(new Date())); + } + + private BackupVO createBackupObject(VirtualMachine vm, String backupPath, String backupType, String checkpointName, String backupEngine, Backup parentBackup, + List diskPaths) { + BackupVO backup = new BackupVO(); + backup.setVmId(vm.getId()); + backup.setExternalId(backupPath); + backup.setType(backupType); + backup.setDate(new Date()); + long virtualSize = 0L; + for (final Volume volume: volumeDao.findByInstance(vm.getId())) { + if (Volume.State.Ready.equals(volume.getState())) { + virtualSize += volume.getSize(); + } + } + backup.setProtectedSize(virtualSize); + backup.setStatus(Backup.Status.BackingUp); + backup.setBackupOfferingId(vm.getBackupOfferingId()); + backup.setAccountId(vm.getAccountId()); + backup.setDomainId(vm.getDomainId()); + backup.setZoneId(vm.getDataCenterId()); + backup.setName(backupManager.getBackupNameFromVM(vm)); + Map details = new HashMap<>(); + Map backupDetails = backupManager.getBackupDetailsFromVM(vm); + if (backupDetails != null) { + details.putAll(backupDetails); + } + details.put(DETAIL_CHECKPOINT_NAME, checkpointName); + details.put(DETAIL_CHECKPOINT_PATH, getCheckpointPath(backupPath, checkpointName, backupEngine)); + details.put(DETAIL_BACKUP_ENGINE, backupEngine); + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine) && CollectionUtils.isNotEmpty(diskPaths)) { + details.put(DETAIL_RBD_DISK_PATHS, String.join(",", diskPaths)); + } + if (parentBackup != null) { + details.put(DETAIL_PARENT_BACKUP_UUID, parentBackup.getUuid()); + details.put(DETAIL_PARENT_BACKUP_PATH, parentBackup.getExternalId()); + details.put(DETAIL_PARENT_CHECKPOINT_NAME, getBackupDetail(parentBackup, DETAIL_CHECKPOINT_NAME)); + details.put(DETAIL_PARENT_CHECKPOINT_PATH, getBackupDetail(parentBackup, DETAIL_CHECKPOINT_PATH)); + } + backup.setDetails(details); + + return backupDao.persist(backup); + } + + private String getCheckpointPath(String backupPath, String checkpointName, String 
backupEngine) { + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + return String.format("%s/checkpoints/%s.meta", backupPath, checkpointName); + } + return String.format("%s/checkpoints/%s.xml", backupPath, checkpointName); + } + + private BackupVO getLatestBackedUpBackup(VirtualMachine vm) { + List backups = backupDao.listByVmIdAndOffering(vm.getDataCenterId(), vm.getId(), vm.getBackupOfferingId()); + return backups.stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(backup -> Backup.Status.BackedUp.equals(backup.getStatus())) + .peek(backupDao::loadDetails) + .filter(backup -> getBackupDetail(backup, DETAIL_CHECKPOINT_NAME) != null) + .max(Comparator.comparing(BackupVO::getDate)) + .orElse(null); + } + + private boolean shouldUseIncrementalBackup(VirtualMachine vm, Backup latestBackup, List vmVolumes, Long backupScheduleId) { + if (latestBackup == null) { + return false; + } + + if (backupScheduleId != null && !hasBackedUpBackupForSchedule(backupScheduleId)) { + return false; + } + + final Long clusterId = getClusterIdFromRootVolume(vm); + if (clusterId == null) { + LOG.debug("Unable to resolve cluster for VM [{}], fallback to full backup.", vm); + return false; + } + + if (!KvmIncrementalBackup.valueIn(clusterId)) { + return false; + } + + if (!hasHealthyIncrementalSource(latestBackup)) { + markVolumeFallbackAndSeal(latestBackup, "unhealthy-chain"); + return false; + } + if (getBackupChainSize(vm, latestBackup) >= BackupChainSize.value()) { + sealBackupChain(latestBackup, "chain-size-limit"); + return false; + } + return true; + } + + private boolean hasBackedUpBackupForSchedule(Long backupScheduleId) { + return backupDao.listBySchedule(backupScheduleId).stream() + .anyMatch(backup -> Backup.Status.BackedUp.equals(backup.getStatus())); + } + + private int getBackupChainSize(VirtualMachine vm, Backup latestBackup) { + List backups = backupDao.listByVmIdAndOffering(vm.getDataCenterId(), vm.getId(), 
vm.getBackupOfferingId()).stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(backup -> Backup.Status.BackedUp.equals(backup.getStatus())) + .peek(backupDao::loadDetails) + .collect(Collectors.toList()); + Map backupsByUuid = backups.stream().collect(Collectors.toMap(BackupVO::getUuid, backup -> (Backup) backup, (left, right) -> left)); + return AblestackBackupFrameworkUtils.getBackupChainSize(latestBackup, backupsByUuid, + current -> getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID)); + } + + private boolean hasHealthyIncrementalSource(Backup latestBackup) { + try { + return AblestackBackupFrameworkUtils.hasUsableVolumeChainStates(getVolumeChainStates(latestBackup.getBackedUpVolumes(), latestBackup)); + } catch (Exception e) { + LOG.warn("Latest NAS backup chain [{}] is not healthy enough for incremental reuse: {}", latestBackup.getUuid(), e.getMessage()); + return false; + } + } + + private void markVolumeFallbackAndSeal(Backup latestBackup, String reason) { + List unhealthyVolumeUuids = listUnhealthyVolumeUuids(latestBackup); + if (!unhealthyVolumeUuids.isEmpty()) { + updateBackupDetail(latestBackup, DETAIL_FALLBACK_VOLUME_UUIDS, String.join(",", unhealthyVolumeUuids)); + } + sealBackupChain(latestBackup, reason); + } + + private List listUnhealthyVolumeUuids(Backup backup) { + List unhealthy = new ArrayList<>(); + if (backup == null || CollectionUtils.isEmpty(backup.getBackedUpVolumes())) { + return unhealthy; + } + for (Backup.VolumeInfo volumeInfo : backup.getBackedUpVolumes()) { + List chainFiles = AblestackBackupFrameworkUtils.sanitizeChainFiles(getBackupFileChain(volumeInfo.getUuid(), backup)); + if (chainFiles.isEmpty()) { + unhealthy.add(volumeInfo.getUuid()); + } + } + return unhealthy; + } + + private void sealBackupChain(Backup backup, String reason) { + updateBackupDetail(backup, DETAIL_CHAIN_SEALED, "true"); + updateBackupDetail(backup, DETAIL_CHAIN_SEAL_REASON, reason); + } + + private void 
updateBackupDetail(Backup backup, String key, String value) { + if (backup == null || StringUtils.isBlank(key)) { + return; + } + backupDetailsDao.removeDetail(backup.getId(), key); + backupDetailsDao.addDetail(backup.getId(), key, value, false); + if (backup instanceof BackupVO) { + backupDao.loadDetails((BackupVO) backup); + } + } + + private boolean hasDependentBackups(Backup backup) { + List backups = backupDao.listByVmIdAndOffering(backup.getZoneId(), backup.getVmId(), backup.getBackupOfferingId()); + return backups.stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(candidate -> !Objects.equals(candidate.getId(), backup.getId())) + .peek(backupDao::loadDetails) + .anyMatch(candidate -> Objects.equals(getBackupDetail(candidate, DETAIL_PARENT_BACKUP_UUID), backup.getUuid())); + } + + private String getBackupDetail(Backup backup, String key) { + Map details = backup.getDetails(); + return details != null ? details.get(key) : null; + } + + private void validateVolumePoolTypes(List volumePools) { + boolean hasRbd = volumePools.stream().anyMatch(pool -> pool != null && Storage.StoragePoolType.RBD.equals(pool.getPoolType())); + boolean hasNonRbd = volumePools.stream().anyMatch(pool -> pool != null && !Storage.StoragePoolType.RBD.equals(pool.getPoolType())); + if (hasRbd && hasNonRbd) { + throw new CloudRuntimeException("NAS incremental backup does not support VMs with mixed RBD and non-RBD volumes"); + } + } + + private boolean areAllVolumesOnRbdPool(List volumePools) { + return CollectionUtils.isNotEmpty(volumePools) && + volumePools.stream().allMatch(pool -> pool != null && Storage.StoragePoolType.RBD.equals(pool.getPoolType())); + } + + private List buildBackupFileNames(List volumes, String backupEngine, boolean incrementalBackup) { + List backupFiles = new ArrayList<>(); + for (VolumeVO volume : volumes) { + String suffix; + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + suffix = incrementalBackup ? 
".rbdiff" : ".raw"; + } else { + suffix = ".qcow2"; + } + backupFiles.add(String.format("volume-%s%s", volume.getUuid(), suffix)); + } + return backupFiles; + } + + private String createVolumeInfoFromVolumes(List volumes, List backupFiles) { + List infoList = new ArrayList<>(); + for (int i = 0; i < volumes.size(); i++) { + VolumeVO vol = volumes.get(i); + DiskOffering diskOffering = diskOfferingDao.findById(vol.getDiskOfferingId()); + String diskOfferingUuid = diskOffering != null ? diskOffering.getUuid() : null; + infoList.add(new Backup.VolumeInfo(vol.getUuid(), backupFiles.get(i), vol.getVolumeType(), vol.getSize(), + vol.getDeviceId(), diskOfferingUuid, vol.getMinIops(), vol.getMaxIops())); + } + return new com.google.gson.Gson().toJson(infoList.toArray(), Backup.VolumeInfo[].class); + } + + @Override + public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + return restoreVMBackup(vm, backup); + } + + @Override + public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { + return restoreVMBackup(vm, backup).first(); + } + + private Pair restoreVMBackup(VirtualMachine vm, Backup backup) { + validateNoKvmFileBasedVmSnapshots(vm); + List backupVolumes = backup.getBackedUpVolumes(); + List backedVolumesUUIDs = backupVolumes.stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(Backup.VolumeInfo::getUuid) + .collect(Collectors.toList()); + + List restoreVolumes = volumeDao.findByInstance(vm.getId()).stream() + .sorted(Comparator.comparingLong(VolumeVO::getDeviceId)) + .collect(Collectors.toList()); + + LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm, backup); + BackupRepository backupRepository = getBackupRepository(backup); + + final Host host = getVMHypervisorHost(vm); + AblestackNasRestoreBackupCommand restoreCommand = new AblestackNasRestoreBackupCommand(); + restoreCommand.setBackupPath(backup.getExternalId()); + 
restoreCommand.setBackupRepoType(backupRepository.getType()); + restoreCommand.setBackupRepoAddress(backupRepository.getAddress()); + restoreCommand.setMountOptions(backupRepository.getMountOptions()); + restoreCommand.setVmName(vm.getName()); + restoreCommand.setBackupVolumesUUIDs(backedVolumesUUIDs); + Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(restoreVolumes); + restoreCommand.setRestoreVolumePools(volumePoolsAndPaths.first()); + restoreCommand.setRestoreVolumePaths(volumePoolsAndPaths.second()); + restoreCommand.setVolumePaths(getVolumePaths(restoreVolumes)); + restoreCommand.setBackupFiles(getBackupFiles(backupVolumes, backup)); + restoreCommand.setBackupFileChains(getBackupFileChains(backupVolumes, backup)); + restoreCommand.setVolumeChainStates(getVolumeChainStates(backupVolumes, backup)); + restoreCommand.setVmExists(vm.getRemoved() == null); + restoreCommand.setVmState(vm.getState()); + restoreCommand.setRestorePlan(createRestorePlan(false)); + restoreCommand.setMountTimeout(NASBackupRestoreMountTimeout.value()); + restoreCommand.setWait(NASBackupRestoreTimeout.value()); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), restoreCommand); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to restore backup timed out, please try again"); + } + return new Pair<>(answer.getResult(), answer.getDetails()); + } + + private List getBackupFiles(List backedVolumes, Backup backup) { + List backupFiles = new ArrayList<>(); + List sortedVolumes = new ArrayList<>(backedVolumes); + sortedVolumes.sort(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)); + for (Backup.VolumeInfo backedVolume : sortedVolumes) { + if (isLegacyBackup(backup)) { + backupFiles.add(getLegacyBackupFileName(backedVolume)); + } else { + 
backupFiles.add(backedVolume.getPath()); + } + } + return backupFiles; + } + + private List getBackupFileChains(List backedVolumes, Backup backup) { + List backupFileChains = new ArrayList<>(); + List sortedVolumes = new ArrayList<>(backedVolumes); + sortedVolumes.sort(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)); + for (Backup.VolumeInfo backedVolume : sortedVolumes) { + backupFileChains.add(String.join(";", getBackupFileChain(backedVolume.getUuid(), backup))); + } + return backupFileChains; + } + + private List getVolumeChainStates(List backedVolumes, Backup backup) { + List volumeChainStates = new ArrayList<>(); + List sortedVolumes = new ArrayList<>(backedVolumes); + sortedVolumes.sort(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)); + String backupEngine = getBackupDetail(backup, DETAIL_BACKUP_ENGINE); + for (Backup.VolumeInfo backedVolume : sortedVolumes) { + volumeChainStates.add(new BackupVolumeChainState(backedVolume.getUuid(), backupEngine, + AblestackBackupFrameworkUtils.sanitizeChainFiles(getBackupFileChain(backedVolume.getUuid(), backup)))); + } + AblestackBackupFrameworkUtils.validateVolumeChainStates(volumeChainStates); + return volumeChainStates; + } + + private BackupRestorePlan createRestorePlan(boolean attachRequired) { + return AblestackBackupFrameworkUtils.createRestorePlan(attachRequired, true); + } + + @Override + public boolean supportsVolumeLevelChainState() { + return true; + } + + @Override + public boolean supportsRestorePlan() { + return true; + } + + @Override + public boolean supportsRestoreChainValidation() { + return true; + } + + @Override + public boolean supportsPostRestoreMaintenance() { + return true; + } + + @Override + public void runPostRestoreMaintenance(VirtualMachine vm, Backup backup, boolean volumeOnly) { + if (backup == null || CollectionUtils.isEmpty(backup.getBackedUpVolumes())) { + return; + } + final List chainStates = getVolumeChainStates(backup.getBackedUpVolumes(), backup); + 
AblestackBackupFrameworkUtils.validateVolumeChainStates(chainStates); + LOG.debug("Completed NAS post-restore maintenance for VM [{}], backup [{}], volumeOnly=[{}]", vm != null ? vm.getInstanceName() : null, + backup.getUuid(), volumeOnly); + } + + @Override + public boolean supportsBackgroundChainValidation() { + return true; + } + + @Override + public void validateChains(Long zoneId) { + final List vmIdsWithBackups = backupDao.listVmIdsWithBackupsInZone(zoneId); + if (CollectionUtils.isEmpty(vmIdsWithBackups)) { + return; + } + for (final Long vmId : vmIdsWithBackups) { + final Backup latestBackup = getLatestBackedUpBackupForProvider(zoneId, vmId); + if (latestBackup == null) { + continue; + } + loadBackupDetailsIfNeeded(latestBackup); + if (Boolean.parseBoolean(getBackupDetail(latestBackup, DETAIL_CHAIN_SEALED))) { + continue; + } + if (!hasHealthyIncrementalSource(latestBackup)) { + markVolumeFallbackAndSeal(latestBackup, "background-chain-validation"); + LOG.warn("Sealed NAS backup chain [{}] during background validation in zone [{}]", latestBackup.getUuid(), zoneId); + } + } + } + + private Backup getLatestBackedUpBackupForProvider(Long zoneId, Long vmId) { + return backupDao.listByVmId(zoneId, vmId).stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(backup -> Backup.Status.BackedUp.equals(backup.getStatus())) + .filter(backup -> { + BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); + return offering != null && BackupProviderNameUtils.isNasFamily(offering.getProvider()); + }) + .peek(backupDao::loadDetails) + .max(Comparator.comparing(BackupVO::getDate)) + .orElse(null); + } + + private List getBackupFileChain(String volumeUuid, Backup backup) { + loadBackupDetailsIfNeeded(backup); + if (isLegacyBackup(backup)) { + Backup.VolumeInfo volumeInfo = getBackedUpVolumeInfo(backup.getBackedUpVolumes(), volumeUuid); + return volumeInfo != null ? 
getLegacyBackupFileCandidates(volumeInfo) : List.of(); + } + + String backupEngine = getBackupDetail(backup, DETAIL_BACKUP_ENGINE); + if (!BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + Backup.VolumeInfo volumeInfo = getBackedUpVolumeInfo(backup.getBackedUpVolumes(), volumeUuid); + return volumeInfo != null ? List.of(volumeInfo.getPath()) : List.of(); + } + + List chain = getBackupChain(backup); + List files = new ArrayList<>(); + for (Backup chainBackup : chain) { + Backup.VolumeInfo volumeInfo = getBackedUpVolumeInfo(chainBackup.getBackedUpVolumes(), volumeUuid); + if (volumeInfo != null) { + files.add(String.format("%s/%s", chainBackup.getExternalId(), volumeInfo.getPath())); + } + } + return files; + } + + private List getBackupChain(Backup backup) { + loadBackupDetailsIfNeeded(backup); + List backups = backupDao.listByVmIdAndOffering(backup.getZoneId(), backup.getVmId(), backup.getBackupOfferingId()); + Map backupsByUuid = new HashMap<>(); + for (Backup candidate : backups) { + if (candidate instanceof BackupVO) { + backupDao.loadDetails((BackupVO) candidate); + } + backupsByUuid.put(candidate.getUuid(), candidate); + } + + List chain = new ArrayList<>(); + Backup current = backup; + while (current != null) { + chain.add(current); + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + current = parentBackupUuid != null ? 
backupsByUuid.get(parentBackupUuid) : null; + } + Collections.reverse(chain); + return chain; + } + + private void loadBackupDetailsIfNeeded(Backup backup) { + if (backup instanceof BackupVO && backup.getDetails() == null) { + backupDao.loadDetails((BackupVO) backup); + } + } + + private boolean isLegacyBackup(Backup backup) { + return getBackupDetail(backup, DETAIL_BACKUP_ENGINE) == null; + } + + private String getLegacyBackupFileName(Backup.VolumeInfo volumeInfo) { + String volumePath = volumeInfo.getPath(); + if (StringUtils.isNotBlank(volumePath) && + (volumePath.endsWith(".qcow2") || volumePath.endsWith(".raw") || volumePath.endsWith(".rbdiff"))) { + return volumePath; + } + String diskPrefix = Volume.Type.ROOT.equals(volumeInfo.getType()) ? "root" : "datadisk"; + return String.format("%s.%s.qcow2", diskPrefix, volumeInfo.getPath()); + } + + private List getLegacyBackupFileCandidates(Backup.VolumeInfo volumeInfo) { + List candidates = new ArrayList<>(); + String volumePath = volumeInfo.getPath(); + if (StringUtils.isNotBlank(volumePath)) { + candidates.add(volumePath); + if (volumePath.contains("/")) { + String baseName = volumePath.substring(volumePath.lastIndexOf('/') + 1); + if (!Objects.equals(volumePath, baseName)) { + candidates.add(baseName); + } + } + } + + String legacyFileName = getLegacyBackupFileName(volumeInfo); + if (!candidates.contains(legacyFileName)) { + candidates.add(legacyFileName); + } + + if (volumePath != null && volumePath.contains("/")) { + String baseName = volumePath.substring(volumePath.lastIndexOf('/') + 1); + String diskPrefix = Volume.Type.ROOT.equals(volumeInfo.getType()) ? 
"root" : "datadisk"; + String baseNameLegacyFile = String.format("%s.%s.qcow2", diskPrefix, baseName); + if (!candidates.contains(baseNameLegacyFile)) { + candidates.add(baseNameLegacyFile); + } + } + + return candidates; + } + + private List getVolumePaths(List volumes) { + List volumePaths = new ArrayList<>(); + for (VolumeVO volume : volumes) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(volume.getPoolId()); + if (Objects.isNull(storagePool)) { + throw new CloudRuntimeException("Unable to find storage pool associated to the volume"); + } + String volumePathPrefix; + if (ScopeType.HOST.equals(storagePool.getScope())) { + volumePathPrefix = storagePool.getPath(); + } else if (Storage.StoragePoolType.SharedMountPoint.equals(storagePool.getPoolType())) { + volumePathPrefix = storagePool.getPath(); + } else { + volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid()); + } + volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath())); + } + return volumePaths; + } + + private Pair, List> getVolumePoolsAndPaths(List volumes) { + List volumePools = new ArrayList<>(); + List volumePaths = new ArrayList<>(); + for (VolumeVO volume : volumes) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(volume.getPoolId()); + if (Objects.isNull(storagePool)) { + throw new CloudRuntimeException("Unable to find storage pool associated to the volume"); + } + + DataStore dataStore = dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary); + volumePools.add(dataStore != null ? 
(PrimaryDataStoreTO)dataStore.getTO() : null); + + String volumePathPrefix = getVolumePathPrefix(storagePool); + volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath())); + } + return new Pair<>(volumePools, volumePaths); + } + + private String getVolumePathPrefix(StoragePoolVO storagePool) { + String volumePathPrefix; + if (ScopeType.HOST.equals(storagePool.getScope()) || + Storage.StoragePoolType.SharedMountPoint.equals(storagePool.getPoolType()) || + Storage.StoragePoolType.RBD.equals(storagePool.getPoolType())) { + volumePathPrefix = storagePool.getPath(); + } else { + // Should be Storage.StoragePoolType.NetworkFilesystem + volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid()); + } + return volumePathPrefix; + } + + @Override + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); + final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); + if (diskOffering == null) { + throw new CloudRuntimeException(String.format("Unable to find disk offering [%s] for backed up volume [%s]", + backupVolumeInfo.getDiskOfferingId(), backupVolumeInfo.getUuid())); + } + String cacheMode = null; + final VMInstanceVO vm = vmInstanceDao.findVMByInstanceName(vmNameAndState.first()); + if (vm == null) { + throw new CloudRuntimeException(String.format("Unable to find VM [%s] for NAS volume restore", vmNameAndState.first())); + } + List listVolumes = volumeDao.findByInstanceAndType(vm.getId(), Type.ROOT); + if(CollectionUtils.isNotEmpty(listVolumes)) { + VolumeVO rootDisk = listVolumes.get(0); + DiskOffering baseDiskOffering = diskOfferingDao.findById(rootDisk.getDiskOfferingId()); + if (baseDiskOffering != null && baseDiskOffering.getCacheMode() != null) { + cacheMode = baseDiskOffering.getCacheMode().toString(); + } + } + StoragePoolVO pool = 
primaryDataStoreDao.findByUuid(dataStoreUuid); + if (pool == null) { + List pools = primaryDataStoreDao.findPoolByName(dataStoreUuid); + if (CollectionUtils.isNotEmpty(pools)) { + pool = pools.get(0); + } + } + if (pool == null) { + throw new CloudRuntimeException(String.format("Unable to find primary storage pool for restore target [%s]", dataStoreUuid)); + } + HostVO vmHost = hostDao.findByIp(hostIp); + if (vmHost == null) { + vmHost = hostDao.findByName(hostIp); + } + if (vmHost == null) { + throw new CloudRuntimeException(String.format("Unable to find VM host [%s] for NAS volume restore", hostIp)); + } + + Backup.VolumeInfo matchingVolume = getBackedUpVolumeInfo(backup.getBackedUpVolumes(), volume.getUuid()); + if (matchingVolume == null) { + throw new CloudRuntimeException(String.format("Unable to find volume %s in the list of backed up volumes for backup %s, cannot proceed with restore", volume.getUuid(), backup)); + } + Long backedUpVolumeSize = matchingVolume.getSize(); + + LOG.debug("Restoring vm volume {} from backup {} on the NAS Backup Provider", volume, backup); + BackupRepository backupRepository = getBackupRepository(backup); + + VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), + backup.getDomainId(), backup.getAccountId(), 0, null, + backup.getSize(), null, null, null); + String volumeUUID = UUID.randomUUID().toString(); + String volumeName = volume != null ? 
volume.getName() : backupVolumeInfo.getUuid(); + restoredVolume.setName("RestoredVol-" + volumeName); + restoredVolume.setProvisioningType(diskOffering.getProvisioningType()); + restoredVolume.setUpdated(new Date()); + restoredVolume.setUuid(volumeUUID); + restoredVolume.setRemoved(null); + restoredVolume.setDisplayVolume(true); + restoredVolume.setPoolId(pool.getId()); + restoredVolume.setPoolType(pool.getPoolType()); + restoredVolume.setPath(restoredVolume.getUuid()); + restoredVolume.setState(Volume.State.Copying); + restoredVolume.setSize(backupVolumeInfo.getSize()); + restoredVolume.setDiskOfferingId(diskOffering.getId()); + if (pool.getPoolType() != Storage.StoragePoolType.RBD) { + restoredVolume.setFormat(Storage.ImageFormat.QCOW2); + } else { + restoredVolume.setFormat(Storage.ImageFormat.RAW); + } + + AblestackNasRestoreBackupCommand restoreCommand = new AblestackNasRestoreBackupCommand(); + restoreCommand.setBackupPath(backup.getExternalId()); + restoreCommand.setBackupRepoType(backupRepository.getType()); + restoreCommand.setBackupRepoAddress(backupRepository.getAddress()); + restoreCommand.setVmName(vmNameAndState.first()); + restoreCommand.setRestoreVolumePaths(Collections.singletonList(String.format("%s/%s", getVolumePathPrefix(pool), volumeUUID))); + DataStore dataStore = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + if (dataStore == null) { + throw new CloudRuntimeException(String.format("Unable to get primary datastore TO for pool [%s] while restoring volume [%s]", + pool.getUuid(), backupVolumeInfo.getUuid())); + } + restoreCommand.setRestoreVolumePools(Collections.singletonList(dataStore != null ? 
(PrimaryDataStoreTO)dataStore.getTO() : null)); + restoreCommand.setDiskType(matchingVolume.getType().name().toLowerCase(Locale.ROOT)); + restoreCommand.setMountOptions(backupRepository.getMountOptions()); + restoreCommand.setVmExists(null); + restoreCommand.setVmState(vmNameAndState.second()); + restoreCommand.setMountTimeout(NASBackupRestoreMountTimeout.value()); + restoreCommand.setWait(NASBackupRestoreTimeout.value()); + restoreCommand.setCacheMode(cacheMode); + restoreCommand.setVolumePaths(Collections.singletonList(String.format("%s/%s", pool.getPath(), volumeUUID))); + restoreCommand.setBackupFiles(getBackupFiles(Collections.singletonList(matchingVolume), backup)); + restoreCommand.setBackupFileChains(Collections.singletonList(String.join(";", getBackupFileChain(matchingVolume.getUuid(), backup)))); + restoreCommand.setVolumeChainStates(getVolumeChainStates(Collections.singletonList(matchingVolume), backup)); + restoreCommand.setRestorePlan(createRestorePlan(AblestackBackupFrameworkUtils.requiresRunningVmAttach(vmNameAndState.second()))); + + BackupAnswer answer; + try { + LOG.info("Restoring volume {} from backup {} on the NAS Backup Provider using VM host [{}]", + backupVolumeInfo.getUuid(), backup, vmHost.getName()); + answer = (BackupAnswer) agentManager.send(vmHost.getId(), restoreCommand); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to restore backed up volume timed out, please try again"); + } + + if (answer.getResult()) { + try { + volumeDao.persist(restoredVolume); + LOG.info("Successfully restored volume {} from backup {} on the NAS Backup Provider. 
Restored volume UUID: {}", + backupVolumeInfo.getUuid(), backup, restoredVolume.getUuid()); + } catch (Exception e) { + throw new CloudRuntimeException("Unable to create restored volume due to: " + e); + } + } + + return new Pair<>(answer.getResult(), answer.getDetails()); + } + + private BackupRepository getBackupRepository(Backup backup) { + BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(backup.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException(String.format("No valid backup repository found for the backup %s, please check the attached backup offering", backup.getUuid())); + } + return backupRepository; + } + + private Backup.VolumeInfo getBackedUpVolumeInfo(List backedUpVolumes, String volumeUuid) { + return backedUpVolumes.stream() + .filter(v -> v.getUuid().equals(volumeUuid)) + .findFirst() + .orElse(null); + } + + @Override + public boolean deleteBackup(Backup backup, boolean forced) { + if (backup instanceof BackupVO && backup.getDetails() == null) { + backupDao.loadDetails((BackupVO) backup); + } + if (!forced && hasDependentBackups(backup)) { + throw new CloudRuntimeException(String.format("Backup [%s] cannot be deleted because one or more incremental backups depend on it.", backup.getUuid())); + } + + final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(backup.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException("No valid backup repository found for the VM, please check the attached backup offering"); + } + + final Host host; + final VirtualMachine vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); + if (vm != null) { + host = getVMHypervisorHost(vm); + } else { + host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, backup.getZoneId()); + } + + AblestackDeleteBackupCommand command = new AblestackDeleteBackupCommand(backup.getExternalId(), 
backupRepository.getType(), + backupRepository.getAddress(), backupRepository.getMountOptions(), forced); + command.setBackupProvider("ablestack-nas"); + command.setCheckpointName(getBackupDetail(backup, DETAIL_CHECKPOINT_NAME)); + command.setDiskPaths(getBackupDetail(backup, DETAIL_RBD_DISK_PATHS)); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), command); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to delete backup timed out, please try again"); + } + + if (answer != null && answer.getResult()) { + return true; + } + + logger.debug("There was an error removing the backup with id {}", backup.getId()); + return false; + } + + public void syncBackupMetrics(Long zoneId) { + } + + @Override + public List listRestorePoints(VirtualMachine vm) { + return null; + } + + @Override + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm) { + return null; + } + + @Override + public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { + if (hasKvmFileBasedVmSnapshots(vm)) { + logger.warn("VM [{}] has VM snapshots using the KvmFileBasedStorageVmSnapshot Strategy; this provider does not support backups on VMs with these snapshots!", vm); + return false; + } + if (hasVolumeSnapshots(vm)) { + logger.warn("VM [{}] has volume snapshots; this provider does not support backups on VMs with volume snapshots!", vm); + return false; + } + + return Hypervisor.HypervisorType.KVM.equals(vm.getHypervisorType()); + } + + private void validateNoKvmFileBasedVmSnapshots(VirtualMachine vm) { + if (hasKvmFileBasedVmSnapshots(vm)) { + logger.warn("VM [{}] has VM snapshots using the KvmFileBasedStorageVmSnapshot Strategy; backup cannot be started.", vm); + throw new 
CloudRuntimeException(String.format("Cannot take backup of VM [%s] as it has KVM file-based VM snapshots.", vm.getUuid())); + } + if (hasVolumeSnapshots(vm)) { + logger.warn("VM [{}] has volume snapshots; backup cannot be started.", vm); + throw new CloudRuntimeException(String.format("Cannot take backup of VM [%s] as it has volume snapshots.", vm.getUuid())); + } + } + + private boolean hasKvmFileBasedVmSnapshots(VirtualMachine vm) { + for (VMSnapshotVO vmSnapshotVO : vmSnapshotDao.findByVmAndByType(vm.getId(), VMSnapshot.Type.Disk)) { + List vmSnapshotDetails = vmSnapshotDetailsDao.listDetails(vmSnapshotVO.getId()); + if (vmSnapshotDetails.stream().anyMatch(vmSnapshotDetailsVO -> VolumeApiServiceImpl.KVM_FILE_BASED_STORAGE_SNAPSHOT.equals(vmSnapshotDetailsVO.getName()))) { + return true; + } + } + return false; + } + + private boolean hasVolumeSnapshots(VirtualMachine vm) { + for (VolumeVO volume : volumeDao.findByInstance(vm.getId())) { + List snapshots = snapshotDao.listByVolumeId(volume.getId()); + if (snapshots.stream().anyMatch(snapshot -> !Snapshot.State.Destroyed.equals(snapshot.getState()))) { + return true; + } + } + return false; + } + + @Override + public boolean removeVMFromBackupOffering(VirtualMachine vm) { + return true; + } + + @Override + public boolean willDeleteBackupsOnOfferingRemoval() { + return false; + } + + @Override + public boolean supportsInstanceFromBackup() { + return true; + } + + @Override + public boolean supportsMemoryVmSnapshot() { + return false; + } + + @Override + public Pair getBackupStorageStats(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, BackupProviderNameUtils.toDisplayName(getName())); + Long totalSize = 0L; + Long usedSize = 0L; + for (final BackupRepository repository : repositories) { + if (repository.getCapacityBytes() != null) { + totalSize += repository.getCapacityBytes(); + } + if (repository.getUsedBytes() != null) { + usedSize += repository.getUsedBytes(); + } + } 
+ return new Pair<>(usedSize, totalSize); + } + + @Override + public void syncBackupStorageStats(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, BackupProviderNameUtils.toDisplayName(getName())); + final Host host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); + for (final BackupRepository repository : repositories) { + GetBackupStorageStatsCommand command = new GetBackupStorageStatsCommand(repository.getType(), repository.getAddress(), repository.getMountOptions()); + BackupStorageStatsAnswer answer; + try { + answer = (BackupStorageStatsAnswer) agentManager.send(host.getId(), command); + backupRepositoryDao.updateCapacity(repository, answer.getTotalSize(), answer.getUsedSize()); + } catch (AgentUnavailableException e) { + logger.warn("Unable to contact backend control plane to get backup stats for repository: {}", repository.getName()); + } catch (OperationTimedoutException e) { + logger.warn("Operation to get backup stats timed out for the repository: " + repository.getName()); + } + } + } + + @Override + public List listBackupOfferings(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, BackupProviderNameUtils.toDisplayName(getName())); + final List offerings = new ArrayList<>(); + for (final BackupRepository repository : repositories) { + offerings.add(new AblestackNasBackupOffering(repository.getName(), repository.getUuid())); + } + return offerings; + } + + @Override + public boolean isValidProviderOffering(Long zoneId, String uuid) { + return true; + } + + @Override + public Boolean crossZoneInstanceCreationEnabled(BackupOffering backupOffering) { + final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(backupOffering.getId()); + if (backupRepository == null) { + throw new CloudRuntimeException("Backup repository not found for the backup offering" + backupOffering.getName()); + } + return 
Boolean.TRUE.equals(backupRepository.crossZoneInstanceCreationEnabled()); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + NASBackupRestoreMountTimeout, + NASBackupRestoreTimeout + }; + } + + @Override + public String getName() { + return "ablestack-nas"; + } + + @Override + public String getDescription() { + return "NAS Backup Plugin"; + } + + @Override + public String getConfigComponentName() { + return BackupService.class.getSimpleName(); + } + + @Override + public void syncBackups(VirtualMachine vm) { + } + + @Override + public boolean checkBackupAgent(final Long zoneId) { return true; } + + @Override + public boolean installBackupAgent(final Long zoneId) { return true; } + + @Override + public boolean importBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { return true; } + + @Override + public boolean updateBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { return true; } + + @Override + public Pair restoreBackupToVM(Long backupId, String vmName) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'restoreBackupToVM'"); + } +} diff --git a/plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/module.properties b/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/module.properties similarity index 90% rename from plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/module.properties rename to plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/module.properties index 1db48b423de7..5a64e71d2dfa 100644 --- a/plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/module.properties +++ b/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/module.properties @@ -1,7 +1,7 @@ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor 
license agreements. See the NOTICE file # distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file +# regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at @@ -14,5 +14,5 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -name=commvault +name=ablestack-nas parent=backup diff --git a/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/spring-backup-nas-context.xml b/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/spring-backup-nas-context.xml new file mode 100644 index 000000000000..ecb71f96d19b --- /dev/null +++ b/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/spring-backup-nas-context.xml @@ -0,0 +1,26 @@ + + + + + + + diff --git a/plugins/backup/ablestack-nas/src/test/java/org/apache/cloudstack/backup/AblestackNasBackupProviderTest.java b/plugins/backup/ablestack-nas/src/test/java/org/apache/cloudstack/backup/AblestackNasBackupProviderTest.java new file mode 100644 index 000000000000..2d6575209897 --- /dev/null +++ b/plugins/backup/ablestack-nas/src/test/java/org/apache/cloudstack/backup/AblestackNasBackupProviderTest.java @@ -0,0 +1,357 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.agent.AgentManager; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; + +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupRepositoryDao; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + +@RunWith(MockitoJUnitRunner.class) +public class 
AblestackNasBackupProviderTest { + @Spy + @InjectMocks + private AblestackNasBackupProvider ablestackNasBackupProvider; + + @Mock + private BackupDao backupDao; + + @Mock + private BackupRepositoryDao backupRepositoryDao; + + @Mock + private BackupOfferingDao backupOfferingDao; + + @Mock + private VMInstanceDao vmInstanceDao; + + @Mock + private AgentManager agentManager; + + @Mock + private VolumeDao volumeDao; + + @Mock + private HostDao hostDao; + + @Mock + private BackupManager backupManager; + + @Mock + private ResourceManager resourceManager; + + @Mock + private PrimaryDataStoreDao storagePoolDao; + + @Mock + private VMSnapshotDao vmSnapshotDaoMock; + + @Test + public void testDeleteBackup() throws OperationTimedoutException, AgentUnavailableException { + Long hostId = 1L; + BackupVO backup = new BackupVO(); + backup.setBackupOfferingId(1L); + backup.setVmId(1L); + backup.setExternalId("externalId"); + ReflectionTestUtils.setField(backup, "id", 1L); + + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L, null); + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + HostVO host = mock(HostVO.class); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(backupRepositoryDao.findByBackupOfferingId(1L)).thenReturn(backupRepository); + Mockito.when(vmInstanceDao.findByIdIncludingRemoved(1L)).thenReturn(vm); + Mockito.when(agentManager.send(anyLong(), Mockito.any(AblestackDeleteBackupCommand.class))).thenReturn(new BackupAnswer(new AblestackDeleteBackupCommand(null, null, null, null, true), true, "details")); + Mockito.when(backupDao.remove(1L)).thenReturn(true); + + boolean result = ablestackNasBackupProvider.deleteBackup(backup, true); + Assert.assertTrue(result); + Mockito.verify(agentManager).send(anyLong(), Mockito.argThat(AblestackDeleteBackupCommand::isForced)); + } + 
+ @Test + public void testSyncBackupStorageStats() throws AgentUnavailableException, OperationTimedoutException { + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L, null); + + HostVO host = mock(HostVO.class); + Mockito.when(resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, 1L)).thenReturn(host); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")).thenReturn(Collections.singletonList(backupRepository)); + GetBackupStorageStatsCommand command = new GetBackupStorageStatsCommand("nfs", "address", "sync"); + BackupStorageStatsAnswer answer = new BackupStorageStatsAnswer(command, true, null); + answer.setTotalSize(100L); + answer.setUsedSize(50L); + Mockito.when(agentManager.send(anyLong(), Mockito.any(GetBackupStorageStatsCommand.class))).thenReturn(answer); + + ablestackNasBackupProvider.syncBackupStorageStats(1L); + Mockito.verify(backupRepositoryDao, Mockito.times(1)).updateCapacity(backupRepository, 100L, 50L); + } + + @Test + public void testListBackupOfferings() { + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L, null); + ReflectionTestUtils.setField(backupRepository, "uuid", "uuid"); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")).thenReturn(Collections.singletonList(backupRepository)); + + List result = ablestackNasBackupProvider.listBackupOfferings(1L); + Assert.assertEquals(1, result.size()); + Assert.assertEquals("test-repo", result.get(0).getName()); + Assert.assertEquals("uuid", result.get(0).getUuid()); + } + + @Test + public void testGetBackupStorageStats() { + BackupRepositoryVO backupRepository1 = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1000L, null); + backupRepository1.setUsedBytes(500L); + + BackupRepositoryVO backupRepository2 = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", 
"sync", 2000L, null); + backupRepository2.setUsedBytes(600L); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")) + .thenReturn(List.of(backupRepository1, backupRepository2)); + + Pair result = ablestackNasBackupProvider.getBackupStorageStats(1L); + Assert.assertEquals(Long.valueOf(1100L), result.first()); + Assert.assertEquals(Long.valueOf(3000L), result.second()); + } + + @Test + public void takeBackupSuccessfully() throws AgentUnavailableException, OperationTimedoutException { + Long vmId = 1L; + Long hostId = 2L; + Long backupOfferingId = 3L; + Long accountId = 4L; + Long domainId = 5L; + Long zoneId = 6L; + Long backupId = 7L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getId()).thenReturn(vmId); + Mockito.when(vm.getHostId()).thenReturn(hostId); + Mockito.when(vm.getInstanceName()).thenReturn("test-vm"); + Mockito.when(vm.getBackupOfferingId()).thenReturn(backupOfferingId); + Mockito.when(vm.getAccountId()).thenReturn(accountId); + Mockito.when(vm.getDomainId()).thenReturn(domainId); + Mockito.when(vm.getDataCenterId()).thenReturn(zoneId); + Mockito.when(vm.getState()).thenReturn(VMInstanceVO.State.Running); + + BackupRepository backupRepository = mock(BackupRepository.class); + Mockito.when(backupRepository.getType()).thenReturn("nfs"); + Mockito.when(backupRepository.getAddress()).thenReturn("address"); + Mockito.when(backupRepository.getMountOptions()).thenReturn("sync"); + Mockito.when(backupRepositoryDao.findByBackupOfferingId(backupOfferingId)).thenReturn(backupRepository); + + HostVO host = mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(hostId); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + + VolumeVO volume1 = mock(VolumeVO.class); + Mockito.when(volume1.getState()).thenReturn(Volume.State.Ready); + 
Mockito.when(volume1.getSize()).thenReturn(100L); + VolumeVO volume2 = mock(VolumeVO.class); + Mockito.when(volume2.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume2.getSize()).thenReturn(200L); + Mockito.when(volumeDao.findByInstance(vmId)).thenReturn(List.of(volume1, volume2)); + + BackupAnswer answer = mock(BackupAnswer.class); + Mockito.when(answer.getResult()).thenReturn(true); + Mockito.when(answer.getSize()).thenReturn(100L); + Mockito.when(agentManager.send(anyLong(), Mockito.any(AblestackNasTakeBackupCommand.class))).thenReturn(answer); + + Mockito.when(backupDao.persist(Mockito.any(BackupVO.class))).thenAnswer(invocation -> invocation.getArgument(0)); + Mockito.when(backupDao.update(Mockito.anyLong(), Mockito.any(BackupVO.class))).thenReturn(true); + + Pair result = ablestackNasBackupProvider.takeBackup(vm, false); + + Assert.assertTrue(result.first()); + Assert.assertNotNull(result.second()); + BackupVO backup = (BackupVO) result.second(); + Assert.assertEquals(Optional.ofNullable(100L), Optional.ofNullable(backup.getSize())); + Assert.assertEquals(Backup.Status.BackedUp, backup.getStatus()); + Assert.assertEquals("FULL", backup.getType()); + Assert.assertEquals(Optional.of(300L), Optional.of(backup.getProtectedSize())); + Assert.assertEquals(Optional.of(backupOfferingId), Optional.of(backup.getBackupOfferingId())); + Assert.assertEquals(Optional.of(accountId), Optional.of(backup.getAccountId())); + Assert.assertEquals(Optional.of(domainId), Optional.of(backup.getDomainId())); + Assert.assertEquals(Optional.of(zoneId), Optional.of(backup.getZoneId())); + + Mockito.verify(backupDao).persist(Mockito.any(BackupVO.class)); + Mockito.verify(backupDao).update(Mockito.anyLong(), Mockito.any(BackupVO.class)); + Mockito.verify(agentManager).send(anyLong(), Mockito.any(AblestackNasTakeBackupCommand.class)); + } + + @Test + public void testGetVMHypervisorHost() { + Long hostId = 1L; + Long vmId = 1L; + Long zoneId = 1L; + + VMInstanceVO vm = 
mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + + HostVO host = mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(hostId); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + + Host result = ablestackNasBackupProvider.getVMHypervisorHost(vm); + + Assert.assertNotNull(result); + Assert.assertTrue(Objects.equals(hostId, result.getId())); + Mockito.verify(hostDao).findById(hostId); + } + + @Test + public void testGetVMHypervisorHostWithHostDown() { + Long hostId = 1L; + Long clusterId = 2L; + Long vmId = 1L; + Long zoneId = 1L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + + HostVO downHost = mock(HostVO.class); + Mockito.when(downHost.getStatus()).thenReturn(Status.Down); + Mockito.when(downHost.getClusterId()).thenReturn(clusterId); + Mockito.when(hostDao.findById(hostId)).thenReturn(downHost); + + HostVO upHostInCluster = mock(HostVO.class); + Mockito.when(upHostInCluster.getId()).thenReturn(3L); + Mockito.when(upHostInCluster.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findHypervisorHostInCluster(clusterId)).thenReturn(List.of(upHostInCluster)); + + Host result = ablestackNasBackupProvider.getVMHypervisorHost(vm); + + Assert.assertNotNull(result); + Assert.assertTrue(Objects.equals(Long.valueOf(3L), result.getId())); + Mockito.verify(hostDao).findById(hostId); + Mockito.verify(hostDao).findHypervisorHostInCluster(clusterId); + } + + @Test + public void testGetVMHypervisorHostWithUpHostViaRootVolumeCluster() { + Long vmId = 1L; + Long zoneId = 1L; + Long clusterId = 2L; + Long poolId = 3L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(null); + Mockito.when(vm.getId()).thenReturn(vmId); + + VolumeVO rootVolume = mock(VolumeVO.class); + Mockito.when(rootVolume.getPoolId()).thenReturn(poolId); + 
Mockito.when(volumeDao.getInstanceRootVolume(vmId)).thenReturn(rootVolume); + + StoragePoolVO storagePool = mock(StoragePoolVO.class); + Mockito.when(storagePool.getClusterId()).thenReturn(clusterId); + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(storagePool); + + HostVO upHostInCluster = mock(HostVO.class); + Mockito.when(upHostInCluster.getId()).thenReturn(4L); + Mockito.when(upHostInCluster.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findHypervisorHostInCluster(clusterId)).thenReturn(List.of(upHostInCluster)); + + Host result = ablestackNasBackupProvider.getVMHypervisorHost(vm); + + Assert.assertNotNull(result); + Assert.assertTrue(Objects.equals(Long.valueOf(4L), result.getId())); + Mockito.verify(volumeDao).getInstanceRootVolume(vmId); + Mockito.verify(storagePoolDao).findById(poolId); + Mockito.verify(hostDao).findHypervisorHostInCluster(clusterId); + } + + @Test + public void testGetVMHypervisorHostFallbackToZoneWideKVMHost() { + Long hostId = 1L; + Long clusterId = 2L; + Long vmId = 1L; + Long zoneId = 1L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + Mockito.when(vm.getDataCenterId()).thenReturn(zoneId); + + HostVO downHost = mock(HostVO.class); + Mockito.when(downHost.getStatus()).thenReturn(Status.Down); + Mockito.when(downHost.getClusterId()).thenReturn(clusterId); + Mockito.when(hostDao.findById(hostId)).thenReturn(downHost); + + Mockito.when(hostDao.findHypervisorHostInCluster(clusterId)).thenReturn(Collections.emptyList()); + + HostVO fallbackHost = mock(HostVO.class); + Mockito.when(fallbackHost.getId()).thenReturn(5L); + Mockito.when(resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId)) + .thenReturn(fallbackHost); + + Host result = ablestackNasBackupProvider.getVMHypervisorHost(vm); + + Assert.assertNotNull(result); + Assert.assertTrue(Objects.equals(Long.valueOf(5L), result.getId())); + 
Mockito.verify(hostDao).findById(hostId); + Mockito.verify(hostDao).findHypervisorHostInCluster(clusterId); + Mockito.verify(resourceManager).findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); + } +} diff --git a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/CommvaultBackupProvider.java b/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/CommvaultBackupProvider.java deleted file mode 100644 index a6cdc1e71a6b..000000000000 --- a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/CommvaultBackupProvider.java +++ /dev/null @@ -1,1180 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package org.apache.cloudstack.backup; - -import com.cloud.agent.AgentManager; -import com.cloud.exception.AgentUnavailableException; -import com.cloud.exception.OperationTimedoutException; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.domain.Domain; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.offering.DiskOffering; -import com.cloud.resource.ResourceManager; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.ScopeType; -import com.cloud.storage.Storage; -import com.cloud.storage.Volume; -import com.cloud.storage.Volume.Type; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.DiskOfferingDao; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.user.User; -import com.cloud.user.Account; -import com.cloud.user.AccountService; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.Pair; -import com.cloud.utils.Ternary; -import com.cloud.utils.ssh.SshHelper; -import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.event.ActionEventUtils; -import com.cloud.event.EventTypes; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.dao.VMInstanceDao; -import com.cloud.vm.snapshot.VMSnapshot; -import com.cloud.vm.snapshot.dao.VMSnapshotDao; -import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.backup.commvault.CommvaultClient; -import org.apache.cloudstack.backup.dao.BackupDao; -import 
org.apache.cloudstack.backup.dao.BackupOfferingDao; -import org.apache.cloudstack.backup.dao.BackupOfferingDaoImpl; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.Configurable; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.commons.collections.CollectionUtils; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.xml.utils.URI; -import org.json.JSONObject; -import java.net.URISyntaxException; -import java.security.KeyManagementException; -import java.security.NoSuchAlgorithmException; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.HashMap; -import java.util.Date; -import java.util.Objects; -import java.util.UUID; -import java.util.Optional; -import java.util.stream.Collectors; -import java.util.Collections; -import java.util.Comparator; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import javax.inject.Inject; - -import static org.apache.cloudstack.backup.BackupManager.BackupFrameworkEnabled; - -public class CommvaultBackupProvider extends AdapterBase implements BackupProvider, Configurable { - - private static final Logger LOG = LogManager.getLogger(CommvaultBackupProvider.class); - private static final String RM_COMMAND = "rm -rf %s"; - private static final int BASE_MAJOR = 11; - private static final int BASE_FR = 32; - private static final int BASE_MT = 89; - private static final Pattern VERSION_PATTERN = Pattern.compile("^(\\d+)\\s*SP\\s*(\\d+)(?:\\.(\\d+))?$", Pattern.CASE_INSENSITIVE); - private static final String COMMVAULT_DIRECTORY = "/tmp/mold/backup"; - - public ConfigKey CommvaultUrl = new 
ConfigKey<>("Advanced", String.class, - "backup.plugin.commvault.url", "https://localhost/commandcenter/api", - "Commvault Command Center API URL.", true, ConfigKey.Scope.Zone); - - private ConfigKey CommvaultUsername = new ConfigKey<>("Advanced", String.class, - "backup.plugin.commvault.username", "admin", - "Commvault Command Center API username.", true, ConfigKey.Scope.Zone); - - private ConfigKey CommvaultPassword = new ConfigKey<>("Secure", String.class, - "backup.plugin.commvault.password", "password", - "Commvault Command Center API password.", true, ConfigKey.Scope.Zone); - - private ConfigKey CommvaultValidateSSLSecurity = new ConfigKey<>("Advanced", Boolean.class, - "backup.plugin.commvault.validate.ssl", "false", - "Validate the SSL certificate when connecting to Commvault Command Center API service.", true, ConfigKey.Scope.Zone); - - private ConfigKey CommvaultApiRequestTimeout = new ConfigKey<>("Advanced", Integer.class, - "backup.plugin.commvault.request.timeout", "300", - "Commvault Command Center API request timeout in seconds.", true, ConfigKey.Scope.Zone); - - private static ConfigKey CommvaultRestoreTimeout = new ConfigKey<>("Advanced", Integer.class, - "backup.plugin.commvault.restore.timeout", "600", - "Commvault B&R API restore backup timeout in seconds.", true, ConfigKey.Scope.Zone); - - private static ConfigKey CommvaultTaskPollInterval = new ConfigKey<>("Advanced", Integer.class, - "backup.plugin.commvault.task.poll.interval", "5", - "The time interval in seconds when the management server polls for Commvault task status.", true, ConfigKey.Scope.Zone); - - private static ConfigKey CommvaultTaskPollMaxRetry = new ConfigKey<>("Advanced", Integer.class, - "backup.plugin.commvault.task.poll.max.retry", "120", - "The max number of retrying times when the management server polls for Commvault task status.", true, ConfigKey.Scope.Zone); - - private ConfigKey CommvaultClientVerboseLogs = new ConfigKey<>("Advanced", Boolean.class, - 
"backup.plugin.commvault.client.verbosity", "false", - "Produce Verbose logs in Hypervisor", true, ConfigKey.Scope.Zone); - - private ConfigKey CommvaultBackupRestoreTimeout = new ConfigKey<>("Advanced", Integer.class, - "commvault.backup.restore.timeout", - "30", - "Timeout in seconds after which qemu-img execute when restoring", - true, - BackupFrameworkEnabled.key()); - - @Inject - private BackupDao backupDao; - - @Inject - private BackupOfferingDao backupOfferingDao; - - @Inject - private HostDao hostDao; - - @Inject - private ClusterDao clusterDao; - - @Inject - private VolumeDao volumeDao; - - @Inject - private SnapshotDataStoreDao snapshotStoreDao; - - @Inject - private StoragePoolHostDao storagePoolHostDao; - - @Inject - private VMInstanceDao vmInstanceDao; - - @Inject - private AccountService accountService; - - @Inject - DataStoreManager dataStoreMgr; - - @Inject - private AgentManager agentManager; - - @Inject - private VMSnapshotDao vmSnapshotDao; - - @Inject - private PrimaryDataStoreDao primaryDataStoreDao; - - @Inject - private ConfigurationDao configDao; - - @Inject - private BackupManager backupManager; - - @Inject - ResourceManager resourceManager; - - @Inject - private DiskOfferingDao diskOfferingDao; - - private Long getClusterIdFromRootVolume(VirtualMachine vm) { - VolumeVO rootVolume = volumeDao.getInstanceRootVolume(vm.getId()); - StoragePoolVO rootDiskPool = primaryDataStoreDao.findById(rootVolume.getPoolId()); - if (rootDiskPool == null) { - return null; - } - return rootDiskPool.getClusterId(); - } - - protected Host getVMHypervisorHost(VirtualMachine vm) { - Long hostId = vm.getLastHostId(); - Long clusterId = null; - - if (hostId != null) { - Host host = hostDao.findById(hostId); - if (host.getStatus() == Status.Up) { - return host; - } - // Try to find any Up host in the same cluster - clusterId = host.getClusterId(); - } else { - // Try to find any Up host in the same cluster as the root volume - clusterId = 
getClusterIdFromRootVolume(vm); - } - - if (clusterId != null) { - for (final Host hostInCluster : hostDao.findHypervisorHostInCluster(clusterId)) { - if (hostInCluster.getStatus() == Status.Up) { - LOG.debug("Found Host {} in cluster {}", hostInCluster, clusterId); - return hostInCluster; - } - } - } - - // Try to find any Host in the zone - return resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, vm.getDataCenterId()); - } - - protected Host getVMHypervisorHostForBackup(VirtualMachine vm) { - Long hostId = vm.getHostId(); - if (hostId == null && VirtualMachine.State.Running.equals(vm.getState())) { - throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for %s. Make sure the virtual machine is running", vm.getName())); - } - if (VirtualMachine.State.Stopped.equals(vm.getState())) { - hostId = vm.getLastHostId(); - } - if (hostId == null) { - throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for stopped VM: %s", vm)); - } - final Host host = hostDao.findById(hostId); - if (host == null || !Status.Up.equals(host.getStatus()) || !Hypervisor.HypervisorType.KVM.equals(host.getHypervisorType())) { - throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); - } - return host; - } - - @Override - public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { - final Host vmHost = getVMHypervisorHostForBackup(vm); - final HostVO vmHostVO = hostDao.findById(vmHost.getId()); - if (CollectionUtils.isNotEmpty(vmSnapshotDao.findByVmAndByType(vm.getId(), VMSnapshot.Type.DiskAndMemory))) { - LOG.debug("Commvault backup provider cannot take backups of a VM [{}] with disk-and-memory VM snapshots. 
Restoring the backup will corrupt any newer disk-and-memory " + - "VM snapshots.", vm); - throw new CloudRuntimeException(String.format("Cannot take backup of VM [%s] as it has disk-and-memory VM snapshots.", vm.getUuid())); - } - - try { - String commvaultServer = getUrlDomain(CommvaultUrl.value()); - } catch (URISyntaxException e) { - throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); - } - // 백업 중인 작업 조회 - final CommvaultClient client = getClient(vm.getDataCenterId()); - boolean activeJob = client.getActiveJob(vm.getInstanceName()); - if (activeJob) { - throw new CloudRuntimeException("There are backup jobs running on the virtual machine. Please try again later."); - } - - BackupOfferingVO vmBackupOffering = new BackupOfferingDaoImpl().findById(vm.getBackupOfferingId()); - String planId = vmBackupOffering.getExternalId(); - - // 클라이언트의 백업세트 조회하여 호스트 정의 - String checkVm = client.getVmBackupSetId(vmHost.getName(), vm.getInstanceName()); - if (checkVm == null) { - String clientId = client.getClientId(vmHost.getName()); - String applicationId = client.getApplicationId(clientId); - boolean result = client.createBackupSet(vm.getInstanceName(), applicationId, clientId, planId); - if (!result) { - throw new CloudRuntimeException("Execution of the API that creates a backup set of a virtual machine on the host failed."); - } - } - - final Date creationDate = new Date(); - final String backupPath = String.format("%s/%s/%s", COMMVAULT_DIRECTORY, vm.getInstanceName(), - new SimpleDateFormat("yyyy.MM.dd.HH.mm.ss").format(creationDate)); - - BackupVO backupVO = createBackupObject(vm, backupPath); - CommvaultTakeBackupCommand command = new CommvaultTakeBackupCommand(vm.getInstanceName(), backupPath); - command.setQuiesce(quiesceVM); - - if (VirtualMachine.State.Stopped.equals(vm.getState())) { - List vmVolumes = volumeDao.findByInstance(vm.getId()); - vmVolumes.sort(Comparator.comparing(Volume::getDeviceId)); - Pair, List> 
volumePoolsAndPaths = getVolumePoolsAndPaths(vmVolumes); - command.setVolumePools(volumePoolsAndPaths.first()); - command.setVolumePaths(volumePoolsAndPaths.second()); - } - - BackupAnswer answer; - try { - answer = (BackupAnswer) agentManager.send(vmHost.getId(), command); - } catch (AgentUnavailableException e) { - LOG.error("Unable to contact backend control plane to initiate backup for VM {}", vm.getInstanceName()); - backupVO.setStatus(Backup.Status.Failed); - backupDao.remove(backupVO.getId()); - throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); - } catch (OperationTimedoutException e) { - LOG.error("Operation to initiate backup timed out for VM {}", vm.getInstanceName()); - backupVO.setStatus(Backup.Status.Failed); - backupDao.remove(backupVO.getId()); - throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); - } - - if (answer != null && answer.getResult()) { - int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); - Ternary credentials = getKVMHyperisorCredentials(vmHostVO); - String cmd = String.format(RM_COMMAND, backupPath); - // 생성된 백업 폴더 경로로 해당 백업 세트의 백업 콘텐츠 경로 업데이트 - String clientId = client.getClientId(vmHost.getName()); - String subClientEntity = client.getSubclient(clientId, vm.getInstanceName()); - if (subClientEntity == null) { - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to get subclient info commvault api"); - } else { - JSONObject jsonObject = new JSONObject(subClientEntity); - String subclientId = String.valueOf(jsonObject.get("subclientId")); - String applicationId = String.valueOf(jsonObject.get("applicationId")); - String backupsetId = String.valueOf(jsonObject.get("backupsetId")); - String instanceId = String.valueOf(jsonObject.get("instanceId")); - String backupsetName = String.valueOf(jsonObject.get("backupsetName")); - String displayName = String.valueOf(jsonObject.get("displayName")); - String commCellName 
= String.valueOf(jsonObject.get("commCellName")); - String companyId = String.valueOf(jsonObject.getJSONObject("entityInfo").get("companyId")); - String companyName = String.valueOf(jsonObject.getJSONObject("entityInfo").get("companyName")); - String instanceName = String.valueOf(jsonObject.get("instanceName")); - String appName = String.valueOf(jsonObject.get("appName")); - String clientName = String.valueOf(jsonObject.get("clientName")); - String subclientGUID = String.valueOf(jsonObject.get("subclientGUID")); - String subclientName = String.valueOf(jsonObject.get("subclientName")); - String csGUID = String.valueOf(jsonObject.get("csGUID")); - boolean upResult = client.updateBackupSet(backupPath, subclientId, clientId, planId, applicationId, backupsetId, instanceId, subclientName, backupsetName); - if (upResult) { - String planName = client.getPlanName(planId); - String storagePolicyId = client.getStoragePolicyId(planName); - if (planName == null || storagePolicyId == null) { - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to get storage policy id commvault api"); - } else { - // 백업 실행 - String jobId = client.createBackup(subclientId, storagePolicyId, displayName, commCellName, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, subclientGUID, subclientName, csGUID, backupsetName); - if (jobId != null) { - String jobStatus = client.getJobStatus(jobId); - String externalId = backupPath + "," + jobId; - if (jobStatus.equalsIgnoreCase("Completed")) { - String jobDetails = client.getJobDetails(jobId); - if (jobDetails != null) { - JSONObject jsonObject2 = new JSONObject(jobDetails); - String endTime = String.valueOf(jsonObject2.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("endTime")); - long timestamp = Long.parseLong(endTime) * 1000L; - Date endDate = new Date(timestamp); - SimpleDateFormat formatterDateTime = new 
SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); - String formattedString = formatterDateTime.format(endDate); - String size = String.valueOf(jsonObject2.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("sizeOfApplication")); - String type = String.valueOf(jsonObject2.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").get("backupType")); - backupVO.setExternalId(externalId); - backupVO.setType(type.toUpperCase()); - try { - backupVO.setDate(formatterDateTime.parse(formattedString)); - } catch (ParseException e) { - String msg = String.format("Unable to parse date [%s].", endTime); - LOG.error(msg, e); - throw new CloudRuntimeException(msg, e); - } - backupVO.setSize(Long.parseLong(size)); - backupVO.setStatus(Backup.Status.BackedUp); - List vols = new ArrayList<>(volumeDao.findByInstance(vm.getId())); - backupVO.setBackedUpVolumes(backupManager.createVolumeInfoFromVolumes(vols)); - if (backupDao.update(backupVO.getId(), backupVO)) { - executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, cmd); - return new Pair<>(true, backupVO); - } else { - executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, cmd); - throw new CloudRuntimeException("Failed to update backup"); - } - } else { - backupVO.setExternalId(externalId); - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to get details job commvault api"); - } - } else { - backupVO.setExternalId(externalId); - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to create backup job status is " + jobStatus); - } - } else { - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to create backup job commvault api"); - } - } - } else { - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to update backupset content path commvault api"); - } - } - backupVO.setStatus(Backup.Status.Failed); - backupDao.remove(backupVO.getId()); - 
executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, cmd); - return new Pair<>(false, null); - } else { - LOG.error("Failed to take backup for VM {}: {}", vm.getInstanceName(), answer != null ? answer.getDetails() : "No answer received"); - if (answer.getNeedsCleanup()) { - LOG.error("Backup cleanup failed for VM {}. Leaving the backup in Error state.", vm.getInstanceName()); - backupVO.setStatus(Backup.Status.Error); - backupDao.update(backupVO.getId(), backupVO); - } else { - backupVO.setStatus(Backup.Status.Failed); - backupDao.remove(backupVO.getId()); - } - return new Pair<>(false, null); - } - } - - private BackupVO createBackupObject(VirtualMachine vm, String backupPath) { - BackupVO backup = new BackupVO(); - backup.setVmId(vm.getId()); - backup.setExternalId(backupPath); - backup.setType("FULL"); - backup.setDate(new Date()); - long virtualSize = 0L; - for (final Volume volume: volumeDao.findByInstance(vm.getId())) { - if (Volume.State.Ready.equals(volume.getState())) { - virtualSize += volume.getSize(); - } - } - backup.setProtectedSize(virtualSize); - backup.setStatus(Backup.Status.BackingUp); - backup.setBackupOfferingId(vm.getBackupOfferingId()); - backup.setAccountId(vm.getAccountId()); - backup.setDomainId(vm.getDomainId()); - backup.setZoneId(vm.getDataCenterId()); - backup.setName(backupManager.getBackupNameFromVM(vm)); - Map details = backupManager.getBackupDetailsFromVM(vm); - backup.setDetails(details); - - return backupDao.persist(backup); - } - - // 백업에서 새 인스턴스 생성 - @Override - public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { - return restoreVMBackup(vm, backup); - } - - // 가상머신 백업 복원 - @Override - public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { - return restoreVMBackup(vm, backup).first(); - } - - private Pair restoreVMBackup(VirtualMachine vm, Backup backup) { - try { - String commvaultServer = 
getUrlDomain(CommvaultUrl.value()); - } catch (URISyntaxException e) { - throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); - } - final CommvaultClient client = getClient(vm.getDataCenterId()); - final String externalId = backup.getExternalId(); - String jobId = externalId.substring(externalId.lastIndexOf(',') + 1).trim(); - final String path = externalId.substring(0, externalId.lastIndexOf(',')); - String jobDetails = client.getJobDetails(jobId); - if (jobDetails == null) { - throw new CloudRuntimeException("Failed to get job details commvault api"); - } - JSONObject jsonObject = new JSONObject(jobDetails); - String endTime = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("endTime")); - String subclientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); - String displayName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("displayName")); - String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); - String companyId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyId")); - String companyName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyName")); - String instanceName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceName")); - String appName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("appName")); - String 
applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); - String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); - String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); - String instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); - String backupsetName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetName")); - String commCellId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("commcell").get("commCellId")); - String backupsetGUID = client.getVmBackupSetGuid(clientName, backupsetName); - if (backupsetGUID == null) { - throw new CloudRuntimeException("Failed to get vm backup set guid commvault api"); - } - // 복원된 호스트 정의 - final HostVO restoreHost = hostDao.findByName(clientName); - final HostVO restoreHostVO = hostDao.findById(restoreHost.getId()); - LOG.info(String.format("Restoring vm %s from backup %s on the Commvault Backup Provider", vm, backup)); - // 복원 실행 - String jobId2 = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, path); - if (jobId2 != null) { - String jobStatus = client.getJobStatus(jobId2); - if (jobStatus.equalsIgnoreCase("Completed")) { - List backedVolumesUUIDs = backup.getBackedUpVolumes().stream() - .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) - 
.map(Backup.VolumeInfo::getUuid) - .collect(Collectors.toList()); - - List restoreVolumes = volumeDao.findByInstance(vm.getId()).stream() - .sorted(Comparator.comparingLong(VolumeVO::getDeviceId)) - .collect(Collectors.toList()); - - LOG.debug("Restoring vm {} from backup {} on the Commvault Backup Provider", vm, backup); - // 가상머신이 실행중인 호스트 정의 - final Host vmHost = getVMHypervisorHost(vm); - final HostVO vmHostVO = hostDao.findById(vmHost.getId()); - CommvaultRestoreBackupCommand restoreCommand = new CommvaultRestoreBackupCommand(); - LOG.info(path); - restoreCommand.setBackupPath(path); - restoreCommand.setVmName(vm.getName()); - restoreCommand.setBackupVolumesUUIDs(backedVolumesUUIDs); - Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(restoreVolumes); - restoreCommand.setRestoreVolumePools(volumePoolsAndPaths.first()); - restoreCommand.setRestoreVolumePaths(volumePoolsAndPaths.second()); - restoreCommand.setVmExists(vm.getRemoved() == null); - restoreCommand.setVmState(vm.getState()); - restoreCommand.setTimeout(CommvaultBackupRestoreTimeout.value()); - // 복원된 호스트와 가상머신이 실행중인 호스트가 같은 경우 null, 다른 경우 추가 - restoreCommand.setHostName(restoreHost.getId() == vmHost.getId() ? 
null : restoreHost.getName()); - - BackupAnswer answer; - try { - answer = (BackupAnswer) agentManager.send(vmHost.getId(), restoreCommand); - } catch (AgentUnavailableException e) { - throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); - } catch (OperationTimedoutException e) { - throw new CloudRuntimeException("Operation to restore backup timed out, please try again"); - } - if (!answer.getResult()) { - int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); - Ternary credentials = getKVMHyperisorCredentials(vmHostVO); - String command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, command); - if (restoreHost.getId() != vmHost.getId()) { - credentials = getKVMHyperisorCredentials(restoreHostVO); - command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); - } - } - return new Pair<>(answer.getResult(), answer.getDetails()); - } else { - throw new CloudRuntimeException("Failed to restore Full VM commvault api resulted in " + jobStatus); - } - } else { - throw new CloudRuntimeException("Failed to restore Full VM commvault api"); - } - } - - private Pair, List> getVolumePoolsAndPaths(List volumes) { - List volumePools = new ArrayList<>(); - List volumePaths = new ArrayList<>(); - for (VolumeVO volume : volumes) { - StoragePoolVO storagePool = primaryDataStoreDao.findById(volume.getPoolId()); - if (Objects.isNull(storagePool)) { - throw new CloudRuntimeException("Unable to find storage pool associated to the volume"); - } - - DataStore dataStore = dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary); - volumePools.add(dataStore != null ? 
(PrimaryDataStoreTO)dataStore.getTO() : null); - - String volumePathPrefix = getVolumePathPrefix(storagePool); - volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath())); - } - return new Pair<>(volumePools, volumePaths); - } - - private String getVolumePathPrefix(StoragePoolVO storagePool) { - String volumePathPrefix; - if (ScopeType.HOST.equals(storagePool.getScope()) || - Storage.StoragePoolType.SharedMountPoint.equals(storagePool.getPoolType()) || - Storage.StoragePoolType.RBD.equals(storagePool.getPoolType())) { - volumePathPrefix = storagePool.getPath(); - } else { - // Should be Storage.StoragePoolType.NetworkFilesystem - volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid()); - } - return volumePathPrefix; - } - - // 백업 볼륨 복원 및 연결 - @Override - public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { - try { - String commvaultServer = getUrlDomain(CommvaultUrl.value()); - } catch (URISyntaxException e) { - throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); - } - final String externalId = backup.getExternalId(); - final Long zoneId = backup.getZoneId(); - final CommvaultClient client = getClient(zoneId); - String jobId = externalId.substring(externalId.lastIndexOf(',') + 1).trim(); - final String path = externalId.substring(0, externalId.lastIndexOf(',')); - String jobDetails = client.getJobDetails(jobId); - if (jobDetails == null) { - throw new CloudRuntimeException("Failed to get job details commvault api"); - } - JSONObject jsonObject = new JSONObject(jobDetails); - String endTime = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("endTime")); - String subclientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); - String displayName = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("displayName")); - String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); - String companyId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyId")); - String companyName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyName")); - String instanceName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceName")); - String appName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("appName")); - String applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); - String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); - String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); - String instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); - String backupsetName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetName")); - String commCellId = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("commcell").get("commCellId")); - String backupsetGUID = client.getVmBackupSetGuid(clientName, backupsetName); - if (backupsetGUID == null) { - throw new CloudRuntimeException("Failed to get vm backup set guid commvault api"); - } - // 복원 실행 - String jobId2 = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, path); - if (jobId2 != null) { - String jobStatus = client.getJobStatus(jobId2); - if (jobStatus.equalsIgnoreCase("Completed")) { - final int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); - final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); - final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); - String cacheMode = null; - final VMInstanceVO vm = vmInstanceDao.findVMByInstanceName(vmNameAndState.first()); - List listVolumes = volumeDao.findByInstanceAndType(vm.getId(), Type.ROOT); - if(CollectionUtils.isNotEmpty(listVolumes)) { - VolumeVO rootDisk = listVolumes.get(0); - DiskOffering baseDiskOffering = diskOfferingDao.findById(rootDisk.getDiskOfferingId()); - if (baseDiskOffering.getCacheMode() != null) { - cacheMode = baseDiskOffering.getCacheMode().toString(); - } - } - final StoragePoolVO pool = primaryDataStoreDao.findByUuid(dataStoreUuid); - // 백업 볼륨 복원 및 연결 시 연결할 가상머신이 실행중인 경우 해당 호스트, 정지중인 경우 랜덤 호스트 정의백업 - final HostVO vmHost = hostDao.findByIp(hostIp); - final HostVO vmHostVO = hostDao.findById(vmHost.getId()); - // 복원된 호스트 정의 - final HostVO restoreHost = hostDao.findByName(clientName); - final HostVO restoreHostVO = hostDao.findById(restoreHost.getId()); - LOG.info(String.format("Restoring volume %s from backup %s on the Commvault Backup Provider", volume.getUuid(), backup)); - 
LOG.debug("Restoring vm volume {} from backup {} on the Commvault Backup Provider", backupVolumeInfo, backup); - VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), - backup.getDomainId(), backup.getAccountId(), 0, null, - backup.getSize(), null, null, null); - String volumeUUID = UUID.randomUUID().toString(); - String volumeName = volume != null ? volume.getName() : backupVolumeInfo.getUuid(); - restoredVolume.setName("RestoredVol-" + volumeName); - restoredVolume.setProvisioningType(diskOffering.getProvisioningType()); - restoredVolume.setUpdated(new Date()); - restoredVolume.setUuid(volumeUUID); - restoredVolume.setRemoved(null); - restoredVolume.setDisplayVolume(true); - restoredVolume.setPoolId(pool.getId()); - restoredVolume.setPoolType(pool.getPoolType()); - restoredVolume.setPath(restoredVolume.getUuid()); - restoredVolume.setState(Volume.State.Copying); - restoredVolume.setSize(backupVolumeInfo.getSize()); - restoredVolume.setDiskOfferingId(diskOffering.getId()); - if (pool.getPoolType() != Storage.StoragePoolType.RBD) { - restoredVolume.setFormat(Storage.ImageFormat.QCOW2); - } else { - restoredVolume.setFormat(Storage.ImageFormat.RAW); - } - - CommvaultRestoreBackupCommand restoreCommand = new CommvaultRestoreBackupCommand(); - restoreCommand.setBackupPath(path); - restoreCommand.setVmName(vmNameAndState.first()); - restoreCommand.setRestoreVolumePaths(Collections.singletonList(String.format("%s/%s", getVolumePathPrefix(pool), volumeUUID))); - DataStore dataStore = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); - restoreCommand.setRestoreVolumePools(Collections.singletonList(dataStore != null ? 
(PrimaryDataStoreTO)dataStore.getTO() : null)); - restoreCommand.setDiskType(backupVolumeInfo.getType().name().toLowerCase(Locale.ROOT)); - restoreCommand.setVmExists(null); - restoreCommand.setVmState(vmNameAndState.second()); - restoreCommand.setRestoreVolumeUUID(backupVolumeInfo.getUuid()); - restoreCommand.setTimeout(CommvaultBackupRestoreTimeout.value()); - restoreCommand.setCacheMode(cacheMode); - // 복원된 호스트와 가상머신이 실행중인 호스트가 같은 경우 null, 다른 경우 추가 - restoreCommand.setHostName(restoreHost.getId() == vmHost.getId() ? null : restoreHost.getName()); - - BackupAnswer answer; - try { - answer = (BackupAnswer) agentManager.send(vmHost.getId(), restoreCommand); - } catch (AgentUnavailableException e) { - throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); - } catch (OperationTimedoutException e) { - throw new CloudRuntimeException("Operation to restore backed up volume timed out, please try again"); - } - - if (answer.getResult()) { - try { - volumeDao.persist(restoredVolume); - } catch (Exception e) { - throw new CloudRuntimeException("Unable to create restored volume due to: " + e); - } - if (restoreHost.getId() != vmHost.getId()) { - Ternary credentials = getKVMHyperisorCredentials(restoreHostVO); - String command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); - } - return new Pair<>(answer.getResult(), answer.getDetails()); - } else { - Ternary credentials = getKVMHyperisorCredentials(vmHostVO); - String command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, command); - if (restoreHost.getId() != vmHost.getId()) { - credentials = getKVMHyperisorCredentials(restoreHostVO); - command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); - } - } - } else { - 
LOG.error("Failed to restore backup for VM " + vmNameAndState.first() + " to restore backup job status is " + jobStatus); - } - } else { - LOG.error("Failed to restore backup for VM " + vmNameAndState.first() + " to restore backup job commvault api"); - } - return new Pair<>(false, null); - } - - private Optional getBackedUpVolumeInfo(List backedUpVolumes, String volumeUuid) { - return backedUpVolumes.stream() - .filter(v -> v.getUuid().equals(volumeUuid)) - .findFirst(); - } - - @Override - public boolean deleteBackup(Backup backup, boolean forced) { - final Long zoneId = backup.getZoneId(); - final String externalId = backup.getExternalId(); - String jobId = externalId.substring(externalId.lastIndexOf(',') + 1).trim(); - String path = externalId.substring(0, externalId.lastIndexOf(',')); - final CommvaultClient client = getClient(zoneId); - String jobDetails = client.getJobDetails(jobId); - if (jobDetails != null) { - JSONObject jsonObject = new JSONObject(jobDetails); - String subclientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); - String applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); - String instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); - String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); - String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); - String backupsetId = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); - return client.deleteBackup(subclientId, applicationId, applicationId, clientId, clientName, backupsetId, path); - } else { - throw new CloudRuntimeException("Failed to request backup job detail commvault api"); - } - } - - public void syncBackupMetrics(Long zoneId) { - } - - @Override - public List listRestorePoints(VirtualMachine vm) { - return null; - } - - @Override - public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm) { - return null; - } - - @Override - public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { - final CommvaultClient client = getClient(vm.getDataCenterId()); - final Host host = getVMHypervisorHostForBackup(vm); - String clientId = client.getClientId(host.getName()); - String applicationId = client.getApplicationId(clientId); - return client.createBackupSet(vm.getInstanceName(), applicationId, clientId, backupOffering.getExternalId()); - } - - @Override - public boolean removeVMFromBackupOffering(VirtualMachine vm) { - final CommvaultClient client = getClient(vm.getDataCenterId()); - List Hosts = hostDao.findByDataCenterId(vm.getDataCenterId()); - boolean allDeleted = true; - for (final HostVO host : Hosts) { - if (host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { - String backupSetId = client.getVmBackupSetId(host.getName(), vm.getInstanceName()); - if (backupSetId != null) { - boolean deleted = client.deleteBackupSet(backupSetId); - if (!deleted) { - allDeleted = false; - LOG.error("Failed to delete backupSetId: " + backupSetId +" for VM: " + vm.getInstanceName()); - } - } - } - } - return allDeleted; - } - - // 하위 클라이언트 삭제 시 백업본 데이터는 그대로 남아있지만, 해당 하위 클라이언트가 삭제되었기 때문에 스케줄도 삭제시켜야하며 - // 남아있는 백업본 데이터는 mold에서 관리하지 않고, commvault 의 plan 보존기간에 따라 데이터 에이징 됨. 
- @Override - public boolean willDeleteBackupsOnOfferingRemoval() { - return true; - } - - @Override - public boolean supportsInstanceFromBackup() { - return true; - } - - @Override - public boolean supportsMemoryVmSnapshot() { - return false; - } - - @Override - public Pair getBackupStorageStats(Long zoneId) { - return new Pair<>(0L, 0L); - } - - @Override - public void syncBackupStorageStats(Long zoneId) { - } - - @Override - public List listBackupOfferings(Long zoneId) { - return getClient(zoneId).listPlans(); - } - - @Override - public boolean isValidProviderOffering(Long zoneId, String uuid) { - List policies = listBackupOfferings(zoneId); - if (CollectionUtils.isEmpty(policies)) { - return false; - } - for (final BackupOffering policy : policies) { - if (policy.getExternalId().equals(uuid)) { - return true; - } - } - return false; - } - - @Override - public Boolean crossZoneInstanceCreationEnabled(BackupOffering backupOffering) { - return false; - } - - @Override - public ConfigKey[] getConfigKeys() { - return new ConfigKey[]{ - CommvaultUrl, - CommvaultUsername, - CommvaultPassword, - CommvaultValidateSSLSecurity, - CommvaultApiRequestTimeout, - CommvaultClientVerboseLogs - }; - } - - @Override - public String getName() { - return "commvault"; - } - - @Override - public String getDescription() { - return "Commvault Backup Plugin"; - } - - @Override - public String getConfigComponentName() { - return BackupService.class.getSimpleName(); - } - - @Override - public void syncBackups(VirtualMachine vm) { - try { - String commvaultServer = getUrlDomain(CommvaultUrl.value()); - } catch (URISyntaxException e) { - return; - } - final CommvaultClient client = getClient(vm.getDataCenterId()); - for (final Backup backup: backupDao.listByVmId(vm.getDataCenterId(), vm.getId())) { - String externalId = backup.getExternalId(); - String jobId = externalId.substring(externalId.lastIndexOf(',') + 1).trim(); - String path = externalId.substring(0, externalId.lastIndexOf(',')); 
- String jobDetails = client.getJobDetails(jobId); - if (jobDetails != null) { - JSONObject jsonObject = new JSONObject(jobDetails); - String retainedUntil = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").get("retainedUntil")); - String storagePolicyId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("storagePolicy").get("storagePolicyId")); - BackupOfferingVO vmBackupOffering = new BackupOfferingDaoImpl().findById(vm.getBackupOfferingId()); - BackupOfferingVO offering = backupOfferingDao.createForUpdate(vmBackupOffering.getId()); - String retentionDay = client.getRetentionPeriod(storagePolicyId); - offering.setRetentionPeriod(retentionDay); - backupOfferingDao.update(offering.getId(), offering); - long timestamp = Long.parseLong(retainedUntil) * 1000L; - boolean isExpired = isRetentionExpired(retainedUntil); - if (isExpired) { - String subclientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); - String applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); - String instanceId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); - String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); - String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); - String backupsetId = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); - boolean result = client.deleteBackup(subclientId, applicationId, applicationId, clientId, clientName, backupsetId, path); - if (result) { - backupDao.remove(backup.getId()); - } - } - } - } - return; - } - - @Override - public boolean checkBackupAgent(final Long zoneId) { - Map checkResult = new HashMap<>(); - final CommvaultClient client = getClient(zoneId); - String csVersionInfo = client.getCvtVersion(); - boolean version = versionCheck(csVersionInfo); - if (version) { - List Hosts = hostDao.findByDataCenterId(zoneId); - for (final HostVO host : Hosts) { - if (host.getStatus() == Status.Up && host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { - String checkHost = client.getClientId(host.getName()); - if (checkHost == null) { - return false; - } else { - boolean installJob = client.getInstallActiveJob(host.getPrivateIpAddress()); - boolean checkInstall = client.getClientProps(checkHost); - if (installJob || !checkInstall) { - if (!checkInstall) { - LOG.error("The host is registered with the client, but the readiness status is not normal and you must manually check the client status."); - } - return false; - } - } - } - } - return true; - } - return false; - } - - @Override - public boolean installBackupAgent(final Long zoneId) { - Map failResult = new HashMap<>(); - final CommvaultClient client = getClient(zoneId); - List Hosts = hostDao.findByDataCenterId(zoneId); - for (final HostVO host : Hosts) { - if (host.getStatus() == Status.Up && host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { - String commCell = client.getCommcell(); - JSONObject jsonObject = new JSONObject(commCell); - String commCellId = String.valueOf(jsonObject.get("commCellId")); - String commServeHostName = String.valueOf(jsonObject.get("commCellName")); - Ternary credentials = getKVMHyperisorCredentials(host); - boolean 
installJob = true; - LOG.info("checking for install agent on the Commvault Backup Provider in host " + host.getPrivateIpAddress()); - // 설치가 진행중인 호스트가 있는지 확인 - while (installJob) { - installJob = client.getInstallActiveJob(host.getName()); - try { - Thread.sleep(30000); - } catch (InterruptedException e) { - LOG.error("checkBackupAgent get install active job result sleep interrupted error"); - } - } - String checkHost = client.getClientId(host.getName()); - // 호스트가 클라이언트에 등록되지 않은 경우 - if (checkHost == null) { - String jobId = client.installAgent(host.getPrivateIpAddress(), commCellId, commServeHostName, credentials.first(), credentials.second()); - if (jobId != null) { - String jobStatus = client.getJobStatus(jobId); - if (!jobStatus.equalsIgnoreCase("Completed")) { - LOG.error("installing agent on the Commvault Backup Provider failed jogId : " + jobId + " , jobStatus : " + jobStatus); - ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_HOST_AGENT_INSTALL, - "Failed install the commvault client agent on the host : " + host.getPrivateIpAddress(), User.UID_SYSTEM, ApiCommandResourceType.Host.toString()); - failResult.put(host.getPrivateIpAddress(), jobId); - } - } else { - return false; - } - } else { - // 호스트가 클라이언트에는 등록되었지만 구성이 정상적으로 되지 않은 경우 준비 상태 체크 - boolean checkInstall = client.getClientCheckReadiness(checkHost); - if (!checkInstall) { - LOG.error("The host is registered with the client, but the readiness status is not normal and you must manually check the client status."); - ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_HOST_AGENT_INSTALL, - "Failed check readiness the commvault client agent on the host : " + host.getPrivateIpAddress(), User.UID_SYSTEM, ApiCommandResourceType.Host.toString()); - return false; - } - } - } - } - if (!failResult.isEmpty()) { - return false; - } - return true; - } - - @Override - public boolean 
importBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { - final CommvaultClient client = getClient(zoneId); - // 선택한 백업 정책의 RPO 편집 Commvault API 호출 - String type = "deleteRpo"; - String taskId = client.getScheduleTaskId(type, externalId); - if (taskId != null) { - String subTaskId = client.getSubTaskId(taskId); - if (subTaskId != null) { - boolean result = client.deleteSchedulePolicy(taskId, subTaskId); - if (!result) { - throw new CloudRuntimeException("Failed to delete schedule policy commvault api"); - } - } - } else { - throw new CloudRuntimeException("Failed to get plan details schedule task id commvault api"); - } - // 선택한 백업 정책의 보존 기간 변경 Commvault API 호출 - type = "updateRpo"; - String planEntity = client.getScheduleTaskId(type, externalId); - JSONObject jsonObject = new JSONObject(planEntity); - String planType = String.valueOf(jsonObject.get("planType")); - String planName = String.valueOf(jsonObject.get("planName")); - String planSubtype = String.valueOf(jsonObject.get("planSubtype")); - String planId = String.valueOf(jsonObject.get("planId")); - JSONObject entityInfo = jsonObject.getJSONObject("entityInfo"); - String companyId = String.valueOf(entityInfo.get("companyId")); - String storagePolicyId = client.getStoragePolicyId(planName); - if (storagePolicyId == null) { - throw new CloudRuntimeException("Failed to get plan storage policy id commvault api"); - } - boolean result = client.getStoragePolicyDetails(planId, storagePolicyId, retentionPeriod); - if (result) { - // 호스트에 선택한 백업 정책 설정 Commvault API 호출 - String path = "/"; - List Hosts = hostDao.findByDataCenterId(zoneId); - for (final HostVO host : Hosts) { - String backupSetId = client.getDefaultBackupSetId(host.getName()); - if (backupSetId != null) { - if (!client.setBackupSet(path, planType, planName, planSubtype, planId, companyId, backupSetId)) { - throw new CloudRuntimeException("Failed to setting backup plan for client commvault api"); - } - } - } - return 
true; - } else { - throw new CloudRuntimeException("Failed to edit plan schedule retention period commvault api"); - } - } - - @Override - public boolean updateBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { - final CommvaultClient client = getClient(zoneId); - String type = "updateRpo"; - String planEntity = client.getScheduleTaskId(type, externalId); - JSONObject jsonObject = new JSONObject(planEntity); - String planType = String.valueOf(jsonObject.get("planType")); - String planName = String.valueOf(jsonObject.get("planName")); - String planSubtype = String.valueOf(jsonObject.get("planSubtype")); - String planId = String.valueOf(jsonObject.get("planId")); - JSONObject entityInfo = jsonObject.getJSONObject("entityInfo"); - String companyId = String.valueOf(entityInfo.get("companyId")); - String storagePolicyId = client.getStoragePolicyId(planName); - if (storagePolicyId == null) { - throw new CloudRuntimeException("Failed to get plan storage policy id commvault api"); - } - return client.getStoragePolicyDetails(planId, storagePolicyId, retentionPeriod); - } - - private static String getUrlDomain(String url) throws URISyntaxException { - URI uri; - try { - uri = new URI(url); - } catch (URI.MalformedURIException e) { - throw new CloudRuntimeException("Failed to cast URI"); - } - - return uri.getHost(); - } - - private CommvaultClient getClient(final Long zoneId) { - try { - return new CommvaultClient(CommvaultUrl.valueIn(zoneId), CommvaultUsername.valueIn(zoneId), CommvaultPassword.valueIn(zoneId), - CommvaultValidateSSLSecurity.valueIn(zoneId), CommvaultApiRequestTimeout.valueIn(zoneId)); - } catch (URISyntaxException e) { - throw new CloudRuntimeException("Failed to parse Commvault API URL: " + e.getMessage()); - } catch (NoSuchAlgorithmException | KeyManagementException e) { - LOG.error("Failed to build Commvault API client due to: ", e); - } - throw new CloudRuntimeException("Failed to build Commvault API client"); - } - 
- protected Ternary getKVMHyperisorCredentials(HostVO host) { - - String username = null; - String password = null; - - if (host != null && host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { - hostDao.loadDetails(host); - password = host.getDetail("password"); - username = host.getDetail("username"); - } - if ( password == null || username == null) { - throw new CloudRuntimeException("Cannot find login credentials for HYPERVISOR " + Objects.requireNonNull(host).getUuid()); - } - - return new Ternary<>(username, password, null); - } - - private boolean executeDeleteBackupPathCommand(HostVO host, String username, String password, int port, String command) { - try { - Pair response = SshHelper.sshExecute(host.getPrivateIpAddress(), port, - username, null, password, command, 120000, 120000, 3600000); - - if (!response.first()) { - LOG.error(String.format("failed on HYPERVISOR %s due to: %s", host, response.second())); - } else { - return true; - } - } catch (final Exception e) { - throw new CloudRuntimeException(String.format("Failed to delete backup path on host %s due to: %s", host.getName(), e.getMessage())); - } - return false; - } - - public static boolean isRetentionExpired(String retainedUntil) { - if (retainedUntil == null || retainedUntil.trim().isEmpty() || "null".equals(retainedUntil)) { - return false; - } - try { - long timestamp = Long.parseLong(retainedUntil) * 1000L; - Date retainedDate = new Date(timestamp); - Date currentDate = new Date(); - return currentDate.after(retainedDate); - } catch (Exception e) { - LOG.info("parsing error: " + e.getMessage()); - return false; - } - } - - public static boolean versionCheck(String csVersionInfo) { - // 버전 체크 기준 : 11 SP32.89 - if (csVersionInfo == null) { - throw new CloudRuntimeException("commvault version must not be null."); - } - String v = csVersionInfo.trim(); - if (v.startsWith("\"") && v.endsWith("\"") && v.length() > 1) { - v = v.substring(1, v.length() - 1); - } - Matcher m = 
VERSION_PATTERN.matcher(v); - if (!m.matches()) { - throw new CloudRuntimeException("Unexpected commvault version format: " + csVersionInfo); - } - int major = Integer.parseInt(m.group(1)); - int fr = Integer.parseInt(m.group(2)); - int mt = Integer.parseInt(m.group(3)); - if (major < BASE_MAJOR) { - throw new CloudRuntimeException("The major version of the commvault you are trying to connect to is low. Supports versions 11.32.89 and higher."); - } else if (major == BASE_MAJOR && fr < BASE_FR) { - throw new CloudRuntimeException("The feature release version of the commvault you are trying to connect to is low. Supports versions 11.32.89 and higher."); - } else if (major == BASE_MAJOR && fr == BASE_FR && mt < BASE_MT) { - throw new CloudRuntimeException("The maintenance version of the commvault you are trying to connect to is low. Supports versions 11.32.89 and higher."); - } - return true; - } - - @Override - public Pair restoreBackupToVM(Long backupId, String vmName) { - // TODO Auto-generated method stub - throw new UnsupportedOperationException("Unimplemented method 'restoreBackupToVM'"); - } - -} \ No newline at end of file diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 57a70bc0ccc5..c09b4b39f057 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -426,7 +426,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv private String vmActivityCheckPathRbd; private String vmActivityCheckPathClvm; private String nasBackupPath; - private String cvtBackupPath; + private String ableNasBackupPath; + private String ableCvtBackupPath; private String securityGroupPath; private String 
ovsPvlanDhcpHostPath; private String ovsPvlanVmPath; @@ -857,8 +858,12 @@ public String getNasBackupPath() { return nasBackupPath; } - public String getCvtBackupPath() { - return cvtBackupPath; + public String getAbleNasBackupPath() { + return ableNasBackupPath; + } + + public String getAbleCvtBackupPath() { + return ableCvtBackupPath; } public String getOvsPvlanDhcpHostPath() { @@ -1198,9 +1203,14 @@ public boolean configure(final String name, final Map params) th throw new ConfigurationException("Unable to find nasbackup.sh"); } - cvtBackupPath = Script.findScript(kvmScriptsDir, "cvtbackup.sh"); - if (cvtBackupPath == null) { - throw new ConfigurationException("Unable to find cvtbackup.sh"); + ableNasBackupPath = Script.findScript(kvmScriptsDir, "ablestack_nasbackup.sh"); + if (ableNasBackupPath == null) { + throw new ConfigurationException("Unable to find ablestack_nasbackup.sh"); + } + + ableCvtBackupPath = Script.findScript(kvmScriptsDir, "ablestack_cvtbackup.sh"); + if (ableCvtBackupPath == null) { + throw new ConfigurationException("Unable to find ablestack_cvtbackup.sh"); } createTmplPath = Script.findScript(storageScriptsDir, "createtmplt.sh"); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultBackupHelper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultBackupHelper.java new file mode 100644 index 000000000000..df663addd2ce --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultBackupHelper.java @@ -0,0 +1,361 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.hypervisor.kvm.resource.LibvirtConnection; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.script.Script; +import org.apache.cloudstack.backup.AblestackCommvaultTakeBackupCommand; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.libvirt.Connect; +import org.libvirt.Domain; +import org.libvirt.DomainInfo.DomainState; +import org.libvirt.LibvirtException; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +class LibvirtAblestackCommvaultBackupHelper { + protected Logger LOGGER = LogManager.getLogger(LibvirtAblestackCommvaultBackupHelper.class); + static final Integer EXIT_CLEANUP_FAILED = 20; + private static final int BACKUP_JOB_POLL_INTERVAL_MS = 10000; + + enum BackupExecutionMode { + RUNNING("backup-running"), + 
STOPPED("backup-stopped"), + RBD("backup-rbd"); + + private final String scriptOperation; + + BackupExecutionMode(String scriptOperation) { + this.scriptOperation = scriptOperation; + } + + String getScriptOperation() { + return scriptOperation; + } + } + + private final LibvirtComputingResource resource; + + LibvirtAblestackCommvaultBackupHelper(LibvirtComputingResource resource) { + this.resource = resource; + } + + Pair executeBackup(AblestackCommvaultTakeBackupCommand command) { + List diskPaths = resolveDiskPaths(command.getVolumePools(), command.getVolumePaths()); + BackupExecutionMode executionMode = determineExecutionMode(command.getVmName(), command.getVolumePools()); + LOGGER.debug("Commvault backup execution mode=[{}], vm=[{}], backupType=[{}], diskPaths=[{}]", + executionMode, command.getVmName(), command.getBackupType(), diskPaths); + if (BackupExecutionMode.STOPPED.equals(executionMode)) { + return executeStoppedVmBackup(command, diskPaths); + } + + List commands = new ArrayList<>(); + String[] scriptCommand = buildBackupScriptCommand(command, diskPaths, executionMode); + LOGGER.debug("Executing Commvault backup script command=[{}]", String.join(" ", scriptCommand)); + commands.add(scriptCommand); + return Script.executePipedCommands(commands, resource.getCmdsTimeout()); + } + + List resolveDiskPaths(List volumePools, List volumePaths) { + List diskPaths = new ArrayList<>(); + if (volumePaths == null) { + return diskPaths; + } + + KVMStoragePoolManager storagePoolMgr = resource.getStoragePoolMgr(); + for (int idx = 0; idx < volumePaths.size(); idx++) { + PrimaryDataStoreTO volumePool = volumePools.get(idx); + String volumePath = volumePaths.get(idx); + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + diskPaths.add(volumePath); + continue; + } + + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + diskPaths.add(KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, 
volumePath)); + } + return diskPaths; + } + + private String[] buildBackupScriptCommand(AblestackCommvaultTakeBackupCommand command, List diskPaths, BackupExecutionMode executionMode) { + return new String[] { + resource.getAbleCvtBackupPath(), + "-o", executionMode.getScriptOperation(), + "-v", command.getVmName(), + "-p", command.getBackupPath(), + "-b", Objects.nonNull(command.getBackupType()) ? command.getBackupType() : "", + "-c", Objects.nonNull(command.getCheckpointName()) ? command.getCheckpointName() : "", + "-r", Objects.nonNull(command.getParentBackupPath()) ? command.getParentBackupPath() : "", + "-i", Objects.nonNull(command.getParentCheckpointName()) ? command.getParentCheckpointName() : "", + "-j", Objects.nonNull(command.getParentCheckpointPath()) ? command.getParentCheckpointPath() : "", + "-f", command.getBackupFiles() == null || command.getBackupFiles().isEmpty() ? "" : String.join(",", command.getBackupFiles()), + "-q", command.getQuiesce() != null && command.getQuiesce() ? "true" : "false", + "-d", diskPaths.isEmpty() ? "" : String.join(",", diskPaths) + }; + } + + private BackupExecutionMode determineExecutionMode(String vmName, List volumePools) { + if (volumePools != null && volumePools.stream().anyMatch(pool -> pool != null && pool.getPoolType() == Storage.StoragePoolType.RBD)) { + return BackupExecutionMode.RBD; + } + return isVmRunning(vmName) ? 
BackupExecutionMode.RUNNING : BackupExecutionMode.STOPPED; + } + + private boolean isVmRunning(String vmName) { + try { + Connect conn = LibvirtConnection.getConnectionByVmName(vmName); + Domain domain = resource.getDomain(conn, vmName); + return domain != null && DomainState.VIR_DOMAIN_RUNNING.equals(domain.getInfo().state); + } catch (LibvirtException e) { + return false; + } + } + + private Pair executeStoppedVmBackup(AblestackCommvaultTakeBackupCommand command, List diskPaths) { + String dummyVmName = String.format("DUMMY-VM-%s", command.getCheckpointName().replace('.', '-')); + Path dest = Path.of(command.getBackupPath()); + Connect conn = null; + try { + LOGGER.info("Starting stopped VM Commvault backup for vm=[{}], dummyVm=[{}], backupType=[{}]", + command.getVmName(), dummyVmName, command.getBackupType()); + validateStoppedBackupDiskPaths(diskPaths); + if (isIncremental(command)) { + resource.validateLibvirtAndQemuVersionForIncrementalSnapshots(); + } + Files.createDirectories(dest.resolve("checkpoints")); + + conn = LibvirtConnection.getConnection(); + String dummyVmXml = buildDummyVmXml(dummyVmName, diskPaths); + resource.startVM(conn, dummyVmName, dummyVmXml, Domain.CreateFlags.PAUSED); + + if (isIncremental(command) && command.getParentCheckpointPath() != null && !command.getParentCheckpointPath().isEmpty()) { + redefineCheckpointIfNeeded(dummyVmName, Path.of(command.getParentCheckpointPath())); + } + + List diskLabels = getDiskLabels(conn, dummyVmName); + Path backupXml = writeBackupXml(dest, command, diskLabels); + Path checkpointXml = writeCheckpointXml(dest, command, diskLabels); + + String backupBeginCommand = String.format("virsh -c qemu:///system backup-begin --domain %s --backupxml %s --checkpointxml %s", + shellQuote(dummyVmName), shellQuote(backupXml.toString()), shellQuote(checkpointXml.toString())); + LOGGER.debug("Starting stopped VM Commvault backup-begin command=[{}]", backupBeginCommand); + if 
(Script.runSimpleBashScriptForExitValue(backupBeginCommand, resource.getCmdsTimeout(), false) != 0) { + LOGGER.error("Failed to start backup for stopped VM Commvault dummy domain [{}]", dummyVmName); + return new Pair<>(1, "Failed to start backup for dummy VM " + dummyVmName); + } + + try { + waitForBackup(dummyVmName); + } catch (IOException e) { + cancelBackupJob(dummyVmName); + throw e; + } + + if (isIncremental(command) && command.getParentBackupPath() != null && !command.getParentBackupPath().isEmpty()) { + rebaseIncrementalChain(dest, command, diskPaths); + } + + dumpCheckpointXml(dummyVmName, command.getCheckpointName(), dest); + Files.deleteIfExists(backupXml); + Files.deleteIfExists(checkpointXml); + Script.runSimpleBashScriptForExitValue("sync", resource.getCmdsTimeout(), false); + LOGGER.info("Completed stopped VM Commvault backup for vm=[{}], dummyVm=[{}]", command.getVmName(), dummyVmName); + return new Pair<>(0, "success"); + } catch (Exception e) { + LOGGER.error("Stopped VM Commvault backup failed for vm=[{}], dummyVm=[{}] due to: {}", + command.getVmName(), dummyVmName, e.getMessage(), e); + return new Pair<>(1, e.getMessage()); + } finally { + cleanupDummyVm(dummyVmName); + } + } + + private String buildDummyVmXml(String vmName, List diskPaths) { + String arch = resource.getGuestCpuArch() != null ? resource.getGuestCpuArch() : "x86_64"; + String machine = resource.isGuestAarch64() ? 
LibvirtComputingResource.VIRT : LibvirtComputingResource.PC; + String emulator = resource.getHypervisorPath(); + StringBuilder xml = new StringBuilder(); + xml.append("") + .append("").append(vmName).append("") + .append("256") + .append("256") + .append("1") + .append("hvm") + .append("").append(emulator).append(""); + for (int i = 0; i < diskPaths.size(); i++) { + char letter = (char) ('a' + i); + String diskPath = diskPaths.get(i); + xml.append("") + .append("") + .append("") + .append(""); + } + xml.append(""); + return xml.toString(); + } + + private void validateStoppedBackupDiskPaths(List diskPaths) { + if (diskPaths.stream().anyMatch(path -> path != null && path.startsWith("rbd:"))) { + throw new IllegalArgumentException("Stopped VM dummy backup flow supports only file-backed disks. RBD backups must use the dedicated RBD backup path."); + } + } + + private void redefineCheckpointIfNeeded(String vmName, Path checkpointPath) throws IOException { + if (!Files.exists(checkpointPath)) { + return; + } + String checkpointName = checkpointPath.getFileName().toString().replace(".xml", ""); + int infoExit = Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-info --domain %s --checkpointname %s > /dev/null 2>&1", + shellQuote(vmName), shellQuote(checkpointName))); + if (infoExit == 0) { + return; + } + int redefineExit = Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-create --domain %s --xmlfile %s --redefine > /dev/null 2>&1", + shellQuote(vmName), shellQuote(checkpointPath.toString()))); + if (redefineExit != 0) { + throw new IOException("Failed to redefine checkpoint " + checkpointName + " on domain " + vmName); + } + } + + private List getDiskLabels(Connect conn, String vmName) { + return resource.getDisks(conn, vmName).stream() + .map(d -> d.getDiskLabel()) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + } + + private Path writeBackupXml(Path dest, 
AblestackCommvaultTakeBackupCommand command, List diskLabels) throws IOException { + StringBuilder xml = new StringBuilder(""); + for (int i = 0; i < diskLabels.size(); i++) { + String backupFile = getBackupFileByIndex(command, i, String.format("volume-%d.qcow2", i)); + xml.append("") + .append(""); + if (isIncremental(command) && command.getParentCheckpointName() != null && !command.getParentCheckpointName().isEmpty()) { + xml.append("").append(command.getParentCheckpointName()).append(""); + } + xml.append(""); + } + xml.append(""); + Path backupXml = dest.resolve("backup.xml"); + Files.writeString(backupXml, xml.toString(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); + return backupXml; + } + + private Path writeCheckpointXml(Path dest, AblestackCommvaultTakeBackupCommand command, List diskLabels) throws IOException { + StringBuilder xml = new StringBuilder("").append(command.getCheckpointName()).append(""); + for (String diskLabel : diskLabels) { + xml.append(""); + } + xml.append(""); + Path checkpointXml = dest.resolve("checkpoint.xml"); + Files.writeString(checkpointXml, xml.toString(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); + return checkpointXml; + } + + private void waitForBackup(String vmName) throws IOException { + int timeout = resource.getCmdsTimeout(); + while (timeout > 0) { + String result = checkBackupJob(vmName); + if (result != null && result.contains("Completed") && result.contains("Backup")) { + return; + } + if (result != null && result.contains("Failed")) { + throw new IOException("Virsh backup job failed for dummy VM " + vmName); + } + timeout -= BACKUP_JOB_POLL_INTERVAL_MS; + try { + Thread.sleep(BACKUP_JOB_POLL_INTERVAL_MS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException(e); + } + } + throw new IOException("Timed out waiting for backup job of dummy VM " + vmName); + } + + private void cancelBackupJob(String vmName) { + 
Script.runSimpleBashScriptForExitValue(String.format("virsh -c qemu:///system domjobabort --domain %s > /dev/null 2>&1", shellQuote(vmName))); + } + + private String checkBackupJob(String vmName) { + return Script.runSimpleBashScriptWithFullResult( + String.format("virsh -c qemu:///system domjobinfo %s --completed --keep-completed", shellQuote(vmName)), 10); + } + + private void rebaseIncrementalChain(Path dest, AblestackCommvaultTakeBackupCommand command, List diskPaths) throws IOException { + for (int i = 0; i < diskPaths.size(); i++) { + String backupFile = getBackupFileByIndex(command, i, String.format("volume-%d.qcow2", i)); + int exit = Script.runSimpleBashScriptForExitValue(String.format( + "qemu-img rebase -u -F qcow2 -b %s %s", + shellQuote(Path.of(command.getParentBackupPath(), backupFile).toString()), + shellQuote(dest.resolve(backupFile).toString())), resource.getCmdsTimeout(), false); + if (exit != 0) { + throw new IOException("qemu-img rebase failed for " + backupFile); + } + } + } + + private void dumpCheckpointXml(String vmName, String checkpointName, Path dest) { + Path checkpointDest = dest.resolve("checkpoints").resolve(checkpointName + ".xml"); + Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-dumpxml --domain %s --checkpointname %s --no-domain > %s 2>/dev/null", + shellQuote(vmName), shellQuote(checkpointName), shellQuote(checkpointDest.toString()))); + } + + private void cleanupDummyVm(String dummyVmName) { + Script.runSimpleBashScriptForExitValue(String.format("virsh -c qemu:///system destroy %s > /dev/null 2>&1 || true", shellQuote(dummyVmName))); + Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system undefine %s --nvram > /dev/null 2>&1 || virsh -c qemu:///system undefine %s > /dev/null 2>&1 || true", + shellQuote(dummyVmName), shellQuote(dummyVmName))); + } + + private boolean isIncremental(AblestackCommvaultTakeBackupCommand command) { + return 
"INCREMENTAL".equalsIgnoreCase(command.getBackupType()); + } + + private String getBackupFileByIndex(AblestackCommvaultTakeBackupCommand command, int index, String fallback) { + List backupFiles = command.getBackupFiles(); + if (backupFiles == null || index >= backupFiles.size()) { + return fallback; + } + return backupFiles.get(index); + } + + private String shellQuote(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultRestoreBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultRestoreBackupCommandWrapper.java new file mode 100644 index 000000000000..8a82a9312227 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultRestoreBackupCommandWrapper.java @@ -0,0 +1,950 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.backup.AblestackBackupFrameworkUtils; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackCommvaultRestoreBackupCommand; +import org.apache.cloudstack.backup.BackupRestorePlan; +import org.apache.cloudstack.backup.BackupRestoreStage; +import org.apache.cloudstack.backup.BackupVolumeChainState; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; +import org.libvirt.LibvirtException; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +@ResourceWrapper(handles = AblestackCommvaultRestoreBackupCommand.class) +public class LibvirtAblestackCommvaultRestoreBackupCommandWrapper extends CommandWrapper { + private static final String FILE_PATH_PLACEHOLDER = "%s/%s"; + private static final String ATTACH_QCOW2_DISK_COMMAND = " virsh attach-disk %s %s %s --driver qemu --subdriver qcow2 
--cache none"; + private static final String ATTACH_RBD_DISK_XML_COMMAND = " virsh attach-device %s /dev/stdin < backedVolumeUUIDs = command.getBackupVolumesUUIDs(); + List backupFiles = command.getBackupFiles(); + List backupFileChains = command.getBackupFileChains(); + List volumeChainStates = command.getVolumeChainStates(); + List restoreVolumePools = command.getRestoreVolumePools(); + List restoreVolumePaths = command.getRestoreVolumePaths(); + String restoreVolumeUuid = command.getRestoreVolumeUUID(); + int timeout = command.getTimeout(); + String cacheMode = command.getCacheMode(); + String hostName = command.getHostName(); + List backupSourceHosts = command.getBackupSourceHosts(); + BackupRestorePlan restorePlan = command.getRestorePlan(); + KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr(); + + String newVolumeId = null; + try { + validateChainStatePlan(volumeChainStates, restorePlan); + if (AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.PREPARE_SOURCE) && hostName != null) { + fetchBackupFile(hostName, backupPath); + } + if (AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.PREPARE_SOURCE) && backupSourceHosts != null && !backupSourceHosts.isEmpty()) { + LinkedHashSet sourceHosts = new LinkedHashSet<>(backupSourceHosts); + for (String sourceHost : sourceHosts) { + if (StringUtils.isBlank(sourceHost) || Objects.equals(sourceHost, hostName)) { + continue; + } + fetchBackupFile(sourceHost, backupPath); + } + } + if (Objects.isNull(vmExists)) { + PrimaryDataStoreTO volumePool = restoreVolumePools.get(0); + String volumePath = restoreVolumePaths.get(0); + int lastIndex = volumePath.lastIndexOf("/"); + newVolumeId = volumePath.substring(lastIndex + 1); + restoreVolume(storagePoolMgr, backupPath, volumePool, volumePath, diskType, restoreVolumeUuid, backupFiles, backupFileChains, volumeChainStates, + new Pair<>(vmName, command.getVmState()), timeout, cacheMode, restorePlan); + } 
else if (Boolean.TRUE.equals(vmExists)) { + restoreVolumesOfExistingVM(storagePoolMgr, restoreVolumePools, restoreVolumePaths, backedVolumeUUIDs, backupPath, backupFiles, backupFileChains, + volumeChainStates, timeout, restorePlan); + } else { + restoreVolumesOfDestroyedVMs(storagePoolMgr, restoreVolumePools, restoreVolumePaths, vmName, backupPath, backupFiles, backupFileChains, + volumeChainStates, timeout, restorePlan); + } + } catch (CloudRuntimeException e) { + String errorMessage = e.getMessage() != null ? e.getMessage() : ""; + return new BackupAnswer(command, false, errorMessage); + } + + return new BackupAnswer(command, true, newVolumeId); + } + + private void restoreVolumesOfExistingVM(KVMStoragePoolManager storagePoolMgr, List restoreVolumePools, List restoreVolumePaths, List backedVolumesUUIDs, + String backupPath, List backupFiles, List backupFileChains, + List volumeChainStates, int timeout, BackupRestorePlan restorePlan) { + String diskType = "root"; + try { + for (int idx = 0; idx < restoreVolumePaths.size(); idx++) { + PrimaryDataStoreTO restoreVolumePool = restoreVolumePools.get(idx); + String restoreVolumePath = restoreVolumePaths.get(idx); + String backupVolumeUuid = backedVolumesUUIDs.get(idx); + List localBackupPaths = getLocalBackupPaths(backupPath, backupFiles, backupFileChains, volumeChainStates, idx, getLegacyBackupFileName(diskType, backupVolumeUuid)); + validateResolvedChainPaths(localBackupPaths, restoreVolumePath); + diskType = "datadisk"; + if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, restoreVolumePath, localBackupPaths, timeout, backupPath, idx)) { + throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", backupVolumeUuid)); + } + } + } finally { + cleanupBackupDirectory(backupPath, restorePlan); + } + } + + private void restoreVolumesOfDestroyedVMs(KVMStoragePoolManager storagePoolMgr, List volumePools, List volumePaths, String vmName, String backupPath, + List 
backupFiles, List backupFileChains, + List volumeChainStates, int timeout, BackupRestorePlan restorePlan) { + String diskType = "root"; + try { + for (int i = 0; i < volumePaths.size(); i++) { + PrimaryDataStoreTO volumePool = volumePools.get(i); + String volumePath = volumePaths.get(i); + String volumeUuid = volumePath.substring(volumePath.lastIndexOf(File.separator) + 1); + List localBackupPaths = getLocalBackupPaths(backupPath, backupFiles, backupFileChains, volumeChainStates, i, getLegacyBackupFileName(diskType, volumeUuid)); + validateResolvedChainPaths(localBackupPaths, volumePath); + diskType = "datadisk"; + if (!replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, localBackupPaths, timeout, backupPath, i)) { + throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", volumeUuid)); + } + } + } finally { + cleanupBackupDirectory(backupPath, restorePlan); + } + } + + private void restoreVolume(KVMStoragePoolManager storagePoolMgr, String backupPath, PrimaryDataStoreTO volumePool, String volumePath, String diskType, String volumeUUID, + List backupFiles, List backupFileChains, List volumeChainStates, + Pair vmNameAndState, int timeout, String cacheMode, BackupRestorePlan restorePlan) { + try { + List localBackupPaths = getLocalBackupPaths(backupPath, backupFiles, backupFileChains, volumeChainStates, 0, getLegacyBackupFileName(diskType, volumeUUID)); + validateResolvedChainPaths(localBackupPaths, volumePath); + if (!replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, localBackupPaths, timeout, backupPath, 0, true)) { + throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", volumeUUID)); + } + if (AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.ATTACH_VOLUME) + && VirtualMachine.State.Running.equals(vmNameAndState.second())) { + if (!attachVolumeToVm(storagePoolMgr, vmNameAndState.first(), volumePool, volumePath, 
cacheMode)) { + throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first())); + } + } + } finally { + cleanupBackupDirectory(backupPath, restorePlan); + } + } + + private void validateChainStatePlan(List volumeChainStates, BackupRestorePlan restorePlan) { + if (AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.VALIDATE_CHAIN) && volumeChainStates != null && !volumeChainStates.isEmpty()) { + try { + AblestackBackupFrameworkUtils.validateVolumeChainStates(volumeChainStates); + } catch (IllegalArgumentException e) { + throw new CloudRuntimeException(e.getMessage(), e); + } + } + } + + private void validateResolvedChainPaths(List resolvedPaths, String volumePath) { + if (resolvedPaths == null || resolvedPaths.isEmpty()) { + throw new CloudRuntimeException(String.format("No resolved backup chain paths found for volume [%s]", volumePath)); + } + } + + private void cleanupBackupDirectory(String backupPath, BackupRestorePlan restorePlan) { + if (AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.CLEANUP_SOURCE)) { + deleteBackupDirectory(backupPath); + } + } + + private void deleteBackupDirectory(String backupDirectory) { + try { + FileUtils.deleteDirectory(new File(backupDirectory)); + } catch (IOException e) { + logger.error(String.format("Failed to delete backup directory: %s", backupDirectory), e); + throw new CloudRuntimeException("Failed to delete the backup directory"); + } + } + + private List getLocalBackupPaths(String backupPath, List backupFiles, List backupFileChains, + List volumeChainStates, int index, String legacyBackupFileName) { + LinkedHashSet localPaths = new LinkedHashSet<>(); + boolean resolvedFromVolumeChainStates = false; + if (volumeChainStates != null && volumeChainStates.size() > index) { + for (String chainPath : volumeChainStates.get(index).getChainFiles()) { + if (StringUtils.isBlank(chainPath)) { + continue; + } + 
localPaths.add(resolveBackupPath(backupPath, chainPath)); + resolvedFromVolumeChainStates = true; + } + } + if (!resolvedFromVolumeChainStates && backupFileChains != null && backupFileChains.size() > index && StringUtils.isNotBlank(backupFileChains.get(index))) { + for (String chainPath : backupFileChains.get(index).split(";")) { + if (StringUtils.isBlank(chainPath)) { + continue; + } + localPaths.add(resolveBackupPath(backupPath, chainPath)); + } + } + if (localPaths.isEmpty() && backupFiles != null && backupFiles.size() > index && StringUtils.isNotBlank(backupFiles.get(index))) { + localPaths.add(resolveBackupPath(backupPath, backupFiles.get(index))); + } + if (localPaths.isEmpty()) { + localPaths.add(String.format(FILE_PATH_PLACEHOLDER, backupPath, legacyBackupFileName)); + } + return new ArrayList<>(localPaths); + } + + private String resolveBackupPath(String backupPath, String chainPath) { + if (chainPath.startsWith("/")) { + return chainPath; + } + if (chainPath.contains("/")) { + return String.format(FILE_PATH_PLACEHOLDER, backupPath, chainPath); + } + return String.format(FILE_PATH_PLACEHOLDER, backupPath, chainPath); + } + + private String getLegacyBackupFileName(String diskType, String volumeUuid) { + return String.format("%s.%s.qcow2", diskType.toLowerCase(Locale.ROOT), volumeUuid); + } + + private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, + String backupRootPath, int backupIndex) { + return replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, backupRootPath, backupIndex, false); + } + + private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, + String backupRootPath, int backupIndex, boolean createTargetVolume) { + if (backupPaths == null || backupPaths.isEmpty()) { + return false; + } + if 
(volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + if (backupPaths.stream().anyMatch(path -> path.endsWith(".rbdiff"))) { + return restoreIncrementalRbdBackupChainToFileVolume(volumePath, backupPaths, timeout, backupRootPath, backupIndex); + } + return replaceFileVolumeWithBackup(volumePath, getLastExistingBackupPath(backupPaths), timeout); + } + + return replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, createTargetVolume); + } + + private boolean restoreIncrementalRbdBackupChainToFileVolume(String volumePath, List backupPaths, int timeout, String backupRootPath, int backupIndex) { + if (StringUtils.isBlank(backupRootPath)) { + throw new CloudRuntimeException("Unable to locate backup root path for incremental RBD restore"); + } + RbdImageSpec sourceImage = getRbdImageSpecFromMetadata(backupRootPath, backupIndex); + String tempImage = sourceImage.buildTempImageSpec(); + try { + if (!importBackupChainToTemporaryRbd(backupPaths, timeout, sourceImage, tempImage)) { + return false; + } + return convertTemporaryRbdToFileVolume(volumePath, timeout, sourceImage, tempImage); + } finally { + removeTemporaryRbdImage(sourceImage, tempImage, timeout); + } + } + + private String getFirstExistingBackupPath(List backupPaths) { + for (String backupPath : backupPaths) { + if (StringUtils.isNotBlank(backupPath) && Files.exists(Paths.get(backupPath))) { + return backupPath; + } + } + return backupPaths.get(0); + } + + private String getLastExistingBackupPath(List backupPaths) { + for (int i = backupPaths.size() - 1; i >= 0; i--) { + String backupPath = backupPaths.get(i); + if (StringUtils.isNotBlank(backupPath) && Files.exists(Paths.get(backupPath))) { + return backupPath; + } + } + return backupPaths.get(backupPaths.size() - 1); + } + + private boolean replaceFileVolumeWithBackup(String volumePath, String backupPath, int timeout) { + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + QemuImg qemu = new 
QemuImg(timeout * 1000, true, false); + srcBackupFile = new QemuImgFile(backupPath, getBackupFileFormat(backupPath)); + destVolumeFile = new QemuImgFile(volumePath, getFileVolumeFormat(volumePath)); + qemu.convert(srcBackupFile, destVolumeFile); + return true; + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; + String destFilename = destVolumeFile != null ? destVolumeFile.getFileName() : null; + logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + } + + private boolean convertTemporaryRbdToFileVolume(String volumePath, int timeout, RbdImageSpec sourceImage, String tempImage) { + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + QemuImg qemu = new QemuImg(timeout * 1000, true, false); + srcBackupFile = new QemuImgFile(sourceImage.buildQemuUri(tempImage), QemuImg.PhysicalDiskFormat.RAW); + destVolumeFile = new QemuImgFile(volumePath, getFileVolumeFormat(volumePath)); + qemu.convert(srcBackupFile, destVolumeFile); + return true; + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : tempImage; + String destFilename = destVolumeFile != null ? 
destVolumeFile.getFileName() : volumePath; + logger.error("Failed to convert temporary RBD {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + } + + private QemuImg.PhysicalDiskFormat getBackupFileFormat(String backupPath) { + if (backupPath.endsWith(".raw")) { + return QemuImg.PhysicalDiskFormat.RAW; + } + return QemuImg.PhysicalDiskFormat.QCOW2; + } + + private QemuImg.PhysicalDiskFormat getFileVolumeFormat(String volumePath) { + if (!Files.exists(Paths.get(volumePath))) { + return QemuImg.PhysicalDiskFormat.QCOW2; + } + try { + QemuImg qemu = new QemuImg(0); + java.util.Map info = qemu.info(new QemuImgFile(volumePath)); + String format = info.get("file_format"); + if (StringUtils.isNotBlank(format)) { + return QemuImg.PhysicalDiskFormat.valueOf(format.toUpperCase(Locale.ROOT)); + } + } catch (QemuImgException | LibvirtException | IllegalArgumentException e) { + logger.warn("Failed to detect file volume format for path {}. Falling back to qcow2.", volumePath, e); + } + return QemuImg.PhysicalDiskFormat.QCOW2; + } + + private boolean replaceRbdVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, boolean createTargetVolume) { + if (backupPaths.stream().anyMatch(path -> path.endsWith(".rbdiff"))) { + return restoreIncrementalRbdBackupChain(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, createTargetVolume); + } + + String backupPath = getFirstExistingBackupPath(backupPaths); + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + String normalizedVolumePath = normalizeRbdVolumePath(volumePath, volumeStoragePool); + if (getBackupFileFormat(backupPath) == QemuImg.PhysicalDiskFormat.RAW) { + return importRawBackupToRbd(volumeStoragePool, normalizedVolumePath, backupPath, timeout, createTargetVolume); + } + + QemuImg qemu; + try { + qemu = new QemuImg(timeout * 
1000, true, false); + if (!createTargetVolume) { + KVMPhysicalDisk rdbDisk = volumeStoragePool.getPhysicalDisk(normalizedVolumePath); + logger.debug("Restoring RBD volume: {}", rdbDisk.toString()); + qemu.setSkipTargetVolumeCreation(true); + } + } catch (LibvirtException ex) { + throw new CloudRuntimeException("Failed to create qemu-img command to restore RBD volume with backup", ex); + } + + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + srcBackupFile = new QemuImgFile(backupPath, getBackupFileFormat(backupPath)); + String rbdDestVolumeFile = KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, normalizedVolumePath); + destVolumeFile = new QemuImgFile(rbdDestVolumeFile, QemuImg.PhysicalDiskFormat.RAW); + + logger.debug("Starting convert backup {} to RBD volume {}", backupPath, normalizedVolumePath); + qemu.convert(srcBackupFile, destVolumeFile); + logger.debug("Successfully converted backup {} to RBD volume {}", backupPath, normalizedVolumePath); + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; + String destFilename = destVolumeFile != null ? 
destVolumeFile.getFileName() : null; + logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + + return true; + } + + private boolean importRawBackupToRbd(KVMStoragePool volumeStoragePool, String volumePath, String backupPath, int timeout, boolean createTargetVolume) { + if (!createTargetVolume && !deleteExistingRbdVolumeIfPresent(volumeStoragePool, volumePath)) { + logger.error("Failed to delete existing RBD volume {} before raw import", volumePath); + return false; + } + + String importCommand = buildRbdImportCommand(volumeStoragePool, backupPath, volumePath); + CommandExecutionResult importResult = executeBashCommandWithResult(importCommand, timeout, "Import raw backup to RBD"); + if (importResult.exitCode != 0) { + logger.error("Failed to import raw backup {} into volume {}. Exit code: {}, output: {}", backupPath, volumePath, importResult.exitCode, importResult.output); + return false; + } + return true; + } + + private boolean deleteExistingRbdVolumeIfPresent(KVMStoragePool volumeStoragePool, String volumePath) { + try { + return volumeStoragePool.deletePhysicalDisk(volumePath, Storage.ImageFormat.RAW); + } catch (CloudRuntimeException e) { + if (isMissingRbdImageError(e)) { + logger.info("Skipping deletion for missing RBD volume {} before restore", volumePath); + return true; + } + throw e; + } + } + + private boolean isMissingRbdImageError(CloudRuntimeException e) { + String message = e.getMessage(); + return StringUtils.containsIgnoreCase(message, "Failed to open image") + && StringUtils.containsIgnoreCase(message, "No such file or directory"); + } + + private boolean restoreIncrementalRbdBackupChain(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, + int timeout, boolean createTargetVolume) { + if (backupPaths.isEmpty() || !backupPaths.get(0).endsWith(".raw")) { + throw new CloudRuntimeException("Incremental RBD 
backup chain is missing the base full backup"); + } + + String normalizedVolumePath = normalizeRbdVolumePath(volumePath, storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid())); + if (!replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, normalizedVolumePath, List.of(backupPaths.get(0)), timeout, createTargetVolume)) { + return false; + } + + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + List restoreSnapshots = new ArrayList<>(); + try { + Map baseMetadata = readRbdBackupMetadata(backupPaths.get(0)); + String baseCheckpoint = baseMetadata.get("checkpoint_name"); + if (StringUtils.isNotBlank(baseCheckpoint)) { + if (!ensureRbdSnapshotExists(volumeStoragePool, normalizedVolumePath, baseCheckpoint, timeout)) { + return false; + } + restoreSnapshots.add(baseCheckpoint); + } + + for (int index = 1; index < backupPaths.size(); index++) { + String backupPath = backupPaths.get(index); + if (!backupPath.endsWith(".rbdiff")) { + continue; + } + Map metadata = readRbdBackupMetadata(backupPath); + String parentCheckpoint = metadata.get("parent_checkpoint_name"); + String checkpoint = metadata.get("checkpoint_name"); + if (StringUtils.isBlank(parentCheckpoint) || StringUtils.isBlank(checkpoint)) { + throw new CloudRuntimeException(String.format("RBD incremental backup metadata is incomplete for %s", backupPath)); + } + if (!rbdSnapshotExists(volumeStoragePool, normalizedVolumePath, parentCheckpoint, timeout)) { + throw new CloudRuntimeException(String.format("Required parent snapshot %s is missing on volume %s", parentCheckpoint, normalizedVolumePath)); + } + String importDiffCommand = buildRbdImportDiffCommand(volumeStoragePool, backupPath, normalizedVolumePath); + CommandExecutionResult importDiffResult = executeBashCommandWithResult(importDiffCommand, timeout, "Import RBD diff to target volume"); + if (importDiffResult.exitCode != 0) { + logger.error("Failed to import RBD diff {} 
into volume {}. Exit code: {}, output: {}", backupPath, normalizedVolumePath, + importDiffResult.exitCode, importDiffResult.output); + return false; + } + if (!ensureRbdSnapshotExists(volumeStoragePool, normalizedVolumePath, checkpoint, timeout)) { + return false; + } + restoreSnapshots.add(checkpoint); + } + return true; + } finally { + cleanupRbdRestoreSnapshots(volumeStoragePool, normalizedVolumePath, restoreSnapshots, timeout); + } + } + + private String normalizeRbdVolumePath(String volumePath, KVMStoragePool storagePool) { + if (StringUtils.isBlank(volumePath)) { + return volumePath; + } + String normalized = volumePath; + String poolPath = storagePool.getSourceDir(); + if (StringUtils.isNotBlank(poolPath)) { + String poolPrefix = poolPath + "/"; + if (normalized.startsWith(poolPrefix)) { + normalized = normalized.substring(poolPrefix.length()); + } + } + if (normalized.startsWith("/")) { + normalized = normalized.substring(normalized.lastIndexOf('/') + 1); + } + return normalized; + } + + private String buildRbdImportDiffCommand(KVMStoragePool storagePool, String backupPath, String volumePath) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" import-diff ").append(backupPath).append(" ").append(volumePath); + return command.toString(); + } + + private String buildRbdImportCommand(KVMStoragePool storagePool, String backupPath, String volumePath) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m 
").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" import ").append(backupPath).append(" ").append(volumePath); + return command.toString(); + } + + private String formatRbdMonHosts(String hosts, int port) { + String[] hostValues = hosts.split(","); + List formattedHosts = new ArrayList<>(); + for (String host : hostValues) { + String normalizedHost = host.replace("[", "").replace("]", "").trim(); + if (StringUtils.isBlank(normalizedHost)) { + continue; + } + formattedHosts.add(port > 0 ? normalizedHost + ":" + port : normalizedHost); + } + return String.join(",", formattedHosts); + } + + private boolean importBackupChainToTemporaryRbd(List backupPaths, int timeout, RbdImageSpec sourceImage, String tempImage) { + if (backupPaths.isEmpty() || !backupPaths.get(0).endsWith(".raw")) { + throw new CloudRuntimeException("Incremental RBD backup chain is missing the base full backup"); + } + String importCommand = sourceImage.buildRbdCommand("import", quote(backupPaths.get(0)), quote(tempImage)); + CommandExecutionResult importResult = executeBashCommandWithResult(importCommand, timeout, "Import raw backup to temporary RBD"); + if (importResult.exitCode != 0) { + logger.error("Failed to import base RBD backup {} into temporary image {}. 
Exit code: {}, output: {}", backupPaths.get(0), tempImage, + importResult.exitCode, importResult.output); + return false; + } + List restoreSnapshots = new ArrayList<>(); + try { + Map baseMetadata = readRbdBackupMetadata(backupPaths.get(0)); + String baseCheckpoint = baseMetadata.get("checkpoint_name"); + if (StringUtils.isNotBlank(baseCheckpoint)) { + if (!ensureRbdSnapshotExists(sourceImage, tempImage, baseCheckpoint, timeout)) { + return false; + } + restoreSnapshots.add(baseCheckpoint); + } + for (int index = 1; index < backupPaths.size(); index++) { + String backupPath = backupPaths.get(index); + if (!backupPath.endsWith(".rbdiff")) { + continue; + } + Map metadata = readRbdBackupMetadata(backupPath); + String parentCheckpoint = metadata.get("parent_checkpoint_name"); + String checkpoint = metadata.get("checkpoint_name"); + if (StringUtils.isBlank(parentCheckpoint) || StringUtils.isBlank(checkpoint)) { + throw new CloudRuntimeException(String.format("RBD incremental backup metadata is incomplete for %s", backupPath)); + } + if (!rbdSnapshotExists(sourceImage, tempImage, parentCheckpoint, timeout)) { + throw new CloudRuntimeException(String.format("Required parent snapshot %s is missing on temporary image %s", parentCheckpoint, tempImage)); + } + String importDiffCommand = sourceImage.buildRbdCommand("import-diff", quote(backupPath), quote(tempImage)); + CommandExecutionResult importDiffResult = executeBashCommandWithResult(importDiffCommand, timeout, "Import RBD diff to temporary image"); + if (importDiffResult.exitCode != 0) { + logger.error("Failed to import RBD diff {} into temporary image {}. 
Exit code: {}, output: {}", backupPath, tempImage, + importDiffResult.exitCode, importDiffResult.output); + return false; + } + if (!ensureRbdSnapshotExists(sourceImage, tempImage, checkpoint, timeout)) { + return false; + } + restoreSnapshots.add(checkpoint); + } + return true; + } finally { + cleanupRbdRestoreSnapshots(sourceImage, tempImage, restoreSnapshots, timeout); + } + } + + private Map readRbdBackupMetadata(String backupPath) { + java.nio.file.Path metadataPath = Paths.get(backupPath).getParent().resolve("rbd-backup.meta"); + if (!Files.exists(metadataPath)) { + throw new CloudRuntimeException(String.format("RBD backup metadata file not found: %s", metadataPath)); + } + try { + return Files.readAllLines(metadataPath).stream() + .map(String::trim) + .filter(line -> !line.isEmpty() && line.contains("=")) + .map(line -> line.split("=", 2)) + .collect(java.util.stream.Collectors.toMap(parts -> parts[0], parts -> parts[1], (left, right) -> right)); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Failed to read RBD backup metadata: %s", metadataPath), e); + } + } + + private boolean ensureRbdSnapshotExists(KVMStoragePool storagePool, String volumePath, String snapshotName, int timeout) { + if (rbdSnapshotExists(storagePool, volumePath, snapshotName, timeout)) { + return true; + } + String createSnapshotCommand = buildRbdSnapshotCommand(storagePool, "snap create", volumePath + "@" + snapshotName); + CommandExecutionResult createSnapshotResult = executeBashCommandWithResult(createSnapshotCommand, timeout, "Create RBD snapshot on target volume"); + if (createSnapshotResult.exitCode != 0) { + logger.error("Failed to create RBD snapshot {} on volume {}. 
Exit code: {}, output: {}", snapshotName, volumePath, + createSnapshotResult.exitCode, createSnapshotResult.output); + return false; + } + return true; + } + + private boolean ensureRbdSnapshotExists(RbdImageSpec imageSpec, String image, String snapshotName, int timeout) { + if (rbdSnapshotExists(imageSpec, image, snapshotName, timeout)) { + return true; + } + String createSnapshotCommand = imageSpec.buildRbdCommand("snap", "create", quote(image + "@" + snapshotName)); + CommandExecutionResult createSnapshotResult = executeBashCommandWithResult(createSnapshotCommand, timeout, "Create RBD snapshot on temporary image"); + if (createSnapshotResult.exitCode != 0) { + logger.error("Failed to create RBD snapshot {} on image {}. Exit code: {}, output: {}", snapshotName, image, + createSnapshotResult.exitCode, createSnapshotResult.output); + return false; + } + return true; + } + + private CommandExecutionResult executeBashCommandWithResult(String command, int timeoutInSeconds, String description) { + logger.debug("{} command: {}", description, command); + String wrappedCommand = String.format("set -o pipefail; { %s; } 2>&1; rc=$?; echo \"%s${rc}\"", command, COMMAND_EXIT_MARKER); + String output = Script.runSimpleBashScriptWithFullResult(wrappedCommand, timeoutInSeconds); + if (StringUtils.isBlank(output)) { + return new CommandExecutionResult(-1, ""); + } + int markerIndex = output.lastIndexOf(COMMAND_EXIT_MARKER); + if (markerIndex < 0) { + logger.warn("{} command output did not include an exit marker. Output: {}", description, output); + return new CommandExecutionResult(-1, output.trim()); + } + String commandOutput = output.substring(0, markerIndex).trim(); + String exitCodeString = output.substring(markerIndex + COMMAND_EXIT_MARKER.length()).trim(); + int exitCode; + try { + exitCode = Integer.parseInt(exitCodeString); + } catch (NumberFormatException e) { + logger.warn("{} command exit marker was not a valid integer. 
Output: {}", description, output, e); + exitCode = -1; + } + if (exitCode == 0) { + logger.debug("{} command completed successfully. Output: {}", description, commandOutput); + } else { + logger.error("{} command failed with exit code {}. Output: {}", description, exitCode, commandOutput); + } + return new CommandExecutionResult(exitCode, commandOutput); + } + + private static final class CommandExecutionResult { + private final int exitCode; + private final String output; + + private CommandExecutionResult(int exitCode, String output) { + this.exitCode = exitCode; + this.output = output; + } + } + + private boolean rbdSnapshotExists(KVMStoragePool storagePool, String volumePath, String snapshotName, int timeout) { + String existsCommand = buildRbdSnapshotCommand(storagePool, "snap ls", volumePath) + " | awk 'NR>1 {print $2}' | grep -Fx " + quote(snapshotName); + return Script.runSimpleBashScriptForExitValue(existsCommand, timeout * 1000, false) == 0; + } + + private boolean rbdSnapshotExists(RbdImageSpec imageSpec, String image, String snapshotName, int timeout) { + String existsCommand = imageSpec.buildRbdCommand("snap", "ls", quote(image)) + " | awk 'NR>1 {print $2}' | grep -Fx " + quote(snapshotName); + return Script.runSimpleBashScriptForExitValue(existsCommand, timeout * 1000, false) == 0; + } + + private void cleanupRbdRestoreSnapshots(KVMStoragePool storagePool, String volumePath, List snapshotNames, int timeout) { + for (int index = snapshotNames.size() - 1; index >= 0; index--) { + String snapshotName = snapshotNames.get(index); + String removeSnapshotCommand = buildRbdSnapshotCommand(storagePool, "snap rm", volumePath + "@" + snapshotName); + Script.runSimpleBashScriptForExitValue(removeSnapshotCommand, timeout * 1000, false); + } + } + + private void cleanupRbdRestoreSnapshots(RbdImageSpec imageSpec, String image, List snapshotNames, int timeout) { + for (int index = snapshotNames.size() - 1; index >= 0; index--) { + String snapshotName = 
snapshotNames.get(index); + String removeSnapshotCommand = imageSpec.buildRbdCommand("snap", "rm", quote(image + "@" + snapshotName)); + Script.runSimpleBashScriptForExitValue(removeSnapshotCommand, timeout * 1000, false); + } + } + + private String buildRbdSnapshotCommand(KVMStoragePool storagePool, String action, String target) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" ").append(action).append(" ").append(target); + return command.toString(); + } + + private void removeTemporaryRbdImage(RbdImageSpec sourceImage, String tempImage, int timeout) { + String removeCommand = sourceImage.buildRbdCommand("rm", quote(tempImage)); + Script.runSimpleBashScriptForExitValue(removeCommand, timeout * 1000, false); + } + + private RbdImageSpec getRbdImageSpecFromMetadata(String backupRootPath, int backupIndex) { + java.nio.file.Path metadataPath = Paths.get(backupRootPath, "rbd-backup.meta"); + if (!Files.exists(metadataPath)) { + throw new CloudRuntimeException(String.format("RBD backup metadata file not found: %s", metadataPath)); + } + try { + java.util.Map metadata = Files.readAllLines(metadataPath).stream() + .map(String::trim) + .filter(line -> !line.isEmpty() && line.contains("=")) + .map(line -> line.split("=", 2)) + .collect(java.util.stream.Collectors.toMap(parts -> parts[0], parts -> parts[1], (left, right) -> right)); + String diskPaths = metadata.get("disk_paths"); + if (StringUtils.isBlank(diskPaths)) { + throw new CloudRuntimeException("RBD backup metadata does not contain disk_paths"); + } + List values 
= Arrays.asList(diskPaths.split(",")); + if (backupIndex >= values.size()) { + throw new CloudRuntimeException(String.format("RBD backup metadata does not contain disk path for index %d", backupIndex)); + } + return RbdImageSpec.fromUri(values.get(backupIndex)); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Failed to read RBD backup metadata: %s", metadataPath), e); + } + } + + private String quote(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } + + private boolean attachVolumeToVm(KVMStoragePoolManager storagePoolMgr, String vmName, PrimaryDataStoreTO volumePool, String volumePath, String cacheMode) { + String deviceToAttachDiskTo = getDeviceToAttachDisk(vmName); + int exitValue; + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_QCOW2_DISK_COMMAND, vmName, volumePath, deviceToAttachDiskTo)); + } else { + String xmlForRbdDisk = getXmlForRbdDisk(storagePoolMgr, volumePool, volumePath, deviceToAttachDiskTo, cacheMode); + logger.debug("RBD disk xml to attach: {}", xmlForRbdDisk); + exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_RBD_DISK_XML_COMMAND, vmName, xmlForRbdDisk)); + } + return exitValue == 0; + } + + private String getDeviceToAttachDisk(String vmName) { + String currentDevice = Script.runSimpleBashScript(String.format(CURRRENT_DEVICE, vmName)); + char lastChar = currentDevice.charAt(currentDevice.length() - 1); + char incrementedChar = (char) (lastChar + 1); + return currentDevice.substring(0, currentDevice.length() - 1) + incrementedChar; + } + + private String getXmlForRbdDisk(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String deviceToAttachDiskTo, String cacheMode) { + StringBuilder diskBuilder = new StringBuilder(); + diskBuilder.append("\n\n"); + + diskBuilder.append(" \n"); + + diskBuilder.append("\n"); + for (String sourceHost : 
volumePool.getHost().split(",")) { + diskBuilder.append("\n"); + } + diskBuilder.append("\n"); + String authUserName = null; + final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + if (primaryPool != null) { + authUserName = primaryPool.getAuthUserName(); + } + if (StringUtils.isNotBlank(authUserName)) { + diskBuilder.append("\n"); + diskBuilder.append("\n"); + diskBuilder.append("\n"); + } + diskBuilder.append("\n"); + diskBuilder.append("\n"); + return diskBuilder.toString(); + } + + private void fetchBackupFile(String hostName, String backupPath) { + int mkdirExit = Script.runSimpleBashScriptForExitValue(String.format(MKDIR_P, backupPath)); + if (mkdirExit != 0) { + throw new CloudRuntimeException(String.format("Failed to create local backup directory: %s", backupPath)); + } + + String cmd = String.format(RSYNC_DIR_FROM_REMOTE, hostName, backupPath, backupPath); + logger.debug("Fetching commvault backup directory from remote host. cmd={}", cmd); + + int exit = Script.runSimpleBashScriptForExitValue(cmd); + if (exit != 0) { + throw new CloudRuntimeException(String.format( + "Failed to fetch backup directory from remote host [%s]. remotePath=[%s], localPath=[%s]", + hostName, backupPath, backupPath)); + } + } + + private static final class RbdImageSpec { + private final String image; + private final String monHost; + private final String user; + private final String key; + + private RbdImageSpec(String image, String monHost, String user, String key) { + this.image = image; + this.monHost = monHost; + this.user = user; + this.key = key; + } + + private static RbdImageSpec fromUri(String uri) { + String image = null; + String monHost = null; + String user = null; + String key = null; + if (uri.startsWith("rbd:")) { + String payload = uri.substring("rbd:".length()); + image = payload.contains(":") ? 
payload.substring(0, payload.indexOf(':')) : payload; + monHost = extract(uri, ":mon_host=([^:]*)"); + if (monHost != null) { + monHost = monHost.replace("\\;", ",").replace("\\:", ":"); + } + user = extract(uri, ":id=([^:]*)"); + key = extract(uri, ":key=([^:]*)"); + } else if (uri.startsWith("rbd/")) { + image = uri; + } + if (StringUtils.isBlank(image)) { + throw new CloudRuntimeException(String.format("Unable to parse RBD disk path: %s", uri)); + } + return new RbdImageSpec(image, monHost, user, key); + } + + private static String extract(String value, String regex) { + java.util.regex.Matcher matcher = java.util.regex.Pattern.compile(regex).matcher(value); + return matcher.find() ? matcher.group(1) : null; + } + + private String buildTempImageSpec() { + return String.format("%s-csrestore-%s", image, org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric(8).toLowerCase(Locale.ROOT)); + } + + private String buildRbdCommand(String action, String source, String target) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(monHost)) { + command.append(" -m ").append(quoteArg(monHost)); + } + if (StringUtils.isNotBlank(user)) { + command.append(" --id ").append(quoteArg(user)); + } + if (StringUtils.isNotBlank(key)) { + command.append(" --key ").append(quoteArg(key)); + } + command.append(" ").append(action); + if (StringUtils.isNotBlank(source)) { + command.append(" ").append(source); + } + if (StringUtils.isNotBlank(target)) { + command.append(" ").append(target); + } + return command.toString(); + } + + private String buildRbdCommand(String action, String target) { + return buildRbdCommand(action, null, target); + } + + private String buildQemuUri(String imageSpec) { + StringBuilder uri = new StringBuilder("rbd:").append(imageSpec); + if (StringUtils.isNotBlank(monHost)) { + uri.append(":mon_host=").append(monHost.replace(",", "\\;")); + } + if (StringUtils.isNotBlank(user)) { + uri.append(":id=").append(user); + } + if 
(StringUtils.isNotBlank(key)) { + uri.append(":key=").append(key); + } + return uri.toString(); + } + + private String quoteArg(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultTakeBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultTakeBackupCommandWrapper.java new file mode 100644 index 000000000000..1509cae11cc9 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultTakeBackupCommandWrapper.java @@ -0,0 +1,50 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackCommvaultTakeBackupCommand; + +@ResourceWrapper(handles = AblestackCommvaultTakeBackupCommand.class) +public class LibvirtAblestackCommvaultTakeBackupCommandWrapper extends CommandWrapper { + @Override + public Answer execute(AblestackCommvaultTakeBackupCommand command, LibvirtComputingResource libvirtComputingResource) { + LibvirtAblestackCommvaultBackupHelper backupHelper = new LibvirtAblestackCommvaultBackupHelper(libvirtComputingResource); + Pair result = backupHelper.executeBackup(command); + + if (result.first() != 0) { + logger.debug("Failed to take VM backup"); + BackupAnswer answer = new BackupAnswer(command, false, null); + if (result.first() == LibvirtAblestackCommvaultBackupHelper.EXIT_CLEANUP_FAILED) { + logger.debug("Backup cleanup failed"); + answer.setNeedsCleanup(true); + } + return answer; + } + + BackupAnswer answer = new BackupAnswer(command, true, "success"); + return answer; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackDeleteBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackDeleteBackupCommandWrapper.java new file mode 100644 index 000000000000..7355a4bf56e7 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackDeleteBackupCommandWrapper.java @@ -0,0 +1,103 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import com.cloud.utils.script.Script; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackDeleteBackupCommand; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.List; + +@ResourceWrapper(handles = AblestackDeleteBackupCommand.class) +public class LibvirtAblestackDeleteBackupCommandWrapper extends CommandWrapper { + @Override + public Answer execute(AblestackDeleteBackupCommand command, LibvirtComputingResource libvirtComputingResource) { + final String backupPath = command.getBackupPath(); + final String backupRepoType = command.getBackupRepoType(); + final String backupRepoAddress = command.getBackupRepoAddress(); + final String mountOptions = command.getMountOptions(); + final String backupProvider = command.getBackupProvider(); + final String checkpointName = command.getCheckpointName(); + final String diskPaths = command.getDiskPaths(); + final boolean forced = command.isForced(); + + List commands = new 
ArrayList<>(); + if ("ablestack-commvault".equalsIgnoreCase(backupProvider)) { + List deleteCommand = new ArrayList<>(); + deleteCommand.add(libvirtComputingResource.getAbleCvtBackupPath()); + deleteCommand.add("-o"); + deleteCommand.add("delete"); + deleteCommand.add("-p"); + deleteCommand.add(backupPath); + deleteCommand.add("-x"); + deleteCommand.add(Boolean.toString(forced)); + if (StringUtils.isNotBlank(checkpointName)) { + deleteCommand.add("-c"); + deleteCommand.add(checkpointName); + } + if (StringUtils.isNotBlank(diskPaths)) { + deleteCommand.add("-d"); + deleteCommand.add(diskPaths); + } + commands.add(deleteCommand.toArray(new String[0])); + } else { + List deleteCommand = new ArrayList<>(); + deleteCommand.add(libvirtComputingResource.getAbleNasBackupPath()); + deleteCommand.add("-o"); + deleteCommand.add("delete"); + deleteCommand.add("-t"); + deleteCommand.add(backupRepoType); + deleteCommand.add("-s"); + deleteCommand.add(backupRepoAddress); + deleteCommand.add("-m"); + deleteCommand.add(mountOptions); + deleteCommand.add("-p"); + deleteCommand.add(backupPath); + deleteCommand.add("-x"); + deleteCommand.add(Boolean.toString(forced)); + if (StringUtils.isNotBlank(checkpointName)) { + deleteCommand.add("-c"); + deleteCommand.add(checkpointName); + } + if (StringUtils.isNotBlank(diskPaths)) { + deleteCommand.add("-d"); + deleteCommand.add(diskPaths); + } + commands.add(deleteCommand.toArray(new String[0])); + } + + Pair result = Script.executePipedCommands(commands, libvirtComputingResource.getCmdsTimeout()); + + logger.debug(String.format("Backup delete result: %s , exit code: %s", result.second(), result.first())); + + if (result.first() != 0) { + logger.debug(String.format("Failed to delete VM backup: %s", result.second())); + return new BackupAnswer(command, false, result.second()); + } + return new BackupAnswer(command, true, null); + } +} diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasBackupHelper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasBackupHelper.java new file mode 100644 index 000000000000..e7bb0dc8e1cd --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasBackupHelper.java @@ -0,0 +1,454 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.amazonaws.util.CollectionUtils; +import com.cloud.hypervisor.kvm.resource.LibvirtConnection; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.script.Script; +import org.apache.cloudstack.backup.AblestackNasTakeBackupCommand; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.libvirt.Connect; +import org.libvirt.Domain; +import org.libvirt.DomainInfo.DomainState; +import org.libvirt.LibvirtException; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +class LibvirtAblestackNasBackupHelper { + protected Logger LOGGER = LogManager.getLogger(LibvirtAblestackNasBackupHelper.class); + static final Integer EXIT_CLEANUP_FAILED = 20; + private static final int BACKUP_JOB_POLL_INTERVAL_MS = 10000; + + enum BackupExecutionMode { + RUNNING("backup-running"), + STOPPED("backup-stopped"), + RBD("backup-rbd"); + + private final String scriptOperation; + + BackupExecutionMode(String scriptOperation) { + this.scriptOperation = scriptOperation; + } + + String getScriptOperation() { + return scriptOperation; + } + } + + private final LibvirtComputingResource resource; + + LibvirtAblestackNasBackupHelper(LibvirtComputingResource resource) { + this.resource = resource; + } + + Pair executeBackup(AblestackNasTakeBackupCommand command) { + LOGGER.info("LibvirtNasBackupHelper 
executeBackup entered for vm=[{}], backupPath=[{}], backupType=[{}]", + command.getVmName(), command.getBackupPath(), command.getBackupType()); + List diskPaths = resolveDiskPaths(command.getVolumePools(), command.getVolumePaths()); + BackupExecutionMode executionMode = determineExecutionMode(command.getVmName(), command.getVolumePools()); + LOGGER.debug("NAS backup execution mode=[{}], vm=[{}], backupType=[{}], diskPaths=[{}]", + executionMode, command.getVmName(), command.getBackupType(), diskPaths); + if (BackupExecutionMode.STOPPED.equals(executionMode)) { + return executeStoppedVmBackup(command, diskPaths); + } + List commands = new ArrayList<>(); + String[] scriptCommand = buildBackupScriptCommand(command, diskPaths, executionMode); + LOGGER.debug("Executing NAS backup script command=[{}]", String.join(" ", scriptCommand)); + commands.add(scriptCommand); + return Script.executePipedCommands(commands, resource.getCmdsTimeout()); + } + + List resolveDiskPaths(List volumePools, List volumePaths) { + List diskPaths = new ArrayList<>(); + if (Objects.isNull(volumePaths)) { + return diskPaths; + } + + KVMStoragePoolManager storagePoolMgr = resource.getStoragePoolMgr(); + for (int idx = 0; idx < volumePaths.size(); idx++) { + PrimaryDataStoreTO volumePool = volumePools.get(idx); + String volumePath = volumePaths.get(idx); + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + diskPaths.add(volumePath); + continue; + } + + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + diskPaths.add(KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, volumePath)); + } + + return diskPaths; + } + + long parseBackupSize(String output, List diskPaths) { + if (output == null || output.isBlank()) { + return 0L; + } + + List parsedSizes = Arrays.stream(output.trim().split("\n")) + .map(String::trim) + .filter(this::isWholeNumber) + .map(Long::parseLong) + .collect(Collectors.toList()); + + if 
(parsedSizes.isEmpty()) { + LOGGER.warn("Unable to parse NAS backup size from output=[{}]", output); + return 0L; + } + + if (CollectionUtils.isNullOrEmpty(diskPaths)) { + return parsedSizes.get(parsedSizes.size() - 1); + } + + return parsedSizes.stream().mapToLong(Long::longValue).sum(); + } + + private boolean isWholeNumber(String value) { + return value != null && !value.isEmpty() && value.chars().allMatch(Character::isDigit); + } + + private String[] buildBackupScriptCommand(AblestackNasTakeBackupCommand command, List diskPaths, BackupExecutionMode executionMode) { + return new String[] { + resource.getAbleNasBackupPath(), + "-o", executionMode.getScriptOperation(), + "-v", command.getVmName(), + "-t", command.getBackupRepoType(), + "-s", command.getBackupRepoAddress(), + "-m", Objects.nonNull(command.getMountOptions()) ? command.getMountOptions() : "", + "-p", command.getBackupPath(), + "-b", Objects.nonNull(command.getBackupType()) ? command.getBackupType() : "", + "-c", Objects.nonNull(command.getCheckpointName()) ? command.getCheckpointName() : "", + "-r", Objects.nonNull(command.getParentBackupPath()) ? command.getParentBackupPath() : "", + "-i", Objects.nonNull(command.getParentCheckpointName()) ? command.getParentCheckpointName() : "", + "-j", Objects.nonNull(command.getParentCheckpointPath()) ? command.getParentCheckpointPath() : "", + "-q", command.getQuiesce() != null && command.getQuiesce() ? "true" : "false", + "-f", CollectionUtils.isNullOrEmpty(command.getBackupFiles()) ? "" : String.join(",", command.getBackupFiles()), + "-d", diskPaths.isEmpty() ? "" : String.join(",", diskPaths) + }; + } + + private BackupExecutionMode determineExecutionMode(String vmName, List volumePools) { + if (hasRbdVolumes(volumePools)) { + return BackupExecutionMode.RBD; + } + return isVmRunning(vmName) ? 
BackupExecutionMode.RUNNING : BackupExecutionMode.STOPPED; + } + + private boolean hasRbdVolumes(List volumePools) { + if (CollectionUtils.isNullOrEmpty(volumePools)) { + return false; + } + return volumePools.stream().anyMatch(pool -> pool != null && pool.getPoolType() == Storage.StoragePoolType.RBD); + } + + private boolean isVmRunning(String vmName) { + try { + Connect conn = LibvirtConnection.getConnectionByVmName(vmName); + Domain domain = resource.getDomain(conn, vmName); + return domain != null && DomainState.VIR_DOMAIN_RUNNING.equals(domain.getInfo().state); + } catch (LibvirtException e) { + return false; + } + } + + private Pair executeStoppedVmBackup(AblestackNasTakeBackupCommand command, List diskPaths) { + Path mountPoint = null; + String dummyVmName = String.format("DUMMY-VM-%s", command.getCheckpointName().replace('.', '-')); + Connect conn = null; + try { + LOGGER.info("Starting stopped VM NAS backup for vm=[{}], dummyVm=[{}], backupType=[{}]", + command.getVmName(), dummyVmName, command.getBackupType()); + validateStoppedBackupDiskPaths(diskPaths); + if (isIncremental(command)) { + resource.validateLibvirtAndQemuVersionForIncrementalSnapshots(); + } + mountPoint = mountRepository(command); + Path dest = mountPoint.resolve(command.getBackupPath()); + Files.createDirectories(dest.resolve("checkpoints")); + + conn = LibvirtConnection.getConnection(); + String dummyVmXml = buildDummyVmXml(dummyVmName, diskPaths, conn); + resource.startVM(conn, dummyVmName, dummyVmXml, Domain.CreateFlags.PAUSED); + + if (isIncremental(command) && command.getParentCheckpointPath() != null && !command.getParentCheckpointPath().isEmpty()) { + redefineCheckpointIfNeeded(dummyVmName, mountPoint.resolve(command.getParentCheckpointPath())); + } + + List diskLabels = getDiskLabels(conn, dummyVmName); + Path backupXml = writeBackupXml(dest, command, diskLabels); + Path checkpointXml = writeCheckpointXml(dest, command, diskLabels); + + String backupBeginCommand = 
String.format("virsh -c qemu:///system backup-begin --domain %s --backupxml %s --checkpointxml %s", + shellQuote(dummyVmName), shellQuote(backupXml.toString()), shellQuote(checkpointXml.toString())); + LOGGER.debug("Starting stopped VM NAS backup-begin command=[{}]", backupBeginCommand); + if (Script.runSimpleBashScriptForExitValue(backupBeginCommand, resource.getCmdsTimeout(), false) != 0) { + LOGGER.error("Failed to start backup for stopped VM dummy domain [{}]", dummyVmName); + return new Pair<>(1, "Failed to start backup for dummy VM " + dummyVmName); + } + + try { + waitForBackup(dummyVmName); + } catch (IOException e) { + cancelBackupJob(dummyVmName); + throw e; + } + + if (isIncremental(command) && command.getParentBackupPath() != null && !command.getParentBackupPath().isEmpty()) { + rebaseIncrementalChain(dest, command, diskPaths); + } + + dumpCheckpointXml(dummyVmName, command.getCheckpointName(), dest); + + Files.deleteIfExists(backupXml); + Files.deleteIfExists(checkpointXml); + runCommand(String.format("sync")); + String output = listTopLevelFileSizes(dest); + LOGGER.info("Completed stopped VM NAS backup for vm=[{}], dummyVm=[{}]", command.getVmName(), dummyVmName); + return new Pair<>(0, output); + } catch (Exception e) { + LOGGER.error("Stopped VM NAS backup failed for vm=[{}], dummyVm=[{}] due to: {}", + command.getVmName(), dummyVmName, e.getMessage(), e); + return new Pair<>(1, e.getMessage()); + } finally { + cleanupDummyVm(dummyVmName); + unmountRepository(mountPoint); + } + } + + private Path mountRepository(AblestackNasTakeBackupCommand command) throws IOException { + Path mountPoint = Files.createTempDirectory("csbackup."); + StringBuilder mount = new StringBuilder() + .append("mount -t ").append(shellQuote(command.getBackupRepoType())) + .append(" ").append(shellQuote(command.getBackupRepoAddress())) + .append(" ").append(shellQuote(mountPoint.toString())); + if (command.getMountOptions() != null && !command.getMountOptions().isEmpty()) { + 
mount.append(" -o ").append(shellQuote(command.getMountOptions())); + } + if (Script.runSimpleBashScriptForExitValue(mount.toString(), resource.getCmdsTimeout(), false) != 0) { + throw new IOException("Failed to mount backup repository"); + } + return mountPoint; + } + + private void unmountRepository(Path mountPoint) { + if (mountPoint == null) { + return; + } + Script.runSimpleBashScriptForExitValue(String.format("umount %s", shellQuote(mountPoint.toString()))); + try { + Files.deleteIfExists(mountPoint); + } catch (IOException ignored) { + } + } + + private String buildDummyVmXml(String vmName, List diskPaths, Connect conn) throws LibvirtException { + String arch = resource.getGuestCpuArch() != null ? resource.getGuestCpuArch() : "x86_64"; + String machine = resource.isGuestAarch64() ? LibvirtComputingResource.VIRT : LibvirtComputingResource.PC; + String emulator = resource.getHypervisorPath(); + StringBuilder xml = new StringBuilder(); + xml.append("") + .append("").append(vmName).append("") + .append("128") + .append("1") + .append("hvm") + .append("").append(emulator).append(""); + for (int i = 0; i < diskPaths.size(); i++) { + char letter = (char) ('a' + i); + xml.append("") + .append("") + .append("") + .append(""); + } + xml.append(""); + return xml.toString(); + } + + private void validateStoppedBackupDiskPaths(List diskPaths) { + if (diskPaths.stream().anyMatch(path -> path != null && path.startsWith("rbd:"))) { + throw new IllegalArgumentException("Stopped VM dummy backup flow supports only file-backed disks. 
RBD backups must use the dedicated RBD backup path."); + } + } + + private void redefineCheckpointIfNeeded(String vmName, Path checkpointPath) throws IOException { + if (!Files.exists(checkpointPath)) { + return; + } + String checkpointName = checkpointPath.getFileName().toString().replace(".xml", ""); + int infoExit = Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-info --domain %s --checkpointname %s > /dev/null 2>&1", + shellQuote(vmName), shellQuote(checkpointName))); + if (infoExit == 0) { + return; + } + int redefineExit = Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-create --domain %s --xmlfile %s --redefine > /dev/null 2>&1", + shellQuote(vmName), shellQuote(checkpointPath.toString()))); + if (redefineExit != 0) { + throw new IOException("Failed to redefine checkpoint " + checkpointName + " on domain " + vmName); + } + } + + private List getDiskLabels(Connect conn, String vmName) { + return resource.getDisks(conn, vmName).stream() + .map(disk -> disk.getDiskLabel()) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + } + + private Path writeBackupXml(Path dest, AblestackNasTakeBackupCommand command, List diskLabels) throws IOException { + StringBuilder xml = new StringBuilder(""); + if (isIncremental(command) && command.getParentCheckpointName() != null && !command.getParentCheckpointName().isEmpty()) { + xml.append("").append(command.getParentCheckpointName()).append(""); + } + xml.append(""); + for (int i = 0; i < diskLabels.size(); i++) { + String backupFile = getBackupFileByIndex(command, i, String.format("disk-%d.qcow2", i)); + xml.append("") + .append("") + .append(""); + } + xml.append(""); + Path backupXml = dest.resolve("backup.xml"); + Files.writeString(backupXml, xml.toString(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); + return backupXml; + } + + private Path writeCheckpointXml(Path dest, AblestackNasTakeBackupCommand 
command, List diskLabels) throws IOException { + StringBuilder xml = new StringBuilder("") + .append(command.getCheckpointName()) + .append(""); + for (String diskLabel : diskLabels) { + xml.append(""); + } + xml.append(""); + Path checkpointXml = dest.resolve("checkpoint.xml"); + Files.writeString(checkpointXml, xml.toString(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); + return checkpointXml; + } + + private void waitForBackup(String vmName) throws IOException { + int timeout = resource.getCmdsTimeout(); + while (timeout > 0) { + String result = checkBackupJob(vmName); + if (result != null && result.contains("Completed") && result.contains("Backup")) { + return; + } + if (result != null && result.contains("Failed")) { + throw new IOException("Virsh backup job failed for dummy VM " + vmName); + } + timeout -= BACKUP_JOB_POLL_INTERVAL_MS; + try { + Thread.sleep(BACKUP_JOB_POLL_INTERVAL_MS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException(e); + } + } + throw new IOException("Timed out waiting for backup job of dummy VM " + vmName); + } + + private void cancelBackupJob(String vmName) { + Script.runSimpleBashScriptForExitValue(String.format("virsh -c qemu:///system domjobabort --domain %s > /dev/null 2>&1", shellQuote(vmName))); + } + + private String checkBackupJob(String vmName) { + return Script.runSimpleBashScriptWithFullResult( + String.format("virsh -c qemu:///system domjobinfo %s --completed --keep-completed", shellQuote(vmName)), 10); + } + + private void rebaseIncrementalChain(Path dest, AblestackNasTakeBackupCommand command, List diskPaths) throws IOException { + for (int i = 0; i < diskPaths.size(); i++) { + String backupFile = getBackupFileByIndex(command, i, String.format("disk-%d.qcow2", i)); + Path output = dest.resolve(backupFile); + String parent = "../" + Path.of(command.getParentBackupPath()).getFileName() + "/" + backupFile; + int exit = 
Script.runSimpleBashScriptForExitValue(String.format( + "qemu-img rebase -u -F qcow2 -b %s %s", + shellQuote(parent), shellQuote(output.toString())), resource.getCmdsTimeout(), false); + if (exit != 0) { + throw new IOException("qemu-img rebase failed for " + output + " with parent " + parent); + } + } + } + + private void dumpCheckpointXml(String vmName, String checkpointName, Path dest) { + Path checkpointDest = dest.resolve("checkpoints").resolve(checkpointName + ".xml"); + Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-dumpxml --domain %s --checkpointname %s --no-domain > %s 2>/dev/null", + shellQuote(vmName), shellQuote(checkpointName), shellQuote(checkpointDest.toString()))); + } + + private String listTopLevelFileSizes(Path dest) throws IOException { + try (var stream = Files.list(dest)) { + return stream.filter(Files::isRegularFile) + .sorted(Comparator.comparing(path -> path.getFileName().toString())) + .map(path -> { + try { + return String.valueOf(Files.size(path)); + } catch (IOException e) { + return "0"; + } + }) + .collect(Collectors.joining("\n")); + } + } + + private void cleanupDummyVm(String dummyVmName) { + runCommand(String.format("virsh -c qemu:///system destroy %s > /dev/null 2>&1 || true", shellQuote(dummyVmName))); + runCommand(String.format("virsh -c qemu:///system undefine %s --nvram > /dev/null 2>&1 || virsh -c qemu:///system undefine %s > /dev/null 2>&1 || true", + shellQuote(dummyVmName), shellQuote(dummyVmName))); + } + + private void runCommand(String command) { + Script.runSimpleBashScriptForExitValue(command, resource.getCmdsTimeout(), false); + } + + private boolean isIncremental(AblestackNasTakeBackupCommand command) { + return "INCREMENTAL".equalsIgnoreCase(command.getBackupType()); + } + + private String getBackupFileByIndex(AblestackNasTakeBackupCommand command, int index, String fallback) { + List backupFiles = command.getBackupFiles(); + if 
(CollectionUtils.isNullOrEmpty(backupFiles) || index >= backupFiles.size()) { + return fallback; + } + return backupFiles.get(index); + } + + private String shellQuote(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasRestoreBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasRestoreBackupCommandWrapper.java new file mode 100644 index 000000000000..fd2108638ccf --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasRestoreBackupCommandWrapper.java @@ -0,0 +1,903 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.backup.AblestackBackupFrameworkUtils; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackNasRestoreBackupCommand; +import org.apache.cloudstack.backup.BackupRestorePlan; +import org.apache.cloudstack.backup.BackupRestoreStage; +import org.apache.cloudstack.backup.BackupVolumeChainState; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; +import org.libvirt.LibvirtException; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +@ResourceWrapper(handles = AblestackNasRestoreBackupCommand.class) +public class LibvirtAblestackNasRestoreBackupCommandWrapper extends CommandWrapper { + private static final String BACKUP_TEMP_FILE_PREFIX = "csbackup"; + private static final String MOUNT_COMMAND = "sudo mount -t %s %s %s"; + private static final String UMOUNT_COMMAND = "sudo umount %s"; + private 
static final String FILE_PATH_PLACEHOLDER = "%s/%s"; + private static final String ATTACH_QCOW2_DISK_COMMAND = " virsh attach-disk %s %s %s --driver qemu --subdriver qcow2 --cache none"; + private static final String ATTACH_RBD_DISK_XML_COMMAND = " virsh attach-device %s /dev/stdin < restoreVolumePools = command.getRestoreVolumePools(); + List restoreVolumePaths = command.getRestoreVolumePaths(); + Integer mountTimeout = command.getMountTimeout() * 1000; + int timeout = command.getWait() > 0 ? command.getWait() : command.getMountTimeout(); + String cacheMode = command.getCacheMode(); + KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr(); + List volumePaths = command.getVolumePaths(); + List backupFiles = command.getBackupFiles(); + List backupFileChains = command.getBackupFileChains(); + List volumeChainStates = command.getVolumeChainStates(); + BackupRestorePlan restorePlan = command.getRestorePlan(); + + String newVolumeId = null; + try { + validateChainStatePlan(volumeChainStates, restorePlan); + String mountDirectory = AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.PREPARE_SOURCE) + ? mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions, mountTimeout) : null; + if (Objects.isNull(vmExists)) { + String volumePath = volumePaths.get(0); + String backupFile = backupFiles.get(0); + BackupVolumeChainState volumeChainState = volumeChainStates != null && !volumeChainStates.isEmpty() ? volumeChainStates.get(0) : null; + String backupFileChain = volumeChainState != null ? String.join(";", volumeChainState.getChainFiles()) : + (backupFileChains != null && !backupFileChains.isEmpty() ? 
backupFileChains.get(0) : null); + validateResolvedChainPaths(getMountedBackupPaths(mountDirectory, backupPath, backupFile, backupFileChain), volumePath); + int lastIndex = volumePath.lastIndexOf("/"); + newVolumeId = volumePath.substring(lastIndex + 1); + restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, backupFile, backupFileChain, + new Pair<>(vmName, command.getVmState()), mountOptions, mountTimeout, timeout, storagePoolMgr, restoreVolumePools.get(0), cacheMode, restorePlan); + } else if (Boolean.TRUE.equals(vmExists)) { + restoreVolumesOfExistingVM(restoreVolumePaths, backupPath, backupFiles, backupFileChains, volumeChainStates, mountDirectory, timeout, storagePoolMgr, + restoreVolumePools, restorePlan); + } else { + restoreVolumesOfDestroyedVMs(restoreVolumePaths, backupPath, backupFiles, backupFileChains, volumeChainStates, backupRepoAddress, backupRepoType, mountOptions, + mountTimeout, storagePoolMgr, restoreVolumePools, timeout, restorePlan); + } + } catch (CloudRuntimeException e) { + String errorMessage = e.getMessage() != null ? e.getMessage() : ""; + return new BackupAnswer(command, false, errorMessage); + } + + return new BackupAnswer(command, true, newVolumeId); + } + + private void restoreVolumesOfExistingVM(List volumePaths, String backupPath, List backupFiles, List backupFileChains, + List volumeChainStates, + String mountDirectory, Integer timeout, KVMStoragePoolManager storagePoolMgr, List restoreVolumePools, + BackupRestorePlan restorePlan) { + try { + for (int idx = 0; idx < volumePaths.size(); idx++) { + String volumePath = volumePaths.get(idx); + String backupFile = backupFiles.get(idx); + BackupVolumeChainState volumeChainState = volumeChainStates != null && volumeChainStates.size() > idx ? volumeChainStates.get(idx) : null; + String backupFileChain = volumeChainState != null ? String.join(";", volumeChainState.getChainFiles()) : + (backupFileChains != null && backupFileChains.size() > idx ? 
backupFileChains.get(idx) : null); + List mountedBackupPaths = getMountedBackupPaths(mountDirectory, backupPath, backupFile, backupFileChain); + validateResolvedChainPaths(mountedBackupPaths, volumePath); + PrimaryDataStoreTO restoreVolumePool = restoreVolumePools.get(idx); + if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, volumePath, mountedBackupPaths, timeout, + String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), idx)) { + throw new CloudRuntimeException(String.format("Unable to restore backup from volume [%s].", volumePath)); + } + } + } finally { + cleanupMountedBackupDirectory(mountDirectory, restorePlan); + } + } + + private void restoreVolumesOfDestroyedVMs(List volumePaths, String backupPath, List backupFiles, List backupFileChains, + List volumeChainStates, + String backupRepoAddress, String backupRepoType, String mountOptions, Integer mountTimeout, KVMStoragePoolManager storagePoolMgr, + List restoreVolumePools, Integer timeout, BackupRestorePlan restorePlan) { + String mountDirectory = AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.PREPARE_SOURCE) + ? mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions, mountTimeout) : null; + try { + for (int idx = 0; idx < volumePaths.size(); idx++) { + String volumePath = volumePaths.get(idx); + String backupFile = backupFiles.get(idx); + BackupVolumeChainState volumeChainState = volumeChainStates != null && volumeChainStates.size() > idx ? volumeChainStates.get(idx) : null; + String backupFileChain = volumeChainState != null ? String.join(";", volumeChainState.getChainFiles()) : + (backupFileChains != null && backupFileChains.size() > idx ? 
backupFileChains.get(idx) : null); + List mountedBackupPaths = getMountedBackupPaths(mountDirectory, backupPath, backupFile, backupFileChain); + validateResolvedChainPaths(mountedBackupPaths, volumePath); + PrimaryDataStoreTO restoreVolumePool = restoreVolumePools.get(idx); + if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, volumePath, mountedBackupPaths, timeout, + String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), idx)) { + throw new CloudRuntimeException(String.format("Unable to restore backup from volume [%s].", volumePath)); + } + } + } finally { + cleanupMountedBackupDirectory(mountDirectory, restorePlan); + } + } + + private void restoreVolume(String backupPath, String backupRepoType, String backupRepoAddress, String volumePath, String backupFile, String backupFileChain, + Pair vmNameAndState, String mountOptions, Integer mountTimeout, Integer timeout, + KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO restoreVolumePool, String cacheMode, BackupRestorePlan restorePlan) { + String mountDirectory = AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.PREPARE_SOURCE) + ? 
mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions, mountTimeout) : null; + try { + List mountedBackupPaths = getMountedBackupPaths(mountDirectory, backupPath, backupFile, backupFileChain); + validateResolvedChainPaths(mountedBackupPaths, volumePath); + if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, volumePath, mountedBackupPaths, timeout, + String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), 0, true)) { + throw new CloudRuntimeException(String.format("Unable to restore backup from volume [%s].", volumePath)); + } + if (AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.ATTACH_VOLUME) + && VirtualMachine.State.Running.equals(vmNameAndState.second())) { + if (!attachVolumeToVm(storagePoolMgr, vmNameAndState.first(), restoreVolumePool, volumePath, cacheMode)) { + throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first())); + } + } + } finally { + cleanupMountedBackupDirectory(mountDirectory, restorePlan); + } + } + + private void validateChainStatePlan(List volumeChainStates, BackupRestorePlan restorePlan) { + if (AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, BackupRestoreStage.VALIDATE_CHAIN) && volumeChainStates != null && !volumeChainStates.isEmpty()) { + try { + AblestackBackupFrameworkUtils.validateVolumeChainStates(volumeChainStates); + } catch (IllegalArgumentException e) { + throw new CloudRuntimeException(e.getMessage(), e); + } + } + } + + private void validateResolvedChainPaths(List resolvedPaths, String volumePath) { + if (resolvedPaths == null || resolvedPaths.isEmpty()) { + throw new CloudRuntimeException(String.format("No resolved backup chain paths found for volume [%s]", volumePath)); + } + } + + private void cleanupMountedBackupDirectory(String mountDirectory, BackupRestorePlan restorePlan) { + if (StringUtils.isBlank(mountDirectory)) { + return; + } + if (AblestackBackupFrameworkUtils.hasRestoreStage(restorePlan, 
BackupRestoreStage.CLEANUP_SOURCE)) { + unmountBackupDirectory(mountDirectory); + deleteTemporaryDirectory(mountDirectory); + } + } + + + private String mountBackupDirectory(String backupRepoAddress, String backupRepoType, String mountOptions, Integer mountTimeout) { + String randomChars = RandomStringUtils.random(5, true, false); + String mountDirectory = String.format("%s.%s",BACKUP_TEMP_FILE_PREFIX , randomChars); + + try { + mountDirectory = Files.createTempDirectory(mountDirectory).toString(); + } catch (IOException e) { + logger.error(String.format("Failed to create the tmp mount directory {} for restore", mountDirectory), e); + throw new CloudRuntimeException("Failed to create the tmp mount directory for restore on the KVM host"); + } + + String mount = String.format(MOUNT_COMMAND, backupRepoType, backupRepoAddress, mountDirectory); + if ("cifs".equals(backupRepoType)) { + if (Objects.isNull(mountOptions) || mountOptions.trim().isEmpty()) { + mountOptions = "nobrl"; + } else { + mountOptions += ",nobrl"; + } + } + if (Objects.nonNull(mountOptions) && !mountOptions.trim().isEmpty()) { + mount += " -o " + mountOptions; + } + + int exitValue = Script.runSimpleBashScriptForExitValue(mount, mountTimeout, false); + if (exitValue != 0) { + logger.error(String.format("Failed to mount repository {} of type {} to the directory {}", backupRepoAddress, backupRepoType, mountDirectory)); + throw new CloudRuntimeException("Failed to mount the backup repository on the KVM host"); + } + return mountDirectory; + } + + private void unmountBackupDirectory(String backupDirectory) { + String umountCmd = String.format(UMOUNT_COMMAND, backupDirectory); + int exitValue = Script.runSimpleBashScriptForExitValue(umountCmd); + if (exitValue != 0) { + logger.error(String.format("Failed to unmount backup directory {}", backupDirectory)); + throw new CloudRuntimeException("Failed to unmount the backup directory"); + } + } + + private void deleteTemporaryDirectory(String backupDirectory) { 
+ try { + Files.deleteIfExists(Paths.get(backupDirectory)); + } catch (IOException e) { + logger.error(String.format("Failed to delete backup directory: %s", backupDirectory), e); + throw new CloudRuntimeException("Failed to delete the backup directory"); + } + } + + private List getMountedBackupPaths(String mountDirectory, String backupPath, String backupFile, String backupFileChain) { + LinkedHashSet mountedPaths = new LinkedHashSet<>(); + if (StringUtils.isNotBlank(backupFileChain)) { + for (String chainPath : backupFileChain.split(";")) { + if (StringUtils.isBlank(chainPath)) { + continue; + } + String normalizedPath = chainPath.startsWith("/") ? chainPath.substring(1) : chainPath; + if (!normalizedPath.contains("/") && StringUtils.isNotBlank(backupPath)) { + mountedPaths.add(String.format(FILE_PATH_PLACEHOLDER, String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), normalizedPath)); + } else { + mountedPaths.add(String.format(FILE_PATH_PLACEHOLDER, mountDirectory, normalizedPath)); + } + } + } + if (mountedPaths.isEmpty() && StringUtils.isNotBlank(backupFile)) { + mountedPaths.add(String.format(FILE_PATH_PLACEHOLDER, String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), backupFile)); + } + return new ArrayList<>(mountedPaths); + } + + private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, + String backupRootPath, int backupIndex) { + return replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, backupRootPath, backupIndex, false); + } + + private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, + String backupRootPath, int backupIndex, boolean createTargetVolume) { + if (backupPaths == null || backupPaths.isEmpty()) { + return false; + } + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + 
if (backupPaths.stream().anyMatch(path -> path.endsWith(".rbdiff"))) { + return restoreIncrementalRbdBackupChainToFileVolume(volumePath, backupPaths, timeout, backupRootPath, backupIndex); + } + return replaceFileVolumeWithBackup(volumePath, getFirstExistingBackupPath(backupPaths), timeout); + } + + return replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, createTargetVolume); + } + + private boolean restoreIncrementalRbdBackupChainToFileVolume(String volumePath, List backupPaths, int timeout, String backupRootPath, int backupIndex) { + if (StringUtils.isBlank(backupRootPath)) { + throw new CloudRuntimeException("Unable to locate backup root path for incremental RBD restore"); + } + RbdImageSpec sourceImage = getRbdImageSpecFromMetadata(backupRootPath, backupIndex); + String tempImage = sourceImage.buildTempImageSpec(); + try { + if (!importBackupChainToTemporaryRbd(backupPaths, timeout, sourceImage, tempImage)) { + return false; + } + return convertTemporaryRbdToFileVolume(volumePath, timeout, sourceImage, tempImage); + } finally { + removeTemporaryRbdImage(sourceImage, tempImage, timeout); + } + } + + private String getFirstExistingBackupPath(List backupPaths) { + for (String backupPath : backupPaths) { + if (StringUtils.isNotBlank(backupPath) && Files.exists(Paths.get(backupPath))) { + return backupPath; + } + } + return backupPaths.get(0); + } + + private boolean replaceFileVolumeWithBackup(String volumePath, String backupPath, int timeout) { + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + QemuImg qemu = new QemuImg(timeout * 1000, true, false); + srcBackupFile = new QemuImgFile(backupPath, getBackupFileFormat(backupPath)); + destVolumeFile = new QemuImgFile(volumePath, getFileVolumeFormat(volumePath)); + qemu.convert(srcBackupFile, destVolumeFile); + return true; + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? 
srcBackupFile.getFileName() : null; + String destFilename = destVolumeFile != null ? destVolumeFile.getFileName() : null; + logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + } + + private boolean convertTemporaryRbdToFileVolume(String volumePath, int timeout, RbdImageSpec sourceImage, String tempImage) { + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + QemuImg qemu = new QemuImg(timeout * 1000, true, false); + srcBackupFile = new QemuImgFile(sourceImage.buildQemuUri(tempImage), QemuImg.PhysicalDiskFormat.RAW); + destVolumeFile = new QemuImgFile(volumePath, getFileVolumeFormat(volumePath)); + qemu.convert(srcBackupFile, destVolumeFile); + return true; + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : tempImage; + String destFilename = destVolumeFile != null ? destVolumeFile.getFileName() : volumePath; + logger.error("Failed to convert temporary RBD {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + } + + private QemuImg.PhysicalDiskFormat getBackupFileFormat(String backupPath) { + if (backupPath.endsWith(".raw")) { + return QemuImg.PhysicalDiskFormat.RAW; + } + return QemuImg.PhysicalDiskFormat.QCOW2; + } + + private QemuImg.PhysicalDiskFormat getFileVolumeFormat(String volumePath) { + if (!Files.exists(Paths.get(volumePath))) { + return QemuImg.PhysicalDiskFormat.QCOW2; + } + try { + QemuImg qemu = new QemuImg(0); + Map info = qemu.info(new QemuImgFile(volumePath)); + String format = info.get("file_format"); + if (StringUtils.isNotBlank(format)) { + return QemuImg.PhysicalDiskFormat.valueOf(format.toUpperCase(Locale.ROOT)); + } + } catch (QemuImgException | LibvirtException | IllegalArgumentException e) { + logger.warn("Failed to detect file volume format for path {}. 
Falling back to qcow2.", volumePath, e); + } + return QemuImg.PhysicalDiskFormat.QCOW2; + } + + private boolean replaceRbdVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, boolean createTargetVolume) { + if (backupPaths.stream().anyMatch(path -> path.endsWith(".rbdiff"))) { + return restoreIncrementalRbdBackupChain(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, createTargetVolume); + } + + String backupPath = getFirstExistingBackupPath(backupPaths); + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + String normalizedVolumePath = normalizeRbdVolumePath(volumePath, volumeStoragePool); + if (getBackupFileFormat(backupPath) == QemuImg.PhysicalDiskFormat.RAW) { + return importRawBackupToRbd(volumeStoragePool, normalizedVolumePath, backupPath, timeout, createTargetVolume); + } + + QemuImg qemu; + try { + qemu = new QemuImg(timeout * 1000, true, false); + if (!createTargetVolume) { + KVMPhysicalDisk rdbDisk = volumeStoragePool.getPhysicalDisk(normalizedVolumePath); + logger.debug("Restoring RBD volume: {}", rdbDisk.toString()); + qemu.setSkipTargetVolumeCreation(true); + } + } catch (LibvirtException ex) { + throw new CloudRuntimeException("Failed to create qemu-img command to restore RBD volume with backup", ex); + } + + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + srcBackupFile = new QemuImgFile(backupPath, getBackupFileFormat(backupPath)); + String rbdDestVolumeFile = KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, normalizedVolumePath); + destVolumeFile = new QemuImgFile(rbdDestVolumeFile, QemuImg.PhysicalDiskFormat.RAW); + + logger.debug("Starting convert backup {} to RBD volume {}", backupPath, normalizedVolumePath); + qemu.convert(srcBackupFile, destVolumeFile); + logger.debug("Successfully converted backup {} to RBD volume {}", backupPath, 
normalizedVolumePath); + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; + String destFilename = destVolumeFile != null ? destVolumeFile.getFileName() : null; + logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + + return true; + } + + private boolean importRawBackupToRbd(KVMStoragePool volumeStoragePool, String volumePath, String backupPath, int timeout, boolean createTargetVolume) { + if (!createTargetVolume && !deleteExistingRbdVolumeIfPresent(volumeStoragePool, volumePath)) { + logger.error("Failed to delete existing RBD volume {} before raw import", volumePath); + return false; + } + + String importCommand = buildRbdImportCommand(volumeStoragePool, backupPath, volumePath); + if (Script.runSimpleBashScriptForExitValue(importCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import raw backup {} into volume {}", backupPath, volumePath); + return false; + } + return true; + } + + private boolean deleteExistingRbdVolumeIfPresent(KVMStoragePool volumeStoragePool, String volumePath) { + try { + return volumeStoragePool.deletePhysicalDisk(volumePath, Storage.ImageFormat.RAW); + } catch (CloudRuntimeException e) { + if (isMissingRbdImageError(e)) { + logger.info("Skipping deletion for missing RBD volume {} before restore", volumePath); + return true; + } + throw e; + } + } + + private boolean isMissingRbdImageError(CloudRuntimeException e) { + String message = e.getMessage(); + return StringUtils.containsIgnoreCase(message, "Failed to open image") + && StringUtils.containsIgnoreCase(message, "No such file or directory"); + } + + private boolean restoreIncrementalRbdBackupChain(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, + int timeout, boolean createTargetVolume) { + if (backupPaths.isEmpty() || 
!backupPaths.get(0).endsWith(".raw")) { + throw new CloudRuntimeException("Incremental RBD backup chain is missing the base full backup"); + } + + String normalizedVolumePath = normalizeRbdVolumePath(volumePath, storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid())); + if (!replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, normalizedVolumePath, List.of(backupPaths.get(0)), timeout, createTargetVolume)) { + return false; + } + + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + List restoreSnapshots = new ArrayList<>(); + try { + Map baseMetadata = readRbdBackupMetadata(backupPaths.get(0)); + String baseCheckpoint = baseMetadata.get("checkpoint_name"); + if (StringUtils.isNotBlank(baseCheckpoint)) { + if (!ensureRbdSnapshotExists(volumeStoragePool, normalizedVolumePath, baseCheckpoint, timeout)) { + return false; + } + restoreSnapshots.add(baseCheckpoint); + } + + for (int index = 1; index < backupPaths.size(); index++) { + String backupPath = backupPaths.get(index); + if (!backupPath.endsWith(".rbdiff")) { + continue; + } + + Map metadata = readRbdBackupMetadata(backupPath); + String parentCheckpoint = metadata.get("parent_checkpoint_name"); + String checkpoint = metadata.get("checkpoint_name"); + if (StringUtils.isBlank(parentCheckpoint) || StringUtils.isBlank(checkpoint)) { + throw new CloudRuntimeException(String.format("RBD incremental backup metadata is incomplete for %s", backupPath)); + } + if (!rbdSnapshotExists(volumeStoragePool, normalizedVolumePath, parentCheckpoint, timeout)) { + throw new CloudRuntimeException(String.format("Required parent snapshot %s is missing on volume %s", parentCheckpoint, normalizedVolumePath)); + } + + String importDiffCommand = buildRbdImportDiffCommand(volumeStoragePool, backupPath, normalizedVolumePath); + if (Script.runSimpleBashScriptForExitValue(importDiffCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import 
RBD diff {} into volume {}", backupPath, normalizedVolumePath); + return false; + } + + if (!ensureRbdSnapshotExists(volumeStoragePool, normalizedVolumePath, checkpoint, timeout)) { + return false; + } + restoreSnapshots.add(checkpoint); + } + return true; + } finally { + cleanupRbdRestoreSnapshots(volumeStoragePool, normalizedVolumePath, restoreSnapshots, timeout); + } + } + + private String normalizeRbdVolumePath(String volumePath, KVMStoragePool storagePool) { + if (StringUtils.isBlank(volumePath)) { + return volumePath; + } + String normalized = volumePath; + String poolPath = storagePool.getSourceDir(); + if (StringUtils.isNotBlank(poolPath)) { + String poolPrefix = poolPath + "/"; + if (normalized.startsWith(poolPrefix)) { + normalized = normalized.substring(poolPrefix.length()); + } + } + if (normalized.startsWith("/")) { + normalized = normalized.substring(normalized.lastIndexOf('/') + 1); + } + return normalized; + } + + private String buildRbdImportDiffCommand(KVMStoragePool storagePool, String backupPath, String volumePath) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" import-diff ").append(backupPath).append(" ").append(volumePath); + return command.toString(); + } + + private String buildRbdImportCommand(KVMStoragePool storagePool, String backupPath, String volumePath) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + 
if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" import ").append(backupPath).append(" ").append(volumePath); + return command.toString(); + } + + private String formatRbdMonHosts(String hosts, int port) { + String[] hostValues = hosts.split(","); + List formattedHosts = new ArrayList<>(); + for (String host : hostValues) { + String normalizedHost = host.replace("[", "").replace("]", "").trim(); + if (StringUtils.isBlank(normalizedHost)) { + continue; + } + formattedHosts.add(port > 0 ? normalizedHost + ":" + port : normalizedHost); + } + return String.join(",", formattedHosts); + } + + private boolean importBackupChainToTemporaryRbd(List backupPaths, int timeout, RbdImageSpec sourceImage, String tempImage) { + if (backupPaths.isEmpty() || !backupPaths.get(0).endsWith(".raw")) { + throw new CloudRuntimeException("Incremental RBD backup chain is missing the base full backup"); + } + String importCommand = sourceImage.buildRbdCommand("import", quote(backupPaths.get(0)), quote(tempImage)); + if (Script.runSimpleBashScriptForExitValue(importCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import base RBD backup {} into temporary image {}", backupPaths.get(0), tempImage); + return false; + } + + List restoreSnapshots = new ArrayList<>(); + try { + Map baseMetadata = readRbdBackupMetadata(backupPaths.get(0)); + String baseCheckpoint = baseMetadata.get("checkpoint_name"); + if (StringUtils.isNotBlank(baseCheckpoint)) { + if (!ensureRbdSnapshotExists(sourceImage, tempImage, baseCheckpoint, timeout)) { + return false; + } + restoreSnapshots.add(baseCheckpoint); + } + + for (int index = 1; index < backupPaths.size(); index++) { + String backupPath = backupPaths.get(index); + if (!backupPath.endsWith(".rbdiff")) { + continue; + } + 
Map metadata = readRbdBackupMetadata(backupPath); + String parentCheckpoint = metadata.get("parent_checkpoint_name"); + String checkpoint = metadata.get("checkpoint_name"); + if (StringUtils.isBlank(parentCheckpoint) || StringUtils.isBlank(checkpoint)) { + throw new CloudRuntimeException(String.format("RBD incremental backup metadata is incomplete for %s", backupPath)); + } + if (!rbdSnapshotExists(sourceImage, tempImage, parentCheckpoint, timeout)) { + throw new CloudRuntimeException(String.format("Required parent snapshot %s is missing on temporary image %s", parentCheckpoint, tempImage)); + } + String importDiffCommand = sourceImage.buildRbdCommand("import-diff", quote(backupPath), quote(tempImage)); + if (Script.runSimpleBashScriptForExitValue(importDiffCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import RBD diff {} into temporary image {}", backupPath, tempImage); + return false; + } + if (!ensureRbdSnapshotExists(sourceImage, tempImage, checkpoint, timeout)) { + return false; + } + restoreSnapshots.add(checkpoint); + } + return true; + } finally { + cleanupRbdRestoreSnapshots(sourceImage, tempImage, restoreSnapshots, timeout); + } + } + + private Map readRbdBackupMetadata(String backupPath) { + java.nio.file.Path metadataPath = Paths.get(backupPath).getParent().resolve("rbd-backup.meta"); + if (!Files.exists(metadataPath)) { + throw new CloudRuntimeException(String.format("RBD backup metadata file not found: %s", metadataPath)); + } + try { + return Files.readAllLines(metadataPath).stream() + .map(String::trim) + .filter(line -> !line.isEmpty() && line.contains("=")) + .map(line -> line.split("=", 2)) + .collect(java.util.stream.Collectors.toMap(parts -> parts[0], parts -> parts[1], (left, right) -> right)); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Failed to read RBD backup metadata: %s", metadataPath), e); + } + } + + private boolean ensureRbdSnapshotExists(KVMStoragePool storagePool, String 
volumePath, String snapshotName, int timeout) { + if (rbdSnapshotExists(storagePool, volumePath, snapshotName, timeout)) { + return true; + } + String createSnapshotCommand = buildRbdSnapshotCommand(storagePool, "snap create", volumePath + "@" + snapshotName); + if (Script.runSimpleBashScriptForExitValue(createSnapshotCommand, timeout * 1000, false) != 0) { + logger.error("Failed to create RBD snapshot {} on volume {}", snapshotName, volumePath); + return false; + } + return true; + } + + private boolean ensureRbdSnapshotExists(RbdImageSpec imageSpec, String image, String snapshotName, int timeout) { + if (rbdSnapshotExists(imageSpec, image, snapshotName, timeout)) { + return true; + } + String createSnapshotCommand = imageSpec.buildRbdCommand("snap", "create", quote(image + "@" + snapshotName)); + if (Script.runSimpleBashScriptForExitValue(createSnapshotCommand, timeout * 1000, false) != 0) { + logger.error("Failed to create RBD snapshot {} on image {}", snapshotName, image); + return false; + } + return true; + } + + private boolean rbdSnapshotExists(KVMStoragePool storagePool, String volumePath, String snapshotName, int timeout) { + String existsCommand = buildRbdSnapshotCommand(storagePool, "snap ls", volumePath) + " | awk 'NR>1 {print $2}' | grep -Fx " + quote(snapshotName); + return Script.runSimpleBashScriptForExitValue(existsCommand, timeout * 1000, false) == 0; + } + + private boolean rbdSnapshotExists(RbdImageSpec imageSpec, String image, String snapshotName, int timeout) { + String existsCommand = imageSpec.buildRbdCommand("snap", "ls", quote(image)) + " | awk 'NR>1 {print $2}' | grep -Fx " + quote(snapshotName); + return Script.runSimpleBashScriptForExitValue(existsCommand, timeout * 1000, false) == 0; + } + + private void cleanupRbdRestoreSnapshots(KVMStoragePool storagePool, String volumePath, List snapshotNames, int timeout) { + for (int index = snapshotNames.size() - 1; index >= 0; index--) { + String snapshotName = snapshotNames.get(index); + 
String removeSnapshotCommand = buildRbdSnapshotCommand(storagePool, "snap rm", volumePath + "@" + snapshotName); + Script.runSimpleBashScriptForExitValue(removeSnapshotCommand, timeout * 1000, false); + } + } + + private void cleanupRbdRestoreSnapshots(RbdImageSpec imageSpec, String image, List snapshotNames, int timeout) { + for (int index = snapshotNames.size() - 1; index >= 0; index--) { + String snapshotName = snapshotNames.get(index); + String removeSnapshotCommand = imageSpec.buildRbdCommand("snap", "rm", quote(image + "@" + snapshotName)); + Script.runSimpleBashScriptForExitValue(removeSnapshotCommand, timeout * 1000, false); + } + } + + private String buildRbdSnapshotCommand(KVMStoragePool storagePool, String action, String target) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" ").append(action).append(" ").append(target); + return command.toString(); + } + + private void removeTemporaryRbdImage(RbdImageSpec sourceImage, String tempImage, int timeout) { + String removeCommand = sourceImage.buildRbdCommand("rm", quote(tempImage)); + Script.runSimpleBashScriptForExitValue(removeCommand, timeout * 1000, false); + } + + private RbdImageSpec getRbdImageSpecFromMetadata(String backupRootPath, int backupIndex) { + java.nio.file.Path metadataPath = Paths.get(backupRootPath, "rbd-backup.meta"); + if (!Files.exists(metadataPath)) { + throw new CloudRuntimeException(String.format("RBD backup metadata file not found: %s", metadataPath)); + } + try { + Map metadata = Files.readAllLines(metadataPath).stream() + 
.map(String::trim) + .filter(line -> !line.isEmpty() && line.contains("=")) + .map(line -> line.split("=", 2)) + .collect(java.util.stream.Collectors.toMap(parts -> parts[0], parts -> parts[1], (left, right) -> right)); + String diskPaths = metadata.get("disk_paths"); + if (StringUtils.isBlank(diskPaths)) { + throw new CloudRuntimeException("RBD backup metadata does not contain disk_paths"); + } + List values = Arrays.asList(diskPaths.split(",")); + if (backupIndex >= values.size()) { + throw new CloudRuntimeException(String.format("RBD backup metadata does not contain disk path for index %d", backupIndex)); + } + return RbdImageSpec.fromUri(values.get(backupIndex)); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Failed to read RBD backup metadata: %s", metadataPath), e); + } + } + + private String quote(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } + + private static final class RbdImageSpec { + private final String image; + private final String monHost; + private final String user; + private final String key; + + private RbdImageSpec(String image, String monHost, String user, String key) { + this.image = image; + this.monHost = monHost; + this.user = user; + this.key = key; + } + + private static RbdImageSpec fromUri(String uri) { + String image = null; + String monHost = null; + String user = null; + String key = null; + if (uri.startsWith("rbd:")) { + String payload = uri.substring("rbd:".length()); + image = payload.contains(":") ? 
payload.substring(0, payload.indexOf(':')) : payload; + monHost = extract(uri, ":mon_host=([^:]*)"); + if (monHost != null) { + monHost = monHost.replace("\\;", ",").replace("\\:", ":"); + } + user = extract(uri, ":id=([^:]*)"); + key = extract(uri, ":key=([^:]*)"); + } else if (uri.startsWith("rbd/")) { + image = uri; + } + if (StringUtils.isBlank(image)) { + throw new CloudRuntimeException(String.format("Unable to parse RBD disk path: %s", uri)); + } + return new RbdImageSpec(image, monHost, user, key); + } + + private static String extract(String value, String regex) { + java.util.regex.Matcher matcher = java.util.regex.Pattern.compile(regex).matcher(value); + return matcher.find() ? matcher.group(1) : null; + } + + private String buildTempImageSpec() { + return String.format("%s-csrestore-%s", image, RandomStringUtils.randomAlphanumeric(8).toLowerCase(Locale.ROOT)); + } + + private String buildRbdCommand(String action, String source, String target) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(monHost)) { + command.append(" -m ").append(quoteArg(monHost)); + } + if (StringUtils.isNotBlank(user)) { + command.append(" --id ").append(quoteArg(user)); + } + if (StringUtils.isNotBlank(key)) { + command.append(" --key ").append(quoteArg(key)); + } + command.append(" ").append(action); + if (StringUtils.isNotBlank(source)) { + command.append(" ").append(source); + } + if (StringUtils.isNotBlank(target)) { + command.append(" ").append(target); + } + return command.toString(); + } + + private String buildRbdCommand(String action, String target) { + return buildRbdCommand(action, null, target); + } + + private String buildQemuUri(String imageSpec) { + StringBuilder uri = new StringBuilder("rbd:").append(imageSpec); + if (StringUtils.isNotBlank(monHost)) { + uri.append(":mon_host=").append(monHost.replace(",", "\\;")); + } + if (StringUtils.isNotBlank(user)) { + uri.append(":id=").append(user); + } + if (StringUtils.isNotBlank(key)) { 
+ uri.append(":key=").append(key); + } + return uri.toString(); + } + + private String quoteArg(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } + } + + private boolean attachVolumeToVm(KVMStoragePoolManager storagePoolMgr, String vmName, PrimaryDataStoreTO volumePool, String volumePath, String cacheMode) { + String deviceToAttachDiskTo = getDeviceToAttachDisk(vmName); + int exitValue; + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_QCOW2_DISK_COMMAND, vmName, volumePath, deviceToAttachDiskTo)); + } else { + String xmlForRbdDisk = getXmlForRbdDisk(storagePoolMgr, volumePool, volumePath, deviceToAttachDiskTo, cacheMode); + logger.debug("RBD disk xml to attach: {}", xmlForRbdDisk); + exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_RBD_DISK_XML_COMMAND, vmName, xmlForRbdDisk)); + } + return exitValue == 0; + } + + private String getDeviceToAttachDisk(String vmName) { + String currentDevice = Script.runSimpleBashScript(String.format(CURRRENT_DEVICE, vmName)); + char lastChar = currentDevice.charAt(currentDevice.length() - 1); + char incrementedChar = (char) (lastChar + 1); + return currentDevice.substring(0, currentDevice.length() - 1) + incrementedChar; + } + + private String getXmlForRbdDisk(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String deviceToAttachDiskTo, String cacheMode) { + StringBuilder diskBuilder = new StringBuilder(); + diskBuilder.append("\n\n"); + + diskBuilder.append(" \n"); + + diskBuilder.append("\n"); + for (String sourceHost : volumePool.getHost().split(",")) { + diskBuilder.append("\n"); + } + diskBuilder.append("\n"); + String authUserName = null; + final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + if (primaryPool != null) { + authUserName = primaryPool.getAuthUserName(); + } + if 
(StringUtils.isNotBlank(authUserName)) { + diskBuilder.append("\n"); + diskBuilder.append("\n"); + diskBuilder.append("\n"); + } + diskBuilder.append("\n"); + diskBuilder.append("\n"); + return diskBuilder.toString(); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasTakeBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasTakeBackupCommandWrapper.java new file mode 100644 index 000000000000..4147c95b2ffd --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasTakeBackupCommandWrapper.java @@ -0,0 +1,65 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackNasTakeBackupCommand; + +import java.util.List; + +@ResourceWrapper(handles = AblestackNasTakeBackupCommand.class) +public class LibvirtAblestackNasTakeBackupCommandWrapper extends CommandWrapper { + @Override + public Answer execute(AblestackNasTakeBackupCommand command, LibvirtComputingResource libvirtComputingResource) { + logger.info("LibvirtTakeBackupCommandWrapper entering execute for vm=[{}], backupPath=[{}], backupType=[{}]", + command.getVmName(), command.getBackupPath(), command.getBackupType()); + LibvirtAblestackNasBackupHelper backupHelper = new LibvirtAblestackNasBackupHelper(libvirtComputingResource); + List diskPaths = backupHelper.resolveDiskPaths(command.getVolumePools(), command.getVolumePaths()); + logger.info("LibvirtTakeBackupCommandWrapper invoking helper for vm=[{}], diskPaths=[{}]", + command.getVmName(), diskPaths); + Pair result = backupHelper.executeBackup(command); + logger.info("LibvirtTakeBackupCommandWrapper helper returned for vm=[{}], resultCode=[{}], details=[{}]", + command.getVmName(), result.first(), result.second()); + + if (result.first() != 0) { + logger.debug("Failed to take VM backup: " + result.second()); + BackupAnswer answer = new BackupAnswer(command, false, result.second().trim()); + if (result.first() == LibvirtAblestackNasBackupHelper.EXIT_CLEANUP_FAILED) { + logger.debug("Backup cleanup failed"); + answer.setNeedsCleanup(true); + } + return answer; + } + + BackupAnswer answer = new BackupAnswer(command, true, result.second().trim()); + try { + answer.setSize(backupHelper.parseBackupSize(result.second(), diskPaths)); + } catch (RuntimeException 
e) { + logger.warn("Failed to parse NAS backup size for vm=[{}], details=[{}]", + command.getVmName(), result.second(), e); + } + return answer; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultRestoreBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultRestoreBackupCommandWrapper.java deleted file mode 100644 index 5ffce02fd138..000000000000 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultRestoreBackupCommandWrapper.java +++ /dev/null @@ -1,319 +0,0 @@ -// -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-// - -package com.cloud.hypervisor.kvm.resource.wrapper; - -import com.cloud.agent.api.Answer; -import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; -import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; -import com.cloud.hypervisor.kvm.storage.KVMStoragePool; -import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; -import com.cloud.resource.CommandWrapper; -import com.cloud.resource.ResourceWrapper; -import com.cloud.storage.Storage; -import com.cloud.utils.Pair; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; -import com.cloud.vm.VirtualMachine; -import org.apache.cloudstack.backup.BackupAnswer; -import org.apache.cloudstack.backup.CommvaultRestoreBackupCommand; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.utils.qemu.QemuImg; -import org.apache.cloudstack.utils.qemu.QemuImgException; -import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; -import org.libvirt.LibvirtException; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Locale; -import java.util.Objects; - -@ResourceWrapper(handles = CommvaultRestoreBackupCommand.class) -public class LibvirtCommvaultRestoreBackupCommandWrapper extends CommandWrapper { - private static final String FILE_PATH_PLACEHOLDER = "%s/%s"; - private static final String ATTACH_QCOW2_DISK_COMMAND = " virsh attach-disk %s %s %s --driver qemu --subdriver qcow2 --cache none"; - private static final String ATTACH_RBD_DISK_XML_COMMAND = " virsh attach-device %s /dev/stdin < backedVolumeUUIDs = command.getBackupVolumesUUIDs(); - List restoreVolumePools = command.getRestoreVolumePools(); - List restoreVolumePaths = command.getRestoreVolumePaths(); - String restoreVolumeUuid = command.getRestoreVolumeUUID(); - int timeout = command.getWait(); - String cacheMode = command.getCacheMode(); - String 
hostName = command.getHostName(); - KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr(); - - String newVolumeId = null; - try { - if (hostName != null) { - fetchBackupFile(hostName, backupPath); - } - if (Objects.isNull(vmExists)) { - PrimaryDataStoreTO volumePool = restoreVolumePools.get(0); - String volumePath = restoreVolumePaths.get(0); - int lastIndex = volumePath.lastIndexOf("/"); - newVolumeId = volumePath.substring(lastIndex + 1); - restoreVolume(storagePoolMgr, backupPath, volumePool, volumePath, diskType, restoreVolumeUuid, - new Pair<>(vmName, command.getVmState()), timeout, cacheMode); - } else if (Boolean.TRUE.equals(vmExists)) { - restoreVolumesOfExistingVM(storagePoolMgr, restoreVolumePools, restoreVolumePaths, backedVolumeUUIDs, backupPath, timeout); - } else { - restoreVolumesOfDestroyedVMs(storagePoolMgr, restoreVolumePools, restoreVolumePaths, vmName, backupPath, timeout); - } - } catch (CloudRuntimeException e) { - String errorMessage = e.getMessage() != null ? 
e.getMessage() : ""; - return new BackupAnswer(command, false, errorMessage); - } - - return new BackupAnswer(command, true, newVolumeId); - } - - private void verifyBackupFile(String backupPath, String volUuid) { - if (!checkBackupPathExists(backupPath)) { - throw new CloudRuntimeException(String.format("Backup file for the volume [%s] does not exist.", volUuid)); - } - if (!checkBackupFileImage(backupPath)) { - throw new CloudRuntimeException(String.format("Backup qcow2 file for the volume [%s] is corrupt.", volUuid)); - } - } - - private void restoreVolumesOfExistingVM(KVMStoragePoolManager storagePoolMgr, List restoreVolumePools, List restoreVolumePaths, List backedVolumesUUIDs, - String backupPath, int timeout) { - String diskType = "root"; - try { - for (int idx = 0; idx < restoreVolumePaths.size(); idx++) { - PrimaryDataStoreTO restoreVolumePool = restoreVolumePools.get(idx); - String restoreVolumePath = restoreVolumePaths.get(idx); - String backupVolumeUuid = backedVolumesUUIDs.get(idx); - Pair bkpPathAndVolUuid = getBackupPath(null, backupPath, diskType, backupVolumeUuid); - diskType = "datadisk"; - verifyBackupFile(bkpPathAndVolUuid.first(), bkpPathAndVolUuid.second()); - if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, restoreVolumePath, bkpPathAndVolUuid.first(), timeout)) { - throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", bkpPathAndVolUuid.second())); - } - } - } finally { - deleteBackupDirectory(backupPath); - } - } - - private void restoreVolumesOfDestroyedVMs(KVMStoragePoolManager storagePoolMgr, List volumePools, List volumePaths, String vmName, String backupPath, int timeout) { - String diskType = "root"; - try { - for (int i = 0; i < volumePaths.size(); i++) { - PrimaryDataStoreTO volumePool = volumePools.get(i); - String volumePath = volumePaths.get(i); - Pair bkpPathAndVolUuid = getBackupPath(volumePath, backupPath, diskType, null); - diskType = "datadisk"; - 
verifyBackupFile(bkpPathAndVolUuid.first(), bkpPathAndVolUuid.second()); - if (!replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, bkpPathAndVolUuid.first(), timeout)) { - throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", bkpPathAndVolUuid.second())); - } - } - } finally { - deleteBackupDirectory(backupPath); - } - } - - private void restoreVolume(KVMStoragePoolManager storagePoolMgr, String backupPath, PrimaryDataStoreTO volumePool, String volumePath, String diskType, String volumeUUID, - Pair vmNameAndState, int timeout, String cacheMode) { - Pair bkpPathAndVolUuid; - try { - bkpPathAndVolUuid = getBackupPath(volumePath, backupPath, diskType, volumeUUID); - verifyBackupFile(bkpPathAndVolUuid.first(), bkpPathAndVolUuid.second()); - if (!replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, bkpPathAndVolUuid.first(), timeout, true)) { - throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", bkpPathAndVolUuid.second())); - } - if (VirtualMachine.State.Running.equals(vmNameAndState.second())) { - if (!attachVolumeToVm(storagePoolMgr, vmNameAndState.first(), volumePool, volumePath, cacheMode)) { - throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first())); - } - } - } finally { - deleteBackupDirectory(backupPath); - } - } - - private void deleteBackupDirectory(String backupDirectory) { - try { - FileUtils.deleteDirectory(new File(backupDirectory)); - } catch (IOException e) { - logger.error(String.format("Failed to delete backup directory: %s", backupDirectory), e); - throw new CloudRuntimeException("Failed to delete the backup directory"); - } - } - - private Pair getBackupPath(String volumePath, String backupPath, String diskType, String volumeUuid) { - String volUuid = Objects.isNull(volumeUuid) ? 
volumePath.substring(volumePath.lastIndexOf(File.separator) + 1) : volumeUuid; - String backupFileName = String.format("%s.%s.qcow2", diskType.toLowerCase(Locale.ROOT), volUuid); - backupPath = String.format(FILE_PATH_PLACEHOLDER, backupPath, backupFileName); - return new Pair<>(backupPath, volUuid); - } - - private boolean checkBackupFileImage(String backupPath) { - int exitValue = Script.runSimpleBashScriptForExitValue(String.format("qemu-img check %s", backupPath)); - return exitValue == 0; - } - - private boolean checkBackupPathExists(String backupPath) { - int exitValue = Script.runSimpleBashScriptForExitValue(String.format("ls %s", backupPath)); - return exitValue == 0; - } - - private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String backupPath, int timeout) { - return replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPath, timeout, false); - } - - private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String backupPath, int timeout, boolean createTargetVolume) { - if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { - int exitValue = Script.runSimpleBashScriptForExitValue(String.format(RSYNC_COMMAND, backupPath, volumePath)); - return exitValue == 0; - } - - return replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPath, timeout, createTargetVolume); - } - - private boolean replaceRbdVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String backupPath, int timeout, boolean createTargetVolume) { - KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); - QemuImg qemu; - try { - qemu = new QemuImg(timeout * 1000, true, false); - if (!createTargetVolume) { - KVMPhysicalDisk rdbDisk = volumeStoragePool.getPhysicalDisk(volumePath); - 
logger.debug("Restoring RBD volume: {}", rdbDisk.toString()); - qemu.setSkipTargetVolumeCreation(true); - } - } catch (LibvirtException ex) { - throw new CloudRuntimeException("Failed to create qemu-img command to restore RBD volume with backup", ex); - } - - QemuImgFile srcBackupFile = null; - QemuImgFile destVolumeFile = null; - try { - srcBackupFile = new QemuImgFile(backupPath, QemuImg.PhysicalDiskFormat.QCOW2); - String rbdDestVolumeFile = KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, volumePath); - destVolumeFile = new QemuImgFile(rbdDestVolumeFile, QemuImg.PhysicalDiskFormat.RAW); - - logger.debug("Starting convert backup {} to RBD volume {}", backupPath, volumePath); - qemu.convert(srcBackupFile, destVolumeFile); - logger.debug("Successfully converted backup {} to RBD volume {}", backupPath, volumePath); - } catch (QemuImgException | LibvirtException e) { - String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; - String destFilename = destVolumeFile != null ? 
destVolumeFile.getFileName() : null; - logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); - return false; - } - - return true; - } - - private boolean attachVolumeToVm(KVMStoragePoolManager storagePoolMgr, String vmName, PrimaryDataStoreTO volumePool, String volumePath, String cacheMode) { - String deviceToAttachDiskTo = getDeviceToAttachDisk(vmName); - int exitValue; - if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { - exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_QCOW2_DISK_COMMAND, vmName, volumePath, deviceToAttachDiskTo)); - } else { - String xmlForRbdDisk = getXmlForRbdDisk(storagePoolMgr, volumePool, volumePath, deviceToAttachDiskTo, cacheMode); - logger.debug("RBD disk xml to attach: {}", xmlForRbdDisk); - exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_RBD_DISK_XML_COMMAND, vmName, xmlForRbdDisk)); - } - return exitValue == 0; - } - - private String getDeviceToAttachDisk(String vmName) { - String currentDevice = Script.runSimpleBashScript(String.format(CURRRENT_DEVICE, vmName)); - char lastChar = currentDevice.charAt(currentDevice.length() - 1); - char incrementedChar = (char) (lastChar + 1); - return currentDevice.substring(0, currentDevice.length() - 1) + incrementedChar; - } - - private String getXmlForRbdDisk(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String deviceToAttachDiskTo, String cacheMode) { - StringBuilder diskBuilder = new StringBuilder(); - diskBuilder.append("\n\n"); - - diskBuilder.append(" \n"); - - diskBuilder.append("\n"); - for (String sourceHost : volumePool.getHost().split(",")) { - diskBuilder.append("\n"); - } - diskBuilder.append("\n"); - String authUserName = null; - final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); - if (primaryPool != null) { - authUserName = primaryPool.getAuthUserName(); 
- } - if (StringUtils.isNotBlank(authUserName)) { - diskBuilder.append("\n"); - diskBuilder.append("\n"); - diskBuilder.append("\n"); - } - diskBuilder.append("\n"); - diskBuilder.append("\n"); - return diskBuilder.toString(); - } - - private void fetchBackupFile(String hostName, String backupPath) { - int mkdirExit = Script.runSimpleBashScriptForExitValue(String.format(MKDIR_P, backupPath)); - if (mkdirExit != 0) { - throw new CloudRuntimeException(String.format("Failed to create local backup directory: %s", backupPath)); - } - - String cmd = String.format(RSYNC_DIR_FROM_REMOTE, hostName, backupPath, backupPath); - logger.debug("Fetching commvault backup directory from remote host. cmd={}", cmd); - - int exit = Script.runSimpleBashScriptForExitValue(cmd); - if (exit != 0) { - throw new CloudRuntimeException(String.format( - "Failed to fetch backup directory from remote host [%s]. remotePath=[%s], localPath=[%s]", - hostName, backupPath, backupPath)); - } - } -} \ No newline at end of file diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultTakeBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultTakeBackupCommandWrapper.java deleted file mode 100644 index 277d38e8573d..000000000000 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultTakeBackupCommandWrapper.java +++ /dev/null @@ -1,91 +0,0 @@ -// -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -// - -package com.cloud.hypervisor.kvm.resource.wrapper; - -import com.cloud.agent.api.Answer; -import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; -import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; -import com.cloud.hypervisor.kvm.storage.KVMStoragePool; -import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; -import com.cloud.resource.CommandWrapper; -import com.cloud.resource.ResourceWrapper; -import com.cloud.storage.Storage; -import com.cloud.utils.Pair; -import com.cloud.utils.script.Script; -import org.apache.cloudstack.backup.BackupAnswer; -import org.apache.cloudstack.backup.CommvaultTakeBackupCommand; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; - -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -@ResourceWrapper(handles = CommvaultTakeBackupCommand.class) -public class LibvirtCommvaultTakeBackupCommandWrapper extends CommandWrapper { - private static final Integer EXIT_CLEANUP_FAILED = 20; - @Override - public Answer execute(CommvaultTakeBackupCommand command, LibvirtComputingResource libvirtComputingResource) { - final String vmName = command.getVmName(); - final String backupPath = command.getBackupPath(); - List volumePools = command.getVolumePools(); - final List volumePaths = command.getVolumePaths(); - KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); - - List diskPaths = new ArrayList<>(); - if (Objects.nonNull(volumePaths)) { - for (int idx = 0; idx < volumePaths.size(); idx++) { - PrimaryDataStoreTO volumePool = 
volumePools.get(idx); - String volumePath = volumePaths.get(idx); - if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { - diskPaths.add(volumePath); - } else { - KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); - String rbdDestVolumeFile = KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, volumePath); - diskPaths.add(rbdDestVolumeFile); - } - } - } - - List commands = new ArrayList<>(); - commands.add(new String[]{ - libvirtComputingResource.getCvtBackupPath(), - "-o", "backup", - "-v", vmName, - "-p", backupPath, - "-q", command.getQuiesce() != null && command.getQuiesce() ? "true" : "false", - "-d", diskPaths.isEmpty() ? "" : String.join(",", diskPaths) - }); - - Pair result = Script.executePipedCommands(commands, libvirtComputingResource.getCmdsTimeout()); - - if (result.first() != 0) { - logger.debug("Failed to take VM backup"); - BackupAnswer answer = new BackupAnswer(command, false, null); - if (result.first() == EXIT_CLEANUP_FAILED) { - logger.debug("Backup cleanup failed"); - answer.setNeedsCleanup(true); - } - return answer; - } - - BackupAnswer answer = new BackupAnswer(command, true, "success"); - return answer; - } -} diff --git a/plugins/pom.xml b/plugins/pom.xml index 8950cda479bb..6f7c97ca70dc 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -63,7 +63,8 @@ backup/dummy backup/networker backup/nas - backup/commvault + backup/ablestack-nas + backup/ablestack-commvault backup/bx ca/root-ca diff --git a/scripts/vm/hypervisor/kvm/ablestack_cvtbackup.sh b/scripts/vm/hypervisor/kvm/ablestack_cvtbackup.sh new file mode 100644 index 000000000000..43dbc0edee1e --- /dev/null +++ b/scripts/vm/hypervisor/kvm/ablestack_cvtbackup.sh @@ -0,0 +1,481 @@ +#!/usr/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set -eo pipefail + +# CloudStack B&R Commvault Backup and Recovery Tool for KVM + +# TODO: do libvirt/logging etc checks + +### Declare variables ### + +OP="" +VM="" +BACKUP_DIR="" +DISK_PATHS="" +QUIESCE="" +BACKUP_TYPE="FULL" +CHECKPOINT_NAME="" +PARENT_BACKUP_DIR="" +PARENT_CHECKPOINT_NAME="" +PARENT_CHECKPOINT_PATH="" +BACKUP_FILES="" +FORCED="false" +logFile="/var/log/cloudstack/agent/agent.log" + +EXIT_CLEANUP_FAILED=20 + +log() { + [[ "$verb" -eq 1 ]] && builtin echo "$@" + if [[ "$1" == "-ne" || "$1" == "-e" || "$1" == "-n" ]]; then + builtin echo -e "$(date '+%Y-%m-%d %H-%M-%S>')" "${@: 2}" >> "$logFile" + else + builtin echo "$(date '+%Y-%m-%d %H-%M-%S>')" "$@" >> "$logFile" + fi +} + +vercomp() { + local IFS=. + local i ver1=($1) ver2=($3) + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]]; then + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})); then + return 0 + elif ((10#${ver1[i]} < 10#${ver2[i]})); then + return 2 + fi + done + return 0 +} + +sanity_checks() { + hvVersion=$(virsh version | grep hypervisor | awk '{print $(NF)}') + libvVersion=$(virsh version | grep libvirt | awk '{print $(NF)}' | tail -n 1) + apiVersion=$(virsh version | grep API | awk '{print $(NF)}') + + vercomp "$hvVersion" ">=" "4.2.0" + hvStatus=$? + vercomp "$libvVersion" ">=" "7.2.0" + libvStatus=$? + + if [[ $hvStatus -eq 0 && $libvStatus -eq 0 ]]; then + log -ne "Success... 
[ QEMU: $hvVersion Libvirt: $libvVersion apiVersion: $apiVersion ]" + else + echo "Failure... Your QEMU version $hvVersion or libvirt version $libvVersion is unsupported. Consider upgrading to the required minimum version of QEMU: 4.2.0 and Libvirt: 7.2.0" + exit 1 + fi +} + +cleanup() { + local status=0 + rm -rf "$dest" || { echo "Failed to delete $dest"; status=1; } + if [[ $status -ne 0 ]]; then + echo "Backup cleanup failed" + exit $EXIT_CLEANUP_FAILED + fi +} + +split_csv() { + tr ',' '\n' <<< "$1" +} + +is_rbd_disk_path() { + local disk_path="$1" + [[ "$disk_path" == rbd:* || "$disk_path" == rbd/* ]] +} + +get_backup_file_by_index() { + local index="$1" + local fallback="$2" + if [[ -z "$BACKUP_FILES" ]]; then + echo "$fallback" + return + fi + local current=0 + while IFS= read -r value; do + if [[ "$current" -eq "$index" ]]; then + echo "$value" + return + fi + current=$((current + 1)) + done < <(split_csv "$BACKUP_FILES") + echo "$fallback" +} + +dump_checkpoint_xml() { + local vm_name="$1" + if [[ -n "$CHECKPOINT_NAME" ]]; then + virsh -c qemu:///system checkpoint-dumpxml --domain "$vm_name" --checkpointname "$CHECKPOINT_NAME" --no-domain > "$dest/checkpoints/$CHECKPOINT_NAME.xml" 2>/dev/null || true + fi +} + +redefine_checkpoint_if_needed() { + local vm_name="$1" + local checkpoint_file="$2" + if [[ -z "$PARENT_CHECKPOINT_NAME" || -z "$checkpoint_file" || ! -f "$checkpoint_file" ]]; then + return + fi + if virsh -c qemu:///system checkpoint-info --domain "$vm_name" --checkpointname "$PARENT_CHECKPOINT_NAME" > /dev/null 2>&1; then + return + fi + if ! 
virsh -c qemu:///system checkpoint-create --domain "$vm_name" --xmlfile "$checkpoint_file" --redefine > /dev/null 2>&1; then + echo "Failed to redefine checkpoint $PARENT_CHECKPOINT_NAME on domain $vm_name" + exit 1 + fi +} + + +parse_rbd_uri() { + local uri="$1" + log -ne "parse_rbd_uri called with uri=[$uri]" + + RBD_IMAGE="" + RBD_MON_HOST="" + RBD_USER="" + RBD_KEY="" + + if [[ "$uri" == rbd:* ]]; then + local payload="${uri#rbd:}" + RBD_IMAGE="${payload%%:*}" + + if [[ "$uri" =~ :mon_host=([^:]*) ]]; then + RBD_MON_HOST="${BASH_REMATCH[1]}" + RBD_MON_HOST="${RBD_MON_HOST//\\;/,}" + RBD_MON_HOST="${RBD_MON_HOST//\\:/:}" + fi + + if [[ "$uri" =~ :id=([^:]*) ]]; then + RBD_USER="${BASH_REMATCH[1]}" + fi + + if [[ "$uri" =~ :key=([^:]*) ]]; then + RBD_KEY="${BASH_REMATCH[1]}" + fi + elif [[ "$uri" == rbd/* ]]; then + RBD_IMAGE="$uri" + else + echo "Invalid RBD disk path: $uri" + cleanup + fi + + if [[ -z "$RBD_IMAGE" ]]; then + echo "Failed to parse RBD image from uri: $uri" + cleanup + fi + + log -ne "Parsed RBD uri -> IMAGE=[$RBD_IMAGE], MON=[$RBD_MON_HOST], USER=[$RBD_USER]" +} + +build_rbd_cmd() { + RBD_CMD=(rbd) + if [[ -n "$RBD_MON_HOST" ]]; then + RBD_CMD+=(-m "$RBD_MON_HOST") + fi + if [[ -n "$RBD_USER" ]]; then + RBD_CMD+=(--id "$RBD_USER") + fi + if [[ -n "$RBD_KEY" ]]; then + RBD_CMD+=(--key "$RBD_KEY") + fi +} + +write_rbd_backup_metadata() { + local backup_type="$1" + local checkpoint_name="$2" + local parent_checkpoint_name="$3" + + cat > "$dest/rbd-backup.meta" < "$dest/checkpoints/$checkpoint_name.meta" < /dev/null 2>&1; then + virsh -c qemu:///system dumpxml "$vm_name" > "$dest/domain-config.xml" 2>/dev/null || true + virsh -c qemu:///system dominfo "$vm_name" > "$dest/dominfo.xml" 2>/dev/null || true + virsh -c qemu:///system domiflist "$vm_name" > "$dest/domiflist.xml" 2>/dev/null || true + virsh -c qemu:///system domblklist "$vm_name" > "$dest/domblklist.xml" 2>/dev/null || true + + if [[ -n "$CHECKPOINT_NAME" ]]; then + cat > 
"$dest/checkpoints/$CHECKPOINT_NAME.meta" <" > "$dest/backup.xml" + local index=0 + for disk in $(virsh -c qemu:///system domblklist "$VM" --details 2>/dev/null | awk '/disk/{print $3}'); do + local target_file="$dest/$(get_backup_file_by_index "$index")" + echo "" >> "$dest/backup.xml" + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + echo "$PARENT_CHECKPOINT_NAME" >> "$dest/backup.xml" + fi + echo "" >> "$dest/backup.xml" + index=$((index + 1)) + done + echo "" >> "$dest/backup.xml" + + echo "$CHECKPOINT_NAME" > "$dest/checkpoint.xml" + for disk in $(virsh -c qemu:///system domblklist "$VM" --details 2>/dev/null | awk '/disk/{print $3}'); do + echo "" >> "$dest/checkpoint.xml" + done + echo "" >> "$dest/checkpoint.xml" + + local thaw=0 + if [[ ${QUIESCE} == "true" ]]; then + if virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-freeze"}' > /dev/null 2>/dev/null; then + thaw=1 + fi + fi + + local backup_begin=0 + if virsh -c qemu:///system backup-begin --domain "$VM" --backupxml "$dest/backup.xml" --checkpointxml "$dest/checkpoint.xml" > /dev/null 2>&1; then + backup_begin=1 + fi + + if [[ $thaw -eq 1 ]]; then + virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-thaw"}' > /dev/null 2>&1 || true + fi + + if [[ $backup_begin -ne 1 ]]; then + cleanup + exit 1 + fi + + backup_domain_information "$VM" + + while true; do + status=$(virsh -c qemu:///system domjobinfo "$VM" --completed --keep-completed | awk '/Job type:/ {print $3}') + case "$status" in + Completed) break ;; + Failed) echo "Virsh backup job failed"; cleanup ;; + esac + sleep 5 + done + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_BACKUP_DIR" ]]; then + while IFS= read -r backup_file; do + [[ -z "$backup_file" ]] && continue + qemu-img rebase -u -F qcow2 -b "$PARENT_BACKUP_DIR/$backup_file" "$dest/$backup_file" > /dev/null 2>&1 || true + done < <(split_csv "$BACKUP_FILES") + fi + + dump_checkpoint_xml "$VM" + rm 
-f "$dest/backup.xml" "$dest/checkpoint.xml" + sync +} + +backup_rbd_volumes() { + mkdir -p "$dest/checkpoints" || { echo "Failed to create backup directory $dest"; exit 1; } + backup_domain_information "$VM" + local index=0 + while IFS= read -r disk_path; do + [[ -z "$disk_path" ]] && continue + local created_snapshot="" + log -ne "Loop disk raw value=[$disk_path]" + parse_rbd_uri "$disk_path" + build_rbd_cmd + log -ne "Built RBD command: ${RBD_CMD[*]}" + + local output_file="$dest/$(get_backup_file_by_index "$index" "${RBD_IMAGE##*/}.raw")" + log -ne "Starting RBD backup for disk path [$disk_path], resolved image [$RBD_IMAGE], output [$output_file]" + + if ! timeout 30s "${RBD_CMD[@]}" info "$RBD_IMAGE" >> "$logFile" 2>&1; then + echo "Failed to access RBD image $RBD_IMAGE" + cleanup + fi + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + if ! timeout 30s "${RBD_CMD[@]}" snap ls "$RBD_IMAGE" 2>>"$logFile" | awk 'NR>1 {print $2}' | grep -Fxq "$PARENT_CHECKPOINT_NAME"; then + echo "Parent RBD snapshot ${RBD_IMAGE}@${PARENT_CHECKPOINT_NAME} not found for incremental backup" + cleanup + fi + fi + + if ! timeout 30s "${RBD_CMD[@]}" snap create "${RBD_IMAGE}@${CHECKPOINT_NAME}" >> "$logFile" 2>&1; then + echo "Failed to create RBD snapshot ${RBD_IMAGE}@${CHECKPOINT_NAME}" + cleanup + fi + created_snapshot="${RBD_IMAGE}@${CHECKPOINT_NAME}" + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + if ! timeout 6h "${RBD_CMD[@]}" export-diff --from-snap "$PARENT_CHECKPOINT_NAME" "${RBD_IMAGE}@${CHECKPOINT_NAME}" "$output_file" >> "$logFile" 2>&1; then + echo "Failed to export incremental RBD diff for ${RBD_IMAGE}@${CHECKPOINT_NAME}" + [[ -n "$created_snapshot" ]] && "${RBD_CMD[@]}" snap rm "$created_snapshot" >> "$logFile" 2>&1 || true + cleanup + fi + else + if ! 
timeout 6h "${RBD_CMD[@]}" export "${RBD_IMAGE}@${CHECKPOINT_NAME}" "$output_file" >> "$logFile" 2>&1; then + echo "Failed to export full RBD snapshot ${RBD_IMAGE}@${CHECKPOINT_NAME}" + [[ -n "$created_snapshot" ]] && "${RBD_CMD[@]}" snap rm "$created_snapshot" >> "$logFile" 2>&1 || true + cleanup + fi + fi + + log -ne "Finished exporting backup file [$output_file] size=[$(stat -c %s "$output_file" 2>/dev/null)]" + index=$((index + 1)) + done < <(split_csv "$DISK_PATHS") + + write_rbd_backup_metadata "$BACKUP_TYPE" "$CHECKPOINT_NAME" "$PARENT_CHECKPOINT_NAME" + write_rbd_checkpoint_metadata "$CHECKPOINT_NAME" "$PARENT_CHECKPOINT_NAME" +} + +has_child_backup() { + local checkpoint_name="$1" + [[ -z "$checkpoint_name" ]] && return 1 + grep -R -q "^parent_checkpoint_name=$checkpoint_name$" "$(dirname "$dest")"/*/rbd-backup.meta 2>/dev/null +} + +delete_rbd_snapshot_if_unreferenced() { + local disk_paths="$1" + local checkpoint_name="$2" + + [[ -z "$checkpoint_name" ]] && return 0 + + if has_child_backup "$checkpoint_name"; then + log -ne "Skip snapshot delete [$checkpoint_name] (child exists)" + return 0 + fi + + while IFS= read -r disk_path; do + [[ -z "$disk_path" ]] && continue + parse_rbd_uri "$disk_path" + build_rbd_cmd + + if timeout 30s "${RBD_CMD[@]}" snap ls "$RBD_IMAGE" 2>/dev/null | awk 'NR>1 {print $2}' | grep -Fxq "$checkpoint_name"; then + log -ne "Deleting snapshot [${RBD_IMAGE}@${checkpoint_name}]" + "${RBD_CMD[@]}" snap rm "${RBD_IMAGE}@${checkpoint_name}" >> "$logFile" 2>&1 || true + fi + done < <(split_csv "$disk_paths") +} + +delete_backup() { + if [[ -f "$dest/rbd-backup.meta" ]]; then + source "$dest/rbd-backup.meta" + + log -ne "Deleting backup with metadata [$dest]" + + if [[ "$FORCED" != "true" ]] && has_child_backup "$checkpoint_name"; then + echo "Cannot delete backup [$backup_dir]: child backup exists" + exit 1 + fi + + delete_rbd_snapshot_if_unreferenced "$disk_paths" "$checkpoint_name" + elif [[ -n "$CHECKPOINT_NAME" && -n "$DISK_PATHS" 
]]; then + log -ne "Deleting backup using command metadata [$dest]" + delete_rbd_snapshot_if_unreferenced "$DISK_PATHS" "$CHECKPOINT_NAME" + fi + + rm -frv "$dest" + sync +} + +usage() { + echo "" + echo "Usage: $0 -o -v|--vm -p -b -c -r -i -j -f -d -q|--quiesce " + echo "" + exit 1 +} + +while [[ $# -gt 0 ]]; do + case $1 in + -o|--operation) OP="$2"; shift; shift ;; + -v|--vm) VM="$2"; shift; shift ;; + -p|--path) BACKUP_DIR="$2"; shift; shift ;; + -b|--backuptype) BACKUP_TYPE="$2"; shift; shift ;; + -c|--checkpoint) CHECKPOINT_NAME="$2"; shift; shift ;; + -r|--parentbackup) PARENT_BACKUP_DIR="$2"; shift; shift ;; + -i|--parentcheckpoint) PARENT_CHECKPOINT_NAME="$2"; shift; shift ;; + -j|--parentcheckpointpath) PARENT_CHECKPOINT_PATH="$2"; shift; shift ;; + -f|--backupfiles) BACKUP_FILES="$2"; shift; shift ;; + -q|--quiesce) QUIESCE="$2"; shift; shift ;; + -d|--diskpaths) DISK_PATHS="$2"; shift; shift ;; + -x|--forced) FORCED="$2"; shift; shift ;; + -h|--help) usage ;; + *) echo "Invalid option: $1"; usage ;; + esac +done + +if [[ -z "$BACKUP_DIR" ]]; then + echo "Backup path (-p|--path) is required" + exit 1 +fi + +dest="$BACKUP_DIR" +sanity_checks + +log -ne "ablestack_cvtbackup.sh start op=[$OP] vm=[$VM] backupDir=[$BACKUP_DIR] backupType=[$BACKUP_TYPE] checkpoint=[$CHECKPOINT_NAME] parentBackup=[$PARENT_BACKUP_DIR] parentCheckpoint=[$PARENT_CHECKPOINT_NAME] diskPaths=[$DISK_PATHS] backupFiles=[$BACKUP_FILES]" + +if [[ "$OP" == "backup-running" ]]; then + backup_running_vm +elif [[ "$OP" == "backup-rbd" ]]; then + backup_rbd_volumes +elif [[ "$OP" == "delete" ]]; then + delete_backup +else + echo "Unsupported operation: $OP" + exit 1 +fi diff --git a/scripts/vm/hypervisor/kvm/ablestack_nasbackup.sh b/scripts/vm/hypervisor/kvm/ablestack_nasbackup.sh new file mode 100755 index 000000000000..1960720ec7a9 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/ablestack_nasbackup.sh @@ -0,0 +1,626 @@ +#!/usr/bin/bash + +# Licensed to the Apache Software Foundation (ASF) 
under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set -eo pipefail + +# CloudStack B&R NAS Backup and Recovery Tool for KVM + +# TODO: do libvirt/logging etc checks + +### Declare variables ### + +OP="" +VM="" +NAS_TYPE="" +NAS_ADDRESS="" +MOUNT_OPTS="" +BACKUP_DIR="" +BACKUP_TYPE="" +CHECKPOINT_NAME="" +PARENT_BACKUP_DIR="" +PARENT_CHECKPOINT_NAME="" +PARENT_CHECKPOINT_PATH="" +BACKUP_FILES="" +DISK_PATHS="" +QUIESCE="" +FORCED="false" +logFile="/var/log/cloudstack/agent/agent.log" + +EXIT_CLEANUP_FAILED=20 + +log() { + [[ "$verb" -eq 1 ]] && builtin echo "$@" + if [[ "$1" == "-ne" || "$1" == "-e" || "$1" == "-n" ]]; then + builtin echo -e "$(date '+%Y-%m-%d %H-%M-%S>')" "${@: 2}" >> "$logFile" + else + builtin echo "$(date '+%Y-%m-%d %H-%M-%S>')" "$@" >> "$logFile" + fi +} + +vercomp() { + local IFS=. 
+ local i ver1=($1) ver2=($3) + + # Compare each segment of the version numbers + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]]; then + ver2[i]=0 + fi + + if ((10#${ver1[i]} > 10#${ver2[i]})); then + return 0 # Version 1 is greater + elif ((10#${ver1[i]} < 10#${ver2[i]})); then + return 2 # Version 2 is greater + fi + done + return 0 # Versions are equal +} + +sanity_checks() { + hvVersion=$(virsh version | grep hypervisor | awk '{print $(NF)}') + libvVersion=$(virsh version | grep libvirt | awk '{print $(NF)}' | tail -n 1) + apiVersion=$(virsh version | grep API | awk '{print $(NF)}') + + vercomp "$hvVersion" ">=" "4.2.0" + hvStatus=$? + vercomp "$libvVersion" ">=" "7.2.0" + libvStatus=$? + + if [[ $hvStatus -eq 0 && $libvStatus -eq 0 ]]; then + log -ne "Success... [ QEMU: $hvVersion Libvirt: $libvVersion apiVersion: $apiVersion ]" + else + echo "Failure... Your QEMU version $hvVersion or libvirt version $libvVersion is unsupported. Consider upgrading to the required minimum version of QEMU: 4.2.0 and Libvirt: 7.2.0" + exit 1 + fi +} + +### Operation methods ### + +backup_running_vm() { + mount_operation + mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } + mkdir -p "$dest/checkpoints" || { echo "Failed to create checkpoint directory $dest/checkpoints"; exit 1; } + + local parent_checkpoint_file="" + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_PATH" ]]; then + parent_checkpoint_file="$mount_point/$PARENT_CHECKPOINT_PATH" + redefine_checkpoint_if_needed "$VM" "$parent_checkpoint_file" + fi + + echo "" > "$dest/backup.xml" + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + echo "$PARENT_CHECKPOINT_NAME" >> "$dest/backup.xml" + fi + echo "" >> "$dest/backup.xml" + echo "$CHECKPOINT_NAME" > "$dest/checkpoint.xml" + local index=0 + while IFS='|' read -r disk target; do + [[ -z "$disk" ]] && continue + local backup_file + backup_file=$(get_backup_file_by_index "$index" 
"$(basename "$target").qcow2") + echo "" >> "$dest/backup.xml" + echo "" >> "$dest/checkpoint.xml" + index=$((index + 1)) + done < <(virsh -c qemu:///system domblklist "$VM" --details 2>/dev/null | awk '/disk/ {print $3 "|" $4}') + echo "" >> "$dest/backup.xml" + echo "" >> "$dest/checkpoint.xml" + + local thaw=0 + if [[ ${QUIESCE} == "true" ]]; then + if virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-freeze"}' > /dev/null 2>/dev/null; then + thaw=1 + fi + fi + + # Start push backup + local backup_begin=0 + if virsh -c qemu:///system backup-begin --domain "$VM" --backupxml "$dest/backup.xml" --checkpointxml "$dest/checkpoint.xml" 2>&1 > /dev/null; then + backup_begin=1; + fi + + if [[ $thaw -eq 1 ]]; then + if ! response=$(virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-thaw"}' 2>&1 > /dev/null); then + echo "Failed to thaw the filesystem for vm $VM: $response" + cleanup + exit 1 + fi + fi + + if [[ $backup_begin -ne 1 ]]; then + cleanup + exit 1 + fi + + backup_domain_information "$VM" + + while true; do + status=$(virsh -c qemu:///system domjobinfo "$VM" --completed --keep-completed | awk '/Job type:/ {print $3}') + case "$status" in + Completed) + break ;; + Failed) + echo "Virsh backup job failed" + cleanup ;; + esac + sleep 5 + done + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_BACKUP_DIR" ]]; then + local index=0 + while IFS='|' read -r disk target; do + [[ -z "$disk" ]] && continue + local backup_file + backup_file=$(get_backup_file_by_index "$index" "$(basename "$target").qcow2") + output="$dest/$backup_file" + parent="../$(basename "$PARENT_BACKUP_DIR")/$backup_file" + if ! 
qemu-img rebase -u -F qcow2 -b "$parent" "$output" > "$logFile" 2> >(cat >&2); then + echo "qemu-img rebase failed for $output with parent $parent" + cleanup + fi + index=$((index + 1)) + done < <(virsh -c qemu:///system domblklist "$VM" --details 2>/dev/null | awk '/disk/ {print $3 "|" $4}') + fi + + dump_checkpoint_xml "$VM" + rm -f "$dest/backup.xml" + rm -f "$dest/checkpoint.xml" + sync + + # Print statistics + virsh -c qemu:///system domjobinfo "$VM" --completed + du -sb "$dest" | cut -f1 + + umount "$mount_point" + rmdir "$mount_point" +} + +backup_rbd_volumes() { + log -ne "Entered backup_rbd_volumes with DISK_PATHS=[$DISK_PATHS], BACKUP_FILES=[$BACKUP_FILES], BACKUP_DIR=[$BACKUP_DIR]" + mount_operation + mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } + + backup_domain_information "$VM" + + local index=0 + while IFS= read -r disk; do + local created_snapshot="" + log -ne "Loop disk raw value=[$disk]" + [[ -z "$disk" ]] && continue + + parse_rbd_uri "$disk" + log -ne "Parsed disk [$disk] -> RBD_IMAGE=[$RBD_IMAGE], MON=[$RBD_MON_HOST], USER=[$RBD_USER]" + + if [[ -z "$RBD_IMAGE" ]]; then + echo "Unable to parse RBD disk path: $disk" + cleanup + fi + + build_rbd_cmd + log -ne "Built RBD command: ${RBD_CMD[*]}" + + local backup_file + backup_file=$(get_backup_file_by_index "$index" "${RBD_IMAGE##*/}.raw") + local output="$dest/$backup_file" + local current_snapshot="${CHECKPOINT_NAME}" + + log -ne "Resolved backup file [$backup_file], destination [$output]" + log -ne "Starting RBD backup for disk path [$disk], resolved image [$RBD_IMAGE], output [$output]" + + if ! timeout 30s "${RBD_CMD[@]}" info "$RBD_IMAGE" >> "$logFile" 2>&1; then + echo "Failed to access RBD image $RBD_IMAGE" + cleanup + fi + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + if ! 
timeout 30s "${RBD_CMD[@]}" snap ls "$RBD_IMAGE" 2>>"$logFile" | awk 'NR>1 {print $2}' | grep -Fxq "$PARENT_CHECKPOINT_NAME"; then + echo "Parent RBD snapshot ${RBD_IMAGE}@${PARENT_CHECKPOINT_NAME} not found for incremental backup" + cleanup + fi + fi + + if ! timeout 30s "${RBD_CMD[@]}" snap create "${RBD_IMAGE}@${current_snapshot}" >> "$logFile" 2>&1; then + echo "Failed to create RBD snapshot ${RBD_IMAGE}@${current_snapshot}" + cleanup + fi + created_snapshot="${RBD_IMAGE}@${current_snapshot}" + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + if ! timeout 6h "${RBD_CMD[@]}" export-diff --from-snap "$PARENT_CHECKPOINT_NAME" "${RBD_IMAGE}@${current_snapshot}" "$output" >> "$logFile" 2>&1; then + echo "Failed to export incremental RBD diff for ${RBD_IMAGE}@${current_snapshot}" + [[ -n "$created_snapshot" ]] && "${RBD_CMD[@]}" snap rm "$created_snapshot" >> "$logFile" 2>&1 || true + cleanup + fi + else + if ! timeout 6h "${RBD_CMD[@]}" export "${RBD_IMAGE}@${current_snapshot}" "$output" >> "$logFile" 2>&1; then + echo "Failed to export full RBD snapshot ${RBD_IMAGE}@${current_snapshot}" + [[ -n "$created_snapshot" ]] && "${RBD_CMD[@]}" snap rm "$created_snapshot" >> "$logFile" 2>&1 || true + cleanup + fi + fi + + log -ne "Finished exporting backup file [$output] size=[$(stat -c %s "$output" 2>/dev/null)]" + stat -c %s "$output" + index=$((index + 1)) + done < <(split_csv "$DISK_PATHS") + + write_rbd_backup_metadata "$BACKUP_TYPE" "$CHECKPOINT_NAME" "$PARENT_CHECKPOINT_NAME" + + sync + log -ne "RBD backup completed checkpoint=[$CHECKPOINT_NAME] parent=[$PARENT_CHECKPOINT_NAME]" + umount "$mount_point" + rmdir "$mount_point" +} + +backup_domain_information() { + local vm_name="$1" + + [[ -z "$vm_name" ]] && return 0 + + mkdir -p "$dest/checkpoints" || { + echo "Failed to create checkpoint directory $dest/checkpoints" + exit 1 + } + + if virsh -c qemu:///system dominfo "$vm_name" > /dev/null 2>&1; then + virsh -c qemu:///system 
dumpxml "$vm_name" > "$dest/domain-config.xml" 2>/dev/null || true + virsh -c qemu:///system dominfo "$vm_name" > "$dest/dominfo.xml" 2>/dev/null || true + virsh -c qemu:///system domiflist "$vm_name" > "$dest/domiflist.xml" 2>/dev/null || true + virsh -c qemu:///system domblklist "$vm_name" > "$dest/domblklist.xml" 2>/dev/null || true + + if [[ -n "$CHECKPOINT_NAME" ]]; then + cat > "$dest/checkpoints/$CHECKPOINT_NAME.meta" </dev/null +} + +delete_rbd_snapshot_if_unreferenced() { + local disk_paths="$1" + local checkpoint_name="$2" + + [[ -z "$checkpoint_name" ]] && return 0 + + if has_child_backup "$checkpoint_name"; then + log -ne "Skip snapshot delete [$checkpoint_name] (child exists)" + return 0 + fi + + while IFS= read -r disk; do + [[ -z "$disk" ]] && continue + parse_rbd_uri "$disk" + build_rbd_cmd + + if [[ -n "$RBD_IMAGE" ]]; then + log -ne "Deleting snapshot [${RBD_IMAGE}@${checkpoint_name}]" + "${RBD_CMD[@]}" snap rm "${RBD_IMAGE}@${checkpoint_name}" >> "$logFile" 2>&1 || true + fi + done < <(split_csv "$disk_paths") +} + +delete_backup() { + mount_operation + + if [[ -f "$dest/rbd-backup.meta" ]]; then + source "$dest/rbd-backup.meta" + + log -ne "Deleting backup with metadata [$dest]" + + if [[ "$FORCED" != "true" ]] && has_child_backup "$checkpoint_name"; then + echo "Cannot delete backup [$backup_dir]: child backup exists" + umount "$mount_point" + rmdir "$mount_point" + exit 1 + fi + + delete_rbd_snapshot_if_unreferenced "$disk_paths" "$checkpoint_name" + elif [[ -n "$CHECKPOINT_NAME" && -n "$DISK_PATHS" ]]; then + log -ne "Deleting backup using command metadata [$dest]" + delete_rbd_snapshot_if_unreferenced "$DISK_PATHS" "$CHECKPOINT_NAME" + fi + + rm -frv $dest + sync + umount $mount_point + rmdir $mount_point +} + +get_backup_stats() { + mount_operation + + echo $mount_point + df -P $mount_point 2>/dev/null | awk 'NR==2 {print $2, $3}' + umount $mount_point + rmdir $mount_point +} + +mount_operation() { + mount_point=$(mktemp -d -t 
csbackup.XXXXX) + dest="$mount_point/${BACKUP_DIR}" + if [ ${NAS_TYPE} == "cifs" ]; then + MOUNT_OPTS="${MOUNT_OPTS},nobrl" + fi + mount -t ${NAS_TYPE} ${NAS_ADDRESS} ${mount_point} $([[ ! -z "${MOUNT_OPTS}" ]] && echo -o ${MOUNT_OPTS}) 2>&1 | tee -a "$logFile" + if [ $? -eq 0 ]; then + log -ne "Successfully mounted ${NAS_TYPE} store" + else + echo "Failed to mount ${NAS_TYPE} store" + exit 1 + fi +} + +cleanup() { + local status=0 + + rm -rf "$dest" || { echo "Failed to delete $dest"; status=1; } + umount "$mount_point" || { echo "Failed to unmount $mount_point"; status=1; } + rmdir "$mount_point" || { echo "Failed to remove mount point $mount_point"; status=1; } + + if [[ $status -ne 0 ]]; then + echo "Backup cleanup failed" + exit $EXIT_CLEANUP_FAILED + fi +} + +split_csv() { + tr ',' '\n' <<< "$1" +} + +is_rbd_disk_path() { + local disk_path="$1" + [[ "$disk_path" == rbd:* || "$disk_path" == rbd/* ]] +} + +get_backup_file_by_index() { + local index="$1" + local fallback="$2" + if [[ -z "$BACKUP_FILES" ]]; then + echo "$fallback" + return + fi + local current=0 + while IFS= read -r value; do + if [[ "$current" -eq "$index" ]]; then + echo "$value" + return + fi + current=$((current + 1)) + done < <(split_csv "$BACKUP_FILES") + echo "$fallback" +} + +dump_checkpoint_xml() { + local vm_name="$1" + if [[ -n "$CHECKPOINT_NAME" ]]; then + virsh -c qemu:///system checkpoint-dumpxml --domain "$vm_name" --checkpointname "$CHECKPOINT_NAME" --no-domain > "$dest/checkpoints/$CHECKPOINT_NAME.xml" 2>/dev/null || true + fi +} + +redefine_checkpoint_if_needed() { + local vm_name="$1" + local checkpoint_file="$2" + if [[ -z "$PARENT_CHECKPOINT_NAME" || -z "$checkpoint_file" || ! -f "$checkpoint_file" ]]; then + return + fi + if virsh -c qemu:///system checkpoint-info --domain "$vm_name" --checkpointname "$PARENT_CHECKPOINT_NAME" > /dev/null 2>&1; then + return + fi + if ! 
virsh -c qemu:///system checkpoint-create --domain "$vm_name" --xmlfile "$checkpoint_file" --redefine > /dev/null 2>&1; then + echo "Failed to redefine checkpoint $PARENT_CHECKPOINT_NAME on domain $vm_name" + cleanup + fi +} + +parse_rbd_uri() { + local uri="$1" + log -ne "parse_rbd_uri called with uri=[$uri]" + + RBD_IMAGE="" + RBD_MON_HOST="" + RBD_USER="" + RBD_KEY="" + + if [[ "$uri" == rbd:* ]]; then + local payload="${uri#rbd:}" + RBD_IMAGE="${payload%%:*}" + + if [[ "$uri" =~ :mon_host=([^:]*) ]]; then + RBD_MON_HOST="${BASH_REMATCH[1]}" + RBD_MON_HOST="${RBD_MON_HOST//\\;/,}" + RBD_MON_HOST="${RBD_MON_HOST//\\:/:}" + fi + + if [[ "$uri" =~ :id=([^:]*) ]]; then + RBD_USER="${BASH_REMATCH[1]}" + fi + + if [[ "$uri" =~ :key=([^:]*) ]]; then + RBD_KEY="${BASH_REMATCH[1]}" + fi + elif [[ "$uri" == rbd/* ]]; then + RBD_IMAGE="$uri" + else + echo "Invalid RBD disk path: $uri" + cleanup + fi + + if [[ -z "$RBD_IMAGE" ]]; then + echo "Failed to parse RBD image from uri: $uri" + cleanup + fi + + log -ne "Parsed RBD uri -> IMAGE=[$RBD_IMAGE], MON=[$RBD_MON_HOST], USER=[$RBD_USER]" +} + +build_rbd_cmd() { + RBD_CMD=(rbd) + if [[ -n "$RBD_MON_HOST" ]]; then + RBD_CMD+=(-m "$RBD_MON_HOST") + fi + if [[ -n "$RBD_USER" ]]; then + RBD_CMD+=(--id "$RBD_USER") + fi + if [[ -n "$RBD_KEY" ]]; then + RBD_CMD+=(--key "$RBD_KEY") + fi +} + +write_rbd_backup_metadata() { + local backup_type="$1" + local checkpoint_name="$2" + local parent_checkpoint_name="$3" + + cat > "$dest/rbd-backup.meta" < -v|--vm -t -s -m -p -b -c -r -i -j -f -d -q|--quiesce -x|--forced " + echo "" + exit 1 +} + +while [[ $# -gt 0 ]]; do + case $1 in + -o|--operation) + OP="$2" + shift + shift + ;; + -v|--vm) + VM="$2" + shift + shift + ;; + -t|--type) + NAS_TYPE="$2" + shift + shift + ;; + -s|--storage) + NAS_ADDRESS="$2" + shift + shift + ;; + -m|--mount) + MOUNT_OPTS="$2" + shift + shift + ;; + -p|--path) + BACKUP_DIR="$2" + shift + shift + ;; + -b|--backuptype) + BACKUP_TYPE="$2" + shift + shift + ;; + 
-c|--checkpoint) + CHECKPOINT_NAME="$2" + shift + shift + ;; + -r|--parentpath) + PARENT_BACKUP_DIR="$2" + shift + shift + ;; + -i|--parentcheckpoint) + PARENT_CHECKPOINT_NAME="$2" + shift + shift + ;; + -j|--parentcheckpointpath) + PARENT_CHECKPOINT_PATH="$2" + shift + shift + ;; + -f|--backupfiles) + BACKUP_FILES="$2" + shift + shift + ;; + -q|--quiesce) + QUIESCE="$2" + shift + shift + ;; + -x|--forced) + FORCED="$2" + shift + shift + ;; + -d|--diskpaths) + DISK_PATHS="$2" + shift + shift + ;; + -h|--help) + usage + shift + ;; + *) + echo "Invalid option: $1" + usage + ;; + esac +done + +# Perform Initial sanity checks +sanity_checks + +log -ne "nasbackup.sh start op=[$OP] vm=[$VM] backupDir=[$BACKUP_DIR] backupType=[$BACKUP_TYPE] checkpoint=[$CHECKPOINT_NAME] parentBackup=[$PARENT_BACKUP_DIR] parentCheckpoint=[$PARENT_CHECKPOINT_NAME] diskPaths=[$DISK_PATHS] backupFiles=[$BACKUP_FILES]" + +if [ "$OP" = "backup-running" ]; then + backup_running_vm +elif [ "$OP" = "backup-rbd" ]; then + backup_rbd_volumes +elif [ "$OP" = "delete" ]; then + delete_backup +elif [ "$OP" = "stats" ]; then + get_backup_stats +fi diff --git a/scripts/vm/hypervisor/kvm/cvtbackup.sh b/scripts/vm/hypervisor/kvm/cvtbackup.sh deleted file mode 100644 index 0493654fce02..000000000000 --- a/scripts/vm/hypervisor/kvm/cvtbackup.sh +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -eo pipefail - -# CloudStack B&R Commvault Backup and Recovery Tool for KVM - -# TODO: do libvirt/logging etc checks - -### Declare variables ### - -OP="" -VM="" -BACKUP_DIR="" -DISK_PATHS="" -QUIESCE="" -logFile="/var/log/cloudstack/agent/agent.log" - -EXIT_CLEANUP_FAILED=20 - -log() { - [[ "$verb" -eq 1 ]] && builtin echo "$@" - if [[ "$1" == "-ne" || "$1" == "-e" || "$1" == "-n" ]]; then - builtin echo -e "$(date '+%Y-%m-%d %H-%M-%S>')" "${@: 2}" >> "$logFile" - else - builtin echo "$(date '+%Y-%m-%d %H-%M-%S>')" "$@" >> "$logFile" - fi -} - -vercomp() { - local IFS=. - local i ver1=($1) ver2=($3) - - # Compare each segment of the version numbers - for ((i=0; i<${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]]; then - ver2[i]=0 - fi - - if ((10#${ver1[i]} > 10#${ver2[i]})); then - return 0 # Version 1 is greater - elif ((10#${ver1[i]} < 10#${ver2[i]})); then - return 2 # Version 2 is greater - fi - done - return 0 # Versions are equal -} - -sanity_checks() { - hvVersion=$(virsh version | grep hypervisor | awk '{print $(NF)}') - libvVersion=$(virsh version | grep libvirt | awk '{print $(NF)}' | tail -n 1) - apiVersion=$(virsh version | grep API | awk '{print $(NF)}') - - # Compare qemu version (hvVersion >= 4.2.0) - vercomp "$hvVersion" ">=" "4.2.0" - hvStatus=$? - - # Compare libvirt version (libvVersion >= 7.2.0) - vercomp "$libvVersion" ">=" "7.2.0" - libvStatus=$? - - if [[ $hvStatus -eq 0 && $libvStatus -eq 0 ]]; then - log -ne "Success... [ QEMU: $hvVersion Libvirt: $libvVersion apiVersion: $apiVersion ]" - else - echo "Failure... 
Your QEMU version $hvVersion or libvirt version $libvVersion is unsupported. Consider upgrading to the required minimum version of QEMU: 4.2.0 and Libvirt: 7.2.0" - exit 1 - fi - - log -ne "Environment Sanity Checks successfully passed" -} - -### Operation methods ### - -backup_running_vm() { - mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } - - name="root" - echo "" > $dest/backup.xml - for disk in $(virsh -c qemu:///system domblklist $VM --details 2>/dev/null | awk '/disk/{print$3}'); do - volpath=$(virsh -c qemu:///system domblklist $VM --details | awk "/$disk/{print $4}" | sed 's/.*\///') - echo "" >> $dest/backup.xml - name="datadisk" - done - echo "" >> $dest/backup.xml - - local thaw=0 - if [[ ${QUIESCE} == "true" ]]; then - log -ne "Pause option is enabled on a running virtual machine" - if virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-freeze"}' > /dev/null 2>/dev/null; then - thaw=1 - fi - fi - - # Start push backup - local backup_begin=0 - if virsh -c qemu:///system backup-begin --domain $VM --backupxml $dest/backup.xml 2>&1 > /dev/null; then - backup_begin=1; - fi - - if [[ $thaw -eq 1 ]]; then - if ! 
response=$(virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-thaw"}' 2>&1 > /dev/null); then - echo "Failed to thaw the filesystem for vm $VM: $response" - cleanup - exit 1 - fi - fi - - if [[ $backup_begin -ne 1 ]]; then - cleanup - exit 1 - fi - - # Backup domain information - virsh -c qemu:///system dumpxml $VM > $dest/domain-config.xml 2>/dev/null - virsh -c qemu:///system dominfo $VM > $dest/dominfo.xml 2>/dev/null - virsh -c qemu:///system domiflist $VM > $dest/domiflist.xml 2>/dev/null - virsh -c qemu:///system domblklist $VM > $dest/domblklist.xml 2>/dev/null - - while true; do - status=$(virsh -c qemu:///system domjobinfo $VM --completed --keep-completed | awk '/Job type:/ {print $3}') - case "$status" in - Completed) - break ;; - Failed) - echo "Virsh backup job failed" - cleanup ;; - esac - sleep 5 - done - sync - -} - -backup_stopped_vm() { - mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } - - IFS="," - - name="root" - for disk in $DISK_PATHS; do - if [[ "$disk" == rbd:* ]]; then - # disk for rbd => rbd:/:mon_host=... - # sample: rbd:cloudstack/53d5c355-d726-4d3e-9422-046a503a0b12:mon_host=10.0.1.2... - beforeUuid="${disk#*/}" # Remove up to first slash after rbd: - volUuid="${beforeUuid%%:*}" # Remove everything after colon to get the uuid - else - volUuid="${disk##*/}" - fi - output="$dest/$name.$volUuid.qcow2" - if ! 
qemu-img convert -O qcow2 "$disk" "$output" > "$logFile" 2> >(cat >&2); then - echo "qemu-img convert failed for $disk $output" - cleanup - fi - name="datadisk" - done - sync - -} - -cleanup() { - local status=0 - - rm -rf "$dest" || { echo "Failed to delete $dest"; status=1; } - - if [[ $status -ne 0 ]]; then - echo "Backup cleanup failed" - exit $EXIT_CLEANUP_FAILED - fi -} - -function usage { - echo "" - echo "Usage: $0 -o -v|--vm -p -d -q|--quiesce " - echo "" - exit 1 -} - -while [[ $# -gt 0 ]]; do - case $1 in - -o|--operation) - OP="$2" - shift - shift - ;; - -v|--vm) - VM="$2" - shift - shift - ;; - -p|--path) - BACKUP_DIR="$2" - shift - shift - ;; - -q|--quiesce) - QUIESCE="$2" - shift - shift - ;; - -d|--diskpaths) - DISK_PATHS="$2" - shift - shift - ;; - -h|--help) - usage - shift - ;; - *) - echo "Invalid option: $1" - usage - ;; - esac -done - -if [[ -z "$BACKUP_DIR" ]]; then - echo "Backup path (-p|--path) is required" - exit 1 -fi - -dest="$BACKUP_DIR" - -# Perform Initial sanity checks -sanity_checks - -if [[ "$OP" != "backup" ]]; then - echo "Unsupported operation: $OP" - exit 1 -fi - -STATE=$(virsh -c qemu:///system list | awk -v vm="$VM" '$2 == vm {print $3}') - -if [[ -n "$STATE" && "$STATE" == "running" ]]; then - backup_running_vm -else - backup_stopped_vm -fi - -exit 0 diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index bf5152ddf484..7356217b20ac 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -118,6 +118,7 @@ import org.apache.cloudstack.api.response.LBStickinessPolicyResponse; import org.apache.cloudstack.api.response.LBStickinessResponse; import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.backup.BackupProviderNameUtils; import org.apache.cloudstack.api.response.LoadBalancerResponse; import 
org.apache.cloudstack.api.response.ManagementServerResponse; import org.apache.cloudstack.api.response.NetworkACLItemResponse; @@ -5570,7 +5571,7 @@ public BackupRepositoryResponse createBackupRepositoryResponse(BackupRepository response.setId(backupRepository.getUuid()); response.setCreated(backupRepository.getCreated()); response.setAddress(backupRepository.getAddress()); - response.setProviderName(backupRepository.getProvider()); + response.setProviderName(BackupProviderNameUtils.toDisplayName(backupRepository.getProvider())); response.setType(backupRepository.getType()); if (StringUtils.isNotBlank(backupRepository.getMountOptions())) { response.setMountOptions(backupRepository.getMountOptions()); diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 3fa004a4ddea..e4240eb722a9 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -4221,6 +4221,26 @@ private Snapshot takeSnapshotInternal(Long volumeId, Long policyId, Long snapsho } } + private void validateNoBackupActivityOrHistoryForVolumeSnapshot(Long volumeId, String operation) { + VolumeVO volume = _volsDao.findById(volumeId); + if (volume == null || volume.getInstanceId() == null) { + return; + } + + Long vmId = volume.getInstanceId(); + boolean hasBackupInProgress = backupDao.listByVmId(null, vmId).stream() + .anyMatch(backup -> Backup.Status.BackingUp.equals(backup.getStatus()) || Backup.Status.Restoring.equals(backup.getStatus())); + if (hasBackupInProgress) { + throw new InvalidParameterValueException(String.format("Snapshot %s failed because a backup or restore is currently in progress for the Instance.", operation)); + } + + boolean hasExistingBackup = backupDao.listByVmId(null, vmId).stream() + .anyMatch(backup -> Backup.Status.BackedUp.equals(backup.getStatus())); + if (hasExistingBackup) { + throw new 
InvalidParameterValueException(String.format("Snapshot %s failed because the Instance has backups.", operation)); + } + } + @NotNull private List getPoolIdsByPolicy(Long policyId, List poolIds) { if (CollectionUtils.isNotEmpty(poolIds)) { @@ -4402,6 +4422,7 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, } } } + validateNoBackupActivityOrHistoryForVolumeSnapshot(volumeId, "create"); return snapshotMgr.allocSnapshot(volumeId, policyId, snapshotName, locationType, false, zoneIds); } @@ -4482,6 +4503,7 @@ public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName throw new InvalidParameterValueException("Cannot perform this operation, unsupported VM snapshot type."); } + validateNoBackupActivityOrHistoryForVolumeSnapshot(volumeId, "create"); return snapshotMgr.allocSnapshot(volumeId, Snapshot.MANUAL_POLICY_ID, snapshotName, null, true, null); } diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 3e01bc89fa4c..e67fb7998355 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -78,6 +78,8 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.resourcedetail.SnapshotPolicyDetailVO; import org.apache.cloudstack.resourcedetail.dao.SnapshotPolicyDetailsDao; import org.apache.cloudstack.snapshot.SnapshotHelper; @@ -255,6 +257,8 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement public TaggedResourceService taggedResourceService; @Inject private AnnotationDao annotationDao; + @Inject + private 
BackupDao backupDao; @Inject protected SnapshotHelper snapshotHelper; @@ -384,6 +388,7 @@ public Snapshot revertSnapshot(Long snapshotId) { } Long instanceId = volume.getInstanceId(); + validateNoBackupActivityOrHistoryForVolumeSnapshot(volume.getId(), "revert"); // If this volume is attached to an VM, then the VM needs to be in the stopped state // in order to revert the volume @@ -620,6 +625,7 @@ public Snapshot archiveSnapshot(Long snapshotId) { if (snapshotOnPrimary == null || !snapshotOnPrimary.getStatus().equals(ObjectInDataStoreStateMachine.State.Ready)) { throw new CloudRuntimeException("Can only archive snapshots present on primary storage. " + "Cannot find snapshot " + snapshotId + " on primary storage"); } + validateNoBackupActivityOrHistoryForVolumeSnapshot(snapshotOnPrimary.getVolumeId(), "archive"); SnapshotInfo snapshotOnSecondary = snapshotSrv.backupSnapshot(snapshotOnPrimary); SnapshotVO snapshotVO = _snapshotDao.findById(snapshotOnSecondary.getId()); @@ -932,6 +938,7 @@ public boolean deleteSnapshot(long snapshotId, Long zoneId) { } _accountMgr.checkAccess(caller, null, true, snapshotCheck); + validateNoBackupActivityOrHistoryForVolumeSnapshot(snapshotCheck.getVolumeId(), "delete"); SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshotCheck, zoneId, SnapshotOperation.DELETE); if (snapshotStrategy == null) { @@ -1210,6 +1217,26 @@ protected void validatePolicyZones(List zoneIds, List poolIds, Volum } } + private void validateNoBackupActivityOrHistoryForVolumeSnapshot(Long volumeId, String operation) { + VolumeVO volume = _volsDao.findById(volumeId); + if (volume == null || volume.getInstanceId() == null) { + return; + } + + Long vmId = volume.getInstanceId(); + boolean hasBackupInProgress = backupDao.listByVmId(null, vmId).stream() + .anyMatch(backup -> Backup.Status.BackingUp.equals(backup.getStatus()) || Backup.Status.Restoring.equals(backup.getStatus())); + if (hasBackupInProgress) { + throw new 
InvalidParameterValueException(String.format("Snapshot %s failed because a backup or restore is currently in progress for the Instance.", operation)); + } + + boolean hasExistingBackup = backupDao.listByVmId(null, vmId).stream() + .anyMatch(backup -> Backup.Status.BackedUp.equals(backup.getStatus())); + if (hasExistingBackup) { + throw new InvalidParameterValueException(String.format("Snapshot %s failed because the Instance has backups.", operation)); + } + } + @Override @DB @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_POLICY_CREATE, eventDescription = "creating snapshot policy") diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 03e691b596e1..07d03a91710b 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -55,6 +55,7 @@ import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.secstorage.heuristics.HeuristicType; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; @@ -324,6 +325,15 @@ protected DataStore verifyHeuristicRulesForZone(VMTemplateVO template, Long zone return null; } + protected boolean isWritableImageStore(DataStore imageStore, Long zoneId) { + ImageStoreVO imageStoreVO = _imgStoreDao.findById(imageStore.getId()); + if (imageStoreVO == null) { + logger.warn("Unable to find image store [{}] in zone [{}] while validating heuristic rule selection.", imageStore, zoneId); + return false; + } + return !imageStoreVO.isReadonly(); + } + protected void standardImageStoreAllocation(List imageStores, 
VMTemplateVO template) { Set zoneSet = new HashSet(); Collections.shuffle(imageStores); diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index dc79c766072b..a5f1ea2b2229 100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -29,6 +29,8 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.annotation.AnnotationService; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd; @@ -179,6 +181,8 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme PrimaryDataStoreDao _storagePoolDao; @Inject private AnnotationDao annotationDao; + @Inject + private BackupDao backupDao; VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); @@ -448,6 +452,8 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc throw new CloudRuntimeException("There are other active Instance Snapshot tasks on the Instance, please try again later"); } + validateNoBackupActivityOrHistoryForVMSnapshot(vmId, "create"); + VMSnapshot.Type vmSnapshotType = VMSnapshot.Type.Disk; if (snapshotMemory && userVmVo.getState() == VirtualMachine.State.Running) vmSnapshotType = VMSnapshot.Type.DiskAndMemory; @@ -647,6 +653,20 @@ public boolean hasActiveVMSnapshotTasks(Long vmId) { return activeVMSnapshots.size() > 0; } + private void validateNoBackupActivityOrHistoryForVMSnapshot(Long vmId, String operation) { + boolean hasBackupInProgress = backupDao.listByVmId(null, vmId).stream() + .anyMatch(backup -> Backup.Status.BackingUp.equals(backup.getStatus()) || 
Backup.Status.Restoring.equals(backup.getStatus())); + if (hasBackupInProgress) { + throw new InvalidParameterValueException(String.format("Instance Snapshot %s failed because a backup or restore is currently in progress for the Instance.", operation)); + } + + boolean hasExistingBackup = backupDao.listByVmId(null, vmId).stream() + .anyMatch(backup -> Backup.Status.BackedUp.equals(backup.getStatus())); + if (hasExistingBackup) { + throw new InvalidParameterValueException(String.format("Instance Snapshot %s failed because the Instance has backups.", operation)); + } + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_SNAPSHOT_DELETE, eventDescription = "Delete Instance Snapshots", async = true) public boolean deleteVMSnapshot(Long vmSnapshotId) { @@ -673,6 +693,8 @@ public boolean deleteVMSnapshot(Long vmSnapshotId) { throw new InvalidParameterValueException("There are other active Instance Snapshot tasks on the Instance, please try again later"); } + validateNoBackupActivityOrHistoryForVMSnapshot(vmSnapshot.getVmId(), "delete"); + // serialize VM operation AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { @@ -738,6 +760,8 @@ private boolean orchestrateDeleteVMSnapshot(Long vmSnapshotId) { throw new InvalidParameterValueException("There are other active Instance Snapshot tasks on the Instance, please try again later"); } + validateNoBackupActivityOrHistoryForVMSnapshot(vmSnapshot.getVmId(), "delete"); + annotationDao.removeByEntityType(AnnotationService.EntityType.VM_SNAPSHOT.name(), vmSnapshot.getUuid()); if (vmSnapshot.getState() == VMSnapshot.State.Allocated) { return _vmSnapshotDao.remove(vmSnapshot.getId()); @@ -919,6 +943,8 @@ private UserVm orchestrateRevertToVMSnapshot(Long vmSnapshotId) throws Insuffici throw new InvalidParameterValueException("There is other active Instance Snapshot tasks on the Instance, please try again later"); 
} + validateNoBackupActivityOrHistoryForVMSnapshot(vmId, "revert"); + Account caller = getCaller(); _accountMgr.checkAccess(caller, null, true, vmSnapshotVo); @@ -1050,6 +1076,7 @@ public VirtualMachine getVMBySnapshotId(Long id) { @Override public boolean deleteAllVMSnapshots(long vmId, VMSnapshot.Type type) { + validateNoBackupActivityOrHistoryForVMSnapshot(vmId, "delete"); // serialize VM operation AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { @@ -1094,6 +1121,7 @@ else if (jobResult instanceof Throwable) } private boolean orchestrateDeleteAllVMSnapshots(long vmId, VMSnapshot.Type type) { + validateNoBackupActivityOrHistoryForVMSnapshot(vmId, "delete"); boolean result = true; List listVmSnapshots = _vmSnapshotDao.findByVm(vmId); if (listVmSnapshots == null || listVmSnapshots.isEmpty()) { diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index f01e3b914ac0..ad53b518232a 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -32,6 +32,7 @@ import java.util.TimeZone; import java.util.Timer; import java.util.TimerTask; +import java.util.Iterator; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -249,9 +250,12 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { private AsyncJobDispatcher asyncJobDispatcher; private Timer backupTimer; private Date currentTimestamp; + private static final int POST_RESTORE_MAINTENANCE_MAX_RETRIES = 5; + private static final long POST_RESTORE_MAINTENANCE_RETRY_INTERVAL_MS = 60_000L; private static Map backupProvidersMap = new HashMap<>(); private List backupProviders; + private final List postRestoreMaintenanceTasks = 
Collections.synchronizedList(new ArrayList<>()); public AsyncJobDispatcher getAsyncJobDispatcher() { return asyncJobDispatcher; @@ -273,9 +277,10 @@ public List listBackupProviderOfferings(final Long zoneId, final } List allOfferings = new ArrayList<>(); List providers = getBackupProvidersForZone(zoneId); + final String canonicalProviderName = BackupProviderNameUtils.canonicalize(providerName); for (BackupProvider provider : providers) { - if (provider.getName().equalsIgnoreCase(providerName)) { + if (provider.getName().equalsIgnoreCase(canonicalProviderName)) { try { logger.debug("Listing external backup offerings for provider {} in zone {}", provider.getName(), zoneId); List offerings = provider.listBackupOfferings(zoneId); @@ -295,7 +300,7 @@ public List listBackupProviderOfferings(final Long zoneId, final public BackupOffering importBackupOffering(final ImportBackupOfferingCmd cmd) { validateBackupForZone(cmd.getZoneId()); - String providerName = cmd.getProvider(); + String providerName = BackupProviderNameUtils.canonicalize(cmd.getProvider()); if (StringUtils.isEmpty(providerName)) { throw new CloudRuntimeException("Provider name must be specified"); } @@ -696,7 +701,9 @@ public BackupSchedule configureBackupSchedule(CreateBackupScheduleCmd cmd) { final int maxBackups = validateAndGetDefaultBackupRetentionIfRequired(cmd.getMaxBackups(), offering, vm); - if ((!"nas".equals(offering.getProvider()) && !"commvault".equals(offering.getProvider())) && cmd.getQuiesceVM() != null) { + if (!BackupProviderNameUtils.isNasFamily(offering.getProvider()) && + !BackupProviderNameUtils.isCommvaultFamily(offering.getProvider()) && + cmd.getQuiesceVM() != null) { throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS, Commvault backup provider"); } @@ -898,7 +905,9 @@ public boolean createBackup(CreateBackupCmd cmd, Object job) throws ResourceAllo throw new CloudRuntimeException("The assigned backup offering does not allow ad-hoc user backup"); } 
- if ((!"nas".equals(offering.getProvider()) && !"commvault".equals(offering.getProvider())) && cmd.getQuiesceVM() != null) { + if (!BackupProviderNameUtils.isNasFamily(offering.getProvider()) && + !BackupProviderNameUtils.isCommvaultFamily(offering.getProvider()) && + cmd.getQuiesceVM() != null) { throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS, Commvault backup provider"); } @@ -938,7 +947,7 @@ public boolean createBackup(CreateBackupCmd cmd, Object job) throws ResourceAllo vmId, ApiCommandResourceType.VirtualMachine.toString(), true, 0); - Pair result = backupProvider.takeBackup(vm, cmd.getQuiesceVM()); + Pair result = backupProvider.takeBackup(vm, cmd.getQuiesceVM(), backupScheduleId); if (!result.first()) { throw new CloudRuntimeException("Failed to create VM backup"); } @@ -1016,37 +1025,150 @@ protected void deleteOldestBackupFromScheduleIfRequired(Long vmId, long backupSc return; } - logger.debug("Checking if it is required to delete the oldest backups from the schedule with ID [{}], to meet its retention requirement of [{}] backups.", backupScheduleId, backupScheduleVO.getMaxBackups()); + logger.debug("Checking if it is required to delete the oldest backup chains from the schedule with ID [{}], to meet its retention requirement of [{}] chains.", backupScheduleId, backupScheduleVO.getMaxBackups()); List backups = backupDao.listBySchedule(backupScheduleId); - int amountOfBackupsToDelete = backups.size() - backupScheduleVO.getMaxBackups(); - if (amountOfBackupsToDelete > 0) { - deleteExcessBackups(backups, amountOfBackupsToDelete, backupScheduleId); + List> backupChains = getBackupChainsForSchedule(backups); + int amountOfChainsToDelete = backupChains.size() - backupScheduleVO.getMaxBackups(); + if (amountOfChainsToDelete > 0) { + deleteExcessBackups(backupChains, amountOfChainsToDelete, backupScheduleId); } else { - logger.debug("Not required to delete any backups from the schedule [ID: {}]: [backups size: {}] and 
[retention: {}].", backupScheduleId, backups.size(), backupScheduleVO.getMaxBackups()); + logger.debug("Not required to delete any backup chains from the schedule [ID: {}]: [chain count: {}] and [retention: {}].", backupScheduleId, backupChains.size(), backupScheduleVO.getMaxBackups()); } } /** * Deletes a certain number of backups associated with a schedule. * - * @param backups List of backups associated with a schedule - * @param amountOfBackupsToDelete Number of backups to be deleted from the list of backups + * @param backupChains List of backup chains associated with a schedule + * @param amountOfChainsToDelete Number of backup chains to be deleted from the list of chains * @param backupScheduleId ID of the backup schedule associated with the backups */ - protected void deleteExcessBackups(List backups, int amountOfBackupsToDelete, long backupScheduleId) { - logger.debug("Deleting the [{}] oldest backups from the schedule [ID: {}].", amountOfBackupsToDelete, backupScheduleId); + protected void deleteExcessBackups(List> backupChains, int amountOfChainsToDelete, long backupScheduleId) { + String cleanupTarget = backupScheduleId > 0 ? 
String.format("schedule [ID: %s]", backupScheduleId) : "VM retention policy"; + logger.debug("Deleting up to [{}] oldest backup chains from {}.", amountOfChainsToDelete, cleanupTarget); - for (int i = 0; i < amountOfBackupsToDelete; i++) { - BackupVO backup = backups.get(i); - if (deleteBackup(backup.getId(), false)) { - String eventDescription = String.format("Successfully deleted backup for VM [ID: %s], suiting the retention specified in the backup schedule [ID: %s]", backup.getVmId(), backupScheduleId); - logger.info(eventDescription); - ActionEventUtils.onCompletedActionEvent( - User.UID_SYSTEM, backup.getAccountId(), EventVO.LEVEL_INFO, - EventTypes.EVENT_VM_BACKUP_DELETE, eventDescription, backup.getId(), ApiCommandResourceType.Backup.toString(), 0 - ); + int deletedChains = 0; + for (int i = 0; i < amountOfChainsToDelete && i < backupChains.size(); i++) { + if (deleteBackupChain(backupChains.get(i), backupScheduleId)) { + deletedChains++; } } + + if (deletedChains < amountOfChainsToDelete) { + logger.warn("Retention cleanup for {} deleted [{}] chains out of the requested [{}]. The remaining chains could not be deleted safely.", + cleanupTarget, deletedChains, amountOfChainsToDelete); + } + } + + private boolean deleteBackupChain(List chain, long backupScheduleId) { + if (CollectionUtils.isEmpty(chain)) { + return true; + } + + String cleanupTarget = backupScheduleId > 0 ? 
String.format("schedule [ID: %s]", backupScheduleId) : "VM retention policy"; + + List remainingBackups = chain.stream() + .sorted(Comparator.comparing(BackupVO::getDate)) + .collect(Collectors.toCollection(ArrayList::new)); + int deletedBackups = 0; + + while (!remainingBackups.isEmpty()) { + List leafBackups = getLeafBackups(remainingBackups); + if (CollectionUtils.isEmpty(leafBackups)) { + logger.warn("Could not find a deletable leaf while removing an obsolete backup chain for {}.", cleanupTarget); + return false; + } + + for (BackupVO backup : leafBackups) { + try { + if (!deleteBackup(backup.getId(), false)) { + logger.warn("Failed to delete backup [ID: {}, UUID: {}] while deleting a chain for {}.", backup.getId(), backup.getUuid(), cleanupTarget); + return false; + } + String eventDescription = backupScheduleId > 0 + ? String.format("Successfully deleted backup for VM [ID: %s], suiting the retention specified in the backup schedule [ID: %s]", backup.getVmId(), backupScheduleId) + : String.format("Successfully deleted backup for VM [ID: %s], suiting the retention specified by the VM backup schedules", backup.getVmId()); + logger.info(eventDescription); + ActionEventUtils.onCompletedActionEvent( + User.UID_SYSTEM, backup.getAccountId(), EventVO.LEVEL_INFO, + EventTypes.EVENT_VM_BACKUP_DELETE, eventDescription, backup.getId(), ApiCommandResourceType.Backup.toString(), 0 + ); + deletedBackups++; + remainingBackups.remove(backup); + } catch (Exception e) { + logger.warn("Skipping retention deletion for backup [ID: {}, UUID: {}] on {} because it is not currently safe to remove: {}", + backup.getId(), backup.getUuid(), cleanupTarget, e.getMessage()); + return false; + } + } + } + + logger.info("Deleted [{}] backups from an obsolete backup chain for {}.", deletedBackups, cleanupTarget); + return true; + } + + private List> getBackupChainsForSchedule(List backups) { + if (CollectionUtils.isEmpty(backups)) { + return new ArrayList<>(); + } + + Map backupsByUuid = 
backups.stream() + .collect(Collectors.toMap(BackupVO::getUuid, backup -> backup, (left, right) -> left, LinkedHashMap::new)); + Map> chainsByRootUuid = new LinkedHashMap<>(); + + for (BackupVO backup : backups) { + String rootUuid = getRootBackupUuid(backup, backupsByUuid); + chainsByRootUuid.computeIfAbsent(rootUuid, ignored -> new ArrayList<>()).add(backup); + } + + return chainsByRootUuid.values().stream() + .map(chain -> chain.stream() + .sorted(Comparator.comparing(BackupVO::getDate)) + .collect(Collectors.toCollection(ArrayList::new))) + .sorted(Comparator.comparing(chain -> chain.get(0).getDate())) + .collect(Collectors.toCollection(ArrayList::new)); + } + + private String getRootBackupUuid(BackupVO backup, Map backupsByUuid) { + BackupVO current = backup; + Set visitedBackups = new HashSet<>(); + + while (current != null && visitedBackups.add(current.getUuid())) { + String parentBackupUuid = getParentBackupUuid(current); + if (StringUtils.isBlank(parentBackupUuid) || !backupsByUuid.containsKey(parentBackupUuid)) { + return current.getUuid(); + } + current = backupsByUuid.get(parentBackupUuid); + } + + return backup.getUuid(); + } + + private List getLeafBackups(List backups) { + Set parentBackupUuids = backups.stream() + .map(this::getParentBackupUuid) + .filter(StringUtils::isNotBlank) + .collect(Collectors.toSet()); + + return backups.stream() + .filter(backup -> !parentBackupUuids.contains(backup.getUuid())) + .sorted(Comparator.comparing(BackupVO::getDate).reversed()) + .collect(Collectors.toCollection(ArrayList::new)); + } + + private String getParentBackupUuid(BackupVO backup) { + backupDao.loadDetails(backup); + Map details = backup.getDetails(); + if (details == null || details.isEmpty()) { + return null; + } + + return details.entrySet().stream() + .filter(entry -> StringUtils.endsWith(entry.getKey(), ".parent.backup.uuid")) + .map(Map.Entry::getValue) + .filter(StringUtils::isNotBlank) + .findFirst() + .orElse(null); } @Override @@ -1227,6 
+1349,7 @@ protected void tryRestoreVM(BackupVO backup, VMInstanceVO vm, BackupOffering off vm.getId(), ApiCommandResourceType.VirtualMachine.toString(),0); throw new CloudRuntimeException("Error restoring VM from backup with uuid " + backup.getUuid()); } + runPostRestoreMaintenance(backupProvider, vm, backup, false); // The restore process is executed by a backup provider outside of ACS, I am using the catch-all (Exception) to // ensure that no provider-side exception is missed. Therefore, we have a proper handling of exceptions, and rollbacks if needed. } catch (Exception e) { @@ -1498,7 +1621,8 @@ public boolean restoreBackupToVM(final Long backupId, final Long vmId) throws Cl String host = null; String dataStore = null; - if (!"nas".equals(offering.getProvider()) && !"commvault".equals(offering.getProvider())) { + if (!BackupProviderNameUtils.isNasFamily(offering.getProvider()) && + !BackupProviderNameUtils.isCommvaultFamily(offering.getProvider())) { Pair restoreInfo = getRestoreVolumeHostAndDatastore(vm); host = restoreInfo.first().getPrivateIpAddress(); dataStore = restoreInfo.second().getUuid(); @@ -1576,7 +1700,8 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, BackupProvider backupProvider = getBackupProvider(offering.getProvider()); VolumeVO backedUpVolume = volumeDao.findByUuid(backedUpVolumeUuid); Pair restoreInfo; - if ((!"nas".equals(offering.getProvider()) && !"commvault".equals(offering.getProvider())) || backedUpVolume == null) { + if ((!BackupProviderNameUtils.isNasFamily(offering.getProvider()) && + !BackupProviderNameUtils.isCommvaultFamily(offering.getProvider())) || backedUpVolume == null) { restoreInfo = getRestoreVolumeHostAndDatastore(vm); } else { restoreInfo = getRestoreVolumeHostAndDatastoreForNas(vm, backedUpVolume); @@ -1599,9 +1724,16 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, throw new CloudRuntimeException(String.format("Error restoring volume [%s] of VM 
[%s] to host [%s] using backup provider [%s] due to: [%s].", backedUpVolumeUuid, vm.getUuid(), host.getUuid(), backupProvider.getName(), result.second())); } - if (!attachVolumeToVM(vm.getDataCenterId(), result.second(), backupVolumeInfo, - backedUpVolumeUuid, vm, datastore.getUuid(), backup)) { - throw new CloudRuntimeException(String.format("Error attaching volume [%s] to VM [%s].", backedUpVolumeUuid, vm.getUuid())); + try { + if (!attachVolumeToVM(vm.getDataCenterId(), result.second(), backupVolumeInfo, + backedUpVolumeUuid, vm, datastore.getUuid(), backup)) { + cleanupRestoredVolumeAfterAttachFailure(result.second()); + throw new CloudRuntimeException(String.format("Error attaching volume [%s] to VM [%s].", backedUpVolumeUuid, vm.getUuid())); + } + runPostRestoreMaintenance(backupProvider, vm, backup, true); + } catch (Exception e) { + cleanupRestoredVolumeAfterAttachFailure(result.second()); + throw e; } return true; } @@ -1618,6 +1750,8 @@ protected Pair restoreBackedUpVolume(final Backup.VolumeInfo ba result = backupProvider.restoreBackedUpVolume(backup, backupVolumeInfo, hostData, datastoreData, new Pair<>(vm.getName(), vm.getState())); if (BooleanUtils.isTrue(result.first())) { + logger.info("Successfully restored volume [UUID: {}] using host [{}] and datastore [{}] through backup provider [{}]. 
Result details: [{}]", + backupVolumeInfo.getUuid(), hostData, datastoreData, backupProvider.getName(), result.second()); return result; } } catch (Exception e) { @@ -1629,6 +1763,47 @@ protected Pair restoreBackedUpVolume(final Backup.VolumeInfo ba return result; } + private void runPostRestoreMaintenance(final BackupProvider backupProvider, final VirtualMachine vm, final Backup backup, final boolean volumeOnly) { + if (!backupProvider.supportsPostRestoreMaintenance()) { + return; + } + try { + backupProvider.runPostRestoreMaintenance(vm, backup, volumeOnly); + } catch (Exception e) { + logger.warn("Post-restore maintenance failed for provider {} on VM {} and backup {}: {}", backupProvider.getName(), + vm != null ? vm.getUuid() : null, backup != null ? backup.getUuid() : null, e.getMessage(), e); + schedulePostRestoreMaintenanceRetry(backupProvider, vm, backup, volumeOnly); + } + } + + private void schedulePostRestoreMaintenanceRetry(final BackupProvider backupProvider, final VirtualMachine vm, final Backup backup, final boolean volumeOnly) { + if (backupProvider == null || vm == null || backup == null) { + return; + } + synchronized (postRestoreMaintenanceTasks) { + postRestoreMaintenanceTasks.add(new PostRestoreMaintenanceTask(backupProvider.getName(), vm.getId(), backup.getId(), volumeOnly, 1, + System.currentTimeMillis() + POST_RESTORE_MAINTENANCE_RETRY_INTERVAL_MS)); + } + } + + private static final class PostRestoreMaintenanceTask { + private final String providerName; + private final long vmId; + private final long backupId; + private final boolean volumeOnly; + private int retryCount; + private long nextAttemptEpochMs; + + private PostRestoreMaintenanceTask(String providerName, long vmId, long backupId, boolean volumeOnly, int retryCount, long nextAttemptEpochMs) { + this.providerName = providerName; + this.vmId = vmId; + this.backupId = backupId; + this.volumeOnly = volumeOnly; + this.retryCount = retryCount; + this.nextAttemptEpochMs = 
nextAttemptEpochMs; + } + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_BACKUP_DELETE, eventDescription = "deleting VM backup", async = true) public boolean deleteBackup(final Long backupId, final Boolean forced) { @@ -1657,6 +1832,7 @@ public boolean deleteBackup(final Long backupId, final Boolean forced) { Long backupSize = backup.getSize() != null ? backup.getSize() : 0L; resourceLimitMgr.decrementResourceCount(backup.getAccountId(), Resource.ResourceType.backup_storage, backupSize); if (backupDao.remove(backup.getId())) { + backupDetailsDao.removeDetails(backup.getId()); checkAndGenerateUsageForLastBackupDeletedAfterOfferingRemove(vm, backup); return true; } else { @@ -1723,6 +1899,22 @@ private boolean attachVolumeToVM(Long zoneId, String restoredVolumeLocation, Bac } } + private void cleanupRestoredVolumeAfterAttachFailure(String restoredVolumeLocation) { + if (StringUtils.isBlank(restoredVolumeLocation)) { + return; + } + VolumeVO restoredVolume = volumeDao.findByUuid(restoredVolumeLocation); + if (restoredVolume == null) { + return; + } + try { + Account caller = CallContext.current() != null ? 
CallContext.current().getCallingAccount() : accountDao.findById(restoredVolume.getAccountId()); + volumeApiService.deleteVolume(restoredVolume.getId(), caller); + } catch (Exception e) { + logger.warn("Failed to cleanup restored volume {} after attach failure", restoredVolumeLocation, e); + } + } + private void checkAndGenerateUsageForLastBackupDeletedAfterOfferingRemove(VirtualMachine vm, Backup backup) { if (vm != null && (vm.getBackupOfferingId() == null || vm.getBackupOfferingId() != backup.getBackupOfferingId())) { @@ -1755,7 +1947,18 @@ public void validateBackupForZone(final Long zoneId) { @Override public List listBackupProviders() { - return backupProviders; + final List providers = new ArrayList<>(); + final Set seenProviders = new HashSet<>(); + for (final BackupProvider provider : backupProviders) { + if (provider == null) { + continue; + } + final String displayName = BackupProviderNameUtils.toDisplayName(provider.getName()); + if (seenProviders.add(displayName)) { + providers.add(provider); + } + } + return providers; } @Override @@ -1785,7 +1988,12 @@ public List getBackupProvidersForZone(final Long zoneId) { if (!StringUtils.isEmpty(trimmedName)) { try { BackupProvider provider = getBackupProvider(trimmedName); - providers.add(provider); + boolean exists = providers.stream().anyMatch(p -> + BackupProviderNameUtils.toDisplayName(p.getName()).equalsIgnoreCase( + BackupProviderNameUtils.toDisplayName(provider.getName()))); + if (!exists) { + providers.add(provider); + } } catch (CloudRuntimeException e) { logger.warn("Failed to load backup provider: " + trimmedName + " for zone: " + zoneId, e); } @@ -1801,10 +2009,11 @@ public BackupProvider getBackupProvider(final String name) { if (StringUtils.isEmpty(name)) { throw new CloudRuntimeException("Invalid backup provider name provided"); } - if (!backupProvidersMap.containsKey(name)) { - throw new CloudRuntimeException("Failed to find backup provider by the name: " + name); - } - return 
backupProvidersMap.get(name); + final String canonicalName = BackupProviderNameUtils.canonicalize(name); + if (!backupProvidersMap.containsKey(canonicalName)) { + throw new CloudRuntimeException("Failed to find backup provider by the name: " + canonicalName); + } + return backupProvidersMap.get(canonicalName); } @Override @@ -1858,6 +2067,8 @@ public ConfigKey[] getConfigKeys() { BackupProviderPlugin, BackupSyncPollingInterval, BackupEnableAttachDetachVolumes, + KvmIncrementalBackup, + BackupChainSize, DefaultMaxAccountBackups, DefaultMaxAccountBackupStorage, DefaultMaxProjectBackups, @@ -2083,6 +2294,7 @@ protected void runInContext() { if (logger.isTraceEnabled()) { logger.trace("Backup sync background task is running..."); } + processPostRestoreMaintenanceTasks(); for (final DataCenter dataCenter : dataCenterDao.listAllZones()) { if (dataCenter == null || isDisabled(dataCenter.getId())) { logger.debug("Backup Sync Task is not enabled in zone [{}]. Skipping this zone!", dataCenter == null ? "NULL Zone!" 
: dataCenter); @@ -2092,8 +2304,13 @@ protected void runInContext() { List providers = getBackupProvidersForZone(dataCenter.getId()); for (BackupProvider backupProvider : providers) { try { - backupProvider.syncBackupStorageStats(dataCenter.getId()); - syncOutOfBandBackups(backupProvider, dataCenter); + if (backupProvider.supportsBackgroundSync()) { + backupProvider.syncBackupStorageStats(dataCenter.getId()); + syncOutOfBandBackups(backupProvider, dataCenter); + } + if (backupProvider.supportsBackgroundChainValidation()) { + backupProvider.validateChains(dataCenter.getId()); + } updateBackupUsageRecords(backupProvider, dataCenter); } catch (Exception e) { logger.error("Failed to sync backups for provider {} in zone {}: {}", backupProvider.getName(), dataCenter.getId(), e.getMessage(), e); @@ -2105,8 +2322,49 @@ protected void runInContext() { } } + private void processPostRestoreMaintenanceTasks() { + synchronized (postRestoreMaintenanceTasks) { + if (postRestoreMaintenanceTasks.isEmpty()) { + return; + } + final long now = System.currentTimeMillis(); + final Iterator iterator = postRestoreMaintenanceTasks.iterator(); + while (iterator.hasNext()) { + final PostRestoreMaintenanceTask task = iterator.next(); + if (task.nextAttemptEpochMs > now) { + continue; + } + try { + final BackupProvider backupProvider = getBackupProvider(task.providerName); + final BackupVO backup = backupDao.findByIdIncludingRemoved(task.backupId); + final VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(task.vmId); + if (backup == null || vm == null || !backupProvider.supportsPostRestoreMaintenance()) { + iterator.remove(); + continue; + } + backupProvider.runPostRestoreMaintenance(vm, backup, task.volumeOnly); + iterator.remove(); + } catch (Exception e) { + if (task.retryCount >= POST_RESTORE_MAINTENANCE_MAX_RETRIES) { + logger.warn("Exhausted post-restore maintenance retries for provider {}, VM {}, backup {} due to: {}", + task.providerName, task.vmId, task.backupId, 
e.getMessage(), e); + iterator.remove(); + continue; + } + task.retryCount++; + task.nextAttemptEpochMs = now + (POST_RESTORE_MAINTENANCE_RETRY_INTERVAL_MS * task.retryCount); + logger.warn("Post-restore maintenance retry {} scheduled for provider {}, VM {}, backup {} due to: {}", + task.retryCount, task.providerName, task.vmId, task.backupId, e.getMessage()); + } + } + } + } + private void syncOutOfBandBackups(final BackupProvider backupProvider, DataCenter dataCenter) { - if (backupProvider.getName().equalsIgnoreCase("commvault")) { + if (!backupProvider.supportsOutOfBandBackupSync()) { + return; + } + if (backupProvider.supportsProviderManagedBackupAgents()) { boolean check = backupProvider.checkBackupAgent(dataCenter.getId()); if (!check) { boolean install = false; @@ -2122,7 +2380,9 @@ private void syncOutOfBandBackups(final BackupProvider backupProvider, DataCente logger.debug("Can't find any VM to sync backups in zone {}", dataCenter); return; } - backupProvider.syncBackupMetrics(dataCenter.getId()); + if (backupProvider.supportsBackupMetricsSync()) { + backupProvider.syncBackupMetrics(dataCenter.getId()); + } for (final VMInstanceVO vm : vms) { try { logger.debug(String.format("Trying to sync backups of VM [%s] using backup provider [%s].", vm, backupProvider.getName())); @@ -2320,7 +2580,7 @@ public BackupOffering updateBackupOffering(UpdateBackupOfferingCmd updateBackupO if (retentionPeriod != null) { final BackupProvider provider = getBackupProvider(providerName); - if (!provider.getName().equalsIgnoreCase("commvault")){ + if (!provider.supportsRetentionPlanUpdate()) { throw new CloudRuntimeException("Failed to update backup offering, Because the backup offering provider is not set to commvault."); } boolean result = provider.updateBackupPlan(zoneId, retentionPeriod, externalId); diff --git a/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java b/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java index 
34232c6c7c3c..045c6dbf5ee1 100644 --- a/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java +++ b/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java @@ -17,6 +17,8 @@ // under the License. package org.apache.cloudstack.backup; +import java.util.Date; + import com.cloud.api.query.dao.UserVmJoinDao; import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.alert.AlertManager; @@ -662,7 +664,7 @@ public void createBackupTestCreateScheduledBackup() throws ResourceAllocationExc when(backup.getId()).thenReturn(backupId); when(backup.getSize()).thenReturn(newBackupSize); when(backupProvider.getName()).thenReturn("testbackupprovider"); - when(backupProvider.takeBackup(vmInstanceVOMock, null)).thenReturn(new Pair<>(true, backup)); + when(backupProvider.takeBackup(vmInstanceVOMock, null, scheduleId)).thenReturn(new Pair<>(true, backup)); Map backupProvidersMap = new HashMap<>(); backupProvidersMap.put(backupProvider.getName().toLowerCase(), backupProvider); ReflectionTestUtils.setField(backupManager, "backupProvidersMap", backupProvidersMap); @@ -1759,6 +1761,10 @@ public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenRetentio @Test public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenAmountOfBackupsToBeDeletedIsLessThanOne() { List backups = List.of(Mockito.mock(BackupVO.class), Mockito.mock(BackupVO.class)); + when(backups.get(0).getUuid()).thenReturn("backup-1"); + when(backups.get(1).getUuid()).thenReturn("backup-2"); + when(backups.get(0).getDate()).thenReturn(new Date(1L)); + when(backups.get(1).getDate()).thenReturn(new Date(2L)); Mockito.when(backupScheduleDao.findById(1L)).thenReturn(backupScheduleVOMock); Mockito.when(backupScheduleVOMock.getMaxBackups()).thenReturn(2); Mockito.when(backupDao.listBySchedule(1L)).thenReturn(backups); @@ -1769,6 +1775,10 @@ public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenAmountOf @Test public void 
deleteOldestBackupFromScheduleIfRequiredTestDeleteBackupsWhenRequired() { List backups = List.of(Mockito.mock(BackupVO.class), Mockito.mock(BackupVO.class)); + when(backups.get(0).getUuid()).thenReturn("backup-1"); + when(backups.get(1).getUuid()).thenReturn("backup-2"); + when(backups.get(0).getDate()).thenReturn(new Date(1L)); + when(backups.get(1).getDate()).thenReturn(new Date(2L)); Mockito.when(backupScheduleDao.findById(1L)).thenReturn(backupScheduleVOMock); Mockito.when(backupScheduleVOMock.getMaxBackups()).thenReturn(1); Mockito.when(backupDao.listBySchedule(1L)).thenReturn(backups); @@ -1780,14 +1790,38 @@ public void deleteOldestBackupFromScheduleIfRequiredTestDeleteBackupsWhenRequire @Test public void deleteExcessBackupsTestEnsureBackupsAreDeletedWhenMethodIsCalled() { try (MockedStatic actionEventUtils = Mockito.mockStatic(ActionEventUtils.class)) { - List backups = List.of(Mockito.mock(BackupVO.class), - Mockito.mock(BackupVO.class), - Mockito.mock(BackupVO.class)); - - Mockito.when(backups.get(0).getId()).thenReturn(1L); - Mockito.when(backups.get(1).getId()).thenReturn(2L); - Mockito.when(backups.get(0).getAccountId()).thenReturn(1L); - Mockito.when(backups.get(1).getAccountId()).thenReturn(2L); + BackupVO backup1 = Mockito.mock(BackupVO.class); + BackupVO backup2 = Mockito.mock(BackupVO.class); + BackupVO backup3 = Mockito.mock(BackupVO.class); + List chain = List.of(backup1, backup2, backup3); + List> backupChains = List.of(chain); + + Mockito.when(backup1.getId()).thenReturn(1L); + Mockito.when(backup2.getId()).thenReturn(2L); + Mockito.when(backup3.getId()).thenReturn(3L); + Mockito.when(backup1.getUuid()).thenReturn("full-1"); + Mockito.when(backup2.getUuid()).thenReturn("inc-1"); + Mockito.when(backup3.getUuid()).thenReturn("inc-2"); + Mockito.when(backup1.getDate()).thenReturn(new Date(1L)); + Mockito.when(backup2.getDate()).thenReturn(new Date(2L)); + Mockito.when(backup3.getDate()).thenReturn(new Date(3L)); + 
Mockito.when(backup1.getAccountId()).thenReturn(1L); + Mockito.when(backup2.getAccountId()).thenReturn(2L); + Mockito.when(backup3.getAccountId()).thenReturn(3L); + Mockito.when(backup1.getVmId()).thenReturn(1L); + Mockito.when(backup2.getVmId()).thenReturn(1L); + Mockito.when(backup3.getVmId()).thenReturn(1L); + Mockito.doAnswer(invocation -> { + BackupVO backup = invocation.getArgument(0); + if ("full-1".equals(backup.getUuid())) { + backup.setDetails(Map.of()); + } else if ("inc-1".equals(backup.getUuid())) { + backup.setDetails(Map.of("test.parent.backup.uuid", "full-1")); + } else { + backup.setDetails(Map.of("test.parent.backup.uuid", "inc-1")); + } + return null; + }).when(backupDao).loadDetails(Mockito.any(BackupVO.class)); Mockito.doReturn(true).when(backupManager).deleteBackup(Mockito.anyLong(), Mockito.eq(false)); actionEventUtils.when(() -> ActionEventUtils.onStartedActionEvent( @@ -1799,8 +1833,8 @@ public void deleteExcessBackupsTestEnsureBackupsAreDeletedWhenMethodIsCalled() { Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyInt())).thenReturn(2L); - backupManager.deleteExcessBackups(backups, 2, 1L); - Mockito.verify(backupManager, times(2)).deleteBackup(Mockito.anyLong(), Mockito.eq(false)); + backupManager.deleteExcessBackups(backupChains, 1, 1L); + Mockito.verify(backupManager, times(3)).deleteBackup(Mockito.anyLong(), Mockito.eq(false)); } } diff --git a/ui/src/components/view/DeployVMFromBackup.vue b/ui/src/components/view/DeployVMFromBackup.vue index b5769c3913a0..a406028914f7 100644 --- a/ui/src/components/view/DeployVMFromBackup.vue +++ b/ui/src/components/view/DeployVMFromBackup.vue @@ -475,9 +475,10 @@
0 ? this.preFillContent : {} + if (Array.isArray(this.dataPreFill.networkids) && this.dataPreFill.networkids.length > 0) { + this.form.networkids = [...this.dataPreFill.networkids] + if (!this.form.defaultnetworkid) { + this.defaultnetworkid = this.dataPreFill.networkids[0] + this.form.defaultnetworkid = this.dataPreFill.networkids[0] + } + } this.showOverrideDiskOfferingOption = this.dataPreFill.overridediskoffering if (this.dataPreFill.isIso) { @@ -1660,6 +1671,12 @@ export default { const param = this.params.networks this.fetchOptions(param, 'networks') }, + resetDefaultNetworkSelectionState () { + this.defaultnetworkid = '' + this.hasInitializedDefaultNetworkSelection = false + this.networkConfig = [] + this.form.defaultnetworkid = undefined + }, resetData () { this.vm = { name: null, @@ -1681,11 +1698,13 @@ export default { disksize: null } this.zoneSelected = false + this.hasInitializedDefaultNetworkSelection = false this.formRef.value.resetFields() this.fetchData() }, updateFieldValue (name, value) { if (name === 'templateid') { + this.resetDefaultNetworkSelectionState() this.tabKey = 'templateid' this.form.templateid = value this.form.isoid = null @@ -1723,6 +1742,7 @@ export default { } } } else if (name === 'isoid') { + this.resetDefaultNetworkSelectionState() this.templateConfigurations = [] this.selectedTemplateConfiguration = {} this.templateNics = [] @@ -1776,10 +1796,36 @@ export default { }, updateNetworks (ids) { this.form.networkids = ids + this.networks = this.getSelectedNetworksWithExistingConfig( + _.filter(this.options.networks, (option) => _.includes(ids, option.id)) + ) + if (!this.hasInitializedDefaultNetworkSelection && ids && ids.length > 0 && !this.defaultnetworkid) { + this.hasInitializedDefaultNetworkSelection = true + this.updateDefaultNetworks(ids[0]) + return + } + if (!ids || ids.length === 0 || !ids.includes(this.defaultnetworkid)) { + this.updateDefaultNetworks('') + } }, updateDefaultNetworks (id) { this.defaultnetworkid = id 
this.form.defaultnetworkid = id + + if (!id) { + return + } + + const existingIds = Array.isArray(this.form.networkids) ? [...this.form.networkids] : [] + + if (!existingIds.includes(id)) { + existingIds.unshift(id) + this.form.networkids = existingIds + } + + this.networks = this.getSelectedNetworksWithExistingConfig( + _.filter(this.options.networks, option => _.includes(this.form.networkids, option.id)) + ) }, updateNetworkConfig (networks) { this.networkConfig = networks @@ -2289,8 +2335,16 @@ export default { }, onTabChange (key, type) { this[type] = key + this.resetDefaultNetworkSelectionState() + if (key === 'isoid') { this.fetchAllIsos() + } else if (key === 'templateid') { + this.fetchAllTemplates() + } + + if (this.form.networkids && this.form.networkids.length > 0) { + this.updateNetworks(this.form.networkids) } }, fetchIsos (isoFilter, params) { diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index ab7de54efd63..a55cf589ad2a 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -278,6 +278,7 @@ export default { docHelp: 'adminguide/virtual_machines.html#creating-vm-backups', dataView: true, show: (record) => { return record.backupofferingid }, + disabled: (record) => { return record.hostcontrolstate === 'Offline' }, popup: true, component: shallowRef(defineAsyncComponent(() => import('@/views/compute/StartBackup.vue'))) }, @@ -289,6 +290,7 @@ export default { dataView: true, popup: true, show: (record) => { return record.backupofferingid }, + disabled: (record) => { return record.hostcontrolstate === 'Offline' }, component: shallowRef(defineAsyncComponent(() => import('@/views/compute/BackupScheduleWizard.vue'))), mapping: { virtualmachineid: { @@ -308,6 +310,7 @@ export default { dataView: true, args: ['virtualmachineid', 'forced'], show: (record) => { return record.backupofferingid }, + disabled: (record) => { return record.hostcontrolstate === 'Offline' }, mapping: { virtualmachineid: 
{ value: (record, params) => { return record.id } diff --git a/ui/src/utils/plugins.js b/ui/src/utils/plugins.js index f8cc200af390..ee1d8dd062d9 100644 --- a/ui/src/utils/plugins.js +++ b/ui/src/utils/plugins.js @@ -678,7 +678,7 @@ export const backupUtilPlugin = { if (!provider && typeof provider !== 'string') { return false } - return ['nas', 'commvault'].includes(provider.toLowerCase()) + return ['nas', 'commvault', 'ablestack-nas', 'ablestack-commvault'].includes(provider.toLowerCase()) } } } diff --git a/ui/src/views/compute/StartBackup.vue b/ui/src/views/compute/StartBackup.vue index 96c337ab0bd0..496a10598719 100644 --- a/ui/src/views/compute/StartBackup.vue +++ b/ui/src/views/compute/StartBackup.vue @@ -88,7 +88,7 @@ export default { }, computed: { canSetNameAndDescription () { - return ['nas', 'dummy'].includes(this.provider) + return ['nas', 'commvault', 'dummy', 'ablestack-nas', 'ablestack-commvault'].includes(this.provider) } }, methods: { @@ -100,7 +100,6 @@ export default { this.loading = true getAPI('listBackupOfferings', { id: this.resource.backupofferingid }).then(json => { this.provider = json.listbackupofferingsresponse.backupoffering[0].provider - console.log('this.provider', this.provider) }).finally(() => { this.loading = false }) diff --git a/ui/src/views/compute/wizard/NetworkConfiguration.vue b/ui/src/views/compute/wizard/NetworkConfiguration.vue index a125af394a00..88e099c0e795 100644 --- a/ui/src/views/compute/wizard/NetworkConfiguration.vue +++ b/ui/src/views/compute/wizard/NetworkConfiguration.vue @@ -31,6 +31,7 @@ :dataSource="dataItems" :pagination="false" :rowSelection="rowSelection" + :customRow="onClickRow" :rowKey="record => record.id" size="middle" :scroll="{ y: 225 }"> diff --git a/ui/src/views/compute/wizard/NetworkSelection.vue b/ui/src/views/compute/wizard/NetworkSelection.vue index 97db6edc294b..b340d187f96a 100644 --- a/ui/src/views/compute/wizard/NetworkSelection.vue +++ b/ui/src/views/compute/wizard/NetworkSelection.vue 
@@ -227,6 +227,7 @@ export default { type: 'checkbox', selectedRowKeys: this.selectedRowKeys, onChange: (rows) => { + this.selectedRowKeys = rows this.$emit('select-network-item', rows) } } @@ -246,9 +247,15 @@ export default { } }, watch: { - value (newValue, oldValue) { - if (newValue && !_.isEqual(newValue, oldValue)) { - this.selectedRowKeys = newValue + value: { + immediate: true, + deep: true, + handler (newValue, oldValue) { + if (Array.isArray(newValue) && !_.isEqual(newValue, oldValue)) { + this.selectedRowKeys = [...newValue] + } else if (!newValue || newValue.length === 0) { + this.selectedRowKeys = [] + } } }, loading () { @@ -261,11 +268,7 @@ export default { }) if (!this.loading) { if (this.preFillContent.networkids) { - const validNetworkIds = this.preFillContent.networkids.filter(networkId => - this.items.some(item => item.id === networkId) - ) - this.selectedRowKeys = validNetworkIds - this.$emit('select-network-item', validNetworkIds) + this.applyPreFillSelection() } else { if (this.items && this.items.length > 0) { if (this.oldZoneId === this.zoneId) { @@ -284,6 +287,10 @@ export default { items: { deep: true, handler () { + if (this.preFillContent.networkids && this.items && this.items.length > 0) { + this.applyPreFillSelection() + return + } if (this.items && this.items.length > 0 && this.networksBeforeCreate) { var user = this.$store.getters.userInfo @@ -326,6 +333,13 @@ export default { }, inject: ['vmFetchNetworks'], methods: { + applyPreFillSelection () { + const validNetworkIds = this.preFillContent.networkids.filter(networkId => + this.items.some(item => item.id === networkId) + ) + this.selectedRowKeys = validNetworkIds + this.$emit('select-network-item', validNetworkIds) + }, fetchVPCs () { const projectId = store?.getters?.project?.id || null if (!projectId) {