From a787622ad20683329a72872b9357ab77ce0951f1 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 16 Dec 2025 11:18:38 -0800 Subject: [PATCH 01/14] Populate instance config and data node config during server startup --- .../ambry/clustermap/ClusterParticipant.java | 5 + .../ambry/clustermap/DataNodeConfig.java | 28 ++ .../github/ambry/config/ClusterMapConfig.java | 56 +++ .../ambry/clustermap/DiskInfoCollector.java | 235 +++++++++++++ .../github/ambry/clustermap/HelixFactory.java | 45 +++ .../ambry/clustermap/HelixParticipant.java | 29 ++ .../clustermap/LiStatefulSetMetadata.java | 132 +++++++ .../clustermap/NimbusServiceMetadata.java | 66 ++++ .../clustermap/DiskInfoCollectorTest.java | 242 +++++++++++++ .../ambry/clustermap/HelixFactoryTest.java | 322 ++++++++++++++++++ .../clustermap/HelixParticipantTest.java | 216 ++++++++++++ .../clustermap/LiStatefulSetMetadataTest.java | 180 ++++++++++ .../clustermap/NimbusServiceMetadataTest.java | 282 +++++++++++++++ .../com/github/ambry/server/AmbryServer.java | 5 +- .../github/ambry/server/AmbryServerTest.java | 47 +++ .../java/com/github/ambry/utils/Utils.java | 45 +++ .../com/github/ambry/utils/UtilsTest.java | 138 ++++++++ 17 files changed, 2071 insertions(+), 2 deletions(-) create mode 100644 ambry-clustermap/src/main/java/com/github/ambry/clustermap/DiskInfoCollector.java create mode 100644 ambry-clustermap/src/main/java/com/github/ambry/clustermap/LiStatefulSetMetadata.java create mode 100644 ambry-clustermap/src/main/java/com/github/ambry/clustermap/NimbusServiceMetadata.java create mode 100644 ambry-clustermap/src/test/java/com/github/ambry/clustermap/DiskInfoCollectorTest.java create mode 100644 ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixFactoryTest.java create mode 100644 ambry-clustermap/src/test/java/com/github/ambry/clustermap/LiStatefulSetMetadataTest.java create mode 100644 ambry-clustermap/src/test/java/com/github/ambry/clustermap/NimbusServiceMetadataTest.java diff --git a/ambry-api/src/main/java/com/github/ambry/clustermap/ClusterParticipant.java b/ambry-api/src/main/java/com/github/ambry/clustermap/ClusterParticipant.java index 9361c3425c..2335df599d 100644 --- a/ambry-api/src/main/java/com/github/ambry/clustermap/ClusterParticipant.java +++ b/ambry-api/src/main/java/com/github/ambry/clustermap/ClusterParticipant.java @@ -45,6 +45,11 @@ void participateAndBlockStateTransition(List ambryStatsReports */ void unblockStateTransition(); + /** + * Populates initial data node config. + */ + boolean populateDataNodeConfig(); + /** * Set the sealed state of the given replica. * @param replicaId the {@link ReplicaId} whose sealed state will be updated. diff --git a/ambry-api/src/main/java/com/github/ambry/clustermap/DataNodeConfig.java b/ambry-api/src/main/java/com/github/ambry/clustermap/DataNodeConfig.java index 20e5c2bf36..6131b7ba65 100644 --- a/ambry-api/src/main/java/com/github/ambry/clustermap/DataNodeConfig.java +++ b/ambry-api/src/main/java/com/github/ambry/clustermap/DataNodeConfig.java @@ -42,6 +42,25 @@ public class DataNodeConfig { private final Map diskConfigs = new TreeMap<>(); private final Map> extraMapFields = new HashMap<>(); + /** + * @param datacenterName the datacenter this server is in. + * @param hostName the host name of the server. + * @param http2Port the HTTP2 port, or {@code null} if the server does not have one. + * @param port the port of the server. + * @param sslPort the ssl port, or {@code null} if the server does not have one. 
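+   * <p>Illustrative usage only (the host name, ports and capacity below are example values; the ports mirror the new clustermap.default.* settings):
+   * <pre>{@code
+   *   DataNodeConfig config = new DataNodeConfig("DC1", "localhost", 15388, 15088, 15288);
+   *   config.addDiskConfig("/mnt/u001/ambrydata", new DiskConfig(HardwareState.AVAILABLE, 21L * 1024 * 1024 * 1024 * 1024));
+   * }</pre>
+   * The remaining fields (instanceName, rackId, xid) are initialized to empty/zero defaults by this constructor.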
+ */ + public DataNodeConfig(String datacenterName, String hostName, Integer http2Port, int port, + Integer sslPort) { + this.datacenterName = datacenterName; + this.hostName = hostName; + this.http2Port = http2Port; + this.port = port; + this.sslPort = sslPort; + this.instanceName = ""; + this.rackId = ""; + this.xid = 0; + } + /** * @param instanceName a name that can be used as a unique key for this server. * @param hostName the host name of the server. @@ -156,6 +175,15 @@ Map getDiskConfigs() { return diskConfigs; } + /** + * Add a disk configuration to this DataNode. + * @param mountPath the mount path of the disk + * @param diskConfig the disk configuration + */ + public void addDiskConfig(String mountPath, DiskConfig diskConfig) { + diskConfigs.put(mountPath, diskConfig); + } + /** * This can be used for extra fields that are not recognized by {@link DataNodeConfigSource} but still need to be * read from or written to the source of truth. This should be used sparingly and is mainly provided for legacy diff --git a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java index 893c3af17a..e121afa193 100644 --- a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java +++ b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java @@ -417,6 +417,55 @@ public class ClusterMapConfig { @Default("false") public final boolean enableFileCopyProtocol; + /** + * Path to the nimbus service metadata file containing instance information + */ + @Config("clustermap.nimbus.service.metadata.file.path") + @Default("./etc/metadata/nimbus-service.json") + public final String nimbusServiceMetadataFilePath; + + /** + * Path to the LiStatefulSet metadata file containing Kubernetes StatefulSet information + */ + @Config("clustermap.listatefulset.metadata.file.path") + @Default("./etc/metadata/liStatefulSet.json") + public final String liStatefulSetMetadataFilePath; + + /** + * Percentage of disk space to reserve, default to 5% + */ + @Config("clustermap.reserve.disk.space.percentage") + @Default("0.05") + public final double clusterMapReserveDiskSpacePercentage; + + /** + * Prefix for resource tags in cluster map + */ + @Config("clustermap.resource.tag.prefix") + @Default("TAG_") + public final String clusterMapResourceTagPrefix; + + /** + * Default HTTP2 port for cluster nodes + */ + @Config("clustermap.default.http2.port") + @Default("15388") + public final int clusterMapDefaultHttp2Port; + + /** + * Default port for cluster nodes + */ + @Config("clustermap.default.port") + @Default("15088") + public final int clusterMapDefaultPort; + + /** + * Default SSL port for cluster nodes + */ + @Config("clustermap.default.ssl.port") + @Default("15288") + public final int clusterMapDefaultSslPort; + public ClusterMapConfig(VerifiableProperties verifiableProperties) { clusterMapFixedTimeoutDatanodeErrorThreshold = verifiableProperties.getIntInRange("clustermap.fixedtimeout.datanode.error.threshold", 3, 1, 100); @@ -508,5 +557,12 @@ public ClusterMapConfig(VerifiableProperties verifiableProperties) { routerPutSuccessTarget = verifiableProperties.getIntInRange(ROUTER_PUT_SUCCESS_TARGET, 2, 1, Integer.MAX_VALUE); clusterMapPartitionFilteringEnabled = verifiableProperties.getBoolean(PARTITION_FILTERING_ENABLED, false); enableFileCopyProtocol = verifiableProperties.getBoolean(ENABLE_FILE_COPY_PROTOCOL, false); + nimbusServiceMetadataFilePath = 
verifiableProperties.getString("clustermap.nimbus.service.metadata.file.path"); + liStatefulSetMetadataFilePath = verifiableProperties.getString("clustermap.listatefulset.metadata.file.path"); + clusterMapReserveDiskSpacePercentage = verifiableProperties.getDouble("clustermap.reserve.disk.space.percentage"); + clusterMapResourceTagPrefix = verifiableProperties.getString("clustermap.resource.tag.prefix"); + clusterMapDefaultHttp2Port = verifiableProperties.getInt("clustermap.default.http2.port"); + clusterMapDefaultPort = verifiableProperties.getInt("clustermap.default.port"); + clusterMapDefaultSslPort = verifiableProperties.getInt("clustermap.default.ssl.port"); } } diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/DiskInfoCollector.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/DiskInfoCollector.java new file mode 100644 index 0000000000..8186320802 --- /dev/null +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/DiskInfoCollector.java @@ -0,0 +1,235 @@ +/* + * Copyright 2024 LinkedIn Corp. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + */ + +package com.github.ambry.clustermap; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Collects disk information by running system command 'df -h'. + */ +public class DiskInfoCollector { + private static final Logger logger = LoggerFactory.getLogger(DiskInfoCollector.class); + + // Pattern to match df -h output lines for Ambry mount points + // Example: /dev/sdh1 21T 14T 6.5T 68% /mnt/u001/ambrydata + private static final Pattern DF_PATTERN = Pattern.compile( + "^(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\d+)%\\s+(/mnt/u\\d+/ambrydata)$"); + + /** + * Represents disk information from df command. + */ + public static class DiskInfo { + private final String filesystem; + private final String size; + private final String used; + private final String available; + private final int usePercentage; + private final String mountPoint; + + public DiskInfo(String filesystem, String size, String used, String available, int usePercentage, String mountPoint) { + this.filesystem = filesystem; + this.size = size; + this.used = used; + this.available = available; + this.usePercentage = usePercentage; + this.mountPoint = mountPoint; + } + + public String getFilesystem() { + return filesystem; + } + + public String getSize() { + return size; + } + + public String getUsed() { + return used; + } + + public String getAvailable() { + return available; + } + + public int getUsePercentage() { + return usePercentage; + } + + public String getMountPoint() { + return mountPoint; + } + + /** + * Convert size string (e.g., "100G", "1.5T") to bytes. 
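+   * For example, {@code "1.5T"} parses to {@code (long) (1.5 * 1024 * 1024 * 1024 * 1024)} bytes, while any string that
+   * does not match {@code <number><K|M|G|T|P|E>} (e.g. {@code "1Z"}) yields -1.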
+ * @return size in bytes, or -1 if parsing fails + */ + public long getSizeInBytes() { + return parseSize(size); + } + + private static long parseSize(String sizeStr) { + if (sizeStr == null || sizeStr.trim().isEmpty()) { + return -1; + } + + try { + sizeStr = sizeStr.trim().toUpperCase(); + // Extract number and unit + Pattern pattern = Pattern.compile("^([0-9.]+)([KMGTPE]?)$"); + Matcher matcher = pattern.matcher(sizeStr); + if (!matcher.matches()) { + return -1; + } + + double value = Double.parseDouble(matcher.group(1)); + String unit = matcher.group(2); + long multiplier; + switch (unit) { + case "K": + multiplier = 1024L; + break; + case "M": + multiplier = 1024L * 1024L; + break; + case "G": + multiplier = 1024L * 1024L * 1024L; + break; + case "T": + multiplier = 1024L * 1024L * 1024L * 1024L; + break; + case "P": + multiplier = 1024L * 1024L * 1024L * 1024L * 1024L; + break; + case "E": + multiplier = 1024L * 1024L * 1024L * 1024L * 1024L * 1024L; + break; + default: + multiplier = 1L; // Bytes + break; + } + + return (long) (value * multiplier); + } catch (NumberFormatException e) { + logger.warn("Failed to parse size: {}", sizeStr, e); + return -1; + } + } + + @Override + public String toString() { + return "DiskInfo{" + + "filesystem='" + filesystem + '\'' + + ", size='" + size + '\'' + + ", used='" + used + '\'' + + ", available='" + available + '\'' + + ", usePercentage=" + usePercentage + + ", mountPoint='" + mountPoint + '\'' + + '}'; + } + } + + /** + * Collect disk information by running 'df -h' command. + * @return map of mount point to DiskInfo + */ + public static Map collectDiskInfo() { + Map diskInfoMap = new HashMap<>(); + + try { + logger.info("Running command: df -h"); + ProcessBuilder processBuilder = new ProcessBuilder("df", "-h"); + processBuilder.redirectErrorStream(true); + Process process = processBuilder.start(); + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) { + String line; + boolean isFirstLine = true; + + while ((line = reader.readLine()) != null) { + // Skip header line + if (isFirstLine) { + logger.debug("df header: {}", line); + isFirstLine = false; + continue; + } + + DiskInfo diskInfo = parseDfLine(line); + if (diskInfo != null) { + diskInfoMap.put(diskInfo.getMountPoint(), diskInfo); + logger.info("Found disk: {} -> {}", diskInfo.getMountPoint(), diskInfo); + } + } + } + + } catch (IOException e) { + logger.error("Failed to run df command", e); + } + + logger.info("Collected disk info for {} mount points", diskInfoMap.size()); + return diskInfoMap; + } + + /** + * Parse a single line from df -h output. + * @param line the line to parse + * @return DiskInfo object, or null if parsing fails + */ + private static DiskInfo parseDfLine(String line) { + if (line == null || line.trim().isEmpty()) { + return null; + } + + line = line.trim(); + Matcher matcher = DF_PATTERN.matcher(line); + if (!matcher.matches()) { + logger.debug("Line doesn't match df pattern: {}", line); + return null; + } + + try { + String filesystem = matcher.group(1); + String size = matcher.group(2); + String used = matcher.group(3); + String available = matcher.group(4); + int usePercentage = Integer.parseInt(matcher.group(5)); + String mountPoint = matcher.group(6); + + return new DiskInfo(filesystem, size, used, available, usePercentage, mountPoint); + } catch (NumberFormatException e) { + logger.warn("Failed to parse df line: {}", line, e); + return null; + } + } + + /** + * Get total capacity across all provided disks. 
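+   * Disks whose size could not be parsed (i.e. a size of -1) are excluded from the sum.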
+ * @param diskInfoMap map of disk information + * @return total capacity in bytes + */ + public static long getTotalCapacity(Map diskInfoMap) { + return diskInfoMap.values().stream() + .mapToLong(DiskInfo::getSizeInBytes) + .filter(size -> size > 0) + .sum(); + } +} diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java index 63c77313f4..6919964a50 100644 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java @@ -15,6 +15,9 @@ import com.github.ambry.config.ClusterMapConfig; import com.github.ambry.utils.SystemTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; @@ -36,6 +39,8 @@ */ public class HelixFactory { private static final Logger LOGGER = LoggerFactory.getLogger(HelixFactory.class); + private static final String DOMAIN_TEMPLATE = "mz=%s,host=%s,applicationInstanceId=%s"; + // exposed for use in testing private final Map helixManagers = new ConcurrentHashMap<>(); private final Map dataNodeConfigSources = new ConcurrentHashMap<>(); @@ -114,6 +119,30 @@ HelixManager buildZKHelixManager(String clusterName, String instanceName, Instan instanceConfigBuilder.setPort(port); } + NimbusServiceMetadata nimbusMetadata = NimbusServiceMetadata.readFromFile(clusterMapConfig.nimbusServiceMetadataFilePath); + if (nimbusMetadata != null) { + LOGGER.info("Loaded nimbus service metadata - AppInstanceID: {}, NodeName: {}, MaintenanceZone: {}", + nimbusMetadata.getAppInstanceID(), nimbusMetadata.getNodeName(), nimbusMetadata.getMaintenanceZone()); + instanceConfigBuilder.setDomain(String.format(DOMAIN_TEMPLATE, nimbusMetadata.getMaintenanceZone(), nimbusMetadata.getNodeName(), nimbusMetadata.getAppInstanceID())); + } + + LiStatefulSetMetadata liStatefulSetMetadata = LiStatefulSetMetadata.readFromFile(clusterMapConfig.liStatefulSetMetadataFilePath); + if (liStatefulSetMetadata != null) { + List resourceTags = liStatefulSetMetadata.getResourceTags(); + LOGGER.info("Loaded LiStatefulSet metadata - Name: {}, ResourceTags: {}", + liStatefulSetMetadata.getName(), resourceTags); + for (String resourceTag : resourceTags) { + instanceConfigBuilder.addTag(clusterMapConfig.clusterMapResourceTagPrefix + resourceTag); + } + } + + // Short term solution to collect disk information via df -h, while pending DEPEND-92318. + Map diskInfo = DiskInfoCollector.collectDiskInfo(); + if (!diskInfo.isEmpty()) { + Map capacityMap = calculateInstanceCapacityMap(diskInfo, clusterMapConfig); + instanceConfigBuilder.setInstanceCapacityMap(capacityMap); + } + HelixManagerProperty participantHelixProperty = new HelixManagerProperty.Builder().setDefaultInstanceConfigBuilder(instanceConfigBuilder).build(); HelixManagerProperty defaultHelixManagerProperty = new HelixManagerProperty.Builder().setDefaultInstanceConfigBuilder(new InstanceConfig.Builder()).build(); @@ -129,6 +158,22 @@ HelixManager buildZKHelixManager(String clusterName, String instanceName, Instan return helixManager; } + /** + * Calculate the capacity map for the instance based on the disk information. + * @param diskInfo the disk information. + * @param clusterMapConfig the {@link ClusterMapConfig} to use. + * @return the capacity map for the instance. 
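+   * The map contains a single {@code "DISK"} entry holding the total capacity in GiB after subtracting the reserved
+   * percentage, e.g. 21 TiB of raw capacity with the default 5% reserve yields {@code (int) (21 * 1024 * 0.95) = 20428} GiB.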
+ */ + private Map calculateInstanceCapacityMap(Map diskInfo, ClusterMapConfig clusterMapConfig) { + long totalDiskCapacity = DiskInfoCollector.getTotalCapacity(diskInfo); + // Convert to GiB and apply reserved space + int capacityGiB = (int) (totalDiskCapacity / (1024.0 * 1024.0 * 1024.0) * (1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); + // Create the capacity map + Map capacityMap = new HashMap<>(); + capacityMap.put("DISK", capacityGiB); + return capacityMap; + } + /** * @param clusterMapConfig the {@link ClusterMapConfig} to use. * @param zkAddr the ZooKeeper address to connect to. If a {@link HelixManager} is required and one is already in the diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixParticipant.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixParticipant.java index 6162673071..b009b41db2 100644 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixParticipant.java +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixParticipant.java @@ -622,6 +622,35 @@ public void unblockStateTransition() { this.blockStateTransitionLatch.countDown(); } + // Populate the data node config to property store + @Override + public boolean populateDataNodeConfig() { + // Short term solution to collect disk information via df -h, while pending DEPEND-92318. + Map diskInfo = DiskInfoCollector.collectDiskInfo(); + if (!diskInfo.isEmpty()) { + logger.info("Populating DataNode config"); + DataNodeConfig dataNodeConfig = new DataNodeConfig( + clusterMapConfig.clusterMapDatacenterName, + clusterMapConfig.clusterMapHostName, + clusterMapConfig.clusterMapDefaultHttp2Port, + clusterMapConfig.clusterMapDefaultPort, + clusterMapConfig.clusterMapDefaultSslPort); + for (Map.Entry entry : diskInfo.entrySet()) { + // e.g. /mnt/u001/ambrydata + String mountPath = entry.getKey(); + // Calculate capacity with reserved space + long totalCapacity = entry.getValue().getSizeInBytes(); + long availableCapacity = (long) (totalCapacity * (1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); + DataNodeConfig.DiskConfig diskConfig = + new DataNodeConfig.DiskConfig(HardwareState.AVAILABLE, availableCapacity); + dataNodeConfig.addDiskConfig(mountPath, diskConfig); + } + return dataNodeConfigSource.set(dataNodeConfig); + } + logger.error("No disk information collected, nothing to populate"); + return false; + } + /** * A zookeeper based implementation for distributed lock. */ diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/LiStatefulSetMetadata.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/LiStatefulSetMetadata.java new file mode 100644 index 0000000000..7d6b5e225e --- /dev/null +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/LiStatefulSetMetadata.java @@ -0,0 +1,132 @@ +/* + * Copyright 2024 LinkedIn Corp. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ */ + +package com.github.ambry.clustermap; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.github.ambry.utils.Utils; +import java.util.ArrayList; +import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Represents metadata from liStatefulSet.json file containing Kubernetes StatefulSet information. + */ +@JsonIgnoreProperties(ignoreUnknown = true) +public class LiStatefulSetMetadata { + private static final Logger logger = LoggerFactory.getLogger(LiStatefulSetMetadata.class); + + @JsonProperty("metadata") + private Metadata metadata; + + /** + * Read LiStatefulSet metadata from the specified file path. + * @param filePath the path to the liStatefulSet.json file + * @return LiStatefulSetMetadata instance, or null if file cannot be read + */ + public static LiStatefulSetMetadata readFromFile(String filePath) { + return Utils.readJsonFromFile(filePath, LiStatefulSetMetadata.class); + } + + /** + * Get the StatefulSet name from metadata. + * @return the name, or null if not available + */ + public String getName() { + return metadata != null ? metadata.name : null; + } + + /** + * Extract resource tags from the StatefulSet name. + * Expected format: "v1.ambry-prod.{resourceTag}" or "v1.ambry-prod.{start}-{end}" for ranges + * Examples: + * - "v1.ambry-video.10032" -> ["10032"] + * - "v1.ambry-video.10032-10033" -> ["10032", "10033"] + * @return list of resource tags, empty if not found + */ + public List getResourceTags() { + String name = getName(); + if (name != null && name.contains(".")) { + String[] parts = name.split("\\."); + if (parts.length == 3) { + String resourcePart = parts[parts.length - 1]; // Get the last part + return parseResourceTags(resourcePart); + } + } + return new ArrayList<>(); + } + + /** + * Parse resource tags from the resource part, handling ranges. 
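+   * For example, {@code "10032"} yields {@code ["10032"]} and {@code "10032-10034"} yields {@code ["10032", "10033", "10034"]};
+   * non-numeric, reversed, or multi-hyphen ranges are kept as a single tag.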
+ * @param resourcePart the resource part (e.g., "10032" or "10032-10033") + * @return list of resource tags + */ + private List parseResourceTags(String resourcePart) { + List tags = new ArrayList<>(); + if (resourcePart == null || resourcePart.trim().isEmpty()) { + return tags; + } + resourcePart = resourcePart.trim(); + + // Check if it's a range (contains hyphen and both parts are numeric) + if (resourcePart.contains("-")) { + String[] rangeParts = resourcePart.split("-"); + if (rangeParts.length == 2) { + String startStr = rangeParts[0].trim(); + String endStr = rangeParts[1].trim(); + try { + int start = Integer.parseInt(startStr); + int end = Integer.parseInt(endStr); + // Validate range + if (start <= end) { + for (int i = start; i <= end; i++) { + tags.add(String.valueOf(i)); + } + return tags; + } else { + logger.warn("Invalid range in resource part: {} (start={}, end={})", resourcePart, start, end); + } + } catch (NumberFormatException e) { + logger.warn("Non-numeric range in resource part: {}", resourcePart); + } + } + } + + // If not a valid range, treat as single tag + tags.add(resourcePart); + return tags; + } + + // Getter + public Metadata getMetadata() { + return metadata; + } + + @Override + public String toString() { + return "LiStatefulSetMetadata{" + + "name='" + getName() + '\'' + + ", resourceTags=" + getResourceTags() + + '}'; + } + + // Inner class for JSON structure + @JsonIgnoreProperties(ignoreUnknown = true) + public static class Metadata { + @JsonProperty("name") + public String name; + } +} diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/NimbusServiceMetadata.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/NimbusServiceMetadata.java new file mode 100644 index 0000000000..88f0d98e60 --- /dev/null +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/NimbusServiceMetadata.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 LinkedIn Corp. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + */ + +package com.github.ambry.clustermap; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.github.ambry.utils.Utils; + +/** + * Represents metadata from nimbus-service.json file containing service instance information. + */ +@JsonIgnoreProperties(ignoreUnknown = true) +public class NimbusServiceMetadata { + + @JsonProperty("appInstanceID") + private String appInstanceID; + + @JsonProperty("nodeName") + private String nodeName; + + @JsonProperty("maintenanceZone") + private String maintenanceZone; + + /** + * Read nimbus service metadata from the specified file path. 
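+   * Callers such as {@link HelixFactory} treat a {@code null} result as the metadata being unavailable and skip the
+   * corresponding instance config population.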
+ * @param filePath the path to the nimbus-service.json file + * @return NimbusServiceMetadata instance, or null if file cannot be read + */ + public static NimbusServiceMetadata readFromFile(String filePath) { + return Utils.readJsonFromFile(filePath, NimbusServiceMetadata.class); + } + + // Getters + public String getAppInstanceID() { + return appInstanceID; + } + + public String getNodeName() { + return nodeName; + } + + public String getMaintenanceZone() { + return maintenanceZone; + } + + @Override + public String toString() { + return "NimbusServiceMetadata{" + + "appInstanceID='" + appInstanceID + '\'' + + ", nodeName='" + nodeName + '\'' + + ", maintenanceZone='" + maintenanceZone + '\'' + + '}'; + } +} diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/DiskInfoCollectorTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/DiskInfoCollectorTest.java new file mode 100644 index 0000000000..64f24acf63 --- /dev/null +++ b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/DiskInfoCollectorTest.java @@ -0,0 +1,242 @@ +/* + * Copyright 2024 LinkedIn Corp. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + */ + +package com.github.ambry.clustermap; + +import java.util.Map; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** + * Test for {@link DiskInfoCollector}. + */ +public class DiskInfoCollectorTest { + + /** + * Test DiskInfo constructor and getters. + */ + @Test + public void testDiskInfoConstructorAndGetters() { + String filesystem = "/dev/sdh1"; + String size = "21T"; + String used = "14T"; + String available = "6.5T"; + int usePercentage = 68; + String mountPoint = "/mnt/u001/ambrydata"; + + DiskInfoCollector.DiskInfo diskInfo = new DiskInfoCollector.DiskInfo( + filesystem, size, used, available, usePercentage, mountPoint); + + assertEquals("Filesystem should match", filesystem, diskInfo.getFilesystem()); + assertEquals("Size should match", size, diskInfo.getSize()); + assertEquals("Used should match", used, diskInfo.getUsed()); + assertEquals("Available should match", available, diskInfo.getAvailable()); + assertEquals("Use percentage should match", usePercentage, diskInfo.getUsePercentage()); + assertEquals("Mount point should match", mountPoint, diskInfo.getMountPoint()); + } + + /** + * Test DiskInfo toString method. + */ + @Test + public void testDiskInfoToString() { + DiskInfoCollector.DiskInfo diskInfo = new DiskInfoCollector.DiskInfo( + "/dev/sdh1", "21T", "14T", "6.5T", 68, "/mnt/u001/ambrydata"); + + String result = diskInfo.toString(); + assertTrue("Should contain filesystem", result.contains("/dev/sdh1")); + assertTrue("Should contain size", result.contains("21T")); + assertTrue("Should contain used", result.contains("14T")); + assertTrue("Should contain available", result.contains("6.5T")); + assertTrue("Should contain use percentage", result.contains("68")); + assertTrue("Should contain mount point", result.contains("/mnt/u001/ambrydata")); + } + + /** + * Test getSizeInBytes with various size formats. 
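+   * Expected values use binary multiples (1K = 1024 bytes), and unit suffixes are case-insensitive.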
+ */ + @Test + public void testGetSizeInBytes() { + // Test bytes + DiskInfoCollector.DiskInfo diskInfo1 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1024", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Bytes should be parsed correctly", 1024L, diskInfo1.getSizeInBytes()); + + // Test kilobytes + DiskInfoCollector.DiskInfo diskInfo2 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1K", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Kilobytes should be parsed correctly", 1024L, diskInfo2.getSizeInBytes()); + + // Test megabytes + DiskInfoCollector.DiskInfo diskInfo3 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1M", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Megabytes should be parsed correctly", 1024L * 1024L, diskInfo3.getSizeInBytes()); + + // Test gigabytes + DiskInfoCollector.DiskInfo diskInfo4 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1G", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Gigabytes should be parsed correctly", 1024L * 1024L * 1024L, diskInfo4.getSizeInBytes()); + + // Test terabytes + DiskInfoCollector.DiskInfo diskInfo5 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1T", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Terabytes should be parsed correctly", 1024L * 1024L * 1024L * 1024L, diskInfo5.getSizeInBytes()); + + // Test petabytes + DiskInfoCollector.DiskInfo diskInfo6 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1P", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Petabytes should be parsed correctly", 1024L * 1024L * 1024L * 1024L * 1024L, diskInfo6.getSizeInBytes()); + + // Test exabytes + DiskInfoCollector.DiskInfo diskInfo7 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1E", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Exabytes should be parsed correctly", 1024L * 1024L * 1024L * 1024L * 1024L * 1024L, diskInfo7.getSizeInBytes()); + + // Test decimal values + DiskInfoCollector.DiskInfo diskInfo8 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1.5G", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Decimal gigabytes should be parsed correctly", (long)(1.5 * 1024L * 1024L * 1024L), diskInfo8.getSizeInBytes()); + + // Test case insensitive + DiskInfoCollector.DiskInfo diskInfo9 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1g", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Lowercase units should be parsed correctly", 1024L * 1024L * 1024L, diskInfo9.getSizeInBytes()); + } + + /** + * Test getSizeInBytes with invalid formats. 
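+   * Every invalid input (null, empty, non-numeric, or an unsupported unit) is expected to return -1 rather than throw.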
+ */ + @Test + public void testGetSizeInBytesInvalidFormats() { + // Test null + DiskInfoCollector.DiskInfo diskInfo1 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", null, "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Null size should return -1", -1L, diskInfo1.getSizeInBytes()); + + // Test empty string + DiskInfoCollector.DiskInfo diskInfo2 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Empty size should return -1", -1L, diskInfo2.getSizeInBytes()); + + // Test invalid format + DiskInfoCollector.DiskInfo diskInfo3 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "invalid", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Invalid size should return -1", -1L, diskInfo3.getSizeInBytes()); + + // Test invalid number + DiskInfoCollector.DiskInfo diskInfo4 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "abcG", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Invalid number should return -1", -1L, diskInfo4.getSizeInBytes()); + + // Test unsupported unit + DiskInfoCollector.DiskInfo diskInfo5 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1Z", "512", "512", 50, "/mnt/u001/ambrydata"); + assertEquals("Unsupported unit should return -1", -1L, diskInfo5.getSizeInBytes()); + } + + /** + * Test collectDiskInfo method. + * Note: This test will actually run the 'df -h' command, so results may vary by system. + */ + @Test + public void testCollectDiskInfo() { + Map diskInfoMap = DiskInfoCollector.collectDiskInfo(); + + // Should return a map (may be empty if no Ambry mount points exist) + assertNotNull("DiskInfo map should not be null", diskInfoMap); + + // If there are results, validate the structure + for (Map.Entry entry : diskInfoMap.entrySet()) { + String mountPoint = entry.getKey(); + DiskInfoCollector.DiskInfo diskInfo = entry.getValue(); + + assertNotNull("Mount point should not be null", mountPoint); + assertNotNull("DiskInfo should not be null", diskInfo); + assertEquals("Mount point should match DiskInfo mount point", mountPoint, diskInfo.getMountPoint()); + assertTrue("Mount point should match Ambry pattern", mountPoint.matches("/mnt/u\\d+/ambrydata")); + assertNotNull("Filesystem should not be null", diskInfo.getFilesystem()); + assertNotNull("Size should not be null", diskInfo.getSize()); + assertNotNull("Used should not be null", diskInfo.getUsed()); + assertNotNull("Available should not be null", diskInfo.getAvailable()); + assertTrue("Use percentage should be valid", diskInfo.getUsePercentage() >= 0 && diskInfo.getUsePercentage() <= 100); + } + } + + /** + * Test getTotalCapacity method. 
+ */ + @Test + public void testGetTotalCapacity() { + Map diskInfoMap = new java.util.HashMap<>(); + + // Test empty map + assertEquals("Empty map should have zero capacity", 0L, DiskInfoCollector.getTotalCapacity(diskInfoMap)); + + // Add some disk info + diskInfoMap.put("/mnt/u001/ambrydata", new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1G", "512M", "512M", 50, "/mnt/u001/ambrydata")); + diskInfoMap.put("/mnt/u002/ambrydata", new DiskInfoCollector.DiskInfo( + "/dev/sdb1", "2G", "1G", "1G", 50, "/mnt/u002/ambrydata")); + + long expectedTotal = (1024L * 1024L * 1024L) + (2L * 1024L * 1024L * 1024L); // 1G + 2G = 3G + assertEquals("Total capacity should be sum of all disks", expectedTotal, DiskInfoCollector.getTotalCapacity(diskInfoMap)); + + // Add disk with invalid size + diskInfoMap.put("/mnt/u003/ambrydata", new DiskInfoCollector.DiskInfo( + "/dev/sdc1", "invalid", "1G", "1G", 50, "/mnt/u003/ambrydata")); + + // Should still be 3G (invalid size is filtered out) + assertEquals("Invalid sizes should be filtered out", expectedTotal, DiskInfoCollector.getTotalCapacity(diskInfoMap)); + } + + /** + * Test parseDfLine method indirectly by testing the pattern matching. + */ + @Test + public void testDfLinePatternMatching() { + // Test valid df line + Map result1 = DiskInfoCollector.collectDiskInfo(); + // This will test the actual df command, but we can't easily test parseDfLine directly + // since it's private. The collectDiskInfo test above covers this functionality. + + // We can test the pattern indirectly by knowing what should match + String validLine = "/dev/sdh1 21T 14T 6.5T 68% /mnt/u001/ambrydata"; + // The pattern should match this format, but since parseDfLine is private, + // we rely on the collectDiskInfo method to test this functionality + + assertNotNull("collectDiskInfo should work without throwing exceptions", result1); + } + + /** + * Test edge cases for DiskInfo creation. + */ + @Test + public void testDiskInfoEdgeCases() { + // Test with zero use percentage + DiskInfoCollector.DiskInfo diskInfo1 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1G", "0", "1G", 0, "/mnt/u001/ambrydata"); + assertEquals("Zero use percentage should be valid", 0, diskInfo1.getUsePercentage()); + + // Test with 100% use percentage + DiskInfoCollector.DiskInfo diskInfo2 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "1G", "1G", "0", 100, "/mnt/u001/ambrydata"); + assertEquals("100% use percentage should be valid", 100, diskInfo2.getUsePercentage()); + + // Test with very large sizes + DiskInfoCollector.DiskInfo diskInfo3 = new DiskInfoCollector.DiskInfo( + "/dev/sda1", "100T", "50T", "50T", 50, "/mnt/u001/ambrydata"); + assertTrue("Very large sizes should be parsed correctly", diskInfo3.getSizeInBytes() > 0); + } +} diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixFactoryTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixFactoryTest.java new file mode 100644 index 0000000000..8c5c5b0ebf --- /dev/null +++ b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixFactoryTest.java @@ -0,0 +1,322 @@ +/* + * Copyright 2017 LinkedIn Corp. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + */ + +package com.github.ambry.clustermap; + +import com.github.ambry.config.ClusterMapConfig; +import com.github.ambry.config.VerifiableProperties; +import java.util.Properties; +import org.apache.helix.HelixManager; +import org.apache.helix.InstanceType; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.*; + +/** + * Test for {@link HelixFactory}. + */ +public class HelixFactoryTest { + + private static final String CLUSTER_NAME = "test-cluster"; + private static final String INSTANCE_NAME = "localhost_1234"; + private static final String ZK_ADDR = "localhost:2181"; + + /** + * Test ManagerKey equals and hashCode methods. + */ + @Test + public void testManagerKeyEqualsAndHashCode() { + HelixFactory.ManagerKey key1 = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); + HelixFactory.ManagerKey key2 = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); + HelixFactory.ManagerKey key3 = new HelixFactory.ManagerKey("different-cluster", INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); + HelixFactory.ManagerKey key4 = new HelixFactory.ManagerKey(CLUSTER_NAME, "different-instance", InstanceType.PARTICIPANT, ZK_ADDR); + HelixFactory.ManagerKey key5 = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.SPECTATOR, ZK_ADDR); + HelixFactory.ManagerKey key6 = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, "different-zk"); + + // Test equals + assertEquals("Same keys should be equal", key1, key2); + assertNotEquals("Different cluster should not be equal", key1, key3); + assertNotEquals("Different instance should not be equal", key1, key4); + assertNotEquals("Different instance type should not be equal", key1, key5); + assertNotEquals("Different zk address should not be equal", key1, key6); + assertNotEquals("Key should not equal null", key1, null); + assertNotEquals("Key should not equal different class", key1, "string"); + + // Test reflexive + assertEquals("Key should equal itself", key1, key1); + + // Test hashCode consistency + assertEquals("Equal keys should have same hash code", key1.hashCode(), key2.hashCode()); + assertNotEquals("Different keys should have different hash codes", key1.hashCode(), key3.hashCode()); + } + + /** + * Test getZKHelixManager with auto-registration disabled. 
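+   * Repeated calls with identical parameters should return the same cached manager instance.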
+ */ + @Test + public void testGetZKHelixManagerWithoutAutoRegistration() { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + props.setProperty("clustermap.auto.registration.enabled", "false"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = new HelixFactory(); + HelixManager manager1 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + HelixManager manager2 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + + assertNotNull("Manager should not be null", manager1); + assertSame("Same manager should be returned for same parameters", manager1, manager2); + } + + /** + * Test getZKHelixManager with auto-registration enabled. + */ + @Test + public void testGetZKHelixManagerWithAutoRegistration() { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + props.setProperty("clustermap.auto.registration.enabled", "true"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = new HelixFactory(); + HelixManager manager = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + + assertNotNull("Manager should not be null", manager); + } + + /** + * Test getZKHelixManager with different instance types. + */ + @Test + public void testGetZKHelixManagerWithDifferentInstanceTypes() { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + props.setProperty("clustermap.auto.registration.enabled", "true"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = new HelixFactory(); + HelixManager participantManager = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + HelixManager spectatorManager = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.SPECTATOR, ZK_ADDR, clusterMapConfig); + + assertNotNull("Participant manager should not be null", participantManager); + assertNotNull("Spectator manager should not be null", spectatorManager); + assertNotSame("Different instance types should return different managers", participantManager, spectatorManager); + } + + /** + * Test getZKHelixManager caching behavior. 
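+   * Changing any of cluster name, instance name, instance type, or ZK address should produce a distinct manager.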
+ */ + @Test + public void testGetZKHelixManagerCaching() { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = new HelixFactory(); + + // Same parameters should return cached instance + HelixManager manager1 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + HelixManager manager2 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + assertSame("Same parameters should return cached manager", manager1, manager2); + + // Different parameters should return different instances + HelixManager manager3 = helixFactory.getZKHelixManager("different-cluster", INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + assertNotSame("Different cluster should return different manager", manager1, manager3); + + HelixManager manager4 = helixFactory.getZKHelixManager(CLUSTER_NAME, "different-instance", InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + assertNotSame("Different instance should return different manager", manager1, manager4); + + HelixManager manager5 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.SPECTATOR, ZK_ADDR, clusterMapConfig); + assertNotSame("Different instance type should return different manager", manager1, manager5); + + HelixManager manager6 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, "different-zk", clusterMapConfig); + assertNotSame("Different ZK address should return different manager", manager1, manager6); + } + + /** + * Test getZkHelixManagerAndConnect method. + */ + @Test + public void testGetZkHelixManagerAndConnect() throws Exception { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = spy(new HelixFactory()); + HelixManager mockManager = mock(HelixManager.class); + + // Mock the buildZKHelixManager method to return our mock + doReturn(mockManager).when(helixFactory).buildZKHelixManager(anyString(), anyString(), any(InstanceType.class), anyString(), any(ClusterMapConfig.class)); + + // Test when manager is not connected + when(mockManager.isConnected()).thenReturn(false); + HelixManager result1 = helixFactory.getZkHelixManagerAndConnect(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + + assertSame("Should return the same manager", mockManager, result1); + verify(mockManager, times(1)).connect(); + + // Test when manager is already connected + when(mockManager.isConnected()).thenReturn(true); + HelixManager result2 = helixFactory.getZkHelixManagerAndConnect(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + + assertSame("Should return the same manager", mockManager, result2); + // connect() should not be called again + verify(mockManager, times(1)).connect(); + } + + /** + * Test getDataNodeConfigSource method. 
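+   * Sources are cached per ZooKeeper address, so only a different address yields a new instance.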
+ */ + @Test + public void testGetDataNodeConfigSource() { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + props.setProperty("clustermap.data.node.config.source.type", "PROPERTY_STORE"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = new HelixFactory(); + DataNodeConfigSourceMetrics metrics = mock(DataNodeConfigSourceMetrics.class); + + DataNodeConfigSource source1 = helixFactory.getDataNodeConfigSource(clusterMapConfig, ZK_ADDR, metrics); + DataNodeConfigSource source2 = helixFactory.getDataNodeConfigSource(clusterMapConfig, ZK_ADDR, metrics); + + assertNotNull("DataNodeConfigSource should not be null", source1); + assertSame("Same ZK address should return cached source", source1, source2); + + // Different ZK address should return different source + DataNodeConfigSource source3 = helixFactory.getDataNodeConfigSource(clusterMapConfig, "different-zk", metrics); + assertNotSame("Different ZK address should return different source", source1, source3); + } + + /** + * Test buildZKHelixManager with null ClusterMapConfig. + */ + @Test + public void testBuildZKHelixManagerWithNullConfig() { + HelixFactory helixFactory = new HelixFactory(); + HelixManager manager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, null); + + assertNotNull("Manager should not be null even with null config", manager); + } + + /** + * Test buildZKHelixManager with auto-registration disabled. + */ + @Test + public void testBuildZKHelixManagerAutoRegistrationDisabled() { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + props.setProperty("clustermap.auto.registration.enabled", "false"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = new HelixFactory(); + HelixManager manager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + + assertNotNull("Manager should not be null", manager); + } + + /** + * Test buildZKHelixManager with auto-registration enabled. + */ + @Test + public void testBuildZKHelixManagerAutoRegistrationEnabled() { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + props.setProperty("clustermap.auto.registration.enabled", "true"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = new HelixFactory(); + HelixManager manager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + + assertNotNull("Manager should not be null", manager); + } + + /** + * Test that HelixFactory properly handles different instance types with auto-registration. 
+ */ + @Test + public void testBuildZKHelixManagerInstanceTypes() { + Properties props = new Properties(); + props.setProperty("clustermap.cluster.name", CLUSTER_NAME); + props.setProperty("clustermap.datacenter.name", "DC1"); + props.setProperty("clustermap.host.name", "localhost"); + props.setProperty("clustermap.port", "1234"); + props.setProperty("clustermap.auto.registration.enabled", "true"); + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + + HelixFactory helixFactory = new HelixFactory(); + + HelixManager participantManager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); + HelixManager spectatorManager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.SPECTATOR, ZK_ADDR, clusterMapConfig); + HelixManager adminManager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.ADMINISTRATOR, ZK_ADDR, clusterMapConfig); + + assertNotNull("Participant manager should not be null", participantManager); + assertNotNull("Spectator manager should not be null", spectatorManager); + assertNotNull("Admin manager should not be null", adminManager); + } + + /** + * Test ManagerKey constructor. + */ + @Test + public void testManagerKeyConstructor() { + HelixFactory.ManagerKey key = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); + + assertNotNull("ManagerKey should not be null", key); + // We can't directly test the private fields, but we can test equals/hashCode behavior + HelixFactory.ManagerKey sameKey = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); + assertEquals("Keys with same parameters should be equal", key, sameKey); + } + + /** + * Test edge cases for ManagerKey equals method. 
+ */ + @Test + public void testManagerKeyEqualsEdgeCases() { + HelixFactory.ManagerKey key = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); + + // Test with null values + HelixFactory.ManagerKey keyWithNulls = new HelixFactory.ManagerKey(null, null, null, null); + assertNotEquals("Key with nulls should not equal key with values", key, keyWithNulls); + + HelixFactory.ManagerKey anotherKeyWithNulls = new HelixFactory.ManagerKey(null, null, null, null); + assertEquals("Keys with same null values should be equal", keyWithNulls, anotherKeyWithNulls); + } +} diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixParticipantTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixParticipantTest.java index 57aaae228d..5ed581bd47 100644 --- a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixParticipantTest.java +++ b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixParticipantTest.java @@ -1196,6 +1196,222 @@ public void testOfflineToBootstrapWithDelayedStateTransition() throws Exception helixParticipant.close(); } + /** + * Test populateDataNodeConfig method with comprehensive scenarios + * @throws Exception + */ + @Test + public void testPopulateDataNodeConfig() throws Exception { + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + MetricRegistry metricRegistry = new MetricRegistry(); + + // Test with mocked DataNodeConfigSource to control behavior + DataNodeConfigSource mockDataNodeConfigSource = mock(DataNodeConfigSource.class); + HelixParticipant helixParticipant = new HelixParticipant(mock(HelixClusterManager.class), clusterMapConfig, + new HelixFactory(), metricRegistry, getDefaultZkConnectStr(clusterMapConfig), true) { + @Override + public boolean populateDataNodeConfig() { + return testPopulateDataNodeConfigWithMockedDiskInfo(mockDataNodeConfigSource); + } + + private boolean testPopulateDataNodeConfigWithMockedDiskInfo(DataNodeConfigSource configSource) { + // Simulate the actual method logic with controlled disk info + Map diskInfo = createMockDiskInfo(); + if (!diskInfo.isEmpty()) { + DataNodeConfig dataNodeConfig = new DataNodeConfig( + clusterMapConfig.clusterMapDatacenterName, + clusterMapConfig.clusterMapHostName, + clusterMapConfig.clusterMapDefaultHttp2Port, + clusterMapConfig.clusterMapDefaultPort, + clusterMapConfig.clusterMapDefaultSslPort); + for (Map.Entry entry : diskInfo.entrySet()) { + String mountPath = entry.getKey(); + long totalCapacity = entry.getValue().getSizeInBytes(); + long availableCapacity = (long) (totalCapacity * (1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); + DataNodeConfig.DiskConfig diskConfig = + new DataNodeConfig.DiskConfig(HardwareState.AVAILABLE, availableCapacity); + dataNodeConfig.addDiskConfig(mountPath, diskConfig); + } + return configSource.set(dataNodeConfig); + } + return false; + } + + private Map createMockDiskInfo() { + Map diskInfo = new HashMap<>(); + diskInfo.put("/mnt/u001/ambrydata", + new DiskInfoCollector.DiskInfo("/dev/sda1", "1T", "500G", "500G", 50, "/mnt/u001/ambrydata")); + diskInfo.put("/mnt/u002/ambrydata", + new DiskInfoCollector.DiskInfo("/dev/sdb1", "2T", "1T", "1T", 50, "/mnt/u002/ambrydata")); + return diskInfo; + } + }; + + // Test successful population + when(mockDataNodeConfigSource.set(any(DataNodeConfig.class))).thenReturn(true); + assertTrue("populateDataNodeConfig should return true when successful", + 
helixParticipant.populateDataNodeConfig()); + verify(mockDataNodeConfigSource, times(1)).set(any(DataNodeConfig.class)); + + // Test DataNodeConfigSource failure + when(mockDataNodeConfigSource.set(any(DataNodeConfig.class))).thenReturn(false); + assertFalse("populateDataNodeConfig should return false when DataNodeConfigSource.set fails", + helixParticipant.populateDataNodeConfig()); + + helixParticipant.close(); + } + + /** + * Test populateDataNodeConfig with empty disk info scenario + * @throws Exception + */ + @Test + public void testPopulateDataNodeConfigEmptyDiskInfo() throws Exception { + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + MetricRegistry metricRegistry = new MetricRegistry(); + + DataNodeConfigSource mockDataNodeConfigSource = mock(DataNodeConfigSource.class); + HelixParticipant helixParticipant = new HelixParticipant(mock(HelixClusterManager.class), clusterMapConfig, + new HelixFactory(), metricRegistry, getDefaultZkConnectStr(clusterMapConfig), true) { + @Override + public boolean populateDataNodeConfig() { + // Simulate empty disk info collection + Map diskInfo = new HashMap<>(); + if (!diskInfo.isEmpty()) { + DataNodeConfig dataNodeConfig = new DataNodeConfig( + clusterMapConfig.clusterMapDatacenterName, + clusterMapConfig.clusterMapHostName, + clusterMapConfig.clusterMapDefaultHttp2Port, + clusterMapConfig.clusterMapDefaultPort, + clusterMapConfig.clusterMapDefaultSslPort); + return mockDataNodeConfigSource.set(dataNodeConfig); + } + return false; + } + }; + + // Test empty disk info returns false and doesn't call set + assertFalse("populateDataNodeConfig should return false when no disk info is collected", + helixParticipant.populateDataNodeConfig()); + verify(mockDataNodeConfigSource, never()).set(any(DataNodeConfig.class)); + + helixParticipant.close(); + } + + /** + * Test populateDataNodeConfig capacity calculation with reserved space + * @throws Exception + */ + @Test + public void testPopulateDataNodeConfigCapacityCalculation() throws Exception { + // Set custom reserved space percentage for testing + Properties testProps = new Properties(props); + testProps.setProperty("clustermap.reserve.disk.space.percentage", "0.1"); // 10% reserved + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(testProps)); + MetricRegistry metricRegistry = new MetricRegistry(); + + DataNodeConfigSource mockDataNodeConfigSource = mock(DataNodeConfigSource.class); + when(mockDataNodeConfigSource.set(any(DataNodeConfig.class))).thenReturn(true); + + HelixParticipant helixParticipant = new HelixParticipant(mock(HelixClusterManager.class), clusterMapConfig, + new HelixFactory(), metricRegistry, getDefaultZkConnectStr(clusterMapConfig), true) { + @Override + public boolean populateDataNodeConfig() { + // Create disk info with known capacity + Map diskInfo = new HashMap<>(); + // 1TB disk + diskInfo.put("/mnt/u001/ambrydata", + new DiskInfoCollector.DiskInfo("/dev/sda1", "1T", "500G", "500G", 50, "/mnt/u001/ambrydata")); + + if (!diskInfo.isEmpty()) { + DataNodeConfig dataNodeConfig = new DataNodeConfig( + clusterMapConfig.clusterMapDatacenterName, + clusterMapConfig.clusterMapHostName, + clusterMapConfig.clusterMapDefaultHttp2Port, + clusterMapConfig.clusterMapDefaultPort, + clusterMapConfig.clusterMapDefaultSslPort); + for (Map.Entry entry : diskInfo.entrySet()) { + String mountPath = entry.getKey(); + long totalCapacity = entry.getValue().getSizeInBytes(); + long availableCapacity = (long) (totalCapacity * 
(1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); + DataNodeConfig.DiskConfig diskConfig = + new DataNodeConfig.DiskConfig(HardwareState.AVAILABLE, availableCapacity); + dataNodeConfig.addDiskConfig(mountPath, diskConfig); + + // Verify capacity calculation: 1TB * (1 - 0.1) = 0.9TB + long expectedCapacity = (long) (1024L * 1024L * 1024L * 1024L * 0.9); // 0.9TB in bytes + assertEquals("Available capacity should be 90% of total capacity", + expectedCapacity, availableCapacity); + } + return mockDataNodeConfigSource.set(dataNodeConfig); + } + return false; + } + }; + + assertTrue("populateDataNodeConfig should succeed with proper capacity calculation", + helixParticipant.populateDataNodeConfig()); + + helixParticipant.close(); + } + + /** + * Test populateDataNodeConfig with multiple disk scenarios + * @throws Exception + */ + @Test + public void testPopulateDataNodeConfigMultipleDisks() throws Exception { + ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); + MetricRegistry metricRegistry = new MetricRegistry(); + + DataNodeConfigSource mockDataNodeConfigSource = mock(DataNodeConfigSource.class); + when(mockDataNodeConfigSource.set(any(DataNodeConfig.class))).thenReturn(true); + + HelixParticipant helixParticipant = new HelixParticipant(mock(HelixClusterManager.class), clusterMapConfig, + new HelixFactory(), metricRegistry, getDefaultZkConnectStr(clusterMapConfig), true) { + @Override + public boolean populateDataNodeConfig() { + // Create multiple disk infos + Map diskInfo = new HashMap<>(); + diskInfo.put("/mnt/u001/ambrydata", + new DiskInfoCollector.DiskInfo("/dev/sda1", "1T", "500G", "500G", 50, "/mnt/u001/ambrydata")); + diskInfo.put("/mnt/u002/ambrydata", + new DiskInfoCollector.DiskInfo("/dev/sdb1", "2T", "1T", "1T", 50, "/mnt/u002/ambrydata")); + diskInfo.put("/mnt/u003/ambrydata", + new DiskInfoCollector.DiskInfo("/dev/sdc1", "500G", "250G", "250G", 50, "/mnt/u003/ambrydata")); + + if (!diskInfo.isEmpty()) { + DataNodeConfig dataNodeConfig = new DataNodeConfig( + clusterMapConfig.clusterMapDatacenterName, + clusterMapConfig.clusterMapHostName, + clusterMapConfig.clusterMapDefaultHttp2Port, + clusterMapConfig.clusterMapDefaultPort, + clusterMapConfig.clusterMapDefaultSslPort); + + int diskCount = 0; + for (Map.Entry entry : diskInfo.entrySet()) { + String mountPath = entry.getKey(); + long totalCapacity = entry.getValue().getSizeInBytes(); + long availableCapacity = (long) (totalCapacity * (1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); + DataNodeConfig.DiskConfig diskConfig = + new DataNodeConfig.DiskConfig(HardwareState.AVAILABLE, availableCapacity); + dataNodeConfig.addDiskConfig(mountPath, diskConfig); + diskCount++; + } + + assertEquals("Should process all 3 disks", 3, diskCount); + return mockDataNodeConfigSource.set(dataNodeConfig); + } + return false; + } + }; + + assertTrue("populateDataNodeConfig should succeed with multiple disks", + helixParticipant.populateDataNodeConfig()); + + helixParticipant.close(); + } + /** * Test two distributed locks * @param lock1 diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/LiStatefulSetMetadataTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/LiStatefulSetMetadataTest.java new file mode 100644 index 0000000000..125d1fdab5 --- /dev/null +++ b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/LiStatefulSetMetadataTest.java @@ -0,0 +1,180 @@ +/* + * Copyright 2024 LinkedIn Corp. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + */ + +package com.github.ambry.clustermap; + +import java.util.Arrays; +import java.util.List; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** + * Test for {@link LiStatefulSetMetadata}. + */ +public class LiStatefulSetMetadataTest { + + /** + * Test getResourceTags with single resource tag. + */ + @Test + public void testGetResourceTagsSingle() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10032"); + List resourceTags = metadata.getResourceTags(); + + assertEquals("Should have one resource tag", 1, resourceTags.size()); + assertEquals("Resource tag should match", "10032", resourceTags.get(0)); + } + + /** + * Test getResourceTags with range. + */ + @Test + public void testGetResourceTagsRange() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10032-10033"); + List resourceTags = metadata.getResourceTags(); + + assertEquals("Should have two resource tags", 2, resourceTags.size()); + assertEquals("First resource tag should be 10032", "10032", resourceTags.get(0)); + assertEquals("Second resource tag should be 10033", "10033", resourceTags.get(1)); + } + + /** + * Test getResourceTags with larger range. + */ + @Test + public void testGetResourceTagsLargerRange() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10030-10034"); + List resourceTags = metadata.getResourceTags(); + + List expected = Arrays.asList("10030", "10031", "10032", "10033", "10034"); + assertEquals("Should have five resource tags", 5, resourceTags.size()); + assertEquals("Resource tags should match expected range", expected, resourceTags); + } + + /** + * Test getResourceTags with invalid range (non-numeric). + */ + @Test + public void testGetResourceTagsInvalidRange() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.abc-def"); + List resourceTags = metadata.getResourceTags(); + + assertEquals("Should treat as single tag", 1, resourceTags.size()); + assertEquals("Should return original string", "abc-def", resourceTags.get(0)); + } + + /** + * Test getResourceTags with invalid range (start > end). + */ + @Test + public void testGetResourceTagsInvalidRangeOrder() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10033-10032"); + List resourceTags = metadata.getResourceTags(); + + assertEquals("Should treat as single tag", 1, resourceTags.size()); + assertEquals("Should return original string", "10033-10032", resourceTags.get(0)); + } + + /** + * Test getResourceTags with malformed range (multiple hyphens). + */ + @Test + public void testGetResourceTagsMalformedRange() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10032-10033-10034"); + List resourceTags = metadata.getResourceTags(); + + assertEquals("Should treat as single tag", 1, resourceTags.size()); + assertEquals("Should return original string", "10032-10033-10034", resourceTags.get(0)); + } + + /** + * Test getResourceTags with empty name. 
+ */ + @Test + public void testGetResourceTagsEmptyName() { + LiStatefulSetMetadata metadata = createMetadata(""); + List resourceTags = metadata.getResourceTags(); + + assertTrue("Should return empty list", resourceTags.isEmpty()); + } + + /** + * Test getResourceTags with null name. + */ + @Test + public void testGetResourceTagsNullName() { + LiStatefulSetMetadata metadata = createMetadata(null); + List resourceTags = metadata.getResourceTags(); + + assertTrue("Should return empty list", resourceTags.isEmpty()); + } + + /** + * Test getResourceTags with invalid format (not 3 parts). + */ + @Test + public void testGetResourceTagsInvalidFormat() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry"); + List resourceTags = metadata.getResourceTags(); + + assertTrue("Should return empty list for invalid format", resourceTags.isEmpty()); + } + + /** + * Test getResourceTags with whitespace in range. + */ + @Test + public void testGetResourceTagsWithWhitespace() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video. 10032 - 10033 "); + List resourceTags = metadata.getResourceTags(); + + assertEquals("Should have two resource tags", 2, resourceTags.size()); + assertEquals("First resource tag should be 10032", "10032", resourceTags.get(0)); + assertEquals("Second resource tag should be 10033", "10033", resourceTags.get(1)); + } + + + /** + * Test toString method with resource tags. + */ + @Test + public void testToString() { + LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10032-10033"); + String result = metadata.toString(); + + assertTrue("Should contain name", result.contains("v1.ambry-video.10032-10033")); + assertTrue("Should contain resource tags", result.contains("[10032, 10033]")); + } + + /** + * Helper method to create LiStatefulSetMetadata with given name. + */ + private LiStatefulSetMetadata createMetadata(String name) { + LiStatefulSetMetadata metadata = new LiStatefulSetMetadata(); + LiStatefulSetMetadata.Metadata innerMetadata = new LiStatefulSetMetadata.Metadata(); + innerMetadata.name = name; + + // Use reflection to set the private field + try { + java.lang.reflect.Field field = LiStatefulSetMetadata.class.getDeclaredField("metadata"); + field.setAccessible(true); + field.set(metadata, innerMetadata); + } catch (Exception e) { + throw new RuntimeException("Failed to set metadata field", e); + } + + return metadata; + } +} diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/NimbusServiceMetadataTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/NimbusServiceMetadataTest.java new file mode 100644 index 0000000000..00f8b46eaa --- /dev/null +++ b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/NimbusServiceMetadataTest.java @@ -0,0 +1,282 @@ +/* + * Copyright 2024 LinkedIn Corp. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ */ + +package com.github.ambry.clustermap; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** + * Test for {@link NimbusServiceMetadata}. + */ +public class NimbusServiceMetadataTest { + + private Path tempDir; + + @Before + public void setUp() throws IOException { + tempDir = Files.createTempDirectory("nimbus-test"); + } + + @After + public void tearDown() throws IOException { + if (tempDir != null) { + Files.walk(tempDir) + .map(Path::toFile) + .forEach(File::delete); + } + } + + /** + * Test successful reading of valid nimbus service metadata. + */ + @Test + public void testReadFromFileValid() throws IOException { + String jsonContent = "{\n" + + " \"appInstanceID\": \"test-app-123\",\n" + + " \"nodeName\": \"test-node-01\",\n" + + " \"maintenanceZone\": \"zone-a\"\n" + + "}"; + + File metadataFile = createTempFile("nimbus-service.json", jsonContent); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + assertNotNull("Metadata should not be null", metadata); + assertEquals("App instance ID should match", "test-app-123", metadata.getAppInstanceID()); + assertEquals("Node name should match", "test-node-01", metadata.getNodeName()); + assertEquals("Maintenance zone should match", "zone-a", metadata.getMaintenanceZone()); + } + + /** + * Test reading with missing optional fields. + */ + @Test + public void testReadFromFilePartialData() throws IOException { + String jsonContent = "{\n" + + " \"appInstanceID\": \"test-app-456\"\n" + + "}"; + + File metadataFile = createTempFile("nimbus-service-partial.json", jsonContent); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + assertNotNull("Metadata should not be null", metadata); + assertEquals("App instance ID should match", "test-app-456", metadata.getAppInstanceID()); + assertNull("Node name should be null", metadata.getNodeName()); + assertNull("Maintenance zone should be null", metadata.getMaintenanceZone()); + } + + /** + * Test reading with extra unknown fields (should be ignored). + */ + @Test + public void testReadFromFileWithUnknownFields() throws IOException { + String jsonContent = "{\n" + + " \"appInstanceID\": \"test-app-789\",\n" + + " \"nodeName\": \"test-node-02\",\n" + + " \"maintenanceZone\": \"zone-b\",\n" + + " \"unknownField\": \"should-be-ignored\",\n" + + " \"anotherUnknownField\": 12345\n" + + "}"; + + File metadataFile = createTempFile("nimbus-service-extra.json", jsonContent); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + assertNotNull("Metadata should not be null", metadata); + assertEquals("App instance ID should match", "test-app-789", metadata.getAppInstanceID()); + assertEquals("Node name should match", "test-node-02", metadata.getNodeName()); + assertEquals("Maintenance zone should match", "zone-b", metadata.getMaintenanceZone()); + } + + /** + * Test reading from null file path. + */ + @Test + public void testReadFromFileNullPath() { + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(null); + assertNull("Metadata should be null for null path", metadata); + } + + /** + * Test reading from empty file path. 
+ */ + @Test + public void testReadFromFileEmptyPath() { + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(""); + assertNull("Metadata should be null for empty path", metadata); + + metadata = NimbusServiceMetadata.readFromFile(" "); + assertNull("Metadata should be null for whitespace path", metadata); + } + + /** + * Test reading from non-existent file. + */ + @Test + public void testReadFromFileNonExistent() { + String nonExistentPath = tempDir.resolve("non-existent-file.json").toString(); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(nonExistentPath); + assertNull("Metadata should be null for non-existent file", metadata); + } + + /** + * Test reading from unreadable file. + */ + @Test + public void testReadFromFileUnreadable() throws IOException { + File metadataFile = createTempFile("unreadable.json", "{}"); + metadataFile.setReadable(false); + + try { + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + // On some systems, setReadable(false) might not work, so we check if it actually became unreadable + if (!metadataFile.canRead()) { + assertNull("Metadata should be null for unreadable file", metadata); + } + } finally { + metadataFile.setReadable(true); // Restore for cleanup + } + } + + /** + * Test reading from file with invalid JSON. + */ + @Test + public void testReadFromFileInvalidJson() throws IOException { + String invalidJsonContent = "{\n" + + " \"appInstanceID\": \"test-app\",\n" + + " \"nodeName\": \"test-node\"\n" + + " // missing closing brace"; + + File metadataFile = createTempFile("invalid.json", invalidJsonContent); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + assertNull("Metadata should be null for invalid JSON", metadata); + } + + /** + * Test reading from empty JSON file. + */ + @Test + public void testReadFromFileEmptyJson() throws IOException { + File metadataFile = createTempFile("empty.json", "{}"); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + assertNotNull("Metadata should not be null for empty JSON", metadata); + assertNull("App instance ID should be null", metadata.getAppInstanceID()); + assertNull("Node name should be null", metadata.getNodeName()); + assertNull("Maintenance zone should be null", metadata.getMaintenanceZone()); + } + + /** + * Test getters with null values. + */ + @Test + public void testGettersWithNullValues() throws IOException { + File metadataFile = createTempFile("null-values.json", "{}"); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + assertNotNull("Metadata should not be null", metadata); + assertNull("App instance ID should be null", metadata.getAppInstanceID()); + assertNull("Node name should be null", metadata.getNodeName()); + assertNull("Maintenance zone should be null", metadata.getMaintenanceZone()); + } + + /** + * Test toString method. 
+ */ + @Test + public void testToString() throws IOException { + String jsonContent = "{\n" + + " \"appInstanceID\": \"test-app-toString\",\n" + + " \"nodeName\": \"test-node-toString\",\n" + + " \"maintenanceZone\": \"zone-toString\"\n" + + "}"; + + File metadataFile = createTempFile("toString-test.json", jsonContent); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + String result = metadata.toString(); + assertTrue("Should contain app instance ID", result.contains("test-app-toString")); + assertTrue("Should contain node name", result.contains("test-node-toString")); + assertTrue("Should contain maintenance zone", result.contains("zone-toString")); + assertTrue("Should contain class name", result.contains("NimbusServiceMetadata")); + } + + /** + * Test toString method with null values. + */ + @Test + public void testToStringWithNullValues() throws IOException { + File metadataFile = createTempFile("toString-null.json", "{}"); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + String result = metadata.toString(); + assertTrue("Should contain null values", result.contains("null")); + assertTrue("Should contain class name", result.contains("NimbusServiceMetadata")); + } + + /** + * Test reading with different JSON formatting. + */ + @Test + public void testReadFromFileCompactJson() throws IOException { + String compactJsonContent = "{\"appInstanceID\":\"compact-app\",\"nodeName\":\"compact-node\",\"maintenanceZone\":\"compact-zone\"}"; + + File metadataFile = createTempFile("compact.json", compactJsonContent); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + assertNotNull("Metadata should not be null", metadata); + assertEquals("App instance ID should match", "compact-app", metadata.getAppInstanceID()); + assertEquals("Node name should match", "compact-node", metadata.getNodeName()); + assertEquals("Maintenance zone should match", "compact-zone", metadata.getMaintenanceZone()); + } + + /** + * Test reading with special characters in values. + */ + @Test + public void testReadFromFileSpecialCharacters() throws IOException { + String jsonContent = "{\n" + + " \"appInstanceID\": \"app-with-special-chars-@#$%\",\n" + + " \"nodeName\": \"node_with_underscores_123\",\n" + + " \"maintenanceZone\": \"zone.with.dots\"\n" + + "}"; + + File metadataFile = createTempFile("special-chars.json", jsonContent); + NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); + + assertNotNull("Metadata should not be null", metadata); + assertEquals("App instance ID should handle special chars", "app-with-special-chars-@#$%", metadata.getAppInstanceID()); + assertEquals("Node name should handle underscores", "node_with_underscores_123", metadata.getNodeName()); + assertEquals("Maintenance zone should handle dots", "zone.with.dots", metadata.getMaintenanceZone()); + } + + /** + * Helper method to create a temporary file with given content. 
+ */ + private File createTempFile(String fileName, String content) throws IOException { + File file = tempDir.resolve(fileName).toFile(); + try (FileWriter writer = new FileWriter(file)) { + writer.write(content); + } + return file; + } +} diff --git a/ambry-server/src/main/java/com/github/ambry/server/AmbryServer.java b/ambry-server/src/main/java/com/github/ambry/server/AmbryServer.java index e7ca2bb66c..a329b3bbe0 100644 --- a/ambry-server/src/main/java/com/github/ambry/server/AmbryServer.java +++ b/ambry-server/src/main/java/com/github/ambry/server/AmbryServer.java @@ -85,7 +85,6 @@ import com.github.ambry.protocol.RequestHandlerPool; import com.github.ambry.repair.RepairRequestsDb; import com.github.ambry.repair.RepairRequestsDbFactory; -import com.github.ambry.replica.prioritization.FCFSPrioritizationManager; import com.github.ambry.replica.prioritization.FileBasedReplicationPrioritizationManagerFactory; import com.github.ambry.replica.prioritization.PrioritizationManager; import com.github.ambry.replica.prioritization.PrioritizationManagerFactory; @@ -117,7 +116,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Predicate; @@ -364,6 +362,9 @@ public void startup() throws InstantiationException { // wait for dataNode to be populated if (nodeId == null) { + if (clusterParticipant != null && !clusterParticipant.populateDataNodeConfig()) { + logger.error("Failed to populate data node config to property store for instance: {}", networkConfig.hostName); + } logger.info("Waiting on dataNode config to be populated..."); if(!dataNodeLatch.await(serverConfig.serverDatanodeConfigTimeout, TimeUnit.SECONDS)) { throw new IllegalArgumentException("Startup timed out waiting for data node config to be populated"); diff --git a/ambry-server/src/test/java/com/github/ambry/server/AmbryServerTest.java b/ambry-server/src/test/java/com/github/ambry/server/AmbryServerTest.java index 286460948d..9583b06566 100644 --- a/ambry-server/src/test/java/com/github/ambry/server/AmbryServerTest.java +++ b/ambry-server/src/test/java/com/github/ambry/server/AmbryServerTest.java @@ -154,4 +154,51 @@ public void testAmbryServerStartupWithoutDataNodeIdTimeoutCase() throws Exceptio "failure during startup java.lang.IllegalArgumentException: Startup timed out waiting for data node config to be populated"); }); } + + @Test + public void testPopulateDataNodeConfigWithHostnameCheck() throws Exception { + ClusterAgentsFactory spyClusterAgentsFactory = spy(new MockClusterAgentsFactory(false, false, 1, 1, 1)); + DataNodeId dataNodeId = spyClusterAgentsFactory.getClusterMap().getDataNodeIds().get(0); + MockClusterMap spyClusterMap = spy(new MockClusterMap(false, false, 1, 1, 1, false, false, null)); + doReturn(null).when(spyClusterMap).getDataNodeId(dataNodeId.getHostname(), dataNodeId.getPort()); + doReturn(spyClusterMap).when(spyClusterAgentsFactory).getClusterMap(); + + // Test case 1: Matching hostnames - should call populateDataNodeConfig + Properties props1 = new Properties(); + props1.setProperty("host.name", "localhost"); + props1.setProperty("port", "1234"); + props1.setProperty("clustermap.cluster.name", "test"); + props1.setProperty("clustermap.datacenter.name", "DC1"); + props1.setProperty("clustermap.host.name", "localhost"); // Same as host.name + 
props1.setProperty("clustermap.port", "1234"); + props1.setProperty("server.datanode.config.timeout", "1"); // Short timeout for test + + AmbryServer ambryServer1 = new AmbryServer(new VerifiableProperties(props1), spyClusterAgentsFactory, null, + new LoggingNotificationSystem(), SystemTime.getInstance(), null); + + // Should timeout because populateDataNodeConfig is called but no listener triggers + assertException(InstantiationException.class, ambryServer1::startup, e -> { + assertTrue("Should timeout waiting for DataNode config", + e.getMessage().contains("Startup timed out waiting for data node config to be populated")); + }); + + // Test case 2: Non-matching hostnames - should NOT call populateDataNodeConfig + Properties props2 = new Properties(); + props2.setProperty("host.name", "localhost"); + props2.setProperty("port", "1234"); + props2.setProperty("clustermap.cluster.name", "test"); + props2.setProperty("clustermap.datacenter.name", "DC1"); + props2.setProperty("clustermap.host.name", "different-host"); // Different from host.name + props2.setProperty("clustermap.port", "1234"); + props2.setProperty("server.datanode.config.timeout", "1"); // Short timeout for test + + AmbryServer ambryServer2 = new AmbryServer(new VerifiableProperties(props2), spyClusterAgentsFactory, null, + new LoggingNotificationSystem(), SystemTime.getInstance(), null); + + // Should also timeout, but for different reason - no DataNode config population attempted + assertException(InstantiationException.class, ambryServer2::startup, e -> { + assertTrue("Should timeout waiting for DataNode config", + e.getMessage().contains("Startup timed out waiting for data node config to be populated")); + }); + } } diff --git a/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java b/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java index ed9fe36a03..7154e1ac34 100644 --- a/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java +++ b/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java @@ -1649,4 +1649,49 @@ public static byte[] base64DecodeUrlSafe(String base64String) { return org.apache.commons.codec.binary.Base64.decodeBase64(base64String); } } + + // JSON file utilities + private static final ObjectMapper objectMapper = new ObjectMapper(); + + /** + * Read and parse a JSON file into the specified class type. + * @param filePath the path to the JSON file + * @param clazz the class type to deserialize into + * @param the type of the class + * @return instance of the specified class, or null if file cannot be read or parsed + */ + public static T readJsonFromFile(String filePath, Class clazz) { + if (filePath == null || filePath.trim().isEmpty()) { + logger.warn("JSON file path is null or empty for class: {}", clazz.getSimpleName()); + return null; + } + + File jsonFile = new File(filePath); + if (!jsonFile.exists()) { + logger.warn("JSON file does not exist: {} for class: {}", filePath, clazz.getSimpleName()); + return null; + } + + if (!jsonFile.canRead()) { + logger.warn("Cannot read JSON file: {} for class: {}", filePath, clazz.getSimpleName()); + return null; + } + + try { + T instance = objectMapper.readValue(jsonFile, clazz); + logger.info("Successfully read {} from: {}", clazz.getSimpleName(), filePath); + return instance; + } catch (IOException e) { + logger.error("Failed to parse JSON file: {} for class: {}", filePath, clazz.getSimpleName(), e); + return null; + } + } + + /** + * Get the shared ObjectMapper instance. 
+ * @return the ObjectMapper instance + */ + public static ObjectMapper getObjectMapper() { + return objectMapper; + } } diff --git a/ambry-utils/src/test/java/com/github/ambry/utils/UtilsTest.java b/ambry-utils/src/test/java/com/github/ambry/utils/UtilsTest.java index d0f57094d1..6724c0649e 100644 --- a/ambry-utils/src/test/java/com/github/ambry/utils/UtilsTest.java +++ b/ambry-utils/src/test/java/com/github/ambry/utils/UtilsTest.java @@ -13,6 +13,7 @@ */ package com.github.ambry.utils; +import com.fasterxml.jackson.annotation.JsonProperty; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.PooledByteBufAllocator; @@ -21,6 +22,7 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.lang.reflect.Field; import java.nio.ByteBuffer; @@ -39,9 +41,13 @@ import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import java.nio.file.Files; +import java.nio.file.Path; import javax.net.ssl.SSLException; import org.apache.commons.io.FileUtils; +import org.junit.After; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import static org.junit.Assert.*; @@ -53,6 +59,22 @@ */ public class UtilsTest { static final String STATIC_FIELD_TEST_STRING = "field1"; + + private Path tempDir; + + @Before + public void setUp() throws IOException { + tempDir = Files.createTempDirectory("utils-test"); + } + + @After + public void tearDown() throws IOException { + if (tempDir != null) { + Files.walk(tempDir) + .map(Path::toFile) + .forEach(File::delete); + } + } @Test(expected = IllegalArgumentException.class) public void testGetRandomLongException() { @@ -785,6 +807,122 @@ public void testByteArrayCheckNotNullOrEmpty() { } assertTrue(thrownEx instanceof IllegalArgumentException); } + + // JSON file utilities tests + + /** + * Test class for JSON deserialization. + */ + public static class TestData { + @JsonProperty("name") + public String name; + + @JsonProperty("value") + public int value; + + @JsonProperty("enabled") + public boolean enabled; + } + + /** + * Test successful reading of valid JSON file. + */ + @Test + public void testReadJsonFromFileValid() throws IOException { + String jsonContent = "{\n" + + " \"name\": \"test-name\",\n" + + " \"value\": 42,\n" + + " \"enabled\": true\n" + + "}"; + + File jsonFile = createTempFile("valid.json", jsonContent); + TestData result = Utils.readJsonFromFile(jsonFile.getAbsolutePath(), TestData.class); + + assertNotNull("Result should not be null", result); + assertEquals("Name should match", "test-name", result.name); + assertEquals("Value should match", 42, result.value); + assertTrue("Enabled should be true", result.enabled); + } + + /** + * Test reading from null file path. + */ + @Test + public void testReadJsonFromFileNullPath() { + TestData result = Utils.readJsonFromFile(null, TestData.class); + assertNull("Result should be null for null path", result); + } + + /** + * Test reading from empty file path. + */ + @Test + public void testReadJsonFromFileEmptyPath() { + TestData result = Utils.readJsonFromFile("", TestData.class); + assertNull("Result should be null for empty path", result); + + result = Utils.readJsonFromFile(" ", TestData.class); + assertNull("Result should be null for whitespace path", result); + } + + /** + * Test reading from non-existent file. 
+ */ + @Test + public void testReadJsonFromFileNonExistent() { + String nonExistentPath = tempDir.resolve("non-existent.json").toString(); + TestData result = Utils.readJsonFromFile(nonExistentPath, TestData.class); + assertNull("Result should be null for non-existent file", result); + } + + /** + * Test reading from file with invalid JSON. + */ + @Test + public void testReadJsonFromFileInvalidJson() throws IOException { + String invalidJsonContent = "{\n" + + " \"name\": \"test\",\n" + + " \"value\": 42\n" + + " // missing closing brace"; + + File jsonFile = createTempFile("invalid.json", invalidJsonContent); + TestData result = Utils.readJsonFromFile(jsonFile.getAbsolutePath(), TestData.class); + assertNull("Result should be null for invalid JSON", result); + } + + /** + * Test reading from empty JSON file. + */ + @Test + public void testReadJsonFromFileEmptyJson() throws IOException { + File jsonFile = createTempFile("empty.json", "{}"); + TestData result = Utils.readJsonFromFile(jsonFile.getAbsolutePath(), TestData.class); + + assertNotNull("Result should not be null for empty JSON", result); + assertNull("Name should be null", result.name); + assertEquals("Value should be default", 0, result.value); + assertFalse("Enabled should be default false", result.enabled); + } + + /** + * Test getObjectMapper method. + */ + @Test + public void testGetObjectMapper() { + assertNotNull("ObjectMapper should not be null", Utils.getObjectMapper()); + assertSame("Should return same instance", Utils.getObjectMapper(), Utils.getObjectMapper()); + } + + /** + * Helper method to create a temporary file with given content. + */ + private File createTempFile(String fileName, String content) throws IOException { + File file = tempDir.resolve(fileName).toFile(); + try (FileWriter writer = new FileWriter(file)) { + writer.write(content); + } + return file; + } } class MockClassForTesting { From f9821547e14403a86037eb0736c4081bde563068 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 16 Dec 2025 12:00:58 -0800 Subject: [PATCH 02/14] fix ut --- .../ambry/clustermap/RecoveryTestClusterAgentsFactory.java | 7 +++++++ .../ambry/clustermap/StaticClusterAgentsFactory.java | 7 +++++++ .../github/ambry/server/ParticipantsConsistencyTest.java | 7 +++++++ .../github/ambry/clustermap/MockClusterAgentsFactory.java | 7 +++++++ .../src/main/java/com/github/ambry/utils/Utils.java | 1 + build.gradle | 3 +++ 6 files changed, 32 insertions(+) diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/RecoveryTestClusterAgentsFactory.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/RecoveryTestClusterAgentsFactory.java index ad54c7e210..1c90149562 100644 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/RecoveryTestClusterAgentsFactory.java +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/RecoveryTestClusterAgentsFactory.java @@ -95,6 +95,13 @@ public void participateAndBlockStateTransition(List ambryHealt public void unblockStateTransition() { } + @Override + public boolean populateDataNodeConfig() { + // Recovery test cluster doesn't support populating data node config. + // Return false to indicate this operation is not supported. 
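+     // AmbryServer.startup() treats a false return as non-fatal: it only logs an error and then keeps waiting on the data node config latch, so recovery tests are unaffected.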
+ return false; + } + @Override public void close() { } diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/StaticClusterAgentsFactory.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/StaticClusterAgentsFactory.java index 5ef9d3cd58..803bbe7bed 100644 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/StaticClusterAgentsFactory.java +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/StaticClusterAgentsFactory.java @@ -104,6 +104,13 @@ public void participateAndBlockStateTransition(List ambryStats public void unblockStateTransition() { } + @Override + public boolean populateDataNodeConfig() { + // Static clustermap doesn't support populating data node config dynamically. + // Return false to indicate this operation is not supported. + return false; + } + @Override public void close() { diff --git a/ambry-server/src/test/java/com/github/ambry/server/ParticipantsConsistencyTest.java b/ambry-server/src/test/java/com/github/ambry/server/ParticipantsConsistencyTest.java index 93fc80c35c..1e1f268fb8 100644 --- a/ambry-server/src/test/java/com/github/ambry/server/ParticipantsConsistencyTest.java +++ b/ambry-server/src/test/java/com/github/ambry/server/ParticipantsConsistencyTest.java @@ -216,6 +216,13 @@ public void participateAndBlockStateTransition(List ambryStats public void unblockStateTransition() { } + @Override + public boolean populateDataNodeConfig() { + // Mock cluster participant doesn't support populating data node config. + // Return false to indicate this operation is not supported. + return false; + } + @Override public boolean setReplicaSealedState(ReplicaId replicaId, ReplicaSealStatus replicaSealStatus) { String replicaPath = replicaId.getReplicaPath(); diff --git a/ambry-test-utils/src/main/java/com/github/ambry/clustermap/MockClusterAgentsFactory.java b/ambry-test-utils/src/main/java/com/github/ambry/clustermap/MockClusterAgentsFactory.java index 736e6df2de..48eb924e6e 100644 --- a/ambry-test-utils/src/main/java/com/github/ambry/clustermap/MockClusterAgentsFactory.java +++ b/ambry-test-utils/src/main/java/com/github/ambry/clustermap/MockClusterAgentsFactory.java @@ -98,6 +98,13 @@ public void participateAndBlockStateTransition(List ambryHealt public void unblockStateTransition() { } + @Override + public boolean populateDataNodeConfig() { + // Mock cluster doesn't support populating data node config. + // Return false to indicate this operation is not supported. 
+ return false; + } + @Override public void close() { diff --git a/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java b/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java index 7154e1ac34..fd580e443e 100644 --- a/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java +++ b/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java @@ -72,6 +72,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.zip.CRC32; +import com.fasterxml.jackson.databind.ObjectMapper; import org.json.JSONException; import org.json.JSONObject; import org.slf4j.Logger; diff --git a/build.gradle b/build.gradle index 8d56e7a5c5..b797dbeb5e 100644 --- a/build.gradle +++ b/build.gradle @@ -205,6 +205,9 @@ project(':ambry-utils') { implementation "org.json:json:$jsonVersion" implementation "net.sf.jopt-simple:jopt-simple:$joptSimpleVersion" implementation "io.netty:netty-all:$nettyVersion" + implementation "com.fasterxml.jackson.core:jackson-core:$jacksonVersion" + implementation "com.fasterxml.jackson.core:jackson-annotations:$jacksonVersion" + implementation "com.fasterxml.jackson.core:jackson-databind:$jacksonVersion" testImplementation project(":ambry-test-utils") testImplementation "io.netty:netty-transport-native-epoll:$nettyVersion" testImplementation "commons-io:commons-io:$commonsIoVersion" From 6ac5f3f056063319594bef71b39df86c4b127afd Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 16 Dec 2025 12:36:26 -0800 Subject: [PATCH 03/14] fix --- .../com/github/ambry/cloud/RecoveryNetworkClientTest.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ambry-cloud/src/test/java/com/github/ambry/cloud/RecoveryNetworkClientTest.java b/ambry-cloud/src/test/java/com/github/ambry/cloud/RecoveryNetworkClientTest.java index f359097896..32b0d9dc3d 100644 --- a/ambry-cloud/src/test/java/com/github/ambry/cloud/RecoveryNetworkClientTest.java +++ b/ambry-cloud/src/test/java/com/github/ambry/cloud/RecoveryNetworkClientTest.java @@ -130,6 +130,13 @@ public RecoveryNetworkClientTest() throws Exception { properties.setProperty(ReplicationConfig.REPLICATION_CLOUD_TOKEN_FACTORY, RecoveryTokenFactory.class.getCanonicalName()); properties.setProperty("replication.metadata.request.version", "2"); + properties.setProperty("clustermap.nimbus.service.metadata.file.path", "./etc/metadata/nimbus-service.json"); + properties.setProperty("clustermap.listatefulset.metadata.file.path", "./etc/metadata/liStatefulSet.json"); + properties.setProperty("clustermap.reserve.disk.space.percentage", "0.05"); + properties.setProperty("clustermap.resource.tag.prefix", "TAG_"); + properties.setProperty("clustermap.default.http2.port", "15388"); + properties.setProperty("clustermap.default.port", "15088"); + properties.setProperty("clustermap.default.ssl.port", "15288"); verifiableProperties = new VerifiableProperties(properties); findTokenHelper = new FindTokenHelper(null, new ReplicationConfig(verifiableProperties)); // Create test cluster From 76061a132291d779dea29920fc70bf4b58d6cead Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 16 Dec 2025 12:47:41 -0800 Subject: [PATCH 04/14] fix --- .../github/ambry/config/ClusterMapConfig.java | 59 ++++++++++++------- .../cloud/RecoveryNetworkClientTest.java | 7 --- 2 files changed, 38 insertions(+), 28 deletions(-) diff --git a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java index e121afa193..6b132d7dee 100644 --- 
a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java +++ b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java @@ -48,6 +48,23 @@ public class ClusterMapConfig { public static final String PARTITION_FILTERING_ENABLED = "clustermap.enable.partition.filtering"; public static final String ENABLE_FILE_COPY_PROTOCOL = "clustermap.enable.file.copy.protocol"; + public static final String NIMBUS_SERVICE_METADATA_FILE_PATH = "clustermap.nimbus.service.metadata.file.path"; + public static final String LI_STATEFUL_SET_METADATA_FILE_PATH = "clustermap.listatefulset.metadata.file.path"; + public static final String RESERVE_DISK_SPACE_PERCENTAGE = "clustermap.reserve.disk.space.percentage"; + public static final String RESOURCE_TAG_PREFIX = "clustermap.resource.tag.prefix"; + public static final String DEFAULT_HTTP2_PORT = "clustermap.default.http2.port"; + public static final String DEFAULT_PORT = "clustermap.default.port"; + public static final String DEFAULT_SSL_PORT = "clustermap.default.ssl.port"; + + // Default values + public static final String DEFAULT_NIMBUS_SERVICE_METADATA_FILE_PATH = "./etc/metadata/nimbus-service.json"; + public static final String DEFAULT_LI_STATEFUL_SET_METADATA_FILE_PATH = "./etc/metadata/liStatefulSet.json"; + public static final double DEFAULT_RESERVE_DISK_SPACE_PERCENTAGE = 0.05; + public static final String DEFAULT_RESOURCE_TAG_PREFIX = "TAG_"; + public static final int DEFAULT_HTTP2_PORT_VALUE = 15388; + public static final int DEFAULT_PORT_VALUE = 15088; + public static final int DEFAULT_SSL_PORT_VALUE = 15288; + /** * The factory class used to get the resource state policies. */ @@ -420,50 +437,50 @@ public class ClusterMapConfig { /** * Path to the nimbus service metadata file containing instance information */ - @Config("clustermap.nimbus.service.metadata.file.path") - @Default("./etc/metadata/nimbus-service.json") + @Config(NIMBUS_SERVICE_METADATA_FILE_PATH) + @Default(DEFAULT_NIMBUS_SERVICE_METADATA_FILE_PATH) public final String nimbusServiceMetadataFilePath; /** * Path to the LiStatefulSet metadata file containing Kubernetes StatefulSet information */ - @Config("clustermap.listatefulset.metadata.file.path") - @Default("./etc/metadata/liStatefulSet.json") + @Config(LI_STATEFUL_SET_METADATA_FILE_PATH) + @Default(DEFAULT_LI_STATEFUL_SET_METADATA_FILE_PATH) public final String liStatefulSetMetadataFilePath; /** * Percentage of disk space to reserve, default to 5% */ - @Config("clustermap.reserve.disk.space.percentage") - @Default("0.05") + @Config(RESERVE_DISK_SPACE_PERCENTAGE) + @Default("" + DEFAULT_RESERVE_DISK_SPACE_PERCENTAGE) public final double clusterMapReserveDiskSpacePercentage; /** * Prefix for resource tags in cluster map */ - @Config("clustermap.resource.tag.prefix") - @Default("TAG_") + @Config(RESOURCE_TAG_PREFIX) + @Default(DEFAULT_RESOURCE_TAG_PREFIX) public final String clusterMapResourceTagPrefix; /** * Default HTTP2 port for cluster nodes */ - @Config("clustermap.default.http2.port") - @Default("15388") + @Config(DEFAULT_HTTP2_PORT) + @Default("" + DEFAULT_HTTP2_PORT_VALUE) public final int clusterMapDefaultHttp2Port; /** * Default port for cluster nodes */ - @Config("clustermap.default.port") - @Default("15088") + @Config(DEFAULT_PORT) + @Default("" + DEFAULT_PORT_VALUE) public final int clusterMapDefaultPort; /** * Default SSL port for cluster nodes */ - @Config("clustermap.default.ssl.port") - @Default("15288") + @Config(DEFAULT_SSL_PORT) + @Default("" + DEFAULT_SSL_PORT_VALUE) public final int 
clusterMapDefaultSslPort; public ClusterMapConfig(VerifiableProperties verifiableProperties) { @@ -557,12 +574,12 @@ public ClusterMapConfig(VerifiableProperties verifiableProperties) { routerPutSuccessTarget = verifiableProperties.getIntInRange(ROUTER_PUT_SUCCESS_TARGET, 2, 1, Integer.MAX_VALUE); clusterMapPartitionFilteringEnabled = verifiableProperties.getBoolean(PARTITION_FILTERING_ENABLED, false); enableFileCopyProtocol = verifiableProperties.getBoolean(ENABLE_FILE_COPY_PROTOCOL, false); - nimbusServiceMetadataFilePath = verifiableProperties.getString("clustermap.nimbus.service.metadata.file.path"); - liStatefulSetMetadataFilePath = verifiableProperties.getString("clustermap.listatefulset.metadata.file.path"); - clusterMapReserveDiskSpacePercentage = verifiableProperties.getDouble("clustermap.reserve.disk.space.percentage"); - clusterMapResourceTagPrefix = verifiableProperties.getString("clustermap.resource.tag.prefix"); - clusterMapDefaultHttp2Port = verifiableProperties.getInt("clustermap.default.http2.port"); - clusterMapDefaultPort = verifiableProperties.getInt("clustermap.default.port"); - clusterMapDefaultSslPort = verifiableProperties.getInt("clustermap.default.ssl.port"); + nimbusServiceMetadataFilePath = verifiableProperties.getString(NIMBUS_SERVICE_METADATA_FILE_PATH, DEFAULT_NIMBUS_SERVICE_METADATA_FILE_PATH); + liStatefulSetMetadataFilePath = verifiableProperties.getString(LI_STATEFUL_SET_METADATA_FILE_PATH, DEFAULT_LI_STATEFUL_SET_METADATA_FILE_PATH); + clusterMapReserveDiskSpacePercentage = verifiableProperties.getDouble(RESERVE_DISK_SPACE_PERCENTAGE, DEFAULT_RESERVE_DISK_SPACE_PERCENTAGE); + clusterMapResourceTagPrefix = verifiableProperties.getString(RESOURCE_TAG_PREFIX, DEFAULT_RESOURCE_TAG_PREFIX); + clusterMapDefaultHttp2Port = verifiableProperties.getInt(DEFAULT_HTTP2_PORT, DEFAULT_HTTP2_PORT_VALUE); + clusterMapDefaultPort = verifiableProperties.getInt(DEFAULT_PORT, DEFAULT_PORT_VALUE); + clusterMapDefaultSslPort = verifiableProperties.getInt(DEFAULT_SSL_PORT, DEFAULT_SSL_PORT_VALUE); } } diff --git a/ambry-cloud/src/test/java/com/github/ambry/cloud/RecoveryNetworkClientTest.java b/ambry-cloud/src/test/java/com/github/ambry/cloud/RecoveryNetworkClientTest.java index 32b0d9dc3d..f359097896 100644 --- a/ambry-cloud/src/test/java/com/github/ambry/cloud/RecoveryNetworkClientTest.java +++ b/ambry-cloud/src/test/java/com/github/ambry/cloud/RecoveryNetworkClientTest.java @@ -130,13 +130,6 @@ public RecoveryNetworkClientTest() throws Exception { properties.setProperty(ReplicationConfig.REPLICATION_CLOUD_TOKEN_FACTORY, RecoveryTokenFactory.class.getCanonicalName()); properties.setProperty("replication.metadata.request.version", "2"); - properties.setProperty("clustermap.nimbus.service.metadata.file.path", "./etc/metadata/nimbus-service.json"); - properties.setProperty("clustermap.listatefulset.metadata.file.path", "./etc/metadata/liStatefulSet.json"); - properties.setProperty("clustermap.reserve.disk.space.percentage", "0.05"); - properties.setProperty("clustermap.resource.tag.prefix", "TAG_"); - properties.setProperty("clustermap.default.http2.port", "15388"); - properties.setProperty("clustermap.default.port", "15088"); - properties.setProperty("clustermap.default.ssl.port", "15288"); verifiableProperties = new VerifiableProperties(properties); findTokenHelper = new FindTokenHelper(null, new ReplicationConfig(verifiableProperties)); // Create test cluster From 927453c8341b50a902f5a3814733198277f90f21 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 6 Jan 2026 01:11:35 
-0800 Subject: [PATCH 05/14] moving to closed source --- .../github/ambry/config/ClusterMapConfig.java | 74 +--- .../ambry/clustermap/DiskInfoCollector.java | 235 ------------- .../clustermap/HelixClusterAgentsFactory.java | 24 ++ .../github/ambry/clustermap/HelixFactory.java | 41 --- .../ambry/clustermap/HelixParticipant.java | 43 +-- .../clustermap/LiStatefulSetMetadata.java | 132 ------- .../clustermap/NimbusServiceMetadata.java | 66 ---- .../clustermap/DiskInfoCollectorTest.java | 242 ------------- .../ambry/clustermap/HelixFactoryTest.java | 322 ------------------ .../clustermap/HelixParticipantTest.java | 216 ------------ .../clustermap/LiStatefulSetMetadataTest.java | 180 ---------- .../clustermap/NimbusServiceMetadataTest.java | 282 --------------- .../java/com/github/ambry/utils/Utils.java | 46 --- .../com/github/ambry/utils/UtilsTest.java | 138 -------- build.gradle | 3 - 15 files changed, 43 insertions(+), 2001 deletions(-) delete mode 100644 ambry-clustermap/src/main/java/com/github/ambry/clustermap/DiskInfoCollector.java delete mode 100644 ambry-clustermap/src/main/java/com/github/ambry/clustermap/LiStatefulSetMetadata.java delete mode 100644 ambry-clustermap/src/main/java/com/github/ambry/clustermap/NimbusServiceMetadata.java delete mode 100644 ambry-clustermap/src/test/java/com/github/ambry/clustermap/DiskInfoCollectorTest.java delete mode 100644 ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixFactoryTest.java delete mode 100644 ambry-clustermap/src/test/java/com/github/ambry/clustermap/LiStatefulSetMetadataTest.java delete mode 100644 ambry-clustermap/src/test/java/com/github/ambry/clustermap/NimbusServiceMetadataTest.java diff --git a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java index 6b132d7dee..a8d53e1b36 100644 --- a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java +++ b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java @@ -48,23 +48,7 @@ public class ClusterMapConfig { public static final String PARTITION_FILTERING_ENABLED = "clustermap.enable.partition.filtering"; public static final String ENABLE_FILE_COPY_PROTOCOL = "clustermap.enable.file.copy.protocol"; - public static final String NIMBUS_SERVICE_METADATA_FILE_PATH = "clustermap.nimbus.service.metadata.file.path"; - public static final String LI_STATEFUL_SET_METADATA_FILE_PATH = "clustermap.listatefulset.metadata.file.path"; - public static final String RESERVE_DISK_SPACE_PERCENTAGE = "clustermap.reserve.disk.space.percentage"; - public static final String RESOURCE_TAG_PREFIX = "clustermap.resource.tag.prefix"; - public static final String DEFAULT_HTTP2_PORT = "clustermap.default.http2.port"; - public static final String DEFAULT_PORT = "clustermap.default.port"; - public static final String DEFAULT_SSL_PORT = "clustermap.default.ssl.port"; - - // Default values - public static final String DEFAULT_NIMBUS_SERVICE_METADATA_FILE_PATH = "./etc/metadata/nimbus-service.json"; - public static final String DEFAULT_LI_STATEFUL_SET_METADATA_FILE_PATH = "./etc/metadata/liStatefulSet.json"; - public static final double DEFAULT_RESERVE_DISK_SPACE_PERCENTAGE = 0.05; - public static final String DEFAULT_RESOURCE_TAG_PREFIX = "TAG_"; - public static final int DEFAULT_HTTP2_PORT_VALUE = 15388; - public static final int DEFAULT_PORT_VALUE = 15088; - public static final int DEFAULT_SSL_PORT_VALUE = 15288; - + /** * The factory class used to get the resource state policies. 
*/ @@ -434,55 +418,6 @@ public class ClusterMapConfig { @Default("false") public final boolean enableFileCopyProtocol; - /** - * Path to the nimbus service metadata file containing instance information - */ - @Config(NIMBUS_SERVICE_METADATA_FILE_PATH) - @Default(DEFAULT_NIMBUS_SERVICE_METADATA_FILE_PATH) - public final String nimbusServiceMetadataFilePath; - - /** - * Path to the LiStatefulSet metadata file containing Kubernetes StatefulSet information - */ - @Config(LI_STATEFUL_SET_METADATA_FILE_PATH) - @Default(DEFAULT_LI_STATEFUL_SET_METADATA_FILE_PATH) - public final String liStatefulSetMetadataFilePath; - - /** - * Percentage of disk space to reserve, default to 5% - */ - @Config(RESERVE_DISK_SPACE_PERCENTAGE) - @Default("" + DEFAULT_RESERVE_DISK_SPACE_PERCENTAGE) - public final double clusterMapReserveDiskSpacePercentage; - - /** - * Prefix for resource tags in cluster map - */ - @Config(RESOURCE_TAG_PREFIX) - @Default(DEFAULT_RESOURCE_TAG_PREFIX) - public final String clusterMapResourceTagPrefix; - - /** - * Default HTTP2 port for cluster nodes - */ - @Config(DEFAULT_HTTP2_PORT) - @Default("" + DEFAULT_HTTP2_PORT_VALUE) - public final int clusterMapDefaultHttp2Port; - - /** - * Default port for cluster nodes - */ - @Config(DEFAULT_PORT) - @Default("" + DEFAULT_PORT_VALUE) - public final int clusterMapDefaultPort; - - /** - * Default SSL port for cluster nodes - */ - @Config(DEFAULT_SSL_PORT) - @Default("" + DEFAULT_SSL_PORT_VALUE) - public final int clusterMapDefaultSslPort; - public ClusterMapConfig(VerifiableProperties verifiableProperties) { clusterMapFixedTimeoutDatanodeErrorThreshold = verifiableProperties.getIntInRange("clustermap.fixedtimeout.datanode.error.threshold", 3, 1, 100); @@ -574,12 +509,5 @@ public ClusterMapConfig(VerifiableProperties verifiableProperties) { routerPutSuccessTarget = verifiableProperties.getIntInRange(ROUTER_PUT_SUCCESS_TARGET, 2, 1, Integer.MAX_VALUE); clusterMapPartitionFilteringEnabled = verifiableProperties.getBoolean(PARTITION_FILTERING_ENABLED, false); enableFileCopyProtocol = verifiableProperties.getBoolean(ENABLE_FILE_COPY_PROTOCOL, false); - nimbusServiceMetadataFilePath = verifiableProperties.getString(NIMBUS_SERVICE_METADATA_FILE_PATH, DEFAULT_NIMBUS_SERVICE_METADATA_FILE_PATH); - liStatefulSetMetadataFilePath = verifiableProperties.getString(LI_STATEFUL_SET_METADATA_FILE_PATH, DEFAULT_LI_STATEFUL_SET_METADATA_FILE_PATH); - clusterMapReserveDiskSpacePercentage = verifiableProperties.getDouble(RESERVE_DISK_SPACE_PERCENTAGE, DEFAULT_RESERVE_DISK_SPACE_PERCENTAGE); - clusterMapResourceTagPrefix = verifiableProperties.getString(RESOURCE_TAG_PREFIX, DEFAULT_RESOURCE_TAG_PREFIX); - clusterMapDefaultHttp2Port = verifiableProperties.getInt(DEFAULT_HTTP2_PORT, DEFAULT_HTTP2_PORT_VALUE); - clusterMapDefaultPort = verifiableProperties.getInt(DEFAULT_PORT, DEFAULT_PORT_VALUE); - clusterMapDefaultSslPort = verifiableProperties.getInt(DEFAULT_SSL_PORT, DEFAULT_SSL_PORT_VALUE); } } diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/DiskInfoCollector.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/DiskInfoCollector.java deleted file mode 100644 index 8186320802..0000000000 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/DiskInfoCollector.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Copyright 2024 LinkedIn Corp. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - */ - -package com.github.ambry.clustermap; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.HashMap; -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Collects disk information by running system command 'df -h'. - */ -public class DiskInfoCollector { - private static final Logger logger = LoggerFactory.getLogger(DiskInfoCollector.class); - - // Pattern to match df -h output lines for Ambry mount points - // Example: /dev/sdh1 21T 14T 6.5T 68% /mnt/u001/ambrydata - private static final Pattern DF_PATTERN = Pattern.compile( - "^(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\d+)%\\s+(/mnt/u\\d+/ambrydata)$"); - - /** - * Represents disk information from df command. - */ - public static class DiskInfo { - private final String filesystem; - private final String size; - private final String used; - private final String available; - private final int usePercentage; - private final String mountPoint; - - public DiskInfo(String filesystem, String size, String used, String available, int usePercentage, String mountPoint) { - this.filesystem = filesystem; - this.size = size; - this.used = used; - this.available = available; - this.usePercentage = usePercentage; - this.mountPoint = mountPoint; - } - - public String getFilesystem() { - return filesystem; - } - - public String getSize() { - return size; - } - - public String getUsed() { - return used; - } - - public String getAvailable() { - return available; - } - - public int getUsePercentage() { - return usePercentage; - } - - public String getMountPoint() { - return mountPoint; - } - - /** - * Convert size string (e.g., "100G", "1.5T") to bytes. 
- * @return size in bytes, or -1 if parsing fails - */ - public long getSizeInBytes() { - return parseSize(size); - } - - private static long parseSize(String sizeStr) { - if (sizeStr == null || sizeStr.trim().isEmpty()) { - return -1; - } - - try { - sizeStr = sizeStr.trim().toUpperCase(); - // Extract number and unit - Pattern pattern = Pattern.compile("^([0-9.]+)([KMGTPE]?)$"); - Matcher matcher = pattern.matcher(sizeStr); - if (!matcher.matches()) { - return -1; - } - - double value = Double.parseDouble(matcher.group(1)); - String unit = matcher.group(2); - long multiplier; - switch (unit) { - case "K": - multiplier = 1024L; - break; - case "M": - multiplier = 1024L * 1024L; - break; - case "G": - multiplier = 1024L * 1024L * 1024L; - break; - case "T": - multiplier = 1024L * 1024L * 1024L * 1024L; - break; - case "P": - multiplier = 1024L * 1024L * 1024L * 1024L * 1024L; - break; - case "E": - multiplier = 1024L * 1024L * 1024L * 1024L * 1024L * 1024L; - break; - default: - multiplier = 1L; // Bytes - break; - } - - return (long) (value * multiplier); - } catch (NumberFormatException e) { - logger.warn("Failed to parse size: {}", sizeStr, e); - return -1; - } - } - - @Override - public String toString() { - return "DiskInfo{" + - "filesystem='" + filesystem + '\'' + - ", size='" + size + '\'' + - ", used='" + used + '\'' + - ", available='" + available + '\'' + - ", usePercentage=" + usePercentage + - ", mountPoint='" + mountPoint + '\'' + - '}'; - } - } - - /** - * Collect disk information by running 'df -h' command. - * @return map of mount point to DiskInfo - */ - public static Map collectDiskInfo() { - Map diskInfoMap = new HashMap<>(); - - try { - logger.info("Running command: df -h"); - ProcessBuilder processBuilder = new ProcessBuilder("df", "-h"); - processBuilder.redirectErrorStream(true); - Process process = processBuilder.start(); - - try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) { - String line; - boolean isFirstLine = true; - - while ((line = reader.readLine()) != null) { - // Skip header line - if (isFirstLine) { - logger.debug("df header: {}", line); - isFirstLine = false; - continue; - } - - DiskInfo diskInfo = parseDfLine(line); - if (diskInfo != null) { - diskInfoMap.put(diskInfo.getMountPoint(), diskInfo); - logger.info("Found disk: {} -> {}", diskInfo.getMountPoint(), diskInfo); - } - } - } - - } catch (IOException e) { - logger.error("Failed to run df command", e); - } - - logger.info("Collected disk info for {} mount points", diskInfoMap.size()); - return diskInfoMap; - } - - /** - * Parse a single line from df -h output. - * @param line the line to parse - * @return DiskInfo object, or null if parsing fails - */ - private static DiskInfo parseDfLine(String line) { - if (line == null || line.trim().isEmpty()) { - return null; - } - - line = line.trim(); - Matcher matcher = DF_PATTERN.matcher(line); - if (!matcher.matches()) { - logger.debug("Line doesn't match df pattern: {}", line); - return null; - } - - try { - String filesystem = matcher.group(1); - String size = matcher.group(2); - String used = matcher.group(3); - String available = matcher.group(4); - int usePercentage = Integer.parseInt(matcher.group(5)); - String mountPoint = matcher.group(6); - - return new DiskInfo(filesystem, size, used, available, usePercentage, mountPoint); - } catch (NumberFormatException e) { - logger.warn("Failed to parse df line: {}", line, e); - return null; - } - } - - /** - * Get total capacity across all provided disks. 
- * @param diskInfoMap map of disk information - * @return total capacity in bytes - */ - public static long getTotalCapacity(Map diskInfoMap) { - return diskInfoMap.values().stream() - .mapToLong(DiskInfo::getSizeInBytes) - .filter(size -> size > 0) - .sum(); - } -} diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixClusterAgentsFactory.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixClusterAgentsFactory.java index 53b0d08300..57322f6c92 100644 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixClusterAgentsFactory.java +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixClusterAgentsFactory.java @@ -94,5 +94,29 @@ public List getClusterParticipants() throws IOException { } return helixParticipants; } + + /** + * Get the ClusterMapConfig. Exposed for subclasses. + * @return the {@link ClusterMapConfig} associated with this factory. + */ + protected ClusterMapConfig getClusterMapConfig() { + return clusterMapConfig; + } + + /** + * Get the HelixFactory. Exposed for subclasses. + * @return the {@link HelixFactory} used by this factory. + */ + protected HelixFactory getHelixFactory() { + return helixFactory; + } + + /** + * Get the MetricRegistry. Exposed for subclasses. + * @return the {@link MetricRegistry} used by this factory. + */ + protected MetricRegistry getMetricRegistry() { + return metricRegistry; + } } diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java index 6919964a50..ff8f235507 100644 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java @@ -39,7 +39,6 @@ */ public class HelixFactory { private static final Logger LOGGER = LoggerFactory.getLogger(HelixFactory.class); - private static final String DOMAIN_TEMPLATE = "mz=%s,host=%s,applicationInstanceId=%s"; // exposed for use in testing private final Map helixManagers = new ConcurrentHashMap<>(); @@ -119,30 +118,6 @@ HelixManager buildZKHelixManager(String clusterName, String instanceName, Instan instanceConfigBuilder.setPort(port); } - NimbusServiceMetadata nimbusMetadata = NimbusServiceMetadata.readFromFile(clusterMapConfig.nimbusServiceMetadataFilePath); - if (nimbusMetadata != null) { - LOGGER.info("Loaded nimbus service metadata - AppInstanceID: {}, NodeName: {}, MaintenanceZone: {}", - nimbusMetadata.getAppInstanceID(), nimbusMetadata.getNodeName(), nimbusMetadata.getMaintenanceZone()); - instanceConfigBuilder.setDomain(String.format(DOMAIN_TEMPLATE, nimbusMetadata.getMaintenanceZone(), nimbusMetadata.getNodeName(), nimbusMetadata.getAppInstanceID())); - } - - LiStatefulSetMetadata liStatefulSetMetadata = LiStatefulSetMetadata.readFromFile(clusterMapConfig.liStatefulSetMetadataFilePath); - if (liStatefulSetMetadata != null) { - List resourceTags = liStatefulSetMetadata.getResourceTags(); - LOGGER.info("Loaded LiStatefulSet metadata - Name: {}, ResourceTags: {}", - liStatefulSetMetadata.getName(), resourceTags); - for (String resourceTag : resourceTags) { - instanceConfigBuilder.addTag(clusterMapConfig.clusterMapResourceTagPrefix + resourceTag); - } - } - - // Short term solution to collect disk information via df -h, while pending DEPEND-92318. 
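/*
 * Illustrative sketch only: the Helix domain string that DOMAIN_TEMPLATE above used to build
 * from the nimbus metadata, shown with hypothetical sample values. Not part of the patch.
 */
class DomainStringSketch {
  public static void main(String[] args) {
    String domain = String.format("mz=%s,host=%s,applicationInstanceId=%s",
        "zone-a", "test-node-01", "test-app-123");
    System.out.println(domain); // mz=zone-a,host=test-node-01,applicationInstanceId=test-app-123
  }
}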
- Map diskInfo = DiskInfoCollector.collectDiskInfo(); - if (!diskInfo.isEmpty()) { - Map capacityMap = calculateInstanceCapacityMap(diskInfo, clusterMapConfig); - instanceConfigBuilder.setInstanceCapacityMap(capacityMap); - } - HelixManagerProperty participantHelixProperty = new HelixManagerProperty.Builder().setDefaultInstanceConfigBuilder(instanceConfigBuilder).build(); HelixManagerProperty defaultHelixManagerProperty = new HelixManagerProperty.Builder().setDefaultInstanceConfigBuilder(new InstanceConfig.Builder()).build(); @@ -158,22 +133,6 @@ HelixManager buildZKHelixManager(String clusterName, String instanceName, Instan return helixManager; } - /** - * Calculate the capacity map for the instance based on the disk information. - * @param diskInfo the disk information. - * @param clusterMapConfig the {@link ClusterMapConfig} to use. - * @return the capacity map for the instance. - */ - private Map calculateInstanceCapacityMap(Map diskInfo, ClusterMapConfig clusterMapConfig) { - long totalDiskCapacity = DiskInfoCollector.getTotalCapacity(diskInfo); - // Convert to GiB and apply reserved space - int capacityGiB = (int) (totalDiskCapacity / (1024.0 * 1024.0 * 1024.0) * (1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); - // Create the capacity map - Map capacityMap = new HashMap<>(); - capacityMap.put("DISK", capacityGiB); - return capacityMap; - } - /** * @param clusterMapConfig the {@link ClusterMapConfig} to use. * @param zkAddr the ZooKeeper address to connect to. If a {@link HelixManager} is required and one is already in the diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixParticipant.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixParticipant.java index b009b41db2..d92411ae13 100644 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixParticipant.java +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixParticipant.java @@ -622,33 +622,10 @@ public void unblockStateTransition() { this.blockStateTransitionLatch.countDown(); } - // Populate the data node config to property store @Override public boolean populateDataNodeConfig() { - // Short term solution to collect disk information via df -h, while pending DEPEND-92318. - Map diskInfo = DiskInfoCollector.collectDiskInfo(); - if (!diskInfo.isEmpty()) { - logger.info("Populating DataNode config"); - DataNodeConfig dataNodeConfig = new DataNodeConfig( - clusterMapConfig.clusterMapDatacenterName, - clusterMapConfig.clusterMapHostName, - clusterMapConfig.clusterMapDefaultHttp2Port, - clusterMapConfig.clusterMapDefaultPort, - clusterMapConfig.clusterMapDefaultSslPort); - for (Map.Entry entry : diskInfo.entrySet()) { - // e.g. /mnt/u001/ambrydata - String mountPath = entry.getKey(); - // Calculate capacity with reserved space - long totalCapacity = entry.getValue().getSizeInBytes(); - long availableCapacity = (long) (totalCapacity * (1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); - DataNodeConfig.DiskConfig diskConfig = - new DataNodeConfig.DiskConfig(HardwareState.AVAILABLE, availableCapacity); - dataNodeConfig.addDiskConfig(mountPath, diskConfig); - } - return dataNodeConfigSource.set(dataNodeConfig); - } - logger.error("No disk information collected, nothing to populate"); - return false; + // No-op in base implementation, returns true for backward compatibility. + return true; } /** @@ -700,6 +677,22 @@ protected void markDisablePartitionComplete() { disablePartitionsComplete = true; } + /** + * Get the ClusterMapConfig. 
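/*
 * Illustrative sketch only: a worked example of the capacity arithmetic in the deleted
 * calculateInstanceCapacityMap / populateDataNodeConfig logic above. The 3 TiB total and the
 * 5% reserve are hypothetical inputs; only the formulas mirror the removed code.
 */
class CapacitySketch {
  public static void main(String[] args) {
    long totalDiskBytes = 3L * 1024 * 1024 * 1024 * 1024; // e.g. 3 TiB across all ambrydata mounts
    double reserve = 0.05;                                // clustermap.reserve.disk.space.percentage

    // Instance-level "DISK" capacity in GiB after subtracting the reserve: 3072 GiB * 0.95
    int capacityGiB = (int) (totalDiskBytes / (1024.0 * 1024.0 * 1024.0) * (1.0 - reserve));
    System.out.println(capacityGiB); // 2918

    // Per-disk available capacity in bytes, as written into each DiskConfig
    long perDiskAvailable = (long) (totalDiskBytes * (1.0 - reserve));
    System.out.println(perDiskAvailable);
  }
}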
Exposed for subclasses. + * @return the {@link ClusterMapConfig} associated with this participant. + */ + protected ClusterMapConfig getClusterMapConfig() { + return clusterMapConfig; + } + + /** + * Get the DataNodeConfigSource. Exposed for subclasses. + * @return the {@link DataNodeConfigSource} associated with this participant. + */ + protected DataNodeConfigSource getDataNodeConfigSource() { + return dataNodeConfigSource; + } + /** * Disable/enable partition on local node. This method will update both InstanceConfig and DataNodeConfig in PropertyStore. * @param partitionName name of partition on local node diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/LiStatefulSetMetadata.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/LiStatefulSetMetadata.java deleted file mode 100644 index 7d6b5e225e..0000000000 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/LiStatefulSetMetadata.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2024 LinkedIn Corp. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - */ - -package com.github.ambry.clustermap; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.github.ambry.utils.Utils; -import java.util.ArrayList; -import java.util.List; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Represents metadata from liStatefulSet.json file containing Kubernetes StatefulSet information. - */ -@JsonIgnoreProperties(ignoreUnknown = true) -public class LiStatefulSetMetadata { - private static final Logger logger = LoggerFactory.getLogger(LiStatefulSetMetadata.class); - - @JsonProperty("metadata") - private Metadata metadata; - - /** - * Read LiStatefulSet metadata from the specified file path. - * @param filePath the path to the liStatefulSet.json file - * @return LiStatefulSetMetadata instance, or null if file cannot be read - */ - public static LiStatefulSetMetadata readFromFile(String filePath) { - return Utils.readJsonFromFile(filePath, LiStatefulSetMetadata.class); - } - - /** - * Get the StatefulSet name from metadata. - * @return the name, or null if not available - */ - public String getName() { - return metadata != null ? metadata.name : null; - } - - /** - * Extract resource tags from the StatefulSet name. - * Expected format: "v1.ambry-prod.{resourceTag}" or "v1.ambry-prod.{start}-{end}" for ranges - * Examples: - * - "v1.ambry-video.10032" -> ["10032"] - * - "v1.ambry-video.10032-10033" -> ["10032", "10033"] - * @return list of resource tags, empty if not found - */ - public List getResourceTags() { - String name = getName(); - if (name != null && name.contains(".")) { - String[] parts = name.split("\\."); - if (parts.length == 3) { - String resourcePart = parts[parts.length - 1]; // Get the last part - return parseResourceTags(resourcePart); - } - } - return new ArrayList<>(); - } - - /** - * Parse resource tags from the resource part, handling ranges. 
- * @param resourcePart the resource part (e.g., "10032" or "10032-10033") - * @return list of resource tags - */ - private List parseResourceTags(String resourcePart) { - List tags = new ArrayList<>(); - if (resourcePart == null || resourcePart.trim().isEmpty()) { - return tags; - } - resourcePart = resourcePart.trim(); - - // Check if it's a range (contains hyphen and both parts are numeric) - if (resourcePart.contains("-")) { - String[] rangeParts = resourcePart.split("-"); - if (rangeParts.length == 2) { - String startStr = rangeParts[0].trim(); - String endStr = rangeParts[1].trim(); - try { - int start = Integer.parseInt(startStr); - int end = Integer.parseInt(endStr); - // Validate range - if (start <= end) { - for (int i = start; i <= end; i++) { - tags.add(String.valueOf(i)); - } - return tags; - } else { - logger.warn("Invalid range in resource part: {} (start={}, end={})", resourcePart, start, end); - } - } catch (NumberFormatException e) { - logger.warn("Non-numeric range in resource part: {}", resourcePart); - } - } - } - - // If not a valid range, treat as single tag - tags.add(resourcePart); - return tags; - } - - // Getter - public Metadata getMetadata() { - return metadata; - } - - @Override - public String toString() { - return "LiStatefulSetMetadata{" + - "name='" + getName() + '\'' + - ", resourceTags=" + getResourceTags() + - '}'; - } - - // Inner class for JSON structure - @JsonIgnoreProperties(ignoreUnknown = true) - public static class Metadata { - @JsonProperty("name") - public String name; - } -} diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/NimbusServiceMetadata.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/NimbusServiceMetadata.java deleted file mode 100644 index 88f0d98e60..0000000000 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/NimbusServiceMetadata.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2024 LinkedIn Corp. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - */ - -package com.github.ambry.clustermap; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.github.ambry.utils.Utils; - -/** - * Represents metadata from nimbus-service.json file containing service instance information. - */ -@JsonIgnoreProperties(ignoreUnknown = true) -public class NimbusServiceMetadata { - - @JsonProperty("appInstanceID") - private String appInstanceID; - - @JsonProperty("nodeName") - private String nodeName; - - @JsonProperty("maintenanceZone") - private String maintenanceZone; - - /** - * Read nimbus service metadata from the specified file path. 
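/*
 * Illustrative sketch only: a compact version of the StatefulSet-name parsing done by the
 * deleted LiStatefulSetMetadata.getResourceTags() above, e.g. "v1.ambry-video.10032-10033"
 * expands to ["10032", "10033"]. The helper and class name are assumptions for this example.
 */
import java.util.ArrayList;
import java.util.List;

class ResourceTagSketch {
  static List<String> tagsFromName(String name) {
    List<String> tags = new ArrayList<>();
    String[] parts = name.split("\\.");
    if (parts.length != 3) {
      return tags;                        // unexpected format -> no tags
    }
    String resourcePart = parts[2].trim();
    String[] range = resourcePart.split("-");
    if (range.length == 2) {
      try {
        int start = Integer.parseInt(range[0].trim());
        int end = Integer.parseInt(range[1].trim());
        if (start <= end) {
          for (int i = start; i <= end; i++) {
            tags.add(String.valueOf(i));  // expand a numeric range into individual tags
          }
          return tags;
        }
      } catch (NumberFormatException ignored) {
        // non-numeric range: fall through and treat the whole part as a single tag
      }
    }
    tags.add(resourcePart);
    return tags;
  }

  public static void main(String[] args) {
    System.out.println(tagsFromName("v1.ambry-video.10032"));       // [10032]
    System.out.println(tagsFromName("v1.ambry-video.10032-10033")); // [10032, 10033]
  }
}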
- * @param filePath the path to the nimbus-service.json file - * @return NimbusServiceMetadata instance, or null if file cannot be read - */ - public static NimbusServiceMetadata readFromFile(String filePath) { - return Utils.readJsonFromFile(filePath, NimbusServiceMetadata.class); - } - - // Getters - public String getAppInstanceID() { - return appInstanceID; - } - - public String getNodeName() { - return nodeName; - } - - public String getMaintenanceZone() { - return maintenanceZone; - } - - @Override - public String toString() { - return "NimbusServiceMetadata{" + - "appInstanceID='" + appInstanceID + '\'' + - ", nodeName='" + nodeName + '\'' + - ", maintenanceZone='" + maintenanceZone + '\'' + - '}'; - } -} diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/DiskInfoCollectorTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/DiskInfoCollectorTest.java deleted file mode 100644 index 64f24acf63..0000000000 --- a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/DiskInfoCollectorTest.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2024 LinkedIn Corp. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - */ - -package com.github.ambry.clustermap; - -import java.util.Map; -import org.junit.Test; - -import static org.junit.Assert.*; - -/** - * Test for {@link DiskInfoCollector}. - */ -public class DiskInfoCollectorTest { - - /** - * Test DiskInfo constructor and getters. - */ - @Test - public void testDiskInfoConstructorAndGetters() { - String filesystem = "/dev/sdh1"; - String size = "21T"; - String used = "14T"; - String available = "6.5T"; - int usePercentage = 68; - String mountPoint = "/mnt/u001/ambrydata"; - - DiskInfoCollector.DiskInfo diskInfo = new DiskInfoCollector.DiskInfo( - filesystem, size, used, available, usePercentage, mountPoint); - - assertEquals("Filesystem should match", filesystem, diskInfo.getFilesystem()); - assertEquals("Size should match", size, diskInfo.getSize()); - assertEquals("Used should match", used, diskInfo.getUsed()); - assertEquals("Available should match", available, diskInfo.getAvailable()); - assertEquals("Use percentage should match", usePercentage, diskInfo.getUsePercentage()); - assertEquals("Mount point should match", mountPoint, diskInfo.getMountPoint()); - } - - /** - * Test DiskInfo toString method. - */ - @Test - public void testDiskInfoToString() { - DiskInfoCollector.DiskInfo diskInfo = new DiskInfoCollector.DiskInfo( - "/dev/sdh1", "21T", "14T", "6.5T", 68, "/mnt/u001/ambrydata"); - - String result = diskInfo.toString(); - assertTrue("Should contain filesystem", result.contains("/dev/sdh1")); - assertTrue("Should contain size", result.contains("21T")); - assertTrue("Should contain used", result.contains("14T")); - assertTrue("Should contain available", result.contains("6.5T")); - assertTrue("Should contain use percentage", result.contains("68")); - assertTrue("Should contain mount point", result.contains("/mnt/u001/ambrydata")); - } - - /** - * Test getSizeInBytes with various size formats. 
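/*
 * Illustrative sketch only: one plausible way to deserialize a metadata file such as
 * nimbus-service.json with Jackson, returning null when the file is missing, unreadable or
 * malformed. This is NOT the actual Utils.readJsonFromFile implementation referenced above,
 * just an assumed shape consistent with its documented behavior.
 */
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import java.io.IOException;

class JsonReadSketch {
  private static final ObjectMapper MAPPER =
      new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

  static <T> T readJson(String filePath, Class<T> clazz) {
    if (filePath == null || filePath.trim().isEmpty()) {
      return null;
    }
    File file = new File(filePath);
    if (!file.isFile() || !file.canRead()) {
      return null;
    }
    try {
      return MAPPER.readValue(file, clazz);
    } catch (IOException e) {
      return null; // malformed JSON -> treat as "no metadata available"
    }
  }
}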
- */ - @Test - public void testGetSizeInBytes() { - // Test bytes - DiskInfoCollector.DiskInfo diskInfo1 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1024", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Bytes should be parsed correctly", 1024L, diskInfo1.getSizeInBytes()); - - // Test kilobytes - DiskInfoCollector.DiskInfo diskInfo2 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1K", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Kilobytes should be parsed correctly", 1024L, diskInfo2.getSizeInBytes()); - - // Test megabytes - DiskInfoCollector.DiskInfo diskInfo3 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1M", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Megabytes should be parsed correctly", 1024L * 1024L, diskInfo3.getSizeInBytes()); - - // Test gigabytes - DiskInfoCollector.DiskInfo diskInfo4 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1G", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Gigabytes should be parsed correctly", 1024L * 1024L * 1024L, diskInfo4.getSizeInBytes()); - - // Test terabytes - DiskInfoCollector.DiskInfo diskInfo5 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1T", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Terabytes should be parsed correctly", 1024L * 1024L * 1024L * 1024L, diskInfo5.getSizeInBytes()); - - // Test petabytes - DiskInfoCollector.DiskInfo diskInfo6 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1P", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Petabytes should be parsed correctly", 1024L * 1024L * 1024L * 1024L * 1024L, diskInfo6.getSizeInBytes()); - - // Test exabytes - DiskInfoCollector.DiskInfo diskInfo7 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1E", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Exabytes should be parsed correctly", 1024L * 1024L * 1024L * 1024L * 1024L * 1024L, diskInfo7.getSizeInBytes()); - - // Test decimal values - DiskInfoCollector.DiskInfo diskInfo8 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1.5G", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Decimal gigabytes should be parsed correctly", (long)(1.5 * 1024L * 1024L * 1024L), diskInfo8.getSizeInBytes()); - - // Test case insensitive - DiskInfoCollector.DiskInfo diskInfo9 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1g", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Lowercase units should be parsed correctly", 1024L * 1024L * 1024L, diskInfo9.getSizeInBytes()); - } - - /** - * Test getSizeInBytes with invalid formats. 
- */ - @Test - public void testGetSizeInBytesInvalidFormats() { - // Test null - DiskInfoCollector.DiskInfo diskInfo1 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", null, "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Null size should return -1", -1L, diskInfo1.getSizeInBytes()); - - // Test empty string - DiskInfoCollector.DiskInfo diskInfo2 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Empty size should return -1", -1L, diskInfo2.getSizeInBytes()); - - // Test invalid format - DiskInfoCollector.DiskInfo diskInfo3 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "invalid", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Invalid size should return -1", -1L, diskInfo3.getSizeInBytes()); - - // Test invalid number - DiskInfoCollector.DiskInfo diskInfo4 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "abcG", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Invalid number should return -1", -1L, diskInfo4.getSizeInBytes()); - - // Test unsupported unit - DiskInfoCollector.DiskInfo diskInfo5 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1Z", "512", "512", 50, "/mnt/u001/ambrydata"); - assertEquals("Unsupported unit should return -1", -1L, diskInfo5.getSizeInBytes()); - } - - /** - * Test collectDiskInfo method. - * Note: This test will actually run the 'df -h' command, so results may vary by system. - */ - @Test - public void testCollectDiskInfo() { - Map diskInfoMap = DiskInfoCollector.collectDiskInfo(); - - // Should return a map (may be empty if no Ambry mount points exist) - assertNotNull("DiskInfo map should not be null", diskInfoMap); - - // If there are results, validate the structure - for (Map.Entry entry : diskInfoMap.entrySet()) { - String mountPoint = entry.getKey(); - DiskInfoCollector.DiskInfo diskInfo = entry.getValue(); - - assertNotNull("Mount point should not be null", mountPoint); - assertNotNull("DiskInfo should not be null", diskInfo); - assertEquals("Mount point should match DiskInfo mount point", mountPoint, diskInfo.getMountPoint()); - assertTrue("Mount point should match Ambry pattern", mountPoint.matches("/mnt/u\\d+/ambrydata")); - assertNotNull("Filesystem should not be null", diskInfo.getFilesystem()); - assertNotNull("Size should not be null", diskInfo.getSize()); - assertNotNull("Used should not be null", diskInfo.getUsed()); - assertNotNull("Available should not be null", diskInfo.getAvailable()); - assertTrue("Use percentage should be valid", diskInfo.getUsePercentage() >= 0 && diskInfo.getUsePercentage() <= 100); - } - } - - /** - * Test getTotalCapacity method. 
- */ - @Test - public void testGetTotalCapacity() { - Map diskInfoMap = new java.util.HashMap<>(); - - // Test empty map - assertEquals("Empty map should have zero capacity", 0L, DiskInfoCollector.getTotalCapacity(diskInfoMap)); - - // Add some disk info - diskInfoMap.put("/mnt/u001/ambrydata", new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1G", "512M", "512M", 50, "/mnt/u001/ambrydata")); - diskInfoMap.put("/mnt/u002/ambrydata", new DiskInfoCollector.DiskInfo( - "/dev/sdb1", "2G", "1G", "1G", 50, "/mnt/u002/ambrydata")); - - long expectedTotal = (1024L * 1024L * 1024L) + (2L * 1024L * 1024L * 1024L); // 1G + 2G = 3G - assertEquals("Total capacity should be sum of all disks", expectedTotal, DiskInfoCollector.getTotalCapacity(diskInfoMap)); - - // Add disk with invalid size - diskInfoMap.put("/mnt/u003/ambrydata", new DiskInfoCollector.DiskInfo( - "/dev/sdc1", "invalid", "1G", "1G", 50, "/mnt/u003/ambrydata")); - - // Should still be 3G (invalid size is filtered out) - assertEquals("Invalid sizes should be filtered out", expectedTotal, DiskInfoCollector.getTotalCapacity(diskInfoMap)); - } - - /** - * Test parseDfLine method indirectly by testing the pattern matching. - */ - @Test - public void testDfLinePatternMatching() { - // Test valid df line - Map result1 = DiskInfoCollector.collectDiskInfo(); - // This will test the actual df command, but we can't easily test parseDfLine directly - // since it's private. The collectDiskInfo test above covers this functionality. - - // We can test the pattern indirectly by knowing what should match - String validLine = "/dev/sdh1 21T 14T 6.5T 68% /mnt/u001/ambrydata"; - // The pattern should match this format, but since parseDfLine is private, - // we rely on the collectDiskInfo method to test this functionality - - assertNotNull("collectDiskInfo should work without throwing exceptions", result1); - } - - /** - * Test edge cases for DiskInfo creation. - */ - @Test - public void testDiskInfoEdgeCases() { - // Test with zero use percentage - DiskInfoCollector.DiskInfo diskInfo1 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1G", "0", "1G", 0, "/mnt/u001/ambrydata"); - assertEquals("Zero use percentage should be valid", 0, diskInfo1.getUsePercentage()); - - // Test with 100% use percentage - DiskInfoCollector.DiskInfo diskInfo2 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "1G", "1G", "0", 100, "/mnt/u001/ambrydata"); - assertEquals("100% use percentage should be valid", 100, diskInfo2.getUsePercentage()); - - // Test with very large sizes - DiskInfoCollector.DiskInfo diskInfo3 = new DiskInfoCollector.DiskInfo( - "/dev/sda1", "100T", "50T", "50T", 50, "/mnt/u001/ambrydata"); - assertTrue("Very large sizes should be parsed correctly", diskInfo3.getSizeInBytes() > 0); - } -} diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixFactoryTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixFactoryTest.java deleted file mode 100644 index 8c5c5b0ebf..0000000000 --- a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixFactoryTest.java +++ /dev/null @@ -1,322 +0,0 @@ -/* - * Copyright 2017 LinkedIn Corp. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - */ - -package com.github.ambry.clustermap; - -import com.github.ambry.config.ClusterMapConfig; -import com.github.ambry.config.VerifiableProperties; -import java.util.Properties; -import org.apache.helix.HelixManager; -import org.apache.helix.InstanceType; -import org.junit.Test; -import org.mockito.Mockito; - -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; - -/** - * Test for {@link HelixFactory}. - */ -public class HelixFactoryTest { - - private static final String CLUSTER_NAME = "test-cluster"; - private static final String INSTANCE_NAME = "localhost_1234"; - private static final String ZK_ADDR = "localhost:2181"; - - /** - * Test ManagerKey equals and hashCode methods. - */ - @Test - public void testManagerKeyEqualsAndHashCode() { - HelixFactory.ManagerKey key1 = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); - HelixFactory.ManagerKey key2 = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); - HelixFactory.ManagerKey key3 = new HelixFactory.ManagerKey("different-cluster", INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); - HelixFactory.ManagerKey key4 = new HelixFactory.ManagerKey(CLUSTER_NAME, "different-instance", InstanceType.PARTICIPANT, ZK_ADDR); - HelixFactory.ManagerKey key5 = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.SPECTATOR, ZK_ADDR); - HelixFactory.ManagerKey key6 = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, "different-zk"); - - // Test equals - assertEquals("Same keys should be equal", key1, key2); - assertNotEquals("Different cluster should not be equal", key1, key3); - assertNotEquals("Different instance should not be equal", key1, key4); - assertNotEquals("Different instance type should not be equal", key1, key5); - assertNotEquals("Different zk address should not be equal", key1, key6); - assertNotEquals("Key should not equal null", key1, null); - assertNotEquals("Key should not equal different class", key1, "string"); - - // Test reflexive - assertEquals("Key should equal itself", key1, key1); - - // Test hashCode consistency - assertEquals("Equal keys should have same hash code", key1.hashCode(), key2.hashCode()); - assertNotEquals("Different keys should have different hash codes", key1.hashCode(), key3.hashCode()); - } - - /** - * Test getZKHelixManager with auto-registration disabled. 
- */ - @Test - public void testGetZKHelixManagerWithoutAutoRegistration() { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - props.setProperty("clustermap.auto.registration.enabled", "false"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = new HelixFactory(); - HelixManager manager1 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - HelixManager manager2 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - - assertNotNull("Manager should not be null", manager1); - assertSame("Same manager should be returned for same parameters", manager1, manager2); - } - - /** - * Test getZKHelixManager with auto-registration enabled. - */ - @Test - public void testGetZKHelixManagerWithAutoRegistration() { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - props.setProperty("clustermap.auto.registration.enabled", "true"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = new HelixFactory(); - HelixManager manager = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - - assertNotNull("Manager should not be null", manager); - } - - /** - * Test getZKHelixManager with different instance types. - */ - @Test - public void testGetZKHelixManagerWithDifferentInstanceTypes() { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - props.setProperty("clustermap.auto.registration.enabled", "true"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = new HelixFactory(); - HelixManager participantManager = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - HelixManager spectatorManager = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.SPECTATOR, ZK_ADDR, clusterMapConfig); - - assertNotNull("Participant manager should not be null", participantManager); - assertNotNull("Spectator manager should not be null", spectatorManager); - assertNotSame("Different instance types should return different managers", participantManager, spectatorManager); - } - - /** - * Test getZKHelixManager caching behavior. 
- */ - @Test - public void testGetZKHelixManagerCaching() { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = new HelixFactory(); - - // Same parameters should return cached instance - HelixManager manager1 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - HelixManager manager2 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - assertSame("Same parameters should return cached manager", manager1, manager2); - - // Different parameters should return different instances - HelixManager manager3 = helixFactory.getZKHelixManager("different-cluster", INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - assertNotSame("Different cluster should return different manager", manager1, manager3); - - HelixManager manager4 = helixFactory.getZKHelixManager(CLUSTER_NAME, "different-instance", InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - assertNotSame("Different instance should return different manager", manager1, manager4); - - HelixManager manager5 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.SPECTATOR, ZK_ADDR, clusterMapConfig); - assertNotSame("Different instance type should return different manager", manager1, manager5); - - HelixManager manager6 = helixFactory.getZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, "different-zk", clusterMapConfig); - assertNotSame("Different ZK address should return different manager", manager1, manager6); - } - - /** - * Test getZkHelixManagerAndConnect method. - */ - @Test - public void testGetZkHelixManagerAndConnect() throws Exception { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = spy(new HelixFactory()); - HelixManager mockManager = mock(HelixManager.class); - - // Mock the buildZKHelixManager method to return our mock - doReturn(mockManager).when(helixFactory).buildZKHelixManager(anyString(), anyString(), any(InstanceType.class), anyString(), any(ClusterMapConfig.class)); - - // Test when manager is not connected - when(mockManager.isConnected()).thenReturn(false); - HelixManager result1 = helixFactory.getZkHelixManagerAndConnect(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - - assertSame("Should return the same manager", mockManager, result1); - verify(mockManager, times(1)).connect(); - - // Test when manager is already connected - when(mockManager.isConnected()).thenReturn(true); - HelixManager result2 = helixFactory.getZkHelixManagerAndConnect(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - - assertSame("Should return the same manager", mockManager, result2); - // connect() should not be called again - verify(mockManager, times(1)).connect(); - } - - /** - * Test getDataNodeConfigSource method. 
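/*
 * Illustrative sketch only: the caching behavior exercised by testGetZKHelixManagerCaching
 * above reduces to a computeIfAbsent on a composite key (cluster, instance, type, ZK address).
 * The classes below are placeholders written for this example, not the HelixFactory code.
 */
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class ManagerCacheSketch<V> {
  static final class Key {
    final String cluster, instance, type, zkAddr;
    Key(String cluster, String instance, String type, String zkAddr) {
      this.cluster = cluster; this.instance = instance; this.type = type; this.zkAddr = zkAddr;
    }
    @Override public boolean equals(Object o) {
      if (!(o instanceof Key)) {
        return false;
      }
      Key k = (Key) o;
      return Objects.equals(cluster, k.cluster) && Objects.equals(instance, k.instance)
          && Objects.equals(type, k.type) && Objects.equals(zkAddr, k.zkAddr);
    }
    @Override public int hashCode() {
      return Objects.hash(cluster, instance, type, zkAddr);
    }
  }

  private final Map<Key, V> cache = new ConcurrentHashMap<>();

  // Same key -> same cached instance; any differing field -> a new instance is built.
  V getOrCreate(Key key, Function<Key, V> builder) {
    return cache.computeIfAbsent(key, builder);
  }
}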
- */ - @Test - public void testGetDataNodeConfigSource() { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - props.setProperty("clustermap.data.node.config.source.type", "PROPERTY_STORE"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = new HelixFactory(); - DataNodeConfigSourceMetrics metrics = mock(DataNodeConfigSourceMetrics.class); - - DataNodeConfigSource source1 = helixFactory.getDataNodeConfigSource(clusterMapConfig, ZK_ADDR, metrics); - DataNodeConfigSource source2 = helixFactory.getDataNodeConfigSource(clusterMapConfig, ZK_ADDR, metrics); - - assertNotNull("DataNodeConfigSource should not be null", source1); - assertSame("Same ZK address should return cached source", source1, source2); - - // Different ZK address should return different source - DataNodeConfigSource source3 = helixFactory.getDataNodeConfigSource(clusterMapConfig, "different-zk", metrics); - assertNotSame("Different ZK address should return different source", source1, source3); - } - - /** - * Test buildZKHelixManager with null ClusterMapConfig. - */ - @Test - public void testBuildZKHelixManagerWithNullConfig() { - HelixFactory helixFactory = new HelixFactory(); - HelixManager manager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, null); - - assertNotNull("Manager should not be null even with null config", manager); - } - - /** - * Test buildZKHelixManager with auto-registration disabled. - */ - @Test - public void testBuildZKHelixManagerAutoRegistrationDisabled() { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - props.setProperty("clustermap.auto.registration.enabled", "false"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = new HelixFactory(); - HelixManager manager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - - assertNotNull("Manager should not be null", manager); - } - - /** - * Test buildZKHelixManager with auto-registration enabled. - */ - @Test - public void testBuildZKHelixManagerAutoRegistrationEnabled() { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - props.setProperty("clustermap.auto.registration.enabled", "true"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = new HelixFactory(); - HelixManager manager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - - assertNotNull("Manager should not be null", manager); - } - - /** - * Test that HelixFactory properly handles different instance types with auto-registration. 
- */ - @Test - public void testBuildZKHelixManagerInstanceTypes() { - Properties props = new Properties(); - props.setProperty("clustermap.cluster.name", CLUSTER_NAME); - props.setProperty("clustermap.datacenter.name", "DC1"); - props.setProperty("clustermap.host.name", "localhost"); - props.setProperty("clustermap.port", "1234"); - props.setProperty("clustermap.auto.registration.enabled", "true"); - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - - HelixFactory helixFactory = new HelixFactory(); - - HelixManager participantManager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR, clusterMapConfig); - HelixManager spectatorManager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.SPECTATOR, ZK_ADDR, clusterMapConfig); - HelixManager adminManager = helixFactory.buildZKHelixManager(CLUSTER_NAME, INSTANCE_NAME, InstanceType.ADMINISTRATOR, ZK_ADDR, clusterMapConfig); - - assertNotNull("Participant manager should not be null", participantManager); - assertNotNull("Spectator manager should not be null", spectatorManager); - assertNotNull("Admin manager should not be null", adminManager); - } - - /** - * Test ManagerKey constructor. - */ - @Test - public void testManagerKeyConstructor() { - HelixFactory.ManagerKey key = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); - - assertNotNull("ManagerKey should not be null", key); - // We can't directly test the private fields, but we can test equals/hashCode behavior - HelixFactory.ManagerKey sameKey = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); - assertEquals("Keys with same parameters should be equal", key, sameKey); - } - - /** - * Test edge cases for ManagerKey equals method. 
- */ - @Test - public void testManagerKeyEqualsEdgeCases() { - HelixFactory.ManagerKey key = new HelixFactory.ManagerKey(CLUSTER_NAME, INSTANCE_NAME, InstanceType.PARTICIPANT, ZK_ADDR); - - // Test with null values - HelixFactory.ManagerKey keyWithNulls = new HelixFactory.ManagerKey(null, null, null, null); - assertNotEquals("Key with nulls should not equal key with values", key, keyWithNulls); - - HelixFactory.ManagerKey anotherKeyWithNulls = new HelixFactory.ManagerKey(null, null, null, null); - assertEquals("Keys with same null values should be equal", keyWithNulls, anotherKeyWithNulls); - } -} diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixParticipantTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixParticipantTest.java index 5ed581bd47..57aaae228d 100644 --- a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixParticipantTest.java +++ b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/HelixParticipantTest.java @@ -1196,222 +1196,6 @@ public void testOfflineToBootstrapWithDelayedStateTransition() throws Exception helixParticipant.close(); } - /** - * Test populateDataNodeConfig method with comprehensive scenarios - * @throws Exception - */ - @Test - public void testPopulateDataNodeConfig() throws Exception { - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - MetricRegistry metricRegistry = new MetricRegistry(); - - // Test with mocked DataNodeConfigSource to control behavior - DataNodeConfigSource mockDataNodeConfigSource = mock(DataNodeConfigSource.class); - HelixParticipant helixParticipant = new HelixParticipant(mock(HelixClusterManager.class), clusterMapConfig, - new HelixFactory(), metricRegistry, getDefaultZkConnectStr(clusterMapConfig), true) { - @Override - public boolean populateDataNodeConfig() { - return testPopulateDataNodeConfigWithMockedDiskInfo(mockDataNodeConfigSource); - } - - private boolean testPopulateDataNodeConfigWithMockedDiskInfo(DataNodeConfigSource configSource) { - // Simulate the actual method logic with controlled disk info - Map diskInfo = createMockDiskInfo(); - if (!diskInfo.isEmpty()) { - DataNodeConfig dataNodeConfig = new DataNodeConfig( - clusterMapConfig.clusterMapDatacenterName, - clusterMapConfig.clusterMapHostName, - clusterMapConfig.clusterMapDefaultHttp2Port, - clusterMapConfig.clusterMapDefaultPort, - clusterMapConfig.clusterMapDefaultSslPort); - for (Map.Entry entry : diskInfo.entrySet()) { - String mountPath = entry.getKey(); - long totalCapacity = entry.getValue().getSizeInBytes(); - long availableCapacity = (long) (totalCapacity * (1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); - DataNodeConfig.DiskConfig diskConfig = - new DataNodeConfig.DiskConfig(HardwareState.AVAILABLE, availableCapacity); - dataNodeConfig.addDiskConfig(mountPath, diskConfig); - } - return configSource.set(dataNodeConfig); - } - return false; - } - - private Map createMockDiskInfo() { - Map diskInfo = new HashMap<>(); - diskInfo.put("/mnt/u001/ambrydata", - new DiskInfoCollector.DiskInfo("/dev/sda1", "1T", "500G", "500G", 50, "/mnt/u001/ambrydata")); - diskInfo.put("/mnt/u002/ambrydata", - new DiskInfoCollector.DiskInfo("/dev/sdb1", "2T", "1T", "1T", 50, "/mnt/u002/ambrydata")); - return diskInfo; - } - }; - - // Test successful population - when(mockDataNodeConfigSource.set(any(DataNodeConfig.class))).thenReturn(true); - assertTrue("populateDataNodeConfig should return true when successful", - 
helixParticipant.populateDataNodeConfig()); - verify(mockDataNodeConfigSource, times(1)).set(any(DataNodeConfig.class)); - - // Test DataNodeConfigSource failure - when(mockDataNodeConfigSource.set(any(DataNodeConfig.class))).thenReturn(false); - assertFalse("populateDataNodeConfig should return false when DataNodeConfigSource.set fails", - helixParticipant.populateDataNodeConfig()); - - helixParticipant.close(); - } - - /** - * Test populateDataNodeConfig with empty disk info scenario - * @throws Exception - */ - @Test - public void testPopulateDataNodeConfigEmptyDiskInfo() throws Exception { - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - MetricRegistry metricRegistry = new MetricRegistry(); - - DataNodeConfigSource mockDataNodeConfigSource = mock(DataNodeConfigSource.class); - HelixParticipant helixParticipant = new HelixParticipant(mock(HelixClusterManager.class), clusterMapConfig, - new HelixFactory(), metricRegistry, getDefaultZkConnectStr(clusterMapConfig), true) { - @Override - public boolean populateDataNodeConfig() { - // Simulate empty disk info collection - Map diskInfo = new HashMap<>(); - if (!diskInfo.isEmpty()) { - DataNodeConfig dataNodeConfig = new DataNodeConfig( - clusterMapConfig.clusterMapDatacenterName, - clusterMapConfig.clusterMapHostName, - clusterMapConfig.clusterMapDefaultHttp2Port, - clusterMapConfig.clusterMapDefaultPort, - clusterMapConfig.clusterMapDefaultSslPort); - return mockDataNodeConfigSource.set(dataNodeConfig); - } - return false; - } - }; - - // Test empty disk info returns false and doesn't call set - assertFalse("populateDataNodeConfig should return false when no disk info is collected", - helixParticipant.populateDataNodeConfig()); - verify(mockDataNodeConfigSource, never()).set(any(DataNodeConfig.class)); - - helixParticipant.close(); - } - - /** - * Test populateDataNodeConfig capacity calculation with reserved space - * @throws Exception - */ - @Test - public void testPopulateDataNodeConfigCapacityCalculation() throws Exception { - // Set custom reserved space percentage for testing - Properties testProps = new Properties(props); - testProps.setProperty("clustermap.reserve.disk.space.percentage", "0.1"); // 10% reserved - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(testProps)); - MetricRegistry metricRegistry = new MetricRegistry(); - - DataNodeConfigSource mockDataNodeConfigSource = mock(DataNodeConfigSource.class); - when(mockDataNodeConfigSource.set(any(DataNodeConfig.class))).thenReturn(true); - - HelixParticipant helixParticipant = new HelixParticipant(mock(HelixClusterManager.class), clusterMapConfig, - new HelixFactory(), metricRegistry, getDefaultZkConnectStr(clusterMapConfig), true) { - @Override - public boolean populateDataNodeConfig() { - // Create disk info with known capacity - Map diskInfo = new HashMap<>(); - // 1TB disk - diskInfo.put("/mnt/u001/ambrydata", - new DiskInfoCollector.DiskInfo("/dev/sda1", "1T", "500G", "500G", 50, "/mnt/u001/ambrydata")); - - if (!diskInfo.isEmpty()) { - DataNodeConfig dataNodeConfig = new DataNodeConfig( - clusterMapConfig.clusterMapDatacenterName, - clusterMapConfig.clusterMapHostName, - clusterMapConfig.clusterMapDefaultHttp2Port, - clusterMapConfig.clusterMapDefaultPort, - clusterMapConfig.clusterMapDefaultSslPort); - for (Map.Entry entry : diskInfo.entrySet()) { - String mountPath = entry.getKey(); - long totalCapacity = entry.getValue().getSizeInBytes(); - long availableCapacity = (long) (totalCapacity * 
(1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); - DataNodeConfig.DiskConfig diskConfig = - new DataNodeConfig.DiskConfig(HardwareState.AVAILABLE, availableCapacity); - dataNodeConfig.addDiskConfig(mountPath, diskConfig); - - // Verify capacity calculation: 1TB * (1 - 0.1) = 0.9TB - long expectedCapacity = (long) (1024L * 1024L * 1024L * 1024L * 0.9); // 0.9TB in bytes - assertEquals("Available capacity should be 90% of total capacity", - expectedCapacity, availableCapacity); - } - return mockDataNodeConfigSource.set(dataNodeConfig); - } - return false; - } - }; - - assertTrue("populateDataNodeConfig should succeed with proper capacity calculation", - helixParticipant.populateDataNodeConfig()); - - helixParticipant.close(); - } - - /** - * Test populateDataNodeConfig with multiple disk scenarios - * @throws Exception - */ - @Test - public void testPopulateDataNodeConfigMultipleDisks() throws Exception { - ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props)); - MetricRegistry metricRegistry = new MetricRegistry(); - - DataNodeConfigSource mockDataNodeConfigSource = mock(DataNodeConfigSource.class); - when(mockDataNodeConfigSource.set(any(DataNodeConfig.class))).thenReturn(true); - - HelixParticipant helixParticipant = new HelixParticipant(mock(HelixClusterManager.class), clusterMapConfig, - new HelixFactory(), metricRegistry, getDefaultZkConnectStr(clusterMapConfig), true) { - @Override - public boolean populateDataNodeConfig() { - // Create multiple disk infos - Map diskInfo = new HashMap<>(); - diskInfo.put("/mnt/u001/ambrydata", - new DiskInfoCollector.DiskInfo("/dev/sda1", "1T", "500G", "500G", 50, "/mnt/u001/ambrydata")); - diskInfo.put("/mnt/u002/ambrydata", - new DiskInfoCollector.DiskInfo("/dev/sdb1", "2T", "1T", "1T", 50, "/mnt/u002/ambrydata")); - diskInfo.put("/mnt/u003/ambrydata", - new DiskInfoCollector.DiskInfo("/dev/sdc1", "500G", "250G", "250G", 50, "/mnt/u003/ambrydata")); - - if (!diskInfo.isEmpty()) { - DataNodeConfig dataNodeConfig = new DataNodeConfig( - clusterMapConfig.clusterMapDatacenterName, - clusterMapConfig.clusterMapHostName, - clusterMapConfig.clusterMapDefaultHttp2Port, - clusterMapConfig.clusterMapDefaultPort, - clusterMapConfig.clusterMapDefaultSslPort); - - int diskCount = 0; - for (Map.Entry entry : diskInfo.entrySet()) { - String mountPath = entry.getKey(); - long totalCapacity = entry.getValue().getSizeInBytes(); - long availableCapacity = (long) (totalCapacity * (1.0 - clusterMapConfig.clusterMapReserveDiskSpacePercentage)); - DataNodeConfig.DiskConfig diskConfig = - new DataNodeConfig.DiskConfig(HardwareState.AVAILABLE, availableCapacity); - dataNodeConfig.addDiskConfig(mountPath, diskConfig); - diskCount++; - } - - assertEquals("Should process all 3 disks", 3, diskCount); - return mockDataNodeConfigSource.set(dataNodeConfig); - } - return false; - } - }; - - assertTrue("populateDataNodeConfig should succeed with multiple disks", - helixParticipant.populateDataNodeConfig()); - - helixParticipant.close(); - } - /** * Test two distributed locks * @param lock1 diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/LiStatefulSetMetadataTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/LiStatefulSetMetadataTest.java deleted file mode 100644 index 125d1fdab5..0000000000 --- a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/LiStatefulSetMetadataTest.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright 2024 LinkedIn Corp. All rights reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - */ - -package com.github.ambry.clustermap; - -import java.util.Arrays; -import java.util.List; -import org.junit.Test; - -import static org.junit.Assert.*; - -/** - * Test for {@link LiStatefulSetMetadata}. - */ -public class LiStatefulSetMetadataTest { - - /** - * Test getResourceTags with single resource tag. - */ - @Test - public void testGetResourceTagsSingle() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10032"); - List resourceTags = metadata.getResourceTags(); - - assertEquals("Should have one resource tag", 1, resourceTags.size()); - assertEquals("Resource tag should match", "10032", resourceTags.get(0)); - } - - /** - * Test getResourceTags with range. - */ - @Test - public void testGetResourceTagsRange() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10032-10033"); - List resourceTags = metadata.getResourceTags(); - - assertEquals("Should have two resource tags", 2, resourceTags.size()); - assertEquals("First resource tag should be 10032", "10032", resourceTags.get(0)); - assertEquals("Second resource tag should be 10033", "10033", resourceTags.get(1)); - } - - /** - * Test getResourceTags with larger range. - */ - @Test - public void testGetResourceTagsLargerRange() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10030-10034"); - List resourceTags = metadata.getResourceTags(); - - List expected = Arrays.asList("10030", "10031", "10032", "10033", "10034"); - assertEquals("Should have five resource tags", 5, resourceTags.size()); - assertEquals("Resource tags should match expected range", expected, resourceTags); - } - - /** - * Test getResourceTags with invalid range (non-numeric). - */ - @Test - public void testGetResourceTagsInvalidRange() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.abc-def"); - List resourceTags = metadata.getResourceTags(); - - assertEquals("Should treat as single tag", 1, resourceTags.size()); - assertEquals("Should return original string", "abc-def", resourceTags.get(0)); - } - - /** - * Test getResourceTags with invalid range (start > end). - */ - @Test - public void testGetResourceTagsInvalidRangeOrder() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10033-10032"); - List resourceTags = metadata.getResourceTags(); - - assertEquals("Should treat as single tag", 1, resourceTags.size()); - assertEquals("Should return original string", "10033-10032", resourceTags.get(0)); - } - - /** - * Test getResourceTags with malformed range (multiple hyphens). - */ - @Test - public void testGetResourceTagsMalformedRange() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10032-10033-10034"); - List resourceTags = metadata.getResourceTags(); - - assertEquals("Should treat as single tag", 1, resourceTags.size()); - assertEquals("Should return original string", "10032-10033-10034", resourceTags.get(0)); - } - - /** - * Test getResourceTags with empty name. 
- */ - @Test - public void testGetResourceTagsEmptyName() { - LiStatefulSetMetadata metadata = createMetadata(""); - List resourceTags = metadata.getResourceTags(); - - assertTrue("Should return empty list", resourceTags.isEmpty()); - } - - /** - * Test getResourceTags with null name. - */ - @Test - public void testGetResourceTagsNullName() { - LiStatefulSetMetadata metadata = createMetadata(null); - List resourceTags = metadata.getResourceTags(); - - assertTrue("Should return empty list", resourceTags.isEmpty()); - } - - /** - * Test getResourceTags with invalid format (not 3 parts). - */ - @Test - public void testGetResourceTagsInvalidFormat() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry"); - List resourceTags = metadata.getResourceTags(); - - assertTrue("Should return empty list for invalid format", resourceTags.isEmpty()); - } - - /** - * Test getResourceTags with whitespace in range. - */ - @Test - public void testGetResourceTagsWithWhitespace() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video. 10032 - 10033 "); - List resourceTags = metadata.getResourceTags(); - - assertEquals("Should have two resource tags", 2, resourceTags.size()); - assertEquals("First resource tag should be 10032", "10032", resourceTags.get(0)); - assertEquals("Second resource tag should be 10033", "10033", resourceTags.get(1)); - } - - - /** - * Test toString method with resource tags. - */ - @Test - public void testToString() { - LiStatefulSetMetadata metadata = createMetadata("v1.ambry-video.10032-10033"); - String result = metadata.toString(); - - assertTrue("Should contain name", result.contains("v1.ambry-video.10032-10033")); - assertTrue("Should contain resource tags", result.contains("[10032, 10033]")); - } - - /** - * Helper method to create LiStatefulSetMetadata with given name. - */ - private LiStatefulSetMetadata createMetadata(String name) { - LiStatefulSetMetadata metadata = new LiStatefulSetMetadata(); - LiStatefulSetMetadata.Metadata innerMetadata = new LiStatefulSetMetadata.Metadata(); - innerMetadata.name = name; - - // Use reflection to set the private field - try { - java.lang.reflect.Field field = LiStatefulSetMetadata.class.getDeclaredField("metadata"); - field.setAccessible(true); - field.set(metadata, innerMetadata); - } catch (Exception e) { - throw new RuntimeException("Failed to set metadata field", e); - } - - return metadata; - } -} diff --git a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/NimbusServiceMetadataTest.java b/ambry-clustermap/src/test/java/com/github/ambry/clustermap/NimbusServiceMetadataTest.java deleted file mode 100644 index 00f8b46eaa..0000000000 --- a/ambry-clustermap/src/test/java/com/github/ambry/clustermap/NimbusServiceMetadataTest.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright 2024 LinkedIn Corp. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- */ - -package com.github.ambry.clustermap; - -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.*; - -/** - * Test for {@link NimbusServiceMetadata}. - */ -public class NimbusServiceMetadataTest { - - private Path tempDir; - - @Before - public void setUp() throws IOException { - tempDir = Files.createTempDirectory("nimbus-test"); - } - - @After - public void tearDown() throws IOException { - if (tempDir != null) { - Files.walk(tempDir) - .map(Path::toFile) - .forEach(File::delete); - } - } - - /** - * Test successful reading of valid nimbus service metadata. - */ - @Test - public void testReadFromFileValid() throws IOException { - String jsonContent = "{\n" + - " \"appInstanceID\": \"test-app-123\",\n" + - " \"nodeName\": \"test-node-01\",\n" + - " \"maintenanceZone\": \"zone-a\"\n" + - "}"; - - File metadataFile = createTempFile("nimbus-service.json", jsonContent); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - assertNotNull("Metadata should not be null", metadata); - assertEquals("App instance ID should match", "test-app-123", metadata.getAppInstanceID()); - assertEquals("Node name should match", "test-node-01", metadata.getNodeName()); - assertEquals("Maintenance zone should match", "zone-a", metadata.getMaintenanceZone()); - } - - /** - * Test reading with missing optional fields. - */ - @Test - public void testReadFromFilePartialData() throws IOException { - String jsonContent = "{\n" + - " \"appInstanceID\": \"test-app-456\"\n" + - "}"; - - File metadataFile = createTempFile("nimbus-service-partial.json", jsonContent); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - assertNotNull("Metadata should not be null", metadata); - assertEquals("App instance ID should match", "test-app-456", metadata.getAppInstanceID()); - assertNull("Node name should be null", metadata.getNodeName()); - assertNull("Maintenance zone should be null", metadata.getMaintenanceZone()); - } - - /** - * Test reading with extra unknown fields (should be ignored). - */ - @Test - public void testReadFromFileWithUnknownFields() throws IOException { - String jsonContent = "{\n" + - " \"appInstanceID\": \"test-app-789\",\n" + - " \"nodeName\": \"test-node-02\",\n" + - " \"maintenanceZone\": \"zone-b\",\n" + - " \"unknownField\": \"should-be-ignored\",\n" + - " \"anotherUnknownField\": 12345\n" + - "}"; - - File metadataFile = createTempFile("nimbus-service-extra.json", jsonContent); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - assertNotNull("Metadata should not be null", metadata); - assertEquals("App instance ID should match", "test-app-789", metadata.getAppInstanceID()); - assertEquals("Node name should match", "test-node-02", metadata.getNodeName()); - assertEquals("Maintenance zone should match", "zone-b", metadata.getMaintenanceZone()); - } - - /** - * Test reading from null file path. - */ - @Test - public void testReadFromFileNullPath() { - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(null); - assertNull("Metadata should be null for null path", metadata); - } - - /** - * Test reading from empty file path. 
- */ - @Test - public void testReadFromFileEmptyPath() { - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(""); - assertNull("Metadata should be null for empty path", metadata); - - metadata = NimbusServiceMetadata.readFromFile(" "); - assertNull("Metadata should be null for whitespace path", metadata); - } - - /** - * Test reading from non-existent file. - */ - @Test - public void testReadFromFileNonExistent() { - String nonExistentPath = tempDir.resolve("non-existent-file.json").toString(); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(nonExistentPath); - assertNull("Metadata should be null for non-existent file", metadata); - } - - /** - * Test reading from unreadable file. - */ - @Test - public void testReadFromFileUnreadable() throws IOException { - File metadataFile = createTempFile("unreadable.json", "{}"); - metadataFile.setReadable(false); - - try { - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - // On some systems, setReadable(false) might not work, so we check if it actually became unreadable - if (!metadataFile.canRead()) { - assertNull("Metadata should be null for unreadable file", metadata); - } - } finally { - metadataFile.setReadable(true); // Restore for cleanup - } - } - - /** - * Test reading from file with invalid JSON. - */ - @Test - public void testReadFromFileInvalidJson() throws IOException { - String invalidJsonContent = "{\n" + - " \"appInstanceID\": \"test-app\",\n" + - " \"nodeName\": \"test-node\"\n" + - " // missing closing brace"; - - File metadataFile = createTempFile("invalid.json", invalidJsonContent); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - assertNull("Metadata should be null for invalid JSON", metadata); - } - - /** - * Test reading from empty JSON file. - */ - @Test - public void testReadFromFileEmptyJson() throws IOException { - File metadataFile = createTempFile("empty.json", "{}"); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - assertNotNull("Metadata should not be null for empty JSON", metadata); - assertNull("App instance ID should be null", metadata.getAppInstanceID()); - assertNull("Node name should be null", metadata.getNodeName()); - assertNull("Maintenance zone should be null", metadata.getMaintenanceZone()); - } - - /** - * Test getters with null values. - */ - @Test - public void testGettersWithNullValues() throws IOException { - File metadataFile = createTempFile("null-values.json", "{}"); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - assertNotNull("Metadata should not be null", metadata); - assertNull("App instance ID should be null", metadata.getAppInstanceID()); - assertNull("Node name should be null", metadata.getNodeName()); - assertNull("Maintenance zone should be null", metadata.getMaintenanceZone()); - } - - /** - * Test toString method. 
- */ - @Test - public void testToString() throws IOException { - String jsonContent = "{\n" + - " \"appInstanceID\": \"test-app-toString\",\n" + - " \"nodeName\": \"test-node-toString\",\n" + - " \"maintenanceZone\": \"zone-toString\"\n" + - "}"; - - File metadataFile = createTempFile("toString-test.json", jsonContent); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - String result = metadata.toString(); - assertTrue("Should contain app instance ID", result.contains("test-app-toString")); - assertTrue("Should contain node name", result.contains("test-node-toString")); - assertTrue("Should contain maintenance zone", result.contains("zone-toString")); - assertTrue("Should contain class name", result.contains("NimbusServiceMetadata")); - } - - /** - * Test toString method with null values. - */ - @Test - public void testToStringWithNullValues() throws IOException { - File metadataFile = createTempFile("toString-null.json", "{}"); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - String result = metadata.toString(); - assertTrue("Should contain null values", result.contains("null")); - assertTrue("Should contain class name", result.contains("NimbusServiceMetadata")); - } - - /** - * Test reading with different JSON formatting. - */ - @Test - public void testReadFromFileCompactJson() throws IOException { - String compactJsonContent = "{\"appInstanceID\":\"compact-app\",\"nodeName\":\"compact-node\",\"maintenanceZone\":\"compact-zone\"}"; - - File metadataFile = createTempFile("compact.json", compactJsonContent); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - assertNotNull("Metadata should not be null", metadata); - assertEquals("App instance ID should match", "compact-app", metadata.getAppInstanceID()); - assertEquals("Node name should match", "compact-node", metadata.getNodeName()); - assertEquals("Maintenance zone should match", "compact-zone", metadata.getMaintenanceZone()); - } - - /** - * Test reading with special characters in values. - */ - @Test - public void testReadFromFileSpecialCharacters() throws IOException { - String jsonContent = "{\n" + - " \"appInstanceID\": \"app-with-special-chars-@#$%\",\n" + - " \"nodeName\": \"node_with_underscores_123\",\n" + - " \"maintenanceZone\": \"zone.with.dots\"\n" + - "}"; - - File metadataFile = createTempFile("special-chars.json", jsonContent); - NimbusServiceMetadata metadata = NimbusServiceMetadata.readFromFile(metadataFile.getAbsolutePath()); - - assertNotNull("Metadata should not be null", metadata); - assertEquals("App instance ID should handle special chars", "app-with-special-chars-@#$%", metadata.getAppInstanceID()); - assertEquals("Node name should handle underscores", "node_with_underscores_123", metadata.getNodeName()); - assertEquals("Maintenance zone should handle dots", "zone.with.dots", metadata.getMaintenanceZone()); - } - - /** - * Helper method to create a temporary file with given content. 
- */ - private File createTempFile(String fileName, String content) throws IOException { - File file = tempDir.resolve(fileName).toFile(); - try (FileWriter writer = new FileWriter(file)) { - writer.write(content); - } - return file; - } -} diff --git a/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java b/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java index fd580e443e..ed9fe36a03 100644 --- a/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java +++ b/ambry-utils/src/main/java/com/github/ambry/utils/Utils.java @@ -72,7 +72,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.zip.CRC32; -import com.fasterxml.jackson.databind.ObjectMapper; import org.json.JSONException; import org.json.JSONObject; import org.slf4j.Logger; @@ -1650,49 +1649,4 @@ public static byte[] base64DecodeUrlSafe(String base64String) { return org.apache.commons.codec.binary.Base64.decodeBase64(base64String); } } - - // JSON file utilities - private static final ObjectMapper objectMapper = new ObjectMapper(); - - /** - * Read and parse a JSON file into the specified class type. - * @param filePath the path to the JSON file - * @param clazz the class type to deserialize into - * @param the type of the class - * @return instance of the specified class, or null if file cannot be read or parsed - */ - public static T readJsonFromFile(String filePath, Class clazz) { - if (filePath == null || filePath.trim().isEmpty()) { - logger.warn("JSON file path is null or empty for class: {}", clazz.getSimpleName()); - return null; - } - - File jsonFile = new File(filePath); - if (!jsonFile.exists()) { - logger.warn("JSON file does not exist: {} for class: {}", filePath, clazz.getSimpleName()); - return null; - } - - if (!jsonFile.canRead()) { - logger.warn("Cannot read JSON file: {} for class: {}", filePath, clazz.getSimpleName()); - return null; - } - - try { - T instance = objectMapper.readValue(jsonFile, clazz); - logger.info("Successfully read {} from: {}", clazz.getSimpleName(), filePath); - return instance; - } catch (IOException e) { - logger.error("Failed to parse JSON file: {} for class: {}", filePath, clazz.getSimpleName(), e); - return null; - } - } - - /** - * Get the shared ObjectMapper instance. 
- * @return the ObjectMapper instance - */ - public static ObjectMapper getObjectMapper() { - return objectMapper; - } } diff --git a/ambry-utils/src/test/java/com/github/ambry/utils/UtilsTest.java b/ambry-utils/src/test/java/com/github/ambry/utils/UtilsTest.java index 6724c0649e..d0f57094d1 100644 --- a/ambry-utils/src/test/java/com/github/ambry/utils/UtilsTest.java +++ b/ambry-utils/src/test/java/com/github/ambry/utils/UtilsTest.java @@ -13,7 +13,6 @@ */ package com.github.ambry.utils; -import com.fasterxml.jackson.annotation.JsonProperty; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.PooledByteBufAllocator; @@ -22,7 +21,6 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; -import java.io.FileWriter; import java.io.IOException; import java.lang.reflect.Field; import java.nio.ByteBuffer; @@ -41,13 +39,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; -import java.nio.file.Files; -import java.nio.file.Path; import javax.net.ssl.SSLException; import org.apache.commons.io.FileUtils; -import org.junit.After; import org.junit.Assert; -import org.junit.Before; import org.junit.Test; import static org.junit.Assert.*; @@ -59,22 +53,6 @@ */ public class UtilsTest { static final String STATIC_FIELD_TEST_STRING = "field1"; - - private Path tempDir; - - @Before - public void setUp() throws IOException { - tempDir = Files.createTempDirectory("utils-test"); - } - - @After - public void tearDown() throws IOException { - if (tempDir != null) { - Files.walk(tempDir) - .map(Path::toFile) - .forEach(File::delete); - } - } @Test(expected = IllegalArgumentException.class) public void testGetRandomLongException() { @@ -807,122 +785,6 @@ public void testByteArrayCheckNotNullOrEmpty() { } assertTrue(thrownEx instanceof IllegalArgumentException); } - - // JSON file utilities tests - - /** - * Test class for JSON deserialization. - */ - public static class TestData { - @JsonProperty("name") - public String name; - - @JsonProperty("value") - public int value; - - @JsonProperty("enabled") - public boolean enabled; - } - - /** - * Test successful reading of valid JSON file. - */ - @Test - public void testReadJsonFromFileValid() throws IOException { - String jsonContent = "{\n" + - " \"name\": \"test-name\",\n" + - " \"value\": 42,\n" + - " \"enabled\": true\n" + - "}"; - - File jsonFile = createTempFile("valid.json", jsonContent); - TestData result = Utils.readJsonFromFile(jsonFile.getAbsolutePath(), TestData.class); - - assertNotNull("Result should not be null", result); - assertEquals("Name should match", "test-name", result.name); - assertEquals("Value should match", 42, result.value); - assertTrue("Enabled should be true", result.enabled); - } - - /** - * Test reading from null file path. - */ - @Test - public void testReadJsonFromFileNullPath() { - TestData result = Utils.readJsonFromFile(null, TestData.class); - assertNull("Result should be null for null path", result); - } - - /** - * Test reading from empty file path. - */ - @Test - public void testReadJsonFromFileEmptyPath() { - TestData result = Utils.readJsonFromFile("", TestData.class); - assertNull("Result should be null for empty path", result); - - result = Utils.readJsonFromFile(" ", TestData.class); - assertNull("Result should be null for whitespace path", result); - } - - /** - * Test reading from non-existent file. 
- */ - @Test - public void testReadJsonFromFileNonExistent() { - String nonExistentPath = tempDir.resolve("non-existent.json").toString(); - TestData result = Utils.readJsonFromFile(nonExistentPath, TestData.class); - assertNull("Result should be null for non-existent file", result); - } - - /** - * Test reading from file with invalid JSON. - */ - @Test - public void testReadJsonFromFileInvalidJson() throws IOException { - String invalidJsonContent = "{\n" + - " \"name\": \"test\",\n" + - " \"value\": 42\n" + - " // missing closing brace"; - - File jsonFile = createTempFile("invalid.json", invalidJsonContent); - TestData result = Utils.readJsonFromFile(jsonFile.getAbsolutePath(), TestData.class); - assertNull("Result should be null for invalid JSON", result); - } - - /** - * Test reading from empty JSON file. - */ - @Test - public void testReadJsonFromFileEmptyJson() throws IOException { - File jsonFile = createTempFile("empty.json", "{}"); - TestData result = Utils.readJsonFromFile(jsonFile.getAbsolutePath(), TestData.class); - - assertNotNull("Result should not be null for empty JSON", result); - assertNull("Name should be null", result.name); - assertEquals("Value should be default", 0, result.value); - assertFalse("Enabled should be default false", result.enabled); - } - - /** - * Test getObjectMapper method. - */ - @Test - public void testGetObjectMapper() { - assertNotNull("ObjectMapper should not be null", Utils.getObjectMapper()); - assertSame("Should return same instance", Utils.getObjectMapper(), Utils.getObjectMapper()); - } - - /** - * Helper method to create a temporary file with given content. - */ - private File createTempFile(String fileName, String content) throws IOException { - File file = tempDir.resolve(fileName).toFile(); - try (FileWriter writer = new FileWriter(file)) { - writer.write(content); - } - return file; - } } class MockClassForTesting { diff --git a/build.gradle b/build.gradle index b797dbeb5e..8d56e7a5c5 100644 --- a/build.gradle +++ b/build.gradle @@ -205,9 +205,6 @@ project(':ambry-utils') { implementation "org.json:json:$jsonVersion" implementation "net.sf.jopt-simple:jopt-simple:$joptSimpleVersion" implementation "io.netty:netty-all:$nettyVersion" - implementation "com.fasterxml.jackson.core:jackson-core:$jacksonVersion" - implementation "com.fasterxml.jackson.core:jackson-annotations:$jacksonVersion" - implementation "com.fasterxml.jackson.core:jackson-databind:$jacksonVersion" testImplementation project(":ambry-test-utils") testImplementation "io.netty:netty-transport-native-epoll:$nettyVersion" testImplementation "commons-io:commons-io:$commonsIoVersion" From db91aeab3588aef05978c36809dbe855cf7d868d Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 6 Jan 2026 01:16:09 -0800 Subject: [PATCH 06/14] remove unused --- .../main/java/com/github/ambry/config/ClusterMapConfig.java | 1 - .../main/java/com/github/ambry/clustermap/HelixFactory.java | 4 ---- .../src/main/java/com/github/ambry/server/AmbryServer.java | 2 ++ 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java index a8d53e1b36..893c3af17a 100644 --- a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java +++ b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java @@ -48,7 +48,6 @@ public class ClusterMapConfig { public static final String PARTITION_FILTERING_ENABLED = "clustermap.enable.partition.filtering"; 
public static final String ENABLE_FILE_COPY_PROTOCOL = "clustermap.enable.file.copy.protocol"; - /** * The factory class used to get the resource state policies. */ diff --git a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java index ff8f235507..63c77313f4 100644 --- a/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java +++ b/ambry-clustermap/src/main/java/com/github/ambry/clustermap/HelixFactory.java @@ -15,9 +15,6 @@ import com.github.ambry.config.ClusterMapConfig; import com.github.ambry.utils.SystemTime; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; @@ -39,7 +36,6 @@ */ public class HelixFactory { private static final Logger LOGGER = LoggerFactory.getLogger(HelixFactory.class); - // exposed for use in testing private final Map helixManagers = new ConcurrentHashMap<>(); private final Map dataNodeConfigSources = new ConcurrentHashMap<>(); diff --git a/ambry-server/src/main/java/com/github/ambry/server/AmbryServer.java b/ambry-server/src/main/java/com/github/ambry/server/AmbryServer.java index a329b3bbe0..30310c5411 100644 --- a/ambry-server/src/main/java/com/github/ambry/server/AmbryServer.java +++ b/ambry-server/src/main/java/com/github/ambry/server/AmbryServer.java @@ -85,6 +85,7 @@ import com.github.ambry.protocol.RequestHandlerPool; import com.github.ambry.repair.RepairRequestsDb; import com.github.ambry.repair.RepairRequestsDbFactory; +import com.github.ambry.replica.prioritization.FCFSPrioritizationManager; import com.github.ambry.replica.prioritization.FileBasedReplicationPrioritizationManagerFactory; import com.github.ambry.replica.prioritization.PrioritizationManager; import com.github.ambry.replica.prioritization.PrioritizationManagerFactory; @@ -116,6 +117,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Predicate; From 8bd2788ca20e69ee5711897a557cc62c1127c677 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 6 Jan 2026 02:19:48 -0800 Subject: [PATCH 07/14] add back configs --- .../github/ambry/config/ClusterMapConfig.java | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java index 893c3af17a..36066157fb 100644 --- a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java +++ b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java @@ -417,6 +417,55 @@ public class ClusterMapConfig { @Default("false") public final boolean enableFileCopyProtocol; + /** + * Path to the nimbus service metadata file containing instance information. + */ + @Config("clustermap.nimbus.service.metadata.file.path") + @Default("") + public final String nimbusServiceMetadataFilePath; + + /** + * Path to the LiStatefulSet metadata file containing Kubernetes StatefulSet information. + */ + @Config("clustermap.listatefulset.metadata.file.path") + @Default("") + public final String liStatefulSetMetadataFilePath; + + /** + * Percentage of disk space to reserve (0.0 to 1.0). 
+ */ + @Config("clustermap.reserve.disk.space.percentage") + @Default("0.0") + public final double clusterMapReserveDiskSpacePercentage; + + /** + * Prefix for resource tags in cluster map. + */ + @Config("clustermap.resource.tag.prefix") + @Default("") + public final String clusterMapResourceTagPrefix; + + /** + * Default HTTP2 port for cluster nodes. + */ + @Config("clustermap.default.http2.port") + @Default("0") + public final int clusterMapDefaultHttp2Port; + + /** + * Default port for cluster nodes. + */ + @Config("clustermap.default.port") + @Default("0") + public final int clusterMapDefaultPort; + + /** + * Default SSL port for cluster nodes. + */ + @Config("clustermap.default.ssl.port") + @Default("0") + public final int clusterMapDefaultSslPort; + public ClusterMapConfig(VerifiableProperties verifiableProperties) { clusterMapFixedTimeoutDatanodeErrorThreshold = verifiableProperties.getIntInRange("clustermap.fixedtimeout.datanode.error.threshold", 3, 1, 100); @@ -508,5 +557,12 @@ public ClusterMapConfig(VerifiableProperties verifiableProperties) { routerPutSuccessTarget = verifiableProperties.getIntInRange(ROUTER_PUT_SUCCESS_TARGET, 2, 1, Integer.MAX_VALUE); clusterMapPartitionFilteringEnabled = verifiableProperties.getBoolean(PARTITION_FILTERING_ENABLED, false); enableFileCopyProtocol = verifiableProperties.getBoolean(ENABLE_FILE_COPY_PROTOCOL, false); + nimbusServiceMetadataFilePath = verifiableProperties.getString("clustermap.nimbus.service.metadata.file.path", ""); + liStatefulSetMetadataFilePath = verifiableProperties.getString("clustermap.listatefulset.metadata.file.path", ""); + clusterMapReserveDiskSpacePercentage = verifiableProperties.getDouble("clustermap.reserve.disk.space.percentage", 0.0); + clusterMapResourceTagPrefix = verifiableProperties.getString("clustermap.resource.tag.prefix", ""); + clusterMapDefaultHttp2Port = verifiableProperties.getInt("clustermap.default.http2.port", 0); + clusterMapDefaultPort = verifiableProperties.getInt("clustermap.default.port", 0); + clusterMapDefaultSslPort = verifiableProperties.getInt("clustermap.default.ssl.port", 0); } } From 2afa6ac765b7c4e8b801f0370f362c83b97ece32 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 6 Jan 2026 02:29:35 -0800 Subject: [PATCH 08/14] fix gha --- .github/workflows/github-actions.yml | 71 ++++++++++++---------------- 1 file changed, 29 insertions(+), 42 deletions(-) diff --git a/.github/workflows/github-actions.yml b/.github/workflows/github-actions.yml index 8580d140f4..4614230c3b 100644 --- a/.github/workflows/github-actions.yml +++ b/.github/workflows/github-actions.yml @@ -48,12 +48,11 @@ jobs: npm install -g azurite azurite --silent & - - uses: burrunan/gradle-cache-action@v1 - name: Run unit tests excluding ambry-store - with: - job-id: jdk11 - arguments: --scan -x :ambry-store:test build codeCoverageReport - gradle-version: wrapper + - name: Setup Gradle + uses: gradle/actions/setup-gradle@v3 + + - name: Run unit tests excluding ambry-store + run: ./gradlew --scan -x :ambry-store:test build codeCoverageReport - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -77,12 +76,11 @@ jobs: java-version: '11' distribution: 'adopt' - - uses: burrunan/gradle-cache-action@v1 - name: Run unit tests for ambry-store - with: - job-id: jdk11 - arguments: --scan :ambry-store:test codeCoverageReport - gradle-version: wrapper + - name: Setup Gradle + uses: gradle/actions/setup-gradle@v3 + + - name: Run unit tests for ambry-store + run: ./gradlew --scan :ambry-store:test codeCoverageReport - 
name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -125,12 +123,11 @@ jobs: mysql -e 'GRANT ALL PRIVILEGES ON * . * TO 'travis'@'localhost';' -uroot -proot mysql -e 'FLUSH PRIVILEGES;' -uroot -proot - - uses: burrunan/gradle-cache-action@v1 - name: Run integration tests excluding server integration test - with: - job-id: jdk11 - arguments: --scan intTest -x :ambry-server:intTest codeCoverageReport - gradle-version: wrapper + - name: Setup Gradle + uses: gradle/actions/setup-gradle@v3 + + - name: Run integration tests excluding server integration test + run: ./gradlew --scan intTest -x :ambry-server:intTest codeCoverageReport - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -173,12 +170,11 @@ jobs: mysql -e 'GRANT ALL PRIVILEGES ON * . * TO 'travis'@'localhost';' -uroot -proot mysql -e 'FLUSH PRIVILEGES;' -uroot -proot - - uses: burrunan/gradle-cache-action@v1 - name: Run integration tests - with: - job-id: jdk11 - arguments: --scan :ambry-server:intTest codeCoverageReport - gradle-version: wrapper + - name: Setup Gradle + uses: gradle/actions/setup-gradle@v3 + + - name: Run integration tests + run: ./gradlew --scan :ambry-server:intTest codeCoverageReport - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -207,23 +203,14 @@ jobs: java-version: '11' distribution: 'adopt' - - uses: burrunan/gradle-cache-action@v1 - name: Build artifacts and create pom files - with: - job-id: jdk11 - arguments: --scan assemble publishToMavenLocal - gradle-version: wrapper + - name: Setup Gradle + uses: gradle/actions/setup-gradle@v3 - - uses: burrunan/gradle-cache-action@v1 - name: Test publication by uploading in dry run mode - with: - job-id: jdk11 - arguments: -i --scan artifactoryPublishAll -Partifactory.dryRun - gradle-version: wrapper + - name: Build artifacts and create pom files + run: ./gradlew --scan assemble publishToMavenLocal - - uses: burrunan/gradle-cache-action@v1 - name: Tag and upload to JFrog Artifactory - with: - job-id: jdk11 - arguments: -i --scan ciPerformRelease - gradle-version: wrapper + - name: Test publication by uploading in dry run mode + run: ./gradlew -i --scan artifactoryPublishAll -Partifactory.dryRun + + - name: Tag and upload to JFrog Artifactory + run: ./gradlew -i --scan ciPerformRelease From 40383400e7ba0a1d7dba82c2369217d3866e9eb5 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 6 Jan 2026 02:36:29 -0800 Subject: [PATCH 09/14] Revert "fix gha" This reverts commit 2afa6ac765b7c4e8b801f0370f362c83b97ece32. 
--- .github/workflows/github-actions.yml | 71 ++++++++++++++++------------ 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/.github/workflows/github-actions.yml b/.github/workflows/github-actions.yml index 4614230c3b..8580d140f4 100644 --- a/.github/workflows/github-actions.yml +++ b/.github/workflows/github-actions.yml @@ -48,11 +48,12 @@ jobs: npm install -g azurite azurite --silent & - - name: Setup Gradle - uses: gradle/actions/setup-gradle@v3 - - - name: Run unit tests excluding ambry-store - run: ./gradlew --scan -x :ambry-store:test build codeCoverageReport + - uses: burrunan/gradle-cache-action@v1 + name: Run unit tests excluding ambry-store + with: + job-id: jdk11 + arguments: --scan -x :ambry-store:test build codeCoverageReport + gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -76,11 +77,12 @@ jobs: java-version: '11' distribution: 'adopt' - - name: Setup Gradle - uses: gradle/actions/setup-gradle@v3 - - - name: Run unit tests for ambry-store - run: ./gradlew --scan :ambry-store:test codeCoverageReport + - uses: burrunan/gradle-cache-action@v1 + name: Run unit tests for ambry-store + with: + job-id: jdk11 + arguments: --scan :ambry-store:test codeCoverageReport + gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -123,11 +125,12 @@ jobs: mysql -e 'GRANT ALL PRIVILEGES ON * . * TO 'travis'@'localhost';' -uroot -proot mysql -e 'FLUSH PRIVILEGES;' -uroot -proot - - name: Setup Gradle - uses: gradle/actions/setup-gradle@v3 - - - name: Run integration tests excluding server integration test - run: ./gradlew --scan intTest -x :ambry-server:intTest codeCoverageReport + - uses: burrunan/gradle-cache-action@v1 + name: Run integration tests excluding server integration test + with: + job-id: jdk11 + arguments: --scan intTest -x :ambry-server:intTest codeCoverageReport + gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -170,11 +173,12 @@ jobs: mysql -e 'GRANT ALL PRIVILEGES ON * . 
* TO 'travis'@'localhost';' -uroot -proot mysql -e 'FLUSH PRIVILEGES;' -uroot -proot - - name: Setup Gradle - uses: gradle/actions/setup-gradle@v3 - - - name: Run integration tests - run: ./gradlew --scan :ambry-server:intTest codeCoverageReport + - uses: burrunan/gradle-cache-action@v1 + name: Run integration tests + with: + job-id: jdk11 + arguments: --scan :ambry-server:intTest codeCoverageReport + gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -203,14 +207,23 @@ jobs: java-version: '11' distribution: 'adopt' - - name: Setup Gradle - uses: gradle/actions/setup-gradle@v3 - - - name: Build artifacts and create pom files - run: ./gradlew --scan assemble publishToMavenLocal + - uses: burrunan/gradle-cache-action@v1 + name: Build artifacts and create pom files + with: + job-id: jdk11 + arguments: --scan assemble publishToMavenLocal + gradle-version: wrapper - - name: Test publication by uploading in dry run mode - run: ./gradlew -i --scan artifactoryPublishAll -Partifactory.dryRun + - uses: burrunan/gradle-cache-action@v1 + name: Test publication by uploading in dry run mode + with: + job-id: jdk11 + arguments: -i --scan artifactoryPublishAll -Partifactory.dryRun + gradle-version: wrapper - - name: Tag and upload to JFrog Artifactory - run: ./gradlew -i --scan ciPerformRelease + - uses: burrunan/gradle-cache-action@v1 + name: Tag and upload to JFrog Artifactory + with: + job-id: jdk11 + arguments: -i --scan ciPerformRelease + gradle-version: wrapper From be98f20ce3beb27a43972401e517add568f679d9 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 6 Jan 2026 02:40:48 -0800 Subject: [PATCH 10/14] try again gha fix --- .github/workflows/github-actions.yml | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/.github/workflows/github-actions.yml b/.github/workflows/github-actions.yml index 8580d140f4..37f18b1106 100644 --- a/.github/workflows/github-actions.yml +++ b/.github/workflows/github-actions.yml @@ -48,12 +48,10 @@ jobs: npm install -g azurite azurite --silent & - - uses: burrunan/gradle-cache-action@v1 + - uses: gradle/gradle-build-action@v2 name: Run unit tests excluding ambry-store with: - job-id: jdk11 arguments: --scan -x :ambry-store:test build codeCoverageReport - gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -77,12 +75,10 @@ jobs: java-version: '11' distribution: 'adopt' - - uses: burrunan/gradle-cache-action@v1 + - uses: gradle/gradle-build-action@v2 name: Run unit tests for ambry-store with: - job-id: jdk11 arguments: --scan :ambry-store:test codeCoverageReport - gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -125,12 +121,10 @@ jobs: mysql -e 'GRANT ALL PRIVILEGES ON * . * TO 'travis'@'localhost';' -uroot -proot mysql -e 'FLUSH PRIVILEGES;' -uroot -proot - - uses: burrunan/gradle-cache-action@v1 + - uses: gradle/gradle-build-action@v2 name: Run integration tests excluding server integration test with: - job-id: jdk11 arguments: --scan intTest -x :ambry-server:intTest codeCoverageReport - gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -173,12 +167,10 @@ jobs: mysql -e 'GRANT ALL PRIVILEGES ON * . 
* TO 'travis'@'localhost';' -uroot -proot mysql -e 'FLUSH PRIVILEGES;' -uroot -proot - - uses: burrunan/gradle-cache-action@v1 + - uses: gradle/gradle-build-action@v2 name: Run integration tests with: - job-id: jdk11 arguments: --scan :ambry-server:intTest codeCoverageReport - gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -207,23 +199,17 @@ jobs: java-version: '11' distribution: 'adopt' - - uses: burrunan/gradle-cache-action@v1 + - uses: gradle/gradle-build-action@v2 name: Build artifacts and create pom files with: - job-id: jdk11 arguments: --scan assemble publishToMavenLocal - gradle-version: wrapper - - uses: burrunan/gradle-cache-action@v1 + - uses: gradle/gradle-build-action@v2 name: Test publication by uploading in dry run mode with: - job-id: jdk11 arguments: -i --scan artifactoryPublishAll -Partifactory.dryRun - gradle-version: wrapper - - uses: burrunan/gradle-cache-action@v1 + - uses: gradle/gradle-build-action@v2 name: Tag and upload to JFrog Artifactory with: - job-id: jdk11 arguments: -i --scan ciPerformRelease - gradle-version: wrapper From ed1bdec9d4b5ff2b90a32dc92857be1e9fa174ca Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 6 Jan 2026 02:42:13 -0800 Subject: [PATCH 11/14] Revert "try again gha fix" This reverts commit be98f20ce3beb27a43972401e517add568f679d9. --- .github/workflows/github-actions.yml | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/.github/workflows/github-actions.yml b/.github/workflows/github-actions.yml index 37f18b1106..8580d140f4 100644 --- a/.github/workflows/github-actions.yml +++ b/.github/workflows/github-actions.yml @@ -48,10 +48,12 @@ jobs: npm install -g azurite azurite --silent & - - uses: gradle/gradle-build-action@v2 + - uses: burrunan/gradle-cache-action@v1 name: Run unit tests excluding ambry-store with: + job-id: jdk11 arguments: --scan -x :ambry-store:test build codeCoverageReport + gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -75,10 +77,12 @@ jobs: java-version: '11' distribution: 'adopt' - - uses: gradle/gradle-build-action@v2 + - uses: burrunan/gradle-cache-action@v1 name: Run unit tests for ambry-store with: + job-id: jdk11 arguments: --scan :ambry-store:test codeCoverageReport + gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -121,10 +125,12 @@ jobs: mysql -e 'GRANT ALL PRIVILEGES ON * . * TO 'travis'@'localhost';' -uroot -proot mysql -e 'FLUSH PRIVILEGES;' -uroot -proot - - uses: gradle/gradle-build-action@v2 + - uses: burrunan/gradle-cache-action@v1 name: Run integration tests excluding server integration test with: + job-id: jdk11 arguments: --scan intTest -x :ambry-server:intTest codeCoverageReport + gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -167,10 +173,12 @@ jobs: mysql -e 'GRANT ALL PRIVILEGES ON * . 
* TO 'travis'@'localhost';' -uroot -proot mysql -e 'FLUSH PRIVILEGES;' -uroot -proot - - uses: gradle/gradle-build-action@v2 + - uses: burrunan/gradle-cache-action@v1 name: Run integration tests with: + job-id: jdk11 arguments: --scan :ambry-server:intTest codeCoverageReport + gradle-version: wrapper - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 @@ -199,17 +207,23 @@ jobs: java-version: '11' distribution: 'adopt' - - uses: gradle/gradle-build-action@v2 + - uses: burrunan/gradle-cache-action@v1 name: Build artifacts and create pom files with: + job-id: jdk11 arguments: --scan assemble publishToMavenLocal + gradle-version: wrapper - - uses: gradle/gradle-build-action@v2 + - uses: burrunan/gradle-cache-action@v1 name: Test publication by uploading in dry run mode with: + job-id: jdk11 arguments: -i --scan artifactoryPublishAll -Partifactory.dryRun + gradle-version: wrapper - - uses: gradle/gradle-build-action@v2 + - uses: burrunan/gradle-cache-action@v1 name: Tag and upload to JFrog Artifactory with: + job-id: jdk11 arguments: -i --scan ciPerformRelease + gradle-version: wrapper From 488e57fc729584c0c5addc9259aa33c6aaeef434 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Tue, 6 Jan 2026 10:20:24 -0800 Subject: [PATCH 12/14] rerun gha From 0546923f6a12c7d280caec170bf1620a44150a73 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Wed, 7 Jan 2026 14:04:54 -0800 Subject: [PATCH 13/14] remove config in OSS --- .../github/ambry/config/ClusterMapConfig.java | 56 ------------------- 1 file changed, 56 deletions(-) diff --git a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java index 36066157fb..893c3af17a 100644 --- a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java +++ b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java @@ -417,55 +417,6 @@ public class ClusterMapConfig { @Default("false") public final boolean enableFileCopyProtocol; - /** - * Path to the nimbus service metadata file containing instance information. - */ - @Config("clustermap.nimbus.service.metadata.file.path") - @Default("") - public final String nimbusServiceMetadataFilePath; - - /** - * Path to the LiStatefulSet metadata file containing Kubernetes StatefulSet information. - */ - @Config("clustermap.listatefulset.metadata.file.path") - @Default("") - public final String liStatefulSetMetadataFilePath; - - /** - * Percentage of disk space to reserve (0.0 to 1.0). - */ - @Config("clustermap.reserve.disk.space.percentage") - @Default("0.0") - public final double clusterMapReserveDiskSpacePercentage; - - /** - * Prefix for resource tags in cluster map. - */ - @Config("clustermap.resource.tag.prefix") - @Default("") - public final String clusterMapResourceTagPrefix; - - /** - * Default HTTP2 port for cluster nodes. - */ - @Config("clustermap.default.http2.port") - @Default("0") - public final int clusterMapDefaultHttp2Port; - - /** - * Default port for cluster nodes. - */ - @Config("clustermap.default.port") - @Default("0") - public final int clusterMapDefaultPort; - - /** - * Default SSL port for cluster nodes. 
- */ - @Config("clustermap.default.ssl.port") - @Default("0") - public final int clusterMapDefaultSslPort; - public ClusterMapConfig(VerifiableProperties verifiableProperties) { clusterMapFixedTimeoutDatanodeErrorThreshold = verifiableProperties.getIntInRange("clustermap.fixedtimeout.datanode.error.threshold", 3, 1, 100); @@ -557,12 +508,5 @@ public ClusterMapConfig(VerifiableProperties verifiableProperties) { routerPutSuccessTarget = verifiableProperties.getIntInRange(ROUTER_PUT_SUCCESS_TARGET, 2, 1, Integer.MAX_VALUE); clusterMapPartitionFilteringEnabled = verifiableProperties.getBoolean(PARTITION_FILTERING_ENABLED, false); enableFileCopyProtocol = verifiableProperties.getBoolean(ENABLE_FILE_COPY_PROTOCOL, false); - nimbusServiceMetadataFilePath = verifiableProperties.getString("clustermap.nimbus.service.metadata.file.path", ""); - liStatefulSetMetadataFilePath = verifiableProperties.getString("clustermap.listatefulset.metadata.file.path", ""); - clusterMapReserveDiskSpacePercentage = verifiableProperties.getDouble("clustermap.reserve.disk.space.percentage", 0.0); - clusterMapResourceTagPrefix = verifiableProperties.getString("clustermap.resource.tag.prefix", ""); - clusterMapDefaultHttp2Port = verifiableProperties.getInt("clustermap.default.http2.port", 0); - clusterMapDefaultPort = verifiableProperties.getInt("clustermap.default.port", 0); - clusterMapDefaultSslPort = verifiableProperties.getInt("clustermap.default.ssl.port", 0); } } From 3ffc87712fa9498f79657d7f05f05cf68ecb7691 Mon Sep 17 00:00:00 2001 From: Cris Liao Date: Wed, 7 Jan 2026 16:00:29 -0800 Subject: [PATCH 14/14] expose verifiableProperties for internal config pass down --- .../main/java/com/github/ambry/config/ClusterMapConfig.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java index 893c3af17a..afdb03f828 100644 --- a/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java +++ b/ambry-api/src/main/java/com/github/ambry/config/ClusterMapConfig.java @@ -417,7 +417,13 @@ public class ClusterMapConfig { @Default("false") public final boolean enableFileCopyProtocol; + /** + * The VerifiableProperties used to construct this config. + */ + public final VerifiableProperties verifiableProperties; + public ClusterMapConfig(VerifiableProperties verifiableProperties) { + this.verifiableProperties = verifiableProperties; clusterMapFixedTimeoutDatanodeErrorThreshold = verifiableProperties.getIntInRange("clustermap.fixedtimeout.datanode.error.threshold", 3, 1, 100); clusterMapResourceStatePolicyFactory = verifiableProperties.getString("clustermap.resourcestatepolicy.factory",