|
11 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
| 14 | +import os |
14 | 15 |
|
15 | 16 | # pylint: disable=protected-access,too-many-lines |
16 | | - |
17 | 17 | import sys |
| 18 | +import unittest |
18 | 19 | from collections import namedtuple |
19 | 20 | from platform import python_implementation |
20 | 21 | from unittest import mock, skipIf |
21 | 22 |
|
22 | 23 | from opentelemetry.instrumentation.system_metrics import ( |
23 | 24 | _DEFAULT_CONFIG, |
| 25 | + OTEL_PYTHON_SYSTEM_METRICS_EXCLUDED_METRICS, |
24 | 26 | SystemMetricsInstrumentor, |
| 27 | + _build_default_config, |
25 | 28 | ) |
26 | 29 | from opentelemetry.sdk.metrics import MeterProvider |
27 | 30 | from opentelemetry.sdk.metrics.export import InMemoryMetricReader |
@@ -1091,3 +1094,191 @@ def test_that_correct_config_is_read(self): |
1091 | 1094 | instrumentor.instrument(meter_provider=meter_provider) |
1092 | 1095 | meter_provider.force_flush() |
1093 | 1096 | instrumentor.uninstrument() |
| 1097 | + |
| 1098 | + |
class TestBuildDefaultConfig(unittest.TestCase):
    """Tests for ``_build_default_config`` and the
    ``OTEL_PYTHON_SYSTEM_METRICS_EXCLUDED_METRICS`` environment variable.

    Covers three behaviors: no exclusions (default config returned),
    exact comma-separated metric-name exclusions, and fnmatch-style
    wildcard exclusions.
    """

    def setUp(self):
        # Snapshot os.environ so anything a test changes is rolled back,
        # keeping tests order-independent.
        self.env_patcher = mock.patch.dict("os.environ", {}, clear=False)
        self.env_patcher.start()

    def tearDown(self):
        # Stopping the patcher restores the snapshot taken in setUp; that
        # is complete cleanup.  Do NOT additionally pop the variable here:
        # an unconditional pop after the restore would delete a value that
        # was present in the real environment before the test ran.
        self.env_patcher.stop()

    def _build_with_env(self, value):
        """Call ``_build_default_config`` with the exclusion variable set
        to *value*, or removed entirely when *value* is ``None``.

        Using a nested ``mock.patch.dict`` guarantees the variable is in a
        known state even if it is set in the outer (real) environment.
        """
        with mock.patch.dict("os.environ"):
            if value is None:
                # Explicitly ensure the variable is absent — relying on it
                # simply "not being set" breaks when the host environment
                # defines it.
                os.environ.pop(
                    OTEL_PYTHON_SYSTEM_METRICS_EXCLUDED_METRICS, None
                )
            else:
                os.environ[
                    OTEL_PYTHON_SYSTEM_METRICS_EXCLUDED_METRICS
                ] = value
            return _build_default_config()

    def test_default_config_without_exclusions(self):
        """_DEFAULT_CONFIG is returned when no exclusions are specified."""
        test_cases = [
            ("no_env_var_set", None),
            ("empty_string", ""),
            ("whitespace_only", "   "),
        ]

        for name, env_value in test_cases:
            with self.subTest(name):
                self.assertEqual(
                    self._build_with_env(env_value), _DEFAULT_CONFIG
                )

    def test_exact_metric_exclusions(self):
        """Comma-separated exact names are dropped; unknown names are ignored."""
        test_cases = [
            {
                "name": "single_metric",
                "pattern": "system.cpu.time",
                "excluded": ["system.cpu.time"],
                "included": ["system.cpu.utilization", "system.memory.usage"],
                "expected_count": len(_DEFAULT_CONFIG) - 1,
            },
            {
                "name": "multiple_metrics",
                "pattern": "system.cpu.time,system.memory.usage",
                "excluded": ["system.cpu.time", "system.memory.usage"],
                "included": ["system.cpu.utilization", "process.cpu.time"],
                "expected_count": len(_DEFAULT_CONFIG) - 2,
            },
            {
                # Surrounding whitespace around each entry must be stripped.
                "name": "with_whitespace",
                "pattern": "system.cpu.time , system.memory.usage , process.cpu.time",
                "excluded": [
                    "system.cpu.time",
                    "system.memory.usage",
                    "process.cpu.time",
                ],
                "included": ["system.cpu.utilization"],
                "expected_count": len(_DEFAULT_CONFIG) - 3,
            },
            {
                # Names that match nothing leave the config untouched.
                "name": "non_existent_metric",
                "pattern": "non.existent.metric",
                "excluded": [],
                "included": ["system.cpu.time", "process.cpu.time"],
                "expected_count": len(_DEFAULT_CONFIG),
            },
        ]

        for test_case in test_cases:
            with self.subTest(test_case["name"]):
                result = self._build_with_env(test_case["pattern"])

                for metric in test_case["excluded"]:
                    self.assertNotIn(
                        metric, result, f"{metric} should be excluded"
                    )

                for metric in test_case["included"]:
                    self.assertIn(
                        metric, result, f"{metric} should be included"
                    )

                self.assertEqual(len(result), test_case["expected_count"])

    def test_wildcard_patterns(self):
        """Wildcard patterns exclude every metric whose name matches."""
        test_cases = [
            {
                "name": "all_system_metrics",
                "pattern": "system.*",
                "excluded_prefixes": ["system."],
                "included_prefixes": ["process.", "cpython."],
            },
            {
                "name": "system_cpu_prefix",
                "pattern": "system.cpu.*",
                "excluded": ["system.cpu.time", "system.cpu.utilization"],
                "included": ["system.memory.usage", "system.disk.io"],
            },
            {
                "name": "utilization_suffix",
                "pattern": "*.utilization",
                "excluded_suffixes": [".utilization"],
                "included": ["system.cpu.time", "system.memory.usage"],
            },
            {
                # A bare "*" excludes everything.
                "name": "all_metrics",
                "pattern": "*",
                "expected_count": 0,
            },
        ]

        for test_case in test_cases:
            with self.subTest(test_case["name"]):
                result = self._build_with_env(test_case["pattern"])

                for metric in test_case.get("excluded", []):
                    self.assertNotIn(
                        metric, result, f"{metric} should be excluded"
                    )

                for metric in test_case.get("included", []):
                    self.assertIn(
                        metric, result, f"{metric} should be included"
                    )

                for prefix in test_case.get("excluded_prefixes", []):
                    remaining = [k for k in result if k.startswith(prefix)]
                    self.assertEqual(
                        len(remaining),
                        0,
                        f"no metric starting with {prefix!r} should remain",
                    )

                for prefix in test_case.get("included_prefixes", []):
                    remaining = [k for k in result if k.startswith(prefix)]
                    self.assertGreater(
                        len(remaining),
                        0,
                        f"metrics starting with {prefix!r} should remain",
                    )

                for suffix in test_case.get("excluded_suffixes", []):
                    remaining = [k for k in result if k.endswith(suffix)]
                    self.assertEqual(
                        len(remaining),
                        0,
                        f"no metric ending with {suffix!r} should remain",
                    )

                if "expected_count" in test_case:
                    self.assertEqual(
                        len(result), test_case["expected_count"]
                    )
0 commit comments