Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 13 additions & 22 deletions cmd/collectors/eseries/template.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,32 +36,23 @@ func (e *ESeries) LoadTemplate() (string, error) {

// ObjectConfig holds configuration for different object types.
//
// NOTE(review): this span of the paste interleaved the pre-change fields and a
// removed newObjectConfig constructor with the post-change fields; this is the
// reconstructed post-merge struct.
type ObjectConfig struct {
	ArrayPath                  string // key of the stats array in the API response, e.g. "volumeStats"
	Filter                     string // query filter for the object type, e.g. "type=volume"
	CalculateUtilization       bool   // when true, derive a utilization metric (used for "drive")
	CalculateQueueDepthAverage bool   // when true, derive queue_depth_average (used for "volume")
	UsesSharedCache            bool   // when true, share one cached API response across objects
}

func GetESeriesPerfObjectConfig(objType string) ObjectConfig {
configs := map[string]ObjectConfig{
"controller": newObjectConfig("controllerStats", "type=controller", false),
"pool": newObjectConfig("poolStats", "type=storagePool", false),
"volume": newObjectConfig("volumeStats", "type=volume", false),
"drive": newObjectConfig("diskStats", "type=drive", true),
"interface": newObjectConfig("interfaceStats", "type=ioInterface", false),
"application": newObjectConfig("applicationStats", "type=application", false),
"workload": newObjectConfig("workloadStats", "type=workload", false),
"array": newObjectConfig("systemStats", "type=storageSystem", false),
"controller": {ArrayPath: "controllerStats", Filter: "type=controller", UsesSharedCache: true},
"pool": {ArrayPath: "poolStats", Filter: "type=storagePool", UsesSharedCache: true},
"volume": {ArrayPath: "volumeStats", Filter: "type=volume", UsesSharedCache: true, CalculateQueueDepthAverage: true},
"drive": {ArrayPath: "diskStats", Filter: "type=drive", UsesSharedCache: true, CalculateUtilization: true},
"interface": {ArrayPath: "interfaceStats", Filter: "type=ioInterface", UsesSharedCache: true},
"application": {ArrayPath: "applicationStats", Filter: "type=application", UsesSharedCache: true},
"workload": {ArrayPath: "workloadStats", Filter: "type=workload", UsesSharedCache: true},
"array": {ArrayPath: "systemStats", Filter: "type=storageSystem", UsesSharedCache: true},
}
if config, ok := configs[objType]; ok {
return config
Expand Down
66 changes: 62 additions & 4 deletions cmd/collectors/eseriesperf/eseriesperf.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,11 @@ type counter struct {
}

// perfProp holds per-collector performance-calculation state.
//
// NOTE(review): the paste interleaved the old and new field lists of this
// struct; this is the reconstructed post-merge version.
type perfProp struct {
	isCacheEmpty               bool                // presumably true until the first poll fills the cache — TODO confirm
	counterInfo                map[string]*counter // per-counter metadata keyed by counter name
	timestampMetricName        string              // name of the metric carrying the sample timestamp
	calculateUtilization       bool                // copied from ObjectConfig.CalculateUtilization in ParseTemplate
	calculateQueueDepthAverage bool                // copied from ObjectConfig.CalculateQueueDepthAverage in ParseTemplate
}

func init() {
Expand Down Expand Up @@ -156,6 +157,7 @@ func (ep *EseriesPerf) ParseTemplate() error {
}

ep.perfProp.calculateUtilization = config.CalculateUtilization
ep.perfProp.calculateQueueDepthAverage = config.CalculateQueueDepthAverage

return nil
}
Expand Down Expand Up @@ -633,6 +635,12 @@ func (ep *EseriesPerf) cookCounters(curMat *matrix.Matrix, prevMat *matrix.Matri
idleTime.SetExportable(false)
}

// Don't export queueDepthTotal; it's only used to compute queue_depth_average
queueDepthTotal := curMat.GetMetric("queueDepthTotal")
if queueDepthTotal != nil {
queueDepthTotal.SetExportable(false)
}

err = ep.validateMatrix(prevMat, curMat)
if err != nil {
return nil, err
Expand Down Expand Up @@ -677,6 +685,15 @@ func (ep *EseriesPerf) cookCounters(curMat *matrix.Matrix, prevMat *matrix.Matri
}
}

// Calculate queue depth average if template flag is set (after all deltas are done)
if ep.perfProp.calculateQueueDepthAverage {
if skips, err := ep.calculateQueueDepthAverage(curMat); err != nil {
ep.Logger.Error("Calculate queue depth average", slogx.Err(err))
} else {
totalSkips += skips
}
}

// Second pass: Apply transformations (average, percent)
for i := range orderedMetrics {
key := orderedKeys[i]
Expand Down Expand Up @@ -802,6 +819,7 @@ func (ep *EseriesPerf) calculateUtilization(curMat *matrix.Matrix) (int, error)
idleTime, idleOk := idleTimeMetric.GetValueFloat64(instance)

if !readOk || !writeOk || !idleOk {
skips++
continue
}

Expand All @@ -822,6 +840,46 @@ func (ep *EseriesPerf) calculateUtilization(curMat *matrix.Matrix) (int, error)
return skips, nil
}

// calculateQueueDepthAverage calculates the average queue depth per I/O operation.
func (ep *EseriesPerf) calculateQueueDepthAverage(curMat *matrix.Matrix) (int, error) {
queueDepthTotalMetric := curMat.GetMetric("queueDepthTotal")
readOpsMetric := curMat.GetMetric("readOps")
writeOpsMetric := curMat.GetMetric("writeOps")
otherOpsMetric := curMat.GetMetric("otherOps")

if queueDepthTotalMetric == nil || readOpsMetric == nil || writeOpsMetric == nil || otherOpsMetric == nil {
return 0, errs.New(errs.ErrMissingParam, "missing metrics (queueDepthTotal, readOps, writeOps, otherOps) for queue depth average")
Comment thread
rahulguptajss marked this conversation as resolved.
}

queueDepthAvgMetric, err := curMat.NewMetricFloat64("queue_depth_average")
if err != nil {
return 0, err
}
queueDepthAvgMetric.SetProperty("average")

skips := 0
for _, instance := range curMat.GetInstances() {
qDepthTotal, qOk := queueDepthTotalMetric.GetValueFloat64(instance)
readOps, rOk := readOpsMetric.GetValueFloat64(instance)
writeOps, wOk := writeOpsMetric.GetValueFloat64(instance)
otherOps, oOk := otherOpsMetric.GetValueFloat64(instance)

if !qOk || !rOk || !wOk || !oOk {
skips++
continue
}

totalOps := readOps + writeOps + otherOps
if totalOps > 0 {
queueDepthAvgMetric.SetValueFloat64(instance, qDepthTotal/totalOps)
} else {
skips++
}
}

return skips, nil
}

// validateMatrix ensures that the previous matrix (prevMat) contains all the metrics present in the current matrix (curMat).
// This is crucial for performing accurate comparisons and calculations between the two matrices, especially in scenarios where
// the current matrix may have additional metrics that are not present in the previous matrix, such as after an ONTAP upgrade.
Expand Down
111 changes: 111 additions & 0 deletions cmd/collectors/eseriesperf/eseriesperf_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -862,3 +862,114 @@ func TestEseriesPerf_SsdCache_NegativeDelta_SkipOnCounterReset(t *testing.T) {
}
}
}
// TestEseriesPerf_QueueDepthAverage_Flag checks that the Volume template
// enables queue-depth averaging without enabling utilization calculation.
func TestEseriesPerf_QueueDepthAverage_Flag(t *testing.T) {
	ep := newEseriesPerf("Volume", "volume.yaml")

	assert.False(t, ep.perfProp.calculateUtilization)
	assert.True(t, ep.perfProp.calculateQueueDepthAverage)
}

// TestEseriesPerf_QueueDepthAverage_Calculation feeds one instance with known
// counter values and verifies queue_depth_average = queueDepthTotal / totalOps.
func TestEseriesPerf_QueueDepthAverage_Calculation(t *testing.T) {
	ep := newEseriesPerf("Volume", "volume.yaml")
	mat := ep.Matrix[ep.Object]

	inst, err := mat.NewInstance("vol1")
	if err != nil {
		t.Fatalf("failed to create instance: %v", err)
	}

	qdTotal, err := mat.NewMetricFloat64("queueDepthTotal")
	if err != nil {
		t.Fatalf("failed to create queueDepthTotal: %v", err)
	}
	rOps, err := mat.NewMetricFloat64("readOps")
	if err != nil {
		t.Fatalf("failed to create readOps: %v", err)
	}
	wOps, err := mat.NewMetricFloat64("writeOps")
	if err != nil {
		t.Fatalf("failed to create writeOps: %v", err)
	}
	oOps, err := mat.NewMetricFloat64("otherOps")
	if err != nil {
		t.Fatalf("failed to create otherOps: %v", err)
	}

	// 1500 queue-depth units over 300+200+0 = 500 ops -> average of 3.0.
	qdTotal.SetValueFloat64(inst, 1500)
	rOps.SetValueFloat64(inst, 300)
	wOps.SetValueFloat64(inst, 200)
	oOps.SetValueFloat64(inst, 0)

	skips, err := ep.calculateQueueDepthAverage(mat)
	assert.Nil(t, err)
	assert.Equal(t, skips, 0)

	avg := mat.GetMetric("queue_depth_average")
	assert.NotNil(t, avg)

	got, ok := avg.GetValueFloat64(inst)
	assert.True(t, ok)
	assert.Equal(t, got, 3.0)
}

// TestEseriesPerf_QueueDepthAverage_ZeroTotalOps verifies that an instance
// with zero total ops is counted as a skip and gets no queue_depth_average.
func TestEseriesPerf_QueueDepthAverage_ZeroTotalOps(t *testing.T) {
	ep := newEseriesPerf("Volume", "volume.yaml")
	mat := ep.Matrix[ep.Object]

	inst, err := mat.NewInstance("vol1")
	if err != nil {
		t.Fatalf("failed to create instance: %v", err)
	}

	qdTotal, _ := mat.NewMetricFloat64("queueDepthTotal")
	rOps, _ := mat.NewMetricFloat64("readOps")
	wOps, _ := mat.NewMetricFloat64("writeOps")
	oOps, _ := mat.NewMetricFloat64("otherOps")

	// Queue depth accumulated, but no I/O completed in the interval.
	qdTotal.SetValueFloat64(inst, 100)
	rOps.SetValueFloat64(inst, 0)
	wOps.SetValueFloat64(inst, 0)
	oOps.SetValueFloat64(inst, 0)

	skips, err := ep.calculateQueueDepthAverage(mat)
	assert.Nil(t, err)
	assert.Equal(t, skips, 1)

	avg := mat.GetMetric("queue_depth_average")
	assert.NotNil(t, avg)

	_, ok := avg.GetValueFloat64(inst)
	assert.False(t, ok)
}

// TestEseriesPerf_QueueDepthTotal_NotExported runs two polls through
// cookCounters and verifies that the raw queueDepthTotal metric survives in
// the result matrix but is flagged non-exportable (it only feeds the derived
// queue_depth_average metric).
func TestEseriesPerf_QueueDepthTotal_NotExported(t *testing.T) {
	ep := newEseriesPerf("Volume", "volume.yaml")
	mat := ep.Matrix[ep.Object]
	mat.SetGlobalLabel("array_id", "600a098000f63714000000005e5cf5d2")
	mat.SetGlobalLabel("array", "eseries-test-system")

	// First poll - establishes baseline; cookCounters returns nil when cache is empty
	pollData1 := jsonToPerfData("testdata/perf1.json")
	ep.pollData(mat, pollData1, set.New())
	_, _ = ep.cookCounters(mat, mat)

	// Second poll
	// prevMat keeps the first poll's data; curMat shares metrics/instances but
	// is reset so it only holds the second poll's values.
	pollData2 := jsonToPerfData("testdata/perf2.json")
	prevMat := mat.Clone(matrix.With{Data: true, Metrics: true, Instances: true, ExportInstances: true})
	curMat := prevMat.Clone(matrix.With{Data: false, Metrics: true, Instances: true, ExportInstances: true})
	curMat.Reset()
	ep.pollData(curMat, pollData2, set.New())

	got, err := ep.cookCounters(curMat, prevMat)
	assert.Nil(t, err)
	assert.NotNil(t, got)

	resultMat := got["Volume"]
	assert.NotNil(t, resultMat)

	// The metric must exist after cooking but must not be exported.
	qdt := resultMat.GetMetric("queueDepthTotal")
	if qdt == nil {
		t.Fatal("queueDepthTotal metric should exist in result matrix")
	}
	assert.False(t, qdt.IsExportable())
}
Comment thread
rahulguptajss marked this conversation as resolved.
24 changes: 24 additions & 0 deletions cmd/tools/generate/eseries_counter.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -344,6 +344,30 @@ counters:
ESeriesCounter: Harvest Generated
Template: conf/eseriesperf/11.80.0/volume.yaml (CacheHitRatio plugin)

- Name: eseries_volume_other_ops
Description: Volume other I/O operations per second
APIs:
- API: REST
Endpoint: storage-systems/{array_id}/live-statistics
ESeriesCounter: otherOps
Template: conf/eseriesperf/11.80.0/volume.yaml

- Name: eseries_volume_queue_depth_average
Description: Average queue depth per I/O operation
APIs:
- API: REST
Endpoint: storage-systems/{array_id}/live-statistics
ESeriesCounter: Harvest Generated
Template: conf/eseriesperf/11.80.0/volume.yaml

- Name: eseries_volume_queue_depth_max
Description: Maximum queue depth seen over the observation window
APIs:
- API: REST
Endpoint: storage-systems/{array_id}/live-statistics
ESeriesCounter: queueDepthMax
Template: conf/eseriesperf/11.80.0/volume.yaml

# =============================================================================
# Cache Backup Device Metrics (Hardware Plugin)
# =============================================================================
Expand Down
3 changes: 3 additions & 0 deletions conf/eseriesperf/11.80.0/volume.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@ counters:
- ^^volumeName => volume
- lastResetTimeInMS => last_reset_time
- observedTimeInMS => observed_time
- otherOps => other_ops
- queueDepthMax => queue_depth_max
- queueDepthTotal # queueDepthTotal is only used to compute queue_depth_average. It is not exported
- readBytes => read_data
- readHitOps => read_hit_ops
- readOps => read_ops
Expand Down
6 changes: 6 additions & 0 deletions conf/eseriesperf/static_counter_definitions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,12 @@ objects:
- name: idleTime
type: average
base_counter: readOps
- name: otherOps
type: rate
- name: queueDepthTotal
type: delta
- name: queueDepthMax
type: raw

eseries_controller:
counter_definitions:
Expand Down
2 changes: 1 addition & 1 deletion docs/cisco-switch-metrics.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ These can be generated on demand by running `bin/harvest grafana metrics`. See
[#1577](https://github.com/NetApp/harvest/issues/1577#issue-1471478260) for details.

```
Creation Date : 2026-Apr-03
Creation Date : 2026-Apr-08
NX-OS Version: 9.3.12
```

Expand Down
Loading
Loading