From f4a70b6848a47e49605a8a4e1cf4ba348036ee08 Mon Sep 17 00:00:00 2001 From: Chris Grindstaff Date: Wed, 15 Apr 2026 08:47:41 -0400 Subject: [PATCH 1/2] feat: Harvest should include a cmperf collector --- cmd/collectors/cmperf/cmperf.go | 540 ++++++++++++++++++ cmd/collectors/cmperf/plugins/disk/disk.go | 11 + .../cmperf/plugins/fabricpool/fabricpool.go | 11 + cmd/collectors/cmperf/plugins/fcp/fcp.go | 11 + cmd/collectors/cmperf/plugins/fcvi/fcvi.go | 11 + .../cmperf/plugins/flexcache/flexcache.go | 11 + .../cmperf/plugins/headroom/headroom.go | 11 + cmd/collectors/cmperf/plugins/nic/nic.go | 11 + .../cmperf/plugins/volume/volume.go | 11 + cmd/collectors/cmperf/plugins/vscan/vscan.go | 11 + cmd/collectors/zapiperf/zapiperf.go | 2 +- cmd/poller/poller.go | 100 +++- cmd/poller/poller_test.go | 238 +++++++- conf/cmperf/9.19.1/cifs_node.yaml | 26 + conf/cmperf/9.19.1/cifs_vserver.yaml | 28 + conf/cmperf/9.19.1/copy_manager.yaml | 21 + conf/cmperf/9.19.1/disk.yaml | 110 ++++ conf/cmperf/9.19.1/ext_cache_obj.yaml | 33 ++ .../9.19.1/external_service_operation.yaml | 39 ++ conf/cmperf/9.19.1/fcp.yaml | 74 +++ conf/cmperf/9.19.1/fcp_lif.yaml | 31 + conf/cmperf/9.19.1/fcvi.yaml | 31 + conf/cmperf/9.19.1/flexcache.yaml | 38 ++ conf/cmperf/9.19.1/fpolicy.yaml | 24 + conf/cmperf/9.19.1/fpolicy_server.yaml | 23 + conf/cmperf/9.19.1/fpolicy_svm.yaml | 17 + conf/cmperf/9.19.1/hostadapter.yaml | 17 + conf/cmperf/9.19.1/iscsi_lif.yaml | 32 ++ conf/cmperf/9.19.1/iwarp.yaml | 24 + conf/cmperf/9.19.1/lif.yaml | 29 + conf/cmperf/9.19.1/lun.yaml | 55 ++ conf/cmperf/9.19.1/namespace.yaml | 36 ++ conf/cmperf/9.19.1/netstat.yaml | 38 ++ conf/cmperf/9.19.1/nfsv3.yaml | 92 +++ conf/cmperf/9.19.1/nfsv3_node.yaml | 91 +++ conf/cmperf/9.19.1/nfsv4.yaml | 140 +++++ conf/cmperf/9.19.1/nfsv4_1.yaml | 181 ++++++ conf/cmperf/9.19.1/nfsv4_1_node.yaml | 180 ++++++ conf/cmperf/9.19.1/nfsv4_2.yaml | 180 ++++++ conf/cmperf/9.19.1/nfsv4_2_node.yaml | 178 ++++++ conf/cmperf/9.19.1/nfsv4_node.yaml | 136 
+++++ conf/cmperf/9.19.1/nfsv4_pool.yaml | 51 ++ conf/cmperf/9.19.1/nic_common.yaml | 45 ++ conf/cmperf/9.19.1/nvm_mirror.yaml | 17 + conf/cmperf/9.19.1/nvmf_lif.yaml | 30 + conf/cmperf/9.19.1/nvmf_rdma_port.yaml | 31 + conf/cmperf/9.19.1/nvmf_tcp_port.yaml | 31 + .../cmperf/9.19.1/object_store_client_op.yaml | 21 + conf/cmperf/9.19.1/ontap_s3_svm.yaml | 146 +++++ conf/cmperf/9.19.1/path.yaml | 31 + conf/cmperf/9.19.1/qtree.yaml | 26 + .../cmperf/9.19.1/resource_headroom_aggr.yaml | 27 + conf/cmperf/9.19.1/resource_headroom_cpu.yaml | 22 + conf/cmperf/9.19.1/rwctx.yaml | 17 + conf/cmperf/9.19.1/smb2.yaml | 54 ++ conf/cmperf/9.19.1/system_node.yaml | 56 ++ conf/cmperf/9.19.1/token_manager.yaml | 29 + conf/cmperf/9.19.1/volume.yaml | 70 +++ conf/cmperf/9.19.1/volume_node.yaml | 50 ++ conf/cmperf/9.19.1/vscan.yaml | 27 + conf/cmperf/9.19.1/vscan_svm.yaml | 19 + conf/cmperf/9.19.1/wafl.yaml | 38 ++ .../cmperf/9.19.1/wafl_comp_aggr_vol_bin.yaml | 27 + conf/cmperf/9.19.1/wafl_hya_per_aggr.yaml | 34 ++ conf/cmperf/9.19.1/wafl_hya_sizer.yaml | 16 + conf/cmperf/9.19.1/workload.yaml | 55 ++ conf/cmperf/9.19.1/workload_volume.yaml | 57 ++ conf/cmperf/default.yaml | 66 +++ harvest.cue | 1 + pkg/conf/collectors.go | 2 + pkg/conf/conf.go | 1 + 71 files changed, 3972 insertions(+), 8 deletions(-) create mode 100644 cmd/collectors/cmperf/cmperf.go create mode 100644 cmd/collectors/cmperf/plugins/disk/disk.go create mode 100644 cmd/collectors/cmperf/plugins/fabricpool/fabricpool.go create mode 100644 cmd/collectors/cmperf/plugins/fcp/fcp.go create mode 100644 cmd/collectors/cmperf/plugins/fcvi/fcvi.go create mode 100644 cmd/collectors/cmperf/plugins/flexcache/flexcache.go create mode 100644 cmd/collectors/cmperf/plugins/headroom/headroom.go create mode 100644 cmd/collectors/cmperf/plugins/nic/nic.go create mode 100644 cmd/collectors/cmperf/plugins/volume/volume.go create mode 100644 cmd/collectors/cmperf/plugins/vscan/vscan.go create mode 100644 conf/cmperf/9.19.1/cifs_node.yaml 
create mode 100644 conf/cmperf/9.19.1/cifs_vserver.yaml create mode 100644 conf/cmperf/9.19.1/copy_manager.yaml create mode 100644 conf/cmperf/9.19.1/disk.yaml create mode 100644 conf/cmperf/9.19.1/ext_cache_obj.yaml create mode 100644 conf/cmperf/9.19.1/external_service_operation.yaml create mode 100644 conf/cmperf/9.19.1/fcp.yaml create mode 100644 conf/cmperf/9.19.1/fcp_lif.yaml create mode 100644 conf/cmperf/9.19.1/fcvi.yaml create mode 100644 conf/cmperf/9.19.1/flexcache.yaml create mode 100644 conf/cmperf/9.19.1/fpolicy.yaml create mode 100644 conf/cmperf/9.19.1/fpolicy_server.yaml create mode 100644 conf/cmperf/9.19.1/fpolicy_svm.yaml create mode 100644 conf/cmperf/9.19.1/hostadapter.yaml create mode 100644 conf/cmperf/9.19.1/iscsi_lif.yaml create mode 100644 conf/cmperf/9.19.1/iwarp.yaml create mode 100644 conf/cmperf/9.19.1/lif.yaml create mode 100644 conf/cmperf/9.19.1/lun.yaml create mode 100644 conf/cmperf/9.19.1/namespace.yaml create mode 100644 conf/cmperf/9.19.1/netstat.yaml create mode 100644 conf/cmperf/9.19.1/nfsv3.yaml create mode 100644 conf/cmperf/9.19.1/nfsv3_node.yaml create mode 100644 conf/cmperf/9.19.1/nfsv4.yaml create mode 100644 conf/cmperf/9.19.1/nfsv4_1.yaml create mode 100644 conf/cmperf/9.19.1/nfsv4_1_node.yaml create mode 100644 conf/cmperf/9.19.1/nfsv4_2.yaml create mode 100644 conf/cmperf/9.19.1/nfsv4_2_node.yaml create mode 100644 conf/cmperf/9.19.1/nfsv4_node.yaml create mode 100644 conf/cmperf/9.19.1/nfsv4_pool.yaml create mode 100644 conf/cmperf/9.19.1/nic_common.yaml create mode 100644 conf/cmperf/9.19.1/nvm_mirror.yaml create mode 100644 conf/cmperf/9.19.1/nvmf_lif.yaml create mode 100644 conf/cmperf/9.19.1/nvmf_rdma_port.yaml create mode 100644 conf/cmperf/9.19.1/nvmf_tcp_port.yaml create mode 100644 conf/cmperf/9.19.1/object_store_client_op.yaml create mode 100644 conf/cmperf/9.19.1/ontap_s3_svm.yaml create mode 100644 conf/cmperf/9.19.1/path.yaml create mode 100644 conf/cmperf/9.19.1/qtree.yaml create mode 100644 
conf/cmperf/9.19.1/resource_headroom_aggr.yaml create mode 100644 conf/cmperf/9.19.1/resource_headroom_cpu.yaml create mode 100644 conf/cmperf/9.19.1/rwctx.yaml create mode 100644 conf/cmperf/9.19.1/smb2.yaml create mode 100644 conf/cmperf/9.19.1/system_node.yaml create mode 100644 conf/cmperf/9.19.1/token_manager.yaml create mode 100644 conf/cmperf/9.19.1/volume.yaml create mode 100644 conf/cmperf/9.19.1/volume_node.yaml create mode 100644 conf/cmperf/9.19.1/vscan.yaml create mode 100644 conf/cmperf/9.19.1/vscan_svm.yaml create mode 100644 conf/cmperf/9.19.1/wafl.yaml create mode 100644 conf/cmperf/9.19.1/wafl_comp_aggr_vol_bin.yaml create mode 100644 conf/cmperf/9.19.1/wafl_hya_per_aggr.yaml create mode 100644 conf/cmperf/9.19.1/wafl_hya_sizer.yaml create mode 100644 conf/cmperf/9.19.1/workload.yaml create mode 100644 conf/cmperf/9.19.1/workload_volume.yaml create mode 100644 conf/cmperf/default.yaml diff --git a/cmd/collectors/cmperf/cmperf.go b/cmd/collectors/cmperf/cmperf.go new file mode 100644 index 000000000..eba8d52b1 --- /dev/null +++ b/cmd/collectors/cmperf/cmperf.go @@ -0,0 +1,540 @@ +package cmperf + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/disk" + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/fabricpool" + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/fcp" + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/fcvi" + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/flexcache" + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/headroom" + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/nic" + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/volume" + "github.com/netapp/harvest/v2/cmd/collectors/cmperf/plugins/vscan" + rest2 "github.com/netapp/harvest/v2/cmd/collectors/rest" + "github.com/netapp/harvest/v2/cmd/poller/collector" + "github.com/netapp/harvest/v2/cmd/poller/plugin" + "github.com/netapp/harvest/v2/pkg/errs" + 
"github.com/netapp/harvest/v2/pkg/matrix" + "github.com/netapp/harvest/v2/pkg/slogx" + "log/slog" + "strconv" + "strings" + "time" +) + +const ( + latencyIoReqd = 0 + arrayKeyToken = "#" + timestampMetricName = "timestamp" +) + +var ( + qosQuery = "api/cluster/counter/tables/qos" + qosVolumeQuery = "api/cluster/counter/tables/qos_volume" + qosDetailQuery = "api/cluster/counter/tables/qos_detail" + qosDetailVolumeQuery = "api/cluster/counter/tables/qos_detail_volume" + workloadDetailMetrics = []string{"resource_latency"} +) + +var qosQueries = map[string]string{ + qosQuery: qosQuery, + qosVolumeQuery: qosVolumeQuery, +} +var qosDetailQueries = map[string]string{ + qosDetailQuery: qosDetailQuery, + qosDetailVolumeQuery: qosDetailVolumeQuery, +} + +type CmPerf struct { + *rest2.Rest // provides: AbstractCollector, Client, Object, Query, TemplateFn, TemplateType + perfProp *perfProp + archivedMetrics map[string]*rest2.Metric // Keeps metric definitions that are not found in the counter schema. These metrics may be available in future ONTAP versions. 
+ hasInstanceSchedule bool + recordsToSave int // Number of records to save when using the recorder +} + +type counter struct { + counterType string + denominator string +} + +type perfProp struct { + isCacheEmpty bool + counterInfo map[string]*counter + latencyIoReqd int + qosLabels map[string]string + disableConstituents bool +} + +func init() { + plugin.RegisterModule(&CmPerf{}) +} + +func (r *CmPerf) HarvestModule() plugin.ModuleInfo { + return plugin.ModuleInfo{ + ID: "harvest.collector.cmperf", + New: func() plugin.Module { return new(CmPerf) }, + } +} + +func (r *CmPerf) Init(a *collector.AbstractCollector) error { + + var err error + + r.Rest = &rest2.Rest{AbstractCollector: a} + + r.perfProp = &perfProp{} + + r.InitProp() + + r.perfProp.counterInfo = make(map[string]*counter) + r.archivedMetrics = make(map[string]*rest2.Metric) + + if err := r.InitClient(); err != nil { + return err + } + + if r.Prop.TemplatePath, err = r.LoadTemplate(); err != nil { + return err + } + + r.InitVars(a.Params) + + if err := collector.Init(r); err != nil { + return err + } + + if err := r.InitCache(); err != nil { + return err + } + + if err := r.InitMatrix(); err != nil { + return err + } + + if err := r.InitQOS(); err != nil { + return err + } + + r.InitSchedule() + + r.recordsToSave = collector.RecordKeepLast(r.Params, r.Logger) + + r.Logger.Debug( + "initialized cache", + slog.Int("numMetrics", len(r.Prop.Metrics)), + slog.String("timeout", r.Client.GetTimeout().String()), + ) + + return nil +} + +func (r *CmPerf) InitQOS() error { + if isWorkloadObject(r.Prop.Query) || isWorkloadDetailObject(r.Prop.Query) { + qosLabels := r.Params.GetChildS("qos_labels") + if qosLabels == nil { + return errs.New(errs.ErrMissingParam, "qos_labels") + } + r.perfProp.qosLabels = make(map[string]string) + for _, label := range qosLabels.GetAllChildContentS() { + + display := strings.ReplaceAll(label, "-", "_") + before, after, found := strings.Cut(label, "=>") + if found { + label = 
strings.TrimSpace(before) + display = strings.TrimSpace(after) + } + r.perfProp.qosLabels[label] = display + } + } + if counters := r.Params.GetChildS("counters"); counters != nil { + refine := counters.GetChildS("refine") + if refine != nil { + withConstituents := refine.GetChildContentS("with_constituents") + if withConstituents == "false" { + r.perfProp.disableConstituents = true + } + withServiceLatency := refine.GetChildContentS("with_service_latency") + if withServiceLatency != "false" { + workloadDetailMetrics = append(workloadDetailMetrics, "service_time_latency") + } + } + } + return nil +} + +func (r *CmPerf) InitMatrix() error { + mat := r.Matrix[r.Object] + // init perf properties + r.perfProp.latencyIoReqd = r.loadParamInt("latency_io_reqd", latencyIoReqd) + r.perfProp.isCacheEmpty = true + // overwrite from abstract collector + mat.Object = r.Prop.Object + // Add system (cluster) name + mat.SetGlobalLabel("cluster", r.Remote.Name) + if r.Params.HasChildS("labels") { + for _, l := range r.Params.GetChildS("labels").GetChildren() { + mat.SetGlobalLabel(l.GetNameS(), l.GetContentS()) + } + } + + // Add metadata metric for skips/numPartials + _, _ = r.Metadata.NewMetricUint64("skips") + _, _ = r.Metadata.NewMetricUint64("numPartials") + return nil +} + +// load an int parameter or use defaultValue +func (r *CmPerf) loadParamInt(name string, defaultValue int) int { + + var ( + x string + n int + e error + ) + + if x = r.Params.GetChildContentS(name); x != "" { + if n, e = strconv.Atoi(x); e == nil { + r.Logger.Debug("using", + slog.String("name", name), + slog.Int("value", n), + ) + return n + } + r.Logger.Warn("invalid parameter (expected integer)", slog.String("name", name), slog.String("value", x)) + } + + r.Logger.Debug("using", slog.String("name", name), slog.Int("defaultValue", defaultValue)) + return defaultValue +} + +func (r *CmPerf) PollCounter() (map[string]*matrix.Matrix, error) { + + mat := r.Matrix[r.Object] + + // Create an artificial metric 
to hold timestamp of each instance data. + // The reason we don't keep a single timestamp for the whole data + // is because we might get instances in different batches + if mat.GetMetric(timestampMetricName) == nil { + m, err := mat.NewMetricFloat64(timestampMetricName) + if err != nil { + r.Logger.Error("add timestamp metric", slogx.Err(err)) + } + m.SetProperty("raw") + m.SetExportable(false) + } + + return nil, nil +} + +// GetOverride override counter property +func (r *CmPerf) GetOverride(counter string) string { + if o := r.Params.GetChildS("override"); o != nil { + return o.GetChildContentS(counter) + } + return "" +} + +func (r *CmPerf) PollData() (map[string]*matrix.Matrix, error) { + var ( + apiD, parseD time.Duration + metricCount uint64 + numPartials uint64 + startTime time.Time + prevMat *matrix.Matrix + curMat *matrix.Matrix + ) + + timestamp := r.Matrix[r.Object].GetMetric(timestampMetricName) + if timestamp == nil { + return nil, errs.New(errs.ErrConfig, "missing timestamp metric") + } + + startTime = time.Now() + r.Client.Metadata.Reset() + prevMat = r.Matrix[r.Object] + + // clone matrix without numeric data + curMat = prevMat.Clone(matrix.With{Data: false, Metrics: true, Instances: true, ExportInstances: true}) + curMat.Reset() + + apiD += time.Since(startTime) + + _ = r.Metadata.LazySetValueInt64("api_time", "data", apiD.Microseconds()) + _ = r.Metadata.LazySetValueInt64("parse_time", "data", parseD.Microseconds()) + _ = r.Metadata.LazySetValueUint64("metrics", "data", metricCount) + _ = r.Metadata.LazySetValueUint64("instances", "data", uint64(len(curMat.GetInstances()))) + _ = r.Metadata.LazySetValueUint64("bytesRx", "data", r.Client.Metadata.BytesRx) + _ = r.Metadata.LazySetValueUint64("numCalls", "data", r.Client.Metadata.NumCalls) + _ = r.Metadata.LazySetValueUint64("numPartials", "data", numPartials) + r.AddCollectCount(metricCount) + + return r.cookCounters(curMat, prevMat) +} + +func (r *CmPerf) cookCounters(curMat *matrix.Matrix, 
prevMat *matrix.Matrix) (map[string]*matrix.Matrix, error) { + var ( + err error + skips int + ) + + // skip calculating from delta if no data from previous poll + if r.perfProp.isCacheEmpty { + r.Logger.Debug("skip postprocessing until next poll (previous cache empty)") + r.Matrix[r.Object] = curMat + r.perfProp.isCacheEmpty = false + return nil, nil + } + + calcStart := time.Now() + + // cache raw data for next poll + cachedData := curMat.Clone(matrix.With{Data: true, Metrics: true, Instances: true, ExportInstances: true, PartialInstances: true}) + + orderedNonDenominatorMetrics := make([]*matrix.Metric, 0, len(curMat.GetMetrics())) + orderedNonDenominatorKeys := make([]string, 0, len(orderedNonDenominatorMetrics)) + + orderedDenominatorMetrics := make([]*matrix.Metric, 0, len(curMat.GetMetrics())) + orderedDenominatorKeys := make([]string, 0, len(orderedDenominatorMetrics)) + + for key, metric := range curMat.GetMetrics() { + if metric.GetName() != timestampMetricName && metric.Buckets() == nil { + counter := r.counterLookup(metric, key) + if counter != nil { + if counter.denominator == "" { + // does not require base counter + orderedNonDenominatorMetrics = append(orderedNonDenominatorMetrics, metric) + orderedNonDenominatorKeys = append(orderedNonDenominatorKeys, key) + } else { + // does require base counter + orderedDenominatorMetrics = append(orderedDenominatorMetrics, metric) + orderedDenominatorKeys = append(orderedDenominatorKeys, key) + } + } else { + r.Logger.Warn("Counter is missing or unable to parse", slog.String("counter", metric.GetName())) + } + } + } + + // order metrics, such that those requiring base counters are processed last + orderedMetrics := orderedNonDenominatorMetrics + orderedMetrics = append(orderedMetrics, orderedDenominatorMetrics...) + orderedKeys := orderedNonDenominatorKeys + orderedKeys = append(orderedKeys, orderedDenominatorKeys...) + + // Calculate timestamp delta first since many counters require it for postprocessing. 
+ // Timestamp has "raw" property, so it isn't post-processed automatically + if _, err = curMat.Delta("timestamp", prevMat, cachedData, r.AllowPartialAggregation, r.Logger); err != nil { + r.Logger.Error("(timestamp) calculate delta:", slogx.Err(err)) + } + + var base *matrix.Metric + var totalSkips int + + for i, metric := range orderedMetrics { + key := orderedKeys[i] + counter := r.counterLookup(metric, key) + if counter == nil { + r.Logger.Error( + "Missing counter:", + slogx.Err(err), + slog.String("counter", metric.GetName()), + ) + continue + } + property := counter.counterType + // used in aggregator plugin + metric.SetProperty(property) + // used in volume.go plugin + metric.SetComment(counter.denominator) + + // raw/string - submit without post-processing + if property == "raw" || property == "string" { + continue + } + + // all other properties - first calculate delta + if skips, err = curMat.Delta(key, prevMat, cachedData, r.AllowPartialAggregation, r.Logger); err != nil { + r.Logger.Error("Calculate delta:", slogx.Err(err), slog.String("key", key)) + continue + } + totalSkips += skips + + // DELTA - subtract previous value from current + if property == "delta" { + // already done + continue + } + + // RATE - delta, normalized by elapsed time + if property == "rate" { + // defer calculation, so we can first calculate averages/percents + // Note: calculating rate before averages are averages/percentages are calculated + // used to be a bug in Harvest 2.0 (Alpha, RC1, RC2) resulting in very high latency values + continue + } + + // For the next two properties we need base counters + // We assume that delta of base counters is already calculated + if base = curMat.GetMetric(counter.denominator); base == nil { + if isWorkloadDetailObject(r.Prop.Query) { + // The workload detail generates metrics at the resource level. The 'service_time' and 'wait_time' metrics are used as raw values for these resource-level metrics. 
Their denominator, 'visits', is not collected; therefore, a check is added here to prevent warnings. + // There is no need to cook these metrics further. + if key == "service_time" || key == "wait_time" { + continue + } + } + r.Logger.Warn( + "Base counter missing", + slog.String("key", key), + slog.String("property", property), + slog.String("denominator", counter.denominator), + ) + continue + } + + // remaining properties: average and percent + // + // AVERAGE - delta, divided by base-counter delta + // + // PERCENT - average * 100 + // special case for latency counter: apply minimum number of iops as threshold + if property == "average" || property == "percent" { + + if strings.HasSuffix(metric.GetName(), "latency") { + skips, err = curMat.DivideWithThreshold(key, counter.denominator, r.perfProp.latencyIoReqd, cachedData, prevMat, timestampMetricName, r.Logger) + } else { + skips, err = curMat.Divide(key, counter.denominator) + } + + if err != nil { + r.Logger.Error("Division by base", slogx.Err(err), slog.String("key", key)) + continue + } + totalSkips += skips + + if property == "average" { + continue + } + } + + if property == "percent" { + if skips, err = curMat.MultiplyByScalar(key, 100); err != nil { + r.Logger.Error("Multiply by scalar", slogx.Err(err), slog.String("key", key)) + } else { + totalSkips += skips + } + continue + } + // If we reach here, then one of the earlier clauses should have executed `continue` statement + r.Logger.Error( + "Unknown property", + slog.String("key", key), + slog.String("property", property), + ) + } + + // calculate rates (which we deferred to calculate averages/percents first) + for i, metric := range orderedMetrics { + key := orderedKeys[i] + counter := r.counterLookup(metric, key) + if counter != nil { + property := counter.counterType + if property == "rate" { + if skips, err = curMat.Divide(orderedKeys[i], timestampMetricName); err != nil { + r.Logger.Error( + "Calculate rate", + slogx.Err(err), + slog.Int("i", i), 
+ slog.String("metric", metric.GetName()), + slog.String("key", key), + ) + continue + } + totalSkips += skips + } + } else { + r.Logger.Warn("Counter is missing or unable to parse", slog.String("counter", metric.GetName())) + continue + } + } + + calcD := time.Since(calcStart) + _ = r.Metadata.LazySetValueUint64("instances", "data", uint64(len(curMat.GetInstances()))) + _ = r.Metadata.LazySetValueInt64("calc_time", "data", calcD.Microseconds()) + _ = r.Metadata.LazySetValueUint64("skips", "data", uint64(totalSkips)) //nolint:gosec + + // store cache for next poll + r.Matrix[r.Object] = cachedData + + newDataMap := make(map[string]*matrix.Matrix) + newDataMap[r.Object] = curMat + return newDataMap, nil +} + +func (r *CmPerf) counterLookup(metric *matrix.Metric, metricKey string) *counter { + var c *counter + + if metric.IsArray() { + name, _, _ := strings.Cut(metricKey, arrayKeyToken) + c = r.perfProp.counterInfo[name] + } else { + c = r.perfProp.counterInfo[metricKey] + } + return c +} + +func (r *CmPerf) LoadPlugin(kind string, abc *plugin.AbstractPlugin) plugin.Plugin { + switch kind { + case "Vscan": + return vscan.New(abc) + case "FlexCache": + return flexcache.New(abc) + case "Disk": + return disk.New(abc) + case "Nic": + return nic.New(abc) + case "Headroom": + return headroom.New(abc) + case "Fcp": + return fcp.New(abc) + case "FCVI": + return fcvi.New(abc) + case "FabricPool": + return fabricpool.New(abc) + case "Volume": + return volume.New(abc) + + default: + r.Logger.Info("no CmPerf plugin found", slog.String("kind", kind)) + } + return nil +} + +func (r *CmPerf) InitSchedule() { + if r.Schedule == nil { + return + } + tasks := r.Schedule.GetTasks() + for _, task := range tasks { + if task.Name == "instance" { + r.hasInstanceSchedule = true + return + } + } +} + +func isWorkloadObject(query string) bool { + _, ok := qosQueries[query] + return ok +} + +func isWorkloadDetailObject(query string) bool { + _, ok := qosDetailQueries[query] + return ok +} + 
+// Interface guards +var ( + _ collector.Collector = (*CmPerf)(nil) +) diff --git a/cmd/collectors/cmperf/plugins/disk/disk.go b/cmd/collectors/cmperf/plugins/disk/disk.go new file mode 100644 index 000000000..c324f51ff --- /dev/null +++ b/cmd/collectors/cmperf/plugins/disk/disk.go @@ -0,0 +1,11 @@ +package disk + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/disk" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This reuses the restperf Disk plugin implementation as the functionality is identical +func New(p *plugin.AbstractPlugin) plugin.Plugin { + return disk.New(p) +} diff --git a/cmd/collectors/cmperf/plugins/fabricpool/fabricpool.go b/cmd/collectors/cmperf/plugins/fabricpool/fabricpool.go new file mode 100644 index 000000000..1b5eb2311 --- /dev/null +++ b/cmd/collectors/cmperf/plugins/fabricpool/fabricpool.go @@ -0,0 +1,11 @@ +package fabricpool + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/fabricpool" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This reuses the restperf fabricpool plugin implementation as the functionality is identical +func New(p *plugin.AbstractPlugin) plugin.Plugin { + return fabricpool.New(p) +} diff --git a/cmd/collectors/cmperf/plugins/fcp/fcp.go b/cmd/collectors/cmperf/plugins/fcp/fcp.go new file mode 100644 index 000000000..6e6a3210d --- /dev/null +++ b/cmd/collectors/cmperf/plugins/fcp/fcp.go @@ -0,0 +1,11 @@ +package fcp + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/fcp" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This reuses the restperf FCP plugin implementation as the functionality is identical +func New(p *plugin.AbstractPlugin) plugin.Plugin { + return fcp.New(p) +} diff --git a/cmd/collectors/cmperf/plugins/fcvi/fcvi.go b/cmd/collectors/cmperf/plugins/fcvi/fcvi.go new file mode 100644 index 000000000..920f8dc65 --- /dev/null +++ b/cmd/collectors/cmperf/plugins/fcvi/fcvi.go @@ -0,0 
+1,11 @@ +package fcvi + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/fcvi" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This reuses the restperf FCVI plugin implementation as the functionality is identical +func New(p *plugin.AbstractPlugin) plugin.Plugin { + return fcvi.New(p) +} diff --git a/cmd/collectors/cmperf/plugins/flexcache/flexcache.go b/cmd/collectors/cmperf/plugins/flexcache/flexcache.go new file mode 100644 index 000000000..d604ea856 --- /dev/null +++ b/cmd/collectors/cmperf/plugins/flexcache/flexcache.go @@ -0,0 +1,11 @@ +package flexcache + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/statperf/plugins/flexcache" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This uses the statperf flexcache plugin implementation as the functionality is identical +func New(p *plugin.AbstractPlugin) plugin.Plugin { + return flexcache.New(p) +} diff --git a/cmd/collectors/cmperf/plugins/headroom/headroom.go b/cmd/collectors/cmperf/plugins/headroom/headroom.go new file mode 100644 index 000000000..ba739c068 --- /dev/null +++ b/cmd/collectors/cmperf/plugins/headroom/headroom.go @@ -0,0 +1,11 @@ +package headroom + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/headroom" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This reuses the restperf Headroom plugin implementation as the functionality is identical +func New(p *plugin.AbstractPlugin) plugin.Plugin { + return headroom.New(p) +} diff --git a/cmd/collectors/cmperf/plugins/nic/nic.go b/cmd/collectors/cmperf/plugins/nic/nic.go new file mode 100644 index 000000000..a7c2df26a --- /dev/null +++ b/cmd/collectors/cmperf/plugins/nic/nic.go @@ -0,0 +1,11 @@ +package nic + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/nic" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This reuses the restperf NIC plugin implementation as the functionality is identical +func New(p 
*plugin.AbstractPlugin) plugin.Plugin { + return nic.New(p) +} diff --git a/cmd/collectors/cmperf/plugins/volume/volume.go b/cmd/collectors/cmperf/plugins/volume/volume.go new file mode 100644 index 000000000..572051173 --- /dev/null +++ b/cmd/collectors/cmperf/plugins/volume/volume.go @@ -0,0 +1,11 @@ +package volume + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/restperf/plugins/volume" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This reuses the restperf volume plugin implementation as the functionality is identical +func New(p *plugin.AbstractPlugin) plugin.Plugin { + return volume.New(p) +} diff --git a/cmd/collectors/cmperf/plugins/vscan/vscan.go b/cmd/collectors/cmperf/plugins/vscan/vscan.go new file mode 100644 index 000000000..7a1b749e9 --- /dev/null +++ b/cmd/collectors/cmperf/plugins/vscan/vscan.go @@ -0,0 +1,11 @@ +package vscan + +import ( + "github.com/netapp/harvest/v2/cmd/collectors/zapiperf/plugins/vscan" + "github.com/netapp/harvest/v2/cmd/poller/plugin" +) + +// New This uses the zapiperf vscan plugin implementation as the functionality is identical +func New(p *plugin.AbstractPlugin) plugin.Plugin { + return vscan.New(p) +} diff --git a/cmd/collectors/zapiperf/zapiperf.go b/cmd/collectors/zapiperf/zapiperf.go index 492e9fccc..8846ab1c3 100644 --- a/cmd/collectors/zapiperf/zapiperf.go +++ b/cmd/collectors/zapiperf/zapiperf.go @@ -173,7 +173,7 @@ func (z *ZapiPerf) LoadPlugin(kind string, abc *plugin.AbstractPlugin) plugin.Pl case "FlexCache": return flexcache.New(abc) default: - z.Logger.Info("no zapiPerf plugin found for %s", slog.String("kind", kind)) + z.Logger.Info("no zapiPerf plugin found", slog.String("kind", kind)) } return nil } diff --git a/cmd/poller/poller.go b/cmd/poller/poller.go index 04011c35a..9bbfa24de 100644 --- a/cmd/poller/poller.go +++ b/cmd/poller/poller.go @@ -25,6 +25,7 @@ package main import ( "bytes" + "cmp" "context" "crypto/fips140" "crypto/tls" @@ -55,6 +56,7 @@ import ( 
"github.com/goccy/go-yaml/parser" "github.com/netapp/harvest/v2/cmd/collectors" _ "github.com/netapp/harvest/v2/cmd/collectors/cisco" + _ "github.com/netapp/harvest/v2/cmd/collectors/cmperf" _ "github.com/netapp/harvest/v2/cmd/collectors/ems" _ "github.com/netapp/harvest/v2/cmd/collectors/eseries" _ "github.com/netapp/harvest/v2/cmd/collectors/eseriesperf" @@ -82,6 +84,7 @@ import ( "github.com/netapp/harvest/v2/pkg/matrix" "github.com/netapp/harvest/v2/pkg/requests" "github.com/netapp/harvest/v2/pkg/slogx" + harvestTemplate "github.com/netapp/harvest/v2/pkg/template" "github.com/netapp/harvest/v2/pkg/tree/node" version2 "github.com/netapp/harvest/v2/pkg/version" goversion "github.com/netapp/harvest/v2/third_party/go-version" @@ -905,6 +908,11 @@ func (p *Poller) loadCollectorObject(ocs []objectCollector) error { } } + // Build CmPerf manifest from the already-initialized CmPerf collectors. + if manifest := buildCmPerfManifest(cols, p.getCmManifestName()); manifest != nil { + deleteAndPostCmManifest(manifest) + } + p.collectors = append(p.collectors, cols...) // link each collector with requested exporter & update metadata for _, col := range cols { @@ -942,6 +950,85 @@ func (p *Poller) loadCollectorObject(ocs []objectCollector) error { return nil } +// cmPerfPresetDetail holds the per-object section of a CmPerf manifest. +type cmPerfPresetDetail struct { + Object string `json:"object"` + SamplePeriod string `json:"sample-period"` + Counters []string `json:"counters"` +} + +// cmPerfManifestJSON is the top-level CmPerf manifest structure. +type cmPerfManifestJSON struct { + Preset string `json:"preset"` + PresetDetails []cmPerfPresetDetail `json:"preset_details"` +} + +func deleteAndPostCmManifest(_ []byte) { + // TODO implement +} + +// buildCmPerfManifest constructs a JSON manifest from the already-initialized CmPerf +// collectors. It reads query, counters, and schedule from the collector's merged params +// (i.e. default.yaml + per-object sub-template). 
+// preset_details are sorted by object; counters within each entry are sorted. +func buildCmPerfManifest(cols []collector.Collector, manifestName string) []byte { + const defaultDataPeriod = "1m" + + var details []cmPerfPresetDetail + for _, col := range cols { + if col.GetName() != "CmPerf" { + continue + } + params := col.GetParams() + + query := params.GetChildContentS("query") + if query == "" { + continue + } + + // At the moment, only the defaultDataPeriod is supported by ONTAP + dataPeriod := defaultDataPeriod + + var counters []string + if countersNode := params.GetChildS("counters"); countersNode != nil { + for _, raw := range countersNode.GetAllChildContentS() { + if raw == "" { + continue + } + name, _, _, _ := harvestTemplate.ParseMetric(raw) + counters = append(counters, name) + } + } + slices.Sort(counters) + + details = append(details, cmPerfPresetDetail{ + Object: query, + SamplePeriod: dataPeriod, + Counters: counters, + }) + } + + if len(details) == 0 { + return nil + } + + slices.SortFunc(details, func(a, b cmPerfPresetDetail) int { + return strings.Compare(a.Object, b.Object) + }) + + manifest := cmPerfManifestJSON{ + Preset: manifestName, + PresetDetails: details, + } + data, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + logger.Error("buildCmPerfManifest: failed to marshal manifest", slogx.Err(err)) + return nil + } + logger.Info("built CmPerf manifest", slog.Int("objects", len(details))) + return data +} + func nonOverlappingCollectors(objectCollectors []objectCollector) []objectCollector { if len(objectCollectors) == 0 { return []objectCollector{} @@ -953,11 +1040,12 @@ func nonOverlappingCollectors(objectCollectors []objectCollector) []objectCollec unique := make([]objectCollector, 0) conflicts := map[string][]string{ "Zapi": {"Rest"}, - "ZapiPerf": {"RestPerf", "KeyPerf", "StatPerf"}, + "ZapiPerf": {"RestPerf", "KeyPerf", "StatPerf", "CmPerf"}, "Rest": {"Zapi"}, - "RestPerf": {"ZapiPerf", "KeyPerf", "StatPerf"}, - 
"KeyPerf": {"ZapiPerf", "RestPerf", "StatPerf"}, - "StatPerf": {"ZapiPerf", "RestPerf", "KeyPerf"}, + "RestPerf": {"ZapiPerf", "KeyPerf", "StatPerf", "CmPerf"}, + "KeyPerf": {"ZapiPerf", "RestPerf", "StatPerf", "CmPerf"}, + "StatPerf": {"ZapiPerf", "RestPerf", "KeyPerf", "CmPerf"}, + "CmPerf": {"ZapiPerf", "RestPerf", "KeyPerf", "StatPerf"}, } // Sort collectors so native ones (viaRedirection=false) come before redirected ones @@ -1894,6 +1982,10 @@ func (p *Poller) truncateReason(msg string) string { return strings.ReplaceAll(msg, "\"", "") } +func (p *Poller) getCmManifestName() string { + return cmp.Or(p.params.CmPerfManifest, p.name) +} + func startPoller(_ *cobra.Command, _ []string) { poller := &Poller{} poller.options = opts diff --git a/cmd/poller/poller_test.go b/cmd/poller/poller_test.go index 3c717a4f8..1e9176e19 100644 --- a/cmd/poller/poller_test.go +++ b/cmd/poller/poller_test.go @@ -1,17 +1,18 @@ package main import ( + "encoding/json" "errors" - "strings" - "testing" - "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/netapp/harvest/v2/assert" "github.com/netapp/harvest/v2/cmd/collectors" + collectorPkg "github.com/netapp/harvest/v2/cmd/poller/collector" "github.com/netapp/harvest/v2/cmd/poller/options" "github.com/netapp/harvest/v2/pkg/conf" "github.com/netapp/harvest/v2/pkg/tree/node" + "strings" + "testing" ) func TestUnion2(t *testing.T) { @@ -156,6 +157,18 @@ func Test_nonOverlappingCollectors(t *testing.T) { {name: "no overlap StatPerf", args: ocs("Rest", "StatPerf"), want: ocs("Rest", "StatPerf")}, {name: "overlap statperf", args: ocs("StatPerf", "StatPerf"), want: ocs("StatPerf")}, + // CmPerf overlap tests + {name: "CmPerf alone", args: ocs("CmPerf"), want: ocs("CmPerf")}, + {name: "CmPerf no overlap with Rest", args: ocs("Rest", "CmPerf"), want: ocs("Rest", "CmPerf")}, + {name: "CmPerf overlap RestPerf", args: ocs("RestPerf", "CmPerf"), want: ocs("RestPerf")}, + {name: "CmPerf overlap RestPerf reversed", 
args: ocs("CmPerf", "RestPerf"), want: ocs("CmPerf")}, + {name: "CmPerf overlap KeyPerf", args: ocs("KeyPerf", "CmPerf"), want: ocs("KeyPerf")}, + {name: "CmPerf overlap KeyPerf reversed", args: ocs("CmPerf", "KeyPerf"), want: ocs("CmPerf")}, + {name: "CmPerf overlap StatPerf", args: ocs("StatPerf", "CmPerf"), want: ocs("StatPerf")}, + {name: "CmPerf overlap ZapiPerf", args: ocs("ZapiPerf", "CmPerf"), want: ocs("ZapiPerf")}, + {name: "CmPerf overlap CmPerf", args: ocs("CmPerf", "CmPerf"), want: ocs("CmPerf")}, + {name: "CmPerf with Rest and RestPerf", args: ocs("Rest", "RestPerf", "CmPerf"), want: ocs("Rest", "RestPerf")}, + // Test cases for viaRedirection flag behavior {name: "native wins over redirected - same class", args: []objectCollector{ @@ -682,3 +695,222 @@ func TestGetTemplatesForCollector(t *testing.T) { }) } } + +// testCollector wraps AbstractCollector and provides the one method (Init) that +// AbstractCollector omits. *testCollector satisfies the full collector.Collector interface. +type testCollector struct { + *collectorPkg.AbstractCollector +} + +func (t *testCollector) Init(*collectorPkg.AbstractCollector) error { return nil } + +// makeCmPerfCollector builds a minimal *testCollector whose Params contain +// the query, counters, and (optionally) schedule sub-trees expected by buildCmPerfManifest. 
+func makeCmPerfCollector(name, query, dataPeriod string, counters []string) *testCollector { + params := node.NewS("") + params.NewChildS("query", query) + + if dataPeriod != "" { + sched := params.NewChildS("schedule", "") + dataTask := sched.NewChildS("data", "") + dataTask.SetContentS(dataPeriod) + } + + cNode := params.NewChildS("counters", "") + for _, c := range counters { + cNode.NewChildS("", c) + } + + return &testCollector{collectorPkg.New(name, query, &options.Options{}, params, nil, conf.Remote{})} +} + +func TestBuildCmPerfManifest(t *testing.T) { + tests := []struct { + name string + cols []collectorPkg.Collector + wantNil bool + wantJSON string + }{ + { + name: "empty input returns nil", + cols: nil, + wantNil: true, + }, + { + name: "no CmPerf collectors returns nil", + cols: []collectorPkg.Collector{makeCmPerfCollector("RestPerf", "nfsv3", "3m", []string{"ops"})}, + wantNil: true, + }, + { + name: "single CmPerf collector", + cols: []collectorPkg.Collector{ + makeCmPerfCollector("CmPerf", "nfsv3", "3m", []string{"ops", "read_ops"}), + }, + wantJSON: `{ + "preset": "harvest_overview", + "preset_details": [ + { + "object": "nfsv3", + "sample-period": "1m", + "counters": [ + "ops", + "read_ops" + ] + } + ] +}`, + }, + { + name: "multiple collectors sorted by object", + cols: []collectorPkg.Collector{ + makeCmPerfCollector("CmPerf", "zcifs", "1m", []string{"write_ops"}), + makeCmPerfCollector("CmPerf", "nfsv3", "1m", []string{"ops"}), + }, + wantJSON: `{ + "preset": "harvest_overview", + "preset_details": [ + { + "object": "nfsv3", + "sample-period": "1m", + "counters": [ + "ops" + ] + }, + { + "object": "zcifs", + "sample-period": "1m", + "counters": [ + "write_ops" + ] + } + ] +}`, + }, + { + name: "counters are sorted", + cols: []collectorPkg.Collector{ + makeCmPerfCollector("CmPerf", "workload", "5m", []string{"zz_counter", "aa_counter", "mm_counter"}), + }, + wantJSON: `{ + "preset": "harvest_overview", + "preset_details": [ + { + "object": 
"workload", + "sample-period": "1m", + "counters": [ + "aa_counter", + "mm_counter", + "zz_counter" + ] + } + ] +}`, + }, + { + name: "counter with rename arrow uses base name", + cols: []collectorPkg.Collector{ + makeCmPerfCollector("CmPerf", "nfsv3", "1m", []string{"nfsv3_ops => ops"}), + }, + wantJSON: `{ + "preset": "harvest_overview", + "preset_details": [ + { + "object": "nfsv3", + "sample-period": "1m", + "counters": [ + "nfsv3_ops" + ] + } + ] +}`, + }, + { + name: "default data period when schedule absent", + cols: []collectorPkg.Collector{ + makeCmPerfCollector("CmPerf", "nfsv3", "", []string{"ops"}), + }, + wantJSON: `{ + "preset": "harvest_overview", + "preset_details": [ + { + "object": "nfsv3", + "sample-period": "1m", + "counters": [ + "ops" + ] + } + ] +}`, + }, + { + name: "mixed collector names: only CmPerf included", + cols: []collectorPkg.Collector{ + makeCmPerfCollector("RestPerf", "rest_obj", "1m", []string{"rest_ctr"}), + makeCmPerfCollector("CmPerf", "cm_obj", "2m", []string{"cm_ctr"}), + makeCmPerfCollector("ZapiPerf", "zapi_obj", "1m", []string{"zapi_ctr"}), + }, + wantJSON: `{ + "preset": "harvest_overview", + "preset_details": [ + { + "object": "cm_obj", + "sample-period": "1m", + "counters": [ + "cm_ctr" + ] + } + ] +}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := buildCmPerfManifest(tt.cols, "harvest_overview") + if tt.wantNil { + if result != nil { + t.Errorf("expected nil, got %s", result) + } + return + } + var gotMap, wantMap any + if err := json.Unmarshal(result, &gotMap); err != nil { + t.Fatalf("result is not valid JSON: %v\n%s", err, result) + } + if err := json.Unmarshal([]byte(tt.wantJSON), &wantMap); err != nil { + t.Fatalf("wantJSON is not valid JSON: %v", err) + } + if diff := cmp.Diff(wantMap, gotMap); diff != "" { + t.Errorf("manifest mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestPoller_getCmManifestName(t *testing.T) { + tests := []struct { + name string + 
pollerName string + cmPerfManifest string + want string + }{ + {name: "empty", pollerName: "", cmPerfManifest: "", want: ""}, + {name: "pollerName", pollerName: "sar", cmPerfManifest: "", want: "sar"}, + {name: "cm", pollerName: "sar", cmPerfManifest: "moon", want: "moon"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &Poller{ + name: tt.pollerName, + params: &conf.Poller{ + CmPerfManifest: tt.cmPerfManifest, + }, + } + + if got := p.getCmManifestName(); got != tt.want { + t.Errorf("getCmManifestName() got [%v], want [%v]", got, tt.want) + } + }) + } +} diff --git a/conf/cmperf/9.19.1/cifs_node.yaml b/conf/cmperf/9.19.1/cifs_node.yaml new file mode 100644 index 000000000..ad5ec9d09 --- /dev/null +++ b/conf/cmperf/9.19.1/cifs_node.yaml @@ -0,0 +1,26 @@ + +name: CIFSNode +query: cifs:node +object: node_cifs + +allow_partial_aggregation: true + +counters: + - cifs_latency => latency + - cifs_op_count => op_count + - cifs_ops => total_ops # "cifs_ops" already used in system_node.yaml + - cifs_read_latency => read_latency + - cifs_read_ops => read_ops + - cifs_write_latency => write_latency + - cifs_write_ops => write_ops + - connections + - established_sessions + - instance_name => node + - open_files + +export_options: + instance_keys: + - node + +override: + - cifs_op_count: rate diff --git a/conf/cmperf/9.19.1/cifs_vserver.yaml b/conf/cmperf/9.19.1/cifs_vserver.yaml new file mode 100644 index 000000000..799da520a --- /dev/null +++ b/conf/cmperf/9.19.1/cifs_vserver.yaml @@ -0,0 +1,28 @@ + +name: CIFSvserver +query: cifs:vserver +object: svm_cifs + +instance_key: uuid + +counters: + - cifs_latency => latency + - cifs_op_count => op_count + - cifs_ops => ops + - cifs_read_latency => read_latency + - cifs_read_ops => read_ops + - cifs_write_latency => write_latency + - cifs_write_ops => write_ops + - connections + - established_sessions + - instance_name => svm + - instance_uuid + - open_files + - signed_sessions + +export_options: + 
instance_keys: + - svm + +override: + - cifs_op_count: rate diff --git a/conf/cmperf/9.19.1/copy_manager.yaml b/conf/cmperf/9.19.1/copy_manager.yaml new file mode 100644 index 000000000..85ece22f8 --- /dev/null +++ b/conf/cmperf/9.19.1/copy_manager.yaml @@ -0,0 +1,21 @@ +name: CopyManager +query: copy_manager +object: copy_manager + +instance_key: uuid + +counters: + - KB_copied => kb_copied + - bce_copy_count_curr + - instance_name => svm + - instance_uuid + - ocs_copy_count_curr + - sce_copy_count_curr + - spince_copy_count_curr + +override: + - KB_copied: delta + +export_options: + instance_keys: + - svm diff --git a/conf/cmperf/9.19.1/disk.yaml b/conf/cmperf/9.19.1/disk.yaml new file mode 100644 index 000000000..c37efd231 --- /dev/null +++ b/conf/cmperf/9.19.1/disk.yaml @@ -0,0 +1,110 @@ +name: Disk +query: disk:constituent +object: disk +instance_key: uuid + +counters: + - cp_read_chain + - cp_read_latency + - cp_reads + - disk_busy + - disk_capacity + - disk_speed + - instance_name => partition + - instance_uuid + - io_pending + - io_queued + - node_name => node + - physical_disk_name => disk + - physical_disk_uuid => disk_uuid + - raid_group + - raid_type + - total_data + - total_transfers + - user_read_blocks + - user_read_chain + - user_read_latency + - user_reads + - user_write_blocks + - user_write_chain + - user_write_latency + - user_writes + +plugins: + LabelAgent: + split: + - raid_group `/` ,aggr,plex,raid + Aggregator: + # plugin will create summary/average for each object + # any names after the object names will be treated as + # label names that will be added to instances + - node + - aggr ... + - plex node,aggr,plex + - raid node,aggr,disk,plex,raid + + Max: + # plugin will create max for each object + # any names after the object names will be treated as + # label names that will be added to instances + - node<>node_disk_max + - aggr<>aggr_disk_max ... 
+ + Disk: + objects: + - cooling-fans => fan: + storage-shelf-cooling-fan-info: + - ^^fan-id + - ^fan-location => location + - ^fan-op-status => status + - fan-rpm => rpm + - current-sensors => sensor: + storage-shelf-current-sensor-info: + - ^^current-sensor-id => sensor_id + - ^current-sensor-location => location + - ^current-op-status => status + - current-sensor-reading => reading + - power-supply-units => psu: + storage-shelf-power-supply-unit-info: + # - ^psu-crest-factor => crest_factor + - ^psu-fw-version => firmware_version + - ^^psu-id + - ^psu-is-enabled => enabled + - ^psu-location => location + - ^psu-part-number => part_number + - psu-power-drawn => power_drawn + - psu-power-rating => power_rating + # - ^psu-reset-capable => reset_capable + - ^psu-serial-number => serial + - ^psu-type => type + - ^psu-op-status => status + - shelf-modules => module: + storage-shelf-module-info: + - ^^module-id + - ^module-fw-revision => firmware_version + - ^is-monitor-active => enabled + - ^module-location => location + - ^module-part-number => part_number + - ^es-serial-number => serial + - ^module-op-status => status + - temperature-sensors => temperature: + storage-shelf-temperature-sensor-info: + - ^high-critical-threshold => high_critical + - ^high-warning-threshold => high_warning + - ^temp-is-ambient + - ^temp-low-critical-threshold => low_critical + - ^temp-low-warning-threshold => low_warning + - ^^temp-sensor-id => sensor_id + - ^temp-sens-op-status => status + - temp-sensor-reading => reading + - voltage-sensors => voltage: + storage-shelf-voltage-sensor-info: + - ^^voltage-sensor-id => sensor_id + - ^voltage-sensor-location => location + - ^voltage-op-status => status + - voltage-sensor-reading => reading + + +# only export node/aggr aggregations from plugin +# set this true or comment, to get data for each disk +export_data: false diff --git a/conf/cmperf/9.19.1/ext_cache_obj.yaml b/conf/cmperf/9.19.1/ext_cache_obj.yaml new file mode 100644 index 
000000000..e5c333b26 --- /dev/null +++ b/conf/cmperf/9.19.1/ext_cache_obj.yaml @@ -0,0 +1,33 @@ + + +name: ExtCacheObj +query: ext_cache_obj +object: flashcache + +instance_key: uuid + +counters: + - accesses + - disk_reads_replaced + - evicts + - hit + - hit_directory + - hit_indirect + - hit_metadata_file + - hit_normal_lev0 + - hit_percent + - inserts + - instance_name + - instance_uuid + - invalidates + - miss + - miss_directory + - miss_indirect + - miss_metadata_file + - miss_normal_lev0 + - node_name => node + - usage + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/external_service_operation.yaml b/conf/cmperf/9.19.1/external_service_operation.yaml new file mode 100644 index 000000000..bbb8e85a0 --- /dev/null +++ b/conf/cmperf/9.19.1/external_service_operation.yaml @@ -0,0 +1,39 @@ + +name: ExternalServiceOperation +query: external_service_op +object: external_service_op + +instance_key: uuid + +counters: + - instance_name + - instance_uuid + - node_name => node + - num_not_found_responses + - num_request_failures + - num_requests_sent + - num_responses_received + - num_successful_responses + - num_timeouts + - operation + - process_name + - request_latency + - request_latency_hist + - server_ip_address + - server_name + - service_name + - vserver_name => svm + +plugins: + - ExternalServiceOperation + +export_options: + instance_keys: + - key + - node + - operation + - process_name + - server_ip_address + - server_name + - service_name + - svm diff --git a/conf/cmperf/9.19.1/fcp.yaml b/conf/cmperf/9.19.1/fcp.yaml new file mode 100644 index 000000000..af9aff70a --- /dev/null +++ b/conf/cmperf/9.19.1/fcp.yaml @@ -0,0 +1,74 @@ +name: FcpPort +query: fcp_port +object: fcp + +instance_key: uuid + +counters: + - avg_other_latency + - avg_read_latency + - avg_write_latency + - discarded_frames_count + - instance_name => port + - instance_uuid + - int_count + - invalid_crc + - invalid_transmission_word + - isr_count + - link_down + - 
link_failure + - link_speed => speed + - link_up + - loss_of_signal + - loss_of_sync + - node_name => node + - nvmf_avg_other_latency + - nvmf_avg_read_latency + - nvmf_avg_remote_other_latency + - nvmf_avg_remote_read_latency + - nvmf_avg_remote_write_latency + - nvmf_avg_write_latency + - nvmf_caw_data + - nvmf_caw_ops + - nvmf_command_slots + - nvmf_other_ops + - nvmf_read_data + - nvmf_read_ops + - nvmf_remote_caw_data + - nvmf_remote_caw_ops + - nvmf_remote_other_ops + - nvmf_remote_read_data + - nvmf_remote_read_ops + - nvmf_remote_total_data + - nvmf_remote_total_ops + - nvmf_remote_write_data + - nvmf_remote_write_ops + - nvmf_total_data + - nvmf_total_ops + - nvmf_write_data + - nvmf_write_ops + - other_ops + - prim_seq_err + - queue_full + - read_data + - read_ops + - reset_count + - shared_int_count + - spurious_int_count + - threshold_full + - total_data + - total_ops + - write_data + - write_ops + +override: + - link_speed: string + +plugins: + - Fcp + +export_options: + instance_keys: + - node + - port + - speed diff --git a/conf/cmperf/9.19.1/fcp_lif.yaml b/conf/cmperf/9.19.1/fcp_lif.yaml new file mode 100644 index 000000000..2e69c2c8e --- /dev/null +++ b/conf/cmperf/9.19.1/fcp_lif.yaml @@ -0,0 +1,31 @@ + + +name: FcpLif +query: fcp_lif +object: fcp_lif + +instance_key: uuid + +counters: + - avg_latency + - avg_other_latency + - avg_read_latency + - avg_write_latency + - instance_name => lif + - instance_uuid + - node_name => node + - other_ops + - port_id => port + - read_data + - read_ops + - total_ops + - vserver_name => svm + - write_data + - write_ops + +export_options: + instance_keys: + - lif + - node + - port + - svm diff --git a/conf/cmperf/9.19.1/fcvi.yaml b/conf/cmperf/9.19.1/fcvi.yaml new file mode 100644 index 000000000..c2def7e60 --- /dev/null +++ b/conf/cmperf/9.19.1/fcvi.yaml @@ -0,0 +1,31 @@ + +name: FCVI +query: fcvi +object: fcvi + +instance_key: uuid + +counters: + - fw_SyStatDiscardFrames => firmware_systat_discard_frames + - 
fw_invalid_crc => firmware_invalid_crc_count + - fw_invalid_xmit_words => firmware_invalid_transmit_word_count + - fw_link_failure => firmware_link_failure_count + - fw_loss_of_signal => firmware_loss_of_signal_count + - fw_loss_of_sync => firmware_loss_of_sync_count + - hard_reset_cnt => hard_reset_count + - instance_name => fcvi + - instance_uuid + - node_name => node + - rdma_write_avg_latency + - rdma_write_ops + - rdma_write_throughput + - soft_reset_cnt => soft_reset_count + +plugins: + - FCVI + +export_options: + instance_keys: + - fcvi + - node + - port diff --git a/conf/cmperf/9.19.1/flexcache.yaml b/conf/cmperf/9.19.1/flexcache.yaml new file mode 100644 index 000000000..a429eb41d --- /dev/null +++ b/conf/cmperf/9.19.1/flexcache.yaml @@ -0,0 +1,38 @@ +name: FlexCache +query: flexcache_per_volume +object: flexcache + +instance_key: + - uuid + - name + +counters: + - blocks_requested_from_client + - blocks_retrieved_from_origin + - evict_rw_cache_skipped_reason_disconnected + - evict_skipped_reason_config_noent + - evict_skipped_reason_disconnected + - evict_skipped_reason_offline + - instance_name => volume + - instance_uuid => svm + - invalidate_skipped_reason_config_noent + - invalidate_skipped_reason_disconnected + - invalidate_skipped_reason_offline + - nix_retry_skipped_reason_initiator_retrieve + - nix_skipped_reason_config_noent + - nix_skipped_reason_disconnected + - nix_skipped_reason_in_progress + - nix_skipped_reason_offline + - reconciled_data_entries + - reconciled_lock_entries + +plugins: + - FlexCache + - MetricAgent: + compute_metric: + - miss_percent PERCENT blocks_retrieved_from_origin blocks_requested_from_client + +export_options: + instance_keys: + - svm + - volume \ No newline at end of file diff --git a/conf/cmperf/9.19.1/fpolicy.yaml b/conf/cmperf/9.19.1/fpolicy.yaml new file mode 100644 index 000000000..fb5c51815 --- /dev/null +++ b/conf/cmperf/9.19.1/fpolicy.yaml @@ -0,0 +1,24 @@ + +name: FPolicy +query: fpolicy_policy +object: 
fpolicy + +counters: + - aborted_requests => aborted_requests + - denied_requests => denied_requests + - instance_name => instance + - io_processing_latency => io_processing_latency + - io_thread_wait_latency => io_thread_wait_latency + - policy_processing_latency => processing_latency + - processed_requests => processed_requests + +plugins: + LabelAgent: + split: + - instance `:` svm,policy + +export_options: + instance_keys: + - policy + - svm + diff --git a/conf/cmperf/9.19.1/fpolicy_server.yaml b/conf/cmperf/9.19.1/fpolicy_server.yaml new file mode 100644 index 000000000..6e0df45ce --- /dev/null +++ b/conf/cmperf/9.19.1/fpolicy_server.yaml @@ -0,0 +1,23 @@ +name: FPolicyServer +query: fpolicy_server +object: fpolicy_server + +counters: + - cancelled_requests => cancelled_requests + - failed_requests => failed_requests + - instance_name => instance + - max_request_latency => max_request_latency + - outstanding_requests => outstanding_requests + - processed_requests => processed_requests + - request_latency => request_latency + +plugins: + LabelAgent: + split: + - instance `:` svm,server + +export_options: + instance_keys: + - server + - svm + diff --git a/conf/cmperf/9.19.1/fpolicy_svm.yaml b/conf/cmperf/9.19.1/fpolicy_svm.yaml new file mode 100644 index 000000000..b4c6fee01 --- /dev/null +++ b/conf/cmperf/9.19.1/fpolicy_svm.yaml @@ -0,0 +1,17 @@ + +name: FPolicySVM +query: fpolicy +object: fpolicy_svm + +counters: + - aborted_requests => aborted_requests + - cifs_requests => cifs_requests + - failedop_notifications => failedop_notifications + - instance_name => svm + - io_processing_latency => io_processing_latency + - io_thread_wait_latency => io_thread_wait_latency + +export_options: + instance_keys: + - svm + diff --git a/conf/cmperf/9.19.1/hostadapter.yaml b/conf/cmperf/9.19.1/hostadapter.yaml new file mode 100644 index 000000000..2b339e47f --- /dev/null +++ b/conf/cmperf/9.19.1/hostadapter.yaml @@ -0,0 +1,17 @@ + +name: HostAdapter +query: hostadapter 
+object: hostadapter + +instance_key: uuid + +counters: + - bytes_read + - bytes_written + - instance_name + - node_name => node + +export_options: + instance_keys: + - hostadapter + - node diff --git a/conf/cmperf/9.19.1/iscsi_lif.yaml b/conf/cmperf/9.19.1/iscsi_lif.yaml new file mode 100644 index 000000000..f1c1e99dd --- /dev/null +++ b/conf/cmperf/9.19.1/iscsi_lif.yaml @@ -0,0 +1,32 @@ + +name: ISCSI +query: iscsi_lif +object: iscsi_lif + +instance_key: uuid + +counters: + - avg_latency + - avg_other_latency + - avg_read_latency + - avg_write_latency + - cmd_transfered + - instance_name => lif + - instance_uuid + - iscsi_other_ops + - iscsi_read_ops + - iscsi_write_ops + - node_name => node + - protocol_errors + - read_data + - vserver_name => svm + - write_data + +override: + - cmd_transfered: rate + +export_options: + instance_keys: + - lif + - node + - svm diff --git a/conf/cmperf/9.19.1/iwarp.yaml b/conf/cmperf/9.19.1/iwarp.yaml new file mode 100644 index 000000000..f6a4f3d05 --- /dev/null +++ b/conf/cmperf/9.19.1/iwarp.yaml @@ -0,0 +1,24 @@ + +name: Iwarp +query: iwarp +object: iw + +instance_key: uuid + +counters: + - instance_name => adapter + - instance_uuid + - iw_avg_latency => avg_latency + - iw_ops => ops + - iw_read_ops => read_ops + - iw_write_ops => write_ops + - node_name => node + +override: + - iw_read_ops: rate + - iw_write_ops: rate + +export_options: + instance_keys: + - adapter + - node diff --git a/conf/cmperf/9.19.1/lif.yaml b/conf/cmperf/9.19.1/lif.yaml new file mode 100644 index 000000000..328b90f91 --- /dev/null +++ b/conf/cmperf/9.19.1/lif.yaml @@ -0,0 +1,29 @@ + +name: LIF +query: lif +object: lif + +counters: + - current_port => port + - instance_name + - instance_uuid + - node_name => node + - recv_data + - recv_errors + - recv_packet + - sent_data + - sent_errors + - sent_packet + - up_time => uptime + - vserver_name => svm + + +export_options: + instance_keys: + - lif + - node + - port + - svm + +override: + up_time: raw \ No 
newline at end of file diff --git a/conf/cmperf/9.19.1/lun.yaml b/conf/cmperf/9.19.1/lun.yaml new file mode 100644 index 000000000..04cc47ca6 --- /dev/null +++ b/conf/cmperf/9.19.1/lun.yaml @@ -0,0 +1,55 @@ + +name: Lun +query: lun +object: lun + +counters: + - avg_read_latency + - avg_write_latency + - avg_xcopy_latency + - caw_reqs + - enospc + - instance_name + - queue_full + - read_align_histo + - read_data + - read_ops + - read_partial_blocks + - remote_bytes + - remote_ops + - unmap_reqs + - vserver_name => svm + - write_align_histo + - write_data + - write_ops + - write_partial_blocks + - writesame_reqs + - writesame_unmap_reqs + - xcopy_reqs + +override: + - writesame_reqs: rate + - writesame_unmap_reqs: rate + - caw_reqs: rate + - unmap_reqs: rate + - xcopy_reqs: rate + + +plugins: + - LabelAgent: + # There are two flavors of lun names + # /vol/vol_georg_fcp401/lun401 + # /vol/vol_georg_fcp401/lun401/lun401 + split_regex: + - lun `^/[^/]+/([^/]+)(?:/.*?|)/([^/]+)$` volume,lun + - MetricAgent: + compute_metric: + - total_data ADD read_data write_data + - total_ops ADD read_ops write_ops + - block_size DIVIDE total_data total_ops + +export_options: + instance_keys: + - lun # edited by plugin + - svm + - volume # added by plugin diff --git a/conf/cmperf/9.19.1/namespace.yaml b/conf/cmperf/9.19.1/namespace.yaml new file mode 100644 index 000000000..8d72e717f --- /dev/null +++ b/conf/cmperf/9.19.1/namespace.yaml @@ -0,0 +1,36 @@ +name: Namespace +query: namespace +object: namespace + +instance_key: name + +counters: + - avg_other_latency + - avg_read_latency + - avg_write_latency + - instance_name => path + - other_ops + - read_data + - read_ops + - remote_other_ops + - remote_read_data + - remote_read_ops + - remote_write_data + - remote_write_ops + - vserver_name => svm + - write_data + - write_ops + +plugins: + LabelAgent: + split: + - path `/` ,,volume,namespace + +export_options: + instance_keys: + - namespace + - path + - svm + - volume + + diff --git 
a/conf/cmperf/9.19.1/netstat.yaml b/conf/cmperf/9.19.1/netstat.yaml new file mode 100644 index 000000000..e8297535c --- /dev/null +++ b/conf/cmperf/9.19.1/netstat.yaml @@ -0,0 +1,38 @@ + +name: Netstat +query: netstat +object: netstat + +instance_key: uuid + +counters: + - bytes_recvd + - bytes_sent + - cong_win + - cong_win_th + - faddr + - fport_hbo => fport + - instance_uuid + - laddr + - lport_hbo => lport + - node_name => node + - ooorcv_pkts + - recv_window + - rexmit_pkts + - send_window + +override: + lport_hbo: string + fport_hbo: string + +plugins: + - LabelAgent: + join: + - faddr `_` faddr,fport + - laddr `_` laddr,lport + +export_options: + instance_keys: + - faddr + - laddr + - node \ No newline at end of file diff --git a/conf/cmperf/9.19.1/nfsv3.yaml b/conf/cmperf/9.19.1/nfsv3.yaml new file mode 100644 index 000000000..c3fa01049 --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv3.yaml @@ -0,0 +1,92 @@ + +name: NFSv3 +query: nfsv3 +object: svm_nfs + +global_labels: + - nfsv: v3 + +counters: + - access_avg_latency + - access_total + - commit_avg_latency + - commit_total + - create_avg_latency + - create_total + - fsinfo_avg_latency + - fsinfo_total + - fsstat_avg_latency + - fsstat_total + - getattr_avg_latency + - getattr_total + - instance_name => svm + - latency + - link_avg_latency + - link_total + - lookup_avg_latency + - lookup_total + - mkdir_avg_latency + - mkdir_total + - mknod_avg_latency + - mknod_total + - nfsv3_latency_hist => latency_hist + - nfsv3_ops => ops + - nfsv3_read_ops => read_ops + - nfsv3_read_throughput => read_throughput + - nfsv3_throughput => throughput + - nfsv3_write_ops => write_ops + - nfsv3_write_throughput => write_throughput + - null_avg_latency + - null_total + - pathconf_avg_latency + - pathconf_total + - read_avg_latency + - read_latency_hist + - read_symlink_avg_latency + - read_symlink_total + - read_total + - readdir_avg_latency + - readdir_total + - readdirplus_avg_latency + - readdirplus_total + - 
remove_avg_latency + - remove_total + - rename_avg_latency + - rename_total + - rmdir_avg_latency + - rmdir_total + - setattr_avg_latency + - setattr_total + - symlink_avg_latency + - symlink_total + - write_avg_latency + - write_latency_hist + - write_total + +override: + - access_total: rate + - commit_total: rate + - create_total: rate + - fsinfo_total: rate + - fsstat_total: rate + - getattr_total: rate + - link_total: rate + - lookup_total: rate + - mkdir_total: rate + - mknod_total: rate + - null_total: rate + - pathconf_total: rate + - read_symlink_total: rate + - read_total: rate + - readdir_total: rate + - readdirplus_total: rate + - remove_total: rate + - rename_total: rate + - rmdir_total: rate + - setattr_total: rate + - symlink_total: rate + - write_total: rate + +export_options: + instance_keys: + - svm diff --git a/conf/cmperf/9.19.1/nfsv3_node.yaml b/conf/cmperf/9.19.1/nfsv3_node.yaml new file mode 100644 index 000000000..75645dca0 --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv3_node.yaml @@ -0,0 +1,91 @@ + +name: NFSv3Node +query: nfsv3:node +object: node_nfs + +global_labels: + - nfsv: v3 + +allow_partial_aggregation: true + +counters: + - access_avg_latency + - access_total + - commit_avg_latency + - commit_total + - create_avg_latency + - create_total + - fsinfo_avg_latency + - fsinfo_total + - fsstat_avg_latency + - fsstat_total + - getattr_avg_latency + - getattr_total + - instance_name => node + - latency + - link_avg_latency + - link_total + - lookup_avg_latency + - lookup_total + - mkdir_avg_latency + - mkdir_total + - mknod_avg_latency + - mknod_total + - nfsv3_ops => total_ops # "nfs_ops" already used in system_node.yaml + - nfsv3_read_ops => read_ops + - nfsv3_read_throughput => read_throughput + - nfsv3_throughput => throughput + - nfsv3_write_ops => write_ops + - nfsv3_write_throughput => write_throughput + - null_avg_latency + - null_total + - pathconf_avg_latency + - pathconf_total + - read_avg_latency + - read_symlink_avg_latency + - 
read_symlink_total + - read_total + - readdir_avg_latency + - readdir_total + - readdirplus_avg_latency + - readdirplus_total + - remove_avg_latency + - remove_total + - rename_avg_latency + - rename_total + - rmdir_avg_latency + - rmdir_total + - setattr_avg_latency + - setattr_total + - symlink_avg_latency + - symlink_total + - write_avg_latency + - write_total + +override: + - access_total: rate + - commit_total: rate + - create_total: rate + - fsinfo_total: rate + - fsstat_total: rate + - getattr_total: rate + - link_total: rate + - lookup_total: rate + - mkdir_total: rate + - mknod_total: rate + - null_total: rate + - pathconf_total: rate + - read_symlink_total: rate + - read_total: rate + - readdir_total: rate + - readdirplus_total: rate + - remove_total: rate + - rename_total: rate + - rmdir_total: rate + - setattr_total: rate + - symlink_total: rate + - write_total: rate + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/nfsv4.yaml b/conf/cmperf/9.19.1/nfsv4.yaml new file mode 100644 index 000000000..bf0ceabc9 --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv4.yaml @@ -0,0 +1,140 @@ + +name: NFSv4 +query: nfsv4 +object: svm_nfs + +global_labels: + - nfsv: v4 + +counters: + - access_avg_latency + - access_total + - close_avg_latency + - close_total + - commit_avg_latency + - commit_total + - create_avg_latency + - create_total + - delegpurge_avg_latency + - delegpurge_total + - delegreturn_avg_latency + - delegreturn_total + - getattr_avg_latency + - getattr_total + - getfh_avg_latency + - getfh_total + - instance_name => svm + - latency + - link_avg_latency + - link_total + - lock_avg_latency + - lock_total + - lockt_avg_latency + - lockt_total + - locku_avg_latency + - locku_total + - lookup_avg_latency + - lookup_total + - lookupp_avg_latency + - lookupp_total + - nfs4_latency_hist => latency_hist + - nfs4_read_throughput => read_throughput + - nfs4_throughput => throughput + - nfs4_write_throughput => write_throughput + - 
null_avg_latency + - null_total + - nverify_avg_latency + - nverify_total + - open_avg_latency + - open_confirm_avg_latency + - open_confirm_total + - open_downgrade_avg_latency + - open_downgrade_total + - open_total + - openattr_avg_latency + - openattr_total + - putfh_avg_latency + - putfh_total + - putpubfh_avg_latency + - putpubfh_total + - putrootfh_avg_latency + - putrootfh_total + - read_avg_latency + - read_latency_hist + - read_total + - readdir_avg_latency + - readdir_total + - readlink_avg_latency + - readlink_total + - release_lock_owner_avg_latency + - release_lock_owner_total + - remove_avg_latency + - remove_total + - rename_avg_latency + - rename_total + - renew_avg_latency + - renew_total + - restorefh_avg_latency + - restorefh_total + - savefh_avg_latency + - savefh_total + - secinfo_avg_latency + - secinfo_total + - setattr_avg_latency + - setattr_total + - setclientid_avg_latency + - setclientid_confirm_avg_latency + - setclientid_confirm_total + - setclientid_total + - total_ops => ops + - verify_avg_latency + - verify_total + - write_avg_latency + - write_latency_hist + - write_total + +override: + - access_total: rate + - close_total: rate + - commit_total: rate + - compound_total: rate + - create_total: rate + - delegpurge_total: rate + - delegreturn_total: rate + - getattr_total: rate + - getfh_total: rate + - link_total: rate + - lock_total: rate + - lockt_total: rate + - locku_total: rate + - lookup_total: rate + - lookupp_total: rate + - null_total: rate + - nverify_total: rate + - open_confirm_total: rate + - open_downgrade_total: rate + - open_total: rate + - openattr_total: rate + - putfh_total: rate + - putpubfh_total: rate + - putrootfh_total: rate + - read_total: rate + - readdir_total: rate + - readlink_total: rate + - release_lock_owner_total: rate + - remove_total: rate + - rename_total: rate + - renew_total: rate + - restorefh_total: rate + - savefh_total: rate + - secinfo_total: rate + - setattr_total: rate + - 
setclientid_confirm_total: rate + - setclientid_total: rate + - verify_total: rate + - write_total: rate + +export_options: + instance_keys: + - svm + diff --git a/conf/cmperf/9.19.1/nfsv4_1.yaml b/conf/cmperf/9.19.1/nfsv4_1.yaml new file mode 100644 index 000000000..7cb286d20 --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv4_1.yaml @@ -0,0 +1,181 @@ + +name: NFSv41 +query: nfsv4_1 +object: svm_nfs + +global_labels: + - nfsv: v4.1 + +counters: + - access_avg_latency + - access_total + - backchannel_ctl_avg_latency + - backchannel_ctl_total + - bind_conn_to_session_avg_latency + - bind_conn_to_session_total + - close_avg_latency + - close_total + - commit_avg_latency + - commit_total + - create_avg_latency + - create_session_avg_latency + - create_session_total + - create_total + - delegpurge_avg_latency + - delegpurge_total + - delegreturn_avg_latency + - delegreturn_total + - destroy_clientid_avg_latency + - destroy_clientid_total + - destroy_session_avg_latency + - destroy_session_total + - exchange_id_avg_latency + - exchange_id_total + - free_stateid_avg_latency + - free_stateid_total + - get_dir_delegation_avg_latency + - get_dir_delegation_total + - getattr_avg_latency + - getattr_total + - getdeviceinfo_avg_latency + - getdeviceinfo_total + - getdevicelist_avg_latency + - getdevicelist_total + - getfh_avg_latency + - getfh_total + - instance_name => svm + - latency + - layoutcommit_avg_latency + - layoutcommit_total + - layoutget_avg_latency + - layoutget_total + - layoutreturn_avg_latency + - layoutreturn_total + - link_avg_latency + - link_total + - lock_avg_latency + - lock_total + - lockt_avg_latency + - lockt_total + - locku_avg_latency + - locku_total + - lookup_avg_latency + - lookup_total + - lookupp_avg_latency + - lookupp_total + - nfs41_latency_hist => latency_hist + - nfs41_read_throughput => read_throughput + - nfs41_throughput => throughput + - nfs41_write_throughput => write_throughput + - null_avg_latency + - null_total + - nverify_avg_latency + - 
nverify_total + - open_avg_latency + - open_downgrade_avg_latency + - open_downgrade_total + - open_total + - openattr_avg_latency + - openattr_total + - putfh_avg_latency + - putfh_total + - putpubfh_avg_latency + - putpubfh_total + - putrootfh_avg_latency + - putrootfh_total + - read_avg_latency + - read_latency_hist + - read_total + - readdir_avg_latency + - readdir_total + - readlink_avg_latency + - readlink_total + - reclaim_complete_avg_latency + - reclaim_complete_total + - remove_avg_latency + - remove_total + - rename_avg_latency + - rename_total + - restorefh_avg_latency + - restorefh_total + - savefh_avg_latency + - savefh_total + - secinfo_avg_latency + - secinfo_no_name_avg_latency + - secinfo_no_name_total + - secinfo_total + - sequence_avg_latency + - sequence_total + - set_ssv_avg_latency + - set_ssv_total + - setattr_avg_latency + - setattr_total + - test_stateid_avg_latency + - test_stateid_total + - total_ops => ops + - verify_avg_latency + - verify_total + - want_delegation_avg_latency + - want_delegation_total + - write_avg_latency + - write_latency_hist + - write_total + +export_options: + instance_keys: + - svm + +override: + - access_total: rate + - backchannel_ctl_total: rate + - bind_conn_to_session_total: rate + - close_total: rate + - commit_total: rate + - compound_total: rate + - create_session_total: rate + - create_total: rate + - delegpurge_total: rate + - delegreturn_total: rate + - destroy_clientid_total: rate + - destroy_session_total: rate + - exchange_id_total: rate + - free_stateid_total: rate + - get_dir_delegation_total: rate + - getattr_total: rate + - getdeviceinfo_total: rate + - getdevicelist_total: rate + - getfh_total: rate + - layoutcommit_total: rate + - layoutget_total: rate + - layoutreturn_total: rate + - link_total: rate + - lock_total: rate + - lockt_total: rate + - locku_total: rate + - lookup_total: rate + - lookupp_total: rate + - null_total: rate + - nverify_total: rate + - open_downgrade_total: rate + - 
open_total: rate + - openattr_total: rate + - putfh_total: rate + - putpubfh_total: rate + - putrootfh_total: rate + - read_total: rate + - readdir_total: rate + - readlink_total: rate + - reclaim_complete_total: rate + - remove_total: rate + - rename_total: rate + - restorefh_total: rate + - savefh_total: rate + - secinfo_no_name_total: rate + - secinfo_total: rate + - sequence_total: rate + - set_ssv_total: rate + - setattr_total: rate + - test_stateid_total: rate + - verify_total: rate + - want_delegation_total: rate + - write_total: rate diff --git a/conf/cmperf/9.19.1/nfsv4_1_node.yaml b/conf/cmperf/9.19.1/nfsv4_1_node.yaml new file mode 100644 index 000000000..f4821ebb2 --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv4_1_node.yaml @@ -0,0 +1,180 @@ + +name: NFSv41Node +query: nfsv4_1:node +object: node_nfs + +global_labels: + - nfsv: v4.1 + +allow_partial_aggregation: true + +counters: + - access_avg_latency + - access_total + - backchannel_ctl_avg_latency + - backchannel_ctl_total + - bind_conn_to_session_avg_latency + - bind_conn_to_session_total + - close_avg_latency + - close_total + - commit_avg_latency + - commit_total + - create_avg_latency + - create_session_avg_latency + - create_session_total + - create_total + - delegpurge_avg_latency + - delegpurge_total + - delegreturn_avg_latency + - delegreturn_total + - destroy_clientid_avg_latency + - destroy_clientid_total + - destroy_session_avg_latency + - destroy_session_total + - exchange_id_avg_latency + - exchange_id_total + - free_stateid_avg_latency + - free_stateid_total + - get_dir_delegation_avg_latency + - get_dir_delegation_total + - getattr_avg_latency + - getattr_total + - getdeviceinfo_avg_latency + - getdeviceinfo_total + - getdevicelist_avg_latency + - getdevicelist_total + - getfh_avg_latency + - getfh_total + - instance_name => node + - latency + - layoutcommit_avg_latency + - layoutcommit_total + - layoutget_avg_latency + - layoutget_total + - layoutreturn_avg_latency + - layoutreturn_total + 
- link_avg_latency + - link_total + - lock_avg_latency + - lock_total + - lockt_avg_latency + - lockt_total + - locku_avg_latency + - locku_total + - lookup_avg_latency + - lookup_total + - lookupp_avg_latency + - lookupp_total + - nfs41_read_throughput => read_throughput + - nfs41_throughput => throughput + - nfs41_write_throughput => write_throughput + - null_avg_latency + - null_total + - nverify_avg_latency + - nverify_total + - open_avg_latency + - open_downgrade_avg_latency + - open_downgrade_total + - open_total + - openattr_avg_latency + - openattr_total + - putfh_avg_latency + - putfh_total + - putpubfh_avg_latency + - putpubfh_total + - putrootfh_avg_latency + - putrootfh_total + - read_avg_latency + - read_total + - readdir_avg_latency + - readdir_total + - readlink_avg_latency + - readlink_total + - reclaim_complete_avg_latency + - reclaim_complete_total + - remove_avg_latency + - remove_total + - rename_avg_latency + - rename_total + - restorefh_avg_latency + - restorefh_total + - savefh_avg_latency + - savefh_total + - secinfo_avg_latency + - secinfo_no_name_avg_latency + - secinfo_no_name_total + - secinfo_total + - sequence_avg_latency + - sequence_total + - set_ssv_avg_latency + - set_ssv_total + - setattr_avg_latency + - setattr_total + - test_stateid_avg_latency + - test_stateid_total + - total_ops + - verify_avg_latency + - verify_total + - want_delegation_avg_latency + - want_delegation_total + - write_avg_latency + - write_total + +override: + - access_total: rate + - backchannel_ctl_total: rate + - bind_conn_to_session_total: rate + - close_total: rate + - commit_total: rate + - compound_total: rate + - create_session_total: rate + - create_total: rate + - delegpurge_total: rate + - delegreturn_total: rate + - destroy_clientid_total: rate + - destroy_session_total: rate + - exchange_id_total: rate + - free_stateid_total: rate + - get_dir_delegation_total: rate + - getattr_total: rate + - getdeviceinfo_total: rate + - getdevicelist_total: rate 
+ - getfh_total: rate + - layoutcommit_total: rate + - layoutget_total: rate + - layoutreturn_total: rate + - link_total: rate + - lock_total: rate + - lockt_total: rate + - locku_total: rate + - lookup_total: rate + - lookupp_total: rate + - null_total: rate + - nverify_total: rate + - open_downgrade_total: rate + - open_total: rate + - openattr_total: rate + - putfh_total: rate + - putpubfh_total: rate + - putrootfh_total: rate + - read_total: rate + - readdir_total: rate + - readlink_total: rate + - reclaim_complete_total: rate + - remove_total: rate + - rename_total: rate + - restorefh_total: rate + - savefh_total: rate + - secinfo_no_name_total: rate + - secinfo_total: rate + - sequence_total: rate + - set_ssv_total: rate + - setattr_total: rate + - test_stateid_total: rate + - verify_total: rate + - want_delegation_total: rate + - write_total: rate + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/nfsv4_2.yaml b/conf/cmperf/9.19.1/nfsv4_2.yaml new file mode 100644 index 000000000..2ef518a97 --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv4_2.yaml @@ -0,0 +1,180 @@ + +name: NFSv42 +query: nfsv4_2 +object: svm_nfs + +global_labels: + - nfsv: v4.2 + +allow_partial_aggregation: true + +counters: + - access_avg_latency + - access_total + - backchannel_ctl_avg_latency + - backchannel_ctl_total + - bind_conn_to_session_avg_latency + - bind_conn_to_session_total + - close_avg_latency + - close_total + - commit_avg_latency + - commit_total + - create_avg_latency + - create_session_avg_latency + - create_session_total + - create_total + - delegpurge_avg_latency + - delegpurge_total + - delegreturn_avg_latency + - delegreturn_total + - destroy_clientid_avg_latency + - destroy_clientid_total + - destroy_session_avg_latency + - destroy_session_total + - exchange_id_avg_latency + - exchange_id_total + - free_stateid_avg_latency + - free_stateid_total + - get_dir_delegation_avg_latency + - get_dir_delegation_total + - getattr_avg_latency + - 
getattr_total + - getdeviceinfo_avg_latency + - getdeviceinfo_total + - getdevicelist_avg_latency + - getdevicelist_total + - getfh_avg_latency + - getfh_total + - instance_name => svm + - latency + - layoutcommit_avg_latency + - layoutcommit_total + - layoutget_avg_latency + - layoutget_total + - layoutreturn_avg_latency + - layoutreturn_total + - link_avg_latency + - link_total + - lock_avg_latency + - lock_total + - lockt_avg_latency + - lockt_total + - locku_avg_latency + - locku_total + - lookup_avg_latency + - lookup_total + - lookupp_avg_latency + - lookupp_total + - nfs42_read_throughput => read_throughput + - nfs42_throughput => throughput + - nfs42_write_throughput => write_throughput + - null_avg_latency + - null_total + - nverify_avg_latency + - nverify_total + - open_avg_latency + - open_downgrade_avg_latency + - open_downgrade_total + - open_total + - openattr_avg_latency + - openattr_total + - putfh_avg_latency + - putfh_total + - putpubfh_avg_latency + - putpubfh_total + - putrootfh_avg_latency + - putrootfh_total + - read_avg_latency + - read_total + - readdir_avg_latency + - readdir_total + - readlink_avg_latency + - readlink_total + - reclaim_complete_avg_latency + - reclaim_complete_total + - remove_avg_latency + - remove_total + - rename_avg_latency + - rename_total + - restorefh_avg_latency + - restorefh_total + - savefh_avg_latency + - savefh_total + - secinfo_avg_latency + - secinfo_no_name_avg_latency + - secinfo_no_name_total + - secinfo_total + - sequence_avg_latency + - sequence_total + - set_ssv_avg_latency + - set_ssv_total + - setattr_avg_latency + - setattr_total + - test_stateid_avg_latency + - test_stateid_total + - total_ops => ops + - verify_avg_latency + - verify_total + - want_delegation_avg_latency + - want_delegation_total + - write_avg_latency + - write_total + +export_options: + instance_keys: + - svm + +override: + - access_total: rate + - backchannel_ctl_total: rate + - bind_conn_to_session_total: rate + - close_total: 
rate + - commit_total: rate + - compound_total: rate + - create_session_total: rate + - create_total: rate + - delegpurge_total: rate + - delegreturn_total: rate + - destroy_clientid_total: rate + - destroy_session_total: rate + - exchange_id_total: rate + - free_stateid_total: rate + - get_dir_delegation_total: rate + - getattr_total: rate + - getdeviceinfo_total: rate + - getdevicelist_total: rate + - getfh_total: rate + - layoutcommit_total: rate + - layoutget_total: rate + - layoutreturn_total: rate + - link_total: rate + - lock_total: rate + - lockt_total: rate + - locku_total: rate + - lookup_total: rate + - lookupp_total: rate + - null_total: rate + - nverify_total: rate + - open_downgrade_total: rate + - open_total: rate + - openattr_total: rate + - putfh_total: rate + - putpubfh_total: rate + - putrootfh_total: rate + - read_total: rate + - readdir_total: rate + - readlink_total: rate + - reclaim_complete_total: rate + - remove_total: rate + - rename_total: rate + - restorefh_total: rate + - savefh_total: rate + - secinfo_no_name_total: rate + - secinfo_total: rate + - sequence_total: rate + - set_ssv_total: rate + - setattr_total: rate + - test_stateid_total: rate + - verify_total: rate + - want_delegation_total: rate + - write_total: rate diff --git a/conf/cmperf/9.19.1/nfsv4_2_node.yaml b/conf/cmperf/9.19.1/nfsv4_2_node.yaml new file mode 100644 index 000000000..db64479ea --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv4_2_node.yaml @@ -0,0 +1,178 @@ + +name: NFSv42Node +query: nfsv4_2:node +object: node_nfs + +global_labels: + - nfsv: v4.2 + +counters: + - access_avg_latency + - access_total + - backchannel_ctl_avg_latency + - backchannel_ctl_total + - bind_conn_to_session_avg_latency + - bind_conn_to_session_total + - close_avg_latency + - close_total + - commit_avg_latency + - commit_total + - create_avg_latency + - create_session_avg_latency + - create_session_total + - create_total + - delegpurge_avg_latency + - delegpurge_total + - 
delegreturn_avg_latency + - delegreturn_total + - destroy_clientid_avg_latency + - destroy_clientid_total + - destroy_session_avg_latency + - destroy_session_total + - exchange_id_avg_latency + - exchange_id_total + - free_stateid_avg_latency + - free_stateid_total + - get_dir_delegation_avg_latency + - get_dir_delegation_total + - getattr_avg_latency + - getattr_total + - getdeviceinfo_avg_latency + - getdeviceinfo_total + - getdevicelist_avg_latency + - getdevicelist_total + - getfh_avg_latency + - getfh_total + - instance_name => node + - latency + - layoutcommit_avg_latency + - layoutcommit_total + - layoutget_avg_latency + - layoutget_total + - layoutreturn_avg_latency + - layoutreturn_total + - link_avg_latency + - link_total + - lock_avg_latency + - lock_total + - lockt_avg_latency + - lockt_total + - locku_avg_latency + - locku_total + - lookup_avg_latency + - lookup_total + - lookupp_avg_latency + - lookupp_total + - nfs42_read_throughput => read_throughput + - nfs42_throughput => throughput + - nfs42_write_throughput => write_throughput + - null_avg_latency + - null_total + - nverify_avg_latency + - nverify_total + - open_avg_latency + - open_downgrade_avg_latency + - open_downgrade_total + - open_total + - openattr_avg_latency + - openattr_total + - putfh_avg_latency + - putfh_total + - putpubfh_avg_latency + - putpubfh_total + - putrootfh_avg_latency + - putrootfh_total + - read_avg_latency + - read_total + - readdir_avg_latency + - readdir_total + - readlink_avg_latency + - readlink_total + - reclaim_complete_avg_latency + - reclaim_complete_total + - remove_avg_latency + - remove_total + - rename_avg_latency + - rename_total + - restorefh_avg_latency + - restorefh_total + - savefh_avg_latency + - savefh_total + - secinfo_avg_latency + - secinfo_no_name_avg_latency + - secinfo_no_name_total + - secinfo_total + - sequence_avg_latency + - sequence_total + - set_ssv_avg_latency + - set_ssv_total + - setattr_avg_latency + - setattr_total + - 
test_stateid_avg_latency + - test_stateid_total + - total_ops + - verify_avg_latency + - verify_total + - want_delegation_avg_latency + - want_delegation_total + - write_avg_latency + - write_total + +override: + - access_total: rate + - backchannel_ctl_total: rate + - bind_conn_to_session_total: rate + - close_total: rate + - commit_total: rate + - compound_total: rate + - create_session_total: rate + - create_total: rate + - delegpurge_total: rate + - delegreturn_total: rate + - destroy_clientid_total: rate + - destroy_session_total: rate + - exchange_id_total: rate + - free_stateid_total: rate + - get_dir_delegation_total: rate + - getattr_total: rate + - getdeviceinfo_total: rate + - getdevicelist_total: rate + - getfh_total: rate + - layoutcommit_total: rate + - layoutget_total: rate + - layoutreturn_total: rate + - link_total: rate + - lock_total: rate + - lockt_total: rate + - locku_total: rate + - lookup_total: rate + - lookupp_total: rate + - null_total: rate + - nverify_total: rate + - open_downgrade_total: rate + - open_total: rate + - openattr_total: rate + - putfh_total: rate + - putpubfh_total: rate + - putrootfh_total: rate + - read_total: rate + - readdir_total: rate + - readlink_total: rate + - reclaim_complete_total: rate + - remove_total: rate + - rename_total: rate + - restorefh_total: rate + - savefh_total: rate + - secinfo_no_name_total: rate + - secinfo_total: rate + - sequence_total: rate + - set_ssv_total: rate + - setattr_total: rate + - test_stateid_total: rate + - verify_total: rate + - want_delegation_total: rate + - write_total: rate + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/nfsv4_node.yaml b/conf/cmperf/9.19.1/nfsv4_node.yaml new file mode 100644 index 000000000..1d54cf1fd --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv4_node.yaml @@ -0,0 +1,136 @@ + +name: NFSv4Node +query: nfsv4:node +object: node_nfs + +global_labels: + - nfsv: v4 + +counters: + - access_avg_latency + - access_total + - 
close_avg_latency + - close_total + - commit_avg_latency + - commit_total + - create_avg_latency + - create_total + - delegpurge_avg_latency + - delegpurge_total + - delegreturn_avg_latency + - delegreturn_total + - getattr_avg_latency + - getattr_total + - getfh_avg_latency + - getfh_total + - instance_name => node + - latency + - link_avg_latency + - link_total + - lock_avg_latency + - lock_total + - lockt_avg_latency + - lockt_total + - locku_avg_latency + - locku_total + - lookup_avg_latency + - lookup_total + - lookupp_avg_latency + - lookupp_total + - nfs4_read_throughput => read_throughput + - nfs4_throughput => throughput + - nfs4_write_throughput => write_throughput + - null_avg_latency + - null_total + - nverify_avg_latency + - nverify_total + - open_avg_latency + - open_confirm_avg_latency + - open_confirm_total + - open_downgrade_avg_latency + - open_downgrade_total + - open_total + - openattr_avg_latency + - openattr_total + - putfh_avg_latency + - putfh_total + - putpubfh_avg_latency + - putpubfh_total + - putrootfh_avg_latency + - putrootfh_total + - read_avg_latency + - read_total + - readdir_avg_latency + - readdir_total + - readlink_avg_latency + - readlink_total + - release_lock_owner_avg_latency + - release_lock_owner_total + - remove_avg_latency + - remove_total + - rename_avg_latency + - rename_total + - renew_avg_latency + - renew_total + - restorefh_avg_latency + - restorefh_total + - savefh_avg_latency + - savefh_total + - secinfo_avg_latency + - secinfo_total + - setattr_avg_latency + - setattr_total + - setclientid_avg_latency + - setclientid_confirm_avg_latency + - setclientid_confirm_total + - setclientid_total + - total_ops + - verify_avg_latency + - verify_total + - write_avg_latency + - write_total + +override: + - access_total: rate + - close_total: rate + - commit_total: rate + - compound_total: rate + - create_total: rate + - delegpurge_total: rate + - delegreturn_total: rate + - getattr_total: rate + - getfh_total: rate + - 
link_total: rate + - lock_total: rate + - lockt_total: rate + - locku_total: rate + - lookup_total: rate + - lookupp_total: rate + - null_total: rate + - nverify_total: rate + - open_confirm_total: rate + - open_downgrade_total: rate + - open_total: rate + - openattr_total: rate + - putfh_total: rate + - putpubfh_total: rate + - putrootfh_total: rate + - read_total: rate + - readdir_total: rate + - readlink_total: rate + - release_lock_owner_total: rate + - remove_total: rate + - rename_total: rate + - renew_total: rate + - restorefh_total: rate + - savefh_total: rate + - secinfo_total: rate + - setattr_total: rate + - setclientid_confirm_total: rate + - setclientid_total: rate + - verify_total: rate + - write_total: rate + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/nfsv4_pool.yaml b/conf/cmperf/9.19.1/nfsv4_pool.yaml new file mode 100644 index 000000000..c1ff81d1a --- /dev/null +++ b/conf/cmperf/9.19.1/nfsv4_pool.yaml @@ -0,0 +1,51 @@ +name: NFSv4Pool +query: nfsv4_diag +object: nfs_diag + +instance_key: uuid + +global_labels: + - nfsv: v4 + +counters: + - instance_uuid + - node_name => node + - storePool_ByteLockAlloc + - storePool_ByteLockMax + - storePool_ClientAlloc + - storePool_ClientMax + - storePool_ConnectionParentSessionReferenceAlloc + - storePool_ConnectionParentSessionReferenceMax + - storePool_CopyStateAlloc + - storePool_CopyStateMax + - storePool_DelegAlloc + - storePool_DelegMax + - storePool_DelegStateAlloc + - storePool_DelegStateMax + - storePool_LayoutAlloc + - storePool_LayoutMax + - storePool_LayoutStateAlloc + - storePool_LayoutStateMax + - storePool_LockStateAlloc + - storePool_LockStateMax + - storePool_OpenAlloc + - storePool_OpenMax + - storePool_OpenStateAlloc + - storePool_OpenStateMax + - storePool_OwnerAlloc + - storePool_OwnerMax + - storePool_SessionAlloc + - storePool_SessionConnectionHolderAlloc + - storePool_SessionConnectionHolderMax + - storePool_SessionHolderAlloc + - 
storePool_SessionHolderMax + - storePool_SessionMax + - storePool_StateRefHistoryAlloc + - storePool_StateRefHistoryMax + - storePool_StringAlloc + - storePool_StringMax + +export_options: + require_instance_keys: false + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/nic_common.yaml b/conf/cmperf/9.19.1/nic_common.yaml new file mode 100644 index 000000000..7cd0bd87c --- /dev/null +++ b/conf/cmperf/9.19.1/nic_common.yaml @@ -0,0 +1,45 @@ + +name: NicCommon +query: nic_common +object: nic + +instance_key: uuid + +counters: + - instance_name + - instance_uuid + - link_current_state => state + - link_speed => speed + - link_up_to_downs + - nic_type => type + - node_name => node + - rx_alignment_errors + - rx_bytes + - rx_crc_errors + - rx_errors + - rx_length_errors + - rx_total_errors + - tx_bytes + - tx_errors + - tx_hw_errors + - tx_total_errors + +override: + - link_speed: string + +plugins: + - Nic + - LabelAgent: + # metric label zapi_value rest_value `default_value` + value_to_num: + - new_status state up up `0` + +export_options: + instance_keys: + - nic + - node + instance_labels: + - speed + - state + - type + diff --git a/conf/cmperf/9.19.1/nvm_mirror.yaml b/conf/cmperf/9.19.1/nvm_mirror.yaml new file mode 100644 index 000000000..70629cd5f --- /dev/null +++ b/conf/cmperf/9.19.1/nvm_mirror.yaml @@ -0,0 +1,17 @@ + +name: NvmMirror +query: nvm_mirror +object: nvm_mirror + +instance_key: uuid + +counters: + - instance_name => name + - instance_uuid + - node_name => node + - write_throughput + +export_options: + instance_keys: + - name + - node diff --git a/conf/cmperf/9.19.1/nvmf_lif.yaml b/conf/cmperf/9.19.1/nvmf_lif.yaml new file mode 100644 index 000000000..782967c5b --- /dev/null +++ b/conf/cmperf/9.19.1/nvmf_lif.yaml @@ -0,0 +1,30 @@ +name: NVMfLif +query: nvmf_fc_lif +object: nvme_lif + +instance_key: uuid + +counters: + - avg_latency + - avg_other_latency + - avg_read_latency + - avg_write_latency + - instance_name => lif + - instance_uuid + - 
node_name => node + - other_ops + - port_id => port + - read_data + - read_ops + - total_ops + - vserver_name => svm + - write_data + - write_ops + +export_options: + instance_keys: + - lif + - node + - port + - svm + diff --git a/conf/cmperf/9.19.1/nvmf_rdma_port.yaml b/conf/cmperf/9.19.1/nvmf_rdma_port.yaml new file mode 100644 index 000000000..0ab2fabdf --- /dev/null +++ b/conf/cmperf/9.19.1/nvmf_rdma_port.yaml @@ -0,0 +1,31 @@ + +name: NvmfRdmaPort +query: nvmf_rdma_port +object: nvmf_rdma_port + +instance_key: uuid + +counters: + - avg_latency + - avg_other_latency + - avg_read_latency + - avg_write_latency + - instance_uuid + - node_name => node + - other_ops + - port_id => lif + - port_ip_addr + - read_data + - read_ops + - total_data + - total_ops + - vserver_name => svm + - write_data + - write_ops + +export_options: + instance_keys: + - lif + - node + - port_ip_addr + - svm diff --git a/conf/cmperf/9.19.1/nvmf_tcp_port.yaml b/conf/cmperf/9.19.1/nvmf_tcp_port.yaml new file mode 100644 index 000000000..4d0929776 --- /dev/null +++ b/conf/cmperf/9.19.1/nvmf_tcp_port.yaml @@ -0,0 +1,31 @@ + +name: NvmfTcpPort +query: nvmf_tcp_port +object: nvmf_tcp_port + +instance_key: uuid + +counters: + - avg_latency + - avg_other_latency + - avg_read_latency + - avg_write_latency + - instance_uuid + - node_name => node + - other_ops + - port_id => lif + - port_ip_addr + - read_data + - read_ops + - total_data + - total_ops + - vserver_name => svm + - write_data + - write_ops + +export_options: + instance_keys: + - lif + - node + - port_ip_addr + - svm diff --git a/conf/cmperf/9.19.1/object_store_client_op.yaml b/conf/cmperf/9.19.1/object_store_client_op.yaml new file mode 100644 index 000000000..9b376dfd4 --- /dev/null +++ b/conf/cmperf/9.19.1/object_store_client_op.yaml @@ -0,0 +1,21 @@ + +name: ObjectStoreClient +query: object_store_client_op +object: fabricpool + +instance_key: uuid + +counters: + - average_latency + - get_throughput_bytes + - instance_name + - 
instance_uuid + - node_name => node + - put_throughput_bytes + - stats + - throughput_ops + +export_options: + instance_keys: + - fabricpool + - node diff --git a/conf/cmperf/9.19.1/ontap_s3_svm.yaml b/conf/cmperf/9.19.1/ontap_s3_svm.yaml new file mode 100644 index 000000000..1ea5e2292 --- /dev/null +++ b/conf/cmperf/9.19.1/ontap_s3_svm.yaml @@ -0,0 +1,146 @@ +name: OntapS3SVM +query: object_store_server +object: ontaps3_svm + +counters: + - abort_multipart_upload_failed + - abort_multipart_upload_failed_client_close + - abort_multipart_upload_latency + - abort_multipart_upload_rate + - abort_multipart_upload_total + - allow_access + - anonymous_access + - anonymous_deny_access + - authentication_failures + - chunked_upload_reqs + - complete_multipart_upload_failed + - complete_multipart_upload_failed_client_close + - complete_multipart_upload_latency + - complete_multipart_upload_rate + - complete_multipart_upload_total + - connected_connections + - connections + - create_bucket_failed + - create_bucket_failed_client_close + - create_bucket_latency + - create_bucket_rate + - create_bucket_total + - default_deny_access + - delete_bucket_failed + - delete_bucket_failed_client_close + - delete_bucket_latency + - delete_bucket_rate + - delete_bucket_total + - delete_object_failed + - delete_object_failed_client_close + - delete_object_latency + - delete_object_rate + - delete_object_tagging_failed + - delete_object_tagging_failed_client_close + - delete_object_tagging_latency + - delete_object_tagging_rate + - delete_object_tagging_total + - delete_object_total + - explicit_deny_access + - get_bucket_acl_failed + - get_bucket_acl_total + - get_bucket_versioning_failed + - get_bucket_versioning_total + - get_data + - get_object_acl_failed + - get_object_acl_total + - get_object_failed + - get_object_failed_client_close + - get_object_lastbyte_latency + - get_object_latency + - get_object_rate + - get_object_tagging_failed + - get_object_tagging_failed_client_close + - 
get_object_tagging_latency + - get_object_tagging_rate + - get_object_tagging_total + - get_object_total + - group_policy_evaluated + - head_bucket_failed + - head_bucket_failed_client_close + - head_bucket_latency + - head_bucket_rate + - head_bucket_total + - head_object_failed + - head_object_failed_client_close + - head_object_latency + - head_object_rate + - head_object_total + - initiate_multipart_upload_failed + - initiate_multipart_upload_failed_client_close + - initiate_multipart_upload_latency + - initiate_multipart_upload_rate + - initiate_multipart_upload_total + - input_flow_control_entry + - input_flow_control_exit + - list_buckets_failed + - list_buckets_failed_client_close + - list_buckets_latency + - list_buckets_rate + - list_buckets_total + - list_object_versions_failed + - list_object_versions_failed_client_close + - list_object_versions_latency + - list_object_versions_rate + - list_object_versions_total + - list_objects_failed + - list_objects_failed_client_close + - list_objects_latency + - list_objects_rate + - list_objects_total + - list_uploads_failed + - list_uploads_failed_client_close + - list_uploads_latency + - list_uploads_rate + - list_uploads_total + - max_cmds_per_connection + - max_connected_connections + - max_requests_outstanding + - multi_delete_reqs + - node_name => node + - output_flow_control_entry + - output_flow_control_exit + - presigned_url_reqs + - put_bucket_versioning_failed + - put_bucket_versioning_total + - put_data + - put_object_failed + - put_object_failed_client_close + - put_object_latency + - put_object_rate + - put_object_tagging_failed + - put_object_tagging_failed_client_close + - put_object_tagging_latency + - put_object_tagging_rate + - put_object_tagging_total + - put_object_total + - request_parse_errors + - requests + - requests_outstanding + - root_user_access + - server_connection_close + - signature_v2_reqs + - signature_v4_reqs + - tagging + - upload_part_failed + - 
upload_part_failed_client_close + - upload_part_latency + - upload_part_rate + - upload_part_total + - vserver_name => svm + +plugins: + - Aggregator: + # plugin will create summary/average for each ontaps3_svm object + # any names after the object names will be treated as + # label names that will be added to instances + - svm<>ontaps3_svm + +# only export svm aggregations from plugin +# set this true or comment, to get data for each ontaps3_svm +export_data: false diff --git a/conf/cmperf/9.19.1/path.yaml b/conf/cmperf/9.19.1/path.yaml new file mode 100644 index 000000000..41494b54a --- /dev/null +++ b/conf/cmperf/9.19.1/path.yaml @@ -0,0 +1,31 @@ + +name: Path +query: path +object: path + +instance_key: uuid + +counters: + - instance_name + - instance_uuid + - node_name => node + - read_data + - read_iops + - read_latency + - total_data + - total_iops + - write_data + - write_iops + - write_latency + +plugins: + LabelAgent: + split: + - path `_` hostadapter,target_wwpn + +export_options: + instance_keys: + - hostadapter # from plugin + - node + - target_wwpn # from plugin + diff --git a/conf/cmperf/9.19.1/qtree.yaml b/conf/cmperf/9.19.1/qtree.yaml new file mode 100644 index 000000000..3407c6c2b --- /dev/null +++ b/conf/cmperf/9.19.1/qtree.yaml @@ -0,0 +1,26 @@ +name: Qtree +query: qtree +object: qtree +instance_key: uuid +counters: + - cifs_ops + - instance_name => qtreefull + - instance_uuid + - internal_ops + - nfs_ops + - node_name => node + - parent_vol => volume + - total_ops + - vserver_name => svm + +plugins: + - LabelAgent: + split: + - qtreefull `/` ,qtree + exclude_equals: + - qtree `` +export_options: + instance_keys: + - qtree + - svm + - volume \ No newline at end of file diff --git a/conf/cmperf/9.19.1/resource_headroom_aggr.yaml b/conf/cmperf/9.19.1/resource_headroom_aggr.yaml new file mode 100644 index 000000000..353051d67 --- /dev/null +++ b/conf/cmperf/9.19.1/resource_headroom_aggr.yaml @@ -0,0 +1,27 @@ +name: HeadroomAggr +query: 
resource_headroom_aggr +object: headroom_aggr + +counters: + - current_latency + - current_ops + - current_utilization + - ewma_daily + - ewma_hourly + - ewma_monthly + - ewma_weekly + - instance_name + - node_name => node + - optimal_point_confidence_factor + - optimal_point_latency + - optimal_point_ops + - optimal_point_utilization + +plugins: + - Headroom + +export_options: + instance_keys: + - aggr + - disk_type + - node diff --git a/conf/cmperf/9.19.1/resource_headroom_cpu.yaml b/conf/cmperf/9.19.1/resource_headroom_cpu.yaml new file mode 100644 index 000000000..57bac9249 --- /dev/null +++ b/conf/cmperf/9.19.1/resource_headroom_cpu.yaml @@ -0,0 +1,22 @@ +name: HeadroomCPU +query: resource_headroom_cpu +object: headroom_cpu + +counters: + - current_latency + - current_ops + - current_utilization + - ewma_daily + - ewma_hourly + - ewma_monthly + - ewma_weekly + - instance_name + - node_name => node + - optimal_point_confidence_factor + - optimal_point_latency + - optimal_point_ops + - optimal_point_utilization + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/rwctx.yaml b/conf/cmperf/9.19.1/rwctx.yaml new file mode 100644 index 000000000..cf5cc9015 --- /dev/null +++ b/conf/cmperf/9.19.1/rwctx.yaml @@ -0,0 +1,17 @@ +name: Rwctx +query: rw_ctx +object: rw_ctx + +counters: + - cifs_giveups + - cifs_rewinds + - instance_uuid + - nfs_giveups + - nfs_rewinds + - node_name => node + - qos_flowcontrol + - qos_rewinds + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/smb2.yaml b/conf/cmperf/9.19.1/smb2.yaml new file mode 100644 index 000000000..4b2c5d999 --- /dev/null +++ b/conf/cmperf/9.19.1/smb2.yaml @@ -0,0 +1,54 @@ +name: SMB2 +query: smb2 +object: smb2 + +counters: + - close_latency + - close_ops + - create_latency + - create_ops + - instance_uuid + - lock_latency + - lock_ops + - negotiate_latency + - negotiate_ops + - node_name => node + - oplock_break_latency + - oplock_break_ops + - query_directory_latency + 
- query_directory_ops + - query_info_latency + - query_info_ops + - read_latency + - read_ops + - session_setup_latency + - session_setup_ops + - set_info_latency + - set_info_ops + - tree_connect_latency + - tree_connect_ops + - vserver_name => svm + - write_latency + - write_ops +# Histograms are disabled by default since they are expensive to export. +# - close_latency_histogram +# - create_latency_histogram +# - lock_latency_histogram +# - oplock_break_latency_histogram +# - query_directory_latency_histogram +# - query_info_latency_histogram +# - session_setup_latency_histogram +# - set_info_latency_histogram + + +plugins: + LabelAgent: + split: + # instance_uuid AFF-02:kernel:A_SVM_FOR_CONTAINERS + - instance_uuid `:` ,type, + +export_options: + instance_keys: + - node + - svm + - type diff --git a/conf/cmperf/9.19.1/system_node.yaml b/conf/cmperf/9.19.1/system_node.yaml new file mode 100644 index 000000000..e36e86eb9 --- /dev/null +++ b/conf/cmperf/9.19.1/system_node.yaml @@ -0,0 +1,56 @@ + +name: SystemNode +query: system:node +object: node + +instance_key: name + +allow_partial_aggregation: true + +counters: + - avg_processor_busy + - cifs_ops + - cpu_busy + - cpu_elapsed_time + - disk_data_read + - disk_data_written + - domain_busy => cpu_domain_busy + - fcp_data_recv + - fcp_data_sent + - fcp_ops + - hdd_data_read + - hdd_data_written + - instance_name + - iscsi_ops + - memory + - net_data_recv + - net_data_sent + - nfs_ops + - nvme_fc_data_recv + - nvme_fc_data_sent + - nvme_fc_ops + - other_data + - other_latency + - other_ops + - read_data + - read_latency + - read_ops + - ssd_data_read + - ssd_data_written + - total_data + - total_latency + - total_ops + - write_data + - write_latency + - write_ops + +plugins: + - MetricAgent: + compute_metric: + - nvmf_data_recv MULTIPLY nvme_fc_data_recv 1 # Added for backward compatibility with versions prior to ONTAP 9.15.1 + - nvmf_data_sent MULTIPLY nvme_fc_data_sent 1 # Added for backward compatibility with 
versions prior to ONTAP 9.15.1 + - nvmf_ops MULTIPLY nvme_fc_ops 1 # Added for backward compatibility with versions prior to ONTAP 9.15.1 + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/token_manager.yaml b/conf/cmperf/9.19.1/token_manager.yaml new file mode 100644 index 000000000..73271e88e --- /dev/null +++ b/conf/cmperf/9.19.1/token_manager.yaml @@ -0,0 +1,29 @@ +name: TokenManager +query: token_manager +object: token + +instance_key: uuid + +counters: + - instance_name + - instance_uuid + - node_name => node + - token_copy_bytes + - token_copy_failure + - token_copy_success + - token_create_bytes + - token_create_failure + - token_create_success + - token_zero_bytes + - token_zero_failure + - token_zero_success + +override: + - token_copy_bytes: rate + - token_create_bytes: rate + - token_zero_bytes: rate + +export_options: + instance_keys: + - node + - token diff --git a/conf/cmperf/9.19.1/volume.yaml b/conf/cmperf/9.19.1/volume.yaml new file mode 100644 index 000000000..618a29f19 --- /dev/null +++ b/conf/cmperf/9.19.1/volume.yaml @@ -0,0 +1,70 @@ + +name: Volume +query: volume +object: volume + +instance_key: uuid + +counters: + - avg_latency + - instance_name + - instance_uuid + - nfs_access_latency + - nfs_access_ops + - nfs_getattr_latency + - nfs_getattr_ops + - nfs_lookup_latency + - nfs_lookup_ops + - nfs_other_latency + - nfs_other_ops + - nfs_punch_hole_latency + - nfs_punch_hole_ops + - nfs_read_latency + - nfs_read_ops + - nfs_setattr_latency + - nfs_setattr_ops + - nfs_total_ops + - nfs_write_latency + - nfs_write_ops + - node_name => node + - other_latency + - other_ops + - parent_aggr => aggr + - read_data + - read_latency + - read_ops + - total_ops + - vserver_name => svm + - write_data + - write_latency + - write_ops + +plugins: + - MetricAgent: + compute_metric: + - total_data ADD read_data write_data + - Aggregator: + # plugin will create summary/average for each object + # any names after the object names will be 
treated as label names that will be added to instances + - node + - svm<>svm_vol + - Volume: + include_constituents: false + - LabelAgent: + # Ignore transient volumes, e.g. SnapProtect, SnapManager, SnapCenter, CommVault, Clone, and Metadata volumes + exclude_regex: + - volume `.+_CVclone` + - volume `.+(0[1-9]|[12][0-9]|3[01])(0[1-9]|1[012])\d\d[0-9]{6}` + - volume `cl_.+_(19|20)\d\d(0[1-9]|1[012])(0[1-9]|[12][0-9]|3[01])[0-9]{6}` + - volume `sdw_cl_.+` + - volume `MDV_CRS_.+` + - volume `MDV_aud_.+` + - volume `cohesity_.+` + +export_options: + instance_keys: + - aggr + - node + - style + - svm + - volume diff --git a/conf/cmperf/9.19.1/volume_node.yaml b/conf/cmperf/9.19.1/volume_node.yaml new file mode 100644 index 000000000..cef44070e --- /dev/null +++ b/conf/cmperf/9.19.1/volume_node.yaml @@ -0,0 +1,50 @@ +# This template is disabled by default. The ONTAP volume metrics provided via ZapiPerf and RestPerf may be incomplete. +# ONTAP volume metrics may be missing other ops, including NAS protocol ops. 
+# See: https://github.com/NetApp/harvest/discussions/3900 + +name: VolumeNode +query: volume:node +object: node_vol + +allow_partial_aggregation: true + +counters: + - cifs_other_latency + - cifs_other_ops + - cifs_read_data + - cifs_read_latency + - cifs_read_ops + - cifs_write_data + - cifs_write_latency + - cifs_write_ops + - fcp_other_latency + - fcp_other_ops + - fcp_read_data + - fcp_read_latency + - fcp_read_ops + - fcp_write_data + - fcp_write_latency + - fcp_write_ops + - instance_name => node + - iscsi_other_latency + - iscsi_other_ops + - iscsi_read_data + - iscsi_read_latency + - iscsi_read_ops + - iscsi_write_data + - iscsi_write_latency + - iscsi_write_ops + - nfs_other_latency + - nfs_other_ops + - nfs_read_data + - nfs_read_latency + - nfs_read_ops + - nfs_write_data + - nfs_write_latency + - nfs_write_ops + - read_latency + - write_latency + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/vscan.yaml b/conf/cmperf/9.19.1/vscan.yaml new file mode 100644 index 000000000..4acbe76aa --- /dev/null +++ b/conf/cmperf/9.19.1/vscan.yaml @@ -0,0 +1,27 @@ +# Offbox vscan counters from a per cluster perspective +name: Vscan +query: offbox_vscan_server +object: vscan + +instance_key: uuid + +counters: + - instance_name + - instance_uuid + - scan_latency + - scan_request_dispatched_rate + - scanner_stats_pct_cpu_used + - scanner_stats_pct_mem_used + - scanner_stats_pct_network_used + +plugins: + - Vscan: + # when metricsPerScanner is true, the counters are aggregated per scanner + # otherwise, they're not + metricsPerScanner: true + +export_options: + instance_keys: + - node + - scanner + - svm diff --git a/conf/cmperf/9.19.1/vscan_svm.yaml b/conf/cmperf/9.19.1/vscan_svm.yaml new file mode 100644 index 000000000..15bf57790 --- /dev/null +++ b/conf/cmperf/9.19.1/vscan_svm.yaml @@ -0,0 +1,19 @@ +# Offbox vscan counters from a per SVM perspective +name: VscanSVM +query: offbox_vscan +object: svm_vscan + +instance_key: uuid + +counters: + 
- connections_active + - dispatch_latency + - instance_name => svm + - instance_uuid + - scan_latency + - scan_noti_received_rate + - scan_request_dispatched_rate + +export_options: + instance_keys: + - svm diff --git a/conf/cmperf/9.19.1/wafl.yaml b/conf/cmperf/9.19.1/wafl.yaml new file mode 100644 index 000000000..f620c3fa2 --- /dev/null +++ b/conf/cmperf/9.19.1/wafl.yaml @@ -0,0 +1,38 @@ + +name: WAFL +query: wafl +object: wafl + +instance_key: uuid + +counters: + - avg_non_wafl_msg_latency # latency + - avg_wafl_msg_latency => avg_msg_latency + - avg_wafl_repl_msg_latency => avg_repl_msg_latency + - cp_count # consistency point + - cp_phase_times + - instance_uuid + - node_name => node + - non_wafl_msg_total + - read_io_type + - total_cp_msecs + - total_cp_util + - wafl_memory_free + - wafl_memory_used # memory + - wafl_msg_total => msg_total # iops + - wafl_reads_from_cache # reads from + - wafl_reads_from_cloud + - wafl_reads_from_cloud_s2c_bin + - wafl_reads_from_disk + - wafl_reads_from_ext_cache + - wafl_reads_from_fc_miss + - wafl_reads_from_pmem + - wafl_reads_from_ssd + - wafl_repl_msg_total + +override: + - read_io_type_base: delta + +export_options: + instance_keys: + - node diff --git a/conf/cmperf/9.19.1/wafl_comp_aggr_vol_bin.yaml b/conf/cmperf/9.19.1/wafl_comp_aggr_vol_bin.yaml new file mode 100644 index 000000000..03aafa1c6 --- /dev/null +++ b/conf/cmperf/9.19.1/wafl_comp_aggr_vol_bin.yaml @@ -0,0 +1,27 @@ +name: WAFLCompBin +query: wafl_comp_aggr_vol_bin +object: fabricpool + +instance_key: uuid + +counters: + - cloud_bin_op_latency_average + - cloud_bin_operation + - comp_aggr_name + - instance_name + - object_store_name => cloud_target + - vol_name => volume + - vserver_name => svm + +plugins: + - FabricPool: + - include_constituents: false + +export_options: + instance_keys: + - cloud_target + - svm + - volume + +override: + - cloud_bin_operation: delta diff --git a/conf/cmperf/9.19.1/wafl_hya_per_aggr.yaml 
b/conf/cmperf/9.19.1/wafl_hya_per_aggr.yaml new file mode 100644 index 000000000..162a76910 --- /dev/null +++ b/conf/cmperf/9.19.1/wafl_hya_per_aggr.yaml @@ -0,0 +1,34 @@ + +name: WAFLAggr +query: wafl_hya_per_aggr +object: flashpool + +counters: + - evict_destage_rate + - evict_remove_rate + - hya_aggr_name => aggr + - hya_read_hit_latency_average + - hya_read_miss_latency_average + - hya_write_hdd_latency_average + - hya_write_ssd_latency_average + - instance_name + - node_name => node + - read_cache_ins_rate + - read_ops_replaced + - read_ops_replaced_percent + - ssd_available + - ssd_read_cached + - ssd_total + - ssd_total_used + - ssd_write_cached + - wc_write_blks_total + - write_blks_replaced + - write_blks_replaced_percent + +override: + - write_blks_replaced_percent: average + +export_options: + instance_keys: + - aggr + - node diff --git a/conf/cmperf/9.19.1/wafl_hya_sizer.yaml b/conf/cmperf/9.19.1/wafl_hya_sizer.yaml new file mode 100644 index 000000000..40c9b0846 --- /dev/null +++ b/conf/cmperf/9.19.1/wafl_hya_sizer.yaml @@ -0,0 +1,16 @@ + +name: WAFLSizer +query: wafl_hya_sizer +object: flashpool + +instance_key: uuid + +counters: + - cache_stats + - instance_name => aggr + - node_name => node + +export_options: + instance_keys: + - aggr + - node diff --git a/conf/cmperf/9.19.1/workload.yaml b/conf/cmperf/9.19.1/workload.yaml new file mode 100644 index 000000000..8dad59cda --- /dev/null +++ b/conf/cmperf/9.19.1/workload.yaml @@ -0,0 +1,55 @@ + +# object Workload provides counters about workload usage + +name: Workload +query: workload +object: qos + +instance_key: uuid + +# recommended to use large interval, since workload objects are expensive +client_timeout: 1m30s +schedule: + - data: 3m + +counters: + - concurrency + - instance_name + - instance_uuid + - latency + - ops + - other_ops + - read_data + - read_io_type + - read_latency + - read_ops + - sequential_reads + - sequential_writes + - total_data + - write_data + - write_latency + - write_ops + 
+override: + - read_io_type_base: delta + +qos_labels: + - vserver => svm + - volume + - qtree + - lun + - file + - policy-group + - wid + - workload-name => workload + +export_options: + instance_keys: + - file + - lun + - policy_group + - qtree + - svm + - volume + - wid + - workload diff --git a/conf/cmperf/9.19.1/workload_volume.yaml b/conf/cmperf/9.19.1/workload_volume.yaml new file mode 100644 index 000000000..6e5e576f7 --- /dev/null +++ b/conf/cmperf/9.19.1/workload_volume.yaml @@ -0,0 +1,57 @@ + +# object provides counters per volume for workloads tracked via "autovol" +# (i.e. not in a policy group) + +name: WorkloadVolume +query: workload_volume +object: qos + +# recommended to use large interval, since workload objects are expensive +client_timeout: 1m30s +schedule: + - data: 3m + +instance_key: name + +counters: + - concurrency + - instance_name + - instance_uuid + - latency + - ops + - other_ops + - read_data + - read_io_type + - read_latency + - read_ops + - sequential_reads + - sequential_writes + - total_data + - write_data + - write_latency + - write_ops + + +override: + - read_io_type_base: delta + +qos_labels: + - vserver => svm + - volume + - qtree + - lun + - file + - policy-group + - wid + - workload-name => workload + +export_options: + instance_keys: + - file + - lun + - policy_group + - qtree + - svm + - volume + - wid + - workload diff --git a/conf/cmperf/default.yaml b/conf/cmperf/default.yaml new file mode 100644 index 000000000..1368f1ff9 --- /dev/null +++ b/conf/cmperf/default.yaml @@ -0,0 +1,66 @@ + +collector: CmPerf + +# Order here matters! 
+schedule: + - counter: 24h + - data: 1m + +objects: + # Node-level metrics + CIFSNode: cifs_node.yaml + Disk: disk.yaml + ExtCacheObj: ext_cache_obj.yaml + FCVI: fcvi.yaml + FcpPort: fcp.yaml + FlexCache: flexcache.yaml + HeadroomAggr: resource_headroom_aggr.yaml + HeadroomCPU: resource_headroom_cpu.yaml + HostAdapter: hostadapter.yaml + Iwarp: iwarp.yaml + # Netstat: netstat.yaml + NFSv3Node: nfsv3_node.yaml + NFSv41Node: nfsv4_1_node.yaml + NFSv42Node: nfsv4_2_node.yaml + NFSv4Node: nfsv4_node.yaml + NVMfLif: nvmf_lif.yaml + Namespace: namespace.yaml + NicCommon: nic_common.yaml + NvmMirror: nvm_mirror.yaml + # ObjectStoreClient: object_store_client_op.yaml + Path: path.yaml + # Qtree: qtree.yaml #Enabling `qtree.yaml` may slow down data collection + Rwctx: rwctx.yaml + SystemNode: system_node.yaml + # TokenManager: token_manager.yaml + WAFL: wafl.yaml + WAFLAggr: wafl_hya_per_aggr.yaml + WAFLSizer: wafl_hya_sizer.yaml + # NFSv4Pool: nfsv4_pool.yaml + + # SVM-level metrics + CIFSvserver: cifs_vserver.yaml + CopyManager: copy_manager.yaml + # ExternalServiceOperation: external_service_operation.yaml + FcpLif: fcp_lif.yaml + FPolicy: fpolicy.yaml + FPolicyServer: fpolicy_server.yaml + FPolicySVM: fpolicy_svm.yaml + ISCSI: iscsi_lif.yaml + LIF: lif.yaml + Lun: lun.yaml + NFSv3: nfsv3.yaml + NFSv41: nfsv4_1.yaml + NFSv42: nfsv4_2.yaml + NFSv4: nfsv4.yaml + # NvmfRdmaPort: nvmf_rdma_port.yaml + # NvmfTcpPort: nvmf_tcp_port.yaml + # OntapS3SVM: ontap_s3_svm.yaml + SMB2: smb2.yaml + Volume: volume.yaml + WAFLCompBin: wafl_comp_aggr_vol_bin.yaml + Vscan: vscan.yaml + VscanSVM: vscan_svm.yaml + + Workload: workload.yaml + WorkloadVolume: workload_volume.yaml diff --git a/harvest.cue b/harvest.cue index 00554aa37..8eeab30fe 100644 --- a/harvest.cue +++ b/harvest.cue @@ -85,6 +85,7 @@ Pollers: [Name=_]: #Poller ca_cert?: string certificate_script?: #CertificateScript client_timeout?: string + cm_perf_manifest?: string collectors?: [...#CollectorDef] | [...string] 
conf_path?: string credentials_file?: string diff --git a/pkg/conf/collectors.go b/pkg/conf/collectors.go index f9dc419ef..d8282f8a1 100644 --- a/pkg/conf/collectors.go +++ b/pkg/conf/collectors.go @@ -11,6 +11,7 @@ func GetCollectorSlice() []string { var IsCollector = map[string]struct{}{ "CiscoRest": {}, + "CmPerf": {}, "Ems": {}, "Eseries": {}, "EseriesPerf": {}, @@ -30,6 +31,7 @@ var IsONTAPCollector = map[string]struct{}{ "Zapi": {}, "Rest": {}, "RestPerf": {}, + "CmPerf": {}, "StatPerf": {}, "KeyPerf": {}, "Ems": {}, diff --git a/pkg/conf/conf.go b/pkg/conf/conf.go index 2b8fe1ba9..b66a83b88 100644 --- a/pkg/conf/conf.go +++ b/pkg/conf/conf.go @@ -629,6 +629,7 @@ type Poller struct { AuthStyle string `yaml:"auth_style,omitempty"` CaCertPath string `yaml:"ca_cert,omitempty"` CertificateScript CertificateScript `yaml:"certificate_script,omitempty"` + CmPerfManifest string `yaml:"cm_perf_manifest,omitzero"` ClientTimeout string `yaml:"client_timeout,omitempty"` Collectors []Collector `yaml:"collectors,omitempty"` ConfPath string `yaml:"conf_path,omitempty"` From 4ca0e0accddbe6ad986a954bb86710890a9b5b32 Mon Sep 17 00:00:00 2001 From: Chris Grindstaff Date: Wed, 15 Apr 2026 09:05:27 -0400 Subject: [PATCH 2/2] feat: Harvest should include a cmperf collector --- cmd/collectors/cmperf/plugins/flexcache/flexcache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/collectors/cmperf/plugins/flexcache/flexcache.go b/cmd/collectors/cmperf/plugins/flexcache/flexcache.go index d604ea856..adcd75c76 100644 --- a/cmd/collectors/cmperf/plugins/flexcache/flexcache.go +++ b/cmd/collectors/cmperf/plugins/flexcache/flexcache.go @@ -5,7 +5,7 @@ import ( "github.com/netapp/harvest/v2/cmd/poller/plugin" ) -// New This uses the statperf flexcahe plugin implementation as the functionality is identical +// New This uses the statperf flexcache plugin implementation as the functionality is identical func New(p *plugin.AbstractPlugin) plugin.Plugin { return 
flexcache.New(p) }