diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9aa0cb0c3..374aa6411 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,187 @@
# Change Log
## [Releases](https://github.com/NetApp/harvest/releases)
+## 25.05.0 / 2025-05-19 Release
+:pushpin: Highlights of this major release include:
+## :star: New Features
+
+- Cisco Switch collector:
+ - Harvest collects metrics from all supported MetroCluster Cisco switches. More details [here](https://netapp.github.io/harvest/latest/configure-cisco-rest).
+ - Harvest collects environmental, ethernet, optics, interface, link layer discovery protocol (LLDP), Cisco discovery protocol (CDP), and version related details.
+ - Harvest includes a new Cisco switch dashboard. Thanks to @BrendonA667, Mamoep, and Eric Brüning for reporting and providing valuable feedback on this feature.
+
+- Harvest includes a new performance collector named KeyPerf, designed to gather performance counters from ONTAP objects that include a `statistics` field in their REST responses. More details [here](https://netapp.github.io/harvest/latest/configure-keyperf).
+
+- Harvest supports auditing volume operations such as create, delete, and modify via ONTAP CLI or REST commands, tracked through the `ONTAP: AuditLog` dashboard. Thanks @mvilam79 for reporting. More details [here](https://github.com/NetApp/harvest/discussions/3478).
+
+- Harvest supports filtering for the RestPerf collector. See [Filter](https://netapp.github.io/harvest/latest/configure-rest/#filter) for more detail.
+
+- Harvest collects vscan server pool active connections. Thanks @BrendonA667 for reporting.
+
+- Harvest collects uptime in lif perf templates and shows them in the SVM dashboard. Thanks to @Pengng88 for reporting.
+
+- Harvest collects volume footprint metrics and displays them through the Volume dashboard. Thanks to @Robert Brown for reporting.
+
+- Harvest includes a beta template to collect ethernet switch ports. Thanks to @Robert Watson for reporting!
+
+- :star: Several of the existing dashboards include new panels in this release:
+ - The `Disk` dashboard splits the CP panels in the `Disk Utilization` panel.
+ - The `Node` dashboard includes the Node column in the `Node Detail` panel.
+ - The `Quota` dashboard includes `Space Used` panel. Thanks @razaahmed for reporting.
+ - The `Aggregate` dashboard includes `Growth Rate` panel. Thanks @Preston Nguyen for reporting.
+ - The `Volume` dashboard includes `Growth Rate` panel. Thanks @Preston Nguyen for reporting.
+ - The `Volume` dashboard includes volume footprint metrics in `FabricPool` panel. Thanks @RBrown for reporting.
+
+## Announcements
+
+:bangbang: **IMPORTANT** If using Docker Compose and you want to keep your historical Prometheus data, please read [how to migrate your Prometheus volume](https://github.com/NetApp/harvest/blob/main/docs/MigratePrometheusDocker.md)
+
+:bulb: **IMPORTANT** After upgrade, don't forget to re-import your dashboards, so you get all the new enhancements and fixes. You can import them via the 'bin/harvest grafana import' CLI, from the Grafana UI, or from the 'Maintenance > Reset Harvest Dashboards' button in NAbox3. For NAbox4, this step is not needed.
+
+## Known Issues
+
+:bulb: **IMPORTANT** FSx ZapiPerf workload collector fails to collect metrics, please use RestPerf instead.
+
+## Thanks to all the awesome contributors
+
+:metal: Thanks to all the people who've opened issues, asked questions on Discord, and contributed code or dashboards this release:
+
+@WayneShen2, @mvilam79, @RobbW, @Robert Watson, @roller, @Pengng88, @gaur-piyush, @Chris Gautcher, @BrendonA667, @razaahmed, @nicolai-hornung-bl, @Preston Nguyen, @Robert Brown, @jay-law
+
+:seedling: This release includes 28 features, 28 bug fixes, 13 documentation, 17 refactoring, 16 miscellaneous, and 11 ci pull requests.
+
+### :rocket: Features
+- Disable qtree perf metrics for KeyPerf collector ([#3488](https://github.com/NetApp/harvest/pull/3488))
+- Volume Audit log ([#3479](https://github.com/NetApp/harvest/pull/3479))
+- Handled duplicate instance issue in clustersoftware plugin ([#3486](https://github.com/NetApp/harvest/pull/3486))
+- Split cp panels in disk dashboard ([#3496](https://github.com/NetApp/harvest/pull/3496))
+- Adding uptime in lif perf templates ([#3507](https://github.com/NetApp/harvest/pull/3507))
+- Harvest EMS Events label plugin ([#3511](https://github.com/NetApp/harvest/pull/3511))
+- Filter support for RestPerf Collector ([#3514](https://github.com/NetApp/harvest/pull/3514))
+- Adding vscan server pool rest template and plugin changes ([#3519](https://github.com/NetApp/harvest/pull/3519))
+- Synthesize a timestamp when it is missing from KeyPerf responses ([#3544](https://github.com/NetApp/harvest/pull/3544))
+- Node dashboard should include the Node column in the Node detai… ([#3553](https://github.com/NetApp/harvest/pull/3553))
+- Adding format for promql in cluster dashboard ([#3538](https://github.com/NetApp/harvest/pull/3538))
+- Harvest should monitor Cisco 3K and 9K switches ([#3559](https://github.com/NetApp/harvest/pull/3559))
+- Adding space used time series panel in quota dashboard ([#3561](https://github.com/NetApp/harvest/pull/3561))
+- Cisco collector should collect optics metrics ([#3575](https://github.com/NetApp/harvest/pull/3575))
+- Private CLI perf collector StatPerf ([#3566](https://github.com/NetApp/harvest/pull/3566))
+- Cisco collector should collect optics metrics for transceivers … ([#3580](https://github.com/NetApp/harvest/pull/3580))
+- Add growth rate panel for Aggregate ([#3582](https://github.com/NetApp/harvest/pull/3582))
+- Use timestamp provided by CLI in statperf ([#3585](https://github.com/NetApp/harvest/pull/3585))
+- Add crc error for switch interface ([#3590](https://github.com/NetApp/harvest/pull/3590))
+- Dedup statperf against other perf collectors ([#3592](https://github.com/NetApp/harvest/pull/3592))
+- Harvest should collect volume footprint metrics ([#3598](https://github.com/NetApp/harvest/pull/3598))
+- Harvest should collect ethernet switch ports ([#3601](https://github.com/NetApp/harvest/pull/3601))
+- Adding cisco switch dashboard ([#3574](https://github.com/NetApp/harvest/pull/3574))
+- Add growth rate for volume and aggregate ([#3610](https://github.com/NetApp/harvest/pull/3610))
+- Update Cisco dashboard units and comment ([#3613](https://github.com/NetApp/harvest/pull/3613))
+- Add Volume footprint metrics to Volume Dashboard ([#3624](https://github.com/NetApp/harvest/pull/3624))
+- Include checksums with release artifacts ([#3628](https://github.com/NetApp/harvest/pull/3628))
+- Cisco collector should collect CDP and LLDP metrics ([#3638](https://github.com/NetApp/harvest/pull/3638))
+
+### :bug: Bug Fixes
+- Handled empty node name in clustersoftware plugin ([#3460](https://github.com/NetApp/harvest/pull/3460))
+- Duplicate timeseries in volume dashboard ([#3483](https://github.com/NetApp/harvest/pull/3483))
+- Update title of number of snapmirror transfers ([#3485](https://github.com/NetApp/harvest/pull/3485))
+- Network dashboard link speed units should be Megabits per second ([#3491](https://github.com/NetApp/harvest/pull/3491))
+- Workload and workload_volume templates should invoke the instance task before the data task ([#3498](https://github.com/NetApp/harvest/pull/3498))
+- Handled empty scanner and export false case for vscan ([#3502](https://github.com/NetApp/harvest/pull/3502))
+- KeyPerf Collector Volume stats are incorrect for flexgroup ([#3520](https://github.com/NetApp/harvest/pull/3520))
+- EMS cache handling ([#3524](https://github.com/NetApp/harvest/pull/3524))
+- IWARP read and write IOPS for ZAPI should be expressed as rate ([#3550](https://github.com/NetApp/harvest/pull/3550))
+- Aligning Harvest Dashboard node metrics with ONTAP CLI Data ([#3549](https://github.com/NetApp/harvest/pull/3549))
+- Handle system:node deprecate metrics in ZapiPerf ([#3554](https://github.com/NetApp/harvest/pull/3554))
+- Update namespace counters ([#3558](https://github.com/NetApp/harvest/pull/3558))
+- StorageGrid Collector handles global_prefix inconsistently ([#3565](https://github.com/NetApp/harvest/pull/3565))
+- `grafana import` should add labels to all panel expressions when… ([#3567](https://github.com/NetApp/harvest/pull/3567))
+- Cisco environment plugin should trim watts ([#3572](https://github.com/NetApp/harvest/pull/3572))
+- Handle string parsing for switch templates ([#3578](https://github.com/NetApp/harvest/pull/3578))
+- yaml parsing should handle key/values with spaces, colons, quotes ([#3581](https://github.com/NetApp/harvest/pull/3581))
+- Handle array element for optic metrics ([#3589](https://github.com/NetApp/harvest/pull/3589))
+- Filter label for ems destination is missing ([#3596](https://github.com/NetApp/harvest/pull/3596))
+- Harvest should collect ethernet switch ports when timestamp is m… ([#3603](https://github.com/NetApp/harvest/pull/3603))
+- Handle histogram skips in exporter ([#3606](https://github.com/NetApp/harvest/pull/3606))
+- Handled nil aggr instance in aggr plugin ([#3607](https://github.com/NetApp/harvest/pull/3607))
+- Handle HA and volume move alerts ([#3611](https://github.com/NetApp/harvest/pull/3611))
+- Poller Union2 should handle prom_port ([#3614](https://github.com/NetApp/harvest/pull/3614))
+- Handle empty values in template ([#3626](https://github.com/NetApp/harvest/pull/3626))
+- Improve Cisco RCF parsing ([#3629](https://github.com/NetApp/harvest/pull/3629))
+- Grafana import should refuse to redirect ([#3632](https://github.com/NetApp/harvest/pull/3632))
+- Handle empty values in template ([#3627](https://github.com/NetApp/harvest/pull/3627))
+- Vscanpool plugin should only ask for fields it uses ([#3639](https://github.com/NetApp/harvest/pull/3639))
+- Handle uname in qtree zapi plugin ([#3641](https://github.com/NetApp/harvest/pull/3641))
+
+### :closed_book: Documentation
+- Add changelog discussion link ([#3495](https://github.com/NetApp/harvest/pull/3495))
+- Handled plugin custom prefix name for metrics ([#3493](https://github.com/NetApp/harvest/pull/3493))
+- Asar2 support ([#3535](https://github.com/NetApp/harvest/pull/3535))
+- Add labels metric doc ([#3532](https://github.com/NetApp/harvest/pull/3532))
+- Update private cli ONTAP link ([#3591](https://github.com/NetApp/harvest/pull/3591))
+- Harvest should document volume footprint metrics ([#3599](https://github.com/NetApp/harvest/pull/3599))
+- StatPerf collector documentation ([#3600](https://github.com/NetApp/harvest/pull/3600))
+- Document ethernet switch port counters ([#3604](https://github.com/NetApp/harvest/pull/3604))
+- Document CiscoRest collector ([#3619](https://github.com/NetApp/harvest/pull/3619))
+- Fix restperf filter doc ([#3622](https://github.com/NetApp/harvest/pull/3622))
+- Update metric doc ([#3634](https://github.com/NetApp/harvest/pull/3634))
+- Add beta to StatPerf docs ([#3635](https://github.com/NetApp/harvest/pull/3635))
+- Fix default schedule values for collector ([#3642](https://github.com/NetApp/harvest/pull/3642))
+
+### Refactoring
+- Remove tidwall match and pretty dependencies ([#3503](https://github.com/NetApp/harvest/pull/3503))
+- Update log message ([#3526](https://github.com/NetApp/harvest/pull/3526))
+- Debug build logs ([#3536](https://github.com/NetApp/harvest/pull/3536))
+- Revert debug build logs ([#3537](https://github.com/NetApp/harvest/pull/3537))
+- Replace benchmark.N with benchmark.Loop() ([#3547](https://github.com/NetApp/harvest/pull/3547))
+- Remove zapiperf debug log for qos ([#3560](https://github.com/NetApp/harvest/pull/3560))
+- Support root aggrs in rest template ([#3569](https://github.com/NetApp/harvest/pull/3569))
+- Replace `gopkg.in/yaml` with `github.com/goccy/go-yaml` ([#3573](https://github.com/NetApp/harvest/pull/3573))
+- Remove unnecessary debug logs ([#3579](https://github.com/NetApp/harvest/pull/3579))
+- Correct error messages for health ([#3583](https://github.com/NetApp/harvest/pull/3583))
+- Workaround Cisco truncation issue by using cli_show_array ([#3586](https://github.com/NetApp/harvest/pull/3586))
+- Eliminate superfluous error ([#3588](https://github.com/NetApp/harvest/pull/3588))
+- Handle histogram skips in exporter ([#3608](https://github.com/NetApp/harvest/pull/3608))
+- Capitalize the Grafana Cisco folder ([#3612](https://github.com/NetApp/harvest/pull/3612))
+- Improve Grafana import logging (#3620) ([#3630](https://github.com/NetApp/harvest/pull/3630))
+- Update instance generation in quota plugin ([#3637](https://github.com/NetApp/harvest/pull/3637))
+- Remove unused errors ([#3640](https://github.com/NetApp/harvest/pull/3640))
+
+### Miscellaneous
+- Merge release/25.02.0 into main ([#3474](https://github.com/NetApp/harvest/pull/3474))
+- Bump go.mod ([#3476](https://github.com/NetApp/harvest/pull/3476))
+- Update all dependencies ([#3477](https://github.com/NetApp/harvest/pull/3477))
+- Update all dependencies ([#3487](https://github.com/NetApp/harvest/pull/3487))
+- Update all dependencies ([#3499](https://github.com/NetApp/harvest/pull/3499))
+- Update all dependencies ([#3508](https://github.com/NetApp/harvest/pull/3508))
+- Update astral-sh/setup-uv digest to a4fd982 ([#3521](https://github.com/NetApp/harvest/pull/3521))
+- Update astral-sh/setup-uv digest to 2269511 ([#3525](https://github.com/NetApp/harvest/pull/3525))
+- Update all dependencies ([#3539](https://github.com/NetApp/harvest/pull/3539))
+- Update all dependencies ([#3548](https://github.com/NetApp/harvest/pull/3548))
+- Fix formatting ([#3552](https://github.com/NetApp/harvest/pull/3552))
+- Update astral-sh/setup-uv digest to 594f292 ([#3556](https://github.com/NetApp/harvest/pull/3556))
+- Update astral-sh/setup-uv digest to fb3a0a9 ([#3568](https://github.com/NetApp/harvest/pull/3568))
+- Update all dependencies ([#3576](https://github.com/NetApp/harvest/pull/3576))
+- Update all dependencies ([#3595](https://github.com/NetApp/harvest/pull/3595))
+- Update all dependencies ([#3615](https://github.com/NetApp/harvest/pull/3615))
+
+
+### :hammer: CI
+- The issue burn-down list should ignore status/done issues ([#3459](https://github.com/NetApp/harvest/pull/3459))
+- Bump go ([#3504](https://github.com/NetApp/harvest/pull/3504))
+- style: format match gjson file ([#3506](https://github.com/NetApp/harvest/pull/3506))
+- Bump dependencies ([#3517](https://github.com/NetApp/harvest/pull/3517))
+- Update config path ([#3523](https://github.com/NetApp/harvest/pull/3523))
+- Update rest role in cert ([#3527](https://github.com/NetApp/harvest/pull/3527))
+- Upgrade golangci-lint to v2.0.1 ([#3529](https://github.com/NetApp/harvest/pull/3529))
+- Bump go ([#3543](https://github.com/NetApp/harvest/pull/3543))
+- Fix lint warnings ([#3557](https://github.com/NetApp/harvest/pull/3557))
+- Update promtool path ([#3571](https://github.com/NetApp/harvest/pull/3571))
+- Handle ems_events error for ZAPI datacenter ([#3597](https://github.com/NetApp/harvest/pull/3597))
+- Bump go ([#3602](https://github.com/NetApp/harvest/pull/3602))
+- Handle duplicated definition of symbol dlopen error ([#3605](https://github.com/NetApp/harvest/pull/3605))
+
+---
+
## 25.02.0 / 2025-02-11 Release
:pushpin: Highlights of this major release include:
## :star: New Features
diff --git a/cmd/collectors/cisco/cisco.go b/cmd/collectors/cisco/cisco.go
index c2750cf82..0a42e99e2 100644
--- a/cmd/collectors/cisco/cisco.go
+++ b/cmd/collectors/cisco/cisco.go
@@ -2,7 +2,9 @@ package cisco
import (
"fmt"
+ "github.com/netapp/harvest/v2/cmd/collectors/cisco/plugins/cdp"
"github.com/netapp/harvest/v2/cmd/collectors/cisco/plugins/environment"
+ "github.com/netapp/harvest/v2/cmd/collectors/cisco/plugins/lldp"
"github.com/netapp/harvest/v2/cmd/collectors/cisco/plugins/networkinterface"
"github.com/netapp/harvest/v2/cmd/collectors/cisco/plugins/optic"
"github.com/netapp/harvest/v2/cmd/collectors/cisco/plugins/version"
@@ -193,10 +195,14 @@ func (c *CiscoRest) getClient(a *collector.AbstractCollector) (*rest.Client, err
func (c *CiscoRest) LoadPlugin(kind string, abc *plugin.AbstractPlugin) plugin.Plugin {
switch kind {
+ case "CDP":
+ return cdp.New(abc)
case "Environment":
return environment.New(abc)
case "Interface":
return networkinterface.New(abc)
+ case "LLDP":
+ return lldp.New(abc)
case "Optic":
return optic.New(abc)
case "Version":
diff --git a/cmd/collectors/cisco/plugins/cdp/cdp.go b/cmd/collectors/cisco/plugins/cdp/cdp.go
new file mode 100644
index 000000000..d254b0a33
--- /dev/null
+++ b/cmd/collectors/cisco/plugins/cdp/cdp.go
@@ -0,0 +1,176 @@
+package cdp
+
+import (
+ "fmt"
+ "github.com/netapp/harvest/v2/cmd/collectors/cisco/rest"
+ "github.com/netapp/harvest/v2/cmd/poller/plugin"
+ "github.com/netapp/harvest/v2/pkg/conf"
+ "github.com/netapp/harvest/v2/pkg/matrix"
+ "github.com/netapp/harvest/v2/pkg/util"
+ "github.com/netapp/harvest/v2/third_party/tidwall/gjson"
+ "log/slog"
+ "slices"
+ "strings"
+ "time"
+)
+
+const (
+ labels = "labels"
+)
+
+type CDP struct {
+ *plugin.AbstractPlugin
+ matrix *matrix.Matrix
+ client *rest.Client
+ templateObject string // object name from the template
+}
+
+func New(p *plugin.AbstractPlugin) plugin.Plugin {
+ return &CDP{AbstractPlugin: p}
+}
+
+func (c *CDP) Init(_ conf.Remote) error {
+ var (
+ client *rest.Client
+ err error
+ )
+
+ if err = c.InitAbc(); err != nil {
+ return fmt.Errorf("failed to initialize AbstractPlugin: %w", err)
+ }
+
+ timeout, _ := time.ParseDuration(rest.DefaultTimeout)
+
+ if client, err = rest.New(conf.ZapiPoller(c.ParentParams), timeout, c.Auth); err != nil {
+ return fmt.Errorf("error creating new client: %w", err)
+ }
+
+ c.client = client
+ c.templateObject = c.ParentParams.GetChildContentS("object")
+
+ c.matrix = matrix.New(c.Parent+".CDP", c.templateObject, c.templateObject)
+
+ return nil
+}
+
+func (c *CDP) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util.Metadata, error) {
+ data := dataMap[c.Object]
+ c.client.Metadata.Reset()
+
+ cdpMat, err := c.initMatrix(c.templateObject)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error while initializing matrix: %w", err)
+ }
+
+ // Set all global labels if they don't already exist
+ cdpMat.SetGlobalLabels(data.GetGlobalLabels())
+
+ data.Reset()
+
+ command := c.ParentParams.GetChildContentS("query")
+ output, err := c.client.CLIShowArray(command)
+
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to fetch data: %w", err)
+ }
+
+ c.parseCDP(output, cdpMat)
+
+ c.client.Metadata.NumCalls = 1
+ c.client.Metadata.BytesRx = uint64(len(output.Raw))
+ c.client.Metadata.PluginInstances = uint64(len(cdpMat.GetInstances()))
+
+ return []*matrix.Matrix{cdpMat}, c.client.Metadata, nil
+}
+
+func (c *CDP) initMatrix(name string) (*matrix.Matrix, error) {
+
+ mat := matrix.New(c.Parent+name, name, name)
+
+ if err := matrix.CreateMetric(labels, mat); err != nil {
+ return nil, fmt.Errorf("error while creating metric %s: %w", labels, err)
+ }
+
+ return mat, nil
+}
+
+func (c *CDP) parseCDP(output gjson.Result, mat *matrix.Matrix) {
+
+ rowQuery := "output.body.TABLE_cdp_neighbor_detail_info.ROW_cdp_neighbor_detail_info"
+
+ var models []Model
+
+ rows := output.Get(rowQuery)
+
+ if !rows.Exists() {
+ c.SLogger.Warn("Unable to parse CDP because rows are missing", slog.String("query", rowQuery))
+ return
+ }
+
+ rows.ForEach(func(_, value gjson.Result) bool {
+ cdpModel := NewCDPModel(value)
+ // Skip empty models
+ if cdpModel.DeviceID == "" {
+ return true
+ }
+ models = append(models, cdpModel)
+ return true
+ })
+
+ for _, model := range models {
+ instanceKey := model.DeviceID + model.PortID
+ instance, err := mat.NewInstance(instanceKey)
+ if err != nil {
+ c.SLogger.Warn("Failed to create cdp instance", slog.String("key", instanceKey))
+ continue
+ }
+
+ instance.SetLabel("capabilities", strings.Join(model.Capabilities, ","))
+ instance.SetLabel("device_id", model.DeviceID)
+ instance.SetLabel("local_interface_mac", model.LocalInterfaceMAC)
+ instance.SetLabel("platform_id", model.PlatformID)
+ instance.SetLabel("port_id", model.PortID)
+ instance.SetLabel("remote_interface_mac", model.RemoteInterfaceMAC)
+ instance.SetLabel("version", model.Version)
+
+ mat.GetMetric(labels).SetValueFloat64(instance, 1.0)
+ }
+}
+
+type Model struct {
+ Capabilities []string
+ DeviceID string
+ LocalInterfaceMAC string
+ PlatformID string
+ PortID string
+ RemoteInterfaceMAC string
+ TTL int64
+ Version string
+}
+
+func NewCDPModel(output gjson.Result) Model {
+
+ var m Model
+
+ m.DeviceID = output.Get("device_id").ClonedString()
+ m.PlatformID = output.Get("platform_id").ClonedString()
+ m.PortID = output.Get("port_id").ClonedString()
+ m.TTL = output.Get("ttl").Int()
+ m.Version = output.Get("version").ClonedString()
+ m.LocalInterfaceMAC = output.Get("local_intf_mac").ClonedString()
+ m.RemoteInterfaceMAC = output.Get("remote_intf_mac").ClonedString()
+
+ caps := output.Get("capability")
+ if caps.IsArray() {
+ caps.ForEach(func(_, value gjson.Result) bool {
+ m.Capabilities = append(m.Capabilities, value.String())
+ return true
+ })
+ } else if caps.Exists() {
+ m.Capabilities = []string{caps.ClonedString()}
+ }
+
+ slices.Sort(m.Capabilities)
+
+ return m
+}
diff --git a/cmd/collectors/cisco/plugins/environment/environment.go b/cmd/collectors/cisco/plugins/environment/environment.go
index 05e8d8ba9..463e0e748 100644
--- a/cmd/collectors/cisco/plugins/environment/environment.go
+++ b/cmd/collectors/cisco/plugins/environment/environment.go
@@ -415,7 +415,16 @@ func newFanModel9K(output gjson.Result, logger *slog.Logger) FanModel {
})
fanModel.fans = fans
- fanSpeed := output.Get("fandetails.TABLE_fan_zone_speed.ROW_fan_zone_speed.zonespeed").String()
+
+ fanSpeedQuery := "fandetails.TABLE_fan_zone_speed.ROW_fan_zone_speed.zonespeed"
+ fanSpeeds := output.Get(fanSpeedQuery)
+
+ if !fanSpeeds.Exists() {
+ logger.Warn("Unable to parse fan speed because rows are missing", slog.String("query", fanSpeedQuery))
+ return fanModel
+ }
+
+ fanSpeed := fanSpeeds.String()
speed := strings.ReplaceAll(strings.ReplaceAll(fanSpeed, "0x", ""), "0X", "")
fanModel.speed, err = strconv.ParseInt(speed, 16, 64)
if err != nil {
diff --git a/cmd/collectors/cisco/plugins/lldp/lldp.go b/cmd/collectors/cisco/plugins/lldp/lldp.go
new file mode 100644
index 000000000..862398b75
--- /dev/null
+++ b/cmd/collectors/cisco/plugins/lldp/lldp.go
@@ -0,0 +1,213 @@
+package lldp
+
+import (
+ "fmt"
+ "github.com/netapp/harvest/v2/cmd/collectors/cisco/rest"
+ "github.com/netapp/harvest/v2/cmd/poller/plugin"
+ "github.com/netapp/harvest/v2/pkg/conf"
+ "github.com/netapp/harvest/v2/pkg/matrix"
+ "github.com/netapp/harvest/v2/pkg/util"
+ "github.com/netapp/harvest/v2/third_party/tidwall/gjson"
+ "log/slog"
+ "slices"
+ "strings"
+ "time"
+)
+
+const (
+ labels = "labels"
+)
+
+type LLDP struct {
+ *plugin.AbstractPlugin
+ matrix *matrix.Matrix
+ client *rest.Client
+ templateObject string // object name from the template
+}
+
+func New(p *plugin.AbstractPlugin) plugin.Plugin {
+ return &LLDP{AbstractPlugin: p}
+}
+
+func (l *LLDP) Init(_ conf.Remote) error {
+ var (
+ client *rest.Client
+ err error
+ )
+
+ if err = l.InitAbc(); err != nil {
+ return fmt.Errorf("failed to initialize AbstractPlugin: %w", err)
+ }
+
+ timeout, _ := time.ParseDuration(rest.DefaultTimeout)
+
+ if client, err = rest.New(conf.ZapiPoller(l.ParentParams), timeout, l.Auth); err != nil {
+ return fmt.Errorf("error creating new client: %w", err)
+ }
+
+ l.client = client
+ l.templateObject = l.ParentParams.GetChildContentS("object")
+
+ l.matrix = matrix.New(l.Parent+".LLDP", l.templateObject, l.templateObject)
+
+ return nil
+}
+
+func (l *LLDP) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util.Metadata, error) {
+ data := dataMap[l.Object]
+ l.client.Metadata.Reset()
+
+ lldpMat, err := l.initMatrix(l.templateObject)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error while initializing matrix: %w", err)
+ }
+
+ // Set all global labels if they don't already exist
+ lldpMat.SetGlobalLabels(data.GetGlobalLabels())
+
+ data.Reset()
+
+ command := l.ParentParams.GetChildContentS("query")
+ output, err := l.client.CLIShowArray(command)
+
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to fetch data: %w", err)
+ }
+
+ l.parseLLDP(output, lldpMat)
+
+ l.client.Metadata.NumCalls = 1
+ l.client.Metadata.BytesRx = uint64(len(output.Raw))
+ l.client.Metadata.PluginInstances = uint64(len(lldpMat.GetInstances()))
+
+ return []*matrix.Matrix{lldpMat}, l.client.Metadata, nil
+}
+
+func (l *LLDP) initMatrix(name string) (*matrix.Matrix, error) {
+
+ mat := matrix.New(l.Parent+name, name, name)
+
+ if err := matrix.CreateMetric(labels, mat); err != nil {
+ return nil, fmt.Errorf("error while creating metric %s: %w", labels, err)
+ }
+
+ return mat, nil
+}
+
+func (l *LLDP) parseLLDP(output gjson.Result, mat *matrix.Matrix) {
+
+ rowQuery := "output.body.TABLE_nbor_detail.ROW_nbor_detail"
+
+ var models []Model
+
+ rows := output.Get(rowQuery)
+
+ if !rows.Exists() {
+ l.SLogger.Warn("Unable to parse LLDP because rows are missing", slog.String("query", rowQuery))
+ return
+ }
+
+ rows.ForEach(func(_, value gjson.Result) bool {
+ lldpModel := NewLLDPModel(value)
+ // Skip empty models
+ if lldpModel.DeviceID == "" {
+ return true
+ }
+ models = append(models, lldpModel)
+ return true
+ })
+
+ for _, model := range models {
+ instanceKey := model.ChassisID
+ instance, err := mat.NewInstance(instanceKey)
+ if err != nil {
+ l.SLogger.Warn("Failed to create lldp instance", slog.String("key", instanceKey))
+ continue
+ }
+
+ instance.SetLabel("device_id", model.DeviceID)
+ instance.SetLabel("description", model.Description)
+ instance.SetLabel("chassis", model.ChassisID)
+ instance.SetLabel("local_interface", model.LocalInterface)
+ instance.SetLabel("port_id", model.PortID)
+ instance.SetLabel("capabilities", strings.Join(model.Capabilities, ","))
+
+ mat.GetMetric(labels).SetValueFloat64(instance, 1.0)
+ }
+}
+
+type Model struct {
+ Capabilities []string
+ ChassisID string
+ Description string
+ DeviceID string
+ LocalInterface string
+ PortID string
+ TTL int64
+}
+
+func NewLLDPModel(output gjson.Result) Model {
+
+ var m Model
+
+ m.DeviceID = output.Get("sys_name").ClonedString()
+ m.Description = output.Get("sys_desc").ClonedString()
+ m.ChassisID = output.Get("chassis_id").ClonedString()
+ m.LocalInterface = output.Get("l_port_id").ClonedString()
+ m.TTL = output.Get("ttl").Int()
+ m.PortID = output.Get("port_id").ClonedString()
+ m.Capabilities = lldpCapabilities(output.Get("enabled_capability").String())
+
+ return m
+}
+
+func lldpCapabilities(capStr string) []string {
+ // show lldp neighbors detail
+ // "system_capability" : "B, R",
+ // Capability codes:
+ // (R) Router, (B) Bridge, (T) Telephone, (C) DOCSIS Cable Device
+ // (W) WLAN Access Point, (P) Repeater, (S) Station, (O) Other
+
+ var (
+ capabilities []string
+ code string
+ )
+
+ splits := strings.Split(capStr, ",")
+ for _, split := range splits {
+ letter := strings.TrimSpace(split)
+ // Ignore empty strings
+ if letter == "" {
+ continue
+ }
+
+ switch letter {
+ case "R":
+ code = "Router"
+ case "B":
+ code = "Bridge"
+ case "T":
+ code = "Telephone"
+ case "C":
+ code = "DOCSIS Cable Device"
+ case "W":
+ code = "WLAN Access Point"
+ case "P":
+ code = "Repeater"
+ case "S":
+ code = "Station"
+ case "O":
+ code = "Other"
+ default:
+ code = fmt.Sprintf("Unknown (%s)", letter)
+ }
+
+ if code != "" {
+ capabilities = append(capabilities, code)
+ }
+ }
+
+ slices.Sort(capabilities)
+
+ return capabilities
+}
diff --git a/cmd/collectors/cisco/plugins/version/version.go b/cmd/collectors/cisco/plugins/version/version.go
index 02b25c22a..d56c47cea 100644
--- a/cmd/collectors/cisco/plugins/version/version.go
+++ b/cmd/collectors/cisco/plugins/version/version.go
@@ -141,7 +141,10 @@ func (v *Version) parseVersionAndBanner(output gjson.Result, versionMat *matrix.
var filenameRegex = regexp.MustCompile(`(?m)Filename\s+:\s+(.*?)$`)
var generatorRegex = regexp.MustCompile(`Generator:\s+([^\s_]+)`)
-var versionRegex = regexp.MustCompile(`Version\s+:\s+(.*?)$`)
+var versionRegexes = []*regexp.Regexp{
+ regexp.MustCompile(`Version\s+:\s+(.*?)$`),
+ regexp.MustCompile(`Generator version:\s+([^\s_]+)`),
+}
type rcf struct {
Filename string
@@ -159,20 +162,26 @@ func parseRCF(banner string) rcf {
anRCF.Filename = matches[1]
- // There are two different kinds of banners.
+ // There are several different kinds of banners.
// One form looks like this:
// * Date : Generator: v1.6c 2023-12-05_001, file creation: 2024-07-29, 11:19:36
- // The other form looks like this:
+ // Another form looks like this:
// * Version : v1.10
+ // Another form looks like this:
+ // * Date : Generator version: v1.4a_2022-mm-dd_001
// The first form should extract version=v1.6c, the second form should extract v1.10
+ // The third form should extract v1.4a
matches = generatorRegex.FindStringSubmatch(banner)
if len(matches) == 2 {
anRCF.Version = matches[1]
} else {
- matches = versionRegex.FindStringSubmatch(banner)
- if len(matches) == 2 {
- anRCF.Version = matches[1]
+ for _, regex := range versionRegexes {
+ matches = regex.FindStringSubmatch(banner)
+ if len(matches) == 2 {
+ anRCF.Version = matches[1]
+ break
+ }
}
}
diff --git a/cmd/collectors/cisco/plugins/version/version_test.go b/cmd/collectors/cisco/plugins/version/version_test.go
index 5b714cc13..3fe7f4f73 100644
--- a/cmd/collectors/cisco/plugins/version/version_test.go
+++ b/cmd/collectors/cisco/plugins/version/version_test.go
@@ -35,6 +35,11 @@ func Test_parseRCF(t *testing.T) {
* Date : Generator: v1.6b_2023-07-18_001, file creation: 2024-02-15, 10:28:44`,
want: rcf{Version: "v1.6b", Filename: "NX3132Q-V_v2.00_Switch-A1.txt"},
},
+ {
+ name: "Generator with version", banner: `* Filename : NX3232_v1.90-X1_Switch-B2.txt
+* Date : Generator version: v1.4a_2022-mm-dd_001, file creation: 2024-02-15, 10:28:44`,
+ want: rcf{Version: "v1.4a", Filename: "NX3232_v1.90-X1_Switch-B2.txt"},
+ },
}
for _, tt := range tests {
diff --git a/cmd/collectors/rest/plugins/auditlog/auditlog_volume.go b/cmd/collectors/rest/plugins/auditlog/auditlog_volume.go
index 66500fb09..b0fc4373c 100644
--- a/cmd/collectors/rest/plugins/auditlog/auditlog_volume.go
+++ b/cmd/collectors/rest/plugins/auditlog/auditlog_volume.go
@@ -407,14 +407,6 @@ func (a *AuditLog) parseVolumeRecords(response []gjson.Result) {
instanceKey := application + location + user + svm + volume + uuid + handler.GetOperation() + object
if instance := mat.GetInstance(instanceKey); instance != nil {
a.setLogMetric(mat, instance, float64(auditTimeStamp))
- if err != nil {
- a.SLogger.Warn(
- "Unable to set value on metric",
- slogx.Err(err),
- slog.String("metric", "log"),
- )
- continue
- }
} else {
instance, err = mat.NewInstance(instanceKey)
if err != nil {
@@ -432,10 +424,6 @@ func (a *AuditLog) parseVolumeRecords(response []gjson.Result) {
instance.SetLabel("volume", volume)
instance.SetLabel("svm", svm)
a.setLogMetric(mat, instance, float64(auditTimeStamp))
- if err != nil {
- a.SLogger.Warn("error while setting metric value", slogx.Err(err))
- return
- }
}
}
}
diff --git a/cmd/collectors/rest/plugins/quota/quota.go b/cmd/collectors/rest/plugins/quota/quota.go
index 301c5a33d..0cd326f65 100644
--- a/cmd/collectors/rest/plugins/quota/quota.go
+++ b/cmd/collectors/rest/plugins/quota/quota.go
@@ -6,7 +6,6 @@ import (
"github.com/netapp/harvest/v2/pkg/matrix"
"github.com/netapp/harvest/v2/pkg/slogx"
"github.com/netapp/harvest/v2/pkg/util"
- "log/slog"
)
type Quota struct {
@@ -40,22 +39,7 @@ func (q *Quota) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util.
}
}
- // Purge and reset data
- instanceMap := data.GetInstances()
- metricsMap := data.GetMetrics()
- data.PurgeInstances()
- data.PurgeMetrics()
-
- for metricName, m := range metricsMap {
- _, err := data.NewMetricFloat64(metricName, m.GetName())
- if err != nil {
- q.SLogger.Error("add metric", slogx.Err(err))
- }
- }
-
- if err := q.handlingQuotaMetrics(instanceMap, metricsMap, data); err != nil {
- return nil, nil, err
- }
+ q.handlingQuotaMetrics(data)
if q.qtreeMetrics {
// metrics with qtree prefix and quota prefix are available to support backward compatibility
@@ -68,13 +52,11 @@ func (q *Quota) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util.
return nil, nil, nil
}
-func (q *Quota) handlingQuotaMetrics(instanceMap map[string]*matrix.Instance, metricMap map[string]*matrix.Metric, data *matrix.Matrix) error {
- for _, quota := range instanceMap {
+func (q *Quota) handlingQuotaMetrics(data *matrix.Matrix) {
+ for _, quota := range data.GetInstances() {
if !quota.IsExportable() {
continue
}
- index := quota.GetLabel("index")
- volumeUUID := quota.GetLabel("volume_uuid")
uName := quota.GetLabel("userName")
uid := quota.GetLabel("userId")
group := quota.GetLabel("groupName")
@@ -95,28 +77,19 @@ func (q *Quota) handlingQuotaMetrics(instanceMap map[string]*matrix.Instance, me
}
}
- for metricName, m := range metricMap {
+ for metricName, m := range data.GetMetrics() {
// set -1 for unlimited
value := -1.0
- quotaInstanceKey := index + volumeUUID + metricName
- quotaInstance, err := data.NewInstance(quotaInstanceKey)
- if err != nil {
- q.SLogger.Debug("add instance", slog.String("metricName", metricName), slogx.Err(err))
- return err
- }
- // set labels
- for k, v := range quota.GetLabels() {
- quotaInstance.SetLabel(k, v)
- }
if v, ok := m.GetValueFloat64(quota); ok {
// space limits are in bytes, converted to kibibytes to match ZAPI
if metricName == "space.hard_limit" || metricName == "space.soft_limit" || metricName == "space.used.total" {
value = v / 1024
- quotaInstance.SetLabel("unit", "kibibytes")
+ m.SetLabel("unit", "kibibytes")
if metricName == "space.soft_limit" {
t := data.GetMetric("threshold")
- t.SetValueFloat64(quotaInstance, value)
+ t.SetValueFloat64(quota, value)
+ t.SetLabel("unit", "kibibytes")
}
} else {
value = v
@@ -125,8 +98,7 @@ func (q *Quota) handlingQuotaMetrics(instanceMap map[string]*matrix.Instance, me
// populate numeric data
t := data.GetMetric(metricName)
- t.SetValueFloat64(quotaInstance, value)
+ t.SetValueFloat64(quota, value)
}
}
- return nil
}
diff --git a/cmd/collectors/rest/plugins/volume/volume.go b/cmd/collectors/rest/plugins/volume/volume.go
index 833183d39..526c7f575 100644
--- a/cmd/collectors/rest/plugins/volume/volume.go
+++ b/cmd/collectors/rest/plugins/volume/volume.go
@@ -140,6 +140,8 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util
}
}
+ flexgroupFootPrintMatrix := collectors.ProcessFlexGroupFootPrint(data, v.SLogger)
+
volumeMap, err := v.getVolumeInfo()
if err != nil {
v.SLogger.Error("Failed to collect volume info data", slogx.Err(err))
@@ -155,7 +157,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util
v.handleTags(data.GetGlobalLabels())
v.currentVal++
- return []*matrix.Matrix{v.arw, v.tags}, v.client.Metadata, nil
+ return []*matrix.Matrix{v.arw, v.tags, flexgroupFootPrintMatrix}, v.client.Metadata, nil
}
func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeMap map[string]volumeInfo) {
diff --git a/cmd/collectors/rest/plugins/vscanpool/vscanpool.go b/cmd/collectors/rest/plugins/vscanpool/vscanpool.go
index 6005783d5..a357b35c9 100644
--- a/cmd/collectors/rest/plugins/vscanpool/vscanpool.go
+++ b/cmd/collectors/rest/plugins/vscanpool/vscanpool.go
@@ -131,7 +131,7 @@ func (v *VscanPool) getVScanServerInfo() (map[string]map[string]string, error) {
serverMap := make(map[string][]ServerData)
vserverServerStateMap := make(map[string]map[string]string)
- fields := []string{"node.name", "svm.name", "ip", "update_time", "state", "interface.name"}
+ fields := []string{"svm.name", "ip", "update_time", "state"}
query := "api/protocols/vscan/server-status"
href := rest.NewHrefBuilder().
APIPath(query).
@@ -145,7 +145,11 @@ func (v *VscanPool) getVScanServerInfo() (map[string]map[string]string, error) {
for _, vscanServer := range result {
svmName := vscanServer.Get("svm.name").ClonedString()
- serverMap[svmName] = append(serverMap[svmName], ServerData{ip: vscanServer.Get("ip").ClonedString(), state: vscanServer.Get("state").ClonedString(), updateTime: collectors.HandleTimestamp(vscanServer.Get("update_time").ClonedString())})
+ serverMap[svmName] = append(serverMap[svmName], ServerData{
+ ip: vscanServer.Get("ip").ClonedString(),
+ state: vscanServer.Get("state").ClonedString(),
+ updateTime: collectors.HandleTimestamp(vscanServer.Get("update_time").ClonedString()),
+ })
}
for svm, serverData := range serverMap {
diff --git a/cmd/collectors/restperf/plugins/volume/volume_test.go b/cmd/collectors/restperf/plugins/volume/volume_test.go
index a7dcd8382..52aabe00d 100644
--- a/cmd/collectors/restperf/plugins/volume/volume_test.go
+++ b/cmd/collectors/restperf/plugins/volume/volume_test.go
@@ -238,6 +238,53 @@ func TestRunForAllImplementations(t *testing.T) {
}
}
+func TestProcessFlexGroupFootPrint(t *testing.T) {
+ logger := slog.Default()
+ data := matrix.New("volume", "volume", "volume")
+
+ // Create test data
+ instance1, _ := data.NewInstance("RahulTest__0001")
+ instance1.SetLabel("volume", "RahulTest__0001")
+ instance1.SetLabel("svm", "svm1")
+ instance1.SetLabel("style", "flexgroup_constituent")
+ instance1.SetLabel("aggr", "aggr1")
+
+ instance2, _ := data.NewInstance("RahulTest__0002")
+ instance2.SetLabel("volume", "RahulTest__0002")
+ instance2.SetLabel("svm", "svm1")
+ instance2.SetLabel("style", "flexgroup_constituent")
+ instance2.SetLabel("aggr", "aggr2")
+
+ instance3, _ := data.NewInstance("RahulTest__0003")
+ instance3.SetLabel("volume", "RahulTest__0003")
+ instance3.SetLabel("svm", "svm1")
+ instance3.SetLabel("style", "flexgroup_constituent")
+ instance3.SetLabel("aggr", "aggr3")
+
+ footprintMetric, _ := data.NewMetricFloat64("volume_blocks_footprint_bin0")
+ footprintMetric.SetValueFloat64(instance1, 20)
+ footprintMetric.SetValueFloat64(instance2, 50)
+ // Intentionally leave instance3 without a footprint value to test missing data handling
+
+ cache := collectors.ProcessFlexGroupFootPrint(data, logger)
+
+ flexgroupInstance := cache.GetInstance("svm1.RahulTest")
+ if flexgroupInstance == nil {
+ t.Fatalf("expected flexgroup instance 'svm1.RahulTest' to be created")
+ }
+
+ aggr := flexgroupInstance.GetLabel("aggr")
+ if aggr != "aggr1,aggr2,aggr3" {
+ t.Fatalf("expected flexgroup instance 'aggr1,aggr2,aggr3' to be created got '%s'", aggr)
+ }
+
+ if value, ok := cache.GetMetric("volume_blocks_footprint_bin0").GetValueFloat64(flexgroupInstance); !ok {
+ t.Error("Value [volume_blocks_footprint_bin0] missing")
+ } else if value != 70 {
+ t.Errorf("Value [volume_blocks_footprint_bin0] = (%f) incorrect, expected 70", value)
+ }
+}
+
func createRestVolume(params *node.Node) plugin.Plugin {
opts := options.New(options.WithConfPath("testdata/conf"))
opts.IsTest = true
diff --git a/cmd/collectors/volume.go b/cmd/collectors/volume.go
index 0add8c0e1..8d1aa2e03 100644
--- a/cmd/collectors/volume.go
+++ b/cmd/collectors/volume.go
@@ -14,6 +14,21 @@ import (
var flexgroupRegex = regexp.MustCompile(`^(.*)__(\d{4})$`)
+var footprintMetrics = map[string]struct{}{
+ "delayed_free_footprint": {}, // Rest, Zapi
+ "flexvol_metadata_footprint": {}, // Rest
+ "total_footprint": {}, // Rest, Zapi
+ "total_metadata_footprint": {}, // Rest, Zapi
+ "volume_blocks_footprint_bin0": {}, // Rest
+ "volume_blocks_footprint_bin1": {}, // Rest
+ "volume_guarantee_footprint": {}, // Rest
+ "metadata_footprint": {}, // Zapi
+ "guarantee_footprint": {}, // Zapi
+ "capacity_tier_footprint": {}, // Zapi
+ "performance_tier_footprint": {}, // Zapi
+
+}
+
func ProcessFlexGroupData(logger *slog.Logger, data *matrix.Matrix, style string, includeConstituents bool, opsKeyPrefix string, volumesMap map[string]string, enableVolumeAggrMatrix bool) ([]*matrix.Matrix, *util.Metadata, error) {
var err error
@@ -211,3 +226,87 @@ func ProcessFlexGroupData(logger *slog.Logger, data *matrix.Matrix, style string
}
return []*matrix.Matrix{cache}, nil, nil
}
+
+func ProcessFlexGroupFootPrint(data *matrix.Matrix, logger *slog.Logger) *matrix.Matrix {
+ fgAggrMap := make(map[string]*set.Set)
+
+ cache := data.Clone(matrix.With{Data: false, Metrics: true, Instances: false, ExportInstances: true})
+ cache.UUID += ".VolumeFootPrint.Flexgroup"
+ // remove instance_labels from this matrix otherwise it will emit volume_labels
+ cache.GetExportOptions().PopChildS("instance_labels")
+
+ for _, i := range data.GetInstances() {
+ volName := i.GetLabel("volume")
+ svmName := i.GetLabel("svm")
+ style := i.GetLabel("style")
+ if style != "flexgroup_constituent" {
+ continue
+ }
+ match := flexgroupRegex.FindStringSubmatch(volName)
+ if len(match) < 2 {
+ logger.Error("regex match failed or capture group missing", slog.String("volume", volName))
+ continue
+ }
+ key := svmName + "." + match[1]
+ if cache.GetInstance(key) == nil {
+ fg, _ := cache.NewInstance(key)
+ fg.SetLabels(maps.Clone(i.GetLabels()))
+ fg.SetLabel("volume", match[1])
+ fg.SetLabel("node", "")
+ fg.SetLabel("uuid", "")
+ fg.SetLabel("style", "flexgroup")
+ fgAggrMap[key] = set.New()
+ }
+ fgAggrMap[key].Add(i.GetLabel("aggr"))
+ }
+
+ for _, i := range data.GetInstances() {
+ volName := i.GetLabel("volume")
+ svmName := i.GetLabel("svm")
+ style := i.GetLabel("style")
+
+ if style != "flexgroup_constituent" {
+ continue
+ }
+ match := flexgroupRegex.FindStringSubmatch(volName)
+ if len(match) < 2 {
+ logger.Error("regex match failed or capture group missing", slog.String("volume", volName))
+ continue
+ }
+ key := svmName + "." + match[1]
+
+ fg := cache.GetInstance(key)
+ if fg == nil {
+ logger.Error("instance not in local cache", slog.String("key", key))
+ continue
+ }
+
+ aggrs := fgAggrMap[key].Values()
+ sort.Strings(aggrs)
+ fg.SetLabel("aggr", strings.Join(aggrs, ","))
+
+ for mkey, m := range data.GetMetrics() {
+ if !m.IsExportable() && m.GetType() != "float64" {
+ continue
+ }
+
+ _, ok := footprintMetrics[mkey]
+ if !ok {
+ continue
+ }
+
+ fgm := cache.GetMetric(mkey)
+ if fgm == nil {
+ logger.Error("metric not in local cache", slog.String("key", mkey))
+ continue
+ }
+
+ if value, ok := m.GetValueFloat64(i); ok {
+ fgv, _ := fgm.GetValueFloat64(fg)
+ fgm.SetValueFloat64(fg, fgv+value)
+ }
+ }
+ }
+
+ return cache
+}
diff --git a/cmd/collectors/zapi/plugins/qtree/qtree.go b/cmd/collectors/zapi/plugins/qtree/qtree.go
index 63a27fa91..32a71e9f5 100644
--- a/cmd/collectors/zapi/plugins/qtree/qtree.go
+++ b/cmd/collectors/zapi/plugins/qtree/qtree.go
@@ -306,11 +306,15 @@ func (q *Qtree) handlingQuotaMetrics(quotas []*node.Node, data *matrix.Matrix, q
}
if attrValue := quota.GetChildContentS(attribute); attrValue != "" {
+ userIdentifier := uName
+ if userIdentifier == "" {
+ userIdentifier = uid
+ }
// Ex. InstanceKey: SVMA.vol1Abc.qtree1.5.disk-limit
if q.client.IsClustered() {
- quotaInstanceKey = vserver + "." + volume + "." + tree + "." + uName + "." + attribute + "." + quotaType
+ quotaInstanceKey = vserver + "." + volume + "." + tree + "." + userIdentifier + "." + attribute + "." + quotaType
} else {
- quotaInstanceKey = volume + "." + tree + "." + uName + "." + attribute
+ quotaInstanceKey = volume + "." + tree + "." + userIdentifier + "." + attribute
}
quotaInstance, err := q.data.NewInstance(quotaInstanceKey)
if err != nil {
@@ -341,11 +345,7 @@ func (q *Qtree) handlingQuotaMetrics(quotas []*node.Node, data *matrix.Matrix, q
switch quotaType {
case "user":
- if uName != "" {
- quotaInstance.SetLabel("user", uName)
- } else if uid != "" {
- quotaInstance.SetLabel("user", uid)
- }
+ quotaInstance.SetLabel("user", userIdentifier)
case "group":
if uName != "" {
quotaInstance.SetLabel("group", uName)
diff --git a/cmd/collectors/zapi/plugins/qtree/qtree_test.go b/cmd/collectors/zapi/plugins/qtree/qtree_test.go
index 77b2ec0ea..794800f2b 100644
--- a/cmd/collectors/zapi/plugins/qtree/qtree_test.go
+++ b/cmd/collectors/zapi/plugins/qtree/qtree_test.go
@@ -11,7 +11,7 @@ import (
"testing"
)
-func NewQtree(historicalLabels bool) plugin.Plugin {
+func NewQtree(historicalLabels bool, testFileName string) plugin.Plugin {
params := node.NewS("Qtree")
pp := node.NewS("QtreeParent")
pp.NewChildS("poller_name", "test")
@@ -22,7 +22,7 @@ func NewQtree(historicalLabels bool) plugin.Plugin {
q.historicalLabels = historicalLabels
q.data = matrix.New(q.Parent+".Qtree", "quota", "quota")
q.client = client.NewTestClient()
- q.testFilePath = "testdata/quotas.xml"
+ q.testFilePath = testFileName
exportOptions := node.NewS("export_options")
instanceKeys := exportOptions.NewChildS("instance_keys", "")
// apply all instance keys, instance labels from qtree.yaml to all quota metrics
@@ -37,9 +37,10 @@ func NewQtree(historicalLabels bool) plugin.Plugin {
}
func TestRunForAllImplementations(t *testing.T) {
+ testFileName := "testdata/quotas.xml"
testCases := []struct {
name string
- createQtree func(historicalLabels bool) plugin.Plugin
+ createQtree func(historicalLabels bool, testFileName string) plugin.Plugin
historicalLabels bool
expectedQuotaCount int
expectedQtreeCount int
@@ -99,14 +100,14 @@ func TestRunForAllImplementations(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- runQtreeTest(t, tc.createQtree, tc.historicalLabels, tc.expectedQuotaCount, tc.expectedQtreeCount, tc.quotaInstanceKey, tc.expectedQuotaLabels, tc.withNonExportedQtree)
+ runQtreeTest(t, tc.createQtree, tc.historicalLabels, tc.expectedQuotaCount, tc.expectedQtreeCount, tc.quotaInstanceKey, tc.expectedQuotaLabels, tc.withNonExportedQtree, testFileName)
})
}
}
// Common test logic for Qtree plugin
-func runQtreeTest(t *testing.T, createQtree func(historicalLabels bool) plugin.Plugin, historicalLabels bool, expectedQuotaCount int, expectedQtreeCount int, quotaInstanceKey string, expectedQuotaLabels int, withNonExportedQtree bool) {
- q := createQtree(historicalLabels)
+func runQtreeTest(t *testing.T, createQtree func(historicalLabels bool, testFileName string) plugin.Plugin, historicalLabels bool, expectedQuotaCount int, expectedQtreeCount int, quotaInstanceKey string, expectedQuotaLabels int, withNonExportedQtree bool, testFileName string) {
+ q := createQtree(historicalLabels, testFileName)
// Initialize the plugin
if err := q.Init(conf.Remote{}); err != nil {
@@ -181,3 +182,50 @@ func verifyLabelCount(t *testing.T, quotaOutput *matrix.Matrix, quotaInstanceKey
t.Errorf("labels = %d; want %d", quotaLabels, expectedQuotaLabels)
}
}
+
+func TestUserIdentifierHandling(t *testing.T) {
+ testFileName := "testdata/quotas2.xml"
+ testCases := []struct {
+ name string
+ expectedInstanceKey []string
+ }{
+ {
+ name: "User identified by user ID",
+ expectedInstanceKey: []string{
+ "abcde.vol0..0.disk-limit.user",
+ "abcde.vol0..0.disk-used.user",
+ "abcde.vol0..1.disk-used.user",
+ "abcde.vol0..1.disk-limit.user"},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ q := NewQtree(false, testFileName)
+
+ if err := q.Init(conf.Remote{}); err != nil {
+ t.Fatalf("failed to initialize plugin: %v", err)
+ }
+
+ qtreeData := matrix.New("qtree", "qtree", "qtree")
+ qtreeInstance, _ := qtreeData.NewInstance("svm1.volume1.qtree1")
+ addLabels(qtreeInstance)
+
+ dataMap := map[string]*matrix.Matrix{
+ "qtree": qtreeData,
+ }
+
+ output, _, err := q.Run(dataMap)
+ if err != nil {
+ t.Fatalf("Run method failed: %v", err)
+ }
+
+ quotaOutput := output[0]
+ for _, iKey := range tc.expectedInstanceKey {
+ if quotaInstance := quotaOutput.GetInstance(iKey); quotaInstance == nil {
+ t.Errorf("expected instance key %s not found", tc.expectedInstanceKey)
+ }
+ }
+ })
+ }
+}
diff --git a/cmd/collectors/zapi/plugins/qtree/testdata/quotas2.xml b/cmd/collectors/zapi/plugins/qtree/testdata/quotas2.xml
new file mode 100644
index 000000000..cff8562aa
--- /dev/null
+++ b/cmd/collectors/zapi/plugins/qtree/testdata/quotas2.xml
@@ -0,0 +1,45 @@
+
+
+
+ -
+ 0
+ -
+ 6
+
+ user
+
+
+ 0
+ uid
+
+
+ 20
+ 30
+ 10
+
+ vol0
+ abcde
+
+
+ -
+ 0
+ -
+ 6
+
+ user
+
+
+ 1
+ uid
+
+
+ 20
+ 30
+ 10
+
+ vol0
+ abcde
+
+
+ 2
+
\ No newline at end of file
diff --git a/cmd/collectors/zapi/plugins/volume/volume.go b/cmd/collectors/zapi/plugins/volume/volume.go
index 350aad6b0..35f991291 100644
--- a/cmd/collectors/zapi/plugins/volume/volume.go
+++ b/cmd/collectors/zapi/plugins/volume/volume.go
@@ -108,13 +108,44 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, *util
}
// update volume instance labels
- v.updateVolumeLabels(data, volumeCloneMap, volumeFootprintMap)
+ v.updateVolumeLabels(data, volumeCloneMap)
+ flexgroupFootPrintMatrix := v.processVolumeFootPrint(data, volumeFootprintMap)
v.currentVal++
- return nil, v.client.Metadata, nil
+ return []*matrix.Matrix{flexgroupFootPrintMatrix}, v.client.Metadata, nil
}
-func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[string]volumeClone, volumeFootprintMap map[string]map[string]string) {
+func (v *Volume) processVolumeFootPrint(data *matrix.Matrix, volumeFootprintMap map[string]map[string]string) *matrix.Matrix {
+ var err error
+ // Handling volume footprint metrics
+ for _, volume := range data.GetInstances() {
+ name := volume.GetLabel("volume")
+ svm := volume.GetLabel("svm")
+ key := name + svm
+ if vf, ok := volumeFootprintMap[key]; ok {
+ for vfKey, vfVal := range vf {
+ vfMetric := data.GetMetric(vfKey)
+ if vfMetric == nil {
+ if vfMetric, err = data.NewMetricFloat64(vfKey); err != nil {
+ v.SLogger.Error("add metric", slogx.Err(err), slog.String("metric", vfKey))
+ continue
+ }
+ }
+
+ if vfVal != "" {
+ err := vfMetric.SetValueString(volume, vfVal)
+ if err != nil {
+ v.SLogger.Error("parse", slogx.Err(err), slog.String(vfKey, vfVal))
+ continue
+ }
+ }
+ }
+ }
+ }
+ return collectors.ProcessFlexGroupFootPrint(data, v.SLogger)
+}
+
+func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[string]volumeClone) {
var err error
for _, volume := range data.GetInstances() {
if !volume.IsExportable() {
@@ -169,28 +200,6 @@ func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[stri
splitEstimateBytes = splitEstimateBytes * 4 * 1024
splitEstimate.SetValueFloat64(volume, splitEstimateBytes)
}
-
- // Handling volume footprint metrics
- if vf, ok := volumeFootprintMap[key]; ok {
- for vfKey, vfVal := range vf {
- vfMetric := data.GetMetric(vfKey)
- if vfMetric == nil {
- if vfMetric, err = data.NewMetricFloat64(vfKey); err != nil {
- v.SLogger.Error("add metric", slogx.Err(err), slog.String("metric", vfKey))
- continue
- }
- }
-
- if vfVal != "" {
- vfMetricVal, err := strconv.ParseFloat(vfVal, 64)
- if err != nil {
- v.SLogger.Error("parse", slogx.Err(err), slog.String(vfKey, vfVal))
- continue
- }
- vfMetric.SetValueFloat64(volume, vfMetricVal)
- }
- }
- }
}
}
@@ -277,7 +286,7 @@ func (v *Volume) getVolumeFootprint() (map[string]map[string]string, error) {
capacityTierFootprint := footprint.GetChildContentS("volume-blocks-footprint-bin1")
capacityTierFootprintPerc := footprint.GetChildContentS("volume-blocks-footprint-bin1-percent")
delayedFreeFootprint := footprint.GetChildContentS("delayed-free-footprint")
- flexvolMetadataFootprint := footprint.GetChildContentS("flexvol-metadata-footprint")
+ metadataFootprint := footprint.GetChildContentS("flexvol-metadata-footprint")
totalFootprint := footprint.GetChildContentS("total-footprint")
totalMetadataFootprint := footprint.GetChildContentS("total-metadata-footprint")
volumeBlocksFootprint := footprint.GetChildContentS("volume-guarantee-footprint")
@@ -287,7 +296,7 @@ func (v *Volume) getVolumeFootprint() (map[string]map[string]string, error) {
footprintMetrics["capacity_tier_footprint"] = capacityTierFootprint
footprintMetrics["capacity_tier_footprint_percent"] = capacityTierFootprintPerc
footprintMetrics["delayed_free_footprint"] = delayedFreeFootprint
- footprintMetrics["flexvol_metadata_footprint"] = flexvolMetadataFootprint
+ footprintMetrics["metadata_footprint"] = metadataFootprint
footprintMetrics["total_footprint"] = totalFootprint
footprintMetrics["total_metadata_footprint"] = totalMetadataFootprint
footprintMetrics["guarantee_footprint"] = volumeBlocksFootprint
diff --git a/cmd/tools/generate/counter.yaml b/cmd/tools/generate/counter.yaml
index 87597dc85..a3d8433bf 100644
--- a/cmd/tools/generate/counter.yaml
+++ b/cmd/tools/generate/counter.yaml
@@ -1778,8 +1778,8 @@ counters:
ONTAPCounter: delayed-free-footprint
Template: conf/zapi/cdot/9.8.0/volume.yaml
- - Name: volume_flexvol_metadata_footprint
- Description: This field represents flexible volume metadata in bytes.
+ - Name: volume_metadata_footprint
+ Description: This field represents flexible volume or flexgroup metadata in bytes.
APIs:
- API: ZAPI
Endpoint: volume-footprint-get-iter
diff --git a/cmd/tools/grafana/grafana.go b/cmd/tools/grafana/grafana.go
index 06dcfc7a9..6dfbfbbf1 100644
--- a/cmd/tools/grafana/grafana.go
+++ b/cmd/tools/grafana/grafana.go
@@ -12,6 +12,7 @@ import (
"fmt"
"github.com/netapp/harvest/v2/pkg/conf"
"github.com/netapp/harvest/v2/pkg/requests"
+ "github.com/netapp/harvest/v2/pkg/slogx"
goversion "github.com/netapp/harvest/v2/third_party/go-version"
"github.com/netapp/harvest/v2/third_party/tidwall/gjson"
"github.com/netapp/harvest/v2/third_party/tidwall/sjson"
@@ -19,6 +20,7 @@ import (
"golang.org/x/text/cases"
"golang.org/x/text/language"
"io"
+ "log/slog"
"net/http"
"os"
"path/filepath"
@@ -72,6 +74,7 @@ type options struct {
customCluster string
varDefaults string
defaultDropdownMap map[string][]string
+ isDebug bool
}
type Folder struct {
@@ -433,6 +436,7 @@ func doImport(_ *cobra.Command, _ []string) {
printErrorAndExit(err)
}
+ setupSlog()
adjustOptions()
validateImport()
askForToken()
@@ -442,6 +446,20 @@ func doImport(_ *cobra.Command, _ []string) {
importDashboards(opts)
}
+func setupSlog() {
+ logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
+ AddSource: true,
+ ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
+ if a.Key == slog.SourceKey {
+ source := a.Value.Any().(*slog.Source)
+ source.File = filepath.Base(source.File)
+ }
+ return a
+ },
+ }))
+ slog.SetDefault(logger)
+}
+
func printErrorAndExit(err error) {
fmt.Println(err)
os.Exit(1)
@@ -464,6 +482,13 @@ func validateImport() {
fmt.Printf("No dashboards found in [%s] is the directory correct?\n", opts.dir)
os.Exit(1)
}
+
+ if opts.isDebug {
+ token := opts.token
+ opts.token = "****"
+ slog.Default().Info("validateImport", slog.Any("opts", opts))
+ opts.token = token
+ }
}
func initImportVars() {
@@ -1076,7 +1101,10 @@ func checkToken(opts *options, ignoreConfig bool, tries int) error {
opts.headers.Add("Content-Type", "application/json")
opts.headers.Add("Authorization", "Bearer "+opts.token)
- opts.client = &http.Client{Timeout: time.Duration(clientTimeout) * time.Second}
+ opts.client = &http.Client{
+ Timeout: time.Duration(clientTimeout) * time.Second,
+ CheckRedirect: refuseRedirect,
+ }
if strings.HasPrefix(opts.addr, "https://") {
tlsConfig := &tls.Config{InsecureSkipVerify: opts.useInsecureTLS} //nolint:gosec
opts.client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
@@ -1149,6 +1177,18 @@ func checkToken(opts *options, ignoreConfig bool, tries int) error {
return nil
}
+func refuseRedirect(req *http.Request, _ []*http.Request) error {
+ // Refuse to follow redirects, see https://github.com/NetApp/harvest/issues/3617
+ if req.Response != nil {
+ loc := req.Response.Header.Get("Location")
+ if loc != "" {
+ return fmt.Errorf("redirect not allowed. location=[%s] Check that addr is using the correct URL", loc)
+ }
+ }
+
+ return errors.New("redirect not allowed. Check that addr is using the correct URL")
+}
+
func isValidDatasource(result map[string]any) bool {
if result == nil {
fmt.Printf("warning: result is null.")
@@ -1309,7 +1349,7 @@ func sendRequest(opts *options, method, url string, query map[string]any) (map[s
}
if err = json.Unmarshal(data, &result); err != nil {
- fmt.Printf("raw response (%d - %s):\n", code, status)
+ fmt.Printf("raw response sr (%d - %s):\n", code, status)
fmt.Println(string(data))
}
return result, status, code, err
@@ -1325,7 +1365,7 @@ func sendRequestArray(opts *options, method, url string, query map[string]any) (
}
if err = json.Unmarshal(data, &result); err != nil {
- fmt.Printf("raw response (%d - %s):\n", code, status)
+ fmt.Printf("raw response sra (%d - %s):\n", code, status)
fmt.Println(string(data))
}
return result, status, code, err
@@ -1382,6 +1422,16 @@ func doRequest(opts *options, method, url string, query map[string]any) ([]byte,
request.Header = opts.headers
if response, err = opts.client.Do(request); err != nil {
+ if opts.isDebug {
+ slog.Default().Info(
+ "doRequest",
+ slog.String("method", method),
+ slog.String("url", url),
+ slog.String("status", status),
+ slog.Int("code", code),
+ slogx.Err(err),
+ )
+ }
return nil, status, code, err
}
@@ -1391,6 +1441,15 @@ func doRequest(opts *options, method, url string, query map[string]any) ([]byte,
//goland:noinspection GoUnhandledErrorResult
defer response.Body.Close()
data, err = io.ReadAll(response.Body)
+ if opts.isDebug {
+ slog.Default().Info(
+ "doRequest",
+ slog.String("method", method),
+ slog.String("url", url),
+ slog.String("status", status),
+ slog.String("data", string(data)),
+ )
+ }
return data, status, code, err
}
@@ -1506,6 +1565,7 @@ func addCommonFlags(commands ...*cobra.Command) {
cmd.PersistentFlags().StringVarP(&opts.datasource, "datasource", "s", DefaultDataSource, "Name of your Prometheus datasource used by the imported dashboards")
cmd.PersistentFlags().BoolVarP(&opts.variable, "variable", "v", false, "Use datasource as variable, overrides: --datasource")
cmd.PersistentFlags().StringVarP(&opts.dir, "directory", "d", "", "When importing, import dashboards from this local directory.\nWhen exporting, local directory to write dashboards to")
+ cmd.PersistentFlags().BoolVar(&opts.isDebug, "debug", false, "Enable debug logging")
_ = cmd.PersistentFlags().MarkHidden("svm-variable-regex")
_ = cmd.MarkPersistentFlagRequired("directory")
diff --git a/conf/ciscorest/default.yaml b/conf/ciscorest/default.yaml
index c763c9d05..d1ae18383 100644
--- a/conf/ciscorest/default.yaml
+++ b/conf/ciscorest/default.yaml
@@ -5,7 +5,9 @@ schedule:
- data: 3m
objects:
+ CDP: cdp.yaml
Environment: environment.yaml
Interface: interface.yaml
+ LLDP: lldp.yaml
Optic: optic.yaml
Version: version.yaml
diff --git a/conf/ciscorest/nxos/9.3.12/cdp.yaml b/conf/ciscorest/nxos/9.3.12/cdp.yaml
new file mode 100644
index 000000000..bb38728c1
--- /dev/null
+++ b/conf/ciscorest/nxos/9.3.12/cdp.yaml
@@ -0,0 +1,6 @@
+name: CDP
+query: "show cdp neighbors detail"
+object: cisco_cdp_neighbor
+
+plugins:
+ - CDP
\ No newline at end of file
diff --git a/conf/ciscorest/nxos/9.3.12/lldp.yaml b/conf/ciscorest/nxos/9.3.12/lldp.yaml
new file mode 100644
index 000000000..873c95762
--- /dev/null
+++ b/conf/ciscorest/nxos/9.3.12/lldp.yaml
@@ -0,0 +1,6 @@
+name: LLDP
+query: "show lldp neighbors detail"
+object: cisco_lldp_neighbor
+
+plugins:
+ - LLDP
\ No newline at end of file
diff --git a/conf/rest/9.12.0/quota.yaml b/conf/rest/9.12.0/quota.yaml
index 3742c68f7..8509a28cf 100644
--- a/conf/rest/9.12.0/quota.yaml
+++ b/conf/rest/9.12.0/quota.yaml
@@ -37,6 +37,5 @@ export_options:
- qtree
- svm
- type
- - unit
- user
- volume
\ No newline at end of file
diff --git a/conf/rest/9.12.0/volume.yaml b/conf/rest/9.12.0/volume.yaml
index d43c36bea..58e804c38 100644
--- a/conf/rest/9.12.0/volume.yaml
+++ b/conf/rest/9.12.0/volume.yaml
@@ -76,7 +76,7 @@ endpoints:
- ^^volume
- ^^vserver => svm
- delayed_free_footprint
- - flexvol_metadata_footprint
+ - flexvol_metadata_footprint => metadata_footprint
- total_footprint
- total_metadata_footprint
- volume_blocks_footprint_bin0 => performance_tier_footprint
diff --git a/conf/rest/9.14.0/volume.yaml b/conf/rest/9.14.0/volume.yaml
index e4137ea30..5322938e0 100644
--- a/conf/rest/9.14.0/volume.yaml
+++ b/conf/rest/9.14.0/volume.yaml
@@ -78,7 +78,7 @@ endpoints:
- ^^volume
- ^^vserver => svm
- delayed_free_footprint
- - flexvol_metadata_footprint
+ - flexvol_metadata_footprint => metadata_footprint
- total_footprint
- total_metadata_footprint
- volume_blocks_footprint_bin0 => performance_tier_footprint
diff --git a/docs/configure-cisco-rest.md b/docs/configure-cisco-rest.md
new file mode 100644
index 000000000..0f6fcf424
--- /dev/null
+++ b/docs/configure-cisco-rest.md
@@ -0,0 +1,79 @@
+## CiscoRest Collector
+
+The CiscoRest collector uses NX-API REST calls to collect data from Cisco switches.
+
+### Target System
+
+Harvest supports all Cisco switches listed in [NetApp's Hardware Universe](https://hwu.netapp.com/).
+
+### Requirements
+
+The NX-API feature must be enabled on the switch. No SDK or other requirements are needed. It is recommended to create a read-only user for Harvest on the Cisco switch (see
+[Prepare Cisco switch](prepare-cisco-switch.md) for details).
+
+### Metrics
+
+The collector collects a dynamic set of metrics via Cisco's NX-API. The switch returns JSON documents, and unlike other
+Harvest collectors, the CiscoRest collector does not provide template customization.
+
+## Parameters
+
+The parameters of the collector are distributed across three files:
+
+- [Harvest configuration file](configure-harvest-basic.md#pollers) (default: `harvest.yml`)
+- CiscoRest configuration file (default: `conf/ciscorest/default.yaml`)
+- Each object has its own configuration file (located in `conf/ciscorest/nxos/$version/`)
+
+Except for `addr` and `datacenter`, all other parameters of the CiscoRest collector can be defined in any of these three files. Parameters defined in a lower-level file override those in higher-level files. This allows you to configure each object individually or use the same parameters for all objects.
+
+The full set of parameters are described [below](#harvest-configuration-file).
+
+### Harvest configuration file
+
+Parameters in the poller section should define the following required parameters.
+
+| parameter | type | description | default |
+|------------------------|----------------------|--------------------------------------------------------------------------------|---------|
+| Poller name (header) | string, **required** | Poller name, user-defined value | |
+| `addr` | string, **required** | IPv4, IPv6 or FQDN of the target system | |
+| `datacenter` | string, **required** | Datacenter name, user-defined value | |
+| `username`, `password` | string, **required** | Cisco switch username and password with at least `network-operator` permissions| |
+| `collectors` | list, **required** | Name of collector to run for this poller, use `CiscoRest` for this collector | |
+
+### CiscoRest configuration file
+
+This configuration file contains a list of objects that should be collected and the filenames of their templates (explained in the next section).
+
+Additionally, this file contains the parameters that are applied as defaults to all objects. As mentioned before, any
+of these parameters can be defined in the Harvest or object configuration files as well.
+
+| parameter | type | description | default |
+|-------------------------|----------------------|-------------------------------------------------------------------------------|-----------|
+| `client_timeout` | duration (Go-syntax) | how long to wait for server responses | 30s |
+| `schedule` | list, **required** | how frequently to retrieve metrics from the Cisco switch | |
+| - `data` | duration (Go-syntax) | how frequently this collector/object should retrieve metrics from the Cisco switch | 5 minutes |
+| `only_cluster_instance` | bool, optional | don't require an instance key; assume the only instance is the cluster itself | |
+
+The template should define objects in the `objects` section. Example:
+
+```yaml
+objects:
+ Optic: optic.yaml
+```
+
+For each object, we define the filename of the object configuration file. The object configuration files
+are located in subdirectories matching the NX-OS version that was used to create these files. It is possible to
+have multiple version-subdirectories for multiple NX-OS versions. At runtime, the collector will select the object
+configuration file that most closely matches the version of the target Cisco switch.
+
+### Object configuration file
+
+The Object configuration file ("subtemplate") should contain the following parameters:
+
+| parameter | type | description | default |
+|------------------|----------------------|------------------------------------------------------------------------------------|---------|
+| `name` | string, **required** | display name of the collector that will collect this object | |
+| `query` | string, **required** | Cisco switch CLI command used to issue a REST request | |
+| `object` | string, **required** | short name of the object | |
+| `plugins` | list | plugins and their parameters to run on the collected data | |
+
diff --git a/docs/configure-keyperf.md b/docs/configure-keyperf.md
index 0605859c3..acd5b5ee3 100644
--- a/docs/configure-keyperf.md
+++ b/docs/configure-keyperf.md
@@ -37,7 +37,7 @@ Additionally, this file contains the parameters that are applied as defaults to
| `latency_io_reqd` | int, optional | threshold of IOPs for calculating latency metrics (latencies based on very few IOPs are unreliable) | 10 |
| `jitter` | duration (Go-syntax), optional | Each Harvest collector runs independently, which means that at startup, each collector may send its REST queries at nearly the same time. To spread out the collector startup times over a broader period, you can use `jitter` to randomly distribute collector startup across a specified duration. For example, a `jitter` of `1m` starts each collector after a random delay between 0 and 60 seconds. For more details, refer to [this discussion](https://github.com/NetApp/harvest/discussions/2856). | |
| `schedule` | list, required | the poll frequencies of the collector/object, should include exactly these three elements in the exact same other: | |
-| - `counter` | duration (Go-syntax) | poll frequency of updating the counter metadata cache | 20 minutes |
+| - `counter` | duration (Go-syntax) | poll frequency of updating the counter metadata cache | 24 hours |
| - `data` | duration (Go-syntax) | poll frequency of updating the data cache
**Note** Harvest allows defining poll intervals on sub-second level (e.g. `1ms`), however keep in mind the following:
- API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than `client_timeout`.
- Small poll intervals will create significant workload on the ONTAP system, as many counters are aggregated on-demand.
- Some metric values become less significant if they are calculated for very short intervals (e.g. latencies)
| 1 minute |
The template should define objects in the `objects` section. Example:
diff --git a/docs/configure-rest.md b/docs/configure-rest.md
index 150e072df..c9341e36b 100644
--- a/docs/configure-rest.md
+++ b/docs/configure-rest.md
@@ -75,9 +75,10 @@ of these parameters can be defined in the Harvest or object configuration files
| parameter | type | description | default |
|------------------|--------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------|
-| `client_timeout` | duration (Go-syntax) | how long to wait for server responses | 30s |
+| `client_timeout` | duration (Go-syntax) | how long to wait for server responses | 30s |
| `jitter` | duration (Go-syntax), optional | Each Harvest collector runs independently, which means that at startup, each collector may send its REST queries at nearly the same time. To spread out the collector startup times over a broader period, you can use `jitter` to randomly distribute collector startup across a specified duration. For example, a `jitter` of `1m` starts each collector after a random delay between 0 and 60 seconds. For more details, refer to [this discussion](https://github.com/NetApp/harvest/discussions/2856). | |
| `schedule` | list, **required** | how frequently to retrieve metrics from ONTAP | |
+| - `counter` | duration (Go-syntax) | poll frequency of updating the counter metadata cache | 24 hours |
| - `data` | duration (Go-syntax) | how frequently this collector/object should retrieve metrics from ONTAP | 3 minutes |
The template should define objects in the `objects` section. Example:
@@ -310,8 +311,7 @@ of these parameters can be defined in the Harvest or object configuration files
| `latency_io_reqd` | int, optional | threshold of IOPs for calculating latency metrics (latencies based on very few IOPs are unreliable) | 10 |
| `jitter` | duration (Go-syntax), optional | Each Harvest collector runs independently, which means that at startup, each collector may send its REST queries at nearly the same time. To spread out the collector startup times over a broader period, you can use `jitter` to randomly distribute collector startup across a specified duration. For example, a `jitter` of `1m` starts each collector after a random delay between 0 and 60 seconds. For more details, refer to [this discussion](https://github.com/NetApp/harvest/discussions/2856). | |
| `schedule` | list, required | the poll frequencies of the collector/object, should include exactly these three elements in the exact same other: | |
-| - `counter` | duration (Go-syntax) | poll frequency of updating the counter metadata cache | 20 minutes |
-| - `instance` | duration (Go-syntax) | poll frequency of updating the instance cache | 10 minutes |
+| - `counter` | duration (Go-syntax) | poll frequency of updating the counter metadata cache | 24 hours |
| - `data` | duration (Go-syntax) | poll frequency of updating the data cache
**Note** Harvest allows defining poll intervals on sub-second level (e.g. `1ms`), however keep in mind the following:
- API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than `client_timeout`.
- Small poll intervals will create significant workload on the ONTAP system, as many counters are aggregated on-demand.
- Some metric values become less significant if they are calculated for very short intervals (e.g. latencies)
| 1 minute |
The template should define objects in the `objects` section. Example:
@@ -383,8 +383,8 @@ You can include these fields under the `filter` parameter. For example, to filte
counters:
...
- filter:
- - name: *NS*|*Test*
- - svm.name: vs1
+ - name=*NS*|*Test*
+ - svm.name=vs1
```
## ONTAP Private CLI
diff --git a/docs/configure-statperf.md b/docs/configure-statperf.md
index 81e4e0e31..2d90efca1 100644
--- a/docs/configure-statperf.md
+++ b/docs/configure-statperf.md
@@ -1,5 +1,7 @@
# StatPerf Collector
+**Note: This collector is in beta.**
+
StatPerf collects performance metrics from ONTAP by invoking the ONTAP CLI statistics command via the private Rest CLI. The full ONTAP CLI command used is:
```bash
@@ -33,14 +35,14 @@ The StatPerf configuration file (also known as the "template") includes a list o
| Parameter | Type | Description | Default |
|--------------------|--------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------:|
-| `use_insecure_tls` | bool, optional | Skip verifying the TLS certificate of the target system. | false |
-| `client_timeout` | duration (Go-syntax) | Maximum time to wait for server responses. | 30s |
-| `latency_io_reqd` | int, optional | Threshold of IOPs for calculating latency metrics; latencies based on very few IOPs are unreliable. | 10 |
-| `jitter` | duration (Go-syntax), optional | Randomly delay collector startup by up to the specified duration to prevent simultaneous REST queries during startup. For example, a jitter value of `1m` will delay startup by a random duration between 0 seconds and 60 seconds. For more details, see [this discussion](https://github.com/NetApp/harvest/discussions/2856). | |
-| `schedule` | list, required | Specifies the polling frequencies, which must include exactly these three elements in the exact specified order: | |
-| - `counter` | duration (Go-syntax) | Poll frequency for updating the counter metadata cache. | 24 hours|
+| `use_insecure_tls` | bool, optional | Skip verifying the TLS certificate of the target system. | false |
+| `client_timeout` | duration (Go-syntax) | Maximum time to wait for server responses. | 30s |
+| `latency_io_reqd` | int, optional | Threshold of IOPs for calculating latency metrics; latencies based on very few IOPs are unreliable. | 10 |
+| `jitter` | duration (Go-syntax), optional | Randomly delay collector startup by up to the specified duration to prevent simultaneous REST queries during startup. For example, a jitter value of `1m` will delay startup by a random duration between 0 seconds and 60 seconds. For more details, see [this discussion](https://github.com/NetApp/harvest/discussions/2856). | |
+| `schedule` | list, required | Specifies the polling frequencies, which must include exactly these three elements in the exact specified order: | |
+| - `counter` | duration (Go-syntax) | Poll frequency for updating the counter metadata cache. | 24 hours |
| - `instance` | duration (Go-syntax) | Poll frequency for updating the instance cache. | 10 minutes|
-| - `data` | duration (Go-syntax) | Poll frequency for updating the data cache. Note that while Harvest allows sub-second poll intervals (e.g. `1ms`), factors such as API response times and system load should be considered. In short intervals, performance counters may not be aggregated accurately, potentially leading to a failed state in the collector if the poll interval is less than `client_timeout`. Additionally, very short intervals may cause heavier loads on the ONTAP system and lead to less meaningful metric values (e.g. for latencies). | 1 minute |
+| - `data` | duration (Go-syntax) | Poll frequency for updating the data cache. Note that while Harvest allows sub-second poll intervals (e.g. `1ms`), factors such as API response times and system load should be considered. In short intervals, performance counters may not be aggregated accurately, potentially leading to a failed state in the collector if the poll interval is less than `client_timeout`. Additionally, very short intervals may cause heavier loads on the ONTAP system and lead to less meaningful metric values (e.g. for latencies). | 1 minute |
The template should list objects in the `objects` section. For example:
diff --git a/docs/ontap-metrics.md b/docs/ontap-metrics.md
index a179195c7..f41144ff0 100644
--- a/docs/ontap-metrics.md
+++ b/docs/ontap-metrics.md
@@ -7,7 +7,7 @@ These can be generated on demand by running `bin/harvest grafana metrics`. See
- More information about ONTAP REST performance counters can be found [here](https://docs.netapp.com/us-en/ontap-pcmap-9121/index.html).
```
-Creation Date : 2025-May-07
+Creation Date : 2025-May-15
ONTAP Version: 9.15.1
```
## Understanding the structure
@@ -14545,16 +14545,6 @@ Filesystem size (in bytes) of the volume. This is the total usable size of the
| ZAPI | `volume-get-iter` | `volume-attributes.volume-space-attributes.filesystem-size` | conf/zapi/cdot/9.8.0/volume.yaml |
-### volume_flexvol_metadata_footprint
-
-This field represents flexible volume metadata in bytes.
-
-| API | Endpoint | Metric | Template |
-|--------|----------|--------|---------|
-| REST | `api/private/cli/volume/footprint` | `flexvol_metadata_footprint` | conf/rest/9.14.0/volume.yaml |
-| ZAPI | `volume-footprint-get-iter` | `flexvol-metadata-footprint` | conf/zapi/cdot/9.8.0/volume.yaml |
-
-
### volume_guarantee_footprint
This field represents the volume guarantee footprint in bytes. Alternatively, it is the space reserved for future writes in the volume.
@@ -14605,6 +14595,16 @@ This metric provides information about Volume
| ZAPI | `volume-get-iter` | `Harvest generated` | conf/zapi/cdot/9.8.0/volume.yaml |
+### volume_metadata_footprint
+
+This field represents flexible volume or flexgroup metadata in bytes.
+
+| API | Endpoint | Metric | Template |
+|--------|----------|--------|---------|
+| REST | `api/private/cli/volume/footprint` | `flexvol_metadata_footprint` | conf/rest/9.14.0/volume.yaml |
+| ZAPI | `volume-footprint-get-iter` | `flexvol-metadata-footprint` | conf/zapi/cdot/9.8.0/volume.yaml |
+
+
### volume_new_status
This metric indicates a value of 1 if the volume state is online (indicating the volume is operational) and a value of 0 for any other state.
diff --git a/docs/prepare-cisco-switch.md b/docs/prepare-cisco-switch.md
new file mode 100644
index 000000000..1d62cc11f
--- /dev/null
+++ b/docs/prepare-cisco-switch.md
@@ -0,0 +1,35 @@
+## Prepare Cisco switch
+
+NetApp Harvest requires login credentials to access Cisco switches. Although a generic admin account can be used, it
+is better to create a dedicated monitoring user with read-only permissions.
+
+If you want to create a dedicated monitoring user for Harvest, follow the steps below.
+
+1. ssh into the switch with a user that can create new users. e.g. `ssh admin@switch-ip`
+2. Create a new user with read-only permissions by running the following commands. Replace the example password `Netapp123` with a strong password.
+
+```bash
+configure terminal
+username ro_user role network-operator password Netapp123
+exit
+```
+
+## Enable NX-API on Cisco switch
+
+NetApp Harvest uses NX-API to collect metrics from Cisco switches. To enable NX-API on the switch, follow the steps below.
+
+1. ssh into the switch with a user that can enable NX-API. e.g. `ssh admin@switch-ip`
+2. Enable NX-API by running the following commands:
+
+```bash
+configure terminal
+feature nxapi
+exit
+```
+
+## Reference
+
+See [Configuring User Accounts and RBAC](https://www.cisco.com/c/en/us/td/docs/switches/datacenter/nexus9000/sw/93x/security/configuration/guide/b-cisco-nexus-9000-nx-os-security-configuration-guide-93x/b-cisco-nexus-9000-nx-os-security-configuration-guide-93x_chapter_01000.html)
+for more information on Cisco NX-OS user accounts and RBAC.
+
+See [NX-OS Programmability Guide](https://www.cisco.com/c/en/us/td/docs/switches/datacenter/nexus9000/sw/93x/progammability/guide/b-cisco-nexus-9000-series-nx-os-programmability-guide-93x/b-cisco-nexus-9000-series-nx-os-programmability-guide-93x_chapter_010011.html) for more information on the Cisco NX-API.
\ No newline at end of file
diff --git a/grafana/dashboards/cisco/cisco.json b/grafana/dashboards/cisco/cisco.json
index 2dc896758..ad6ec716b 100644
--- a/grafana/dashboards/cisco/cisco.json
+++ b/grafana/dashboards/cisco/cisco.json
@@ -1758,6 +1758,224 @@
"x": 0,
"y": 12
},
+ "id": 42,
+ "panels": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "Displays Link Layer Discovery Protocol (LLDP) neighbors.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "filterable": true
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 13
+ },
+ "id": 44,
+ "options": {
+ "showHeader": true
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "cisco_lldp_neighbor_labels{datacenter=~\"$Datacenter\",switch=~\"$Switch\"}",
+ "format": "table",
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Link Layer Discovery Protocol",
+ "transformations": [
+ {
+ "id": "filterFieldsByName",
+ "options": {
+ "include": {
+ "names": [
+ "capabilities",
+ "chassis",
+ "datacenter",
+ "description",
+ "device_id",
+ "local_interface",
+ "port_id",
+ "switch"
+ ]
+ }
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {},
+ "indexByName": {
+ "capabilities": 7,
+ "chassis": 2,
+ "datacenter": 0,
+ "description": 4,
+ "device_id": 3,
+ "local_interface": 5,
+ "port_id": 6,
+ "switch": 1
+ },
+ "renameByName": {
+ "capabilities": "Capabilities",
+ "chassis": "Chassis",
+ "datacenter": "Datacenter",
+ "description": "Description",
+ "device_id": "DeviceId",
+ "local_interface": "LocalInterface",
+ "port_id": "PortId",
+ "switch": "Switch"
+ }
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "Displays Cisco Discovery Protocol (CDP) neighbors.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "filterable": true
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 20
+ },
+ "id": 46,
+ "options": {
+ "showHeader": true
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "cisco_cdp_neighbor_labels{datacenter=~\"$Datacenter\",switch=~\"$Switch\"}",
+ "format": "table",
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Cisco Discovery Protocol",
+ "transformations": [
+ {
+ "id": "filterFieldsByName",
+ "options": {
+ "include": {
+ "names": [
+ "capabilities",
+ "datacenter",
+ "device_id",
+ "local_interface_mac",
+ "platform_id",
+ "port_id",
+ "remote_interface_mac",
+ "switch",
+ "version"
+ ]
+ }
+ }
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {},
+ "indexByName": {
+ "capabilities": 8,
+ "datacenter": 0,
+ "device_id": 3,
+ "local_interface_mac": 5,
+ "platform_id": 2,
+ "port_id": 6,
+ "remote_interface_mac": 7,
+ "switch": 1,
+ "version": 4
+ },
+ "renameByName": {
+ "capabilities": "Capabilities",
+ "datacenter": "Datacenter",
+ "device_id": "DeviceId",
+ "local_interface_mac": "LocalInterface",
+ "platform_id": "PlatformId",
+ "port_id": "PortId",
+ "remote_interface_mac": "RemoteInterface",
+ "switch": "Switch",
+ "version": "Version"
+ }
+ }
+ }
+ ],
+ "type": "table"
+ }
+ ],
+ "title": "Neighbors",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 13
+ },
"id": 25,
"panels": [
{
@@ -1803,7 +2021,7 @@
"h": 9,
"w": 4,
"x": 0,
- "y": 13
+ "y": 14
},
"id": 37,
"options": {
@@ -1896,7 +2114,7 @@
"h": 9,
"w": 20,
"x": 4,
- "y": 13
+ "y": 14
},
"id": 39,
"options": {
@@ -2014,7 +2232,7 @@
"h": 11,
"w": 24,
"x": 0,
- "y": 22
+ "y": 23
},
"id": 40,
"options": {
@@ -2296,5 +2514,5 @@
"timezone": "",
"title": "Cisco: Switch",
"uid": "cisco-switch",
- "version": 1
+ "version": 2
}
diff --git a/grafana/dashboards/cmode/volume.json b/grafana/dashboards/cmode/volume.json
index a1db58a43..8f52381d1 100644
--- a/grafana/dashboards/cmode/volume.json
+++ b/grafana/dashboards/cmode/volume.json
@@ -71,7 +71,7 @@
"gnetId": null,
"graphTooltip": 1,
"id": null,
- "iteration": 1746433866614,
+ "iteration": 1747041991384,
"links": [
{
"asDropdown": true,
@@ -910,7 +910,7 @@
"h": 9,
"w": 24,
"x": 0,
- "y": 25
+ "y": 14
},
"id": 18,
"interval": "1m",
@@ -1244,7 +1244,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 34
+ "y": 23
},
"id": 39,
"options": {
@@ -1400,7 +1400,7 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 34
+ "y": 23
},
"id": 41,
"options": {
@@ -1557,7 +1557,7 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 34
+ "y": 23
},
"id": 43,
"options": {
@@ -1744,7 +1744,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 41
+ "y": 30
},
"id": 40,
"options": {
@@ -1900,7 +1900,7 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 41
+ "y": 30
},
"id": 42,
"options": {
@@ -2057,7 +2057,7 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 41
+ "y": 30
},
"id": 44,
"options": {
@@ -6865,7 +6865,7 @@
"h": 12,
"w": 12,
"x": 0,
- "y": 25
+ "y": 57
},
"id": 93,
"options": {
@@ -6958,7 +6958,7 @@
"h": 12,
"w": 12,
"x": 12,
- "y": 25
+ "y": 57
},
"id": 94,
"options": {
@@ -7051,7 +7051,7 @@
"h": 12,
"w": 12,
"x": 0,
- "y": 37
+ "y": 69
},
"id": 95,
"options": {
@@ -7144,7 +7144,7 @@
"h": 12,
"w": 12,
"x": 12,
- "y": 37
+ "y": 69
},
"id": 96,
"options": {
@@ -7233,7 +7233,7 @@
"h": 12,
"w": 12,
"x": 0,
- "y": 49
+ "y": 81
},
"id": 90,
"options": {
@@ -7303,41 +7303,17 @@
"panels": [
{
"datasource": "${DS_PROMETHEUS}",
- "description": "Footprint of blocks written to the volume in bytes for the performance tier (bin 0).",
+ "description": "Display a list of volumes and metadata footprints in their associated aggregate.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "palette-classic"
+ "mode": "thresholds"
},
"custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
+ "align": "auto",
+ "displayMode": "auto",
+ "filterable": true
},
- "decimals": 2,
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -7351,208 +7327,972 @@
"value": 80
}
]
- },
- "unit": "bytes"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 12,
- "w": 12,
- "x": 0,
- "y": 26
- },
- "id": 119,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "lastNotNull",
- "max"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "sortBy": "Last *",
- "sortDesc": true
+ }
},
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "pluginVersion": "8.1.8",
- "targets": [
- {
- "exemplar": false,
- "expr": "volume_performance_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_performance_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
- "hide": false,
- "interval": "",
- "legendFormat": "{{svm}} - {{volume}} ",
- "refId": "A"
- }
- ],
- "title": "Top $TopResources Volumes by Performance Tier Footprint",
- "transformations": [],
- "type": "timeseries"
- },
- {
- "datasource": "${DS_PROMETHEUS}",
- "description": "Footprint of blocks written to the volume in performance tier (bin 0) as a percentage of aggregate size.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value #A"
},
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "decimals": 2,
- "mappings": [],
- "max": 100,
- "min": 0,
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
+ "properties": [
{
- "color": "red",
- "value": 80
+ "id": "unit",
+ "value": "bytes"
}
]
},
- "unit": "percent"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 12,
- "w": 12,
- "x": 12,
- "y": 26
- },
- "id": 120,
- "options": {
- "legend": {
- "calcs": [
- "mean",
- "lastNotNull",
- "max"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "sortBy": "Last *",
- "sortDesc": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
- },
- "pluginVersion": "8.1.8",
- "targets": [
- {
- "exemplar": false,
- "expr": "volume_performance_tier_footprint_percent{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_performance_tier_footprint_percent{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
- "hide": false,
- "interval": "",
- "legendFormat": "{{svm}} - {{volume}} ",
- "refId": "A"
- }
- ],
- "title": "Top $TopResources Volumes by Performance Tier Footprint %",
- "transformations": [],
- "type": "timeseries"
- },
- {
- "datasource": "${DS_PROMETHEUS}",
- "description": "Footprint of blocks written to the volume in bytes for capacity tier (bin 1).",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 10,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": true,
- "stacking": {
- "group": "A",
- "mode": "none"
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value #B"
},
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "decimals": 2,
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
+ "properties": [
{
- "color": "green",
- "value": null
- },
+ "id": "unit",
+ "value": "bytes"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value #C"
+ },
+ "properties": [
{
- "color": "red",
- "value": 80
+ "id": "unit",
+ "value": "bytes"
}
]
},
- "unit": "bytes"
- },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value #D"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "bytes"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value #E"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "bytes"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value #F"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "bytes"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Value #G"
+ },
+ "properties": [
+ {
+ "id": "unit",
+ "value": "bytes"
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "datacenter"
+ },
+ "properties": [
+ {
+ "id": "links",
+ "value": [
+ {
+ "targetBlank": true,
+ "title": "",
+ "url": "/d/cdot-datacenter/ontap-datacenter?orgId=1&${__url_time_range}&var-Datacenter=${__value.raw}"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cluster"
+ },
+ "properties": [
+ {
+ "id": "links",
+ "value": [
+ {
+ "targetBlank": true,
+ "title": "",
+ "url": "/d/cdot-cluster/ontap-cluster?orgId=1&${Datacenter:queryparam}&${__url_time_range}&var-Cluster=${__value.raw}"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "volume"
+ },
+ "properties": [
+ {
+ "id": "links",
+ "value": [
+ {
+ "targetBlank": true,
+ "title": "",
+ "url": "/d/cdot-volume/ontap-volume?orgId=1&${Datacenter:queryparam}&${Cluster:queryparam}&${SVM:queryparam}&${__url_time_range}&var-Volume=${__value.raw}"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "svm"
+ },
+ "properties": [
+ {
+ "id": "links",
+ "value": [
+ {
+ "targetBlank": true,
+ "title": "",
+ "url": "/d/cdot-svm/ontap-svm?orgId=1&${Datacenter:queryparam}&${Cluster:queryparam}&${__url_time_range}&var-SVM=${__value.raw}"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "aggr"
+ },
+ "properties": [
+ {
+ "id": "links",
+ "value": [
+ {
+ "targetBlank": true,
+ "title": "",
+ "url": "/d/cdot-aggregate/ontap-aggregate?orgId=1&${Datacenter:queryparam}&${Cluster:queryparam}&${__url_time_range}&var-Aggregate=${__value.raw}"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 14
+ },
+ "id": 185,
+ "options": {
+ "showHeader": true,
+ "sortBy": [
+ {
+ "desc": false,
+ "displayName": "Volume"
+ }
+ ]
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "volume_delayed_free_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}",
+ "format": "table",
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ },
+ {
+ "exemplar": false,
+ "expr": "volume_metadata_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "B"
+ },
+ {
+ "exemplar": false,
+ "expr": "volume_total_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "C"
+ },
+ {
+ "exemplar": false,
+ "expr": "volume_total_metadata_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "D"
+ },
+ {
+ "exemplar": false,
+ "expr": "volume_guarantee_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "E"
+ },
+ {
+ "exemplar": false,
+ "expr": "volume_performance_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "F"
+ },
+ {
+ "exemplar": false,
+ "expr": "volume_capacity_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}",
+ "format": "table",
+ "hide": false,
+ "instant": true,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "G"
+ }
+ ],
+ "title": "Volumes Footprint",
+ "transformations": [
+ {
+ "id": "filterFieldsByName",
+ "options": {
+ "include": {
+ "names": [
+ "aggr",
+ "cluster",
+ "datacenter",
+ "style",
+ "svm",
+ "volume",
+ "Value #A",
+ "Value #B",
+ "Value #C",
+ "Value #D",
+ "Value #E",
+ "Value #F",
+ "Value #G"
+ ]
+ }
+ }
+ },
+ {
+ "id": "merge",
+ "options": {}
+ },
+ {
+ "id": "organize",
+ "options": {
+ "excludeByName": {},
+ "indexByName": {
+ "Value #A": 7,
+ "Value #B": 8,
+ "Value #C": 9,
+ "Value #D": 10,
+ "Value #E": 11,
+ "Value #F": 12,
+ "Value #G": 13,
+ "aggr": 5,
+ "cluster": 1,
+ "datacenter": 0,
+ "node": 4,
+ "style": 6,
+ "svm": 2,
+ "volume": 3
+ },
+ "renameByName": {
+ "Value #A": "Delayed Free",
+ "Value #B": "Metadata",
+ "Value #C": "Total",
+ "Value #D": "Total Metadata",
+ "Value #E": "Guarantee",
+ "Value #F": "Performance Tier",
+ "Value #G": "Capacity Tier",
+ "aggr": "Aggregate",
+ "cluster": "Cluster",
+ "datacenter": "Datacenter",
+ "node": "Node",
+ "style": "Style",
+ "svm": "SVM",
+ "volume": "Volume"
+ }
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "Footprint of blocks written to the volume in bytes for the performance tier (bin 0).",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 0,
+ "y": 23
+ },
+ "id": 119,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "volume_performance_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_performance_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{svm}} - {{volume}} ",
+ "refId": "A"
+ }
+ ],
+ "title": "Top $TopResources Volumes by Performance Tier Footprint",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "Footprint of blocks written to the volume in performance tier (bin 0) as a percentage of aggregate size.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 12,
+ "y": 23
+ },
+ "id": 120,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "volume_performance_tier_footprint_percent{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_performance_tier_footprint_percent{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{svm}} - {{volume}} ",
+ "refId": "A"
+ }
+ ],
+ "title": "Top $TopResources Volumes by Performance Tier Footprint %",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "Footprint of blocks written to the volume in bytes for capacity tier (bin 1).",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 0,
+ "y": 35
+ },
+ "id": 121,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "volume_capacity_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_capacity_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{svm}} - {{volume}} ",
+ "refId": "A"
+ }
+ ],
+ "title": "Top $TopResources Volumes by Capacity Tier Footprint",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "Footprint of blocks written to the volume in capacity tier (bin 1) as a percentage of aggregate size.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 12,
+ "y": 35
+ },
+ "id": 122,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "volume_capacity_tier_footprint_percent{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_capacity_tier_footprint_percent{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{svm}} - {{volume}} ",
+ "refId": "A"
+ }
+ ],
+ "title": "Top $TopResources Volumes by Capacity Tier Footprint %",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "This field represents the delayed free blocks footprint. This is used to improve delete performance by batching delete requests.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 0,
+ "y": 47
+ },
+ "id": 179,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "volume_delayed_free_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_delayed_free_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{svm}} - {{volume}} ",
+ "refId": "A"
+ }
+ ],
+ "title": "Top $TopResources Volumes by Delayed Free Footprint",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "This represents flexible metadata for a volume.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 12,
+ "y": 47
+ },
+ "id": 181,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "volume_metadata_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_metadata_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{svm}} - {{volume}} ",
+ "refId": "A"
+ }
+ ],
+ "title": "Top $TopResources Volumes by Metadata Footprint",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "Volume total footprint.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
"overrides": []
},
"gridPos": {
"h": 12,
"w": 12,
"x": 0,
- "y": 38
+ "y": 59
},
- "id": 121,
+ "id": 180,
"options": {
"legend": {
"calcs": [
@@ -7574,20 +8314,20 @@
"targets": [
{
"exemplar": false,
- "expr": "volume_capacity_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_capacity_tier_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "expr": "volume_total_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_total_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
"hide": false,
"interval": "",
"legendFormat": "{{svm}} - {{volume}} ",
"refId": "A"
}
],
- "title": "Top $TopResources Volumes by Capacity Tier Footprint",
+ "title": "Top $TopResources Volumes by Total Footprint",
"transformations": [],
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
- "description": "Footprint of blocks written to the volume in capacity tier (bin 1) as a percentage of aggregate size.",
+ "description": "Total metadata footprint for a volume.",
"fieldConfig": {
"defaults": {
"color": {
@@ -7623,8 +8363,6 @@
},
"decimals": 2,
"mappings": [],
- "max": 100,
- "min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
@@ -7638,7 +8376,7 @@
}
]
},
- "unit": "percent"
+ "unit": "bytes"
},
"overrides": []
},
@@ -7646,9 +8384,9 @@
"h": 12,
"w": 12,
"x": 12,
- "y": 38
+ "y": 59
},
- "id": 122,
+ "id": 183,
"options": {
"legend": {
"calcs": [
@@ -7670,14 +8408,108 @@
"targets": [
{
"exemplar": false,
- "expr": "volume_capacity_tier_footprint_percent{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_capacity_tier_footprint_percent{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "expr": "volume_total_metadata_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_total_metadata_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
"hide": false,
"interval": "",
"legendFormat": "{{svm}} - {{volume}} ",
"refId": "A"
}
],
- "title": "Top $TopResources Volumes by Capacity Tier Footprint %",
+ "title": "Top $TopResources Volumes by Total Metadata Footprint",
+ "transformations": [],
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "This field represents the volume guarantee footprint. Alternatively, it is the space reserved for future writes in the volume.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 0,
+ "y": 71
+ },
+ "id": 182,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.1.8",
+ "targets": [
+ {
+ "exemplar": false,
+ "expr": "volume_guarantee_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}\nand on (datacenter, cluster, svm, volume)\n topk(\n $TopResources,\n (\n avg_over_time(\n volume_guarantee_footprint{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",style!=\"flexgroup_constituent\",svm=~\"$SVM\",volume=~\"$Volume\"}[3h] @ end()\n )\n * on (datacenter, cluster, svm, volume) group_left (node)\n (\n max by (datacenter, cluster, svm, volume, node) (\n volume_labels{cluster=~\"$Cluster\",datacenter=~\"$Datacenter\",svm=~\"$SVM\",tags=~\".*$Tag.*\",volume=~\"$Volume\"}\n )\n )\n )\n )",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{svm}} - {{volume}} ",
+ "refId": "A"
+ }
+ ],
+ "title": "Top $TopResources Volumes by Capacity Tier Footprint",
"transformations": [],
"type": "timeseries"
}
@@ -9737,5 +10569,5 @@
"timezone": "",
"title": "ONTAP: Volume",
"uid": "cdot-volume",
- "version": 34
+ "version": 35
}
diff --git a/integration/checksum/main.go b/integration/checksum/main.go
new file mode 100644
index 000000000..c60a6131c
--- /dev/null
+++ b/integration/checksum/main.go
@@ -0,0 +1,120 @@
+package main
+
+import (
+ "crypto/sha256"
+ "flag"
+ "fmt"
+ "github.com/Netapp/harvest-automation/test/utils"
+ "io"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+const (
+ checksumsFilename = "sha256sums.txt"
+)
+
+var (
+ dir = ""
+ checksumLocation = ""
+)
+
+func main() {
+ utils.SetupLogging()
+ parseCLI()
+ begin()
+}
+
+func parseCLI() {
+ flag.StringVar(&dir, "dir", "", "Directory of files to checksum. Required")
+ flag.StringVar(&checksumLocation, "out", ".", "Directory to write "+checksumsFilename)
+
+ flag.Parse()
+ if dir == "" {
+ printRequired("dir")
+ }
+}
+
+func begin() {
+ slog.Info("Check checksums for files", slog.String("dir", dir))
+ checksums, err := calculateSHA256s(dir)
+
+ if err != nil {
+ fatal(fmt.Errorf("failed to calculate checksums: %w", err))
+ }
+
+ file, err := os.Create(filepath.Join(checksumLocation, checksumsFilename))
+ if err != nil {
+ fatal(fmt.Errorf("failed to create checksums file: %w", err))
+ }
+
+ defer file.Close()
+
+ for _, c := range checksums {
+ if _, err := fmt.Fprintf(file, "%x %s\n", c.checksum, c.filename); err != nil {
+ fatal(fmt.Errorf("failed to write to checksums file: %w", err))
+ }
+ }
+
+ slog.Info("Checksums written to file",
+ slog.String("file", filepath.Join(checksumLocation, checksumsFilename)),
+ slog.Int("count", len(checksums)),
+ )
+}
+
+func fatal(err error) {
+ slog.Error(err.Error())
+ os.Exit(1)
+}
+
+func printRequired(name string) {
+ fmt.Printf("%s is required\n", name)
+ fmt.Printf("usage: \n")
+ flag.PrintDefaults()
+ os.Exit(1)
+}
+
+type checksumSHA256 struct {
+ filename string
+ checksum []byte
+}
+
+func calculateSHA256s(path string) ([]checksumSHA256, error) {
+ var checksums []checksumSHA256
+ path = fmt.Sprintf("%s%c", filepath.Clean(path), filepath.Separator)
+
+ calculateSHA256 := func(filepath string, d os.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if d.IsDir() {
+ return nil
+ }
+
+ file, err := os.Open(filepath)
+ if err != nil {
+ return err
+ }
+
+ defer file.Close()
+
+ hash := sha256.New()
+ if _, err = io.Copy(hash, file); err != nil {
+ return err
+ }
+
+ checksums = append(checksums, checksumSHA256{
+ filename: strings.TrimPrefix(filepath, path),
+ checksum: hash.Sum(nil),
+ })
+
+ return nil
+ }
+
+ if err := filepath.WalkDir(path, calculateSHA256); err != nil {
+ return nil, err
+ }
+ return checksums, nil
+}
diff --git a/integration/test/dashboard_json_test.go b/integration/test/dashboard_json_test.go
index a37c78dd6..4fe3cd98a 100644
--- a/integration/test/dashboard_json_test.go
+++ b/integration/test/dashboard_json_test.go
@@ -51,6 +51,9 @@ var zapiCounterMap = map[string]struct{}{
"fru_status": {},
"snapshot_policy_labels": {},
"cluster_schedule_labels": {},
+ // Skip this counter in CI environments because it was introduced in version 9.15.
+ // The CI currently operates with clusters running versions earlier than 9.15 for the ZAPI collector.
+ "volume_total_metadata_footprint": {},
}
// restCounterMap are additional counters, above and beyond the ones from counterMap, which should be excluded from Rest
diff --git a/jenkins/artifacts/jenkinsfile b/jenkins/artifacts/jenkinsfile
index 524d35cd3..2c44ced76 100644
--- a/jenkins/artifacts/jenkinsfile
+++ b/jenkins/artifacts/jenkinsfile
@@ -212,6 +212,17 @@ pipeline {
}
}
+ stage('Checksum artifacts') {
+ steps {
+ sh '''
+ export PATH=$PATH:/usr/local/go/bin
+ targetLocation=$targetParentLocation$VERSION-$RELEASE-$BRANCH
+ cd integration
+ go run checksum/main.go -dir $targetLocation -out $targetLocation
+ '''
+ }
+ }
+
stage('Publish builds locally'){
steps {
script {
diff --git a/mkdocs.yml b/mkdocs.yml
index 0d021847c..4d15b2aef 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -19,12 +19,13 @@ nav:
- 'Containerd': 'install/containerd.md'
- 'Native': 'install/native.md'
- System Requirements: 'system-requirements.md'
- - Prepare Monitored Clusters:
+ - Prepare Monitored Systems:
- 'ONTAP cDOT': 'prepare-cdot-clusters.md'
- 'ASA r2': 'asar2.md'
- 'Amazon FSx for ONTAP': 'prepare-fsx-clusters.md'
- 'ONTAP 7mode': 'prepare-7mode-clusters.md'
- 'StorageGRID': 'prepare-storagegrid-clusters.md'
+ - 'Cisco Switches': 'prepare-cisco-switch.md'
- Configure Harvest (basic): 'configure-harvest-basic.md'
- Configure Exporters:
- 'Prometheus': 'prometheus-exporter.md'
@@ -38,6 +39,7 @@ nav:
- 'EMS': 'configure-ems.md'
- 'StorageGRID': 'configure-storagegrid.md'
- 'Unix': 'configure-unix.md'
+ - 'CiscoRest': 'configure-cisco-rest.md'
- Templates: 'configure-templates.md'
- Dashboards: 'dashboards.md'
- Manage Harvest Pollers: 'manage-harvest.md'
diff --git a/pkg/matrix/matrix.go b/pkg/matrix/matrix.go
index 195d10c38..530ba95b9 100644
--- a/pkg/matrix/matrix.go
+++ b/pkg/matrix/matrix.go
@@ -78,11 +78,19 @@ func (m *Matrix) SetExportable(b bool) {
}
func (m *Matrix) Clone(with With) *Matrix {
- clone := &Matrix{UUID: m.UUID, Object: m.Object, Identifier: m.Identifier}
- clone.globalLabels = m.globalLabels
- clone.exportOptions = m.exportOptions
- clone.exportable = m.exportable
- clone.displayMetrics = make(map[string]string)
+ clone := &Matrix{
+ UUID: m.UUID,
+ Object: m.Object,
+ Identifier: m.Identifier,
+ globalLabels: m.globalLabels,
+ exportOptions: nil,
+ exportable: m.exportable,
+ displayMetrics: make(map[string]string),
+ }
+ // Deep clone exportOptions if it is not nil
+ if m.exportOptions != nil {
+ clone.exportOptions = m.exportOptions.Copy()
+ }
if with.Instances {
clone.instances = make(map[string]*Instance, len(m.GetInstances()))
diff --git a/pkg/tree/node/node.go b/pkg/tree/node/node.go
index 7775d85eb..607c7591a 100644
--- a/pkg/tree/node/node.go
+++ b/pkg/tree/node/node.go
@@ -478,6 +478,8 @@ func ToString(n ast.Node) string {
switch v := n.(type) {
case *ast.StringNode:
return v.Value
+ case *ast.NullNode:
+ return ""
default:
return n.String()
}
diff --git a/pkg/tree/tree_test.go b/pkg/tree/tree_test.go
index fffa5840d..43751a7bc 100644
--- a/pkg/tree/tree_test.go
+++ b/pkg/tree/tree_test.go
@@ -257,6 +257,7 @@ key2: "value with : colon"
"key with colon :" : val4
'keyWithSingleQuote': val5
key6: val6 #comment
+emptyKey: # This key has an empty value
`
n, err := LoadYaml([]byte(yamlTest))
@@ -275,6 +276,7 @@ key6: val6 #comment
{key: "key with colon :", value: "val4"},
{key: "keyWithSingleQuote", value: "val5"},
{key: "key6", value: "val6"},
+ {key: "emptyKey", value: ""},
}
for _, test := range tests {