Commit 9f6250e

profilecreator: add and use Alert function
We used the `logrus` package internally, but minimally, and pretty much only to report processing status to the user. This code was the last remaining consumer of that package, which we don't really need.

Add a neutral (neither log nor warning) minimal abstraction, `Alert`, similar in spirit to `ghw`, to be used when we need to notify the user about processing status. Make the backend easy to replace, so integrating this code elsewhere is easier. We don't expect this to happen, but the extra cost is negligible.

Remove the direct dependency on `logrus`. Note that quite a few indirect dependencies still pull it in.

Signed-off-by: Francesco Romani <fromani@redhat.com>
1 parent 92a4e35 commit 9f6250e
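
Since the new sink is a plain *log.Logger, an embedder that does want structured logging can still bridge back to logrus, which is what the commit message means by a replaceable backend. A minimal sketch, not part of this commit, assuming the repository's module path; logrus's Writer() returns an io.PipeWriter that forwards each written line at Info level:

    package main

    import (
        "log"

        "github.com/sirupsen/logrus"

        "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/profilecreator"
    )

    func main() {
        // Route every Alert through the standard logrus logger instead of bare stderr.
        // Note: the pipe writer logs asynchronously.
        profilecreator.SetAlertSink(log.New(logrus.StandardLogger().Writer(), "", 0))
        profilecreator.Alert("processed %d pools", 1)
    }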

File tree

7 files changed: +135, -111 lines


go.mod

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,6 @@ require (
 	github.com/openshift/library-go v0.0.0-20240419113445-f1541d628746
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.21.1
-	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.9.1
 	github.com/spf13/pflag v1.0.6
 	gopkg.in/fsnotify.v1 v1.4.7
@@ -141,6 +140,7 @@ require (
 	github.com/prometheus/common v0.62.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/vincent-petithory/dataurl v1.0.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
+package profilecreator
+
+import (
+	"log"
+	"os"
+)
+
+var alertSink *log.Logger
+
+func init() {
+	// intentionally no flags, for minimal output
+	alertSink = log.New(os.Stderr, "PPC: ", 0)
+}
+
+// Alert raises status messages. `profilecreator` is meant to run interactively,
+// so the default behavior is to bubble up the messages on the user console.
+// It is however possible to reroute the messages to any log.Logger compliant backend;
+// see the function SetAlertSink. All Alerts are considered Info messages.
+func Alert(format string, values ...any) {
+	alertSink.Printf(format, values...)
+}
+
+// Call this function before any other in the `profilecreator` package.
+func SetAlertSink(lh *log.Logger) {
+	alertSink = lh
+}
+
+func GetAlertSink() *log.Logger {
+	return alertSink
+}
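
Because the backend is swappable, a test (or an embedding program) can capture alerts instead of letting them reach stderr. A small sketch, again assuming the repository's module path:

    package profilecreator_test

    import (
        "bytes"
        "log"
        "testing"

        "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/profilecreator"
    )

    func TestAlertGoesToConfiguredSink(t *testing.T) {
        // Swap the default stderr sink for an in-memory buffer.
        var buf bytes.Buffer
        profilecreator.SetAlertSink(log.New(&buf, "PPC: ", 0))

        profilecreator.Alert("NUMA cell(s): %d", 2)

        if got, want := buf.String(), "PPC: NUMA cell(s): 2\n"; got != want {
            t.Fatalf("got %q, want %q", got, want)
        }
    }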

pkg/performanceprofile/profilecreator/cmd/info.go

Lines changed: 3 additions & 4 deletions
@@ -7,7 +7,6 @@ import (
 	"sort"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
 
@@ -95,13 +94,13 @@ func makeClusterInfoFromClusterData(cluster ClusterData) ClusterInfo {
 	for _, handle := range nodeHandlers {
 		topology, err := handle.SortedTopology()
 		if err != nil {
-			log.Infof("%s(Topology discovery error: %v)", handle.Node.GetName(), err)
+			Alert("%s(Topology discovery error: %v)", handle.Node.GetName(), err)
 			continue
 		}
 
 		htEnabled, err := handle.IsHyperthreadingEnabled()
 		if err != nil {
-			log.Infof("%s(HT discovery error: %v)", handle.Node.GetName(), err)
+			Alert("%s(HT discovery error: %v)", handle.Node.GetName(), err)
 		}
 
 		nInfo := NodeInfo{
@@ -136,7 +135,7 @@ func showClusterInfo(cInfo ClusterInfo, infoOpts *infoOptions) error {
 		fmt.Print(textInfo)
 		return nil
 	}
-	log.Infof("Cluster info:\n%s", textInfo)
+	Alert("Cluster info:\n%s", textInfo)
 	return nil
 }

pkg/performanceprofile/profilecreator/cmd/root.go

Lines changed: 7 additions & 9 deletions
@@ -22,7 +22,6 @@ import (
 	"os"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 
@@ -54,7 +53,7 @@ const (
 	// 3. nosoftlockup
 	// 4. nmi_watchdog=0
 	// 5. mce=off
-	// 6. skew_tick=1
+	Alertick = 1
 	//
 	// tuned configuration
 	// 1. stalld enabled
@@ -105,11 +104,10 @@ type ProfileData struct {
 // ClusterData collects the cluster wide information, each mcp points to a list of ghw node handlers
 type ClusterData map[string][]*profilecreator.GHWHandler
 
+// shortcut
+var Alert = profilecreator.Alert
+
 func init() {
-	log.SetOutput(os.Stderr)
-	log.SetFormatter(&log.TextFormatter{
-		DisableTimestamp: true,
-	})
 	utilruntime.Must(performancev2.AddToScheme(scheme))
 }
 
@@ -200,7 +198,7 @@ func makeNodesHandlers(mustGatherDirPath, poolName string, nodes []*corev1.Node)
 		sb.WriteString(node.Name + " ")
 	}
 	// NodePoolName is alias of MCPName
-	log.Infof("Nodes names targeted by %s pool are: %s", poolName, sb.String())
+	Alert("Nodes names targeted by %s pool are: %s", poolName, sb.String())
 	return handlers, nil
 }
 
@@ -310,8 +308,8 @@ func makeProfileDataFrom(nodeHandler *profilecreator.GHWHandler, args *ProfileCr
 	if err != nil {
 		return nil, fmt.Errorf("failed to compute the reserved and isolated CPUs: %v", err)
 	}
-	log.Infof("%d reserved CPUs allocated: %v ", reservedCPUs.Size(), reservedCPUs.String())
-	log.Infof("%d isolated CPUs allocated: %v", isolatedCPUs.Size(), isolatedCPUs.String())
+	Alert("%d reserved CPUs allocated: %v ", reservedCPUs.Size(), reservedCPUs.String())
+	Alert("%d isolated CPUs allocated: %v", isolatedCPUs.Size(), isolatedCPUs.String())
 	kernelArgs := profilecreator.GetAdditionalKernelArgs(args.DisableHT)
 	profileData := &ProfileData{
 		reservedCPUs: reservedCPUs.String(),

pkg/performanceprofile/profilecreator/mcp.go

Lines changed: 2 additions & 4 deletions
@@ -4,8 +4,6 @@ import (
 	"fmt"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
-
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -77,7 +75,7 @@ func GetNodesForPool(pool *mcfgv1.MachineConfigPool, clusterPools []*mcfgv1.Mach
 	for _, n := range clusterNodes {
 		p, err := getPrimaryPoolForNode(n, clusterPools)
 		if err != nil {
-			log.Warningf("can't get pool for node %q: %v", n.Name, err)
+			Alert("can't get pool for node %q: %v", n.Name, err)
 			continue
 		}
 		if p == nil {
@@ -121,7 +119,7 @@ func getPoolsForNode(node *corev1.Node, clusterPools []*mcfgv1.MachineConfigPool
 	if isWindows(node) {
 		// This is not an error, is this a Windows Node and it won't be managed by MCO. We're explicitly logging
 		// here at a high level to disambiguate this from other pools = nil scenario
-		log.Infof("Node %v is a windows node so won't be managed by MCO", node.Name)
+		Alert("Node %v is a windows node so won't be managed by MCO", node.Name)
 		return nil, nil
 	}

pkg/performanceprofile/profilecreator/profilecreator.go

Lines changed: 10 additions & 11 deletions
@@ -29,7 +29,6 @@ import (
 	"github.com/jaypipes/ghw/pkg/cpu"
 	"github.com/jaypipes/ghw/pkg/option"
 	"github.com/jaypipes/ghw/pkg/topology"
-	log "github.com/sirupsen/logrus"
 
 	configv1 "github.com/openshift/api/config/v1"
 	machineconfigv1 "github.com/openshift/api/machineconfiguration/v1"
@@ -96,7 +95,7 @@ func getMustGatherFullPathsWithFilter(mustGatherPath string, suffix string, filt
 		return "", fmt.Errorf("no match for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix)
 	}
 	if len(paths) > 1 {
-		log.Infof("Multiple matches for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix)
+		Alert("Multiple matches for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix)
 		return "", fmt.Errorf("Multiple matches for the specified must gather directory path: %s and suffix: %s.\n Expected only one performance-addon-operator-must-gather* directory, please check the must-gather tarball", mustGatherPath, suffix)
 	}
 	// returning one possible path
@@ -504,7 +503,7 @@ func getOfflinedCPUs(extCpuInfo *extendedCPUInfo, offlinedCPUCount int, disableH
 	}
 
 	if lpOfflined < offlinedCPUCount {
-		log.Warnf("could not offline enough logical processors (required:%d, offlined:%d)", offlinedCPUCount, lpOfflined)
+		Alert("could not offline enough logical processors (required:%d, offlined:%d)", offlinedCPUCount, lpOfflined)
 	}
 	return offlined.Result(), nil
 }
@@ -513,29 +512,29 @@ func updateTopologyInfo(topoInfo *topology.Info, disableHTFlag bool, htEnabled b
 	//currently HT is enabled on the system and the user wants to disable HT
 
 	if htEnabled && disableHTFlag {
-		log.Infof("Updating Topology info because currently hyperthreading is enabled and the performance profile will disable it")
+		Alert("Updating Topology info because currently hyperthreading is enabled and the performance profile will disable it")
 		return topologyHTDisabled(topoInfo), nil
 	}
 	return topoInfo, nil
 }
 
 func getReservedCPUs(topologyInfo *topology.Info, reservedCPUCount int, splitReservedCPUsAcrossNUMA bool, disableHTFlag bool, htEnabled bool) (cpuset.CPUSet, error) {
 	if htEnabled && disableHTFlag {
-		log.Infof("Currently hyperthreading is enabled and the performance profile will disable it")
+		Alert("Currently hyperthreading is enabled and the performance profile will disable it")
 		htEnabled = false
 	}
-	log.Infof("NUMA cell(s): %d", len(topologyInfo.Nodes))
+	Alert("NUMA cell(s): %d", len(topologyInfo.Nodes))
 	totalCPUs := 0
 	for id, node := range topologyInfo.Nodes {
 		coreList := []int{}
 		for _, core := range node.Cores {
 			coreList = append(coreList, core.LogicalProcessors...)
 		}
-		log.Infof("NUMA cell %d : %v", id, coreList)
+		Alert("NUMA cell %d : %v", id, coreList)
 		totalCPUs += len(coreList)
 	}
 
-	log.Infof("CPU(s): %d", totalCPUs)
+	Alert("CPU(s): %d", totalCPUs)
 
 	if splitReservedCPUsAcrossNUMA {
 		res, err := getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfo.Nodes)
@@ -616,7 +615,7 @@ func getCPUsSplitAcrossNUMA(reservedCPUCount int, htEnabled bool, topologyInfoNo
 	reservedPerNuma := reservedCPUCount / numaNodeNum
 	remainder := reservedCPUCount % numaNodeNum
 	if remainder != 0 {
-		log.Warnf("The reserved CPUs cannot be split equally across NUMA Nodes")
+		Alert("The reserved CPUs cannot be split equally across NUMA Nodes")
 	}
 	for numaID, node := range topologyInfoNodes {
 		if remainder != 0 {
@@ -732,7 +731,7 @@ func ensureSameTopology(topology1, topology2 *topology.Info, tols toleration.Set
 			// as the NUMA cells have same logical processors' count and IDs and same threads' number,
 			// core ID equality is treated as best effort. That is because when scheduling workloads,
 			// we care about the logical processors ids and their location on the NUMAs.
-			log.Warnf("the CPU core ids in NUMA node %d differ: %d vs %d", node1.ID, core1.ID, cores2[j].ID)
+			Alert("the CPU core ids in NUMA node %d differ: %d vs %d", node1.ID, core1.ID, cores2[j].ID)
 			tols[toleration.DifferentCoreIDs] = true
 		}
 		if core1.NumThreads != cores2[j].NumThreads {
@@ -753,7 +752,7 @@ func GetAdditionalKernelArgs(disableHT bool) []string {
 		kernelArgs = append(kernelArgs, noSMTKernelArg)
 	}
 	sort.Strings(kernelArgs)
-	log.Infof("Additional Kernel Args based on configuration: %v", kernelArgs)
+	Alert("Additional Kernel Args based on configuration: %v", kernelArgs)
 	return kernelArgs
 }
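
For reference, the getCPUsSplitAcrossNUMA hunk above warns exactly when the reserved CPUs do not divide evenly across NUMA cells; a worked example with hypothetical numbers:

    reservedCPUCount, numaNodeNum := 10, 4
    reservedPerNuma := reservedCPUCount / numaNodeNum // 2 CPUs per NUMA cell
    remainder := reservedCPUCount % numaNodeNum       // 2; non-zero, so the Alert fires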
