Commit 5b573a6

Add Unit Tests
1 parent 2f60c03 commit 5b573a6

6 files changed: +58 lines, -8 lines

cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go

Lines changed: 4 additions & 0 deletions
@@ -1734,6 +1734,7 @@ func TestNodeGroupGetOptions(t *testing.T) {
 		ScaleDownUnneededTime:            time.Second,
 		ScaleDownUnreadyTime:             time.Minute,
 		MaxNodeProvisionTime:             15 * time.Minute,
+		MaxNodeStartupTime:               35 * time.Minute,
 	}
 
 	cases := []struct {
@@ -1754,13 +1755,15 @@ func TestNodeGroupGetOptions(t *testing.T) {
 				config.DefaultScaleDownUnneededTimeKey: "1h",
 				config.DefaultScaleDownUnreadyTimeKey:  "30m",
 				config.DefaultMaxNodeProvisionTimeKey:  "60m",
+				config.DefaultMaxNodeStartupTimeKey:    "35m",
 			},
 			expected: &config.NodeGroupAutoscalingOptions{
 				ScaleDownGpuUtilizationThreshold: 0.6,
 				ScaleDownUtilizationThreshold:    0.7,
 				ScaleDownUnneededTime:            time.Hour,
 				ScaleDownUnreadyTime:             30 * time.Minute,
 				MaxNodeProvisionTime:             60 * time.Minute,
+				MaxNodeStartupTime:               35 * time.Minute,
 			},
 		},
 		{
@@ -1775,6 +1778,7 @@ func TestNodeGroupGetOptions(t *testing.T) {
 				ScaleDownUnneededTime: time.Minute,
 				ScaleDownUnreadyTime:  defaultOptions.ScaleDownUnreadyTime,
 				MaxNodeProvisionTime:  15 * time.Minute,
+				MaxNodeStartupTime:    35 * time.Minute,
 			},
 		},
 		{
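For context on what these cases exercise: a per-node-group value supplied under config.DefaultMaxNodeStartupTimeKey (here "35m") should win over the global default, while groups that set nothing inherit the default. A minimal sketch of that resolution rule, using a hypothetical helper and key name rather than the autoscaler's actual code:

package main

import (
	"fmt"
	"time"
)

// resolveMaxNodeStartupTime is a hypothetical helper (not the autoscaler's
// real API) illustrating the override rule the test cases encode: a
// per-node-group duration string such as "35m" takes precedence over the
// global default.
func resolveMaxNodeStartupTime(globalDefault time.Duration, perNodeGroup map[string]string) (time.Duration, error) {
	if raw, ok := perNodeGroup["maxnodestartuptime"]; ok { // key name assumed for the sketch
		return time.ParseDuration(raw)
	}
	return globalDefault, nil
}

func main() {
	overridden, _ := resolveMaxNodeStartupTime(15*time.Minute, map[string]string{"maxnodestartuptime": "35m"})
	inherited, _ := resolveMaxNodeStartupTime(15*time.Minute, nil)
	fmt.Println(overridden, inherited) // 35m0s 15m0s
}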

cluster-autoscaler/clusterstate/clusterstate_test.go

Lines changed: 35 additions & 4 deletions
@@ -423,7 +423,7 @@ func TestTooManyUnready(t *testing.T) {
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
+	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 35 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
 	assert.NoError(t, err)
 	assert.False(t, clusterstate.IsClusterHealthy())
@@ -462,6 +462,37 @@ func TestUnreadyLongAfterCreation(t *testing.T) {
 	assert.Empty(t, upcomingRegistered["ng1"])
 }
 
+func TestUnreadyAfterCreationWithIncreasedStartupTime(t *testing.T) {
+	now := time.Now()
+
+	ng1_1 := BuildTestNode("ng1-1", 1000, 1000)
+	SetNodeReadyState(ng1_1, true, now.Add(-time.Minute))
+	ng2_1 := BuildTestNode("ng2-1", 1000, 1000)
+	SetNodeReadyState(ng2_1, false, now.Add(-time.Minute))
+	ng2_1.CreationTimestamp = metav1.Time{Time: now.Add(-30 * time.Minute)}
+
+	provider := testprovider.NewTestCloudProviderBuilder().Build()
+	provider.AddNodeGroup("ng1", 1, 10, 1)
+	provider.AddNodeGroup("ng2", 1, 10, 1)
+	provider.AddNode("ng1", ng1_1)
+	provider.AddNode("ng2", ng2_1)
+
+	assert.NotNil(t, provider)
+	fakeClient := &fake.Clientset{}
+	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "some-map")
+	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
+		MaxTotalUnreadyPercentage: 10,
+		OkTotalUnreadyCount:       1,
+	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 35 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
+	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
+	assert.NoError(t, err)
+	assert.Equal(t, 0, len(clusterstate.GetClusterReadiness().Unready))
+	assert.Equal(t, 1, len(clusterstate.GetClusterReadiness().NotStarted))
+	upcoming, upcomingRegistered := clusterstate.GetUpcomingNodes()
+	assert.Equal(t, 0, upcoming["ng1"])
+	assert.Empty(t, upcomingRegistered["ng1"])
+}
+
 func TestNotStarted(t *testing.T) {
 	now := time.Now()
 
@@ -484,7 +515,7 @@ func TestNotStarted(t *testing.T) {
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
+	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 35 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(clusterstate.GetClusterReadiness().NotStarted))
@@ -546,7 +577,7 @@ func TestRegisterScaleDown(t *testing.T) {
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
+	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 35 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 	now := time.Now()
 	clusterstate.RegisterScaleDown(provider.GetNodeGroup("ng1"), "ng1-1", now.Add(time.Minute), now)
 	assert.Equal(t, 1, len(clusterstate.scaleDownRequests))
@@ -639,7 +670,7 @@ func TestUpcomingNodes(t *testing.T) {
 	clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
 		MaxTotalUnreadyPercentage: 10,
 		OkTotalUnreadyCount:       1,
-	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
+	}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
 	err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1, ng3_1, ng4_1, ng5_1, ng5_2}, nil, now)
 	assert.NoError(t, err)
 	assert.Empty(t, clusterstate.GetScaleUpFailures())
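The new TestUnreadyAfterCreationWithIncreasedStartupTime above pins down the intended boundary behavior: ng2-1 has been unready and was created 30 minutes ago, and with MaxNodeStartupTime raised to 35 minutes it lands in NotStarted rather than Unready. A minimal sketch of that rule, assuming the classification reduces to a simple age check (illustrative only, not the registry's actual implementation):

package main

import (
	"fmt"
	"time"
)

// stillStarting sketches the assumed rule: an unready node counts as
// NotStarted, not Unready, while it is within MaxNodeStartupTime of its
// creation timestamp.
func stillStarting(now, created time.Time, maxNodeStartupTime time.Duration) bool {
	return now.Sub(created) <= maxNodeStartupTime
}

func main() {
	now := time.Now()
	created := now.Add(-30 * time.Minute) // matches ng2-1's CreationTimestamp in the test
	fmt.Println(stillStarting(now, created, 35*time.Minute)) // true  -> NotStarted
	fmt.Println(stillStarting(now, created, 15*time.Minute)) // false -> Unready
}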

cluster-autoscaler/core/static_autoscaler_test.go

Lines changed: 1 addition & 1 deletion
@@ -2464,7 +2464,7 @@ func TestStaticAutoscalerUpcomingScaleDownCandidates(t *testing.T) {
 
 	// Create CSR with unhealthy cluster protection effectively disabled, to guarantee we reach the tested logic.
 	csrConfig := clusterstate.ClusterStateRegistryConfig{OkTotalUnreadyCount: nodeGroupCount * unreadyNodesCount}
-	csr := clusterstate.NewClusterStateRegistry(provider, csrConfig, autoscalingCtx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)
+	csr := clusterstate.NewClusterStateRegistry(provider, csrConfig, autoscalingCtx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute, MaxNodeStartupTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)
 
 	// Setting the Actuator is necessary for testing any scale-down logic, it shouldn't have anything to do in this test.
 	actuator := actuation.NewActuator(&autoscalingCtx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, processorstest.NewTestProcessors(&autoscalingCtx).NodeGroupConfigProcessor)

cluster-autoscaler/processors/nodegroupconfig/node_group_config_processor.go

Lines changed: 2 additions & 2 deletions
@@ -110,11 +110,11 @@ func (p *DelegatingNodeGroupConfigProcessor) GetMaxNodeProvisionTime(nodeGroup c
 	return ngConfig.MaxNodeProvisionTime, nil
 }
 
-// GetMaxNodeProvisionTime returns MaxNodeStartupTime value that should be used for a given NodeGroup.
+// GetMaxNodeStartupTime returns MaxNodeStartupTime value that should be used for a given NodeGroup.
 func (p *DelegatingNodeGroupConfigProcessor) GetMaxNodeStartupTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
 	ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
 	if err != nil && err != cloudprovider.ErrNotImplemented {
-		return time.Duration(0), err
+		return p.nodeGroupDefaults.MaxNodeStartupTime, err
 	}
 	if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
 		return p.nodeGroupDefaults.MaxNodeStartupTime, nil
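Two fixes land here: the doc comment no longer repeats GetMaxNodeProvisionTime's name, and the hard-error path now falls back to the configured default instead of a zero duration. Pieced together from the hunk's context lines, the method after this commit presumably reads as below; the final return is inferred from the parallel GetMaxNodeProvisionTime shown at the top of the hunk, not visible in the diff itself.

// GetMaxNodeStartupTime returns MaxNodeStartupTime value that should be used for a given NodeGroup.
func (p *DelegatingNodeGroupConfigProcessor) GetMaxNodeStartupTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
	ngConfig, err := nodeGroup.GetOptions(p.nodeGroupDefaults)
	if err != nil && err != cloudprovider.ErrNotImplemented {
		// Fall back to the global default even when GetOptions fails outright.
		return p.nodeGroupDefaults.MaxNodeStartupTime, err
	}
	if ngConfig == nil || err == cloudprovider.ErrNotImplemented {
		return p.nodeGroupDefaults.MaxNodeStartupTime, nil
	}
	return ngConfig.MaxNodeStartupTime, nil // inferred: mirrors GetMaxNodeProvisionTime
}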

cluster-autoscaler/processors/nodegroupconfig/node_group_config_processor_test.go

Lines changed: 15 additions & 0 deletions
@@ -47,6 +47,7 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
 		ScaleDownGpuUtilizationThreshold: 0.6,
 		ScaleDownUtilizationThreshold:    0.5,
 		MaxNodeProvisionTime:             15 * time.Minute,
+		MaxNodeStartupTime:               15 * time.Minute,
 		IgnoreDaemonSetsUtilization:      true,
 	}
 	ngOpts := &config.NodeGroupAutoscalingOptions{
@@ -55,6 +56,7 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
 		ScaleDownGpuUtilizationThreshold: 0.85,
 		ScaleDownUtilizationThreshold:    0.75,
 		MaxNodeProvisionTime:             60 * time.Minute,
+		MaxNodeStartupTime:               35 * time.Minute,
 		IgnoreDaemonSetsUtilization:      false,
 	}
 
@@ -109,6 +111,17 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
 		assert.Equal(t, res, results[w])
 	}
 
+	testMaxNodeStartupTime := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
+		res, err := p.GetMaxNodeStartupTime(ng)
+		assert.Equal(t, err, we)
+		results := map[Want]time.Duration{
+			NIL:    15 * time.Minute,
+			GLOBAL: 15 * time.Minute,
+			NG:     35 * time.Minute,
+		}
+		assert.Equal(t, res, results[w])
+	}
+
 	// for IgnoreDaemonSetsUtilization
 	testIgnoreDSUtilization := func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
 		res, err := p.GetIgnoreDaemonSetsUtilization(ng)
@@ -127,13 +140,15 @@ func TestDelegatingNodeGroupConfigProcessor(t *testing.T) {
 		"ScaleDownUtilizationThreshold":    testUtilizationThreshold,
 		"ScaleDownGpuUtilizationThreshold": testGpuThreshold,
 		"MaxNodeProvisionTime":             testMaxNodeProvisionTime,
+		"MaxNodeStartupTime":               testMaxNodeStartupTime,
 		"IgnoreDaemonSetsUtilization":      testIgnoreDSUtilization,
 		"MultipleOptions": func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {
 			testUnneededTime(t, p, ng, w, we)
 			testUnreadyTime(t, p, ng, w, we)
 			testUtilizationThreshold(t, p, ng, w, we)
 			testGpuThreshold(t, p, ng, w, we)
 			testMaxNodeProvisionTime(t, p, ng, w, we)
+			testMaxNodeStartupTime(t, p, ng, w, we)
 			testIgnoreDSUtilization(t, p, ng, w, we)
 		},
 		"RepeatingTheSameCallGivesConsistentResults": func(t *testing.T, p NodeGroupConfigProcessor, ng cloudprovider.NodeGroup, w Want, we error) {

cluster-autoscaler/simulator/dynamicresources/snapshot/snapshot_test.go

Lines changed: 1 addition & 1 deletion
@@ -633,7 +633,7 @@ func TestSnapshotForkCommitRevert(t *testing.T) {
 
 	addedSlices := []*resourceapi.ResourceSlice{addedNodeSlice.DeepCopy()}
 	if err := s.AddNodeResourceSlices(*addedNodeSlice.Spec.NodeName, addedSlices); err != nil {
-		t.Fatalf("failed to add %s resource slices: %v", addedNodeSlice.Spec.NodeName, err)
+		t.Fatalf("failed to add %s resource slices: %v", *addedNodeSlice.Spec.NodeName, err)
 	}
 	if err := s.AddClaims([]*resourceapi.ResourceClaim{addedClaim}); err != nil {
 		t.Fatalf("failed to add %s claim: %v", addedClaim.Name, err)
