CLOUDP-317886 - block removing cluster from MC Sharded deployment #495
@@ -0,0 +1,13 @@
---
kind: fix
date: 2025-10-06
---
* **MultiClusterSharded**: Block removing a non-zero member cluster from the MongoDB resource. This prevents scaling down a member cluster whose configuration is no longer available, which can lead to unexpected issues. Previously the operator crashed in that scenario; after the fix it marks the reconciliation as `Failed` with an appropriate message. Example unsafe scenario that is now blocked (an illustrative manifest sketch follows the list):
  * The user has 2 member clusters: `main` serves application traffic, `read-analytics` serves read-only analytics
  * The `main` cluster has 7 voting members
  * The `read-analytics` cluster has 3 non-voting members
  * The user decides to remove the `read-analytics` cluster by deleting its `clusterSpecItem` entirely
  * The operator scales down the members of the `read-analytics` cluster one by one
  * Because the configuration no longer specifies voting options and `priority` defaults to 1, the operator removes one member while the other two are reconfigured as voting members
  * The replica set now contains 9 voting members, which is not [supported by MongoDB](https://www.mongodb.com/docs/manual/reference/limits/#mongodb-limit-Number-of-Voting-Members-of-a-Replica-Set)
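To make the unsafe edit concrete, here is a minimal, illustrative sketch of the relevant `clusterSpecList` fragment. The field names and values (`clusterName`, `members`, `memberConfig` with `votes`/`priority`, and the `main`/`read-analytics` cluster names) are assumptions for illustration only and are not taken from this PR; consult the CRD reference for the exact schema and its placement inside a multi-cluster sharded resource.

```yaml
# Illustrative fragment only; field names and placement are assumed, not confirmed by this PR.
clusterSpecList:
  - clusterName: main             # 7 voting members serving application traffic
    members: 7
  - clusterName: read-analytics   # 3 non-voting members for analytics reads
    members: 3
    memberConfig:                 # assumed per-member overrides keeping these members non-voting
      - votes: 0
        priority: "0"
      - votes: 0
        priority: "0"
      - votes: 0
        priority: "0"
# Unsafe edit now rejected by the operator: deleting the whole `read-analytics`
# entry above. Its memberConfig disappears with it, so while the members are
# scaled down one by one they fall back to the defaults (votes: 1, priority: 1),
# which can temporarily yield 9 voting members in the replica set.
```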
@@ -65,7 +65,7 @@ func TestX509ClusterAuthentication_CanBeEnabled_IfX509AuthenticationIsEnabled_Sh
     ctx := context.Background()
     scWithTls := test.DefaultClusterBuilder().EnableTLS().EnableX509().SetName("sc-with-tls").SetTLSCA("custom-ca").Build()

-    reconciler, _, client, _, err := defaultClusterReconciler(ctx, nil, "", "", scWithTls, nil)
+    reconciler, _, client, _, err := defaultShardedClusterReconciler(ctx, nil, "", "", scWithTls, nil)
     require.NoError(t, err)
     addKubernetesTlsResources(ctx, client, scWithTls)
@@ -76,7 +76,7 @@ func TestX509CanBeEnabled_WhenThereAreOnlyTlsDeployments_ShardedCluster(t *testi
     ctx := context.Background()
     scWithTls := test.DefaultClusterBuilder().EnableTLS().EnableX509().SetName("sc-with-tls").SetTLSCA("custom-ca").Build()

-    reconciler, _, client, _, err := defaultClusterReconciler(ctx, nil, "", "", scWithTls, nil)
+    reconciler, _, client, _, err := defaultShardedClusterReconciler(ctx, nil, "", "", scWithTls, nil)
     require.NoError(t, err)
     addKubernetesTlsResources(ctx, client, scWithTls)
@@ -333,7 +333,7 @@ func TestX509InternalClusterAuthentication_CanBeEnabledWithScram_ShardedCluster(
         EnableX509InternalClusterAuth().
         Build()

-    r, _, kubeClient, omConnectionFactory, _ := defaultClusterReconciler(ctx, nil, "", "", sc, nil)
+    r, _, kubeClient, omConnectionFactory, _ := defaultShardedClusterReconciler(ctx, nil, "", "", sc, nil)
     addKubernetesTlsResources(ctx, r.client, sc)
     checkReconcileSuccessful(ctx, t, r, sc, kubeClient)
@@ -770,15 +770,16 @@ func Test_NoAdditionalDomainsPresent(t *testing.T) {
     // The default secret we create does not contain additional domains so it will not be valid for this RS
     rs.Spec.Security.TLSConfig.AdditionalCertificateDomains = []string{"foo"}

-    reconciler, _, client, _, err := defaultClusterReconciler(ctx, nil, "", "", rs, nil)
-    require.NoError(t, err)
-    addKubernetesTlsResources(ctx, client, rs)
+    kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    addKubernetesTlsResources(ctx, kubeClient, rs)

-    secret := &corev1.Secret{}
+    certSecret := &corev1.Secret{}

-    _ = client.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s-cert", rs.Name), Namespace: rs.Namespace}, secret)
+    _ = kubeClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s-cert", rs.Name), Namespace: rs.Namespace}, certSecret)

-    err = certs.VerifyAndEnsureCertificatesForStatefulSet(ctx, reconciler.SecretClient, reconciler.SecretClient, fmt.Sprintf("%s-cert", rs.Name), certs.ReplicaSetConfig(*rs), nil)
+    err := certs.VerifyAndEnsureCertificatesForStatefulSet(ctx, reconciler.SecretClient, reconciler.SecretClient, fmt.Sprintf("%s-cert", rs.Name), certs.ReplicaSetConfig(*rs), nil)
     require.Error(t, err)
     for i := 0; i < rs.Spec.Members; i++ {
         expectedErrorMessage := fmt.Sprintf("domain %s-%d.foo is not contained in the list of DNSNames", rs.Name, i)
         assert.Contains(t, err.Error(), expectedErrorMessage)

Review comment: this was previously using shardedClusterReconciler instead of the replica set reconciler.