From 0cfa83afbb870e69cb06ab52f6ee7c508a4a18e8 Mon Sep 17 00:00:00 2001 From: Debdut Chakraborty Date: Fri, 21 Nov 2025 11:59:42 +0530 Subject: [PATCH 1/9] wip: outlining backup controller --- Makefile | 21 ++ api/v1alpha1/mongodbbackup_types.go | 77 +++++ api/v1alpha1/zz_generated.deepcopy.go | 199 +++++++++++++ backup-image/Dockerfile | 29 ++ backup-image/entrypoint.sh | 150 ++++++++++ ...lock.cloud.rocket.chat_mongodbbackups.yaml | 185 ++++++++++++ config/crd/kustomization.yaml | 3 +- config/rbac/role.yaml | 46 +++ .../airlock_v1alpha1_mongodbbackup.yaml | 29 ++ .../airlock_v1alpha1_mongodbcluster.yaml | 31 +- config/samples/kustomization.yaml | 1 + controllers/common.go | 45 +++ controllers/mongodbbackup_controller.go | 269 ++++++++++++++++++ main.go | 7 + tests/controller_test.go | 46 +++ tests/utils/k3d.go | 12 +- 16 files changed, 1141 insertions(+), 9 deletions(-) create mode 100644 api/v1alpha1/mongodbbackup_types.go create mode 100644 backup-image/Dockerfile create mode 100644 backup-image/entrypoint.sh create mode 100644 config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml create mode 100644 config/samples/airlock_v1alpha1_mongodbbackup.yaml create mode 100644 controllers/mongodbbackup_controller.go diff --git a/Makefile b/Makefile index 2b430b8..8f692ff 100644 --- a/Makefile +++ b/Makefile @@ -254,3 +254,24 @@ catalog-build: opm ## Build a catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. $(MAKE) docker-push IMG=$(CATALOG_IMG) + +.PHONY: k3d-cluster +k3d-cluster: + k3d cluster list -o json | jq '.[].name' -r | grep -q ${NAME} || \ + k3d cluster create ${NAME} --kubeconfig-update-default=false --kubeconfig-switch-context=false --no-lb --no-rollback --wait -s1 -a1 + +.PHONY: k3d-load-image +k3d-load-image: docker-build-no-test k3d-cluster + k3d image load ${IMG} -c ${NAME} + +.PHONY: k3d-deploy +k3d-deploy: k3d-load-image + k3d kubeconfig print ${NAME} > /tmp/${NAME}.kube.config + KUBECONFIG=/tmp/${NAME}.kube.config kubectl apply -f config/crd/bases + KUBECONFIG=/tmp/${NAME}.kube.config kubectl get namespace airlock-system || KUBECONFIG=/tmp/${NAME}.kube.config kubectl create namespace airlock-system + KUBECONFIG=/tmp/${NAME}.kube.config kubectl apply -k config/rbac + KUBECONFIG=/tmp/${NAME}.kube.config kubectl apply -f config/manager/manager.yaml + +.PHONY: k3d-destroy +k3d-destroy: + k3d cluster delete ${NAME} diff --git a/api/v1alpha1/mongodbbackup_types.go b/api/v1alpha1/mongodbbackup_types.go new file mode 100644 index 0000000..917a4bf --- /dev/null +++ b/api/v1alpha1/mongodbbackup_types.go @@ -0,0 +1,77 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MongoDBBackupSpec defines the desired state of MongoDBBackup +// +kubebuilder:object:generate=true +// +k8s:deepcopy-gen=true +type MongoDBBackupSpec struct { + MongoDBRef MongoDBRef `json:"mongodbRef"` + Namespaces []MongoDBNamespace `json:"namespaces,omitempty"` + Storage MongoDBBackupStorage `json:"storage"` +} + +type MongoDBRef struct { + Name string `json:"name"` + Namespace string `json:"namespace"` +} + +type MongoDBNamespace struct { + Database string `json:"database"` + Collections []string `json:"collections,omitempty"` +} + +type MongoDBBackupStorage struct { + Type string `json:"type"` + S3 *MongoDBBackupS3 `json:"s3,omitempty"` +} + +type MongoDBBackupS3 struct { + Endpoint string `json:"endpoint"` + Bucket string `json:"bucket"` + Region string `json:"region"` + SecretRef S3SecretRef `json:"secretRef"` + Prefix string 
`json:"prefix,omitempty"` +} + +type S3SecretRef struct { + Name string `json:"name"` + Key string `json:"key"` +} + +// MongoDBBackupStatus defines the observed state of MongoDBBackup +// +kubebuilder:object:generate=true +// +k8s:deepcopy-gen=true +type MongoDBBackupStatus struct { + Phase string `json:"phase,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + BackupPath string `json:"backupPath,omitempty"` + Size string `json:"size,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type MongoDBBackup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MongoDBBackupSpec `json:"spec,omitempty"` + Status MongoDBBackupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type MongoDBBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MongoDBBackup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MongoDBBackup{}, &MongoDBBackupList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 56179a6..31645be 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -137,6 +137,155 @@ func (in *MongoDBAccessRequestStatus) DeepCopy() *MongoDBAccessRequestStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackup) DeepCopyInto(out *MongoDBBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackup. +func (in *MongoDBBackup) DeepCopy() *MongoDBBackup { + if in == nil { + return nil + } + out := new(MongoDBBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDBBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupList) DeepCopyInto(out *MongoDBBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MongoDBBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupList. +func (in *MongoDBBackupList) DeepCopy() *MongoDBBackupList { + if in == nil { + return nil + } + out := new(MongoDBBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDBBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoDBBackupS3) DeepCopyInto(out *MongoDBBackupS3) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupS3. +func (in *MongoDBBackupS3) DeepCopy() *MongoDBBackupS3 { + if in == nil { + return nil + } + out := new(MongoDBBackupS3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupSpec) DeepCopyInto(out *MongoDBBackupSpec) { + *out = *in + out.MongoDBRef = in.MongoDBRef + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]MongoDBNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Storage.DeepCopyInto(&out.Storage) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupSpec. +func (in *MongoDBBackupSpec) DeepCopy() *MongoDBBackupSpec { + if in == nil { + return nil + } + out := new(MongoDBBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupStatus) DeepCopyInto(out *MongoDBBackupStatus) { + *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStatus. +func (in *MongoDBBackupStatus) DeepCopy() *MongoDBBackupStatus { + if in == nil { + return nil + } + out := new(MongoDBBackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupStorage) DeepCopyInto(out *MongoDBBackupStorage) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(MongoDBBackupS3) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStorage. +func (in *MongoDBBackupStorage) DeepCopy() *MongoDBBackupStorage { + if in == nil { + return nil + } + out := new(MongoDBBackupStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MongoDBCluster) DeepCopyInto(out *MongoDBCluster) { *out = *in @@ -237,3 +386,53 @@ func (in *MongoDBClusterStatus) DeepCopy() *MongoDBClusterStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBNamespace) DeepCopyInto(out *MongoDBNamespace) { + *out = *in + if in.Collections != nil { + in, out := &in.Collections, &out.Collections + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBNamespace. 
+func (in *MongoDBNamespace) DeepCopy() *MongoDBNamespace { + if in == nil { + return nil + } + out := new(MongoDBNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBRef) DeepCopyInto(out *MongoDBRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBRef. +func (in *MongoDBRef) DeepCopy() *MongoDBRef { + if in == nil { + return nil + } + out := new(MongoDBRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3SecretRef) DeepCopyInto(out *S3SecretRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SecretRef. +func (in *S3SecretRef) DeepCopy() *S3SecretRef { + if in == nil { + return nil + } + out := new(S3SecretRef) + in.DeepCopyInto(out) + return out +} diff --git a/backup-image/Dockerfile b/backup-image/Dockerfile new file mode 100644 index 0000000..04a6e9b --- /dev/null +++ b/backup-image/Dockerfile @@ -0,0 +1,29 @@ +FROM mongo:7 + +# Install AWS CLI, jq, and other utilities +RUN apt-get update && \ + apt-get install -y \ + curl \ + unzip \ + jq \ + coreutils \ + && rm -rf /var/lib/apt/lists/* + +# Install AWS CLI v2 +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ + unzip awscliv2.zip && \ + ./aws/install && \ + rm -rf awscliv2.zip aws/ + +# Create backup directory +RUN mkdir -p /backups + +# Copy entrypoint script +COPY entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh + +# Set working directory +WORKDIR /backups + +# Set entrypoint +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] \ No newline at end of file diff --git a/backup-image/entrypoint.sh b/backup-image/entrypoint.sh new file mode 100644 index 0000000..1eb102e --- /dev/null +++ b/backup-image/entrypoint.sh @@ -0,0 +1,150 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Environment variables (with defaults) +MONGODB_URI=${MONGODB_URI:-"mongodb://localhost:27017"} +DB_NAME=${DB_NAME:-""} +COLLECTION_NAMES=${COLLECTION_NAMES:-""} # Comma-separated list +BACKUP_DIR="/backups" +S3_BUCKET=${S3_BUCKET:-""} +S3_PREFIX=${S3_PREFIX:-"mongodb-backups"} +BACKUP_NAME=${BACKUP_NAME:-"backup-$(date +%Y%m%d-%H%M%S)"} +SPLIT_SIZE=${SPLIT_SIZE:-"1G"} # 1GB chunks + +# Create backup directory +mkdir -p "$BACKUP_DIR" + +echo "Starting MongoDB backup..." +echo "Database: $DB_NAME" +echo "Collections: $COLLECTION_NAMES" +echo "Backup name: $BACKUP_NAME" + +# Build mongodump command +MONGODUMP_CMD="mongodump --uri=\"$MONGODB_URI\" --archive=\"$BACKUP_DIR/${BACKUP_NAME}.archive\" --gzip" + +# Add database filter if specified +if [[ -n "$DB_NAME" ]]; then + MONGODUMP_CMD="$MONGODUMP_CMD --db=\"$DB_NAME\"" +fi + +# Add collection filters if specified +if [[ -n "$COLLECTION_NAMES" ]]; then + IFS=',' read -ra COLLECTIONS <<< "$COLLECTION_NAMES" + for collection in "${COLLECTIONS[@]}"; do + collection=$(echo "$collection" | xargs) # trim whitespace + if [[ -n "$collection" ]]; then + MONGODUMP_CMD="$MONGODUMP_CMD --collection=\"$collection\"" + fi + done +fi + +echo "Running: $MONGODUMP_CMD" +eval "$MONGODUMP_CMD" + +echo "Backup completed. Archive size:" +ls -lh "$BACKUP_DIR/${BACKUP_NAME}.archive" + +# Split the backup into chunks +echo "Splitting backup into ${SPLIT_SIZE} chunks..." 
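+# GNU split's -d flag produces numeric suffixes (_part_00, _part_01, ...),
+# so the lexical glob over the parts later in this script (and on any
+# restore side) enumerates the chunks in their original byte order.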
+cd "$BACKUP_DIR" +split -b "$SPLIT_SIZE" -d "${BACKUP_NAME}.archive" "${BACKUP_NAME}_part_" + +# Remove original archive after splitting +rm "${BACKUP_NAME}.archive" + +# Generate hashes and create manifest +echo "Generating hashes and manifest..." +manifest_file="$BACKUP_DIR/manifest.json" +cat > "$manifest_file" << 'EOF' +{ + "backup_name": "", + "created_at": "", + "database": "", + "collections": [], + "total_size": 0, + "parts": [] +} +EOF + +# Update manifest with metadata +collections_array=$(echo "$COLLECTION_NAMES" | sed 's/,/","/g' | sed 's/^/"/' | sed 's/$/"/' | sed 's/""//g') +if [[ "$collections_array" == '""' ]]; then + collections_array='[]' +else + collections_array="[$collections_array]" +fi + +total_size=0 +parts_json="[" + +for part_file in ${BACKUP_NAME}_part_*; do + if [[ -f "$part_file" ]]; then + echo "Processing $part_file..." + + # Calculate hash + hash=$(sha256sum "$part_file" | cut -d' ' -f1) + size=$(stat -f%z "$part_file" 2>/dev/null || stat -c%s "$part_file") + total_size=$((total_size + size)) + + # Add to parts JSON + if [[ "$parts_json" != "[" ]]; then + parts_json="$parts_json," + fi + parts_json="$parts_json{\"filename\":\"$part_file\",\"size\":$size,\"sha256\":\"$hash\"}" + + echo " $part_file: $hash ($(numfmt --to=iec $size))" + fi +done + +parts_json="$parts_json]" + +# Update manifest file using jq if available, otherwise sed +jq --arg backup_name "$BACKUP_NAME" \ + --arg created_at "$(date -Iseconds)" \ + --arg database "$DB_NAME" \ + --argjson collections "$collections_array" \ + --arg total_size "$total_size" \ + --argjson parts "$parts_json" \ + '.backup_name = $backup_name | .created_at = $created_at | .database = $database | .collections = $collections | .total_size = ($total_size | tonumber) | .parts = $parts' \ + "$manifest_file" > "${manifest_file}.tmp" && mv "${manifest_file}.tmp" "$manifest_file" + +echo "Manifest created:" +cat "$manifest_file" + +# Upload to S3 if bucket is specified +if [[ -n "$S3_BUCKET" ]]; then + echo "Uploading to S3 bucket: $S3_BUCKET" + s3_path="s3://$S3_BUCKET/$S3_PREFIX/$BACKUP_NAME" + + # Configure AWS CLI with custom endpoint if specified + if [[ -n "${AWS_S3_ENDPOINT:-}" ]]; then + export AWS_CLI_S3_ENDPOINT="--endpoint-url=$AWS_S3_ENDPOINT" + else + export AWS_CLI_S3_ENDPOINT="" + fi + + # Upload manifest first + echo "Uploading manifest..." + eval "aws s3 cp $AWS_CLI_S3_ENDPOINT \"$manifest_file\" \"$s3_path/manifest.json\"" + + # Upload all parts + for part_file in ${BACKUP_NAME}_part_*; do + if [[ -f "$part_file" ]]; then + echo "Uploading $part_file..." + eval "aws s3 cp $AWS_CLI_S3_ENDPOINT \"$part_file\" \"$s3_path/$part_file\"" + fi + done + + echo "Backup uploaded successfully to: $s3_path" + + # Clean up local files after successful upload + echo "Cleaning up local files..." + rm -f ${BACKUP_NAME}_part_* "$manifest_file" + +else + echo "No S3 bucket specified. Backup files remain in $BACKUP_DIR" + echo "Total backup size: $(numfmt --to=iec $total_size)" +fi + +echo "Backup process completed successfully!" 
diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml new file mode 100644 index 0000000..8328fc8 --- /dev/null +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml @@ -0,0 +1,185 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: mongodbbackups.airlock.cloud.rocket.chat +spec: + group: airlock.cloud.rocket.chat + names: + kind: MongoDBBackup + listKind: MongoDBBackupList + plural: mongodbbackups + singular: mongodbbackup + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MongoDBBackupSpec defines the desired state of MongoDBBackup + properties: + mongodbRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + namespaces: + items: + properties: + collections: + items: + type: string + type: array + database: + type: string + required: + - database + type: object + type: array + storage: + properties: + s3: + properties: + bucket: + type: string + endpoint: + type: string + prefix: + type: string + region: + type: string + secretRef: + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + required: + - bucket + - endpoint + - region + - secretRef + type: object + type: + type: string + required: + - type + type: object + required: + - mongodbRef + - storage + type: object + status: + description: MongoDBBackupStatus defines the observed state of MongoDBBackup + properties: + backupPath: + type: string + completionTime: + format: date-time + type: string + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + phase: + type: string + size: + type: string + startTime: + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 04c5fc8..3f03da0 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -4,9 +4,10 @@ resources: - bases/airlock.cloud.rocket.chat_mongodbclusters.yaml - bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml +- bases/airlock.cloud.rocket.chat_mongodbbackups.yaml #+kubebuilder:scaffold:crdkustomizeresource -patchesStrategicMerge: +patchesStrategicMerge: [] # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
# patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_mongodbclusters.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index e50801c..feabc6b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -20,6 +20,14 @@ rules: - get - list - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch - apiGroups: - "" - apps @@ -60,6 +68,32 @@ rules: - get - patch - update +- apiGroups: + - airlock.cloud.rocket.chat + resources: + - mongodbbackups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - airlock.cloud.rocket.chat + resources: + - mongodbbackups/finalizers + verbs: + - update +- apiGroups: + - airlock.cloud.rocket.chat + resources: + - mongodbbackups/status + verbs: + - get + - patch + - update - apiGroups: - airlock.cloud.rocket.chat resources: @@ -86,3 +120,15 @@ rules: - get - patch - update +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/config/samples/airlock_v1alpha1_mongodbbackup.yaml b/config/samples/airlock_v1alpha1_mongodbbackup.yaml new file mode 100644 index 0000000..ac17abe --- /dev/null +++ b/config/samples/airlock_v1alpha1_mongodbbackup.yaml @@ -0,0 +1,29 @@ +apiVersion: airlock.cloud.rocket.chat/v1alpha1 +kind: MongoDBBackup +metadata: + labels: + app.kubernetes.io/name: mongodbbackup + app.kubernetes.io/instance: mongodbbackup-sample + app.kubernetes.io/part-of: airlock + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: airlock + name: mongodbbackup-sample +spec: + mongodbRef: + name: mongo + namespace: default + namespaces: + - database: test + collections: + - users + - messages + storage: + type: s3 + s3: + endpoint: s3.amazonaws.com + bucket: rocketchat-backups + region: us-east-1 + secretRef: + name: s3-credentials + key: credentials + prefix: mongodb-backups/ \ No newline at end of file diff --git a/config/samples/airlock_v1alpha1_mongodbcluster.yaml b/config/samples/airlock_v1alpha1_mongodbcluster.yaml index ebf6808..a646f87 100644 --- a/config/samples/airlock_v1alpha1_mongodbcluster.yaml +++ b/config/samples/airlock_v1alpha1_mongodbcluster.yaml @@ -1,11 +1,32 @@ apiVersion: airlock.cloud.rocket.chat/v1alpha1 -kind: MongoDBCluster +kind: MongoDBBackup metadata: - name: teste-mongodb1 + labels: + app.kubernetes.io/name: mongodbbackup + app.kubernetes.io/instance: mongodbbackup-sample + app.kubernetes.io/part-of: airlock + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: airlock + name: mongodbbackup-sample spec: - connectionSecret: airlock-connection-string - hostTemplate: "mongodb.airlock-test" - optionsTemplate: ?replicaSet=rs0 + mongodbRef: + name: mongo + namespace: default + namespaces: + - database: test + collections: + - users + - messages + storage: + type: s3 + s3: + endpoint: s3.amazonaws.com + bucket: rocketchat-backups + region: us-east-1 + secretRef: + name: s3-credentials + key: credentials + prefix: mongodb-backups/ --- diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index c270b73..9bd6b84 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -2,4 +2,5 @@ resources: - airlock_v1alpha1_mongodbcluster.yaml - airlock_v1alpha1_mongodbaccessrequest.yaml +- airlock_v1alpha1_mongodbbackup.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/controllers/common.go b/controllers/common.go index 
33294bf..5d6d96b 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -6,6 +6,9 @@ import ( "github.com/mongodb-forks/digest" "go.mongodb.org/atlas/mongodbatlas" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" ) @@ -67,3 +70,45 @@ func getClusterNameFromHostTemplate(ctx context.Context, client *mongodbatlas.Cl return "", errors.NewBadRequest("Cluster not found when searching for it's connectionString in atlas") } + +// getDatabaseSize is used to calculate the volume size required for the backup job +func getDatabaseSize(ctx context.Context, connectionString, database string, collections []string) (int64, error) { + client, err := mongo.Connect(ctx, options.Client().ApplyURI(connectionString)) + if err != nil { + return 0, err + } + defer client.Disconnect(ctx) + + db := client.Database(database) + var totalSize int64 + + if len(collections) == 0 { + // Get all collections in the database + collectionNames, err := db.ListCollectionNames(ctx, map[string]interface{}{}) + if err != nil { + return 0, err + } + collections = collectionNames + } + + // Calculate size for each collection + for _, collectionName := range collections { + // Get collection stats using the collStats command + var result struct { + Size int64 `bson:"size"` + } + + err := db.RunCommand(ctx, bson.M{ + "collStats": collectionName, + }).Decode(&result) + + if err != nil { + // Collection might not exist, skip it + continue + } + + totalSize += result.Size + } + + return totalSize, nil +} diff --git a/controllers/mongodbbackup_controller.go b/controllers/mongodbbackup_controller.go new file mode 100644 index 0000000..8fd588d --- /dev/null +++ b/controllers/mongodbbackup_controller.go @@ -0,0 +1,269 @@ +package controllers + +import ( + "context" + "fmt" + "strings" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1" +) + +// MongoDBBackupReconciler reconciles a MongoDBBackup object +type MongoDBBackupReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups/finalizers,verbs=update +//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch + +func (r *MongoDBBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := log.FromContext(ctx) + + // Fetch the MongoDBBackup instance + var backup airlockv1alpha1.MongoDBBackup + if err := r.Get(ctx, req.NamespacedName, &backup); err != nil { + log.Error(err, "unable to fetch MongoDBBackup") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Check if backup is already completed + if backup.Status.Phase == "Completed" || backup.Status.Phase == "Failed" { + return ctrl.Result{}, nil + } + + // Initialize 
status if empty + if backup.Status.Phase == "" { + backup.Status.Phase = "Pending" + backup.Status.StartTime = &metav1.Time{Time: time.Now()} + if err := r.Status().Update(ctx, &backup); err != nil { + log.Error(err, "failed to update backup status") + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + + // Check if Job already exists + jobName := fmt.Sprintf("%s-backup-job", backup.Name) + var existingJob batchv1.Job + err := r.Get(ctx, client.ObjectKey{Name: jobName, Namespace: backup.Namespace}, &existingJob) + if err == nil { + // Job exists, check its status + return r.updateBackupStatusFromJob(ctx, &backup, &existingJob) + } else if client.IgnoreNotFound(err) != nil { + log.Error(err, "failed to get backup job") + return ctrl.Result{}, err + } + + // Create backup Job + job, err := r.createBackupJob(ctx, &backup) + if err != nil { + log.Error(err, "failed to create backup job") + r.updateBackupStatusFailed(ctx, &backup, err.Error()) + return ctrl.Result{}, err + } + + if err := r.Create(ctx, job); err != nil { + log.Error(err, "failed to create Job") + r.updateBackupStatusFailed(ctx, &backup, err.Error()) + return ctrl.Result{}, err + } + + log.Info("created backup job", "job", jobName) + + backup.Status.Phase = "Running" + if err := r.Status().Update(ctx, &backup); err != nil { + log.Error(err, "failed to update backup status") + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: time.Second * 30}, nil +} + +func (r *MongoDBBackupReconciler) createBackupJob(ctx context.Context, backup *airlockv1alpha1.MongoDBBackup) (*batchv1.Job, error) { + jobName := fmt.Sprintf("%s-backup-job", backup.Name) + + // Build connection string + connectionString := fmt.Sprintf("mongodb://%s.%s.svc.cluster.local:27017", + backup.Spec.MongoDBRef.Name, backup.Spec.MongoDBRef.Namespace) + + // Build environment variables for backup + env := []corev1.EnvVar{ + {Name: "MONGODB_URI", Value: connectionString}, + {Name: "BACKUP_NAME", Value: backup.Name}, + } + + // Add S3 configuration if specified + if backup.Spec.Storage.Type == "s3" && backup.Spec.Storage.S3 != nil { + s3 := backup.Spec.Storage.S3 + env = append(env, []corev1.EnvVar{ + {Name: "S3_BUCKET", Value: s3.Bucket}, + {Name: "S3_PREFIX", Value: s3.Prefix}, + {Name: "AWS_REGION", Value: s3.Region}, + {Name: "AWS_S3_ENDPOINT", Value: s3.Endpoint}, + // AWS credentials from secret + { + Name: "AWS_ACCESS_KEY_ID", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretRef.Name}, + Key: "accessKeyId", + }, + }, + }, + { + Name: "AWS_SECRET_ACCESS_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretRef.Name}, + Key: "secretAccessKey", + }, + }, + }, + }...) + } + + // Add database/collection filters + var databases, collections []string + for _, ns := range backup.Spec.Namespaces { + databases = append(databases, ns.Database) + if len(ns.Collections) > 0 { + collections = append(collections, ns.Collections...) 
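+			// NOTE: collections from all listed databases are flattened into a
+			// single slice, and only databases[0] reaches the job environment
+			// below, so a multi-database spec is effectively truncated to its
+			// first entry for now.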
+ } + } + + // Set database and collection names as env vars + if len(databases) > 0 { + env = append(env, corev1.EnvVar{Name: "DB_NAME", Value: databases[0]}) // For now, support single DB + } + if len(collections) > 0 { + env = append(env, corev1.EnvVar{Name: "COLLECTION_NAMES", Value: strings.Join(collections, ",")}) + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: backup.Namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "backup", + Image: "airlock-backup:latest", // TODO: make this configurable + Env: env, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "backup-storage", + MountPath: "/backups", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "backup-storage", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + }, + } + + // Set owner reference + if err := controllerutil.SetControllerReference(backup, job, r.Scheme); err != nil { + return nil, err + } + + return job, nil +} + +func (r *MongoDBBackupReconciler) updateBackupStatusFromJob(ctx context.Context, backup *airlockv1alpha1.MongoDBBackup, job *batchv1.Job) (ctrl.Result, error) { + log := log.FromContext(ctx) + + if job.Status.CompletionTime != nil { + // Job completed successfully + backup.Status.Phase = "Completed" + backup.Status.CompletionTime = job.Status.CompletionTime + backup.Status.BackupPath = "local:/backup/backup.archive" + backup.Status.Conditions = []metav1.Condition{ + { + Type: "Ready", + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Now(), + Message: "Backup completed successfully", + }, + } + + if err := r.Status().Update(ctx, backup); err != nil { + log.Error(err, "failed to update backup status") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil + } + + if job.Status.Failed > 0 { + // Job failed + backup.Status.Phase = "Failed" + backup.Status.CompletionTime = &metav1.Time{Time: time.Now()} + backup.Status.Conditions = []metav1.Condition{ + { + Type: "Ready", + Status: metav1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Message: "Backup job failed", + }, + } + + if err := r.Status().Update(ctx, backup); err != nil { + log.Error(err, "failed to update backup status") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil + } + + // Job is still running + return ctrl.Result{RequeueAfter: time.Second * 30}, nil +} + +func (r *MongoDBBackupReconciler) updateBackupStatusFailed(ctx context.Context, backup *airlockv1alpha1.MongoDBBackup, message string) { + backup.Status.Phase = "Failed" + backup.Status.CompletionTime = &metav1.Time{Time: time.Now()} + backup.Status.Conditions = []metav1.Condition{ + { + Type: "Ready", + Status: metav1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Message: message, + }, + } + r.Status().Update(ctx, backup) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *MongoDBBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&airlockv1alpha1.MongoDBBackup{}). + Owns(&batchv1.Job{}). 
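+		// Owning Jobs means Job status changes (completion or failure) enqueue
+		// the parent MongoDBBackup, which is what drives the status updates in
+		// updateBackupStatusFromJob above.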
+ Complete(r) +} diff --git a/main.go b/main.go index b441590..674df05 100644 --- a/main.go +++ b/main.go @@ -103,6 +103,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "MongoDBAccessRequest") os.Exit(1) } + if err = (&controllers.MongoDBBackupReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "MongoDBBackup") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/tests/controller_test.go b/tests/controller_test.go index 8bae4a9..3357cf2 100644 --- a/tests/controller_test.go +++ b/tests/controller_test.go @@ -217,5 +217,51 @@ var _ = Describe("airlock", Ordered, func() { return nil }, time.Minute, time.Second).Should(Succeed()) }) + + It("should create and manage MongoDBBackup", func() { + backupName := "test-backup" + backup := &airlockv1alpha1.MongoDBBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupName, + Namespace: "mongo", + }, + Spec: airlockv1alpha1.MongoDBBackupSpec{ + MongoDBRef: airlockv1alpha1.MongoDBRef{ + Name: "mongo", + Namespace: "default", + }, + Namespaces: []airlockv1alpha1.MongoDBNamespace{ + { + Database: "test", + Collections: []string{"users"}, + }, + }, + Storage: airlockv1alpha1.MongoDBBackupStorage{ + Type: "s3", + S3: &airlockv1alpha1.MongoDBBackupS3{ + Endpoint: "s3.amazonaws.com", + Bucket: "test-bucket", + Region: "us-east-1", + SecretRef: airlockv1alpha1.S3SecretRef{ + Name: "s3-credentials", + Key: "credentials", + }, + }, + }, + }, + } + + By("Creating MongoDBBackup") + err := k8sClient.Create(context.Background(), backup) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying backup is created") + var fetchedBackup airlockv1alpha1.MongoDBBackup + err = k8sClient.Get(context.Background(), client.ObjectKey{ + Name: backupName, Namespace: "mongo", + }, &fetchedBackup) + Expect(err).NotTo(HaveOccurred()) + Expect(fetchedBackup.Spec.MongoDBRef.Name).To(Equal("mongo")) + }) }) }) diff --git a/tests/utils/k3d.go b/tests/utils/k3d.go index c4b1b27..ab5aba9 100644 --- a/tests/utils/k3d.go +++ b/tests/utils/k3d.go @@ -4,7 +4,8 @@ import ( "fmt" airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1" - v1 "k8s.io/api/core/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" @@ -19,7 +20,8 @@ func NewK3dCluster(name string) K3dCluster { } func (k K3dCluster) Start() error { - stdout, err := Run("k3d", "cluster", "create", k.name, "--kubeconfig-update-default=false", "--kubeconfig-switch-context=false", "--no-lb", "--no-rollback", "--wait", "-s1", "-a1") + // stdout, err := Run("k3d", "cluster", "create", k.name, "--kubeconfig-update-default=false", "--kubeconfig-switch-context=false", "--no-lb", "--no-rollback", "--wait", "-s1", "-a1") + stdout, err := Run("make", "k3d-cluster") fmt.Println(string(stdout)) return err } @@ -75,7 +77,11 @@ func (k K3dCluster) K8sClient() (*client.Client, error) { return nil, err } - err = v1.AddToScheme(scheme.Scheme) + err = corev1.AddToScheme(scheme.Scheme) + if err != nil { + return nil, err + } + err = batchv1.AddToScheme(scheme.Scheme) if err != nil { return nil, err } From 085c39e090d83378296b26172a3dc1611ea9a070 Mon Sep 17 00:00:00 2001 From: Debdut Chakraborty Date: Fri, 21 Nov 2025 13:38:25 +0530 Subject: [PATCH 2/9] chore: 
local configuration details --- .gitignore | 2 + Makefile | 54 +++++++++-- tests/assets/k3d/local-path-config.yaml | 33 +++++++ tests/assets/k3d/manual-storageclass.yaml | 9 ++ tests/assets/local-tests/backup-pod.yaml | 35 +++++++ tests/assets/minio/create-buckets-job.yaml | 32 +++++++ tests/assets/minio/tenant-base.yaml | 104 +++++++++++++++++++++ tests/utils/utils.go | 13 +++ 8 files changed, 275 insertions(+), 7 deletions(-) create mode 100644 tests/assets/k3d/local-path-config.yaml create mode 100644 tests/assets/k3d/manual-storageclass.yaml create mode 100644 tests/assets/local-tests/backup-pod.yaml create mode 100644 tests/assets/minio/create-buckets-job.yaml create mode 100644 tests/assets/minio/tenant-base.yaml diff --git a/.gitignore b/.gitignore index 229dcde..5c69c00 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,5 @@ mongoValues.yaml db-secret.yaml db-cluster.yaml *.ignore + +tests/k3d/disk/ \ No newline at end of file diff --git a/Makefile b/Makefile index 8f692ff..1d61904 100644 --- a/Makefile +++ b/Makefile @@ -48,6 +48,11 @@ endif # Image URL to use all building/pushing image targets IMG ?= $(IMAGE_TAG_BASE):$(VERSION) + +BIMG ?= backup:latest + +# Reusable kubectl command with kubeconfig +KUBECTL_WITH_CONFIG = k3d kubeconfig print ${NAME} > /tmp/${NAME}.kube.config && KUBECONFIG=/tmp/${NAME}.kube.config kubectl # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -257,21 +262,56 @@ catalog-push: ## Push a catalog image. .PHONY: k3d-cluster k3d-cluster: +ifndef NAME + $(error NAME is required. Usage: make k3d-cluster NAME=my-cluster) +endif + test -d tests/k3d/disk || mkdir -pv tests/k3d/disk k3d cluster list -o json | jq '.[].name' -r | grep -q ${NAME} || \ - k3d cluster create ${NAME} --kubeconfig-update-default=false --kubeconfig-switch-context=false --no-lb --no-rollback --wait -s1 -a1 + k3d cluster create ${NAME} --kubeconfig-update-default=false --kubeconfig-switch-context=false --no-lb --no-rollback --wait -s1 -a1 --volume $(PWD)/tests/k3d/disk:/disk + +.PHONY: k3d-add-storageclass +k3d-add-storageclass: k3d-cluster + $(KUBECTL_WITH_CONFIG) apply -f tests/assets/k3d/local-path-config.yaml + $(KUBECTL_WITH_CONFIG) rollout restart deployment/local-path-provisioner -n kube-system + $(KUBECTL_WITH_CONFIG) rollout status deployment/local-path-provisioner -n kube-system + $(KUBECTL_WITH_CONFIG) apply -f tests/assets/k3d/manual-storageclass.yaml .PHONY: k3d-load-image -k3d-load-image: docker-build-no-test k3d-cluster +k3d-load-image: docker-build-no-test k3d-cluster k3d-add-storageclass k3d image load ${IMG} -c ${NAME} .PHONY: k3d-deploy k3d-deploy: k3d-load-image - k3d kubeconfig print ${NAME} > /tmp/${NAME}.kube.config - KUBECONFIG=/tmp/${NAME}.kube.config kubectl apply -f config/crd/bases - KUBECONFIG=/tmp/${NAME}.kube.config kubectl get namespace airlock-system || KUBECONFIG=/tmp/${NAME}.kube.config kubectl create namespace airlock-system - KUBECONFIG=/tmp/${NAME}.kube.config kubectl apply -k config/rbac - KUBECONFIG=/tmp/${NAME}.kube.config kubectl apply -f config/manager/manager.yaml + $(KUBECTL_WITH_CONFIG) apply -f config/crd/bases + $(KUBECTL_WITH_CONFIG) get namespace airlock-system || $(KUBECTL_WITH_CONFIG) create namespace airlock-system + $(KUBECTL_WITH_CONFIG) apply -k config/rbac + $(KUBECTL_WITH_CONFIG) apply -f config/manager/manager.yaml .PHONY: k3d-destroy k3d-destroy: +ifndef NAME + $(error NAME is required. 
Usage: make k3d-cluster NAME=my-cluster) +endif k3d cluster delete ${NAME} + +.PHONY: k3d-deploy-mongo +k3d-deploy-mongo: k3d-cluster + $(KUBECTL_WITH_CONFIG) get namespace mongo || $(KUBECTL_WITH_CONFIG) create namespace mongo + $(KUBECTL_WITH_CONFIG) apply -f tests/assets/mongo + +.PHONY: k3d-deploy-minio +k3d-deploy-minio: k3d-cluster k3d-add-storageclass + $(KUBECTL_WITH_CONFIG) apply -k "github.com/minio/operator?ref=v6.0.4" + $(KUBECTL_WITH_CONFIG) apply -f tests/assets/minio + +.PHONY: docker-build-backup-image +docker-build-backup-image: + docker build -t ${BIMG} backup-image/ + +.PHONY: k3d-load-backup-image +k3d-load-backup-image: k3d-cluster docker-build-backup-image + k3d image import -c ${NAME} ${BIMG} + +.PHONY: k3d-run-backup-pod +k3d-run-backup-pod: k3d-cluster k3d-load-backup-image + $(KUBECTL_WITH_CONFIG) apply -f tests/assets/local-tests/backup-pod.yaml \ No newline at end of file diff --git a/tests/assets/k3d/local-path-config.yaml b/tests/assets/k3d/local-path-config.yaml new file mode 100644 index 0000000..de1cde1 --- /dev/null +++ b/tests/assets/k3d/local-path-config.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: local-path-config + namespace: kube-system +data: + config.json: |- + { + "nodePathMap":[ + { + "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths":["/disk"] + } + ] + } + setup: |- + #!/bin/sh + set -eu + mkdir -m 0777 -p "$VOL_DIR" + teardown: |- + #!/bin/sh + set -eu + rm -rf "$VOL_DIR" + helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: busybox + imagePullPolicy: IfNotPresent \ No newline at end of file diff --git a/tests/assets/k3d/manual-storageclass.yaml b/tests/assets/k3d/manual-storageclass.yaml new file mode 100644 index 0000000..4bb6619 --- /dev/null +++ b/tests/assets/k3d/manual-storageclass.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: manual +provisioner: rancher.io/local-path +parameters: + nodePath: /disk +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Delete \ No newline at end of file diff --git a/tests/assets/local-tests/backup-pod.yaml b/tests/assets/local-tests/backup-pod.yaml new file mode 100644 index 0000000..09f7a99 --- /dev/null +++ b/tests/assets/local-tests/backup-pod.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Pod +metadata: + name: backup-test + namespace: default + labels: + app: backup-test +spec: + restartPolicy: Never + containers: + - name: backup + image: backup:latest + imagePullPolicy: IfNotPresent + command: + - sleep + - 1d + env: + - name: MONGODB_URI + value: "mongodb://mongo.default.svc.cluster.local:27017" + - name: DB_NAME + value: "test" + - name: COLLECTION_NAMES + value: "users,messages" + - name: BACKUP_NAME + value: "manual-backup" + - name: S3_BUCKET + value: "" + - name: S3_PREFIX + value: "mongodb-backups" + volumeMounts: + - name: backup-storage + mountPath: /backups + volumes: + - name: backup-storage + emptyDir: {} \ No newline at end of file diff --git a/tests/assets/minio/create-buckets-job.yaml b/tests/assets/minio/create-buckets-job.yaml new file mode 100644 index 0000000..43c81de --- /dev/null +++ b/tests/assets/minio/create-buckets-job.yaml @@ -0,0 +1,32 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: create-minio-buckets + namespace: minio-tenant +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: create-buckets + image: minio/mc:latest + command: + - /bin/sh + - -c + - | + echo "Waiting for MinIO to 
be ready..."
+          until mc alias set myminio https://myminio-hl.minio-tenant.svc.cluster.local:9000 minio minio123; do
+            echo "MinIO not ready yet, retrying in 5 seconds..."
+            sleep 5
+          done
+
+          echo "Creating buckets..."
+          mc mb myminio/backups --ignore-existing
+
+          echo "Buckets created successfully:"
+          mc ls myminio/
+
+          echo "Setting bucket policies..."
+          mc anonymous set download myminio/backups
+
+          echo "Bucket setup complete!"
\ No newline at end of file
diff --git a/tests/assets/minio/tenant-base.yaml b/tests/assets/minio/tenant-base.yaml
new file mode 100644
index 0000000..507d8f0
--- /dev/null
+++ b/tests/assets/minio/tenant-base.yaml
@@ -0,0 +1,104 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: minio-tenant
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: storage-configuration
+  namespace: minio-tenant
+stringData:
+  config.env: |-
+    export MINIO_ROOT_USER="minio"
+    export MINIO_ROOT_PASSWORD="minio123"
+    export MINIO_BROWSER="off"
+type: Opaque
+---
+apiVersion: v1
+data:
+  CONSOLE_ACCESS_KEY: Y29uc29sZQ== # console
+  CONSOLE_SECRET_KEY: Y29uc29sZTEyMw== # console123
+kind: Secret
+metadata:
+  name: storage-user
+  namespace: minio-tenant
+type: Opaque
+---
+apiVersion: minio.min.io/v2
+kind: Tenant
+metadata:
+  annotations:
+    prometheus.io/path: /minio/v2/metrics/cluster
+    prometheus.io/port: "9000"
+    prometheus.io/scrape: "true"
+  labels:
+    app: minio
+  name: myminio
+  namespace: minio-tenant
+spec:
+  certConfig: {}
+  configuration:
+    name: storage-configuration
+  env: []
+  externalCaCertSecret: []
+  externalCertSecret: []
+  externalClientCertSecrets: []
+  features:
+    bucketDNS: false
+    domains: {}
+  image: quay.io/minio/minio:RELEASE.2025-04-08T15-41-24Z
+  imagePullSecret: {}
+  mountPath: /export
+  podManagementPolicy: Parallel
+  pools:
+  - affinity:
+      nodeAffinity: {}
+      podAffinity: {}
+      podAntiAffinity: {}
+    containerSecurityContext:
+      allowPrivilegeEscalation: false
+      capabilities:
+        drop:
+        - ALL
+      runAsGroup: 1000
+      runAsNonRoot: true
+      runAsUser: 1000
+      seccompProfile:
+        type: RuntimeDefault
+    name: pool-0
+    nodeSelector: {}
+    resources: {}
+    securityContext:
+      fsGroup: 1000
+      fsGroupChangePolicy: OnRootMismatch
+      runAsGroup: 1000
+      runAsNonRoot: true
+      runAsUser: 1000
+    servers: 1
+    tolerations: []
+    topologySpreadConstraints: []
+    volumeClaimTemplate:
+      apiVersion: v1
+      kind: persistentvolumeclaims
+      metadata: {}
+      spec:
+        accessModes:
+        - ReadWriteOnce
+        resources:
+          requests:
+            storage: 10Gi
+        storageClassName: manual
+      status: {}
+    volumesPerServer: 1
+  priorityClassName: ""
+  requestAutoCert: true
+  serviceAccountName: ""
+  serviceMetadata:
+    consoleServiceAnnotations: {}
+    consoleServiceLabels: {}
+    minioServiceAnnotations: {}
+    minioServiceLabels: {}
+  subPath: ""
+  users:
+  - name: storage-user
diff --git a/tests/utils/utils.go b/tests/utils/utils.go
index bc24407..12a7755 100644
--- a/tests/utils/utils.go
+++ b/tests/utils/utils.go
@@ -13,8 +13,21 @@ import (
 	. "github.com/onsi/gomega"
 )
 
+func getRootDir() (string, error) {
+	// Shell out through runCommand directly: routing this through Run would
+	// recurse forever, because Run itself calls getRootDir.
+	output, err := runCommand(exec.Command("git", "rev-parse", "--show-toplevel"))
+	if err != nil {
+		return "", err
+	}
+	// git terminates the path with a newline; strip it before it is used as a
+	// working directory.
+	root := string(output)
+	if n := len(root); n > 0 && root[n-1] == '\n' {
+		root = root[:n-1]
+	}
+	return root, nil
+}
+
 func Run(cmd ...string) ([]byte, error) {
+	root, err := getRootDir()
+	if err != nil {
+		return nil, err
+	}
+
 	command := exec.Command(cmd[0], cmd[1:]...)
+
+	command.Dir = root
+
 	return runCommand(command)
 }
From 813652dc708df75cf373197ebfca985475e79138 Mon Sep 17 00:00:00 2001
From: Debdut Chakraborty
Date: Mon, 8 Dec 2025 20:34:44 +0530
Subject: [PATCH 3/9] chore: load mongo testing data k3d

---
 Makefile                                      |  20 ++--
 api/v1alpha1/zz_generated.deepcopy.go         |   1 -
 go.mod                                        |   5 +-
 go.sum                                        |  14 +--
 .../assets/local-tests/mongo-restore-job.yaml | 113 ++++++++++++++++++
 .../{tenant-base.yaml => 0_tenant-base.yaml}  |   0
 ...ets-job.yaml => 1_create-buckets-job.yaml} |   0
 7 files changed, 130 insertions(+), 23 deletions(-)
 create mode 100644 tests/assets/local-tests/mongo-restore-job.yaml
 rename tests/assets/minio/{tenant-base.yaml => 0_tenant-base.yaml} (100%)
 rename tests/assets/minio/{create-buckets-job.yaml => 1_create-buckets-job.yaml} (100%)

diff --git a/Makefile b/Makefile
index 1d61904..9ab087e 100644
--- a/Makefile
+++ b/Makefile
@@ -52,7 +52,8 @@ IMG ?= $(IMAGE_TAG_BASE):$(VERSION)
 BIMG ?= backup:latest
 
 # Reusable kubectl command with kubeconfig
-KUBECTL_WITH_CONFIG = k3d kubeconfig print ${NAME} > /tmp/${NAME}.kube.config && KUBECONFIG=/tmp/${NAME}.kube.config kubectl
+# KUBECTL_WITH_CONFIG = k3d kubeconfig print ${NAME} > /tmp/${NAME}.kube.config && KUBECONFIG=/tmp/${NAME}.kube.config kubectl
+KUBECTL_WITH_CONFIG = KUBECONFIG=/tmp/${NAME}.kube.config kubectl
 
 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))
@@ -131,7 +132,7 @@ docker-build: test ## Build docker image with the manager.
 	docker build -t ${IMG} .
 
 .PHONY: docker-build-no-test
-docker-build-no-test:
+docker-build-no-test: build
 	docker build -t ${IMG} .
 
 .PHONY: docker-push
@@ -191,7 +192,7 @@ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
 
 ## Tool Versions
 KUSTOMIZE_VERSION ?= v3.8.7
-CONTROLLER_TOOLS_VERSION ?= v0.10.0
+CONTROLLER_TOOLS_VERSION ?= v0.19.0
 KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
 
 .PHONY: kustomize
@@ -268,6 +269,7 @@ endif
 	test -d tests/k3d/disk || mkdir -pv tests/k3d/disk
 	k3d cluster list -o json | jq '.[].name' -r | grep -q ${NAME} || \
 		k3d cluster create ${NAME} --kubeconfig-update-default=false --kubeconfig-switch-context=false --no-lb --no-rollback --wait -s1 -a1 --volume $(PWD)/tests/k3d/disk:/disk
+	k3d kubeconfig print ${NAME} > /tmp/${NAME}.kube.config
 
 .PHONY: k3d-add-storageclass
 k3d-add-storageclass: k3d-cluster
@@ -281,9 +283,9 @@ k3d-load-image: docker-build-no-test k3d-cluster k3d-add-storageclass
 	k3d image load ${IMG} -c ${NAME}
 
-.PHONY: k3d-deploy
-k3d-deploy: k3d-load-image
+.PHONY: k3d-deploy-airlock
+k3d-deploy-airlock: k3d-load-image
 	$(KUBECTL_WITH_CONFIG) apply -f config/crd/bases
-	$(KUBECTL_WITH_CONFIG) get namespace airlock-system || $(KUBECTL_WITH_CONFIG) create namespace airlock-system
+	$(KUBECTL_WITH_CONFIG) get namespace airlock-system >/dev/null 2>&1 || $(KUBECTL_WITH_CONFIG) create namespace airlock-system
 	$(KUBECTL_WITH_CONFIG) apply -k config/rbac
 	$(KUBECTL_WITH_CONFIG) apply -f config/manager/manager.yaml
 
@@ -296,7 +298,7 @@
 
 .PHONY: k3d-deploy-mongo
 k3d-deploy-mongo: k3d-cluster
-	$(KUBECTL_WITH_CONFIG) get namespace mongo || $(KUBECTL_WITH_CONFIG) create namespace mongo
+	$(KUBECTL_WITH_CONFIG) get namespace mongo >/dev/null 2>&1 || $(KUBECTL_WITH_CONFIG) create namespace mongo
 	$(KUBECTL_WITH_CONFIG) apply -f tests/assets/mongo
 
 .PHONY: k3d-deploy-minio
@@ -314,4 +316,8 @@ k3d-run-backup-pod: k3d-cluster k3d-load-backup-image
-
$(KUBECTL_WITH_CONFIG) apply -f tests/assets/local-tests/backup-pod.yaml \ No newline at end of file + $(KUBECTL_WITH_CONFIG) apply -f tests/assets/local-tests/backup-pod.yaml + +.PHONY: k3d-load-mongo-data +k3d-load-mongo-data: k3d-deploy-mongo + $(KUBECTL_WITH_CONFIG) apply -f tests/assets/local-tests/mongo-restore-job.yaml \ No newline at end of file diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 31645be..c628730 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2022. diff --git a/go.mod b/go.mod index d6f4387..1f94f8d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/RocketChat/airlock -go 1.25.3 +go 1.24.0 require ( github.com/davecgh/go-spew v1.1.1 @@ -91,8 +91,7 @@ require ( google.golang.org/protobuf v1.36.10 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.34.2 // indirect - k8s.io/component-base v0.34.2 // indirect + k8s.io/apiextensions-apiserver v0.34.2 k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect diff --git a/go.sum b/go.sum index d27b945..b0efa57 100644 --- a/go.sum +++ b/go.sum @@ -9,8 +9,6 @@ github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bF github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= @@ -75,6 +73,8 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -105,10 +105,6 @@ github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8 github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nxadm/tail v1.4.8 
h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= @@ -234,8 +230,6 @@ gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnf gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= @@ -246,16 +240,12 @@ k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= -k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ= -k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= -sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= diff --git a/tests/assets/local-tests/mongo-restore-job.yaml b/tests/assets/local-tests/mongo-restore-job.yaml new file mode 100644 index 0000000..f598d3c --- /dev/null +++ b/tests/assets/local-tests/mongo-restore-job.yaml @@ -0,0 +1,113 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: backup-pvc + namespace: mongo + labels: + app: backup-storage +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Mi + storageClassName: manual +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: backup-pv + labels: + app: backup-storage +spec: + capacity: + storage: 500Mi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + 
path: /tmp/backup-storage
+  storageClassName: manual # must match the PVC's storageClassName for the static PV to bind
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: restore-job
+  namespace: mongo
+  labels:
+    app: restore-job
+spec:
+  template:
+    metadata:
+      labels:
+        app: restore-job
+    spec:
+      restartPolicy: Never
+      initContainers:
+        - name: fix-permissions
+          image: busybox:latest
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+            - -c
+            - |
+              echo "Setting permissions on /backups directory"
+              chmod 777 /backups
+          volumeMounts:
+            - name: backup-storage
+              mountPath: /backups
+        - name: download-backup
+          image: curlimages/curl:latest
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+            - -c
+            - |
+              echo "downloading sample dataset"
+              if test -f /backups/$BACKUP_NAME; then
+                echo "backup exists already"
+              else
+                curl https://atlas-education.s3.amazonaws.com/sampledata.archive -o /backups/$BACKUP_NAME
+              fi
+          env:
+            - name: BACKUP_NAME
+              value: "test.archive"
+          volumeMounts:
+            - name: backup-storage
+              mountPath: /backups
+      containers:
+        - name: restore
+          image: mongo:7
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/bash
+            - -c
+            - |
+              echo "starting mongodb sample data load"
+
+              while :; do
+                echo "attempting to connect to mongo . . . . . . . . . . . "
+                if mongosh "$MONGODB_URI" --eval "db.runCommand('ping')" > /dev/null 2>&1; then
+                  echo "successfully connected to mongo"
+                  break
+                else
+                  echo "mongodb not yet ready, sleeping for 5 seconds"
+                  sleep 5
+                fi
+              done
+
+              # Perform the restore
+              echo "starting mongodb restore"
+              mongorestore --uri="$MONGODB_URI" --drop --archive < /backups/$BACKUP_NAME
+              echo "Restore completed successfully"
+          env:
+            - name: MONGODB_URI
+              value: "mongodb://root:root@mongo.mongo.svc.cluster.local:27017"
+            - name: BACKUP_NAME
+              value: "test.archive"
+          volumeMounts:
+            - name: backup-storage
+              mountPath: /backups
+      volumes:
+        - name: backup-storage
+          persistentVolumeClaim:
+            claimName: backup-pvc
\ No newline at end of file
diff --git a/tests/assets/minio/tenant-base.yaml b/tests/assets/minio/0_tenant-base.yaml
similarity index 100%
rename from tests/assets/minio/tenant-base.yaml
rename to tests/assets/minio/0_tenant-base.yaml
diff --git a/tests/assets/minio/create-buckets-job.yaml b/tests/assets/minio/1_create-buckets-job.yaml
similarity index 100%
rename from tests/assets/minio/create-buckets-job.yaml
rename to tests/assets/minio/1_create-buckets-job.yaml

From da80faa9a51bae97b1a413fa15e719f88ed8c900 Mon Sep 17 00:00:00 2001
From: Debdut Chakraborty
Date: Mon, 8 Dec 2025 20:41:57 +0530
Subject: [PATCH 4/9] docs: Makefile new targets

---
 README.md | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/README.md b/README.md
index 656986a..e90bbcb 100644
--- a/README.md
+++ b/README.md
@@ -74,3 +74,19 @@
 
 More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html)
 
+## Testing
+
+A testing environment can be spun up locally with k3d.
+
+Run
+```sh
+make k3d-load-mongo-data k3d-deploy-airlock k3d-deploy-minio NAME=airlock IMG=controller:latest
+```
+
+This:
+1. deploys a k3d cluster
+2. sets up a storage class that uses the local path `tests/k3d/disk`
+3. deploys minio
+4. deploys mongo
+5. loads sample data into mongo
+6.
deploys airlock operator \ No newline at end of file From 61cac3fca6b8a710a43f59a8d02a313e62a62a00 Mon Sep 17 00:00:00 2001 From: Debdut Chakraborty Date: Wed, 10 Dec 2025 15:48:41 +0530 Subject: [PATCH 5/9] chore: test suites to use make targets --- ...oud.rocket.chat_mongodbaccessrequests.yaml | 66 ++++++++--------- ...lock.cloud.rocket.chat_mongodbbackups.yaml | 66 ++++++++--------- ...ock.cloud.rocket.chat_mongodbclusters.yaml | 73 ++++++++----------- config/rbac/role.yaml | 62 +--------------- tests/controller_test.go | 23 +----- tests/suite_test.go | 13 +++- tests/utils/k3d.go | 27 ++++--- tests/utils/kubectl.go | 10 +++ tests/utils/utils.go | 34 ++++++++- 9 files changed, 163 insertions(+), 211 deletions(-) diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml index 4145e26..7446320 100644 --- a/config/crd/bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: mongodbaccessrequests.airlock.cloud.rocket.chat spec: group: airlock.cloud.rocket.chat @@ -32,14 +31,19 @@ spec: API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -69,43 +73,35 @@ spec: conditions: description: Conditions is the list of status condition updates items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -120,10 +116,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml index 8328fc8..87f7fd2 100644 --- a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: mongodbbackups.airlock.cloud.rocket.chat spec: group: airlock.cloud.rocket.chat @@ -20,14 +19,19 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -104,43 +108,35 @@ spec: type: string conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -155,10 +151,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml index 2df808b..efe5384 100644 --- a/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.19.0 name: mongodbclusters.airlock.cloud.rocket.chat spec: group: airlock.cloud.rocket.chat @@ -28,14 +27,19 @@ spec: description: MongoDBCluster is the Schema for the mongodbclusters API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -51,10 +55,9 @@ spec: properties: enabled: default: false - description: If this is set, the cluster will be enabled for scheduled - autoscaling. The way it works is that the cluster will be scaled - up to the high tier at the specified time, and scaled down to - the lowTier at the specified time. + description: |- + If this is set, the cluster will be enabled for scheduled autoscaling. + The way it works is that the cluster will be scaled up to the high tier at the specified time, and scaled down to the lowTier at the specified time. type: boolean highTier: default: M50 @@ -135,43 +138,35 @@ spec: properties: conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. 
+ description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -186,10 +181,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index feabc6b..afb379f 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: @@ -16,13 +15,6 @@ rules: - "" resources: - nodes - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - secrets verbs: - get @@ -46,57 +38,7 @@ rules: - airlock.cloud.rocket.chat resources: - mongodbaccessrequests - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - airlock.cloud.rocket.chat - resources: - - mongodbaccessrequests/finalizers - verbs: - - update -- apiGroups: - - airlock.cloud.rocket.chat - resources: - - mongodbaccessrequests/status - verbs: - - get - - patch - - update -- apiGroups: - - airlock.cloud.rocket.chat - resources: - mongodbbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - airlock.cloud.rocket.chat - resources: - - mongodbbackups/finalizers - verbs: - - update -- apiGroups: - - airlock.cloud.rocket.chat - resources: - - mongodbbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - airlock.cloud.rocket.chat - resources: - mongodbclusters verbs: - create @@ -109,12 +51,16 @@ rules: - apiGroups: - airlock.cloud.rocket.chat resources: + - mongodbaccessrequests/finalizers + - mongodbbackups/finalizers - mongodbclusters/finalizers verbs: - update - apiGroups: - airlock.cloud.rocket.chat resources: + - mongodbaccessrequests/status + - mongodbbackups/status - mongodbclusters/status verbs: - get diff --git a/tests/controller_test.go b/tests/controller_test.go index 3357cf2..84b3250 100644 --- a/tests/controller_test.go +++ b/tests/controller_test.go @@ -3,7 +3,6 @@ package tests import ( "context" "fmt" - "os" "path/filepath" "strings" "time" @@ -31,23 +30,11 @@ const accessRequestName = 
"test-request" var _ = Describe("airlock", Ordered, func() { BeforeAll(func() { By("Creating the namespace") - Expect(kubectl.CreateNamespace(namespace)).ToNot(HaveOccurred()) + Expect(kubectl.CreateNamespaceIfNotExists(namespace)).ToNot(HaveOccurred()) By("applying RBAC") Expect(kubectl.KApply(filepath.Join("..", "config", "rbac"))).ToNot(HaveOccurred()) - By("installing mongo namespace") - Expect(kubectl.CreateNamespace("mongo")).ToNot(HaveOccurred()) - - mongoImage := os.Getenv("LOAD_MONGO_FROM_LOCAL") - if mongoImage != "" { - By("loading mongo image from local") - Expect(cluster.LoadImage(mongoImage)).ToNot(HaveOccurred()) - } - - By("installing mongodb pod and service") - Expect(kubectl.Apply(filepath.Join("assets", "mongo"))).ToNot(HaveOccurred()) - getPodStatus := func() error { output, err := kubectl.WithNamespace("mongo").GetPods("-l", "app=mongo", "-o", "jsonpath={.items[*].status}") if len(output) > 0 { @@ -71,14 +58,6 @@ var _ = Describe("airlock", Ordered, func() { Context("Airlock Controller", func() { It("should run successfully", func() { - // FIXME: this is failig -_- - // utils.BuildImage("controller:latest") - - By("deploying airlock") - err := kubectl.Apply(filepath.Join("..", "config", "manager", "manager.yaml")) - - Expect(err).NotTo(HaveOccurred()) - By("validating pod status phase=running") getPodStatus := func() error { output, err := kubectl.WithNamespace(namespace).GetPods("-l", "app.kubernetes.io/name=airlock", "-o", "jsonpath={.items[*].status}") diff --git a/tests/suite_test.go b/tests/suite_test.go index b0c7297..9abd513 100644 --- a/tests/suite_test.go +++ b/tests/suite_test.go @@ -57,9 +57,14 @@ var _ = BeforeSuite(func() { err = cluster.Start() Expect(err).NotTo(HaveOccurred()) - By("load controller image") - err = cluster.LoadImage("controller:latest") - Expect(err).NotTo(HaveOccurred()) + By("Deploy mongodb") + Expect(cluster.DeployMongo()).NotTo(HaveOccurred()) + + By("Deploy minio") + Expect(cluster.DeployMinio()).NotTo(HaveOccurred()) + + By("Deploy airlock") + Expect(cluster.DeployAirlock()).NotTo(HaveOccurred()) By("get kubectl handler") kubectl, err = cluster.Kubectl() @@ -76,6 +81,8 @@ var _ = BeforeSuite(func() { Expect(k).NotTo(BeNil()) k8sClient = *k + By("load mongodb sample data for testing") + Expect(utils.RunStreamOutput("make", "k3d-load-mongo-data", utils.MakeVar("NAME", "airlock-test"))) }) var _ = AfterSuite(func() { diff --git a/tests/utils/k3d.go b/tests/utils/k3d.go index ab5aba9..d823348 100644 --- a/tests/utils/k3d.go +++ b/tests/utils/k3d.go @@ -1,8 +1,6 @@ package utils import ( - "fmt" - airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -21,24 +19,31 @@ func NewK3dCluster(name string) K3dCluster { func (k K3dCluster) Start() error { // stdout, err := Run("k3d", "cluster", "create", k.name, "--kubeconfig-update-default=false", "--kubeconfig-switch-context=false", "--no-lb", "--no-rollback", "--wait", "-s1", "-a1") - stdout, err := Run("make", "k3d-cluster") - fmt.Println(string(stdout)) - return err + return RunStreamOutput("make", "k3d-cluster", MakeVar("NAME", k.name)) } func (k K3dCluster) Stop() error { - _, err := Run("k3d", "cluster", "stop", k.name) - return err + return RunStreamOutput("k3d", "cluster", "stop", k.name) } func (k K3dCluster) Delete() error { - _, err := Run("k3d", "cluster", "delete", k.name) - return err + return RunStreamOutput("k3d", "cluster", "delete", k.name) } func (k K3dCluster) LoadImage(image string) error { - _, err := 
Run("k3d", "image", "import", "-c", k.name, image) - return err + return RunStreamOutput("k3d", "image", "import", "-c", k.name, image) +} + +func (k K3dCluster) DeployMongo() error { + return RunStreamOutput("make", "k3d-deploy-mongo", MakeVar("NAME", k.name)) +} + +func (k K3dCluster) DeployMinio() error { + return RunStreamOutput("make", "k3d-deploy-minio", MakeVar("NAME", k.name)) +} + +func (k K3dCluster) DeployAirlock() error { + return RunStreamOutput("make", "k3d-deploy-airlock", MakeVar("NAME", k.name), MakeVar("IMG", "controller:latest")) } func (k K3dCluster) Kubeconfig() ([]byte, error) { diff --git a/tests/utils/kubectl.go b/tests/utils/kubectl.go index 4e15dc6..5447aec 100644 --- a/tests/utils/kubectl.go +++ b/tests/utils/kubectl.go @@ -66,6 +66,16 @@ func (k Kubectl) DescribeDeployment(name string) ([]byte, error) { return k.run([]string{"describe", "deployment", name}) } +func (k Kubectl) CreateNamespaceIfNotExists(name string) error { + _, err := k.Get("namespace", name) + if err != nil { + _, err := k.run([]string{"create", "namespace", name}) + return err + } + + return nil +} + func (k Kubectl) CreateNamespace(name string) error { _, err := k.run([]string{"create", "namespace", name}) return err diff --git a/tests/utils/utils.go b/tests/utils/utils.go index 12a7755..74bf21e 100644 --- a/tests/utils/utils.go +++ b/tests/utils/utils.go @@ -2,6 +2,7 @@ package utils import ( "fmt" + "os" "os/exec" //nolint:golint @@ -14,8 +15,9 @@ import ( ) func getRootDir() (string, error) { - output, err := Run("git", "rev-parse", "--show-toplevel") - return string(output), err + output, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput() + // remove the \n before returning + return string(output[:len(output)-1]), err } func Run(cmd ...string) ([]byte, error) { @@ -31,6 +33,30 @@ func Run(cmd ...string) ([]byte, error) { return runCommand(command) } +func RunStreamOutput(cmd ...string) error { + root, err := getRootDir() + if err != nil { + return err + } + + command := exec.Command(cmd[0], cmd[1:]...) 
+ + command.Dir = root + + command.Stdout = os.Stdout + + fmt.Fprintf(GinkgoWriter, "running: %s\n", command.String()) + + err = command.Run() + if err != nil { + return fmt.Errorf("%s failed with error: %v", command, err) + } + + fmt.Println("here") + + return nil +} + func runCommand(command *exec.Cmd) ([]byte, error) { fmt.Fprintf(GinkgoWriter, "running: %s\n", command.String()) output, err := command.CombinedOutput() @@ -54,3 +80,7 @@ func BuildImage(imageName string) { ExpectWithOffset(1, err).NotTo(HaveOccurred()) } + +func MakeVar(variable, value string) string { + return fmt.Sprintf("%s=%s", variable, value) +} From 897a9268d9f9f71b51fc4776bf12f99d3d4d0427 Mon Sep 17 00:00:00 2001 From: Debdut Chakraborty Date: Wed, 10 Dec 2025 17:05:18 +0530 Subject: [PATCH 6/9] chore: moving code around --- tests/controller_test.go | 202 +++++++++++++++++---------------------- tests/suite_test.go | 24 ++--- tests/utils/kubectl.go | 75 ++++++++++++++- tests/utils/utils.go | 2 - 4 files changed, 177 insertions(+), 126 deletions(-) diff --git a/tests/controller_test.go b/tests/controller_test.go index 84b3250..ea49caa 100644 --- a/tests/controller_test.go +++ b/tests/controller_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "path/filepath" - "strings" "time" //nolint:golint @@ -27,7 +26,7 @@ const namespace = "airlock-system" const accessRequestName = "test-request" -var _ = Describe("airlock", Ordered, func() { +var _ = Describe("Airlock Controller", Ordered, func() { BeforeAll(func() { By("Creating the namespace") Expect(kubectl.CreateNamespaceIfNotExists(namespace)).ToNot(HaveOccurred()) @@ -35,20 +34,15 @@ var _ = Describe("airlock", Ordered, func() { By("applying RBAC") Expect(kubectl.KApply(filepath.Join("..", "config", "rbac"))).ToNot(HaveOccurred()) - getPodStatus := func() error { - output, err := kubectl.WithNamespace("mongo").GetPods("-l", "app=mongo", "-o", "jsonpath={.items[*].status}") - if len(output) > 0 { - fmt.Println(string(output)) - } - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - if !strings.Contains(string(output), "\"phase\":\"Running\"") { - return fmt.Errorf("airlock pod in %s status", output) - } + By("validating mongo is running") + Eventually(func() error { + return kubectl.WithNamespace("mongo").IsAnyPodReady("mongo", map[string]string{"app": "mongo"}) + }, time.Minute, time.Second).Should(Succeed()) - return nil - } - - EventuallyWithOffset(1, getPodStatus, time.Minute, time.Second).Should(Succeed()) + By("validating airlock is running") + Eventually(func() error { + return kubectl.WithNamespace("airlock-system").IsAnyPodReady("airlock", map[string]string{"app.kubernetes.io/name": "airlock"}) + }, time.Minute, time.Second).Should(Succeed()) }) // AfterAll(func() { @@ -56,25 +50,7 @@ var _ = Describe("airlock", Ordered, func() { // Expect(kubectl.DeleteNamespace(namespace)).ToNot(HaveOccurred()) // }) - Context("Airlock Controller", func() { - It("should run successfully", func() { - By("validating pod status phase=running") - getPodStatus := func() error { - output, err := kubectl.WithNamespace(namespace).GetPods("-l", "app.kubernetes.io/name=airlock", "-o", "jsonpath={.items[*].status}") - if len(output) > 0 { - fmt.Println(string(output)) - } - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - if !strings.Contains(string(output), "\"phase\":\"Running\"") { - return fmt.Errorf("airlock pod in %s status", output) - } - - return nil - } - - EventuallyWithOffset(1, getPodStatus, time.Minute, time.Second).Should(Succeed()) - }) - + Context("MongoDBCluster", func() { 
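+		// Applies the MongoDBCluster CR from assets/airlock and expects the
+		// controller to eventually report the resource as ready.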
It("should mark cluster resource as ready", func() { By("applying mongodb cluster resources") Expect(kubectl.Apply(filepath.Join("assets", "airlock"))).ToNot(HaveOccurred()) @@ -95,106 +71,108 @@ var _ = Describe("airlock", Ordered, func() { }, time.Minute, time.Second).Should(Succeed()) }) - It("should create mongo user as per access request", func() { - accessRequestResource := &airlockv1alpha1.MongoDBAccessRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: accessRequestName, - Namespace: "mongo", - }, + Context("MongoDBAccessRequest", func() { + It("should create mongo user as per access request", func() { + accessRequestResource := &airlockv1alpha1.MongoDBAccessRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: accessRequestName, + Namespace: "mongo", + }, - Spec: airlockv1alpha1.MongoDBAccessRequestSpec{ - Database: "test", - ClusterName: "airlock-test", - SecretName: "test-access-secret", - }, - } + Spec: airlockv1alpha1.MongoDBAccessRequestSpec{ + Database: "test", + ClusterName: "airlock-test", + SecretName: "test-access-secret", + }, + } - err := k8sClient.Create(context.Background(), accessRequestResource) - Expect(err).ToNot(HaveOccurred()) + err := k8sClient.Create(context.Background(), accessRequestResource) + Expect(err).ToNot(HaveOccurred()) - // next we need to wait for the user to have been created - EventuallyWithOffset(1, func() error { - accessRequest := airlockv1alpha1.MongoDBAccessRequest{} + // next we need to wait for the user to have been created + EventuallyWithOffset(1, func() error { + accessRequest := airlockv1alpha1.MongoDBAccessRequest{} - err = k8sClient.Get(context.Background(), client.ObjectKey{Name: accessRequestName, Namespace: "mongo"}, &accessRequest) - if err != nil { - return err - } + err = k8sClient.Get(context.Background(), client.ObjectKey{Name: accessRequestName, Namespace: "mongo"}, &accessRequest) + if err != nil { + return err + } - ready := false + ready := false - // TODO: i doubt this is full proof - for _, condition := range accessRequest.Status.Conditions { - if condition.Type == "Ready" { - ready = true - break + // TODO: i doubt this is full proof + for _, condition := range accessRequest.Status.Conditions { + if condition.Type == "Ready" { + ready = true + break + } } - } - if !ready { - return fmt.Errorf("access request not yet ready") - } + if !ready { + return fmt.Errorf("access request not yet ready") + } - var secret v1.Secret + var secret v1.Secret - err = k8sClient.Get(context.Background(), client.ObjectKey{ - Name: accessRequestResource.Spec.SecretName, - Namespace: "mongo", - }, &secret) + err = k8sClient.Get(context.Background(), client.ObjectKey{ + Name: accessRequestResource.Spec.SecretName, + Namespace: "mongo", + }, &secret) - if err != nil { - return err - } + if err != nil { + return err + } - _, hasConnectionString := secret.Data["connectionString"] - if !hasConnectionString { - return fmt.Errorf("generated secret is missing connectionSecret") - } + _, hasConnectionString := secret.Data["connectionString"] + if !hasConnectionString { + return fmt.Errorf("generated secret is missing connectionSecret") + } - _, hasPassword := secret.Data["password"] - if !hasPassword { - return fmt.Errorf("generated secret is missing password") - } + _, hasPassword := secret.Data["password"] + if !hasPassword { + return fmt.Errorf("generated secret is missing password") + } - return nil - }, time.Minute, time.Second).Should(Succeed()) - }) + return nil + }, time.Minute, time.Second).Should(Succeed()) + }) - It("should delete the access 
secret if the corresponding accessrequest is deleted", func() { - accessRequestResource := &airlockv1alpha1.MongoDBAccessRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: accessRequestName, - Namespace: "mongo", - }, + It("should delete the access secret if the corresponding accessrequest is deleted", func() { + accessRequestResource := &airlockv1alpha1.MongoDBAccessRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: accessRequestName, + Namespace: "mongo", + }, - Spec: airlockv1alpha1.MongoDBAccessRequestSpec{ - Database: "test", - ClusterName: "airlock-test", - SecretName: "test-access-secret", - }, - } + Spec: airlockv1alpha1.MongoDBAccessRequestSpec{ + Database: "test", + ClusterName: "airlock-test", + SecretName: "test-access-secret", + }, + } - err := k8sClient.Delete(context.Background(), accessRequestResource) - Expect(err).ToNot(HaveOccurred()) + err := k8sClient.Delete(context.Background(), accessRequestResource) + Expect(err).ToNot(HaveOccurred()) - Eventually(func() error { - var secret v1.Secret + Eventually(func() error { + var secret v1.Secret - err = k8sClient.Get(context.Background(), client.ObjectKey{ - Name: accessRequestResource.Spec.SecretName, - Namespace: "mongo", - }, &secret) + err = k8sClient.Get(context.Background(), client.ObjectKey{ + Name: accessRequestResource.Spec.SecretName, + Namespace: "mongo", + }, &secret) - if err == nil { - return fmt.Errorf("secret hasn't been deleted yet") - } + if err == nil { + return fmt.Errorf("secret hasn't been deleted yet") + } - if !errors.IsNotFound(err) { - return fmt.Errorf("failed to try to fetch secret: %v", err) - } + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to try to fetch secret: %v", err) + } - return nil - }, time.Minute, time.Second).Should(Succeed()) + return nil + }, time.Minute, time.Second).Should(Succeed()) + }) }) It("should create and manage MongoDBBackup", func() { diff --git a/tests/suite_test.go b/tests/suite_test.go index 9abd513..8d8c6a5 100644 --- a/tests/suite_test.go +++ b/tests/suite_test.go @@ -57,6 +57,19 @@ var _ = BeforeSuite(func() { err = cluster.Start() Expect(err).NotTo(HaveOccurred()) + By("get kubectl handler") + kubectl, err = cluster.Kubectl() + Expect(err).NotTo(HaveOccurred()) + Expect(kubectl).NotTo(BeNil()) + + By("get k8s client") + k, err := cluster.K8sClient() + Expect(err).NotTo(HaveOccurred()) + Expect(k).NotTo(BeNil()) + k8sClient = *k + + kubectl.SetK8sClient(k8sClient) + By("Deploy mongodb") Expect(cluster.DeployMongo()).NotTo(HaveOccurred()) @@ -66,21 +79,10 @@ var _ = BeforeSuite(func() { By("Deploy airlock") Expect(cluster.DeployAirlock()).NotTo(HaveOccurred()) - By("get kubectl handler") - kubectl, err = cluster.Kubectl() - Expect(err).NotTo(HaveOccurred()) - Expect(kubectl).NotTo(BeNil()) - By("apply CRDs") err = kubectl.Apply(filepath.Join("..", "config", "crd", "bases")) Expect(err).NotTo(HaveOccurred()) - By("get k8s client") - k, err := cluster.K8sClient() - Expect(err).NotTo(HaveOccurred()) - Expect(k).NotTo(BeNil()) - k8sClient = *k - By("load mongodb sample data for testing") Expect(utils.RunStreamOutput("make", "k3d-load-mongo-data", utils.MakeVar("NAME", "airlock-test"))) }) diff --git a/tests/utils/kubectl.go b/tests/utils/kubectl.go index 5447aec..7fcaf0e 100644 --- a/tests/utils/kubectl.go +++ b/tests/utils/kubectl.go @@ -2,19 +2,33 @@ package utils import ( "bytes" + "context" + "errors" + "fmt" "os/exec" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" ) type Kubectl 
struct { kubeConfig []byte namespace string + + k8sClient client.Client } func NewKubectl(kubeConfig []byte) Kubectl { return Kubectl{kubeConfig: kubeConfig} } +func (k *Kubectl) SetK8sClient(c client.Client) { + k.k8sClient = c +} + func (k Kubectl) getArgs(cmd []string) []string { args := []string{"--kubeconfig=/dev/fd/0"} @@ -41,7 +55,7 @@ func (k Kubectl) run(cmd []string) ([]byte, error) { } func (k Kubectl) WithNamespace(namespace string) Kubectl { - return Kubectl{namespace: namespace, kubeConfig: k.kubeConfig} + return Kubectl{namespace: namespace, kubeConfig: k.kubeConfig, k8sClient: k.k8sClient} } func (k Kubectl) Apply(file string) error { @@ -89,3 +103,62 @@ func (k Kubectl) DeleteNamespace(name string) error { func (k Kubectl) Get(args ...string) ([]byte, error) { return k.run(append([]string{"get"}, args...)) } + +var noK8sClient = errors.New("k8sClient not found") + +// TODO: maybe move to diff struct +func (k Kubectl) isAnyPodReadyNative(pod string, selector map[string]string) error { + if k.k8sClient == nil { + fmt.Println("no k8sClient is set") + return noK8sClient + } + + podList := &corev1.PodList{} + listOptions := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(selector), + } + + if k.namespace != "" { + listOptions.Namespace = k.namespace + } + + err := k.k8sClient.List(context.Background(), podList, listOptions) + if err != nil { + return err + } + + // Check if any pods are ready + for _, pod := range podList.Items { + if pod.Status.Phase == corev1.PodRunning { + return nil + } + } + + return fmt.Errorf("no pods are ready for %s", pod) +} +func (k Kubectl) IsAnyPodReady(pod string, selector map[string]string) error { + if err := k.isAnyPodReadyNative(pod, selector); !errors.Is(err, noK8sClient) { + return err + } + + selectorStrings := []string{} + for label, value := range selector { + selectorStrings = append(selectorStrings, fmt.Sprintf("%s=%s", label, value)) + } + + output, err := k.GetPods("-l", strings.Join(selectorStrings, ","), "-o", "jsonpath={.items[*].status}") + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if err != nil { + return err + } + + if !strings.Contains(string(output), "\"phase\":\"Running\"") { + return fmt.Errorf("%s pod in %s status", pod, output) + } + + return nil +} diff --git a/tests/utils/utils.go b/tests/utils/utils.go index 74bf21e..b1063b3 100644 --- a/tests/utils/utils.go +++ b/tests/utils/utils.go @@ -52,8 +52,6 @@ func RunStreamOutput(cmd ...string) error { return fmt.Errorf("%s failed with error: %v", command, err) } - fmt.Println("here") - return nil } From d6f4895759d16c47928553a25b3874d94fb7033f Mon Sep 17 00:00:00 2001 From: Debdut Chakraborty Date: Thu, 11 Dec 2025 17:31:16 +0530 Subject: [PATCH 7/9] noop --- Makefile | 3 +- api/v1alpha1/mongodbbackup_types.go | 39 +-- api/v1alpha1/mongodbcluster_types.go | 2 + api/v1alpha1/zz_generated.deepcopy.go | 97 +----- backup-image/Dockerfile | 9 - backup-image/entrypoint.sh | 302 +++++++++--------- ...lock.cloud.rocket.chat_mongodbbackups.yaml | 63 +--- ...ock.cloud.rocket.chat_mongodbclusters.yaml | 3 + config/rbac/role.yaml | 10 + controllers/mongodbbackup_controller.go | 279 +++++++--------- tests/assets/airlock/mongodbcluster.yaml | 1 + tests/assets/local-tests/mongodbbackup.yaml | 15 + tests/controller_test.go | 45 --- 13 files changed, 339 insertions(+), 529 deletions(-) create mode 100644 tests/assets/local-tests/mongodbbackup.yaml diff --git a/Makefile b/Makefile index 9ab087e..6689736 100644 --- a/Makefile +++ b/Makefile @@ -117,7 +117,7 @@ 
test: manifests generate fmt vet ## Run tests. ##@ Build .PHONY: build -build: generate fmt vet ## Build manager binary. +build: generate manifests fmt vet ## Build manager binary. CGO_ENABLED=0 GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -o bin/manager main.go .PHONY: run @@ -288,6 +288,7 @@ k3d-deploy-airlock: k3d-load-image $(KUBECTL_WITH_CONFIG) get namespace airlock-system 2>&1 >/dev/null || $(KUBECTL_WITH_CONFIG) create namespace airlock-system $(KUBECTL_WITH_CONFIG) apply -k config/rbac $(KUBECTL_WITH_CONFIG) apply -f config/manager/manager.yaml + $(KUBECTL_WITH_CONFIG) apply -f tests/assets/airlock .PHONY: k3d-destroy k3d-destroy: diff --git a/api/v1alpha1/mongodbbackup_types.go b/api/v1alpha1/mongodbbackup_types.go index 917a4bf..af8bf5c 100644 --- a/api/v1alpha1/mongodbbackup_types.go +++ b/api/v1alpha1/mongodbbackup_types.go @@ -8,38 +8,25 @@ import ( // +kubebuilder:object:generate=true // +k8s:deepcopy-gen=true type MongoDBBackupSpec struct { - MongoDBRef MongoDBRef `json:"mongodbRef"` - Namespaces []MongoDBNamespace `json:"namespaces,omitempty"` - Storage MongoDBBackupStorage `json:"storage"` + Cluster string `json:"cluster"` + Database string `json:"database"` + ExcludedCollections []string `json:"excludedCollections"` + IncludedCollections []string `json:"includedCollections"` + BackupBucketSecretRef MongoDbBackupBucketSecretRef `json:"backupBucketSecretRef"` } -type MongoDBRef struct { +type MongoDbBackupBucketSecretRef struct { Name string `json:"name"` Namespace string `json:"namespace"` } -type MongoDBNamespace struct { - Database string `json:"database"` - Collections []string `json:"collections,omitempty"` -} - -type MongoDBBackupStorage struct { - Type string `json:"type"` - S3 *MongoDBBackupS3 `json:"s3,omitempty"` -} - -type MongoDBBackupS3 struct { - Endpoint string `json:"endpoint"` - Bucket string `json:"bucket"` - Region string `json:"region"` - SecretRef S3SecretRef `json:"secretRef"` - Prefix string `json:"prefix,omitempty"` -} - -type S3SecretRef struct { - Name string `json:"name"` - Key string `json:"key"` -} +// type MongoDBBackupS3 struct { +// Endpoint string `json:"endpoint"` +// Bucket string `json:"bucket"` +// Region string `json:"region"` +// SecretRef S3SecretRef `json:"secretRef"` +// Prefix string `json:"prefix,omitempty"` +// } // MongoDBBackupStatus defines the observed state of MongoDBBackup // +kubebuilder:object:generate=true diff --git a/api/v1alpha1/mongodbcluster_types.go b/api/v1alpha1/mongodbcluster_types.go index a818230..1bab0c5 100644 --- a/api/v1alpha1/mongodbcluster_types.go +++ b/api/v1alpha1/mongodbcluster_types.go @@ -53,6 +53,8 @@ type MongoDBClusterSpec struct { AtlasNodeIPAccessStrategy string `json:"atlasNodeIpAccessStrategy,omitempty"` AtlasScheduledAutoscaling *AtlasScheduledAutoscaling `json:"atlasScheduledAutoscaling,omitempty"` + + BackupImage string `json:"backupImage"` } type AtlasScheduledAutoscaling struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c628730..376f61a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -195,34 +195,20 @@ func (in *MongoDBBackupList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MongoDBBackupS3) DeepCopyInto(out *MongoDBBackupS3) { - *out = *in - out.SecretRef = in.SecretRef -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupS3. -func (in *MongoDBBackupS3) DeepCopy() *MongoDBBackupS3 { - if in == nil { - return nil - } - out := new(MongoDBBackupS3) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MongoDBBackupSpec) DeepCopyInto(out *MongoDBBackupSpec) { *out = *in - out.MongoDBRef = in.MongoDBRef - if in.Namespaces != nil { - in, out := &in.Namespaces, &out.Namespaces - *out = make([]MongoDBNamespace, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.ExcludedCollections != nil { + in, out := &in.ExcludedCollections, &out.ExcludedCollections + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedCollections != nil { + in, out := &in.IncludedCollections, &out.IncludedCollections + *out = make([]string, len(*in)) + copy(*out, *in) } - in.Storage.DeepCopyInto(&out.Storage) + out.BackupBucketSecretRef = in.BackupBucketSecretRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupSpec. @@ -265,26 +251,6 @@ func (in *MongoDBBackupStatus) DeepCopy() *MongoDBBackupStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MongoDBBackupStorage) DeepCopyInto(out *MongoDBBackupStorage) { - *out = *in - if in.S3 != nil { - in, out := &in.S3, &out.S3 - *out = new(MongoDBBackupS3) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStorage. -func (in *MongoDBBackupStorage) DeepCopy() *MongoDBBackupStorage { - if in == nil { - return nil - } - out := new(MongoDBBackupStorage) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MongoDBCluster) DeepCopyInto(out *MongoDBCluster) { *out = *in @@ -387,51 +353,16 @@ func (in *MongoDBClusterStatus) DeepCopy() *MongoDBClusterStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MongoDBNamespace) DeepCopyInto(out *MongoDBNamespace) { - *out = *in - if in.Collections != nil { - in, out := &in.Collections, &out.Collections - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBNamespace. -func (in *MongoDBNamespace) DeepCopy() *MongoDBNamespace { - if in == nil { - return nil - } - out := new(MongoDBNamespace) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MongoDBRef) DeepCopyInto(out *MongoDBRef) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBRef. -func (in *MongoDBRef) DeepCopy() *MongoDBRef { - if in == nil { - return nil - } - out := new(MongoDBRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *S3SecretRef) DeepCopyInto(out *S3SecretRef) { +func (in *MongoDbBackupBucketSecretRef) DeepCopyInto(out *MongoDbBackupBucketSecretRef) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SecretRef. -func (in *S3SecretRef) DeepCopy() *S3SecretRef { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDbBackupBucketSecretRef. +func (in *MongoDbBackupBucketSecretRef) DeepCopy() *MongoDbBackupBucketSecretRef { if in == nil { return nil } - out := new(S3SecretRef) + out := new(MongoDbBackupBucketSecretRef) in.DeepCopyInto(out) return out } diff --git a/backup-image/Dockerfile b/backup-image/Dockerfile index 04a6e9b..cceb40a 100644 --- a/backup-image/Dockerfile +++ b/backup-image/Dockerfile @@ -1,6 +1,5 @@ FROM mongo:7 -# Install AWS CLI, jq, and other utilities RUN apt-get update && \ apt-get install -y \ curl \ @@ -9,21 +8,13 @@ RUN apt-get update && \ coreutils \ && rm -rf /var/lib/apt/lists/* -# Install AWS CLI v2 RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ unzip awscliv2.zip && \ ./aws/install && \ rm -rf awscliv2.zip aws/ -# Create backup directory RUN mkdir -p /backups - -# Copy entrypoint script COPY entrypoint.sh /usr/local/bin/entrypoint.sh RUN chmod +x /usr/local/bin/entrypoint.sh - -# Set working directory WORKDIR /backups - -# Set entrypoint ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] \ No newline at end of file diff --git a/backup-image/entrypoint.sh b/backup-image/entrypoint.sh index 1eb102e..83308e5 100644 --- a/backup-image/entrypoint.sh +++ b/backup-image/entrypoint.sh @@ -1,150 +1,154 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# Environment variables (with defaults) -MONGODB_URI=${MONGODB_URI:-"mongodb://localhost:27017"} -DB_NAME=${DB_NAME:-""} -COLLECTION_NAMES=${COLLECTION_NAMES:-""} # Comma-separated list -BACKUP_DIR="/backups" -S3_BUCKET=${S3_BUCKET:-""} -S3_PREFIX=${S3_PREFIX:-"mongodb-backups"} -BACKUP_NAME=${BACKUP_NAME:-"backup-$(date +%Y%m%d-%H%M%S)"} -SPLIT_SIZE=${SPLIT_SIZE:-"1G"} # 1GB chunks - -# Create backup directory -mkdir -p "$BACKUP_DIR" - -echo "Starting MongoDB backup..." -echo "Database: $DB_NAME" -echo "Collections: $COLLECTION_NAMES" -echo "Backup name: $BACKUP_NAME" - -# Build mongodump command -MONGODUMP_CMD="mongodump --uri=\"$MONGODB_URI\" --archive=\"$BACKUP_DIR/${BACKUP_NAME}.archive\" --gzip" - -# Add database filter if specified -if [[ -n "$DB_NAME" ]]; then - MONGODUMP_CMD="$MONGODUMP_CMD --db=\"$DB_NAME\"" -fi - -# Add collection filters if specified -if [[ -n "$COLLECTION_NAMES" ]]; then - IFS=',' read -ra COLLECTIONS <<< "$COLLECTION_NAMES" - for collection in "${COLLECTIONS[@]}"; do - collection=$(echo "$collection" | xargs) # trim whitespace - if [[ -n "$collection" ]]; then - MONGODUMP_CMD="$MONGODUMP_CMD --collection=\"$collection\"" - fi - done -fi - -echo "Running: $MONGODUMP_CMD" -eval "$MONGODUMP_CMD" - -echo "Backup completed. Archive size:" -ls -lh "$BACKUP_DIR/${BACKUP_NAME}.archive" - -# Split the backup into chunks -echo "Splitting backup into ${SPLIT_SIZE} chunks..." -cd "$BACKUP_DIR" -split -b "$SPLIT_SIZE" -d "${BACKUP_NAME}.archive" "${BACKUP_NAME}_part_" - -# Remove original archive after splitting -rm "${BACKUP_NAME}.archive" - -# Generate hashes and create manifest -echo "Generating hashes and manifest..." 
-manifest_file="$BACKUP_DIR/manifest.json" -cat > "$manifest_file" << 'EOF' -{ - "backup_name": "", - "created_at": "", - "database": "", - "collections": [], - "total_size": 0, - "parts": [] +#!/bin/sh + +readonly mongodb_uri="${MONGODB_URI?MONGODB_URI is required}" +readonly database="${DATABASE?DATABASE must be passed}" +readonly included_collections="${COLLECTIONS?COLLECTIONS must be passed}" +readonly excluded_collections="${EXCLUDED_COLLECTIONS?EXCLUDED_COLLECTIONS must be passed}" + +readonly backup_file="${BACKUP_FILE?BACKUP_FILE must be passed}" + +readonly red="\e[31m" +readonly blue="\e[34m" +readonly yellow="\e[33m" +readonly reset="\e[0m" + +error() { + echo "${red}[Error]${reset} $*" >&2 + exit 1 +} + +info() { + echo "${blue}[Info]${reset} $*" +} + +warn() { + echo "${yellow}[Warn]${reset} $*" +} + +debug() { + echo "[Debug] $*" +} + +# dump() { +# info "starting in dump mode" + +# local excluded_arg excluded_col + +# for excluded_col in $(echo $excluded_collections | tr ',' ' '); do +# excluded_arg="${excluded_arg}${database}.${excluded_col}," +# done + +# excluded_arg="$(echo "$excluded_arg" | sed 's/,$//')" + +# debug "--nsExclude=$excluded_arg" + +# local included_arg included_col + +# for included_col in $(echo $included_collections | tr ',' ' '); do +# included_arg="${included_arg}${database}.${included_col}," +# done + +# included_arg="$(echo "$included_arg" | sed 's/,$//')" + +# debug "--nsInclude=$included_arg" + +# local cmd="mongodump --uri=$mongodb_uri --nsInclude=$included_arg --nsExclude=$excluded_arg --archive --gzip" + +# warn "executing \"$cmd > $backup_file\"" + +# $cmd >"$backup_file" || error "failed to back up database" + +# info "backup finished" +# } + +dump() { + info "starting in dump mode" + + local excluded_arg excluded_col + + for excluded_col in $(echo $excluded_collections | tr ',' ' '); do + excluded_arg="${excluded_arg} --excludeCollection=$excluded_col" + done + + local included_arg included_col + + for included_col in $(echo $included_collections | tr ',' ' '); do + included_arg="${included_arg} --collection=$included_col" + done + + local cmd="mongodump --uri=$mongodb_uri $included_arg $excluded_arg -d $database --archive --gzip" + + warn "executing \"$cmd > $backup_file\"" + + $cmd >"$backup_file" || error "failed to back up database" + + [ -f "$backup_file" ] || error "failed to back up db, file not found" + + info "backup finished" +} + +restore() { + error "[restore] function not implemented" +} + +s3push() { + info "starting to split and push dump to s3" + + local split_size="${SPLIT_SIZE:-1024}" + local split_prefix="${backup_file}.part" + + info "splitting backup file into ${split_size}MB parts" + + split -b "${split_size}m" -d -a 3 "$backup_file" "$split_prefix" || error "failed to split backup archive" + + info "backup file split successfully" + + # List the created parts for verification + # shellcheck disable=SC2046 + local parts=$(ls "${split_prefix}"* 2>/dev/null | wc -l) + info "created $parts backup parts" + + # shellcheck disable=SC2046 + local manifest="$(generate_manifest "$backup_file" $(ls "${split_prefix}"*))" + + debug "pretty manifest: $(echo "$manifest" | jq)" + + # TODO: push this manifest first + + # TODO: Upload each part to S3 + for part in "${split_prefix}"*; do + debug "part: $part" + # aws s3 cp "$part" "s3://$S3_BUCKET/$S3_PREFIX/" + done +} + +hash() { + sha256sum "$1" | awk '{print $1}' +} + +hash_json() { + printf '{"hash":{"sha256":"%s"},"filename":"%s"}' "$(hash "$1")" "$1" } -EOF - -# Update manifest 
with metadata -collections_array=$(echo "$COLLECTION_NAMES" | sed 's/,/","/g' | sed 's/^/"/' | sed 's/$/"/' | sed 's/""//g') -if [[ "$collections_array" == '""' ]]; then - collections_array='[]' -else - collections_array="[$collections_array]" -fi - -total_size=0 -parts_json="[" - -for part_file in ${BACKUP_NAME}_part_*; do - if [[ -f "$part_file" ]]; then - echo "Processing $part_file..." - - # Calculate hash - hash=$(sha256sum "$part_file" | cut -d' ' -f1) - size=$(stat -f%z "$part_file" 2>/dev/null || stat -c%s "$part_file") - total_size=$((total_size + size)) - - # Add to parts JSON - if [[ "$parts_json" != "[" ]]; then - parts_json="$parts_json," - fi - parts_json="$parts_json{\"filename\":\"$part_file\",\"size\":$size,\"sha256\":\"$hash\"}" - - echo " $part_file: $hash ($(numfmt --to=iec $size))" - fi -done - -parts_json="$parts_json]" - -# Update manifest file using jq if available, otherwise sed -jq --arg backup_name "$BACKUP_NAME" \ - --arg created_at "$(date -Iseconds)" \ - --arg database "$DB_NAME" \ - --argjson collections "$collections_array" \ - --arg total_size "$total_size" \ - --argjson parts "$parts_json" \ - '.backup_name = $backup_name | .created_at = $created_at | .database = $database | .collections = $collections | .total_size = ($total_size | tonumber) | .parts = $parts' \ - "$manifest_file" > "${manifest_file}.tmp" && mv "${manifest_file}.tmp" "$manifest_file" - -echo "Manifest created:" -cat "$manifest_file" - -# Upload to S3 if bucket is specified -if [[ -n "$S3_BUCKET" ]]; then - echo "Uploading to S3 bucket: $S3_BUCKET" - s3_path="s3://$S3_BUCKET/$S3_PREFIX/$BACKUP_NAME" - - # Configure AWS CLI with custom endpoint if specified - if [[ -n "${AWS_S3_ENDPOINT:-}" ]]; then - export AWS_CLI_S3_ENDPOINT="--endpoint-url=$AWS_S3_ENDPOINT" - else - export AWS_CLI_S3_ENDPOINT="" - fi - - # Upload manifest first - echo "Uploading manifest..." - eval "aws s3 cp $AWS_CLI_S3_ENDPOINT \"$manifest_file\" \"$s3_path/manifest.json\"" - - # Upload all parts - for part_file in ${BACKUP_NAME}_part_*; do - if [[ -f "$part_file" ]]; then - echo "Uploading $part_file..." - eval "aws s3 cp $AWS_CLI_S3_ENDPOINT \"$part_file\" \"$s3_path/$part_file\"" - fi - done - - echo "Backup uploaded successfully to: $s3_path" - - # Clean up local files after successful upload - echo "Cleaning up local files..." - rm -f ${BACKUP_NAME}_part_* "$manifest_file" - -else - echo "No S3 bucket specified. Backup files remain in $BACKUP_DIR" - echo "Total backup size: $(numfmt --to=iec $total_size)" -fi - -echo "Backup process completed successfully!" 
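+# generate_manifest below assembles the manifest JSON by hand from hash_json
+# fragments. As an illustrative sketch (hashes shortened, filenames assuming
+# BACKUP_FILE=dump.archive and two split parts), the emitted document is:
+#
+#   {"version":1,
+#    "parts":[{"hash":{"sha256":"ab12..."},"filename":"dump.archive.part000"},
+#             {"hash":{"sha256":"cd34..."},"filename":"dump.archive.part001"}],
+#    "hash":{"sha256":"ef56..."},"filename":"dump.archive"}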
+ +generate_manifest() { + local manifest='{"version":1,"parts":' part + + local full_file="$1" + + shift + + local parts_json= + + for part in "$@"; do + parts_json="${parts_json}$(hash_json "$part")," + done + + parts_json="$(echo "$parts_json" | sed 's/,$//')" + + manifest="${manifest}[${parts_json}],$(hash_json "$full_file" | sed -E 's/^\{(.+)\}$/\1/')}" + + echo "$manifest" +} + +dump +s3push \ No newline at end of file diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml index 87f7fd2..61c8d2a 100644 --- a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml @@ -38,7 +38,7 @@ spec: spec: description: MongoDBBackupSpec defines the desired state of MongoDBBackup properties: - mongodbRef: + backupBucketSecretRef: properties: name: type: string @@ -48,55 +48,24 @@ spec: - name - namespace type: object - namespaces: + cluster: + type: string + database: + type: string + excludedCollections: items: - properties: - collections: - items: - type: string - type: array - database: - type: string - required: - - database - type: object + type: string + type: array + includedCollections: + items: + type: string type: array - storage: - properties: - s3: - properties: - bucket: - type: string - endpoint: - type: string - prefix: - type: string - region: - type: string - secretRef: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - required: - - bucket - - endpoint - - region - - secretRef - type: object - type: - type: string - required: - - type - type: object required: - - mongodbRef - - storage + - backupBucketSecretRef + - cluster + - database + - excludedCollections + - includedCollections type: object status: description: MongoDBBackupStatus defines the observed state of MongoDBBackup diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml index efe5384..4772378 100644 --- a/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml @@ -98,6 +98,8 @@ spec: scaled up. type: string type: object + backupImage: + type: string connectionSecret: description: Secret in which Airlock will look for a ConnectionString or Atlas credentials, that will be used to connect to the cluster. @@ -130,6 +132,7 @@ spec: for this cluster. Will be overridden if "username" is specified. 
type: string required: + - backupImage - connectionSecret - hostTemplate type: object diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index afb379f..46c98e8 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -20,6 +20,16 @@ rules: - get - list - watch +- apiGroups: + - "" + resourceNames: + - '*' + resources: + - secrets + verbs: + - get + - list + - watch - apiGroups: - "" - apps diff --git a/controllers/mongodbbackup_controller.go b/controllers/mongodbbackup_controller.go index 8fd588d..f10a3c8 100644 --- a/controllers/mongodbbackup_controller.go +++ b/controllers/mongodbbackup_controller.go @@ -7,7 +7,7 @@ import ( "time" batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" @@ -24,246 +24,187 @@ type MongoDBBackupReconciler struct { Scheme *runtime.Scheme } +const ( + StatusBackupCompleted = "Completed" + StatusBackupFailed = "Failed" + StatusBackupPending = "Pending" +) + //+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups/status,verbs=get;update;patch //+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups/finalizers,verbs=update //+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch,resourceNames=* func (r *MongoDBBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := log.FromContext(ctx) - // Fetch the MongoDBBackup instance var backup airlockv1alpha1.MongoDBBackup if err := r.Get(ctx, req.NamespacedName, &backup); err != nil { log.Error(err, "unable to fetch MongoDBBackup") return ctrl.Result{}, client.IgnoreNotFound(err) } - // Check if backup is already completed - if backup.Status.Phase == "Completed" || backup.Status.Phase == "Failed" { + // nothing to do if we already have updated the status of the "backup" + // TODO: likely since a job, we should retry checking the job status and update the state here + if backup.Status.Phase == StatusBackupCompleted || backup.Status.Phase == StatusBackupFailed { return ctrl.Result{}, nil } - // Initialize status if empty if backup.Status.Phase == "" { - backup.Status.Phase = "Pending" + backup.Status.Phase = StatusBackupPending + backup.Status.StartTime = &metav1.Time{Time: time.Now()} + + // meta.SetStatusCondition(&backup.Status.Conditions, metav1.Condition{ + // Type: "Pending", + // Status: metav1.ConditionUnknown, + // Reason: "backup has not started yet", + // LastTransitionTime: metav1.NewTime(time.Now()), + // Message: "backup job has not been scheduled yet", + // }) + if err := r.Status().Update(ctx, &backup); err != nil { log.Error(err, "failed to update backup status") return ctrl.Result{}, err } + return ctrl.Result{RequeueAfter: time.Second * 5}, nil } - // Check if Job already exists - jobName := fmt.Sprintf("%s-backup-job", backup.Name) - var existingJob batchv1.Job - err := r.Get(ctx, client.ObjectKey{Name: jobName, Namespace: backup.Namespace}, &existingJob) - if err == nil { - // Job exists, check its status - return r.updateBackupStatusFromJob(ctx, &backup, &existingJob) - } else if client.IgnoreNotFound(err) != nil { - log.Error(err, "failed to get backup 
job") - return ctrl.Result{}, err + // now use the secret as a reference for all mongo env vars + // use the backup.Spec.backupBucketSecret for the same purpose + var backupJob = batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: backup.Name, + Namespace: backup.Namespace, + }, } - // Create backup Job - job, err := r.createBackupJob(ctx, &backup) - if err != nil { - log.Error(err, "failed to create backup job") - r.updateBackupStatusFailed(ctx, &backup, err.Error()) - return ctrl.Result{}, err - } + err := r.Get(ctx, client.ObjectKeyFromObject(&backupJob), &backupJob) - if err := r.Create(ctx, job); err != nil { - log.Error(err, "failed to create Job") - r.updateBackupStatusFailed(ctx, &backup, err.Error()) + if client.IgnoreNotFound(err) != nil { + // FIXME: handle it return ctrl.Result{}, err } - log.Info("created backup job", "job", jobName) + var maxParallel int32 = 1 - backup.Status.Phase = "Running" - if err := r.Status().Update(ctx, &backup); err != nil { - log.Error(err, "failed to update backup status") + backupImage, err := r.getBackupImage(ctx, backup.Spec.Cluster) + if err != nil { + //FIXME: handle non nil including when the referenced cluster does not exist return ctrl.Result{}, err } - return ctrl.Result{RequeueAfter: time.Second * 30}, nil -} - -func (r *MongoDBBackupReconciler) createBackupJob(ctx context.Context, backup *airlockv1alpha1.MongoDBBackup) (*batchv1.Job, error) { - jobName := fmt.Sprintf("%s-backup-job", backup.Name) - - // Build connection string - connectionString := fmt.Sprintf("mongodb://%s.%s.svc.cluster.local:27017", - backup.Spec.MongoDBRef.Name, backup.Spec.MongoDBRef.Namespace) - - // Build environment variables for backup - env := []corev1.EnvVar{ - {Name: "MONGODB_URI", Value: connectionString}, - {Name: "BACKUP_NAME", Value: backup.Name}, + envVars, err := r.getMongoDbEnvVars(ctx, backup) + if err != nil { } - // Add S3 configuration if specified - if backup.Spec.Storage.Type == "s3" && backup.Spec.Storage.S3 != nil { - s3 := backup.Spec.Storage.S3 - env = append(env, []corev1.EnvVar{ - {Name: "S3_BUCKET", Value: s3.Bucket}, - {Name: "S3_PREFIX", Value: s3.Prefix}, - {Name: "AWS_REGION", Value: s3.Region}, - {Name: "AWS_S3_ENDPOINT", Value: s3.Endpoint}, - // AWS credentials from secret - { - Name: "AWS_ACCESS_KEY_ID", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretRef.Name}, - Key: "accessKeyId", + backupJob.Spec = batchv1.JobSpec{ + Parallelism: &maxParallel, + // Completions: 1, + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyNever, + Containers: []v1.Container{ + { + ImagePullPolicy: v1.PullIfNotPresent, + Name: backup.Name, + Image: backupImage, + Command: []string{"sleep", "1d"}, + Env: *envVars, }, }, }, - { - Name: "AWS_SECRET_ACCESS_KEY", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretRef.Name}, - Key: "secretAccessKey", - }, - }, - }, - }...) + }, } - // Add database/collection filters - var databases, collections []string - for _, ns := range backup.Spec.Namespaces { - databases = append(databases, ns.Database) - if len(ns.Collections) > 0 { - collections = append(collections, ns.Collections...) 
- } - } + controllerutil.SetControllerReference(&backup, &backupJob, r.Scheme) - // Set database and collection names as env vars - if len(databases) > 0 { - env = append(env, corev1.EnvVar{Name: "DB_NAME", Value: databases[0]}) // For now, support single DB - } - if len(collections) > 0 { - env = append(env, corev1.EnvVar{Name: "COLLECTION_NAMES", Value: strings.Join(collections, ",")}) + err = r.Create(ctx, &backupJob) + if err != nil { + return ctrl.Result{}, err } - job := &batchv1.Job{ + return ctrl.Result{}, nil +} + +func (r *MongoDBBackupReconciler) getBackupImage(ctx context.Context, cluster string) (string, error) { + var mongodbCluster = airlockv1alpha1.MongoDBCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: backup.Namespace, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{ - { - Name: "backup", - Image: "airlock-backup:latest", // TODO: make this configurable - Env: env, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "backup-storage", - MountPath: "/backups", - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "backup-storage", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - }, - }, - }, + Name: cluster, }, } - // Set owner reference - if err := controllerutil.SetControllerReference(backup, job, r.Scheme); err != nil { - return nil, err + err := r.Get(ctx, client.ObjectKeyFromObject(&mongodbCluster), &mongodbCluster) + if err != nil { + return "", err } - return job, nil + return mongodbCluster.Spec.BackupImage, nil } -func (r *MongoDBBackupReconciler) updateBackupStatusFromJob(ctx context.Context, backup *airlockv1alpha1.MongoDBBackup, job *batchv1.Job) (ctrl.Result, error) { - log := log.FromContext(ctx) +func (r *MongoDBBackupReconciler) getMongoDbEnvVars(ctx context.Context, backup airlockv1alpha1.MongoDBBackup) (*[]v1.EnvVar, error) { + var accessRequest airlockv1alpha1.MongoDBAccessRequest - if job.Status.CompletionTime != nil { - // Job completed successfully - backup.Status.Phase = "Completed" - backup.Status.CompletionTime = job.Status.CompletionTime - backup.Status.BackupPath = "local:/backup/backup.archive" - backup.Status.Conditions = []metav1.Condition{ - { - Type: "Ready", - Status: metav1.ConditionTrue, - LastTransitionTime: metav1.Now(), - Message: "Backup completed successfully", - }, - } + accessRequest.Name = fmt.Sprintf("%s-access", backup.Name) + accessRequest.Namespace = backup.Namespace - if err := r.Status().Update(ctx, backup); err != nil { - log.Error(err, "failed to update backup status") - return ctrl.Result{}, err - } - - return ctrl.Result{}, nil + err := r.Get(ctx, client.ObjectKeyFromObject(&accessRequest), &accessRequest) + if client.IgnoreNotFound(err) != nil { + // FIXME: handle error here } - if job.Status.Failed > 0 { - // Job failed - backup.Status.Phase = "Failed" - backup.Status.CompletionTime = &metav1.Time{Time: time.Now()} - backup.Status.Conditions = []metav1.Condition{ - { - Type: "Ready", - Status: metav1.ConditionFalse, - LastTransitionTime: metav1.Now(), - Message: "Backup job failed", - }, - } + err = nil - if err := r.Status().Update(ctx, backup); err != nil { - log.Error(err, "failed to update backup status") - return ctrl.Result{}, err - } + accessRequest.Spec.ClusterName = backup.Spec.Cluster + accessRequest.Spec.Database = backup.Spec.Database + accessRequest.Spec.UserName = backup.Name + "-user" - return ctrl.Result{}, nil + 
controllerutil.SetControllerReference(&backup, &accessRequest, r.Scheme) + + err = r.Create(ctx, &accessRequest) + if err != nil { + // TODO: handle this error } - // Job is still running - return ctrl.Result{RequeueAfter: time.Second * 30}, nil + secretRef := accessRequest.Name + + return &[]v1.EnvVar{ + getEnvVarFromSecret("MONGODB_URI", secretRef, "connectionString"), + getEnvVar("DATABASE", backup.Spec.Database), + getEnvVar("COLLECTIONS", strings.Join(backup.Spec.IncludedCollections, ",")), + getEnvVar("EXCLUDED_COLLECTIONS", strings.Join(backup.Spec.ExcludedCollections, ",")), + }, nil } -func (r *MongoDBBackupReconciler) updateBackupStatusFailed(ctx context.Context, backup *airlockv1alpha1.MongoDBBackup, message string) { - backup.Status.Phase = "Failed" - backup.Status.CompletionTime = &metav1.Time{Time: time.Now()} - backup.Status.Conditions = []metav1.Condition{ - { - Type: "Ready", - Status: metav1.ConditionFalse, - LastTransitionTime: metav1.Now(), - Message: message, +func getEnvVar(name, value string) v1.EnvVar { + return v1.EnvVar{ + Name: name, + Value: value, + } +} + +func getEnvVarFromSecret(name, secretRef, key string) v1.EnvVar { + return v1.EnvVar{ + Name: name, + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + Key: key, + LocalObjectReference: v1.LocalObjectReference{ + Name: secretRef, + }, + }, }, } - r.Status().Update(ctx, backup) } -// SetupWithManager sets up the controller with the Manager. +// SetupWithManager sets up the controller with the Manager func (r *MongoDBBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&airlockv1alpha1.MongoDBBackup{}). Owns(&batchv1.Job{}). + Owns(&airlockv1alpha1.MongoDBAccessRequest{}). Complete(r) } diff --git a/tests/assets/airlock/mongodbcluster.yaml b/tests/assets/airlock/mongodbcluster.yaml index 78c16ec..2be71f8 100644 --- a/tests/assets/airlock/mongodbcluster.yaml +++ b/tests/assets/airlock/mongodbcluster.yaml @@ -16,4 +16,5 @@ spec: hostTemplate: "mongo.mongo.svc.cluster.local" optionsTemplate: "?directConnection=true" prefixTemplate: mongodb + backupImage: backup:latest --- diff --git a/tests/assets/local-tests/mongodbbackup.yaml b/tests/assets/local-tests/mongodbbackup.yaml new file mode 100644 index 0000000..9cab0f4 --- /dev/null +++ b/tests/assets/local-tests/mongodbbackup.yaml @@ -0,0 +1,15 @@ +apiVersion: airlock.cloud.rocket.chat/v1alpha1 +kind: MongoDBBackup +metadata: + name: test-backup + namespace: mongo + labels: + app: test-backup +spec: + cluster: "airlock-test" + database: "sample_training" + excludedCollections: [] + includedCollections: [] + backupBucketSecretRef: + name: "s3-backup-secret" + namespace: "mongo" \ No newline at end of file diff --git a/tests/controller_test.go b/tests/controller_test.go index ea49caa..5db7e07 100644 --- a/tests/controller_test.go +++ b/tests/controller_test.go @@ -175,50 +175,5 @@ var _ = Describe("Airlock Controller", Ordered, func() { }) }) - It("should create and manage MongoDBBackup", func() { - backupName := "test-backup" - backup := &airlockv1alpha1.MongoDBBackup{ - ObjectMeta: metav1.ObjectMeta{ - Name: backupName, - Namespace: "mongo", - }, - Spec: airlockv1alpha1.MongoDBBackupSpec{ - MongoDBRef: airlockv1alpha1.MongoDBRef{ - Name: "mongo", - Namespace: "default", - }, - Namespaces: []airlockv1alpha1.MongoDBNamespace{ - { - Database: "test", - Collections: []string{"users"}, - }, - }, - Storage: airlockv1alpha1.MongoDBBackupStorage{ - Type: "s3", - S3: &airlockv1alpha1.MongoDBBackupS3{ - 
Endpoint: "s3.amazonaws.com", - Bucket: "test-bucket", - Region: "us-east-1", - SecretRef: airlockv1alpha1.S3SecretRef{ - Name: "s3-credentials", - Key: "credentials", - }, - }, - }, - }, - } - - By("Creating MongoDBBackup") - err := k8sClient.Create(context.Background(), backup) - Expect(err).NotTo(HaveOccurred()) - - By("Verifying backup is created") - var fetchedBackup airlockv1alpha1.MongoDBBackup - err = k8sClient.Get(context.Background(), client.ObjectKey{ - Name: backupName, Namespace: "mongo", - }, &fetchedBackup) - Expect(err).NotTo(HaveOccurred()) - Expect(fetchedBackup.Spec.MongoDBRef.Name).To(Equal("mongo")) - }) }) }) From 7995f922fa8ea97259534053f7baa387f4fa1c89 Mon Sep 17 00:00:00 2001 From: Debdut Chakraborty Date: Wed, 21 Jan 2026 13:42:02 +0530 Subject: [PATCH 8/9] wip: .... --- .github/workflows/build.yml | 4 +- Makefile | 38 +- README.md | 266 +++++++++++++- api/v1alpha1/mongodbbackup_types.go | 16 +- api/v1alpha1/mongodbbackupschedule_types.go | 85 +++++ api/v1alpha1/mongodbbackupstore_types.go | 68 ++++ api/v1alpha1/zz_generated.deepcopy.go | 305 +++++++++++++++- backup-image/entrypoint.sh | 36 +- ...oud.rocket.chat_mongodbaccessrequests.yaml | 66 ++-- ...lock.cloud.rocket.chat_mongodbbackups.yaml | 81 +++-- ...ud.rocket.chat_mongodbbackupschedules.yaml | 211 +++++++++++ ...cloud.rocket.chat_mongodbbackupstores.yaml | 171 +++++++++ ...ock.cloud.rocket.chat_mongodbclusters.yaml | 73 ++-- config/crd/kustomization.yaml | 1 + config/rbac/role.yaml | 134 ++++++- .../airlock_v1alpha1_mongodbbackupstore.yaml | 24 ++ config/samples/kustomization.yaml | 1 + controllers/backup_restore.go | 282 +++++++++++++++ controllers/common.go | 57 +-- controllers/mongodbbackup_controller.go | 233 ++++++------ .../mongodbbackupschedule_controller.go | 300 ++++++++++++++++ controllers/mongodbbackupstore_controller.go | 245 +++++++++++++ controllers/reconciler/common.go | 26 ++ go.mod | 11 + go.sum | 22 ++ main.go | 17 + tests/assets/k3d/manual-storageclass.yaml | 2 + tests/assets/local-tests/mongodbbackup.yaml | 5 +- .../local-tests/mongodbbucketstoresecret.yaml | 9 + tests/assets/mongo/00_mongo_namespace.yaml | 4 + tests/controller_test.go | 340 +++++++++++++++++- tests/suite_test.go | 15 +- tests/utils/k3d.go | 20 +- tests/utils/utils.go | 24 +- 34 files changed, 2881 insertions(+), 311 deletions(-) create mode 100644 api/v1alpha1/mongodbbackupschedule_types.go create mode 100644 api/v1alpha1/mongodbbackupstore_types.go create mode 100644 config/crd/bases/airlock.cloud.rocket.chat_mongodbbackupschedules.yaml create mode 100644 config/crd/bases/airlock.cloud.rocket.chat_mongodbbackupstores.yaml create mode 100644 config/samples/airlock_v1alpha1_mongodbbackupstore.yaml create mode 100644 controllers/backup_restore.go create mode 100644 controllers/mongodbbackupschedule_controller.go create mode 100644 controllers/mongodbbackupstore_controller.go create mode 100644 controllers/reconciler/common.go create mode 100644 tests/assets/local-tests/mongodbbucketstoresecret.yaml create mode 100644 tests/assets/mongo/00_mongo_namespace.yaml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index aa5531f..28d938f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,9 +25,7 @@ jobs: with: go-version: '=1.21.0' cache: false - - uses: nolar/setup-k3d-k3s@v1 - with: - skip-creation: true + - uses: RocketChat/k3d-with-registry@main - name: Build binary run: | cd $__W_SRC_REL diff --git a/Makefile b/Makefile index 6689736..04d60a7 100644 --- a/Makefile +++ 
b/Makefile
@@ -276,6 +276,7 @@ k3d-add-storageclass: k3d-cluster
 	$(KUBECTL_WITH_CONFIG) apply -f tests/assets/k3d/local-path-config.yaml
 	$(KUBECTL_WITH_CONFIG) rollout restart deployment/local-path-provisioner -n kube-system
 	$(KUBECTL_WITH_CONFIG) rollout status deployment/local-path-provisioner -n kube-system
+	$(KUBECTL_WITH_CONFIG) annotate storageclass local-path storageclass.kubernetes.io/is-default-class- || true
 	$(KUBECTL_WITH_CONFIG) apply -f tests/assets/k3d/manual-storageclass.yaml
 
 .PHONY: k3d-load-image
@@ -289,6 +290,7 @@ k3d-deploy-airlock: k3d-load-image
 	$(KUBECTL_WITH_CONFIG) apply -k config/rbac
 	$(KUBECTL_WITH_CONFIG) apply -f config/manager/manager.yaml
 	$(KUBECTL_WITH_CONFIG) apply -f tests/assets/airlock
+	$(KUBECTL_WITH_CONFIG) set env deployment/controller-manager DEV_MODE=true -n airlock-system
 
 .PHONY: k3d-destroy
 k3d-destroy:
@@ -299,13 +301,12 @@ endif
 
 .PHONY: k3d-deploy-mongo
 k3d-deploy-mongo: k3d-cluster
-	$(KUBECTL_WITH_CONFIG) get namespace mongo 2>&1 >/dev/null || $(KUBECTL_WITH_CONFIG) create namespace mongo
-	$(KUBECTL_WITH_CONFIG) apply -f tests/assets/mongo
+	$(KUBECTL_WITH_CONFIG) apply -f ./tests/assets/mongo
 
 .PHONY: k3d-deploy-minio
 k3d-deploy-minio: k3d-cluster k3d-add-storageclass
 	$(KUBECTL_WITH_CONFIG) apply -k "github.com/minio/operator?ref=v6.0.4"
-	$(KUBECTL_WITH_CONFIG) apply -f tests/assets/minio
+	$(KUBECTL_WITH_CONFIG) apply -f ./tests/assets/minio
 
 .PHONY: docker-build-backup-image
 docker-build-backup-image:
@@ -317,8 +318,35 @@ k3d-load-backup-image: k3d-cluster docker-build-backup-image
 
 .PHONY: k3d-run-backup-pod
 k3d-run-backup-pod: k3d-cluster k3d-load-backup-image
-	$(KUBECTL_WITH_CONFIG) apply -f tests/assets/local-tests/backup-pod.yaml
+	$(KUBECTL_WITH_CONFIG) apply -f ./tests/assets/local-tests/backup-pod.yaml
 
 .PHONY: k3d-load-mongo-data
 k3d-load-mongo-data: k3d-deploy-mongo
-	$(KUBECTL_WITH_CONFIG) apply -f tests/assets/local-tests/mongo-restore-job.yaml
\ No newline at end of file
+	$(KUBECTL_WITH_CONFIG) apply -f ./tests/assets/local-tests/mongo-restore-job.yaml
+
+# subject to change as this matures
+.PHONY: k3d-setup-all
+k3d-setup-all: k3d-load-mongo-data k3d-load-backup-image k3d-deploy-airlock k3d-deploy-minio
+
+.PHONY: k3d-restart-airlock
+k3d-restart-airlock:
+ifndef NAME
+	$(error NAME is required. Usage: make k3d-restart-airlock NAME=my-cluster)
+endif
+	$(KUBECTL_WITH_CONFIG) rollout restart deployment controller-manager -n airlock-system
+
+# Example: make k3d-kubectl NAME=airlock-test get pods \\-A
+k3d-kubectl:
+ifndef NAME
+	$(error NAME is required. Usage: make k3d-kubectl NAME=my-cluster [kubectl args...])
+endif
+	$(KUBECTL_WITH_CONFIG) $(wordlist 2, $(words $(MAKECMDGOALS)), $(MAKECMDGOALS))
+
+# Support for passing commands after the target name
+%::
+	@:
+
+.PHONY: k3d-add-backup-store
+k3d-add-backup-store: k3d-cluster
+	$(KUBECTL_WITH_CONFIG) apply -f ./tests/assets/local-tests/mongodbbucketstoresecret.yaml
+	$(KUBECTL_WITH_CONFIG) apply -f ./config/samples/airlock_v1alpha1_mongodbbackupstore.yaml
\ No newline at end of file
diff --git a/README.md b/README.md
index e90bbcb..6641af3 100644
--- a/README.md
+++ b/README.md
@@ -78,6 +78,8 @@ More information can be found via the [Kubebuilder Documentation](https://book.k
 
 A testing environment can be spun up locally with k3d.
 
+### Quick Start
+
 Run
 
 ```sh
 make k3d-load-mongo-data k3d-deploy-airlock k3d-deploy-minio NAME=airlock IMG=controller:latest
 ```
@@ -89,4 +91,266 @@ This
 3. deploys minio
 4. deploys mongo
 5. loads sample data into mongo
-6. deploys airlock operator
\ No newline at end of file
+6. deploys airlock operator
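+
+For example, to verify that everything came up (an illustrative check only,
+using the cluster name `airlock` from the command above; `k3d-kubectl` is
+documented under Makefile Targets below):
+
+```sh
+make k3d-kubectl NAME=airlock get pods \\-A
+```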
+
+**NOTE: All tests run through `make test` use Makefile targets, so each step can also be run manually when needed. This is intentional, to allow individual tests to be debugged and troubleshot.**
+
+### Makefile Targets
+
+#### K3d Cluster Management
+
+**`k3d-cluster`** - Create a k3d cluster
+```sh
+# Create a cluster named 'airlock-test'
+make k3d-cluster NAME=airlock-test
+
+# The cluster will be created with:
+# - 1 server node and 1 agent node
+# - Local storage mounted at tests/k3d/disk:/disk
+# - Kubeconfig saved to /tmp/${NAME}.kube.config
+```
+
+**`k3d-kubectl`** - Run kubectl commands against the cluster created by `k3d-cluster` (more examples under K3d Utility Targets below).
+
+Pass the cluster name via the `NAME` variable.
+```sh
+# Get all pods in all namespaces
+make k3d-kubectl NAME=airlock-test get pods \\-A
+```
+
+Make sure to escape the leading dash of every kubectl flag, i.e. `\\-` is correct, `\-` is incorrect.
+
+**`k3d-destroy`** - Delete a k3d cluster
+```sh
+# Delete the cluster
+make k3d-destroy NAME=airlock-test
+```
+
+If any test fails during `make test`, the cluster is intentionally left running. At that point, use `make k3d-kubectl NAME=airlock-test ...` to start debugging/troubleshooting the failing test.
+
+**`k3d-add-storageclass`** - Configure storage classes for the cluster
+```sh
+# Sets up local-path provisioner and manual storage class
+make k3d-add-storageclass NAME=airlock-test
+```
+
+#### K3d Deployment Targets
+
+**`k3d-deploy-mongo`** - Deploy MongoDB to the cluster
+```sh
+# Deploy MongoDB with sample configuration
+make k3d-deploy-mongo NAME=airlock-test
+```
+
+**`k3d-deploy-minio`** - Deploy MinIO to the cluster
+```sh
+# Deploy MinIO operator and tenant
+make k3d-deploy-minio NAME=airlock-test
+```
+
+**`k3d-deploy-airlock`** - Deploy Airlock operator to the cluster
+```sh
+# Build, load, and deploy the operator
+make k3d-deploy-airlock NAME=airlock-test IMG=controller:latest
+
+# This target:
+# - Builds the Docker image (without running tests)
+# - Loads the image into the k3d cluster
+# - Applies CRDs, RBAC, and manager deployment
+# - Sets DEV_MODE=true for development
+```
+
+**`k3d-setup-all`** - Complete setup (mongo data, backup image, airlock, minio)
+```sh
+# One command to set up everything
+make k3d-setup-all NAME=airlock-test IMG=controller:latest
+```
+
+#### K3d Image Management
+
+**`k3d-load-image`** - Build and load controller image into cluster
+```sh
+# Build and load the controller image
+make k3d-load-image NAME=airlock-test IMG=controller:latest
+```
+
+**`docker-build-backup-image`** - Build the backup image
+```sh
+# Build backup image (default: backup:latest)
+make docker-build-backup-image
+
+# Build with custom tag
+make docker-build-backup-image BIMG=my-backup:1.0.0
+```
+
+**`k3d-load-backup-image`** - Build and load backup image into cluster
+```sh
+# Build and load backup image
+make k3d-load-backup-image NAME=airlock-test
+```
+
+#### K3d Data Management
+
+**`k3d-load-mongo-data`** - Load sample data into MongoDB
+```sh
+# Deploy a job that restores sample data to MongoDB
+make k3d-load-mongo-data NAME=airlock-test
+```
+
+**`k3d-add-backup-store`** - Add backup store configuration
+```sh
+# Apply backup store secret and CR
+make k3d-add-backup-store NAME=airlock-test
+```
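+
+The store CR applied by this target follows the `MongoDBBackupStore` schema
+introduced in this change. As a rough sketch only (the resource name, endpoint,
+bucket, region, and secret key names below are placeholders, not the committed
+sample):
+```sh
+KUBECONFIG=/tmp/airlock-test.kube.config kubectl apply -f - <<'EOF'
+apiVersion: airlock.cloud.rocket.chat/v1alpha1
+kind: MongoDBBackupStore
+metadata:
+  name: my-backup-store
+spec:
+  type: s3
+  s3:
+    endpoint: https://minio.example.svc.cluster.local
+    bucket: backups
+    region: us-east-1
+    secretRef:
+      name: s3-backup-secret
+      mappings:
+        accessKeyId:
+          key: accessKeyId
+        secretAccessKey:
+          key: secretAccessKey
+EOF
+```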
+
+#### K3d Utility Targets
+
+**`k3d-kubectl`** - Run arbitrary kubectl commands against the cluster
+```sh
+# Get all pods in all namespaces
+make k3d-kubectl NAME=airlock-test get pods \\-A
+
+# Get MongoDBBackup resources
+make k3d-kubectl NAME=airlock-test get mongodbbackups \\-A
+
+# Describe a resource
+make k3d-kubectl NAME=airlock-test describe pod my-pod \\-n mongo
+
+# Apply a manifest
+make k3d-kubectl NAME=airlock-test apply \\-f my-manifest.yaml
+
+# Get logs
+make k3d-kubectl NAME=airlock-test logs deployment/controller-manager \\-n airlock-system
+```
+
+**`k3d-restart-airlock`** - Restart the Airlock controller deployment
+```sh
+# Restart the controller to pick up changes
+make k3d-restart-airlock NAME=airlock-test
+```
+
+**`k3d-run-backup-pod`** - Run a test backup pod
+```sh
+# Deploy a test pod for manual backup testing
+make k3d-run-backup-pod NAME=airlock-test
+```
+
+#### Development Targets
+
+**`build`** - Build the manager binary
+```sh
+# Build for current platform
+make build
+
+# Build for specific OS/arch
+make build TARGETOS=linux TARGETARCH=amd64
+```
+
+**`run`** - Run the controller locally
+```sh
+# Run controller from your host (uses current kubeconfig)
+make run
+```
+
+**`test`** - Run tests
+```sh
+# Run all tests with coverage
+make test
+```
+
+**`manifests`** - Generate CRD and RBAC manifests
+```sh
+# Regenerate CRDs and RBAC after API changes
+make manifests
+```
+
+**`generate`** - Generate DeepCopy code
+```sh
+# Generate DeepCopy methods for API types
+make generate
+```
+
+#### Docker Build Targets
+
+**`docker-build`** - Build Docker image (runs tests first)
+```sh
+# Build image with default tag
+make docker-build
+
+# Build with custom tag
+make docker-build IMG=myregistry/airlock:v1.0.0
+```
+
+**`docker-build-no-test`** - Build Docker image without running tests
+```sh
+# Faster build for development
+make docker-build-no-test IMG=controller:latest
+```
+
+**`docker-push`** - Push Docker image
+```sh
+# Push to registry
+make docker-push IMG=myregistry/airlock:v1.0.0
+```
+
+#### Deployment Targets
+
+**`install`** - Install CRDs to cluster
+```sh
+# Install CRDs to current kubeconfig cluster
+make install
+```
+
+**`uninstall`** - Uninstall CRDs
+```sh
+# Remove CRDs (ignores not found errors)
+make uninstall ignore-not-found=true
+```
+
+**`deploy`** - Deploy controller to cluster
+```sh
+# Deploy with custom image
+make deploy IMG=myregistry/airlock:v1.0.0
+```
+
+**`undeploy`** - Remove controller from cluster
+```sh
+# Remove controller deployment
+make undeploy ignore-not-found=true
+```
+
+### Common Workflows
+
+**Complete local development setup:**
+```sh
+# 1. Create cluster and deploy everything
+make k3d-setup-all NAME=airlock-test IMG=controller:latest
+
+# 2. Make code changes, rebuild and restart
+make docker-build-no-test IMG=controller:latest
+make k3d-load-image NAME=airlock-test IMG=controller:latest
+make k3d-restart-airlock NAME=airlock-test
+
+# 3. Check logs
+make k3d-kubectl NAME=airlock-test logs \\-f deployment/controller-manager \\-n airlock-system
+```
+
+**Testing backup functionality:**
+```sh
+# 1. Setup environment
+make k3d-setup-all NAME=airlock-test IMG=controller:latest
+
+# 2. Add backup store
+make k3d-add-backup-store NAME=airlock-test
+
+# 3. Create a backup (via kubectl or YAML)
+make k3d-kubectl NAME=airlock-test apply \\-f tests/assets/local-tests/mongodbbackup.yaml
+
+# 4.
Check backup status +make k3d-kubectl NAME=airlock-test get mongodbbackups \\-A +``` + +**Cleanup:** +```sh +# Delete the entire cluster +make k3d-destroy NAME=airlock-test +``` \ No newline at end of file diff --git a/api/v1alpha1/mongodbbackup_types.go b/api/v1alpha1/mongodbbackup_types.go index af8bf5c..5fe82a6 100644 --- a/api/v1alpha1/mongodbbackup_types.go +++ b/api/v1alpha1/mongodbbackup_types.go @@ -8,16 +8,17 @@ import ( // +kubebuilder:object:generate=true // +k8s:deepcopy-gen=true type MongoDBBackupSpec struct { - Cluster string `json:"cluster"` - Database string `json:"database"` - ExcludedCollections []string `json:"excludedCollections"` - IncludedCollections []string `json:"includedCollections"` - BackupBucketSecretRef MongoDbBackupBucketSecretRef `json:"backupBucketSecretRef"` + Cluster string `json:"cluster"` + Database string `json:"database"` + ExcludedCollections []string `json:"excludedCollections,omitempty"` + IncludedCollections []string `json:"includedCollections,omitempty"` + BackupStoreRef MongoDBBackupStoreRef `json:"backupStoreRef"` + Prefix string `json:"prefix,omitempty"` } -type MongoDbBackupBucketSecretRef struct { +type MongoDBBackupStoreRef struct { Name string `json:"name"` - Namespace string `json:"namespace"` + Namespace string `json:"namespace,omitempty"` } // type MongoDBBackupS3 struct { @@ -42,6 +43,7 @@ type MongoDBBackupStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase" // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type MongoDBBackup struct { metav1.TypeMeta `json:",inline"` diff --git a/api/v1alpha1/mongodbbackupschedule_types.go b/api/v1alpha1/mongodbbackupschedule_types.go new file mode 100644 index 0000000..127f45e --- /dev/null +++ b/api/v1alpha1/mongodbbackupschedule_types.go @@ -0,0 +1,85 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MongoDBBackupScheduleSpec defines the desired state of MongoDBBackupSchedule +// +kubebuilder:object:generate=true +// +k8s:deepcopy-gen=true +type MongoDBBackupScheduleSpec struct { + // Schedule is a cron expression defining when backups should run + // +kubebuilder:validation:Required + Schedule string `json:"schedule"` + + // BackupSpec defines the template for creating MongoDBBackup resources + // +kubebuilder:validation:Required + BackupSpec MongoDBBackupSpec `json:"backupSpec"` + + // SuccessfulJobsHistoryLimit is the number of successful backup jobs to keep + // +kubebuilder:default=3 + // +kubebuilder:validation:Minimum=0 + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"` + + // FailedJobsHistoryLimit is the number of failed backup jobs to keep + // +kubebuilder:default=1 + // +kubebuilder:validation:Minimum=0 + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` + + // Suspend suspends the schedule if true + // +kubebuilder:default=false + Suspend *bool `json:"suspend,omitempty"` +} + +// MongoDBBackupScheduleStatus defines the observed state of MongoDBBackupSchedule +// +kubebuilder:object:generate=true +// +k8s:deepcopy-gen=true +type MongoDBBackupScheduleStatus struct { + // Phase indicates the overall status of the schedule + // +kubebuilder:validation:Enum=Succeeding;Failing + Phase string `json:"phase,omitempty"` + + // LastBackupTime is the time of the last successful backup + LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"` + + // LastBackupName is the 
name of the last created backup + LastBackupName string `json:"lastBackupName,omitempty"` + + // LastFailureTime is the time of the last failed backup + LastFailureTime *metav1.Time `json:"lastFailureTime,omitempty"` + + // LastFailureMessage contains the error message from the last failed backup + LastFailureMessage string `json:"lastFailureMessage,omitempty"` + + // ActiveBackups is a list of active backup names created by this schedule + ActiveBackups []string `json:"activeBackups,omitempty"` + + // Conditions represent the latest available observations of the schedule's state + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Schedule",type="string",JSONPath=".spec.schedule" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase" +// +kubebuilder:printcolumn:name="Last Backup",type="date",JSONPath=".status.lastBackupTime" +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type MongoDBBackupSchedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MongoDBBackupScheduleSpec `json:"spec,omitempty"` + Status MongoDBBackupScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type MongoDBBackupScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MongoDBBackupSchedule `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MongoDBBackupSchedule{}, &MongoDBBackupScheduleList{}) +} diff --git a/api/v1alpha1/mongodbbackupstore_types.go b/api/v1alpha1/mongodbbackupstore_types.go new file mode 100644 index 0000000..0e3d1db --- /dev/null +++ b/api/v1alpha1/mongodbbackupstore_types.go @@ -0,0 +1,68 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MongoDBBackupStoreSpec defines the desired state of MongoDBBackupStore +// +kubebuilder:object:generate=true +// +k8s:deepcopy-gen=true +type MongoDBBackupStoreSpec struct { + // +kubebuilder:validation:Enum=s3 + Type string `json:"type"` + S3 *MongoDBBackupStoreS3 `json:"s3,omitempty"` +} + +type MongoDBBackupStoreS3 struct { + Endpoint string `json:"endpoint"` + Bucket string `json:"bucket"` + Region string `json:"region"` + SecretRef S3SecretRef `json:"secretRef"` +} + +type S3SecretRef struct { + Name string `json:"name"` + Mappings S3SecretMappings `json:"mappings"` +} + +type S3SecretMappings struct { + AccessKeyID ToKeyMap `json:"accessKeyId"` + SecretAccessKey ToKeyMap `json:"secretAccessKey"` +} + +type ToKeyMap struct { + Key string `json:"key"` +} + +// MongoDBBackupStoreStatus defines the observed state of MongoDBBackupStore +// +kubebuilder:object:generate=true +// +k8s:deepcopy-gen=true +type MongoDBBackupStoreStatus struct { + Phase string `json:"phase,omitempty"` + LastTested *metav1.Time `json:"lastTested,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase" +// +kubebuilder:subresource:status +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type MongoDBBackupStore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MongoDBBackupStoreSpec `json:"spec,omitempty"` + Status MongoDBBackupStoreStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type MongoDBBackupStoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MongoDBBackupStore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MongoDBBackupStore{}, &MongoDBBackupStoreList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 376f61a..230bb70 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1,4 +1,5 @@ //go:build !ignore_autogenerated +// +build !ignore_autogenerated /* Copyright 2022. @@ -195,6 +196,131 @@ func (in *MongoDBBackupList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupSchedule) DeepCopyInto(out *MongoDBBackupSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupSchedule. +func (in *MongoDBBackupSchedule) DeepCopy() *MongoDBBackupSchedule { + if in == nil { + return nil + } + out := new(MongoDBBackupSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDBBackupSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupScheduleList) DeepCopyInto(out *MongoDBBackupScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MongoDBBackupSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupScheduleList. +func (in *MongoDBBackupScheduleList) DeepCopy() *MongoDBBackupScheduleList { + if in == nil { + return nil + } + out := new(MongoDBBackupScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDBBackupScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupScheduleSpec) DeepCopyInto(out *MongoDBBackupScheduleSpec) { + *out = *in + in.BackupSpec.DeepCopyInto(&out.BackupSpec) + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupScheduleSpec. 
+func (in *MongoDBBackupScheduleSpec) DeepCopy() *MongoDBBackupScheduleSpec { + if in == nil { + return nil + } + out := new(MongoDBBackupScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupScheduleStatus) DeepCopyInto(out *MongoDBBackupScheduleStatus) { + *out = *in + if in.LastBackupTime != nil { + in, out := &in.LastBackupTime, &out.LastBackupTime + *out = (*in).DeepCopy() + } + if in.LastFailureTime != nil { + in, out := &in.LastFailureTime, &out.LastFailureTime + *out = (*in).DeepCopy() + } + if in.ActiveBackups != nil { + in, out := &in.ActiveBackups, &out.ActiveBackups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupScheduleStatus. +func (in *MongoDBBackupScheduleStatus) DeepCopy() *MongoDBBackupScheduleStatus { + if in == nil { + return nil + } + out := new(MongoDBBackupScheduleStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MongoDBBackupSpec) DeepCopyInto(out *MongoDBBackupSpec) { *out = *in @@ -208,7 +334,7 @@ func (in *MongoDBBackupSpec) DeepCopyInto(out *MongoDBBackupSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - out.BackupBucketSecretRef = in.BackupBucketSecretRef + out.BackupStoreRef = in.BackupStoreRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupSpec. @@ -251,6 +377,142 @@ func (in *MongoDBBackupStatus) DeepCopy() *MongoDBBackupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupStore) DeepCopyInto(out *MongoDBBackupStore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStore. +func (in *MongoDBBackupStore) DeepCopy() *MongoDBBackupStore { + if in == nil { + return nil + } + out := new(MongoDBBackupStore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDBBackupStore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupStoreList) DeepCopyInto(out *MongoDBBackupStoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MongoDBBackupStore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStoreList. 
+func (in *MongoDBBackupStoreList) DeepCopy() *MongoDBBackupStoreList { + if in == nil { + return nil + } + out := new(MongoDBBackupStoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDBBackupStoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupStoreRef) DeepCopyInto(out *MongoDBBackupStoreRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStoreRef. +func (in *MongoDBBackupStoreRef) DeepCopy() *MongoDBBackupStoreRef { + if in == nil { + return nil + } + out := new(MongoDBBackupStoreRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupStoreS3) DeepCopyInto(out *MongoDBBackupStoreS3) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStoreS3. +func (in *MongoDBBackupStoreS3) DeepCopy() *MongoDBBackupStoreS3 { + if in == nil { + return nil + } + out := new(MongoDBBackupStoreS3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupStoreSpec) DeepCopyInto(out *MongoDBBackupStoreSpec) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(MongoDBBackupStoreS3) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStoreSpec. +func (in *MongoDBBackupStoreSpec) DeepCopy() *MongoDBBackupStoreSpec { + if in == nil { + return nil + } + out := new(MongoDBBackupStoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBBackupStoreStatus) DeepCopyInto(out *MongoDBBackupStoreStatus) { + *out = *in + if in.LastTested != nil { + in, out := &in.LastTested, &out.LastTested + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBBackupStoreStatus. +func (in *MongoDBBackupStoreStatus) DeepCopy() *MongoDBBackupStoreStatus { + if in == nil { + return nil + } + out := new(MongoDBBackupStoreStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MongoDBCluster) DeepCopyInto(out *MongoDBCluster) { *out = *in @@ -353,16 +615,49 @@ func (in *MongoDBClusterStatus) DeepCopy() *MongoDBClusterStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MongoDbBackupBucketSecretRef) DeepCopyInto(out *MongoDbBackupBucketSecretRef) {
+func (in *S3SecretMappings) DeepCopyInto(out *S3SecretMappings) {
+	*out = *in
+	out.AccessKeyID = in.AccessKeyID
+	out.SecretAccessKey = in.SecretAccessKey
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SecretMappings.
+func (in *S3SecretMappings) DeepCopy() *S3SecretMappings {
+	if in == nil {
+		return nil
+	}
+	out := new(S3SecretMappings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3SecretRef) DeepCopyInto(out *S3SecretRef) {
+	*out = *in
+	out.Mappings = in.Mappings
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SecretRef.
+func (in *S3SecretRef) DeepCopy() *S3SecretRef {
+	if in == nil {
+		return nil
+	}
+	out := new(S3SecretRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ToKeyMap) DeepCopyInto(out *ToKeyMap) {
 	*out = *in
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDbBackupBucketSecretRef.
-func (in *MongoDbBackupBucketSecretRef) DeepCopy() *MongoDbBackupBucketSecretRef {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ToKeyMap.
+func (in *ToKeyMap) DeepCopy() *ToKeyMap {
 	if in == nil {
 		return nil
 	}
-	out := new(MongoDbBackupBucketSecretRef)
+	out := new(ToKeyMap)
 	in.DeepCopyInto(out)
 	return out
 }
diff --git a/backup-image/entrypoint.sh b/backup-image/entrypoint.sh
index 83308e5..c563751 100644
--- a/backup-image/entrypoint.sh
+++ b/backup-image/entrypoint.sh
@@ -81,7 +81,7 @@ dump() {
 	warn "executing \"$cmd > $backup_file\""
 
 	$cmd >"$backup_file" || error "failed to back up database"
-	
+
 	[ -f "$backup_file" ] || error "failed to back up db, file not found"
 
 	info "backup finished"
@@ -113,12 +113,27 @@ s3push() {
 
 	debug "pretty manifest: $(echo "$manifest" | jq)"
 
+	echo "$manifest" >manifest.json
+
+	local destination="s3://$BUCKET"
+	if [ "$PREFIX" != "" ]; then destination="$destination/$PREFIX"; fi
+
+	__aws() {
+		if [ "$NO_VERIFY_SSL" = "true" ]; then
+			aws --no-verify-ssl "$@"
+		else
+			aws "$@"
+		fi
+	}
+
+	__aws s3 cp manifest.json "$destination"
+
 	# TODO: push this manifest first
 
 	# TODO: Upload each part to S3
 	for part in "${split_prefix}"*; do
 		debug "part: $part"
-		# aws s3 cp "$part" "s3://$S3_BUCKET/$S3_PREFIX/"
+		__aws s3 cp "$part" "$destination"
 	done
 }
 
@@ -127,7 +142,7 @@ hash() {
 }
 
 hash_json() {
-	printf '{"hash":{"sha256":"%s"},"filename":"%s"}' "$(hash "$1")" "$1"
+	printf '{"hash":{"sha256":"%s"},"filename":"%s"}' "$(hash "$1")" "$(basename "$1")"
 }
 
@@ -150,5 +165,16 @@ generate_manifest() {
 	echo "$manifest"
 }
 
-dump
-s3push
\ No newline at end of file
+main() {
+	case "$1" in
+	"backup")
+		dump
+		s3push
+		;;
+	"restore")
+		error "not implemented"
+		;;
+	*)
+		error "unknown command \"$1\" (expected backup or restore)"
+		;;
+	esac
+}
+
+main "$@"
diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml
index 7446320..4145e26 100644
--- a/config/crd/bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml
+++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml
@@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    
controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null name: mongodbaccessrequests.airlock.cloud.rocket.chat spec: group: airlock.cloud.rocket.chat @@ -31,19 +32,14 @@ spec: API properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -73,35 +69,43 @@ spec: conditions: description: Conditions is the list of status condition updates items: - description: Condition contains details for one aspect of the current - state of this API Resource. + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. 
- For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -116,6 +120,10 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml index 61c8d2a..ee9c6b1 100644 --- a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackups.yaml @@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null name: mongodbbackups.airlock.cloud.rocket.chat spec: group: airlock.cloud.rocket.chat @@ -14,31 +15,30 @@ spec: singular: mongodbbackup scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: MongoDBBackupSpec defines the desired state of MongoDBBackup properties: - backupBucketSecretRef: + backupStoreRef: properties: name: type: string @@ -46,7 +46,6 @@ spec: type: string required: - name - - namespace type: object cluster: type: string @@ -60,12 +59,12 @@ spec: items: type: string type: array + prefix: + type: string required: - - backupBucketSecretRef + - backupStoreRef - cluster - database - - excludedCollections - - includedCollections type: object status: description: MongoDBBackupStatus defines the observed state of MongoDBBackup @@ -77,35 +76,43 @@ spec: type: string conditions: items: - description: Condition contains details for one aspect of the current - state of this API Resource. + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. 
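Per the spec schema above, a backup names its store via backupStoreRef (namespace is no longer required), plus the target cluster and database, optional collection filters, and an optional object prefix. Creating one from Go would look roughly like this (a sketch: field and type names are inferred from the CRD schema and from how controllers/backup_restore.go reads them later in this patch; all values are placeholders):

// Sketch: creating a MongoDBBackup programmatically; placeholder values.
package main

import (
	"context"

	airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func createAdhocBackup(ctx context.Context, cl client.Client) error {
	backup := &airlockv1alpha1.MongoDBBackup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "rocketchat-adhoc",
			Namespace: "mongo",
		},
		Spec: airlockv1alpha1.MongoDBBackupSpec{
			Cluster:  "my-cluster",
			Database: "rocketchat",
			Prefix:   "adhoc",
			// The store namespace defaults to the backup's own namespace when omitted.
			BackupStoreRef: airlockv1alpha1.MongoDBBackupStoreRef{Name: "mongodbbackupstore-sample"},
		},
	}

	return cl.Create(ctx, backup)
}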
- Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -120,6 +127,10 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackupschedules.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackupschedules.yaml new file mode 100644 index 0000000..0b7d42e --- /dev/null +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackupschedules.yaml @@ -0,0 +1,211 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: mongodbbackupschedules.airlock.cloud.rocket.chat +spec: + group: airlock.cloud.rocket.chat + names: + kind: MongoDBBackupSchedule + listKind: MongoDBBackupScheduleList + plural: mongodbbackupschedules + singular: mongodbbackupschedule + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.schedule + name: Schedule + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.lastBackupTime + name: Last Backup + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MongoDBBackupScheduleSpec defines the desired state of MongoDBBackupSchedule + properties: + backupSpec: + description: BackupSpec defines the template for creating MongoDBBackup + resources + properties: + backupStoreRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + cluster: + type: string + database: + type: string + excludedCollections: + items: + type: string + type: array + includedCollections: + items: + type: string + type: array + prefix: + type: string + required: + - backupStoreRef + - cluster + - database + type: object + failedJobsHistoryLimit: + default: 1 + description: FailedJobsHistoryLimit is the number of failed backup + jobs to keep + format: int32 + minimum: 0 + type: integer + schedule: + description: Schedule is a cron expression defining when backups should + run + type: string + successfulJobsHistoryLimit: + default: 3 + description: SuccessfulJobsHistoryLimit is the number of successful + backup jobs to keep + format: int32 + minimum: 0 + type: integer + suspend: + default: false + description: Suspend suspends the schedule if true + type: boolean + required: + - backupSpec + - schedule + type: object + status: + description: MongoDBBackupScheduleStatus defines the observed state of + MongoDBBackupSchedule + properties: + activeBackups: + description: ActiveBackups is a list of active backup names created + by this schedule + items: + type: string + type: array + conditions: + description: Conditions represent the latest available observations + of the schedule's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. 
The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastBackupName: + description: LastBackupName is the name of the last created backup + type: string + lastBackupTime: + description: LastBackupTime is the time of the last successful backup + format: date-time + type: string + lastFailureMessage: + description: LastFailureMessage contains the error message from the + last failed backup + type: string + lastFailureTime: + description: LastFailureTime is the time of the last failed backup + format: date-time + type: string + phase: + description: Phase indicates the overall status of the schedule + enum: + - Succeeding + - Failing + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackupstores.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackupstores.yaml new file mode 100644 index 0000000..c51a089 --- /dev/null +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbbackupstores.yaml @@ -0,0 +1,171 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: mongodbbackupstores.airlock.cloud.rocket.chat +spec: + group: airlock.cloud.rocket.chat + names: + kind: MongoDBBackupStore + listKind: MongoDBBackupStoreList + plural: mongodbbackupstores + singular: mongodbbackupstore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Phase + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
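Looking back at the MongoDBBackupSchedule spec above: it embeds a complete backupSpec template next to a cron expression, with suspend and the two history limits governing creation and retention. A minimal schedule object might look like this (a sketch: field names are inferred from the CRD schema, Suspend is modeled as *bool to match the optional defaulted boolean, and all values are placeholders):

// Sketch of a nightly MongoDBBackupSchedule; placeholder values.
package main

import (
	airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func nightlySchedule() *airlockv1alpha1.MongoDBBackupSchedule {
	suspend := false
	return &airlockv1alpha1.MongoDBBackupSchedule{
		ObjectMeta: metav1.ObjectMeta{Name: "rocketchat-nightly", Namespace: "mongo"},
		Spec: airlockv1alpha1.MongoDBBackupScheduleSpec{
			// Standard 5-field cron, matching gocron.CronJob(expr, false)
			// as used by the schedule controller later in this patch.
			Schedule: "0 2 * * *",
			Suspend:  &suspend,
			BackupSpec: airlockv1alpha1.MongoDBBackupSpec{
				Cluster:        "my-cluster",
				Database:       "rocketchat",
				Prefix:         "nightly",
				BackupStoreRef: airlockv1alpha1.MongoDBBackupStoreRef{Name: "mongodbbackupstore-sample"},
			},
		},
	}
}

At fire time the controller stamps the prefix with a timestamp, so each run lands under nightly/<timestamp> in the store's bucket.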
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MongoDBBackupStoreSpec defines the desired state of MongoDBBackupStore + properties: + s3: + properties: + bucket: + type: string + endpoint: + type: string + region: + type: string + secretRef: + properties: + mappings: + properties: + accessKeyId: + properties: + key: + type: string + required: + - key + type: object + secretAccessKey: + properties: + key: + type: string + required: + - key + type: object + required: + - accessKeyId + - secretAccessKey + type: object + name: + type: string + required: + - mappings + - name + type: object + required: + - bucket + - endpoint + - region + - secretRef + type: object + type: + enum: + - s3 + type: string + required: + - type + type: object + status: + description: MongoDBBackupStoreStatus defines the observed state of MongoDBBackupStore + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastTested: + format: date-time + type: string + phase: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml b/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml index 4772378..2dace5f 100644 --- a/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml +++ b/config/crd/bases/airlock.cloud.rocket.chat_mongodbclusters.yaml @@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null name: mongodbclusters.airlock.cloud.rocket.chat spec: group: airlock.cloud.rocket.chat @@ -27,19 +28,14 @@ spec: description: MongoDBCluster is the Schema for the mongodbclusters API properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -55,9 +51,10 @@ spec: properties: enabled: default: false - description: |- - If this is set, the cluster will be enabled for scheduled autoscaling. - The way it works is that the cluster will be scaled up to the high tier at the specified time, and scaled down to the lowTier at the specified time. + description: If this is set, the cluster will be enabled for scheduled + autoscaling. The way it works is that the cluster will be scaled + up to the high tier at the specified time, and scaled down to + the lowTier at the specified time. type: boolean highTier: default: M50 @@ -141,35 +138,43 @@ spec: properties: conditions: items: - description: Condition contains details for one aspect of the current - state of this API Resource. + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. 
For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -184,6 +189,10 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
                        maxLength: 316
                        pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
                        type: string
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 3f03da0..d2411e7 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -5,6 +5,7 @@ resources:
 - bases/airlock.cloud.rocket.chat_mongodbclusters.yaml
 - bases/airlock.cloud.rocket.chat_mongodbaccessrequests.yaml
 - bases/airlock.cloud.rocket.chat_mongodbbackups.yaml
+- bases/airlock.cloud.rocket.chat_mongodbbackupstores.yaml
 #+kubebuilder:scaffold:crdkustomizeresource

 patchesStrategicMerge: []
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 46c98e8..85a76e1 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -2,6 +2,7 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
+  creationTimestamp: null
   name: manager-role
 rules:
 - apiGroups:
@@ -15,6 +16,25 @@ rules:
   - ""
   resources:
   - nodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumeclaims
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
   - secrets
   verbs:
   - get
@@ -48,8 +68,6 @@ rules:
 - apiGroups:
   - airlock.cloud.rocket.chat
   resources:
   - mongodbaccessrequests
-  - mongodbbackups
-  - mongodbclusters
   verbs:
   - create
   - delete
@@ -62,15 +80,115 @@ rules:
 - apiGroups:
   - airlock.cloud.rocket.chat
   resources:
   - mongodbaccessrequests/finalizers
-  - mongodbbackups/finalizers
-  - mongodbclusters/finalizers
   verbs:
   - update
 - apiGroups:
   - airlock.cloud.rocket.chat
   resources:
   - mongodbaccessrequests/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbbackups
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbbackups/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
   - mongodbbackups/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbbackupschedules
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbbackupschedules/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbbackupschedules/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbbackupstores
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbbackupstores/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbbackupstores/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbclusters
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
+  - mongodbclusters/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - airlock.cloud.rocket.chat
+  resources:
   - mongodbclusters/status
   verbs:
   - get
diff --git a/config/samples/airlock_v1alpha1_mongodbbackupstore.yaml b/config/samples/airlock_v1alpha1_mongodbbackupstore.yaml
new file mode 100644
index 0000000..e4bd884
--- /dev/null
+++ b/config/samples/airlock_v1alpha1_mongodbbackupstore.yaml
@@ -0,0 +1,24 @@
+apiVersion: airlock.cloud.rocket.chat/v1alpha1
+kind: MongoDBBackupStore
+metadata:
+  labels:
+    app.kubernetes.io/name: mongodbbackupstore
+    app.kubernetes.io/instance: mongodbbackupstore-sample
+    app.kubernetes.io/part-of: airlock
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/created-by: airlock
+  name: mongodbbackupstore-sample
+  namespace: mongo
+spec:
+  type: s3
+  s3:
+    endpoint: https://myminio-hl.minio-tenant.svc.cluster.local:9000
+    bucket: backups
+    region: us-east-1
+    secretRef:
+      name: mongodbbucketstoresecret
+      mappings:
+        accessKeyId:
+          key: accessKeyId
+        secretAccessKey:
+          key: secretAccessKey
\ No newline at end of file
diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml
index 9bd6b84..fd95c47 100644
--- a/config/samples/kustomization.yaml
+++ b/config/samples/kustomization.yaml
@@ -3,4 +3,5 @@ resources:
 - airlock_v1alpha1_mongodbcluster.yaml
 - airlock_v1alpha1_mongodbaccessrequest.yaml
 - airlock_v1alpha1_mongodbbackup.yaml
+- airlock_v1alpha1_mongodbbackupstore.yaml
 #+kubebuilder:scaffold:manifestskustomizesamples
diff --git a/controllers/backup_restore.go b/controllers/backup_restore.go
new file mode 100644
index 0000000..1f45c35
--- /dev/null
+++ b/controllers/backup_restore.go
@@ -0,0 +1,282 @@
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"strings"
+	"time"
+
+	"github.com/RocketChat/airlock/api/v1alpha1"
+	"github.com/RocketChat/airlock/controllers/reconciler"
+	"go.mongodb.org/mongo-driver/bson"
+
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	batchv1 "k8s.io/api/batch/v1"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// getDatabaseSize is used to calculate the volume size required for the backup job
+func getDatabaseSize(ctx context.Context, connectionString, database string) (int64, error) {
+	logger := log.FromContext(ctx)
+
+	logger.Info("trying to estimate db size", "database", database)
+
+	client, err := mongo.Connect(ctx, options.Client().ApplyURI(connectionString))
+	if err != nil {
+		return 0, err
+	}
+	defer client.Disconnect(ctx)
+
+	db := client.Database(database)
+
+	logger.Info("running dbStats against db", "database", database)
+
+	// Run dbStats command to get database size
+	var result bson.M
+	err = db.RunCommand(ctx, bson.D{{Key: "dbStats", Value: 1}}).Decode(&result)
+	if err != nil {
+		return 0, err
+	}
+
+	// Extract dataSize from the result
+	dataSize, ok := result["dataSize"]
+	if !ok {
+		return 0, fmt.Errorf("dataSize not found in dbStats result")
+	}
+
+	logger.Info("dbSize response", "database", database, "size", dataSize)
+
+	// Convert to int64 (dataSize can come back as int32, int64 or float64)
+	switch v := dataSize.(type) {
+	case int32:
+		return int64(v), nil
+	case int64:
+		return v, nil
+	case float64:
+		return int64(math.Ceil(v)), nil
+	default:
+		return 0, fmt.Errorf("unexpected dataSize type: %T", dataSize)
+	}
+}
+
+func getMongoDbBackupImage(ctx context.Context, handler client.Client, cluster string) (string, error) {
+	var clusterCr v1alpha1.MongoDBCluster
+
+	err := handler.Get(ctx, client.ObjectKey{Name: cluster}, &clusterCr)
+	if err != nil {
+		return "", err
+	}
+
+	return clusterCr.Spec.BackupImage, nil
+}
+
+func reconcileMongoDbAccessRequest(ctx context.Context, cl client.Client, backupCr *v1alpha1.MongoDBBackup) (*v1alpha1.MongoDBAccessRequest, error) {
+	var accessRequest v1alpha1.MongoDBAccessRequest
+
+	accessRequest.Name = fmt.Sprintf("%s-access", backupCr.Name)
+
+	accessRequest.Namespace = backupCr.Namespace
+
+	_, err := reconciler.CreateOrUpdate(ctx, cl, backupCr, &accessRequest, func() error {
+		accessRequest.Spec.ClusterName = backupCr.Spec.Cluster
+		accessRequest.Spec.Database = backupCr.Spec.Database
+		accessRequest.Spec.UserName = backupCr.Name + "-user"
+
+		return nil
+	})
+
+	return &accessRequest, err
+}
+
+func reconcilePvc(ctx context.Context, cl client.Client, backupCr v1alpha1.MongoDBBackup, accessRequest v1alpha1.MongoDBAccessRequest) (*v1.PersistentVolumeClaim, error) {
+	logger := log.FromContext(ctx)
+
+	var pvc v1.PersistentVolumeClaim
+
+	pvc.Name = backupCr.Name
+	pvc.Namespace = backupCr.Namespace
+
+	_, err := reconciler.CreateOrPatch(ctx, cl, &backupCr, &pvc, func() error {
+		existingStorage := pvc.Spec.Resources.Requests.Storage()
+
+		if existingStorage.CmpInt64(0) == 0 {
+			logger.Info("no existing request set, requesting db size")
+			// get new size and set it
+			if err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute*3, false, func(ctx context.Context) (done bool, err error) {
+				if err := cl.Get(ctx, client.ObjectKeyFromObject(&accessRequest), &accessRequest); err != nil {
+					return false, err
+				}
+
+				if len(accessRequest.Status.Conditions) > 0 && accessRequest.Status.Conditions[0].Status == metav1.ConditionTrue {
+					return true, nil
+				}
+
+				return false, nil
+			}); err != nil {
+				return err
+			}
+
+			var secret v1.Secret
+
+			secret.Name = accessRequest.Spec.SecretName
+			if secret.Name == "" {
+				secret.Name = accessRequest.Name
+			}
+
+			secret.Namespace = accessRequest.Namespace
+
+			if err := cl.Get(ctx, client.ObjectKeyFromObject(&secret), &secret); err != nil {
+				return err
+			}
+
+			size, err := getDatabaseSize(ctx, string(secret.Data["connectionString"]), backupCr.Spec.Database)
+			if err != nil {
+				return err
+			}
+
+			// add some buffer (2x the database size) for backup overhead and splitting
+			requestSize := max(size*2, 1024*1024*1024)
+
+			pvc.Spec = v1.PersistentVolumeClaimSpec{
+				AccessModes: []v1.PersistentVolumeAccessMode{
+					v1.ReadWriteOnce,
+				},
+				Resources: v1.VolumeResourceRequirements{
+					Requests: v1.ResourceList{
+						v1.ResourceStorage: *resource.NewQuantity(requestSize, resource.BinarySI),
+					},
+				},
+				StorageClassName: nil,
+			}
+		} else {
+			// make sure we keep the existing storage request
+			pvc.Spec.Resources.Requests = v1.ResourceList{
+				v1.ResourceStorage: *pvc.Spec.Resources.Requests.Storage(),
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &pvc, nil
+}
+
+func _getEnvsForMongo(accessRequest v1alpha1.MongoDBAccessRequest, backup v1alpha1.MongoDBBackup) []v1.EnvVar {
+	return []v1.EnvVar{
+		getEnvVarFromSecret("MONGODB_URI", accessRequest.Name, "connectionString"),
+		getEnvVar("COLLECTIONS", strings.Join(backup.Spec.IncludedCollections, ",")),
+		getEnvVar("EXCLUDED_COLLECTIONS", strings.Join(backup.Spec.ExcludedCollections, ",")),
+		getEnvVar("DATABASE", backup.Spec.Database),
+	}
+}
+
+func _getS3EnvVars(ctx context.Context, cl client.Client, backupCr v1alpha1.MongoDBBackup) ([]v1.EnvVar, error) {
+	var store = v1alpha1.MongoDBBackupStore{}
+
+	store.Name = backupCr.Spec.BackupStoreRef.Name
+	store.Namespace = backupCr.Spec.BackupStoreRef.Namespace
+
+	err := cl.Get(ctx, client.ObjectKeyFromObject(&store), &store)
+	if err != nil {
+		return []v1.EnvVar{}, err
+	}
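+	// Note: together with _getEnvsForMongo above, these are the variables
+	// backup-image/entrypoint.sh expects: MONGODB_URI, DATABASE, COLLECTIONS
+	// and EXCLUDED_COLLECTIONS drive the dump, while AWS_ENDPOINT_URL_S3,
+	// AWS_REGION, the credential pair, BUCKET, and the PREFIX/NO_VERIFY_SSL
+	// values added in _reconcileJob below drive s3push.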
+ + return []v1.EnvVar{ + getEnvVar("AWS_ENDPOINT_URL_S3", store.Spec.S3.Endpoint), + getEnvVar("AWS_REGION", store.Spec.S3.Region), + getEnvVarFromSecret("AWS_ACCESS_KEY_ID", store.Spec.S3.SecretRef.Name, store.Spec.S3.SecretRef.Mappings.AccessKeyID.Key), + getEnvVarFromSecret("AWS_SECRET_ACCESS_KEY", store.Spec.S3.SecretRef.Name, store.Spec.S3.SecretRef.Mappings.SecretAccessKey.Key), + getEnvVar("BUCKET", store.Spec.S3.Bucket), + }, nil +} + +func _reconcileJob(ctx context.Context, cl client.Client, backupCr *v1alpha1.MongoDBBackup, mode string) (*batchv1.Job, error) { + // use backup job for the image + image, err := getMongoDbBackupImage(ctx, cl, backupCr.Spec.Cluster) + if err != nil { + return nil, err + } + + accessRequest, err := reconcileMongoDbAccessRequest(ctx, cl, backupCr) + if err != nil { + return nil, err + } + + pvc, err := reconcilePvc(ctx, cl, *backupCr, *accessRequest) + if err != nil { + return nil, err + } + + s3EnvVars, err := _getS3EnvVars(ctx, cl, *backupCr) + if err != nil { + return nil, err + } + + mongoEnvVars := _getEnvsForMongo(*accessRequest, *backupCr) + + var job = batchv1.Job{} + + job.Name = backupCr.Name + job.Namespace = backupCr.Namespace + + _, err = reconciler.CreateOrPatch(ctx, cl, backupCr, &job, func() error { + container := v1.Container{ + Image: image, + ImagePullPolicy: v1.PullIfNotPresent, + Args: []string{mode}, + Name: backupCr.Name, + Env: append( + append(mongoEnvVars, s3EnvVars...), + getEnvVar("PREFIX", backupCr.Spec.Prefix), + getEnvVar("NO_VERIFY_SSL", "true"), + getEnvVar("BACKUP_FILE", "/backups/backup.gz"), + ), + VolumeMounts: []v1.VolumeMount{ + { + Name: "backup-storage", + MountPath: "/backups", + }, + }, + } + + job.Spec.Template.Spec.Containers = []v1.Container{container} + job.Spec.Template.Spec.Volumes = []v1.Volume{ + { + Name: "backup-storage", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }, + } + + job.Spec.Template.Spec.RestartPolicy = v1.RestartPolicyNever + return nil + }) + + if err != nil { + return nil, err + } + + return &job, nil +} + +func reconcileBackupJob(ctx context.Context, cl client.Client, backupCr *v1alpha1.MongoDBBackup) (*batchv1.Job, error) { + return _reconcileJob(ctx, cl, backupCr, "backup") +} + +func reconcileRestoreJob(ctx context.Context, cl client.Client, backupCr *v1alpha1.MongoDBBackup) (*batchv1.Job, error) { + return _reconcileJob(ctx, cl, backupCr, "restore") +} diff --git a/controllers/common.go b/controllers/common.go index 5d6d96b..b205c98 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -6,10 +6,8 @@ import ( "github.com/mongodb-forks/digest" "go.mongodb.org/atlas/mongodbatlas" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo" - "go.mongodb.org/mongo-driver/mongo/options" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" ) @@ -71,44 +69,23 @@ func getClusterNameFromHostTemplate(ctx context.Context, client *mongodbatlas.Cl return "", errors.NewBadRequest("Cluster not found when searching for it's connectionString in atlas") } -// getDatabaseSize is used to calculate the volume size required for the backup job -func getDatabaseSize(ctx context.Context, connectionString, database string, collections []string) (int64, error) { - client, err := mongo.Connect(ctx, options.Client().ApplyURI(connectionString)) - if err != nil { - return 0, err - } - defer client.Disconnect(ctx) - - db := client.Database(database) - var 
totalSize int64
-
-	if len(collections) == 0 {
-		// Get all collections in the database
-		collectionNames, err := db.ListCollectionNames(ctx, map[string]interface{}{})
-		if err != nil {
-			return 0, err
-		}
-		collections = collectionNames
+func getEnvVar(name, value string) v1.EnvVar {
+	return v1.EnvVar{
+		Name:  name,
+		Value: value,
 	}
+}
 
-	// Calculate size for each collection
-	for _, collectionName := range collections {
-		// Get collection stats using the collStats command
-		var result struct {
-			Size int64 `bson:"size"`
-		}
-
-		err := db.RunCommand(ctx, bson.M{
-			"collStats": collectionName,
-		}).Decode(&result)
-
-		if err != nil {
-			// Collection might not exist, skip it
-			continue
-		}
-
-		totalSize += result.Size
+func getEnvVarFromSecret(name, secretRef, key string) v1.EnvVar {
+	return v1.EnvVar{
+		Name: name,
+		ValueFrom: &v1.EnvVarSource{
+			SecretKeyRef: &v1.SecretKeySelector{
+				Key: key,
+				LocalObjectReference: v1.LocalObjectReference{
+					Name: secretRef,
+				},
+			},
+		},
 	}
-
-	return totalSize, nil
 }
diff --git a/controllers/mongodbbackup_controller.go b/controllers/mongodbbackup_controller.go
index f10a3c8..fc147f6 100644
--- a/controllers/mongodbbackup_controller.go
+++ b/controllers/mongodbbackup_controller.go
@@ -3,22 +3,22 @@ package controllers
 import (
 	"context"
 	"fmt"
-	"strings"
 	"time"
 
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1"
 )
 
-// MongoDBBackupReconciler reconciles a MongoDBBackup object
+// TODO(deb): use more consts for phases, reasons etc
+
 type MongoDBBackupReconciler struct {
 	client.Client
 	Scheme *runtime.Scheme
@@ -33,171 +33,138 @@ const (
 //+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups/finalizers,verbs=update
+//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackupstores,verbs=get;list;watch
 //+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
 //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch,resourceNames=*
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
 
 func (r *MongoDBBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	log := log.FromContext(ctx)
 
 	var backup airlockv1alpha1.MongoDBBackup
-	if err := r.Get(ctx, req.NamespacedName, &backup); err != nil {
-		log.Error(err, "unable to fetch MongoDBBackup")
+
+	backup.Name = req.Name
+	backup.Namespace = req.Namespace
+
+	err := r.Get(ctx, req.NamespacedName, &backup)
+	if err != nil {
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	// nothing to do if we already have updated the status of the "backup"
-	// TODO: likely since a job, we should retry checking the job status and update the state here
-	if backup.Status.Phase == StatusBackupCompleted || backup.Status.Phase == StatusBackupFailed {
-		return ctrl.Result{}, nil
-	}
+	base 
:= backup.DeepCopy() - if backup.Status.Phase == "" { - backup.Status.Phase = StatusBackupPending + log.Info("reconciling backup job") - backup.Status.StartTime = &metav1.Time{Time: time.Now()} + // Check if backup store is ready + var store airlockv1alpha1.MongoDBBackupStore + store.Name = backup.Spec.BackupStoreRef.Name + if backup.Spec.BackupStoreRef.Namespace != "" { + store.Namespace = backup.Spec.BackupStoreRef.Namespace + } else { + store.Namespace = backup.Namespace + } - // meta.SetStatusCondition(&backup.Status.Conditions, metav1.Condition{ - // Type: "Pending", - // Status: metav1.ConditionUnknown, - // Reason: "backup has not started yet", - // LastTransitionTime: metav1.NewTime(time.Now()), - // Message: "backup job has not been scheduled yet", - // }) + if err := r.Get(ctx, client.ObjectKeyFromObject(&store), &store); err != nil { + meta.SetStatusCondition(&backup.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "BackupStoreNotFound", + Message: fmt.Sprintf("backup store not found: %s", err.Error()), + }) + backup.Status.Phase = StatusBackupFailed - if err := r.Status().Update(ctx, &backup); err != nil { - log.Error(err, "failed to update backup status") + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(base)); err != nil { return ctrl.Result{}, err } - return ctrl.Result{RequeueAfter: time.Second * 5}, nil - } - - // now use the secret as a reference for all mongo env vars - // use the backup.Spec.backupBucketSecret for the same purpose - var backupJob = batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: backup.Name, - Namespace: backup.Namespace, - }, - } - - err := r.Get(ctx, client.ObjectKeyFromObject(&backupJob), &backupJob) - - if client.IgnoreNotFound(err) != nil { - // FIXME: handle it - return ctrl.Result{}, err + return ctrl.Result{RequeueAfter: time.Minute * 1}, nil } - var maxParallel int32 = 1 + // Check if backup store is ready + if store.Status.Phase != "Ready" { + meta.SetStatusCondition(&backup.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "BackupStoreNotReady", + Message: fmt.Sprintf("backup store is not ready: phase=%s", store.Status.Phase), + }) + if backup.Status.Phase == "" { + backup.Status.Phase = StatusBackupPending + } - backupImage, err := r.getBackupImage(ctx, backup.Spec.Cluster) - if err != nil { - //FIXME: handle non nil including when the referenced cluster does not exist - return ctrl.Result{}, err - } + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(base)); err != nil { + return ctrl.Result{}, err + } - envVars, err := r.getMongoDbEnvVars(ctx, backup) - if err != nil { + return ctrl.Result{RequeueAfter: time.Minute * 1}, nil } - backupJob.Spec = batchv1.JobSpec{ - Parallelism: &maxParallel, - // Completions: 1, - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyNever, - Containers: []v1.Container{ - { - ImagePullPolicy: v1.PullIfNotPresent, - Name: backup.Name, - Image: backupImage, - Command: []string{"sleep", "1d"}, - Env: *envVars, - }, - }, - }, - }, + // Initialize phase if not set + if backup.Status.Phase == "" { + backup.Status.Phase = StatusBackupPending + if backup.Status.StartTime == nil { + now := metav1.Now() + backup.Status.StartTime = &now + } } - controllerutil.SetControllerReference(&backup, &backupJob, r.Scheme) - - err = r.Create(ctx, &backupJob) + job, err := reconcileBackupJob(ctx, r.Client, &backup) if err != nil { - return ctrl.Result{}, err - } - - return 
ctrl.Result{}, nil -} - -func (r *MongoDBBackupReconciler) getBackupImage(ctx context.Context, cluster string) (string, error) { - var mongodbCluster = airlockv1alpha1.MongoDBCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: cluster, - }, - } + meta.SetStatusCondition(&backup.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "FailedJobReconciliation", + Message: fmt.Sprintf("failed to reconcile backup job: %s", err.Error()), + }) + backup.Status.Phase = StatusBackupFailed + + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(base)); err != nil { + return ctrl.Result{}, err + } - err := r.Get(ctx, client.ObjectKeyFromObject(&mongodbCluster), &mongodbCluster) - if err != nil { - return "", err + return ctrl.Result{RequeueAfter: time.Minute * 1}, nil } - return mongodbCluster.Spec.BackupImage, nil -} - -func (r *MongoDBBackupReconciler) getMongoDbEnvVars(ctx context.Context, backup airlockv1alpha1.MongoDBBackup) (*[]v1.EnvVar, error) { - var accessRequest airlockv1alpha1.MongoDBAccessRequest - - accessRequest.Name = fmt.Sprintf("%s-access", backup.Name) - accessRequest.Namespace = backup.Namespace - - err := r.Get(ctx, client.ObjectKeyFromObject(&accessRequest), &accessRequest) - if client.IgnoreNotFound(err) != nil { - // FIXME: handle error here + // Update phase based on job status + if job.Status.CompletionTime != nil { + backup.Status.Phase = StatusBackupCompleted + backup.Status.CompletionTime = job.Status.CompletionTime + meta.SetStatusCondition(&backup.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionTrue, + Reason: "BackupCompleted", + Message: "Backup job completed successfully", + }) + } else if job.Status.Failed > 0 { + backup.Status.Phase = StatusBackupFailed + backup.Status.CompletionTime = &metav1.Time{Time: time.Now()} + meta.SetStatusCondition(&backup.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "BackupFailed", + Message: "Backup job failed", + }) + } else if backup.Status.Phase == StatusBackupPending { + backup.Status.Phase = StatusBackupPending + meta.SetStatusCondition(&backup.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "BackupInProgress", + Message: "Backup job is in progress", + }) } - err = nil - - accessRequest.Spec.ClusterName = backup.Spec.Cluster - accessRequest.Spec.Database = backup.Spec.Database - accessRequest.Spec.UserName = backup.Name + "-user" - - controllerutil.SetControllerReference(&backup, &accessRequest, r.Scheme) - - err = r.Create(ctx, &accessRequest) - if err != nil { - // TODO: handle this error + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(base)); err != nil { + return ctrl.Result{}, err } - secretRef := accessRequest.Name - - return &[]v1.EnvVar{ - getEnvVarFromSecret("MONGODB_URI", secretRef, "connectionString"), - getEnvVar("DATABASE", backup.Spec.Database), - getEnvVar("COLLECTIONS", strings.Join(backup.Spec.IncludedCollections, ",")), - getEnvVar("EXCLUDED_COLLECTIONS", strings.Join(backup.Spec.ExcludedCollections, ",")), - }, nil -} - -func getEnvVar(name, value string) v1.EnvVar { - return v1.EnvVar{ - Name: name, - Value: value, + // If not completed or failed, requeue to check status + if backup.Status.Phase == StatusBackupPending { + return ctrl.Result{RequeueAfter: time.Minute * 1}, nil } -} -func getEnvVarFromSecret(name, secretRef, key string) v1.EnvVar { - return v1.EnvVar{ - Name: name, - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: 
&v1.SecretKeySelector{
-				Key: key,
-				LocalObjectReference: v1.LocalObjectReference{
-					Name: secretRef,
-				},
-			},
-		},
-	}
+	return ctrl.Result{}, nil
 }
 
 // SetupWithManager sets up the controller with the Manager
@@ -205,6 +172,8 @@ func (r *MongoDBBackupReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&airlockv1alpha1.MongoDBBackup{}).
 		Owns(&batchv1.Job{}).
+		Owns(&v1.PersistentVolumeClaim{}).
 		Owns(&airlockv1alpha1.MongoDBAccessRequest{}).
+		Owns(&airlockv1alpha1.MongoDBBackupStore{}).
 		Complete(r)
 }
diff --git a/controllers/mongodbbackupschedule_controller.go b/controllers/mongodbbackupschedule_controller.go
new file mode 100644
index 0000000..5a11c24
--- /dev/null
+++ b/controllers/mongodbbackupschedule_controller.go
@@ -0,0 +1,300 @@
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/go-co-op/gocron/v2"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1"
+	"github.com/RocketChat/airlock/controllers/reconciler"
+)
+
+type MongoDBBackupScheduleReconciler struct {
+	client.Client
+	Scheme    *runtime.Scheme
+	Scheduler gocron.Scheduler
+}
+
+// TODO: more consts
+const (
+	PhaseSucceeding = "Succeeding"
+	PhaseFailing    = "Failing"
+)
+
+//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackupschedules,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackupschedules/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackupschedules/finalizers,verbs=update
+//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackups,verbs=get;list;watch;create
+
+func (r *MongoDBBackupScheduleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := log.FromContext(ctx)
+
+	var schedule airlockv1alpha1.MongoDBBackupSchedule
+
+	schedule.Name = req.Name
+	schedule.Namespace = req.Namespace
+
+	err := r.Get(ctx, req.NamespacedName, &schedule)
+	if err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	base := schedule.DeepCopy()
+
+	log.Info("reconciling backup schedule", "schedule", schedule.Spec.Schedule)
+
+	var store airlockv1alpha1.MongoDBBackupStore
+	store.Name = schedule.Spec.BackupSpec.BackupStoreRef.Name
+	if schedule.Spec.BackupSpec.BackupStoreRef.Namespace != "" {
+		store.Namespace = schedule.Spec.BackupSpec.BackupStoreRef.Namespace
+	} else {
+		store.Namespace = schedule.Namespace
+	}
+
+	if err := r.Get(ctx, client.ObjectKeyFromObject(&store), &store); err != nil {
+		meta.SetStatusCondition(&schedule.Status.Conditions, metav1.Condition{
+			Type:    "Ready",
+			Status:  metav1.ConditionFalse,
+			Reason:  "BackupStoreNotFound",
+			Message: fmt.Sprintf("backup store not found: %s", err.Error()),
+		})
+		schedule.Status.Phase = PhaseFailing
+
+		if err := r.Status().Patch(ctx, &schedule, client.MergeFrom(base)); err != nil {
+			return ctrl.Result{}, err
+		}
+
+		return ctrl.Result{RequeueAfter: time.Minute * 1}, nil
+	}
+
+	if store.Status.Phase != "Ready" {
+		meta.SetStatusCondition(&schedule.Status.Conditions, metav1.Condition{
+			Type:    "Ready",
+			Status:  metav1.ConditionFalse,
+			Reason:  "BackupStoreNotReady",
+			Message: fmt.Sprintf("backup store is not ready: phase=%s", 
store.Status.Phase), + }) + schedule.Status.Phase = PhaseFailing + + if err := r.Status().Patch(ctx, &schedule, client.MergeFrom(base)); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: time.Minute * 1}, nil + } + + suspend := false + if schedule.Spec.Suspend != nil { + suspend = *schedule.Spec.Suspend + } + + jobs := r.Scheduler.Jobs() + var existingJob gocron.Job + for _, job := range jobs { + tags := job.Tags() + if len(tags) >= 2 && tags[0] == schedule.Namespace && tags[1] == schedule.Name { + existingJob = job + break + } + } + + if suspend { + if existingJob != nil { + err = r.Scheduler.RemoveJob(existingJob.ID()) + if err != nil { + log.Error(err, "failed to remove job") + } + } + schedule.Status.Phase = PhaseFailing + meta.SetStatusCondition(&schedule.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "Suspended", + Message: "Schedule is suspended", + }) + } else { + if existingJob != nil { + err = r.Scheduler.RemoveJob(existingJob.ID()) + if err != nil { + log.Error(err, "failed to remove existing job") + } + } + + // TODO(deb): add a flag to only keep x amount of backup crs and delete older ones + scheduleCopy := schedule.DeepCopy() + _, err = r.Scheduler.NewJob( + gocron.CronJob(schedule.Spec.Schedule, false), + gocron.NewTask( + func() { + r.createBackup(context.Background(), scheduleCopy) + }, + ), + gocron.WithTags(schedule.Namespace, schedule.Name), + ) + + if err != nil { + meta.SetStatusCondition(&schedule.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "ScheduleCreationFailed", + Message: fmt.Sprintf("failed to create schedule: %s", err.Error()), + }) + schedule.Status.Phase = PhaseFailing + + if err := r.Status().Patch(ctx, &schedule, client.MergeFrom(base)); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: time.Minute * 1}, nil + } + } + + if err := r.updateStatusFromBackups(ctx, &schedule); err != nil { + log.Error(err, "failed to update status from backups") + } + + if err := r.Status().Patch(ctx, &schedule, client.MergeFrom(base)); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: time.Minute * 5}, nil +} + +func (r *MongoDBBackupScheduleReconciler) createBackup(ctx context.Context, schedule *airlockv1alpha1.MongoDBBackupSchedule) { + log := log.FromContext(ctx) + + timestamp := time.Now().Format("20060102150405") + backupName := fmt.Sprintf("%s-%s", schedule.Name, timestamp) + + backup := &airlockv1alpha1.MongoDBBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupName, + Namespace: schedule.Namespace, + }, + } + + backupSpec := schedule.Spec.BackupSpec + backupSpec.Prefix = fmt.Sprintf("%s/%s", schedule.Spec.BackupSpec.Prefix, timestamp) + + _, err := reconciler.CreateOrPatch(ctx, r.Client, schedule, backup, func() error { + backup.Spec = backupSpec + if backup.Labels == nil { + backup.Labels = make(map[string]string) + } + backup.Labels["airlock.cloud.rocket.chat/scheduler"] = schedule.Name + return nil + }) + + if err != nil { + log.Error(err, "failed to create or patch backup", "backup", backupName) + return + } + + log.Info("Created or patched backup from schedule", "backup", backupName, "schedule", schedule.Name) +} + +func (r *MongoDBBackupScheduleReconciler) updateStatusFromBackups(ctx context.Context, schedule *airlockv1alpha1.MongoDBBackupSchedule) error { + log := log.FromContext(ctx) + + var backupList airlockv1alpha1.MongoDBBackupList + err := r.List(ctx, 
&backupList, client.InNamespace(schedule.Namespace), client.MatchingLabels{ + "airlock.cloud.rocket.chat/scheduler": schedule.Name, + }) + if err != nil { + return fmt.Errorf("failed to list backups: %w", err) + } + + var ( + activeBackups []string + lastBackupTime *metav1.Time + lastBackupName string + lastFailureTime *metav1.Time + lastFailureMessage string + ) + + recentSuccessCount := 0 + recentFailureCount := 0 + cutoffTime := time.Now().Add(-24 * time.Hour) + + for i := range backupList.Items { + backup := &backupList.Items[i] + + if backup.Status.Phase != "Completed" && backup.Status.Phase != "Failed" { + activeBackups = append(activeBackups, backup.Name) + } + + if backup.Status.CompletionTime != nil { + completionTime := backup.Status.CompletionTime.Time + + if completionTime.After(cutoffTime) { + if backup.Status.Phase == "Completed" { + recentSuccessCount++ + if lastBackupTime == nil || completionTime.After(lastBackupTime.Time) { + lastBackupTime = backup.Status.CompletionTime + lastBackupName = backup.Name + } + } else if backup.Status.Phase == "Failed" { + recentFailureCount++ + if lastFailureTime == nil || completionTime.After(lastFailureTime.Time) { + lastFailureTime = backup.Status.CompletionTime + readyCondition := meta.FindStatusCondition(backup.Status.Conditions, "Ready") + if readyCondition != nil { + lastFailureMessage = readyCondition.Message + } + } + } + } + } + } + + if recentFailureCount > 0 && recentSuccessCount == 0 { + schedule.Status.Phase = PhaseFailing + meta.SetStatusCondition(&schedule.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionFalse, + Reason: "RecentBackupsFailed", + Message: fmt.Sprintf("All recent backups failed (%d failures in last 24h)", recentFailureCount), + }) + } else { + schedule.Status.Phase = PhaseSucceeding + meta.SetStatusCondition(&schedule.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionTrue, + Reason: "BackupsSucceeding", + Message: fmt.Sprintf("Recent backups succeeding (%d successes, %d failures in last 24h)", recentSuccessCount, recentFailureCount), + }) + } + + schedule.Status.ActiveBackups = activeBackups + schedule.Status.LastBackupTime = lastBackupTime + schedule.Status.LastBackupName = lastBackupName + schedule.Status.LastFailureTime = lastFailureTime + schedule.Status.LastFailureMessage = lastFailureMessage + + log.Info("updated schedule status", "phase", schedule.Status.Phase, "recentSuccesses", recentSuccessCount, "recentFailures", recentFailureCount) + + return nil +} + +func (r *MongoDBBackupScheduleReconciler) SetupWithManager(mgr ctrl.Manager) error { + var err error + + r.Scheduler, err = gocron.NewScheduler() + if err != nil { + return err + } + + r.Scheduler.Start() + + return ctrl.NewControllerManagedBy(mgr). + For(&airlockv1alpha1.MongoDBBackupSchedule{}). 
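+		// Note: the gocron Scheduler holds its jobs in memory only, so scheduled backups do not
+		// survive an operator restart; they are registered again when each MongoDBBackupSchedule
+		// is next reconciled. Jobs are correlated to their CR purely through the (namespace, name)
+		// tag pair set via gocron.WithTags in Reconcile, along the lines of:
+		//
+		//	for _, job := range r.Scheduler.Jobs() {
+		//		tags := job.Tags() // tags[0] = namespace, tags[1] = name
+		//	}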
+		Complete(r)
+}
diff --git a/controllers/mongodbbackupstore_controller.go b/controllers/mongodbbackupstore_controller.go
new file mode 100644
index 0000000..7090610
--- /dev/null
+++ b/controllers/mongodbbackupstore_controller.go
@@ -0,0 +1,245 @@
+package controllers
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"reflect"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/smithy-go/logging"
+	"github.com/go-logr/logr"
+)
+
+// MongoDBBackupStoreReconciler reconciles a MongoDBBackupStore object
+type MongoDBBackupStoreReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+
+	// TODO: better name
+	Development bool
+}
+
+//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackupstores,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackupstores/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=airlock.cloud.rocket.chat,resources=mongodbbackupstores/finalizers,verbs=update
+//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
+
+func (r *MongoDBBackupStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := log.FromContext(ctx)
+
+	log.Info("reconciling backup store", "identifier", req.NamespacedName.String())
+
+	var store airlockv1alpha1.MongoDBBackupStore
+
+	store.Name = req.Name
+	store.Namespace = req.Namespace
+
+	err := r.Get(ctx, req.NamespacedName, &store)
+	if err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	base := store.DeepCopy()
+
+	if store.Status.Phase == "" {
+		now := metav1.Now()
+		store.Status.LastTested = &now
+		store.Status.Phase = "NotReady"
+		meta.SetStatusCondition(&store.Status.Conditions, metav1.Condition{
+			Type:    "Ready",
+			Status:  metav1.ConditionFalse,
+			Reason:  "NotReady",
+			Message: "Store is not ready",
+		})
+
+		if !reflect.DeepEqual(base.Status, store.Status) {
+			return ctrl.Result{}, r.Status().Patch(ctx, &store, client.MergeFrom(base))
+		}
+
+		return ctrl.Result{}, nil
+	}
+
+	if err := r.validateBucketExists(ctx, &store); err != nil {
+		now := metav1.Now()
+		store.Status.LastTested = &now
+		store.Status.Phase = "NotReady"
+		meta.SetStatusCondition(&store.Status.Conditions, metav1.Condition{
+			Type:    "Ready",
+			Status:  metav1.ConditionFalse,
+			Reason:  "BucketNotExists",
+			Message: fmt.Sprintf("failed to validate bucket exists: %s", err.Error()),
+		})
+
+		if !reflect.DeepEqual(base.Status, store.Status) {
+			return ctrl.Result{}, r.Status().Patch(ctx, &store, client.MergeFrom(base))
+		}
+
+		return ctrl.Result{}, nil
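+		// No RequeueAfter here: a failed store is re-validated when its own status patch
+		// re-enqueues it, when the spec changes, or when the referenced secret changes (see the
+		// Watches() mapping in SetupWithManager). A periodic re-check would be the alternative,
+		// sketched below (the interval is illustrative, not part of this change):
+		//
+		//	return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil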
+	}
+
+	now := metav1.Now()
+	store.Status.LastTested = &now
+	store.Status.Phase = "Ready"
+	meta.SetStatusCondition(&store.Status.Conditions, metav1.Condition{
+		Type:    "Ready",
+		Status:  metav1.ConditionTrue,
+		Reason:  "BucketExists",
+		Message: "Store config successfully validated",
+	})
+
+	if !reflect.DeepEqual(base.Status, store.Status) {
+		return ctrl.Result{}, r.Status().Patch(ctx, &store, client.MergeFrom(base))
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func (r *MongoDBBackupStoreReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	// index backup stores by the name of the secret they reference
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &airlockv1alpha1.MongoDBBackupStore{}, "spec.s3.secretRef.name", func(rawObj client.Object) []string {
+		backupStore := rawObj.(*airlockv1alpha1.MongoDBBackupStore)
+		return []string{backupStore.Spec.S3.SecretRef.Name}
+	}); err != nil {
+		return fmt.Errorf("failed to index spec.s3.secretRef.name field: %w", err)
+	}
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&airlockv1alpha1.MongoDBBackupStore{}).
+		// Allows the controller to always reflect the correct status of the backup store when its
+		// secret is updated: when a secret changes, the controller looks up which backup store CRs
+		// are mapped to it and reconciles them, which re-validates the store (i.e. whether the
+		// keys are still valid).
+		// References:
+		// https://github.com/kubernetes-sigs/controller-runtime/blob/aebc15d7c68925a659ee8ae4a747802b7f87594f/pkg/client/example_test.go#L297-L298
+		// https://buraksekili.github.io/articles/client-k8s-indexing/
+		// https://github.com/kubernetes-sigs/controller-runtime/issues/1941
+		Watches(&v1.Secret{}, handler.EnqueueRequestsFromMapFunc(r.getMappedBackupStore)).
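+		// The map function pairs with the field index registered at the top of this method: it
+		// lists only the stores whose indexed spec.s3.secretRef.name matches the updated secret,
+		// roughly equivalent to:
+		//
+		//	var stores airlockv1alpha1.MongoDBBackupStoreList
+		//	r.List(ctx, &stores, &client.ListOptions{
+		//		FieldSelector: fields.OneTermEqualSelector("spec.s3.secretRef.name", secret.Name),
+		//		Namespace:     secret.Namespace,
+		//	})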
+		Complete(r)
+}
+
+type runtimeToAwslogger struct {
+	logger logr.Logger
+}
+
+func (l runtimeToAwslogger) Logf(class logging.Classification, msg string, args ...any) {
+	l.logger.WithValues("source", "aws", "class", class).Info(fmt.Sprintf(msg, args...))
+}
+
+func (r *MongoDBBackupStoreReconciler) validateBucketExists(ctx context.Context, store *airlockv1alpha1.MongoDBBackupStore) error {
+	logger := log.FromContext(ctx)
+
+	l := runtimeToAwslogger{
+		logger: logger,
+	}
+
+	var secret v1.Secret
+	secret.Name = store.Spec.S3.SecretRef.Name
+	secret.Namespace = store.Namespace
+
+	if err := r.Get(ctx, client.ObjectKeyFromObject(&secret), &secret); err != nil {
+		return fmt.Errorf("failed to get S3 credentials secret: %w", err)
+	}
+
+	accessKeyData, exists := secret.Data[store.Spec.S3.SecretRef.Mappings.AccessKeyID.Key]
+	if !exists {
+		return fmt.Errorf("access key not found in secret at key: %s", store.Spec.S3.SecretRef.Mappings.AccessKeyID.Key)
+	}
+
+	secretKeyData, exists := secret.Data[store.Spec.S3.SecretRef.Mappings.SecretAccessKey.Key]
+	if !exists {
+		return fmt.Errorf("secret key not found in secret at key: %s", store.Spec.S3.SecretRef.Mappings.SecretAccessKey.Key)
+	}
+
+	accessKey := string(accessKeyData)
+	secretKey := string(secretKeyData)
+
+	if accessKey == "" || secretKey == "" {
+		return fmt.Errorf("S3 credentials are empty")
+	}
+
+	var httpClient *http.Client
+	if r.Development {
+		httpClient = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: &tls.Config{
+					InsecureSkipVerify: true,
+				},
+			},
+		}
+	}
+	s3Client := s3.New(s3.Options{
+		BaseEndpoint: &store.Spec.S3.Endpoint,
+		Logger:       l,
+		UsePathStyle: true,
+		Credentials: aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) {
+			return aws.Credentials{
+				AccessKeyID:     accessKey,
+				SecretAccessKey: secretKey,
+			}, nil
+		}),
+		Region:     store.Spec.S3.Region,
+		HTTPClient: httpClient,
+	})
+
+	// deliberately not logging the credential values themselves
+	logger.Info("validating bucket with provided credentials", "bucket", store.Spec.S3.Bucket)
+
+	if err := s3.NewBucketExistsWaiter(s3Client).Wait(ctx, &s3.HeadBucketInput{
+		Bucket: &store.Spec.S3.Bucket,
+	}, time.Second*10); err != nil {
+		return fmt.Errorf("provided bucket could not be found: %w", err)
+	}
+
+	logger.Info("bucket exists", "bucket", store.Spec.S3.Bucket)
+
+	return nil
+}
+
+func (r *MongoDBBackupStoreReconciler) getMappedBackupStore(ctx context.Context, object client.Object) []reconcile.Request {
+	logger := log.FromContext(ctx)
+
+	logger.Info("getting mapped backup stores for secret", "name", object.GetName(), "namespace", object.GetNamespace())
+
+	secret := object.(*v1.Secret)
+
+	listOptions := &client.ListOptions{
+		FieldSelector: fields.OneTermEqualSelector("spec.s3.secretRef.name", secret.Name),
+		Namespace:     secret.Namespace,
+	}
+
+	var storeList airlockv1alpha1.MongoDBBackupStoreList
+	if err := r.List(ctx, &storeList, listOptions); err != nil {
+		logger.Error(err, "failed to list backup stores")
+		return nil
+	}
+
+	requests := make([]reconcile.Request, len(storeList.Items))
+	for i, store := range storeList.Items {
+		requests[i] = reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Name:      store.Name,
+				Namespace: store.Namespace,
+			},
+		}
+	}
+
+	return requests
+}
diff --git a/controllers/reconciler/common.go b/controllers/reconciler/common.go
new file mode 100644
index 0000000..3b758fd
--- /dev/null
+++ b/controllers/reconciler/common.go
@@ -0,0 +1,26 @@
+package reconciler
+
+import (
+	"context"
+
+	
"sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// CreateOrUpdate creates the object if it doesn't exist and sets the owner reference, excludes the Status field, sends a POST update request if object already exists and has a diff +func CreateOrUpdate(ctx context.Context, c client.Client, owner client.Object, object client.Object, mutateFn controllerutil.MutateFn) (controllerutil.OperationResult, error) { + if err := controllerutil.SetOwnerReference(owner, object, c.Scheme()); err != nil { + return controllerutil.OperationResultNone, err + } + + return controllerutil.CreateOrUpdate(ctx, c, object, mutateFn) +} + +// CreateOrPatch sends a patch request if object already exists and has a diff, includes Status field, sets owner reference +func CreateOrPatch(ctx context.Context, c client.Client, owner client.Object, object client.Object, mutateFn controllerutil.MutateFn) (controllerutil.OperationResult, error) { + if err := controllerutil.SetOwnerReference(owner, object, c.Scheme()); err != nil { + return controllerutil.OperationResultNone, err + } + + return controllerutil.CreateOrPatch(ctx, c, object, mutateFn) +} diff --git a/go.mod b/go.mod index 1f94f8d..f9e5fbd 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/RocketChat/airlock go 1.24.0 require ( + github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 github.com/davecgh/go-spew v1.1.1 github.com/go-co-op/gocron/v2 v2.18.0 github.com/onsi/ginkgo/v2 v2.22.0 @@ -16,6 +17,16 @@ require ( ) require ( + github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect + github.com/aws/smithy-go v1.24.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-openapi/swag/cmdutils v0.25.3 // indirect github.com/go-openapi/swag/conv v0.25.3 // indirect diff --git a/go.sum b/go.sum index b0efa57..53ccaf0 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,25 @@ +github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= +github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod 
h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A= +github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 h1:MIWra+MSq53CFaXXAywB2qg9YvVZifkk6vEGl/1Qor0= +github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= diff --git a/main.go b/main.go index 674df05..97888e4 100644 --- a/main.go +++ b/main.go @@ -19,6 +19,7 @@ package main import ( "flag" "os" + "strings" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
@@ -110,6 +111,22 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "MongoDBBackup") os.Exit(1) } + + if err = (&controllers.MongoDBBackupStoreReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Development: strings.ToLower(os.Getenv("DEV_MODE")) == "true", + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "MongoDBBackupStore") + os.Exit(1) + } + if err = (&controllers.MongoDBBackupScheduleReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "MongoDBBackupSchedule") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/tests/assets/k3d/manual-storageclass.yaml b/tests/assets/k3d/manual-storageclass.yaml index 4bb6619..e43f72c 100644 --- a/tests/assets/k3d/manual-storageclass.yaml +++ b/tests/assets/k3d/manual-storageclass.yaml @@ -2,6 +2,8 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: manual + annotations: + storageclass.kubernetes.io/is-default-class: "true" provisioner: rancher.io/local-path parameters: nodePath: /disk diff --git a/tests/assets/local-tests/mongodbbackup.yaml b/tests/assets/local-tests/mongodbbackup.yaml index 9cab0f4..2d63c4b 100644 --- a/tests/assets/local-tests/mongodbbackup.yaml +++ b/tests/assets/local-tests/mongodbbackup.yaml @@ -10,6 +10,7 @@ spec: database: "sample_training" excludedCollections: [] includedCollections: [] - backupBucketSecretRef: - name: "s3-backup-secret" + prefix: haha + backupStoreRef: + name: mongodbbackupstore-sample namespace: "mongo" \ No newline at end of file diff --git a/tests/assets/local-tests/mongodbbucketstoresecret.yaml b/tests/assets/local-tests/mongodbbucketstoresecret.yaml new file mode 100644 index 0000000..0abd3ee --- /dev/null +++ b/tests/assets/local-tests/mongodbbucketstoresecret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: mongodbbucketstoresecret + namespace: mongo +type: Opaque +stringData: + accessKeyId: minio + secretAccessKey: minio123 \ No newline at end of file diff --git a/tests/assets/mongo/00_mongo_namespace.yaml b/tests/assets/mongo/00_mongo_namespace.yaml new file mode 100644 index 0000000..58f4593 --- /dev/null +++ b/tests/assets/mongo/00_mongo_namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: mongo \ No newline at end of file diff --git a/tests/controller_test.go b/tests/controller_test.go index 5db7e07..eadf261 100644 --- a/tests/controller_test.go +++ b/tests/controller_test.go @@ -3,6 +3,7 @@ package tests import ( "context" "fmt" + "os" "path/filepath" "time" @@ -16,7 +17,9 @@ import ( . 
"github.com/onsi/gomega" airlockv1alpha1 "github.com/RocketChat/airlock/api/v1alpha1" + "github.com/RocketChat/airlock/tests/utils" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -89,7 +92,6 @@ var _ = Describe("Airlock Controller", Ordered, func() { err := k8sClient.Create(context.Background(), accessRequestResource) Expect(err).ToNot(HaveOccurred()) - // next we need to wait for the user to have been created EventuallyWithOffset(1, func() error { accessRequest := airlockv1alpha1.MongoDBAccessRequest{} @@ -175,5 +177,341 @@ var _ = Describe("Airlock Controller", Ordered, func() { }) }) + Context("MongodbBackupStoreController", func() { + var storeSecretData map[string][]byte + + It("should check state of store positively", func() { + Expect(cluster.ApplyMongodbBackupStore()).ToNot(HaveOccurred()) + + By("Eventually store status should be Ready") + + Eventually(func() (string, error) { + store := &airlockv1alpha1.MongoDBBackupStore{} + err := k8sClient.Get(context.Background(), client.ObjectKey{ + Name: "mongodbbackupstore-sample", + Namespace: "mongo", + }, store) + + if err != nil { + return "", err + } + + return store.Status.Phase, nil + }, time.Minute, time.Second).Should(Equal("Ready")) + }) + + It("should check state of store negatively", func() { + By("Eventually store status should be NotReady") + + // update secret to have invalid credentials + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongodbbucketstoresecret", + Namespace: "mongo", + }, + Data: map[string][]byte{ + "accessKeyId": []byte("invalid"), + "secretAccessKey": []byte("invalid"), + }, + } + + var storeSecret v1.Secret + Expect(k8sClient.Get(context.Background(), client.ObjectKeyFromObject(secret), &storeSecret)).ToNot(HaveOccurred()) + + storeSecretData = storeSecret.Data + + Expect(k8sClient.Update(context.Background(), secret)).ToNot(HaveOccurred()) + + Eventually(func() (string, error) { + store := &airlockv1alpha1.MongoDBBackupStore{} + err := k8sClient.Get(context.Background(), client.ObjectKey{ + Name: "mongodbbackupstore-sample", + Namespace: "mongo", + }, store) + if err != nil { + return "", err + } + return store.Status.Phase, nil + }, time.Minute, time.Second).Should(Equal("NotReady")) + }) + + AfterAll(func() { + Expect(k8sClient.Update(context.Background(), &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongodbbucketstoresecret", + Namespace: "mongo", + }, + Data: storeSecretData, + })).ToNot(HaveOccurred()) + }) + }) + + Context("MongoDBBackup", func() { + backupName := "test-backup" + + BeforeAll(func() { + By("Ensuring backup store is ready") + Expect(cluster.ApplyMongodbBackupStore()).ToNot(HaveOccurred()) + + Eventually(func() (string, error) { + store := &airlockv1alpha1.MongoDBBackupStore{} + err := k8sClient.Get(context.Background(), client.ObjectKey{ + Name: "mongodbbackupstore-sample", + Namespace: "mongo", + }, store) + + if err != nil { + return "", err + } + + return store.Status.Phase, nil + }, time.Minute, time.Second).Should(Equal("Ready")) + + By("Loading sample data to mongo") + Expect(cluster.LoadSampleDataToMongo()).ToNot(HaveOccurred()) + + By("Loading backup image") + Expect(cluster.LoadBackupImage()).ToNot(HaveOccurred()) + }) + + It("should create backup and eventually complete", func() { + By("Creating MongoDBBackup resource") + backup := &airlockv1alpha1.MongoDBBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupName, + Namespace: "mongo", + }, + Spec: 
airlockv1alpha1.MongoDBBackupSpec{ + Cluster: "airlock-test", + Database: "sample_training", + ExcludedCollections: []string{}, + IncludedCollections: []string{}, + Prefix: "test-prefix", + BackupStoreRef: airlockv1alpha1.MongoDBBackupStoreRef{ + Name: "mongodbbackupstore-sample", + Namespace: "mongo", + }, + }, + } + + err := k8sClient.Create(context.Background(), backup) + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for backup to eventually complete") + Eventually(func() (string, error) { + backupCR := &airlockv1alpha1.MongoDBBackup{} + err := k8sClient.Get(context.Background(), client.ObjectKey{ + Name: backupName, + Namespace: "mongo", + }, backupCR) + if err != nil { + return "", err + } + + phase := backupCR.Status.Phase + if phase == "Completed" || phase == "Failed" { + return phase, nil + } + + return "", fmt.Errorf("backup still in progress, phase: %s", phase) + }, 3*time.Minute, 10*time.Second).Should(Or(Equal("Completed"), Equal("Failed"))) + + By("Verifying backup phase is Completed") + backupCR := &airlockv1alpha1.MongoDBBackup{} + err = k8sClient.Get(context.Background(), client.ObjectKey{ + Name: backupName, + Namespace: "mongo", + }, backupCR) + Expect(err).ToNot(HaveOccurred()) + Expect(backupCR.Status.Phase).To(Equal("Completed")) + }) + + It("should create backup file in the PVC volume", func() { + root, err := utils.GetRootDir() + Expect(err).ToNot(HaveOccurred()) + + By("Finding the PVC created for the backup") + var pvc v1.PersistentVolumeClaim + err = k8sClient.Get(context.Background(), client.ObjectKey{ + Name: backupName, + Namespace: "mongo", + }, &pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.Spec.VolumeName).ToNot(BeEmpty(), "PVC should be bound to a volume") + + By("Finding the PV bound to the PVC") + var pv v1.PersistentVolume + err = k8sClient.Get(context.Background(), client.ObjectKey{ + Name: pvc.Spec.VolumeName, + }, &pv) + Expect(err).ToNot(HaveOccurred()) + + By("Determining the volume path on the host") + var directoryName string + if pv.Spec.HostPath != nil { + directoryName = filepath.Base(pv.Spec.HostPath.Path) + } else { + directoryName = fmt.Sprintf("pvc-%s-%s-%s", pvc.UID, pvc.Namespace, pvc.Name) + } + + relativeDiskPath := filepath.Join("tests", "k3d", "disk", directoryName) + + By(fmt.Sprintf("Checking if backup file exists at %s", relativeDiskPath)) + + backupFilePath := filepath.Join(root, relativeDiskPath, "backup.gz") + + _, err = os.Stat(backupFilePath) + Expect(err).ToNot(HaveOccurred()) + + By("Verifying backup file is not empty") + fileInfo, err := os.Stat(backupFilePath) + Expect(err).ToNot(HaveOccurred()) + Expect(fileInfo.Size()).To(BeNumerically(">", 0), "backup file should not be empty") + }) + + It("should set phase to Failed when backup fails", func() { + failedBackupName := "test-backup-failed" + + By("Creating MongoDBBackup resource with non-existent database") + backup := &airlockv1alpha1.MongoDBBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: failedBackupName, + Namespace: "mongo", + }, + Spec: airlockv1alpha1.MongoDBBackupSpec{ + Cluster: "airlock-test", + Database: "somedb", + ExcludedCollections: []string{}, + IncludedCollections: []string{}, + Prefix: "test-prefix", + BackupStoreRef: airlockv1alpha1.MongoDBBackupStoreRef{ + Name: "nonexistent-store", + Namespace: "mongo", + }, + }, + } + + err := k8sClient.Create(context.Background(), backup) + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for backup to eventually fail") + Eventually(func() string { + backupCR := &airlockv1alpha1.MongoDBBackup{} + err := 
k8sClient.Get(context.Background(), client.ObjectKey{ + Name: failedBackupName, + Namespace: "mongo", + }, backupCR) + if err != nil { + return "" + } + + return backupCR.Status.Phase + }, 3*time.Minute, 10*time.Second).Should(Equal("Failed")) + + By("Verifying backup phase is Failed") + backupCR := &airlockv1alpha1.MongoDBBackup{} + err = k8sClient.Get(context.Background(), client.ObjectKey{ + Name: failedBackupName, + Namespace: "mongo", + }, backupCR) + Expect(err).ToNot(HaveOccurred()) + Expect(backupCR.Status.Phase).To(Equal("Failed")) + + By("Verifying Ready condition is False") + readyCondition := meta.FindStatusCondition(backupCR.Status.Conditions, "Ready") + Expect(readyCondition).ToNot(BeNil()) + Expect(readyCondition.Status).To(Equal(metav1.ConditionFalse)) + Expect(readyCondition.Reason).To(Equal("BackupStoreNotFound")) + }) + }) + + Context("MongoDBBackupSchedule", func() { + scheduleName := "test-backup-schedule" + + BeforeAll(func() { + By("Ensuring backup store is ready") + Expect(cluster.ApplyMongodbBackupStore()).ToNot(HaveOccurred()) + + Eventually(func() (string, error) { + store := &airlockv1alpha1.MongoDBBackupStore{} + err := k8sClient.Get(context.Background(), client.ObjectKey{ + Name: "mongodbbackupstore-sample", + Namespace: "mongo", + }, store) + if err != nil { + return "", err + } + return store.Status.Phase, nil + }, time.Minute, time.Second).Should(Equal("Ready")) + }) + + It("should create backup CRs", func() { + By("Creating MongoDBBackupSchedule resource") + schedule := &airlockv1alpha1.MongoDBBackupSchedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: scheduleName, + Namespace: "mongo", + }, + Spec: airlockv1alpha1.MongoDBBackupScheduleSpec{ + Schedule: "*/1 * * * *", + BackupSpec: airlockv1alpha1.MongoDBBackupSpec{ + Cluster: "airlock-test", + Database: "sample_training", + ExcludedCollections: []string{}, + IncludedCollections: []string{}, + Prefix: "schedule-test", + BackupStoreRef: airlockv1alpha1.MongoDBBackupStoreRef{ + Name: "mongodbbackupstore-sample", + Namespace: "mongo", + }, + }, + Suspend: func() *bool { b := false; return &b }(), + }, + } + + err := k8sClient.Create(context.Background(), schedule) + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for backup CRs to be created") + Eventually(func() (int, error) { + var backupList airlockv1alpha1.MongoDBBackupList + err := k8sClient.List(context.Background(), &backupList, + client.InNamespace("mongo"), + client.MatchingLabels{"airlock.cloud.rocket.chat/scheduler": scheduleName}) + if err != nil { + return 0, err + } + return len(backupList.Items), nil + }, 2*time.Minute, 10*time.Second).Should(BeNumerically(">=", 1)) + }) + + It("should create at least 2 backup CRs according to schedule", func() { + By("Waiting for first backup to be created") + var initialBackupCount int + Eventually(func() (int, error) { + var backupList airlockv1alpha1.MongoDBBackupList + err := k8sClient.List(context.Background(), &backupList, + client.InNamespace("mongo"), + client.MatchingLabels{"airlock.cloud.rocket.chat/scheduler": scheduleName}) + if err != nil { + return 0, err + } + initialBackupCount = len(backupList.Items) + return initialBackupCount, nil + }, 2*time.Minute, 10*time.Second).Should(BeNumerically(">=", 1)) + + By("Waiting for second backup to be created") + Eventually(func() (int, error) { + var backupList airlockv1alpha1.MongoDBBackupList + err := k8sClient.List(context.Background(), &backupList, + client.InNamespace("mongo"), + client.MatchingLabels{"airlock.cloud.rocket.chat/scheduler": 
scheduleName}) + if err != nil { + return 0, err + } + return len(backupList.Items), nil + }, 2*time.Minute, 10*time.Second).Should(BeNumerically(">=", 2)) + }) + }) }) }) diff --git a/tests/suite_test.go b/tests/suite_test.go index 8d8c6a5..1f2f6a7 100644 --- a/tests/suite_test.go +++ b/tests/suite_test.go @@ -19,8 +19,10 @@ package tests import ( "path/filepath" "testing" + "time" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/client" @@ -70,6 +72,8 @@ var _ = BeforeSuite(func() { kubectl.SetK8sClient(k8sClient) + time.Sleep(30 * time.Second) + By("Deploy mongodb") Expect(cluster.DeployMongo()).NotTo(HaveOccurred()) @@ -87,7 +91,16 @@ var _ = BeforeSuite(func() { Expect(utils.RunStreamOutput("make", "k3d-load-mongo-data", utils.MakeVar("NAME", "airlock-test"))) }) -var _ = AfterSuite(func() { +var _ = ReportAfterSuite("Teardown cluster", func(report types.Report) { + // Check if any spec in the suite failed + failedCount := report.SpecReports.CountWithState(types.SpecStateFailed) + + if failedCount > 0 { + By("Skipping teardown of test cluster since one or more specs failed") + By("Use 'make k3d-kubectl NAME=airlock-test' to debug the cluster") + return + } + By("tearing down the test environment") err := cluster.Stop() Expect(err).NotTo(HaveOccurred()) diff --git a/tests/utils/k3d.go b/tests/utils/k3d.go index d823348..419a614 100644 --- a/tests/utils/k3d.go +++ b/tests/utils/k3d.go @@ -19,7 +19,7 @@ func NewK3dCluster(name string) K3dCluster { func (k K3dCluster) Start() error { // stdout, err := Run("k3d", "cluster", "create", k.name, "--kubeconfig-update-default=false", "--kubeconfig-switch-context=false", "--no-lb", "--no-rollback", "--wait", "-s1", "-a1") - return RunStreamOutput("make", "k3d-cluster", MakeVar("NAME", k.name)) + return Make("k3d-cluster", MakeVar("NAME", k.name)) } func (k K3dCluster) Stop() error { @@ -35,15 +35,27 @@ func (k K3dCluster) LoadImage(image string) error { } func (k K3dCluster) DeployMongo() error { - return RunStreamOutput("make", "k3d-deploy-mongo", MakeVar("NAME", k.name)) + return Make("k3d-deploy-mongo", MakeVar("NAME", k.name)) } func (k K3dCluster) DeployMinio() error { - return RunStreamOutput("make", "k3d-deploy-minio", MakeVar("NAME", k.name)) + return Make("k3d-deploy-minio", MakeVar("NAME", k.name)) } func (k K3dCluster) DeployAirlock() error { - return RunStreamOutput("make", "k3d-deploy-airlock", MakeVar("NAME", k.name), MakeVar("IMG", "controller:latest")) + return Make("k3d-deploy-airlock", MakeVar("NAME", k.name), MakeVar("IMG", "controller:latest")) +} + +func (k K3dCluster) ApplyMongodbBackupStore() error { + return Make("k3d-add-backup-store", MakeVar("NAME", k.name)) +} + +func (k K3dCluster) LoadSampleDataToMongo() error { + return Make("k3d-load-mongo-data", MakeVar("NAME", k.name)) +} + +func (k K3dCluster) LoadBackupImage() error { + return Make("k3d-load-backup-image", MakeVar("NAME", k.name)) } func (k K3dCluster) Kubeconfig() ([]byte, error) { diff --git a/tests/utils/utils.go b/tests/utils/utils.go index b1063b3..cd056c7 100644 --- a/tests/utils/utils.go +++ b/tests/utils/utils.go @@ -14,14 +14,14 @@ import ( . 
"github.com/onsi/gomega" ) -func getRootDir() (string, error) { +func GetRootDir() (string, error) { output, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput() // remove the \n before returning return string(output[:len(output)-1]), err } func Run(cmd ...string) ([]byte, error) { - root, err := getRootDir() + root, err := GetRootDir() if err != nil { return nil, err } @@ -34,7 +34,7 @@ func Run(cmd ...string) ([]byte, error) { } func RunStreamOutput(cmd ...string) error { - root, err := getRootDir() + root, err := GetRootDir() if err != nil { return err } @@ -45,16 +45,30 @@ func RunStreamOutput(cmd ...string) error { command.Stdout = os.Stdout + command.Stderr = os.Stderr + fmt.Fprintf(GinkgoWriter, "running: %s\n", command.String()) err = command.Run() - if err != nil { - return fmt.Errorf("%s failed with error: %v", command, err) + exitCode := command.ProcessState.ExitCode() + if exitCode != 0 || err != nil { + return fmt.Errorf("%s failed with error: %v, err: %v", command, exitCode, err.Error()) } return nil } +func Make(target string, vars ...string) error { + root, err := GetRootDir() + if err != nil { + return err + } + + cmd := append([]string{"make", "-w", "-C", root, target}, vars...) + + return RunStreamOutput(cmd...) +} + func runCommand(command *exec.Cmd) ([]byte, error) { fmt.Fprintf(GinkgoWriter, "running: %s\n", command.String()) output, err := command.CombinedOutput() From b04eab3b54ed567cb0daaa6f32bb9650cfb80ac5 Mon Sep 17 00:00:00 2001 From: Debdut Chakraborty Date: Wed, 21 Jan 2026 14:54:31 +0530 Subject: [PATCH 9/9] fix: use patch --- controllers/backup_restore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/backup_restore.go b/controllers/backup_restore.go index 1f45c35..a2366de 100644 --- a/controllers/backup_restore.go +++ b/controllers/backup_restore.go @@ -84,7 +84,7 @@ func reconcileMongoDbAccessRequest(ctx context.Context, cl client.Client, backup accessRequest.Namespace = backupCr.Namespace - _, err := reconciler.CreateOrUpdate(ctx, cl, backupCr, &accessRequest, func() error { + _, err := reconciler.CreateOrPatch(ctx, cl, backupCr, &accessRequest, func() error { accessRequest.Spec.ClusterName = backupCr.Spec.Cluster accessRequest.Spec.Database = backupCr.Spec.Database accessRequest.Spec.UserName = backupCr.Name + "-user"