// Copyright 2023 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mongodbatlas

import (
	"context"
	"fmt"
	"net/http"
)

const dataLakesPipelineBasePath = "api/atlas/v1.0/groups/%s/pipelines"
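
// For example, fmt.Sprintf(dataLakesPipelineBasePath, "32b6e34b3d91647abb20e7b8")
// produces "api/atlas/v1.0/groups/32b6e34b3d91647abb20e7b8/pipelines"; the group
// ID shown here is purely illustrative.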

// DataLakePipelineService provides access to the Data Lake Pipeline endpoints of the MongoDB Atlas API.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines
type DataLakePipelineService interface {
	List(context.Context, string) ([]*DataLakePipeline, *Response, error)
	ListSnapshots(context.Context, string, string, *ListDataLakePipelineSnapshotOptions) (*DataLakePipelineSnapshotsResponse, *Response, error)
	ListIngestionSchedules(context.Context, string, string) ([]*DataLakePipelineIngestionSchedule, *Response, error)
	Get(context.Context, string, string) (*DataLakePipeline, *Response, error)
	Create(context.Context, string, *DataLakePipeline) (*DataLakePipeline, *Response, error)
	Update(context.Context, string, string, *DataLakePipeline) (*DataLakePipeline, *Response, error)
	Delete(context.Context, string, string) (*Response, error)
}
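
// Usage sketch (a hedged example, not part of the API surface): it assumes a
// configured *Client named client exposing this service as
// client.DataLakePipelines (the field name is an assumption, mirroring how
// other services in this package are wired), a context.Context in ctx, and a
// project ID string in projectID.
//
//	pipelines, _, err := client.DataLakePipelines.List(ctx, projectID)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, p := range pipelines {
//		fmt.Printf("%s: %s\n", p.Name, p.State)
//	}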

// DataLakePipelineServiceOp handles communication with the Data Lake Pipeline related methods of the
// MongoDB Atlas API.
type DataLakePipelineServiceOp service

var _ DataLakePipelineService = &DataLakePipelineServiceOp{}

// DataLakePipeline represents a Data Lake Pipeline.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines
type DataLakePipeline struct {
	ID              string                            `json:"_id,omitempty"`             // Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
	GroupID         string                            `json:"groupId,omitempty"`         // Unique identifier for the project.
	Name            string                            `json:"name,omitempty"`            // Name of this Data Lake Pipeline.
	CreatedDate     string                            `json:"createdDate,omitempty"`     // Timestamp that indicates when the Data Lake Pipeline was created.
	LastUpdatedDate string                            `json:"lastUpdatedDate,omitempty"` // Timestamp that indicates the last time that the Data Lake Pipeline was updated.
	State           string                            `json:"state,omitempty"`           // State of this Data Lake Pipeline.
	Sink            *DataLakePipelineSink             `json:"sink,omitempty"`            // Ingestion destination of a Data Lake Pipeline.
	Source          *DataLakePipelineSource           `json:"source,omitempty"`          // Ingestion source of a Data Lake Pipeline.
	Transformations []*DataLakePipelineTransformation `json:"transformations,omitempty"` // Fields to be excluded for this Data Lake Pipeline.
}
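
// A minimal payload for Create might look like the following sketch; every
// value (pipeline name, sink type, provider, region, source type, cluster and
// namespace names) is an illustrative assumption, not prescribed by this
// package:
//
//	pipeline := &DataLakePipeline{
//		Name: "sample-pipeline",
//		Sink: &DataLakePipelineSink{
//			Type:             "DLS",
//			MetadataProvider: "AWS",
//			MetadataRegion:   "US_EAST_1",
//		},
//		Source: &DataLakePipelineSource{
//			Type:           "ON_DEMAND_CPS",
//			ClusterName:    "Cluster0",
//			DatabaseName:   "sample_db",
//			CollectionName: "sample_collection",
//		},
//	}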

// DataLakePipelineTransformation represents fields to be excluded for this Data Lake Pipeline.
type DataLakePipelineTransformation struct {
	Field string `json:"field,omitempty"` // Key in the document.
	Type  string `json:"type,omitempty"`  // Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
}

// DataLakePipelineSink represents the ingestion destination of a Data Lake Pipeline.
type DataLakePipelineSink struct {
	Type             string                            `json:"type,omitempty"`             // Type of ingestion destination of this Data Lake Pipeline.
	MetadataProvider string                            `json:"metadataProvider,omitempty"` // Target cloud provider for this Data Lake Pipeline.
	MetadataRegion   string                            `json:"metadataRegion,omitempty"`   // Target cloud provider region for this Data Lake Pipeline.
	PartitionFields  []*DataLakePipelinePartitionField `json:"partitionFields,omitempty"`  // Ordered fields used to physically organize data in the destination.
}

// DataLakePipelinePartitionField represents one of the ordered fields used to physically organize data in the destination.
type DataLakePipelinePartitionField struct {
	FieldName string `json:"fieldName,omitempty"`
	Order     int32  `json:"order,omitempty"`
}

// DataLakePipelineSource represents the ingestion source of a Data Lake Pipeline.
type DataLakePipelineSource struct {
	Type           string `json:"type,omitempty"`           // Type of ingestion source of this Data Lake Pipeline.
	ClusterName    string `json:"clusterName,omitempty"`    // Human-readable name that identifies the cluster.
	CollectionName string `json:"collectionName,omitempty"` // Human-readable name that identifies the collection.
	DatabaseName   string `json:"databaseName,omitempty"`   // Human-readable name that identifies the database.
	GroupID        string `json:"groupId,omitempty"`        // Unique 24-hexadecimal character string that identifies the project.
}

// ListDataLakePipelineSnapshotOptions specifies the optional parameters to the ListSnapshots method.
type ListDataLakePipelineSnapshotOptions struct {
	*ListOptions
	CompletedAfter string `url:"completedAfter,omitempty"` // Date and time after which MongoDB Cloud created the snapshot.
}
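
// A hedged sketch of building these options; the timestamp format (ISO 8601 in
// UTC) matches other Atlas date parameters but is an assumption here, as are
// the pagination values:
//
//	opts := &ListDataLakePipelineSnapshotOptions{
//		ListOptions:    &ListOptions{PageNum: 1, ItemsPerPage: 100},
//		CompletedAfter: "2023-01-01T00:00:00Z",
//	}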

// DataLakePipelineSnapshotsResponse represents the response of DataLakePipelineService.ListSnapshots.
type DataLakePipelineSnapshotsResponse struct {
	Links      []*Link                     `json:"links,omitempty"`      // List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both.
	Results    []*DataLakePipelineSnapshot `json:"results,omitempty"`    // List of returned documents that MongoDB Cloud provides when completing this request.
	TotalCount int                         `json:"totalCount,omitempty"` // Number of documents returned in this response.
}

// DataLakePipelineSnapshot represents a snapshot that you can use to trigger an on-demand pipeline run.
type DataLakePipelineSnapshot struct {
	ID               string   `json:"id,omitempty"`               // Unique 24-hexadecimal digit string that identifies the snapshot.
	CloudProvider    string   `json:"cloudProvider,omitempty"`    // Human-readable label that identifies the cloud provider that stores this snapshot.
	CreatedAt        string   `json:"createdAt,omitempty"`        // Date and time when MongoDB Cloud took the snapshot.
	Description      string   `json:"description,omitempty"`      // Human-readable phrase or sentence that explains the purpose of the snapshot.
	ExpiresAt        string   `json:"expiresAt,omitempty"`        // Date and time when MongoDB Cloud deletes the snapshot.
	FrequencyType    string   `json:"frequencyType,omitempty"`    // Human-readable label that identifies how often this snapshot triggers.
	MasterKeyUUID    string   `json:"masterKeyUUID,omitempty"`    // Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.
	MongodVersion    string   `json:"mongodVersion,omitempty"`    // Version of the MongoDB host that this snapshot backs up.
	ReplicaSetName   string   `json:"replicaSetName,omitempty"`   // Human-readable label that identifies the replica set from which MongoDB Cloud took this snapshot.
	SnapshotType     string   `json:"snapshotType,omitempty"`     // Human-readable label that identifies when this snapshot triggers.
	Status           string   `json:"status,omitempty"`           // Human-readable label that indicates the stage of the backup process for this snapshot.
	Type             string   `json:"type,omitempty"`             // Human-readable label that categorizes the cluster as a replica set or sharded cluster.
	StorageSizeBytes int64    `json:"storageSizeBytes,omitempty"` // Number of bytes taken to store the backup snapshot.
	CopyRegions      []string `json:"copyRegions,omitempty"`      // List that identifies the regions to which MongoDB Cloud copies the snapshot.
	PolicyItems      []string `json:"policyItems,omitempty"`      // List that contains unique identifiers for the policy items.
	Links            []*Link  `json:"links,omitempty"`            // List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both.
}

// DataLakePipelineIngestionSchedule represents a backup schedule policy item that you can use as a Data Lake Pipeline source.
type DataLakePipelineIngestionSchedule struct {
	ID                string `json:"id,omitempty"`                // Unique 24-hexadecimal digit string that identifies this backup policy item.
	FrequencyType     string `json:"frequencyType,omitempty"`     // Human-readable label that identifies the frequency type associated with the backup policy.
	RetentionUnit     string `json:"retentionUnit,omitempty"`     // Unit of time in which MongoDB Cloud measures snapshot retention.
	FrequencyInterval int32  `json:"frequencyInterval,omitempty"` // Number that indicates the frequency interval for a set of snapshots.
	RetentionValue    int32  `json:"retentionValue,omitempty"`    // Duration in days, weeks, or months that MongoDB Cloud retains the snapshot.
}

// List gets a list of Data Lake Pipelines.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines/operation/listPipelines
func (s *DataLakePipelineServiceOp) List(ctx context.Context, groupID string) ([]*DataLakePipeline, *Response, error) {
	if groupID == "" {
		return nil, nil, NewArgError("groupID", "must be set")
	}

	path := fmt.Sprintf(dataLakesPipelineBasePath, groupID)

	req, err := s.Client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	var root []*DataLakePipeline
	resp, err := s.Client.Do(ctx, req, &root)
	if err != nil {
		return nil, resp, err
	}

	return root, resp, nil
}

// ListSnapshots gets a list of backup snapshots that you can use to trigger an on-demand pipeline run.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines/operation/listPipelineSnapshots
func (s *DataLakePipelineServiceOp) ListSnapshots(ctx context.Context, groupID, name string, options *ListDataLakePipelineSnapshotOptions) (*DataLakePipelineSnapshotsResponse, *Response, error) {
	if groupID == "" {
		return nil, nil, NewArgError("groupID", "must be set")
	}

	if name == "" {
		return nil, nil, NewArgError("name", "must be set")
	}

	basePath := fmt.Sprintf(dataLakesPipelineBasePath, groupID)
	path := fmt.Sprintf("%s/%s/availableSnapshots", basePath, name)

	// Add query params from ListDataLakePipelineSnapshotOptions.
	pathWithOptions, err := setListOptions(path, options)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.Client.NewRequest(ctx, http.MethodGet, pathWithOptions, nil)
	if err != nil {
		return nil, nil, err
	}

	var root *DataLakePipelineSnapshotsResponse
	resp, err := s.Client.Do(ctx, req, &root)
	if err != nil {
		return nil, resp, err
	}

	return root, resp, nil
}
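
// Calling sketch (hedged; assumes the client wiring described above, plus the
// opts value built in the options example and a hypothetical pipeline name):
//
//	snaps, _, err := client.DataLakePipelines.ListSnapshots(ctx, projectID, "sample-pipeline", opts)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("snapshots:", snaps.TotalCount)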

// ListIngestionSchedules gets a list of backup schedule policy items that you can use as a Data Lake Pipeline source.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines/operation/listPipelineSchedules
func (s *DataLakePipelineServiceOp) ListIngestionSchedules(ctx context.Context, groupID, name string) ([]*DataLakePipelineIngestionSchedule, *Response, error) {
	if groupID == "" {
		return nil, nil, NewArgError("groupID", "must be set")
	}

	if name == "" {
		return nil, nil, NewArgError("name", "must be set")
	}

	basePath := fmt.Sprintf(dataLakesPipelineBasePath, groupID)
	path := fmt.Sprintf("%s/%s/availableSchedules", basePath, name)

	req, err := s.Client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	var root []*DataLakePipelineIngestionSchedule
	resp, err := s.Client.Do(ctx, req, &root)
	if err != nil {
		return nil, resp, err
	}

	return root, resp, nil
}

// Get gets the details of one Data Lake Pipeline within the specified project.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines/operation/getPipeline
func (s *DataLakePipelineServiceOp) Get(ctx context.Context, groupID, name string) (*DataLakePipeline, *Response, error) {
	if groupID == "" {
		return nil, nil, NewArgError("groupID", "must be set")
	}
	if name == "" {
		return nil, nil, NewArgError("name", "must be set")
	}

	basePath := fmt.Sprintf(dataLakesPipelineBasePath, groupID)
	path := fmt.Sprintf("%s/%s", basePath, name)

	req, err := s.Client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(DataLakePipeline)
	resp, err := s.Client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root, resp, nil
}

// Create creates one Data Lake Pipeline.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines/operation/createPipeline
func (s *DataLakePipelineServiceOp) Create(ctx context.Context, groupID string, createRequest *DataLakePipeline) (*DataLakePipeline, *Response, error) {
	if groupID == "" {
		return nil, nil, NewArgError("groupID", "must be set")
	}
	if createRequest == nil {
		return nil, nil, NewArgError("createRequest", "must be set")
	}

	path := fmt.Sprintf(dataLakesPipelineBasePath, groupID)
	req, err := s.Client.NewRequest(ctx, http.MethodPost, path, createRequest)
	if err != nil {
		return nil, nil, err
	}

	root := new(DataLakePipeline)
	resp, err := s.Client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root, resp, nil
}
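
// Creation sketch (hedged; reuses the illustrative pipeline value built after
// the DataLakePipeline type above, plus the assumed client wiring):
//
//	created, _, err := client.DataLakePipelines.Create(ctx, projectID, pipeline)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("created pipeline with ID:", created.ID)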

// Update updates one Data Lake Pipeline.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines/operation/updatePipeline
func (s *DataLakePipelineServiceOp) Update(ctx context.Context, groupID, name string, updateRequest *DataLakePipeline) (*DataLakePipeline, *Response, error) {
	if groupID == "" {
		return nil, nil, NewArgError("groupID", "must be set")
	}
	if name == "" {
		return nil, nil, NewArgError("name", "must be set")
	}
	if updateRequest == nil {
		return nil, nil, NewArgError("updateRequest", "must be set")
	}

	basePath := fmt.Sprintf(dataLakesPipelineBasePath, groupID)
	path := fmt.Sprintf("%s/%s", basePath, name)
	req, err := s.Client.NewRequest(ctx, http.MethodPatch, path, updateRequest)
	if err != nil {
		return nil, nil, err
	}

	root := new(DataLakePipeline)
	resp, err := s.Client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root, resp, nil
}

// Delete deletes one Data Lake Pipeline.
//
// See more: https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Data-Lake-Pipelines/operation/deletePipeline
func (s *DataLakePipelineServiceOp) Delete(ctx context.Context, groupID, name string) (*Response, error) {
	if groupID == "" {
		return nil, NewArgError("groupID", "must be set")
	}
	if name == "" {
		return nil, NewArgError("name", "must be set")
	}

	basePath := fmt.Sprintf(dataLakesPipelineBasePath, groupID)
	path := fmt.Sprintf("%s/%s", basePath, name)

	req, err := s.Client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}

	resp, err := s.Client.Do(ctx, req, nil)

	return resp, err
}