diff --git a/common/types.go b/common/types.go index 48183edbd..4c9511e90 100644 --- a/common/types.go +++ b/common/types.go @@ -100,6 +100,29 @@ func CloudfuseVersion_() string { return cloudfuseVersion_ } +// custom errors shared by different components +type CloudUnreachableError struct { + Message string + CloudStorageError error +} + +func NewCloudUnreachableError(originalError error) CloudUnreachableError { + return CloudUnreachableError{ + Message: "Failed to connect to cloud storage", + CloudStorageError: originalError, + } +} +func (e CloudUnreachableError) Error() string { + return fmt.Sprintf("%s. Here's why: %v", e.Message, e.CloudStorageError) +} +func (e CloudUnreachableError) Unwrap() error { + return e.CloudStorageError +} +func (e CloudUnreachableError) Is(target error) bool { + _, ok := target.(*CloudUnreachableError) + return ok +} + var DefaultWorkDir string var DefaultLogFilePath string var StatsConfigFilePath string diff --git a/component/azstorage/azstorage.go b/component/azstorage/azstorage.go index e15c05808..05de1a234 100644 --- a/component/azstorage/azstorage.go +++ b/component/azstorage/azstorage.go @@ -27,11 +27,14 @@ package azstorage import ( "context" + "errors" "fmt" + "sync" "sync/atomic" "syscall" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Seagate/cloudfuse/common" "github.com/Seagate/cloudfuse/common/config" "github.com/Seagate/cloudfuse/common/log" @@ -47,6 +50,16 @@ type AzStorage struct { stConfig AzStorageConfig startTime time.Time listBlocked bool + state connectionState + ctx context.Context + cancelFn context.CancelFunc +} + +type connectionState struct { + sync.Mutex + lastConnectionAttempt *time.Time + firstOffline *time.Time + retryTicker *time.Ticker } const compName = "azstorage" @@ -192,6 +205,16 @@ func (az *AzStorage) Start(ctx context.Context) error { // create stats collector for azstorage azStatsCollector = stats_manager.NewStatsCollector(az.Name()) log.Debug("Starting azstorage stats 
collector") + // create a shared context for all cloud operations, with ability to cancel + az.ctx, az.cancelFn = context.WithCancel(ctx) + // create the retry ticker + az.state.retryTicker = time.NewTicker(time.Duration(az.stConfig.backoffTime) * time.Second) + az.state.retryTicker.Stop() // stop it for now, we will start it when we are offline + go func() { + for range az.state.retryTicker.C { + az.CloudConnected() + } + }() return nil } @@ -203,9 +226,118 @@ func (az *AzStorage) Stop() error { return nil } +// ------------------------- Connectivity check ------------------------------------------- + +// Online check +func (az *AzStorage) CloudConnected() bool { + log.Trace("AzStorage::CloudConnected") + connected := az.state.firstOffline == nil + // don't check the connection when it's up, or if we are not ready to retry + if connected || !az.timeToRetry() { + return connected + } + // check connection + ctx, cancelFun := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancelFun() + err := az.storage.ConnectionOkay(ctx) + log.Debug("AzStorage::CloudConnected : err is %v", err) + nowConnected := az.updateConnectionState(err) + return nowConnected +} + +func (az *AzStorage) timeToRetry() bool { + timeSinceLastAttempt := time.Since(*az.state.lastConnectionAttempt) + switch { + case timeSinceLastAttempt < time.Duration(az.stConfig.backoffTime)*time.Second: + // minimum delay before retrying + return false + case timeSinceLastAttempt > 90*time.Second: + // maximum delay + return true + default: + // when between the minimum and maximum delay, we use an exponential backoff + timeOfflineAtLastAttempt := az.state.lastConnectionAttempt.Sub(*az.state.firstOffline) + return timeSinceLastAttempt > timeOfflineAtLastAttempt + } +} + +func (az *AzStorage) updateConnectionState(err error) bool { + az.state.Lock() + defer az.state.Unlock() + currentTime := time.Now() + az.state.lastConnectionAttempt = &currentTime + connected := !isOfflineError(err) + 
wasConnected := az.state.firstOffline == nil + stateChanged := connected != wasConnected + if stateChanged { + log.Warn("AzStorage::updateConnectionState : connected is now: %t", connected) + if connected { + az.state.firstOffline = nil + // reset the context to allow new requests + az.ctx, az.cancelFn = context.WithCancel(context.Background()) + // stop the retry ticker + az.state.retryTicker.Stop() + } else { + az.state.firstOffline = &currentTime + // cancel all outstanding requests + az.cancelFn() + log.Warn("AzStorage::updateConnectionState : cancelled all outstanding requests") + // reset the ticker to retry the connection + az.state.retryTicker.Reset(time.Duration(az.stConfig.backoffTime) * time.Second) + } + } + return connected +} + +func isOfflineError(err error) bool { + // handle common error cases + switch { + case err == nil: + return false + case errors.Is(err, syscall.ENOENT): + return false + case errors.Is(err, context.DeadlineExceeded): + return true + case errors.Is(err, context.Canceled): + return true + case errors.Is(err, syscall.ECONNREFUSED): + return true + case errors.Is(err, &common.CloudUnreachableError{}): + return true + default: + var respErr *azcore.ResponseError + errors.As(err, &respErr) + if respErr != nil && storeBlobErrToErr(respErr) != ErrUnknown { + log.Debug("isOfflineError: errors.As(err, &respErr)") + return false + } + // log the error details + unwrappedErr := err + for unwrappedErr != nil { + log.Debug( + "isOfflineError: Uncaught AZ error is of type \"%T\" and value [%v].\n", + unwrappedErr, + unwrappedErr, + ) + unwrappedErr = errors.Unwrap(unwrappedErr) + } + return false + } +} + +// handleStorageError updates connection state and wraps offline errors. +// Returns CloudUnreachableError for offline conditions, original error otherwise. 
+func (az *AzStorage) handleStorageError(err error) error { + az.updateConnectionState(err) + if isOfflineError(err) { + return common.NewCloudUnreachableError(err) + } + return err +} + // ------------------------- Container listing ------------------------------------------- func (az *AzStorage) ListContainers() ([]string, error) { - return az.storage.ListContainers() + return az.storage.ListContainers(az.ctx) } // ------------------------- Core Operations ------------------------------------------- @@ -214,7 +346,8 @@ func (az *AzStorage) ListContainers() ([]string, error) { func (az *AzStorage) CreateDir(options internal.CreateDirOptions) error { log.Trace("AzStorage::CreateDir : %s", options.Name) - err := az.storage.CreateDirectory(internal.TruncateDirName(options.Name)) + err := az.storage.CreateDirectory(az.ctx, internal.TruncateDirName(options.Name)) + err = az.handleStorageError(err) if err == nil { azStatsCollector.PushEvents( @@ -231,7 +364,8 @@ func (az *AzStorage) CreateDir(options internal.CreateDirOptions) error { func (az *AzStorage) DeleteDir(options internal.DeleteDirOptions) error { log.Trace("AzStorage::DeleteDir : %s", options.Name) - err := az.storage.DeleteDirectory(internal.TruncateDirName(options.Name)) + err := az.storage.DeleteDirectory(az.ctx, internal.TruncateDirName(options.Name)) + err = az.handleStorageError(err) if err == nil { azStatsCollector.PushEvents(deleteDir, options.Name, nil) @@ -254,7 +388,8 @@ func formatListDirName(path string) string { func (az *AzStorage) IsDirEmpty(options internal.IsDirEmptyOptions) bool { log.Trace("AzStorage::IsDirEmpty : %s", options.Name) - list, _, err := az.storage.List(formatListDirName(options.Name), nil, 1) + list, _, err := az.storage.List(az.ctx, formatListDirName(options.Name), nil, 1) + err = az.handleStorageError(err) if err != nil { log.Err("AzStorage::IsDirEmpty : error listing [%s]", err) return false @@ -294,7 +429,8 @@ func (az *AzStorage) StreamDir( options.Count = 
common.MaxDirListCount } - new_list, new_marker, err := az.storage.List(path, &options.Token, options.Count) + new_list, new_marker, err := az.storage.List(az.ctx, path, &options.Token, options.Count) + err = az.handleStorageError(err) if err != nil { log.Err("AzStorage::StreamDir : Failed to read dir [%s]", err) return new_list, "", err @@ -344,7 +480,8 @@ func (az *AzStorage) RenameDir(options internal.RenameDirOptions) error { options.Src = internal.TruncateDirName(options.Src) options.Dst = internal.TruncateDirName(options.Dst) - err := az.storage.RenameDirectory(options.Src, options.Dst) + err := az.storage.RenameDirectory(az.ctx, options.Src, options.Dst) + err = az.handleStorageError(err) if err == nil { azStatsCollector.PushEvents( @@ -369,7 +506,8 @@ func (az *AzStorage) CreateFile(options internal.CreateFileOptions) (*handlemap. return nil, syscall.EFAULT } - err := az.storage.CreateFile(options.Name, options.Mode) + err := az.storage.CreateFile(az.ctx, options.Name, options.Mode) + err = az.handleStorageError(err) if err != nil { return nil, err } @@ -390,7 +528,8 @@ func (az *AzStorage) CreateFile(options internal.CreateFileOptions) (*handlemap. 
func (az *AzStorage) OpenFile(options internal.OpenFileOptions) (*handlemap.Handle, error) { log.Trace("AzStorage::OpenFile : %s", options.Name) - attr, err := az.storage.GetAttr(options.Name) + attr, err := az.storage.GetAttr(az.ctx, options.Name) + err = az.handleStorageError(err) if err != nil { return nil, err } @@ -423,7 +562,8 @@ func (az *AzStorage) ReleaseFile(options internal.ReleaseFileOptions) error { func (az *AzStorage) DeleteFile(options internal.DeleteFileOptions) error { log.Trace("AzStorage::DeleteFile : %s", options.Name) - err := az.storage.DeleteFile(options.Name) + err := az.storage.DeleteFile(az.ctx, options.Name) + err = az.handleStorageError(err) if err == nil { azStatsCollector.PushEvents(deleteFile, options.Name, nil) @@ -436,7 +576,8 @@ func (az *AzStorage) DeleteFile(options internal.DeleteFileOptions) error { func (az *AzStorage) RenameFile(options internal.RenameFileOptions) error { log.Trace("AzStorage::RenameFile : %s to %s", options.Src, options.Dst) - err := az.storage.RenameFile(options.Src, options.Dst, options.SrcAttr) + err := az.storage.RenameFile(az.ctx, options.Src, options.Dst, options.SrcAttr) + err = az.handleStorageError(err) if err == nil { azStatsCollector.PushEvents( @@ -480,7 +621,7 @@ func (az *AzStorage) ReadInBuffer(options *internal.ReadInBufferOptions) (length } length = int(dataLen) - err = az.storage.ReadInBuffer(path, options.Offset, dataLen, options.Data, options.Etag) + err = az.storage.ReadInBuffer(az.ctx, path, options.Offset, dataLen, options.Data, options.Etag) if err != nil { log.Err("AzStorage::ReadInBuffer : Failed to read %s [%s]", path, err.Error()) length = 0 @@ -490,20 +631,23 @@ func (az *AzStorage) ReadInBuffer(options *internal.ReadInBufferOptions) (length } func (az *AzStorage) WriteFile(options *internal.WriteFileOptions) (int, error) { - err := az.storage.Write(options) + err := az.storage.Write(az.ctx, options) + err = az.handleStorageError(err) return len(options.Data), err } func (az 
*AzStorage) GetFileBlockOffsets( options internal.GetFileBlockOffsetsOptions, ) (*common.BlockOffsetList, error) { - return az.storage.GetFileBlockOffsets(options.Name) - + bol, err := az.storage.GetFileBlockOffsets(az.ctx, options.Name) + err = az.handleStorageError(err) + return bol, err } func (az *AzStorage) TruncateFile(options internal.TruncateFileOptions) error { log.Trace("AzStorage::TruncateFile : %s to %d bytes", options.Name, options.NewSize) - err := az.storage.TruncateFile(options) + err := az.storage.TruncateFile(az.ctx, options) + err = az.handleStorageError(err) if err == nil { azStatsCollector.PushEvents( @@ -518,12 +662,16 @@ func (az *AzStorage) TruncateFile(options internal.TruncateFileOptions) error { func (az *AzStorage) CopyToFile(options internal.CopyToFileOptions) error { log.Trace("AzStorage::CopyToFile : Read file %s", options.Name) - return az.storage.ReadToFile(options.Name, options.Offset, options.Count, options.File) + err := az.storage.ReadToFile(az.ctx, options.Name, options.Offset, options.Count, options.File) + err = az.handleStorageError(err) + return err } func (az *AzStorage) CopyFromFile(options internal.CopyFromFileOptions) error { log.Trace("AzStorage::CopyFromFile : Upload file %s", options.Name) - return az.storage.WriteFromFile(options.Name, options.Metadata, options.File) + err := az.storage.WriteFromFile(az.ctx, options.Name, options.Metadata, options.File) + err = az.handleStorageError(err) + return err } // Symlink operations @@ -537,7 +685,8 @@ func (az *AzStorage) CreateLink(options internal.CreateLinkOptions) error { return syscall.ENOTSUP } log.Trace("AzStorage::CreateLink : Create symlink %s -> %s", options.Name, options.Target) - err := az.storage.CreateLink(options.Name, options.Target) + err := az.storage.CreateLink(az.ctx, options.Name, options.Target) + err = az.handleStorageError(err) if err == nil { azStatsCollector.PushEvents( @@ -557,7 +706,8 @@ func (az *AzStorage) ReadLink(options 
internal.ReadLinkOptions) (string, error) return "", syscall.ENOENT } log.Trace("AzStorage::ReadLink : Read symlink %s", options.Name) - data, err := az.storage.ReadBuffer(options.Name, 0, options.Size) + data, err := az.storage.ReadBuffer(az.ctx, options.Name, 0, options.Size) + err = az.handleStorageError(err) if err != nil { azStatsCollector.PushEvents(readLink, options.Name, nil) @@ -570,12 +720,15 @@ func (az *AzStorage) ReadLink(options internal.ReadLinkOptions) (string, error) // Attribute operations func (az *AzStorage) GetAttr(options internal.GetAttrOptions) (attr *internal.ObjAttr, err error) { //log.Trace("AzStorage::GetAttr : Get attributes of file %s", name) - return az.storage.GetAttr(options.Name) + attr, err = az.storage.GetAttr(az.ctx, options.Name) + err = az.handleStorageError(err) + return attr, err } func (az *AzStorage) Chmod(options internal.ChmodOptions) error { log.Trace("AzStorage::Chmod : Change mod of file %s", options.Name) - err := az.storage.ChangeMod(options.Name, options.Mode) + err := az.storage.ChangeMod(az.ctx, options.Name, options.Mode) + err = az.handleStorageError(err) if err == nil { azStatsCollector.PushEvents( @@ -596,24 +749,38 @@ func (az *AzStorage) Chown(options internal.ChownOptions) error { options.Owner, options.Group, ) - return az.storage.ChangeOwner(options.Name, options.Owner, options.Group) + err := az.storage.ChangeOwner(az.ctx, options.Name, options.Owner, options.Group) + err = az.handleStorageError(err) + return err } func (az *AzStorage) FlushFile(options internal.FlushFileOptions) error { log.Trace("AzStorage::FlushFile : Flush file %s", options.Handle.Path) - return az.storage.StageAndCommit(options.Handle.Path, options.Handle.CacheObj.BlockOffsetList) + err := az.storage.StageAndCommit( + az.ctx, + options.Handle.Path, + options.Handle.CacheObj.BlockOffsetList, + ) + err = az.handleStorageError(err) + return err } func (az *AzStorage) GetCommittedBlockList(name string) (*internal.CommittedBlockList, 
error) { - return az.storage.GetCommittedBlockList(name) + cbl, err := az.storage.GetCommittedBlockList(az.ctx, name) + err = az.handleStorageError(err) + return cbl, err } func (az *AzStorage) StageData(opt internal.StageDataOptions) error { - return az.storage.StageBlock(opt.Name, opt.Data, opt.Id) + err := az.storage.StageBlock(az.ctx, opt.Name, opt.Data, opt.Id) + err = az.handleStorageError(err) + return err } func (az *AzStorage) CommitData(opt internal.CommitDataOptions) error { - return az.storage.CommitBlocks(opt.Name, opt.List, opt.NewETag) + err := az.storage.CommitBlocks(az.ctx, opt.Name, opt.List, opt.NewETag) + err = az.handleStorageError(err) + return err } // TODO : Below methods are pending to be implemented diff --git a/component/azstorage/block_blob.go b/component/azstorage/block_blob.go index b200f8153..25c609765 100644 --- a/component/azstorage/block_blob.go +++ b/component/azstorage/block_blob.go @@ -271,13 +271,13 @@ func (bb *BlockBlob) IsAccountADLS() bool { return false } -func (bb *BlockBlob) ListContainers() ([]string, error) { +func (bb *BlockBlob) ListContainers(ctx context.Context) ([]string, error) { log.Trace("BlockBlob::ListContainers : Listing containers") cntList := make([]string, 0) pager := bb.Service.NewListContainersPager(nil) for pager.More() { - resp, err := pager.NextPage(context.Background()) + resp, err := pager.NextPage(ctx) if err != nil { log.Err("BlockBlob::ListContainers : Failed to get container list [%s]", err.Error()) return cntList, err @@ -290,6 +290,13 @@ func (bb *BlockBlob) ListContainers() ([]string, error) { return cntList, nil } +// check the connection to the service by calling GetProperties on the container +func (bb *BlockBlob) ConnectionOkay(ctx context.Context) error { + log.Trace("BlockBlob::ConnectionOkay : checking connection to cloud service") + _, err := bb.Container.GetProperties(ctx, nil) + return err +} + func (bb *BlockBlob) SetPrefixPath(path string) error { 
log.Trace("BlockBlob::SetPrefixPath : path %s", path) bb.Config.prefixPath = path @@ -297,38 +304,38 @@ func (bb *BlockBlob) SetPrefixPath(path string) error { } // CreateFile : Create a new file in the container/virtual directory -func (bb *BlockBlob) CreateFile(name string, mode os.FileMode) error { +func (bb *BlockBlob) CreateFile(ctx context.Context, name string, mode os.FileMode) error { log.Trace("BlockBlob::CreateFile : name %s", name) var data []byte - return bb.WriteFromBuffer(name, nil, data) + return bb.WriteFromBuffer(ctx, name, nil, data) } // CreateDirectory : Create a new directory in the container/virtual directory -func (bb *BlockBlob) CreateDirectory(name string) error { +func (bb *BlockBlob) CreateDirectory(ctx context.Context, name string) error { log.Trace("BlockBlob::CreateDirectory : name %s", name) var data []byte metadata := make(map[string]*string) metadata[folderKey] = new("true") - return bb.WriteFromBuffer(name, metadata, data) + return bb.WriteFromBuffer(ctx, name, metadata, data) } // CreateLink : Create a symlink in the container/virtual directory -func (bb *BlockBlob) CreateLink(source string, target string) error { +func (bb *BlockBlob) CreateLink(ctx context.Context, source string, target string) error { log.Trace("BlockBlob::CreateLink : %s -> %s", source, target) data := []byte(target) metadata := make(map[string]*string) metadata[symlinkKey] = new("true") - return bb.WriteFromBuffer(source, metadata, data) + return bb.WriteFromBuffer(ctx, source, metadata, data) } // DeleteFile : Delete a blob in the container/virtual directory -func (bb *BlockBlob) DeleteFile(name string) (err error) { +func (bb *BlockBlob) DeleteFile(ctx context.Context, name string) (err error) { log.Trace("BlockBlob::DeleteFile : name %s", name) blobClient := bb.getBlobClient(name) - _, err = blobClient.Delete(context.Background(), &blob.DeleteOptions{ + _, err = blobClient.Delete(ctx, &blob.DeleteOptions{ DeleteSnapshots: 
to.Ptr(blob.DeleteSnapshotsOptionTypeInclude), }) if err != nil { @@ -350,9 +357,9 @@ func (bb *BlockBlob) DeleteFile(name string) (err error) { } // DeleteDirectory : Delete a virtual directory in the container/virtual directory -func (bb *BlockBlob) DeleteDirectory(name string) (err error) { +func (bb *BlockBlob) DeleteDirectory(ctx context.Context, name string) (err error) { log.Trace("BlockBlob::DeleteDirectory : name %s", name) - err = bb.DeleteFile(name) + err = bb.DeleteFile(ctx, name) // libfuse deletes the files in the directory before this method is called. // If the marker blob for directory is not present, ignore the ENOENT error. if err == syscall.ENOENT { @@ -368,7 +375,12 @@ func (bb *BlockBlob) DeleteDirectory(name string) (err error) { // Etag of the destination blob changes. // Copy the LMT to the src attr if the copy is success. // https://learn.microsoft.com/en-us/rest/api/storageservices/copy-blob?tabs=microsoft-entra-id -func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal.ObjAttr) error { +func (bb *BlockBlob) RenameFile( + ctx context.Context, + source string, + target string, + srcAttr *internal.ObjAttr, +) error { log.Trace("BlockBlob::RenameFile : %s -> %s", source, target) blobClient := bb.getBlobClient(source) @@ -377,7 +389,7 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. // not specifying source blob metadata, since passing empty metadata headers copies // the source blob metadata to destination blob copyResponse, err := newBlobClient.StartCopyFromURL( - context.Background(), + ctx, blobClient.URL(), &blob.StartCopyFromURLOptions{ Tier: bb.Config.defaultTier, @@ -405,7 +417,7 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. 
for copyStatus != nil && *copyStatus == blob.CopyStatusTypePending { time.Sleep(time.Second * 1) pollCnt++ - prop, err = newBlobClient.GetProperties(context.Background(), &blob.GetPropertiesOptions{ + prop, err = newBlobClient.GetProperties(ctx, &blob.GetPropertiesOptions{ CPKInfo: bb.blobCPKOpt, }) if err != nil { @@ -430,7 +442,7 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. log.Trace("BlockBlob::RenameFile : %s -> %s done", source, target) // Copy of the file is done so now delete the older file - err = bb.DeleteFile(source) + err = bb.DeleteFile(ctx, source) for retry := 0; retry < 3 && err == syscall.ENOENT; retry++ { // Sometimes backend is able to copy source file to destination but when we try to delete the // source files it returns back with ENOENT. If file was just created on backend it might happen @@ -442,7 +454,7 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. retry, ) time.Sleep(1 * time.Second) - err = bb.DeleteFile(source) + err = bb.DeleteFile(ctx, source) } if err == syscall.ENOENT { @@ -455,7 +467,7 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. 
} // RenameDirectory : Rename the directory -func (bb *BlockBlob) RenameDirectory(source string, target string) error { +func (bb *BlockBlob) RenameDirectory(ctx context.Context, source string, target string) error { log.Trace("BlockBlob::RenameDirectory : %s -> %s", source, target) srcDirPresent := false @@ -463,7 +475,7 @@ func (bb *BlockBlob) RenameDirectory(source string, target string) error { Prefix: new(bb.getFormattedPath(source) + "/"), }) for pager.More() { - listBlobResp, err := pager.NextPage(context.Background()) + listBlobResp, err := pager.NextPage(ctx) if err != nil { log.Err("BlockBlob::RenameDirectory : Failed to get list of blobs %s", err.Error()) return err @@ -473,7 +485,7 @@ func (bb *BlockBlob) RenameDirectory(source string, target string) error { for _, blobInfo := range listBlobResp.Segment.BlobItems { srcDirPresent = true srcPath := removePrefixPath(bb.Config.prefixPath, *blobInfo.Name) - err = bb.RenameFile(srcPath, strings.Replace(srcPath, source, target, 1), nil) + err = bb.RenameFile(ctx, srcPath, strings.Replace(srcPath, source, target, 1), nil) if err != nil { log.Err( "BlockBlob::RenameDirectory : Failed to rename file %s [%s]", @@ -486,7 +498,7 @@ func (bb *BlockBlob) RenameDirectory(source string, target string) error { // To rename source marker blob check its properties before calling rename on it. 
blobClient := bb.Container.NewBlockBlobClient(filepath.Join(bb.Config.prefixPath, source)) - _, err := blobClient.GetProperties(context.Background(), &blob.GetPropertiesOptions{ + _, err := blobClient.GetProperties(ctx, &blob.GetPropertiesOptions{ CPKInfo: bb.blobCPKOpt, }) if err != nil { @@ -510,14 +522,17 @@ func (bb *BlockBlob) RenameDirectory(source string, target string) error { } } - return bb.RenameFile(source, target, nil) + return bb.RenameFile(ctx, source, target, nil) } -func (bb *BlockBlob) getAttrUsingRest(name string) (attr *internal.ObjAttr, err error) { +func (bb *BlockBlob) getAttrUsingRest( + ctx context.Context, + name string, +) (attr *internal.ObjAttr, err error) { log.Trace("BlockBlob::getAttrUsingRest : name %s", name) blobClient := bb.getBlockBlobClient(name) - prop, err := blobClient.GetProperties(context.Background(), &blob.GetPropertiesOptions{ + prop, err := blobClient.GetProperties(ctx, &blob.GetPropertiesOptions{ CPKInfo: bb.blobCPKOpt, }) @@ -566,7 +581,10 @@ func (bb *BlockBlob) getAttrUsingRest(name string) (attr *internal.ObjAttr, err return attr, nil } -func (bb *BlockBlob) getAttrUsingList(name string) (attr *internal.ObjAttr, err error) { +func (bb *BlockBlob) getAttrUsingList( + ctx context.Context, + name string, +) (attr *internal.ObjAttr, err error) { log.Trace("BlockBlob::getAttrUsingList : name %s", name) iteration := 0 @@ -575,7 +593,7 @@ func (bb *BlockBlob) getAttrUsingList(name string) (attr *internal.ObjAttr, err blobsRead := 0 for marker != nil || iteration == 0 { - blobs, new_marker, err = bb.List(name, marker, bb.Config.maxResultsForList) + blobs, new_marker, err = bb.List(ctx, name, marker, bb.Config.maxResultsForList) if err != nil { e := storeBlobErrToErr(err) switch e { @@ -632,14 +650,14 @@ func (bb *BlockBlob) getAttrUsingList(name string) (attr *internal.ObjAttr, err } // GetAttr : Retrieve attributes of the blob -func (bb *BlockBlob) GetAttr(name string) (attr *internal.ObjAttr, err error) { +func (bb 
*BlockBlob) GetAttr(ctx context.Context, name string) (attr *internal.ObjAttr, err error) { log.Trace("BlockBlob::GetAttr : name %s", name) // To support virtual directories with no marker blob, we call list instead of get properties since list will not return a 404 if bb.Config.virtualDirectory { - attr, err = bb.getAttrUsingList(name) + attr, err = bb.getAttrUsingList(ctx, name) } else { - attr, err = bb.getAttrUsingRest(name) + attr, err = bb.getAttrUsingRest(ctx, name) } if bb.Config.filter != nil && attr != nil { @@ -660,6 +678,7 @@ func (bb *BlockBlob) GetAttr(name string) (attr *internal.ObjAttr, err error) { // This fetches the list using a marker so the caller code should handle marker logic // If count=0 - fetch max entries func (bb *BlockBlob) List( + ctx context.Context, prefix string, marker *string, count int32, @@ -686,7 +705,7 @@ func (bb *BlockBlob) List( Include: bb.listDetails, }) - listBlob, err := pager.NextPage(context.Background()) + listBlob, err := pager.NextPage(ctx) // Note: Since we make a list call with a prefix, we will not fail here for a non-existent directory. // The blob service will not validate for us whether or not the path exists. @@ -701,7 +720,7 @@ func (bb *BlockBlob) List( // Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute) // Since block blob does not support acls, we set mode to 0 and FlagModeDefault to true so the fuse layer can return the default permission. - blobList, dirList, err := bb.processBlobItems(listBlob.Segment.BlobItems) + blobList, dirList, err := bb.processBlobItems(ctx, listBlob.Segment.BlobItems) if err != nil { return nil, nil, err } @@ -711,7 +730,7 @@ func (bb *BlockBlob) List( // dirList contains all dirs for which we got 0 byte meta file in this iteration, so exclude those and add rest to the list // Note: Since listing is paginated, sometimes the marker file may come in a different iteration from the BlobPrefix. 
For such // cases we manually call GetAttr to check the existence of the marker file. - err = bb.processBlobPrefixes(listBlob.Segment.BlobPrefixes, dirList, &blobList) + err = bb.processBlobPrefixes(ctx, listBlob.Segment.BlobPrefixes, dirList, &blobList) if err != nil { return nil, nil, err } @@ -729,6 +748,7 @@ func (bb *BlockBlob) getListPath(prefix string) string { } func (bb *BlockBlob) processBlobItems( + ctx context.Context, blobItems []*container.BlobItem, ) ([]*internal.ObjAttr, map[string]bool, error) { blobList := make([]*internal.ObjAttr, 0) @@ -738,7 +758,7 @@ func (bb *BlockBlob) processBlobItems( for _, blobInfo := range blobItems { blobInfo.Name = bb.getFileName(*blobInfo.Name) - blobAttr, err := bb.getBlobAttr(blobInfo) + blobAttr, err := bb.getBlobAttr(ctx, blobInfo) if err != nil { return nil, nil, err } @@ -767,13 +787,16 @@ func (bb *BlockBlob) processBlobItems( return blobList, dirList, nil } -func (bb *BlockBlob) getBlobAttr(blobInfo *container.BlobItem) (*internal.ObjAttr, error) { +func (bb *BlockBlob) getBlobAttr( + ctx context.Context, + blobInfo *container.BlobItem, +) (*internal.ObjAttr, error) { if blobInfo.Properties.CustomerProvidedKeySHA256 != nil && *blobInfo.Properties.CustomerProvidedKeySHA256 != "" { log.Trace( "BlockBlob::List : blob is encrypted with customer provided key so fetching metadata explicitly using REST", ) - return bb.getAttrUsingRest(*blobInfo.Name) + return bb.getAttrUsingRest(ctx, *blobInfo.Name) } mode, err := bb.getFileMode(blobInfo.Properties.Permissions) if err != nil { @@ -829,6 +852,7 @@ func (bb *BlockBlob) dereferenceTime(input *time.Time, defaultTime time.Time) ti } func (bb *BlockBlob) processBlobPrefixes( + ctx context.Context, blobPrefixes []*container.BlobPrefix, dirList map[string]bool, blobList *[]*internal.ObjAttr, @@ -848,7 +872,7 @@ func (bb *BlockBlob) processBlobPrefixes( *blobList = append(*blobList, attr) } else { // marker file not found in current iteration, so we need to manually check 
attributes via REST - _, err := bb.getAttrUsingRest(*blobInfo.Name) + _, err := bb.getAttrUsingRest(ctx, *blobInfo.Name) // marker file also not found via manual check, safe to add to list // For HNS accounts mounted as FNS we used to list directories and files in blobfusev1, // in blobfusev2 to replicate this behaviour the below check of blobInfo.Properties != nil is added. @@ -946,7 +970,13 @@ func trackDownload(name string, bytesTransferred int64, count int64, downloadPtr } // ReadToFile : Download a blob to a local file -func (bb *BlockBlob) ReadToFile(name string, offset int64, count int64, fi *os.File) (err error) { +func (bb *BlockBlob) ReadToFile( + ctx context.Context, + name string, + offset int64, + count int64, + fi *os.File, +) (err error) { log.Trace("BlockBlob::ReadToFile : name %s, offset : %d, count %d", name, offset, count) //defer exectime.StatTimeCurrentBlock("BlockBlob::ReadToFile")() @@ -968,7 +998,7 @@ func (bb *BlockBlob) ReadToFile(name string, offset int64, count int64, fi *os.F Count: count, } - _, err = blobClient.DownloadFile(context.Background(), fi, &dlOpts) + _, err = blobClient.DownloadFile(ctx, fi, &dlOpts) if err != nil { e := storeBlobErrToErr(err) @@ -992,7 +1022,7 @@ func (bb *BlockBlob) ReadToFile(name string, offset int64, count int64, fi *os.F log.Warn("BlockBlob::ReadToFile : Failed to generate MD5 Sum for %s", name) } else { // Get latest properties from container to get the md5 of blob - prop, err := blobClient.GetProperties(context.Background(), &blob.GetPropertiesOptions{ + prop, err := blobClient.GetProperties(ctx, &blob.GetPropertiesOptions{ CPKInfo: bb.blobCPKOpt, }) if err != nil { @@ -1020,11 +1050,16 @@ func (bb *BlockBlob) ReadToFile(name string, offset int64, count int64, fi *os.F } // ReadBuffer : Download a specific range from a blob to a buffer -func (bb *BlockBlob) ReadBuffer(name string, offset int64, length int64) ([]byte, error) { +func (bb *BlockBlob) ReadBuffer( + ctx context.Context, + name string, + 
offset int64, + length int64, +) ([]byte, error) { log.Trace("BlockBlob::ReadBuffer : name %s, offset %v, len %v", name, offset, length) var buff []byte if length == 0 { - attr, err := bb.GetAttr(name) + attr, err := bb.GetAttr(ctx, name) if err != nil { return buff, err } @@ -1040,7 +1075,7 @@ func (bb *BlockBlob) ReadBuffer(name string, offset int64, length int64) ([]byte Count: length, } - _, err := blobClient.DownloadBuffer(context.Background(), buff, &dlOpts) + _, err := blobClient.DownloadBuffer(ctx, buff, &dlOpts) if err != nil { e := storeBlobErrToErr(err) @@ -1060,6 +1095,7 @@ func (bb *BlockBlob) ReadBuffer(name string, offset int64, length int64) ([]byte // ReadInBuffer : Download specific range from a file to a user provided buffer func (bb *BlockBlob) ReadInBuffer( + ctx context.Context, name string, offset int64, length int64, @@ -1072,8 +1108,7 @@ func (bb *BlockBlob) ReadInBuffer( } blobClient := bb.getBlobClient(name) - - ctx, cancel := context.WithTimeout(context.Background(), max_context_timeout*time.Minute) + ctx, cancel := context.WithTimeout(ctx, max_context_timeout*time.Minute) defer cancel() opt := &blob.DownloadStreamOptions{ @@ -1206,6 +1241,7 @@ func trackUpload(name string, bytesTransferred int64, count int64, uploadPtr *in // WriteFromFile : Upload local file to blob func (bb *BlockBlob) WriteFromFile( + ctx context.Context, name string, metadata map[string]*string, fi *os.File, @@ -1265,7 +1301,7 @@ func (bb *BlockBlob) WriteFromFile( } } - _, err = blobClient.UploadFile(context.Background(), fi, uploadOptions) + _, err = blobClient.UploadFile(ctx, fi, uploadOptions) if err != nil { serr := storeBlobErrToErr(err) @@ -1301,13 +1337,18 @@ func (bb *BlockBlob) WriteFromFile( } // WriteFromBuffer : Upload from a buffer to a blob -func (bb *BlockBlob) WriteFromBuffer(name string, metadata map[string]*string, data []byte) error { +func (bb *BlockBlob) WriteFromBuffer( + ctx context.Context, + name string, + metadata map[string]*string, + 
data []byte, +) error { log.Trace("BlockBlob::WriteFromBuffer : name %s", name) blobClient := bb.getBlockBlobClient(name) defer log.TimeTrack(time.Now(), "BlockBlob::WriteFromBuffer", name) - _, err := blobClient.UploadBuffer(context.Background(), data, &blockblob.UploadBufferOptions{ + _, err := blobClient.UploadBuffer(ctx, data, &blockblob.UploadBufferOptions{ BlockSize: bb.Config.blockSize, Concurrency: bb.Config.maxConcurrency, Metadata: metadata, @@ -1327,13 +1368,16 @@ func (bb *BlockBlob) WriteFromBuffer(name string, metadata map[string]*string, d } // GetFileBlockOffsets: store blocks ids and corresponding offsets -func (bb *BlockBlob) GetFileBlockOffsets(name string) (*common.BlockOffsetList, error) { +func (bb *BlockBlob) GetFileBlockOffsets( + ctx context.Context, + name string, +) (*common.BlockOffsetList, error) { var blockOffset int64 = 0 blockList := common.BlockOffsetList{} blobClient := bb.getBlockBlobClient(name) storageBlockList, err := blobClient.GetBlockList( - context.Background(), + ctx, blockblob.BlockListTypeCommitted, nil, ) @@ -1439,6 +1483,7 @@ func (bb *BlockBlob) createNewBlocks( // This function is called when the file is expanded using truncate operation and the new size is greater than the old // size of the file. 
func (bb *BlockBlob) createNewBlocksTruncate( + ctx context.Context, blockList *common.BlockOffsetList, options *internal.TruncateFileOptions, ) error { @@ -1501,6 +1546,7 @@ func (bb *BlockBlob) createNewBlocksTruncate( lastBlock.Id = common.GetBlockID(blockList.BlockIdLength) err := bb.ReadInBuffer( + ctx, options.Name, lastBlock.StartIndex, lastBlock.EndIndex-lastBlock.StartIndex, @@ -1537,6 +1583,7 @@ func (bb *BlockBlob) createNewBlocksTruncate( } func (bb *BlockBlob) removeBlocksTruncate( + ctx context.Context, blockList *common.BlockOffsetList, options *internal.TruncateFileOptions, ) error { @@ -1579,6 +1626,7 @@ func (bb *BlockBlob) removeBlocksTruncate( blk.Id = common.GetBlockID(blockList.BlockIdLength) err := bb.ReadInBuffer( + ctx, options.Name, blk.StartIndex, blk.EndIndex-blk.StartIndex, @@ -1601,13 +1649,13 @@ func (bb *BlockBlob) removeBlocksTruncate( return nil } -func (bb *BlockBlob) TruncateFile(options internal.TruncateFileOptions) error { +func (bb *BlockBlob) TruncateFile(ctx context.Context, options internal.TruncateFileOptions) error { log.Trace("BlockBlob::TruncateFile : name: %s, old size: %d, new size: %d", options.Name, options.OldSize, options.NewSize) // If old size is not specified, get it from the storage. 
if options.OldSize == -1 { - attr, err := bb.GetAttr(options.Name) + attr, err := bb.GetAttr(ctx, options.Name) if err != nil { log.Err( "BlockBlob::TruncateFile : Failed to get attributes of file %s [%v]", @@ -1633,7 +1681,7 @@ func (bb *BlockBlob) TruncateFile(options internal.TruncateFileOptions) error { if options.NewSize == 0 { var buf []byte - if err := bb.WriteFromBuffer(options.Name, nil, buf); err != nil { + if err := bb.WriteFromBuffer(ctx, options.Name, nil, buf); err != nil { log.Err( "BlockBlob::TruncateFile : Failed to truncate file %s to zero size [%v]", options.Name, @@ -1646,7 +1694,7 @@ func (bb *BlockBlob) TruncateFile(options internal.TruncateFileOptions) error { // Handle files whose new size <= 256MiB && when block size is not specified. if options.NewSize <= blockblob.MaxUploadBlobBytes && options.BlockSize == 0 { - if err := bb.TruncateFileWithoutBlocks(&options); err != nil { + if err := bb.TruncateFileWithoutBlocks(ctx, &options); err != nil { log.Err("BlockBlob::TruncateFile : Failed to truncate file %s[%v]", options.Name, err) return err } @@ -1654,7 +1702,7 @@ func (bb *BlockBlob) TruncateFile(options internal.TruncateFileOptions) error { } // Truncate file by managing blocks - if err := bb.TruncateFileUsingBlocks(&options); err != nil { + if err := bb.TruncateFileUsingBlocks(ctx, &options); err != nil { log.Err("BlockBlob::TruncateFile : Failed to truncate file %s[%v]", options.Name, err) return err } @@ -1662,7 +1710,10 @@ func (bb *BlockBlob) TruncateFile(options internal.TruncateFileOptions) error { return nil } -func (bb *BlockBlob) TruncateFileWithoutBlocks(options *internal.TruncateFileOptions) error { +func (bb *BlockBlob) TruncateFileWithoutBlocks( + ctx context.Context, + options *internal.TruncateFileOptions, +) error { log.Trace( "BlockBlob::TruncateFileWithoutBlocks : name: %s, old size: %d, new size: %d", options.Name, @@ -1675,7 +1726,7 @@ func (bb *BlockBlob) TruncateFileWithoutBlocks(options *internal.TruncateFileOpt if 
options.OldSize > 0 { // Read the file - err = bb.ReadInBuffer(options.Name, 0, min(options.NewSize, options.OldSize), buf, nil) + err = bb.ReadInBuffer(ctx, options.Name, 0, min(options.NewSize, options.OldSize), buf, nil) if err != nil { log.Err( "BlockBlob::TruncateFileWithoutBlocks : Failed to read small file %s[%v]", @@ -1687,7 +1738,7 @@ func (bb *BlockBlob) TruncateFileWithoutBlocks(options *internal.TruncateFileOpt } // Write the file - err = bb.WriteFromBuffer(options.Name, nil, buf) + err = bb.WriteFromBuffer(ctx, options.Name, nil, buf) if err != nil { log.Err( "BlockBlob::TruncateFileWithoutBlocks : Failed to write from buffer file %s[%v]", @@ -1700,7 +1751,10 @@ func (bb *BlockBlob) TruncateFileWithoutBlocks(options *internal.TruncateFileOpt return nil } -func (bb *BlockBlob) TruncateFileUsingBlocks(options *internal.TruncateFileOptions) error { +func (bb *BlockBlob) TruncateFileUsingBlocks( + ctx context.Context, + options *internal.TruncateFileOptions, +) error { log.Trace( "BlockBlob::TruncateFileUsingBlocks : name: %s, old size: %d, new size: %d", options.Name, @@ -1709,7 +1763,7 @@ func (bb *BlockBlob) TruncateFileUsingBlocks(options *internal.TruncateFileOptio ) var err error - blob, err := bb.GetFileBlockOffsets(options.Name) + blob, err := bb.GetFileBlockOffsets(ctx, options.Name) if err != nil { log.Err( "BlockBlob::TruncateFileUsingBlocks : Failed to get block offsets for file %s[%v]", @@ -1734,7 +1788,7 @@ func (bb *BlockBlob) TruncateFileUsingBlocks(options *internal.TruncateFileOptio buf := make([]byte, options.OldSize) // Read the file - err = bb.ReadInBuffer(options.Name, 0, options.OldSize, buf, nil) + err = bb.ReadInBuffer(ctx, options.Name, 0, options.OldSize, buf, nil) if err != nil { log.Err( "BlockBlob::TruncateFileUsingBlocks : Failed to read small file %s[%v]", @@ -1774,7 +1828,7 @@ func (bb *BlockBlob) TruncateFileUsingBlocks(options *internal.TruncateFileOptio } if options.NewSize < options.OldSize { - err = 
bb.removeBlocksTruncate(blob, options) + err = bb.removeBlocksTruncate(ctx, blob, options) if err != nil { log.Err( "BlockBlob::TruncateFileUsingBlocks : Failed to Remove Blocks from Blocklist for file %s[%v]", @@ -1784,7 +1838,7 @@ func (bb *BlockBlob) TruncateFileUsingBlocks(options *internal.TruncateFileOptio return err } } else { - err = bb.createNewBlocksTruncate(blob, options) + err = bb.createNewBlocksTruncate(ctx, blob, options) if err != nil { log.Err( "BlockBlob::TruncateFileUsingBlocks : Failed to Create New Blocks for file %s[%v]", @@ -1796,7 +1850,7 @@ func (bb *BlockBlob) TruncateFileUsingBlocks(options *internal.TruncateFileOptio } // Stage and commit the blocks. - err = bb.StageAndCommit(options.Name, blob) + err = bb.StageAndCommit(ctx, options.Name, blob) if err != nil { log.Err( "BlockBlob::TruncateFileUsingBlocks : Failed to Stage and Commit blocks for file %s[%v]", @@ -1810,7 +1864,7 @@ func (bb *BlockBlob) TruncateFileUsingBlocks(options *internal.TruncateFileOptio } // Write : write data at given offset to a blob -func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { +func (bb *BlockBlob) Write(ctx context.Context, options *internal.WriteFileOptions) error { name := options.Handle.Path offset := options.Offset defer log.TimeTrack(time.Now(), "BlockBlob::Write", options.Handle.Path) @@ -1818,7 +1872,7 @@ func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { // tracks the case where our offset is great than our current file size (appending only - not modifying pre-existing data) var dataBuffer *[]byte // when the file offset mapping is cached we don't need to make a get block list call - fileOffsets, err := bb.GetFileBlockOffsets(name) + fileOffsets, err := bb.GetFileBlockOffsets(ctx, name) if err != nil { return err } @@ -1827,7 +1881,7 @@ func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { // case 1: file consists of no blocks (small file) if fileOffsets.HasNoBlocks() { // get all the data 
- oldData, _ := bb.ReadBuffer(name, 0, 0) + oldData, _ := bb.ReadBuffer(ctx, name, 0, 0) // update the data with the new data // if we're only overwriting existing data if int64(len(oldData)) >= offset+length { @@ -1853,7 +1907,7 @@ func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { } } // WriteFromBuffer should be able to handle the case where now the block is too big and gets split into multiple blocks - err := bb.WriteFromBuffer(name, options.Metadata, *dataBuffer) + err := bb.WriteFromBuffer(ctx, name, options.Metadata, *dataBuffer) if err != nil { log.Err("BlockBlob::Write : Failed to upload to blob %s [%s]", name, err.Error()) return err @@ -1884,6 +1938,7 @@ func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { if !appendOnly { // fetch the blocks that will be impacted by the new changes so we can overwrite them err = bb.ReadInBuffer( + ctx, name, fileOffsets.BlockList[index].StartIndex, oldDataSize, @@ -1901,7 +1956,7 @@ func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { // this gives us where the offset with respect to the buffer that holds our old data - so we can start writing the new data blockOffset := offset - fileOffsets.BlockList[index].StartIndex copy(oldDataBuffer[blockOffset:], data) - err := bb.stageAndCommitModifiedBlocks(name, oldDataBuffer, fileOffsets) + err := bb.stageAndCommitModifiedBlocks(ctx, name, oldDataBuffer, fileOffsets) return err } return nil @@ -1909,6 +1964,7 @@ func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { // TODO: make a similar method facing stream that would enable us to write to cached blocks then stage and commit func (bb *BlockBlob) stageAndCommitModifiedBlocks( + ctx context.Context, name string, data []byte, offsetList *common.BlockOffsetList, @@ -1920,7 +1976,7 @@ func (bb *BlockBlob) stageAndCommitModifiedBlocks( blockIDList = append(blockIDList, blk.Id) if blk.Dirty() { _, err := blobClient.StageBlock( - context.Background(), + ctx, 
blk.Id, streaming.NopCloser( bytes.NewReader(data[blockOffset:(blk.EndIndex-blk.StartIndex)+blockOffset]), @@ -1942,7 +1998,7 @@ func (bb *BlockBlob) stageAndCommitModifiedBlocks( blockOffset = (blk.EndIndex - blk.StartIndex) + blockOffset } } - _, err := blobClient.CommitBlockList(context.Background(), + _, err := blobClient.CommitBlockList(ctx, blockIDList, &blockblob.CommitBlockListOptions{ HTTPHeaders: &blob.HTTPHeaders{ @@ -1963,7 +2019,11 @@ func (bb *BlockBlob) stageAndCommitModifiedBlocks( return nil } -func (bb *BlockBlob) StageAndCommit(name string, bol *common.BlockOffsetList) error { +func (bb *BlockBlob) StageAndCommit( + ctx context.Context, + name string, + bol *common.BlockOffsetList, +) error { // lock on the blob name so that no stage and commit race condition occur causing failure blobMtx := bb.blockLocks.GetLock(name) blobMtx.Lock() @@ -2005,7 +2065,7 @@ func (bb *BlockBlob) StageAndCommit(name string, bol *common.BlockOffsetList) er } if blk.Dirty() { - _, err := blobClient.StageBlock(context.Background(), + _, err := blobClient.StageBlock(ctx, blk.Id, streaming.NopCloser(bytes.NewReader(data)), &blockblob.StageBlockOptions{ @@ -2027,7 +2087,7 @@ func (bb *BlockBlob) StageAndCommit(name string, bol *common.BlockOffsetList) er } if staged { - _, err := blobClient.CommitBlockList(context.Background(), + _, err := blobClient.CommitBlockList(ctx, blockIDList, &blockblob.CommitBlockListOptions{ HTTPHeaders: &blob.HTTPHeaders{ @@ -2052,7 +2112,7 @@ func (bb *BlockBlob) StageAndCommit(name string, bol *common.BlockOffsetList) er } // ChangeMod : Change mode of a blob -func (bb *BlockBlob) ChangeMod(name string, _ os.FileMode) error { +func (bb *BlockBlob) ChangeMod(ctx context.Context, name string, _ os.FileMode) error { log.Trace("BlockBlob::ChangeMod : name %s", name) if bb.Config.ignoreAccessModifiers { @@ -2066,7 +2126,7 @@ func (bb *BlockBlob) ChangeMod(name string, _ os.FileMode) error { } // ChangeOwner : Change owner of a blob -func (bb 
*BlockBlob) ChangeOwner(name string, _ int, _ int) error { +func (bb *BlockBlob) ChangeOwner(ctx context.Context, name string, _ int, _ int) error { log.Trace("BlockBlob::ChangeOwner : name %s", name) if bb.Config.ignoreAccessModifiers { @@ -2080,13 +2140,16 @@ func (bb *BlockBlob) ChangeOwner(name string, _ int, _ int) error { } // GetCommittedBlockList : Get the list of committed blocks -func (bb *BlockBlob) GetCommittedBlockList(name string) (*internal.CommittedBlockList, error) { +func (bb *BlockBlob) GetCommittedBlockList( + ctx context.Context, + name string, +) (*internal.CommittedBlockList, error) { blobClient := bb.Container.NewBlockBlobClient( common.JoinUnixFilepath(bb.Config.prefixPath, name), ) storageBlockList, err := blobClient.GetBlockList( - context.Background(), + ctx, blockblob.BlockListTypeCommitted, nil, ) @@ -2121,10 +2184,10 @@ func (bb *BlockBlob) GetCommittedBlockList(name string) (*internal.CommittedBloc } // StageBlock : stages a block and returns its blockid -func (bb *BlockBlob) StageBlock(name string, data []byte, id string) error { +func (bb *BlockBlob) StageBlock(ctx context.Context, name string, data []byte, id string) error { log.Trace("BlockBlob::StageBlock : name %s, ID %v, length %v", name, id, len(data)) - ctx, cancel := context.WithTimeout(context.Background(), max_context_timeout*time.Minute) + ctx, cancel := context.WithTimeout(ctx, max_context_timeout*time.Minute) defer cancel() blobClient := bb.Container.NewBlockBlobClient( @@ -2151,10 +2214,15 @@ func (bb *BlockBlob) StageBlock(name string, data []byte, id string) error { } // CommitBlocks : persists the block list -func (bb *BlockBlob) CommitBlocks(name string, blockList []string, newEtag *string) error { +func (bb *BlockBlob) CommitBlocks( + ctx context.Context, + name string, + blockList []string, + newEtag *string, +) error { log.Trace("BlockBlob::CommitBlocks : name %s", name) - ctx, cancel := context.WithTimeout(context.Background(), max_context_timeout*time.Minute) 
+ ctx, cancel := context.WithTimeout(ctx, max_context_timeout*time.Minute) defer cancel() blobClient := bb.Container.NewBlockBlobClient( diff --git a/component/azstorage/block_blob_test.go b/component/azstorage/block_blob_test.go index 06959961b..721ce274e 100644 --- a/component/azstorage/block_blob_test.go +++ b/component/azstorage/block_blob_test.go @@ -461,6 +461,37 @@ func (s *blockBlobTestSuite) TestListContainers() { s.assert.Equal(num, count) } +func (s *blockBlobTestSuite) TestCloudConnected() { + defer s.cleanupTest() + s.assert.True(s.az.CloudConnected()) +} + +func (s *blockBlobTestSuite) TestUpdateConnectionState() { + defer s.cleanupTest() + connected := s.az.updateConnectionState(&common.CloudUnreachableError{}) + s.assert.False(connected) + s.assert.False(s.az.CloudConnected()) + connected = s.az.updateConnectionState(nil) + s.assert.True(connected) + s.assert.True(s.az.CloudConnected()) +} + +func (s *blockBlobTestSuite) TestCloudOfflineCached() { + defer s.cleanupTest() + s.az.updateConnectionState(&common.CloudUnreachableError{}) + s.assert.False(s.az.CloudConnected()) + s.az.updateConnectionState(nil) +} + +func (s *blockBlobTestSuite) TestCloudOfflineContext() { + defer s.cleanupTest() + s.az.updateConnectionState(&common.CloudUnreachableError{}) + h, err := s.az.CreateFile(internal.CreateFileOptions{Name: "file" + randomString(8)}) + s.assert.Nil(h) + s.assert.True(isOfflineError(err)) + s.az.updateConnectionState(nil) +} + // TODO : ListContainersHuge: Maybe this is overkill? 
func checkMetadata(metadata map[string]*string, key string, val string) bool { @@ -3075,7 +3106,7 @@ func (s *blockBlobTestSuite) TestFlushFileUpdateChunkedFile() { s.assert.NoError(err) h.CacheObj.BlockOffsetList.BlockList[1].Data = make([]byte, blockSize) - err = s.az.storage.ReadInBuffer( + err = s.az.storage.ReadInBuffer(ctx, name, int64(blockSize), int64(blockSize), @@ -3132,7 +3163,7 @@ func (s *blockBlobTestSuite) TestFlushFileTruncateUpdateChunkedFile() { // truncate block h.CacheObj.BlockOffsetList.BlockList[1].Data = make([]byte, blockSize/2) h.CacheObj.BlockOffsetList.BlockList[1].EndIndex = int64(blockSize + blockSize/2) - err = s.az.storage.ReadInBuffer( + err = s.az.storage.ReadInBuffer(ctx, name, int64(blockSize), int64(blockSize)/2, @@ -3663,10 +3694,10 @@ func (s *blockBlobTestSuite) TestMD5SetOnUpload() { s.assert.Equal(blockblob.MaxUploadBlobBytes+1, n) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name, nil, f) + err = s.az.storage.WriteFromFile(ctx, name, nil, f) s.assert.NoError(err) - prop, err := s.az.storage.GetAttr(name) + prop, err := s.az.storage.GetAttr(ctx, name) s.assert.NoError(err) s.assert.NotEmpty(prop.MD5) @@ -3675,7 +3706,7 @@ func (s *blockBlobTestSuite) TestMD5SetOnUpload() { s.assert.NoError(err) s.assert.Equal(localMD5, prop.MD5) - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = f.Close() _ = os.Remove(name) }) @@ -3726,14 +3757,14 @@ func (s *blockBlobTestSuite) TestMD5NotSetOnUpload() { s.assert.Equal(blockblob.MaxUploadBlobBytes+1, n) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name, nil, f) + err = s.az.storage.WriteFromFile(ctx, name, nil, f) s.assert.NoError(err) - prop, err := s.az.storage.GetAttr(name) + prop, err := s.az.storage.GetAttr(ctx, name) s.assert.NoError(err) s.assert.Empty(prop.MD5) - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = f.Close() _ = os.Remove(name) }) @@ -3784,10 +3815,10 @@ func (s *blockBlobTestSuite) 
TestMD5AutoSetOnUpload() { s.assert.Equal(100, n) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name, nil, f) + err = s.az.storage.WriteFromFile(ctx, name, nil, f) s.assert.NoError(err) - prop, err := s.az.storage.GetAttr(name) + prop, err := s.az.storage.GetAttr(ctx, name) s.assert.NoError(err) s.assert.NotEmpty(prop.MD5) @@ -3796,7 +3827,7 @@ func (s *blockBlobTestSuite) TestMD5AutoSetOnUpload() { s.assert.NoError(err) s.assert.Equal(localMD5, prop.MD5) - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = f.Close() _ = os.Remove(name) }) @@ -3847,7 +3878,7 @@ func (s *blockBlobTestSuite) TestInvalidateMD5PostUpload() { s.assert.Equal(100, n) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name, nil, f) + err = s.az.storage.WriteFromFile(ctx, name, nil, f) s.assert.NoError(err) blobClient := s.containerClient.NewBlobClient(name) @@ -3857,7 +3888,7 @@ func (s *blockBlobTestSuite) TestInvalidateMD5PostUpload() { nil, ) - prop, err := s.az.storage.GetAttr(name) + prop, err := s.az.storage.GetAttr(ctx, name) s.assert.NoError(err) s.assert.NotEmpty(prop.MD5) @@ -3866,7 +3897,7 @@ func (s *blockBlobTestSuite) TestInvalidateMD5PostUpload() { s.assert.NoError(err) s.assert.NotEqual(localMD5, prop.MD5) - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = f.Close() _ = os.Remove(name) }) @@ -3917,12 +3948,12 @@ func (s *blockBlobTestSuite) TestValidateAutoMD5OnRead() { s.assert.Equal(100, n) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name, nil, f) + err = s.az.storage.WriteFromFile(ctx, name, nil, f) s.assert.NoError(err) _ = f.Close() _ = os.Remove(name) - prop, err := s.az.storage.GetAttr(name) + prop, err := s.az.storage.GetAttr(ctx, name) s.assert.NoError(err) s.assert.NotEmpty(prop.MD5) @@ -3930,10 +3961,10 @@ func (s *blockBlobTestSuite) TestValidateAutoMD5OnRead() { s.assert.NoError(err) s.assert.NotNil(f) - err = s.az.storage.ReadToFile(name, 0, 100, f) + err = 
s.az.storage.ReadToFile(ctx, name, 0, 100, f) s.assert.NoError(err) - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = os.Remove(name) }) } @@ -3983,12 +4014,12 @@ func (s *blockBlobTestSuite) TestValidateManualMD5OnRead() { s.assert.Equal(blockblob.MaxUploadBlobBytes+1, n) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name, nil, f) + err = s.az.storage.WriteFromFile(ctx, name, nil, f) s.assert.NoError(err) _ = f.Close() _ = os.Remove(name) - prop, err := s.az.storage.GetAttr(name) + prop, err := s.az.storage.GetAttr(ctx, name) s.assert.NoError(err) s.assert.NotEmpty(prop.MD5) @@ -3996,10 +4027,10 @@ func (s *blockBlobTestSuite) TestValidateManualMD5OnRead() { s.assert.NoError(err) s.assert.NotNil(f) - err = s.az.storage.ReadToFile(name, 0, blockblob.MaxUploadBlobBytes+1, f) + err = s.az.storage.ReadToFile(ctx, name, 0, blockblob.MaxUploadBlobBytes+1, f) s.assert.NoError(err) - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = os.Remove(name) }) } @@ -4049,7 +4080,7 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnRead() { s.assert.Equal(100, n) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name, nil, f) + err = s.az.storage.WriteFromFile(ctx, name, nil, f) s.assert.NoError(err) _ = f.Close() _ = os.Remove(name) @@ -4061,7 +4092,7 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnRead() { nil, ) - prop, err := s.az.storage.GetAttr(name) + prop, err := s.az.storage.GetAttr(ctx, name) s.assert.NoError(err) s.assert.NotEmpty(prop.MD5) @@ -4069,11 +4100,11 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnRead() { s.assert.NoError(err) s.assert.NotNil(f) - err = s.az.storage.ReadToFile(name, 0, 100, f) + err = s.az.storage.ReadToFile(ctx, name, 0, 100, f) s.assert.Error(err) s.assert.Contains(err.Error(), "md5 sum mismatch on download") - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = os.Remove(name) }) } @@ -4123,7 +4154,7 @@ func (s *blockBlobTestSuite) 
TestInvalidMD5OnReadNoVaildate() { s.assert.Equal(100, n) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name, nil, f) + err = s.az.storage.WriteFromFile(ctx, name, nil, f) s.assert.NoError(err) _ = f.Close() _ = os.Remove(name) @@ -4135,7 +4166,7 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnReadNoVaildate() { nil, ) - prop, err := s.az.storage.GetAttr(name) + prop, err := s.az.storage.GetAttr(ctx, name) s.assert.NoError(err) s.assert.NotEmpty(prop.MD5) @@ -4143,10 +4174,10 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnReadNoVaildate() { s.assert.NoError(err) s.assert.NotNil(f) - err = s.az.storage.ReadToFile(name, 0, 100, f) + err = s.az.storage.ReadToFile(ctx, name, 0, 100, f) s.assert.NoError(err) - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = os.Remove(name) }) } @@ -4183,21 +4214,21 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnReadNoVaildate() { // s.assert.Nil(err) // s.assert.NotNil(f) -// err = s.az.storage.ReadToFile(name, 0, int64(len(data)), f) +// err = s.az.storage.ReadToFile(ctx, name, 0, int64(len(data)), f) // s.assert.Nil(err) // fileData, err := os.ReadFile(name) // s.assert.Nil(err) // s.assert.EqualValues(data, fileData) // buf := make([]byte, len(data)) -// err = s.az.storage.ReadInBuffer(name, 0, int64(len(data)), buf, nil) +// err = s.az.storage.ReadInBuffer(ctx, name, 0, int64(len(data)), buf, nil) // s.assert.Nil(err) // s.assert.EqualValues(data, buf) // rbuf, err := s.az.storage.ReadBuffer(name, 0, int64(len(data))) // s.assert.Nil(err) // s.assert.EqualValues(data, rbuf) -// _ = s.az.storage.DeleteFile(name) +// _ = s.az.storage.DeleteFile(ctx, name) // _ = os.Remove(name) // } @@ -4227,7 +4258,7 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnReadNoVaildate() { // s.assert.Nil(err) // _, _ = f.Seek(0, 0) -// err = s.az.storage.WriteFromFile(name1, nil, f) +// err = s.az.storage.WriteFromFile(ctx, name1, nil, f) // s.assert.Nil(err) // file := s.containerClient.NewBlobClient(name1) @@ 
-4266,8 +4297,8 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnReadNoVaildate() { // s.assert.Nil(err) // s.assert.NotNil(resp.RequestID) -// _ = s.az.storage.DeleteFile(name1) -// _ = s.az.storage.DeleteFile(name2) +// _ = s.az.storage.DeleteFile(ctx, name1) +// _ = s.az.storage.DeleteFile(ctx, name2) // _ = os.Remove(name1) // } @@ -4525,7 +4556,7 @@ func (s *blockBlobTestSuite) TestList() { base := generateDirectoryName() s.setupHierarchy(base) - blobList, marker, err := s.az.storage.List("", nil, 0) + blobList, marker, err := s.az.storage.List(ctx, "", nil, 0) s.assert.NoError(err) emptyString := "" s.assert.Equal(&emptyString, marker) @@ -4533,7 +4564,7 @@ func (s *blockBlobTestSuite) TestList() { s.assert.Len(blobList, 3) // Test listing with prefix - blobList, marker, err = s.az.storage.List(base+"b/", nil, 0) + blobList, marker, err = s.az.storage.List(ctx, base+"b/", nil, 0) s.assert.NoError(err) s.assert.Equal(&emptyString, marker) s.assert.NotNil(blobList) @@ -4548,7 +4579,7 @@ func (s *blockBlobTestSuite) TestList() { // s.assert.Nil(marker) // Test listing with count - blobList, marker, err = s.az.storage.List("", nil, 1) + blobList, marker, err = s.az.storage.List(ctx, "", nil, 1) s.assert.NoError(err) s.assert.NotNil(blobList) s.assert.NotEmpty(marker) diff --git a/component/azstorage/connection.go b/component/azstorage/connection.go index 4612c4f10..b17b56300 100644 --- a/component/azstorage/connection.go +++ b/component/azstorage/connection.go @@ -26,6 +26,7 @@ package azstorage import ( + "context" "os" "github.com/Seagate/cloudfuse/common" @@ -90,47 +91,65 @@ type AzConnection interface { Configure(cfg AzStorageConfig) error UpdateConfig(cfg AzStorageConfig) error + ConnectionOkay(ctx context.Context) error SetupPipeline() error TestPipeline() error IsAccountADLS() bool - ListContainers() ([]string, error) + ListContainers(ctx context.Context) ([]string, error) // This is just for test, shall not be used otherwise SetPrefixPath(string) error - 
CreateFile(name string, mode os.FileMode) error - CreateDirectory(name string) error - CreateLink(source string, target string) error + CreateFile(ctx context.Context, name string, mode os.FileMode) error + CreateDirectory(ctx context.Context, name string) error + CreateLink(ctx context.Context, source string, target string) error - DeleteFile(name string) error - DeleteDirectory(name string) error + DeleteFile(ctx context.Context, name string) error + DeleteDirectory(ctx context.Context, name string) error - RenameFile(string, string, *internal.ObjAttr) error - RenameDirectory(string, string) error + RenameFile(context.Context, string, string, *internal.ObjAttr) error + RenameDirectory(context.Context, string, string) error - GetAttr(name string) (attr *internal.ObjAttr, err error) + GetAttr(ctx context.Context, name string) (attr *internal.ObjAttr, err error) // Standard operations to be supported by any account type - List(prefix string, marker *string, count int32) ([]*internal.ObjAttr, *string, error) - - ReadToFile(name string, offset int64, count int64, fi *os.File) error - ReadBuffer(name string, offset int64, length int64) ([]byte, error) - ReadInBuffer(name string, offset int64, length int64, data []byte, etag *string) error - - WriteFromFile(name string, metadata map[string]*string, fi *os.File) error - WriteFromBuffer(name string, metadata map[string]*string, data []byte) error - Write(options *internal.WriteFileOptions) error - GetFileBlockOffsets(name string) (*common.BlockOffsetList, error) - - ChangeMod(string, os.FileMode) error - ChangeOwner(string, int, int) error - TruncateFile(options internal.TruncateFileOptions) error - StageAndCommit(name string, bol *common.BlockOffsetList) error - - GetCommittedBlockList(string) (*internal.CommittedBlockList, error) - StageBlock(string, []byte, string) error - CommitBlocks(string, []string, *string) error + List( + ctx context.Context, + prefix string, + marker *string, + count int32, + ) 
([]*internal.ObjAttr, *string, error) + + ReadToFile(ctx context.Context, name string, offset int64, count int64, fi *os.File) error + ReadBuffer(ctx context.Context, name string, offset int64, length int64) ([]byte, error) + ReadInBuffer( + ctx context.Context, + name string, + offset int64, + length int64, + data []byte, + etag *string, + ) error + + WriteFromFile(ctx context.Context, name string, metadata map[string]*string, fi *os.File) error + WriteFromBuffer( + ctx context.Context, + name string, + metadata map[string]*string, + data []byte, + ) error + Write(ctx context.Context, options *internal.WriteFileOptions) error + GetFileBlockOffsets(ctx context.Context, name string) (*common.BlockOffsetList, error) + + ChangeMod(context.Context, string, os.FileMode) error + ChangeOwner(context.Context, string, int, int) error + TruncateFile(ctx context.Context, options internal.TruncateFileOptions) error + StageAndCommit(ctx context.Context, name string, bol *common.BlockOffsetList) error + + GetCommittedBlockList(context.Context, string) (*internal.CommittedBlockList, error) + StageBlock(context.Context, string, []byte, string) error + CommitBlocks(context.Context, string, []string, *string) error UpdateServiceClient(_, _ string) error diff --git a/component/azstorage/datalake.go b/component/azstorage/datalake.go index 1399d0780..d2762e54d 100644 --- a/component/azstorage/datalake.go +++ b/component/azstorage/datalake.go @@ -231,9 +231,15 @@ func (dl *Datalake) IsAccountADLS() bool { return dl.BlockBlob.IsAccountADLS() } -func (dl *Datalake) ListContainers() ([]string, error) { +// ConnectionOkay : check the connection to the cloud service (delegates to BlockBlob) +func (dl *Datalake) ConnectionOkay(ctx context.Context) error { + log.Trace("Datalake::ConnectionOkay : checking connection to cloud service") + return dl.BlockBlob.ConnectionOkay(ctx) +} + +func (dl *Datalake) ListContainers(ctx context.Context) ([]string, error) { log.Trace("Datalake::ListContainers :
Listing containers") - return dl.BlockBlob.ListContainers() + return dl.BlockBlob.ListContainers(ctx) } func (dl *Datalake) SetPrefixPath(path string) error { @@ -243,14 +249,14 @@ func (dl *Datalake) SetPrefixPath(path string) error { } // CreateFile : Create a new file in the filesystem/directory -func (dl *Datalake) CreateFile(name string, mode os.FileMode) error { +func (dl *Datalake) CreateFile(ctx context.Context, name string, mode os.FileMode) error { log.Trace("Datalake::CreateFile : name %s", name) - err := dl.BlockBlob.CreateFile(name, mode) + err := dl.BlockBlob.CreateFile(ctx, name, mode) if err != nil { log.Err("Datalake::CreateFile : Failed to create file %s [%s]", name, err.Error()) return err } - err = dl.ChangeMod(name, mode) + err = dl.ChangeMod(ctx, name, mode) if err != nil { log.Err( "Datalake::CreateFile : Failed to set permissions on file %s [%s]", @@ -264,11 +270,11 @@ func (dl *Datalake) CreateFile(name string, mode os.FileMode) error { } // CreateDirectory : Create a new directory in the filesystem/directory -func (dl *Datalake) CreateDirectory(name string) error { +func (dl *Datalake) CreateDirectory(ctx context.Context, name string) error { log.Trace("Datalake::CreateDirectory : name %s", name) directoryURL := dl.getDirectoryClient(name) - _, err := directoryURL.Create(context.Background(), &directory.CreateOptions{ + _, err := directoryURL.Create(ctx, &directory.CreateOptions{ CPKInfo: dl.datalakeCPKOpt, AccessConditions: &directory.AccessConditions{ ModifiedAccessConditions: &directory.ModifiedAccessConditions{ @@ -308,16 +314,16 @@ func (dl *Datalake) CreateDirectory(name string) error { } // CreateLink : Create a symlink in the filesystem/directory -func (dl *Datalake) CreateLink(source string, target string) error { +func (dl *Datalake) CreateLink(ctx context.Context, source string, target string) error { log.Trace("Datalake::CreateLink : %s -> %s", source, target) - return dl.BlockBlob.CreateLink(source, target) + return 
dl.BlockBlob.CreateLink(ctx, source, target) } // DeleteFile : Delete a file in the filesystem/directory -func (dl *Datalake) DeleteFile(name string) (err error) { +func (dl *Datalake) DeleteFile(ctx context.Context, name string) (err error) { log.Trace("Datalake::DeleteFile : name %s", name) fileClient := dl.getFileClient(name) - _, err = fileClient.Delete(context.Background(), nil) + _, err = fileClient.Delete(ctx, nil) if err != nil { serr := storeDatalakeErrToErr(err) switch serr { @@ -344,11 +350,11 @@ func (dl *Datalake) DeleteFile(name string) (err error) { } // DeleteDirectory : Delete a directory in the filesystem/directory -func (dl *Datalake) DeleteDirectory(name string) (err error) { +func (dl *Datalake) DeleteDirectory(ctx context.Context, name string) (err error) { log.Trace("Datalake::DeleteDirectory : name %s", name) directoryClient := dl.getDirectoryClient(name) - _, err = directoryClient.Delete(context.Background(), nil) + _, err = directoryClient.Delete(ctx, nil) // TODO : There is an ability to pass a continuation token here for recursive delete, should we implement this logic to follow continuation token? The SDK does not currently do this. if err != nil { serr := storeDatalakeErrToErr(err) @@ -371,13 +377,18 @@ func (dl *Datalake) DeleteDirectory(name string) (err error) { // RenameFile : Rename the file // While renaming the file, Creation time is preserved but LMT is changed for the destination blob. 
// and also Etag of the destination blob changes -func (dl *Datalake) RenameFile(source string, target string, srcAttr *internal.ObjAttr) error { +func (dl *Datalake) RenameFile( + ctx context.Context, + source string, + target string, + srcAttr *internal.ObjAttr, +) error { log.Trace("Datalake::RenameFile : %s -> %s", source, target) fileClient := dl.getFileClientPathEscape(source) renameResponse, err := fileClient.Rename( - context.Background(), + ctx, dl.getFormattedPath(target), &file.RenameOptions{ CPKInfo: dl.datalakeCPKOpt, @@ -403,12 +414,12 @@ func (dl *Datalake) RenameFile(source string, target string, srcAttr *internal.O } // RenameDirectory : Rename the directory -func (dl *Datalake) RenameDirectory(source string, target string) error { +func (dl *Datalake) RenameDirectory(ctx context.Context, source string, target string) error { log.Trace("Datalake::RenameDirectory : %s -> %s", source, target) directoryClient := dl.getDirectoryClientPathEscape(source) _, err := directoryClient.Rename( - context.Background(), + ctx, dl.getFormattedPath(target), &directory.RenameOptions{ CPKInfo: dl.datalakeCPKOpt, @@ -434,11 +445,14 @@ func (dl *Datalake) RenameDirectory(source string, target string) error { } // GetAttr : Retrieve attributes of the path -func (dl *Datalake) GetAttr(name string) (blobAttr *internal.ObjAttr, err error) { +func (dl *Datalake) GetAttr( + ctx context.Context, + name string, +) (blobAttr *internal.ObjAttr, err error) { log.Trace("Datalake::GetAttr : name %s", name) fileClient := dl.getFileClient(name) - prop, err := fileClient.GetProperties(context.Background(), &file.GetPropertiesOptions{ + prop, err := fileClient.GetProperties(ctx, &file.GetPropertiesOptions{ CPKInfo: dl.datalakeCPKOpt, }) if err != nil { @@ -485,7 +499,7 @@ func (dl *Datalake) GetAttr(name string) (blobAttr *internal.ObjAttr, err error) } if dl.Config.honourACL && dl.Config.authConfig.ObjectID != "" { - acl, err := fileClient.GetAccessControl(context.Background(), nil) + 
acl, err := fileClient.GetAccessControl(ctx, nil) if err != nil { // Just ignore the error here as rest of the attributes have been retrieved log.Err("Datalake::GetAttr : Failed to get ACL for %s [%s]", name, err.Error()) @@ -521,36 +535,50 @@ func (dl *Datalake) GetAttr(name string) (blobAttr *internal.ObjAttr, err error) // This fetches the list using a marker so the caller code should handle marker logic // If count=0 - fetch max entries func (dl *Datalake) List( + ctx context.Context, prefix string, marker *string, count int32, ) ([]*internal.ObjAttr, *string, error) { - return dl.BlockBlob.List(prefix, marker, count) + return dl.BlockBlob.List(ctx, prefix, marker, count) } // ReadToFile : Download a file to a local file -func (dl *Datalake) ReadToFile(name string, offset int64, count int64, fi *os.File) (err error) { - return dl.BlockBlob.ReadToFile(name, offset, count, fi) +func (dl *Datalake) ReadToFile( + ctx context.Context, + name string, + offset int64, + count int64, + fi *os.File, +) (err error) { + return dl.BlockBlob.ReadToFile(ctx, name, offset, count, fi) } // ReadBuffer : Download a specific range from a file to a buffer -func (dl *Datalake) ReadBuffer(name string, offset int64, length int64) ([]byte, error) { - return dl.BlockBlob.ReadBuffer(name, offset, length) +func (dl *Datalake) ReadBuffer( + ctx context.Context, + name string, + offset int64, + length int64, +) ([]byte, error) { + return dl.BlockBlob.ReadBuffer(ctx, name, offset, length) } // ReadInBuffer : Download specific range from a file to a user provided buffer func (dl *Datalake) ReadInBuffer( + ctx context.Context, name string, offset int64, length int64, data []byte, etag *string, ) error { - return dl.BlockBlob.ReadInBuffer(name, offset, length, data, etag) + return dl.BlockBlob.ReadInBuffer(ctx, name, offset, length, data, etag) } // WriteFromFile : Upload local file to file func (dl *Datalake) WriteFromFile( + ctx context.Context, name string, metadata map[string]*string, fi 
*os.File, @@ -563,7 +591,7 @@ func (dl *Datalake) WriteFromFile( if dl.Config.preserveACL { fileClient = dl.Filesystem.NewFileClient(filepath.Join(dl.Config.prefixPath, name)) - resp, err := fileClient.GetAccessControl(context.Background(), nil) + resp, err := fileClient.GetAccessControl(ctx, nil) if err != nil { log.Err("Datalake::getACL : Failed to get ACLs for file %s [%s]", name, err.Error()) } else if resp.ACL != nil { @@ -572,13 +600,13 @@ func (dl *Datalake) WriteFromFile( } // Upload the file, which will override the permissions and ACL - retCode := dl.BlockBlob.WriteFromFile(name, metadata, fi) + retCode := dl.BlockBlob.WriteFromFile(ctx, name, metadata, fi) if acl != "" { // Cannot set both permissions and ACL in one call. ACL includes permission as well so just setting those back // Just setting up the permissions will delete existing ACLs applied on the blob so do not convert this code to // just set the permissions. - _, err := fileClient.SetAccessControl(context.Background(), &file.SetAccessControlOptions{ + _, err := fileClient.SetAccessControl(ctx, &file.SetAccessControlOptions{ ACL: &acl, }) @@ -592,29 +620,41 @@ func (dl *Datalake) WriteFromFile( } // WriteFromBuffer : Upload from a buffer to a file -func (dl *Datalake) WriteFromBuffer(name string, metadata map[string]*string, data []byte) error { - return dl.BlockBlob.WriteFromBuffer(name, metadata, data) +func (dl *Datalake) WriteFromBuffer( + ctx context.Context, + name string, + metadata map[string]*string, + data []byte, +) error { + return dl.BlockBlob.WriteFromBuffer(ctx, name, metadata, data) } // Write : Write to a file at given offset -func (dl *Datalake) Write(options *internal.WriteFileOptions) error { - return dl.BlockBlob.Write(options) +func (dl *Datalake) Write(ctx context.Context, options *internal.WriteFileOptions) error { + return dl.BlockBlob.Write(ctx, options) } -func (dl *Datalake) StageAndCommit(name string, bol *common.BlockOffsetList) error { - return 
dl.BlockBlob.StageAndCommit(name, bol) +func (dl *Datalake) StageAndCommit( + ctx context.Context, + name string, + bol *common.BlockOffsetList, +) error { + return dl.BlockBlob.StageAndCommit(ctx, name, bol) } -func (dl *Datalake) GetFileBlockOffsets(name string) (*common.BlockOffsetList, error) { - return dl.BlockBlob.GetFileBlockOffsets(name) +func (dl *Datalake) GetFileBlockOffsets( + ctx context.Context, + name string, +) (*common.BlockOffsetList, error) { + return dl.BlockBlob.GetFileBlockOffsets(ctx, name) } -func (dl *Datalake) TruncateFile(options internal.TruncateFileOptions) error { - return dl.BlockBlob.TruncateFile(options) +func (dl *Datalake) TruncateFile(ctx context.Context, options internal.TruncateFileOptions) error { + return dl.BlockBlob.TruncateFile(ctx, options) } // ChangeMod : Change mode of a path -func (dl *Datalake) ChangeMod(name string, mode os.FileMode) error { +func (dl *Datalake) ChangeMod(ctx context.Context, name string, mode os.FileMode) error { log.Trace("Datalake::ChangeMod : Change mode of file %s to %s", name, mode) fileClient := dl.getFileClient(name) @@ -623,7 +663,7 @@ func (dl *Datalake) ChangeMod(name string, mode os.FileMode) error { // and create new string with the username included in the string // Keeping this code here so in future if its required we can get the string and manipulate - currPerm, err := fileURL.getACL(context.Background()) + currPerm, err := fileURL.getACL(ctx) e := storeDatalakeErrToErr(err) if e == ErrFileNotFound { return syscall.ENOENT @@ -634,7 +674,7 @@ func (dl *Datalake) ChangeMod(name string, mode os.FileMode) error { */ newPerm := getACLPermissions(mode) - _, err := fileClient.SetAccessControl(context.Background(), &file.SetAccessControlOptions{ + _, err := fileClient.SetAccessControl(ctx, &file.SetAccessControlOptions{ Permissions: &newPerm, }) if err != nil { @@ -659,7 +699,7 @@ func (dl *Datalake) ChangeMod(name string, mode os.FileMode) error { } // ChangeOwner : Change owner of a path 
-func (dl *Datalake) ChangeOwner(name string, _ int, _ int) error { +func (dl *Datalake) ChangeOwner(ctx context.Context, name string, _ int, _ int) error { log.Trace("Datalake::ChangeOwner : name %s", name) if dl.Config.ignoreAccessModifiers { @@ -672,7 +712,7 @@ func (dl *Datalake) ChangeOwner(name string, _ int, _ int) error { // fileURL := dl.Filesystem.NewRootDirectoryURL().NewFileURL(common.JoinUnixFilepath(dl.Config.prefixPath, name)) // group := strconv.Itoa(gid) // owner := strconv.Itoa(uid) - // _, err := fileURL.SetAccessControl(context.Background(), azbfs.BlobFSAccessControl{Group: group, Owner: owner}) + // _, err := fileURL.SetAccessControl(ctx, azbfs.BlobFSAccessControl{Group: group, Owner: owner}) // e := storeDatalakeErrToErr(err) // if e == ErrFileNotFound { // return syscall.ENOENT @@ -684,18 +724,26 @@ func (dl *Datalake) ChangeOwner(name string, _ int, _ int) error { } // GetCommittedBlockList : Get the list of committed blocks -func (dl *Datalake) GetCommittedBlockList(name string) (*internal.CommittedBlockList, error) { - return dl.BlockBlob.GetCommittedBlockList(name) +func (dl *Datalake) GetCommittedBlockList( + ctx context.Context, + name string, +) (*internal.CommittedBlockList, error) { + return dl.BlockBlob.GetCommittedBlockList(ctx, name) } // StageBlock : stages a block and returns its blockid -func (dl *Datalake) StageBlock(name string, data []byte, id string) error { - return dl.BlockBlob.StageBlock(name, data, id) +func (dl *Datalake) StageBlock(ctx context.Context, name string, data []byte, id string) error { + return dl.BlockBlob.StageBlock(ctx, name, data, id) } // CommitBlocks : persists the block list -func (dl *Datalake) CommitBlocks(name string, blockList []string, newEtag *string) error { - return dl.BlockBlob.CommitBlocks(name, blockList, newEtag) +func (dl *Datalake) CommitBlocks( + ctx context.Context, + name string, + blockList []string, + newEtag *string, +) error { + return dl.BlockBlob.CommitBlocks(ctx, name, 
blockList, newEtag) } func (dl *Datalake) SetFilter(filter string) error { diff --git a/component/azstorage/datalake_test.go b/component/azstorage/datalake_test.go index 4c621cdaf..ab83d048e 100644 --- a/component/azstorage/datalake_test.go +++ b/component/azstorage/datalake_test.go @@ -2584,6 +2584,7 @@ func (s *datalakeTestSuite) TestFlushFileUpdateChunkedFile() { s.assert.NoError(err) h.CacheObj.BlockOffsetList.BlockList[1].Data = make([]byte, blockSize) err = s.az.storage.ReadInBuffer( + ctx, name, int64(blockSize), int64(blockSize), @@ -2643,6 +2644,7 @@ func (s *datalakeTestSuite) TestFlushFileTruncateUpdateChunkedFile() { h.CacheObj.BlockOffsetList.BlockList[1].Data = make([]byte, blockSize/2) h.CacheObj.BlockOffsetList.BlockList[1].EndIndex = int64(blockSize + blockSize/2) err = s.az.storage.ReadInBuffer( + ctx, name, int64(blockSize), int64(blockSize)/2, @@ -3173,21 +3175,21 @@ func (s *datalakeTestSuite) TestDownloadWithCPKEnabled() { s.assert.NoError(err) s.assert.NotNil(f) - err = s.az.storage.ReadToFile(name, 0, int64(len(data)), f) + err = s.az.storage.ReadToFile(ctx, name, 0, int64(len(data)), f) s.assert.NoError(err) fileData, err := os.ReadFile(name) s.assert.NoError(err) s.assert.Equal(data, fileData) buf := make([]byte, len(data)) - err = s.az.storage.ReadInBuffer(name, 0, int64(len(data)), buf, nil) + err = s.az.storage.ReadInBuffer(ctx, name, 0, int64(len(data)), buf, nil) s.assert.NoError(err) s.assert.Equal(data, buf) - rbuf, err := s.az.storage.ReadBuffer(name, 0, int64(len(data))) + rbuf, err := s.az.storage.ReadBuffer(ctx, name, 0, int64(len(data))) s.assert.NoError(err) s.assert.Equal(data, rbuf) - _ = s.az.storage.DeleteFile(name) + _ = s.az.storage.DeleteFile(ctx, name) _ = os.Remove(name) } @@ -3224,12 +3226,12 @@ func (s *datalakeTestSuite) TestUploadWithCPKEnabled() { s.assert.NoError(err) _, _ = f.Seek(0, 0) - err = s.az.storage.WriteFromFile(name1, nil, f) + err = s.az.storage.WriteFromFile(ctx, name1, nil, f) s.assert.NoError(err) 
// Blob should have updated data fileClient := s.containerClient.NewFileClient(name1) - attr, err := s.az.storage.(*Datalake).GetAttr(name1) + attr, err := s.az.storage.(*Datalake).GetAttr(ctx, name1) s.assert.NoError(err) s.assert.NotNil(attr) @@ -3247,7 +3249,7 @@ func (s *datalakeTestSuite) TestUploadWithCPKEnabled() { s.assert.NotNil(resp.RequestID) name2 := generateFileName() - err = s.az.storage.WriteFromBuffer(name2, nil, data) + err = s.az.storage.WriteFromBuffer(ctx, name2, nil, data) s.assert.NoError(err) fileClient = s.containerClient.NewFileClient(name2) @@ -3264,8 +3266,8 @@ func (s *datalakeTestSuite) TestUploadWithCPKEnabled() { s.assert.NoError(err) s.assert.NotNil(resp.RequestID) - _ = s.az.storage.DeleteFile(name1) - _ = s.az.storage.DeleteFile(name2) + _ = s.az.storage.DeleteFile(ctx, name1) + _ = s.az.storage.DeleteFile(ctx, name2) _ = os.Remove(name1) } @@ -3511,7 +3513,7 @@ func (s *datalakeTestSuite) TestList() { base := generateDirectoryName() s.setupHierarchy(base) - blobList, marker, err := s.az.storage.List(base, nil, 0) + blobList, marker, err := s.az.storage.List(ctx, base, nil, 0) s.assert.NoError(err) emptyString := "" s.assert.Equal(&emptyString, marker) @@ -3520,7 +3522,7 @@ func (s *datalakeTestSuite) TestList() { s.assert.NotEqual(0, blobList[0].Mode) // Test listing with prefix - blobList, marker, err = s.az.storage.List(base+"b/", nil, 0) + blobList, marker, err = s.az.storage.List(ctx, base+"b/", nil, 0) s.assert.NoError(err) s.assert.Equal(&emptyString, marker) s.assert.NotNil(blobList) @@ -3529,13 +3531,13 @@ func (s *datalakeTestSuite) TestList() { s.assert.NotEqual(0, blobList[0].Mode) // Test listing with marker - blobList, marker, err = s.az.storage.List(base, new("invalid-marker"), 0) + blobList, marker, err = s.az.storage.List(ctx, base, new("invalid-marker"), 0) s.assert.Error(err) s.assert.Empty(blobList) s.assert.Nil(marker) // Test listing with count - blobList, marker, err = s.az.storage.List("", nil, 1) + 
blobList, marker, err = s.az.storage.List(ctx, "", nil, 1) s.assert.NoError(err) s.assert.NotNil(blobList) s.assert.NotEmpty(marker) diff --git a/component/loopback/loopback_fs.go b/component/loopback/loopback_fs.go index 85ba0ccf6..beb98128a 100644 --- a/component/loopback/loopback_fs.go +++ b/component/loopback/loopback_fs.go @@ -100,6 +100,10 @@ func (lfs *LoopbackFS) Priority() internal.ComponentPriority { return internal.EComponentPriority.Consumer() } +func (lfs *LoopbackFS) CloudConnected() bool { + return true +} + func (lfs *LoopbackFS) CreateDir(options internal.CreateDirOptions) error { log.Trace("LoopbackFS::CreateDir : name=%s", options.Name) dirPath := filepath.Join(lfs.path, options.Name) diff --git a/component/s3storage/client.go b/component/s3storage/client.go index dc56789d9..98e059e41 100644 --- a/component/s3storage/client.go +++ b/component/s3storage/client.go @@ -156,8 +156,10 @@ func (cl *Client) Configure(cfg Config) error { ) } + ctx := context.Background() + defaultConfig, err := config.LoadDefaultConfig( - context.Background(), + ctx, config.WithSharedConfigProfile(cl.Config.AuthConfig.Profile), config.WithCredentialsProvider(credentialsProvider), config.WithAppID(UserAgent()), @@ -170,7 +172,7 @@ func (cl *Client) Configure(cfg Config) error { // If a config profile is provided the sdk checks that it exists, otherwise it fails and // does not try other credentials. So try the other ones here if the profile does not exist defaultConfig, err = config.LoadDefaultConfig( - context.Background(), + ctx, config.WithCredentialsProvider(credentialsProvider), config.WithAppID(UserAgent()), config.WithRegion(cl.Config.AuthConfig.Region), @@ -197,7 +199,7 @@ func (cl *Client) Configure(cfg Config) error { } // ListBuckets here to test connection to S3 backend - bucketList, err := cl.ListBuckets() + bucketList, err := cl.ListBuckets(ctx) if err != nil { log.Err("Client::Configure : listing buckets failed. 
Here's why: %v", err) @@ -234,7 +236,7 @@ func (cl *Client) Configure(cfg Config) error { // if no bucket-name was set, default to the first authorized bucket in the list if cl.Config.AuthConfig.BucketName == "" { // which buckets does the user have access to? - authorizedBucketList := cl.filterAuthorizedBuckets(bucketList) + authorizedBucketList := cl.filterAuthorizedBuckets(ctx, bucketList) switch len(authorizedBucketList) { case 0: // if there are none, return an error @@ -259,7 +261,7 @@ func (cl *Client) Configure(cfg Config) error { } // Check that the provided bucket exists and that user has access to bucket - _, err = cl.headBucket(cl.Config.AuthConfig.BucketName) + _, err = cl.headBucket(ctx, cl.Config.AuthConfig.BucketName) if err != nil { // From the aws-sdk-go-v2 documentation // If the bucket does not exist or you do not have permission to access it, @@ -270,7 +272,7 @@ func (cl *Client) Configure(cfg Config) error { } // Use list objects validate user can list objects - _, _, err = cl.List("/", nil, 1) + _, _, err = cl.List(ctx, "/", nil, 1) if err != nil { log.Err("Client::Configure : listing objects failed. Here's why: %v", err) return err @@ -288,19 +290,22 @@ func (cl *Client) Configure(cfg Config) error { } // Use ListBuckets and filterAuthorizedBuckets to get a list of buckets that the user has access to -func (cl *Client) ListAuthorizedBuckets() ([]string, error) { +func (cl *Client) ListAuthorizedBuckets(ctx context.Context) ([]string, error) { log.Trace("Client::ListAuthorizedBuckets") - allBuckets, err := cl.ListBuckets() + allBuckets, err := cl.ListBuckets(ctx) if err != nil { log.Err("Client::ListAuthorizedBuckets : Failed to list buckets. 
Here's why: %v", err) return allBuckets, err } - authorizedBuckets := cl.filterAuthorizedBuckets(allBuckets) + authorizedBuckets := cl.filterAuthorizedBuckets(ctx, allBuckets) return authorizedBuckets, nil } // filter out buckets for which we do not have permissions -func (cl *Client) filterAuthorizedBuckets(bucketList []string) (authorizedBucketList []string) { +func (cl *Client) filterAuthorizedBuckets( + ctx context.Context, + bucketList []string, +) (authorizedBucketList []string) { if len(bucketList) == 0 { return bucketList } @@ -311,7 +316,7 @@ func (cl *Client) filterAuthorizedBuckets(bucketList []string) (authorizedBucket wg.Add(1) go func(bucketName string) { defer wg.Done() - if _, err := cl.headBucket(bucketName); err == nil { + if _, err := cl.headBucket(ctx, bucketName); err == nil { authorizedBuckets <- bucketName } }(bucketName) @@ -383,21 +388,21 @@ func (cl *Client) SetPrefixPath(path string) error { } // CreateFile : Create a new file in the bucket/virtual directory -func (cl *Client) CreateFile(name string, mode os.FileMode) error { +func (cl *Client) CreateFile(ctx context.Context, name string, mode os.FileMode) error { log.Trace("Client::CreateFile : name %s", name) var data []byte - return cl.WriteFromBuffer(name, nil, data) + return cl.WriteFromBuffer(ctx, name, nil, data) } // CreateDirectory : Create a new directory in the bucket/virtual directory -func (cl *Client) CreateDirectory(name string) error { +func (cl *Client) CreateDirectory(ctx context.Context, name string) error { log.Trace("Client::CreateDirectory : name %s", name) // If the S3 endpoint does not support directory markers then we can do nothing here. // So, let's make it clear: we expect the OS to call GetAttr() on the directory // to make sure it doesn't exist before trying to create it. 
if cl.Config.enableDirMarker { - err := cl.putObject(putObjectOptions{name: name, isDir: true}) + err := cl.putObject(ctx, putObjectOptions{name: name, isDir: true}) if err != nil { log.Err("Client::CreateDirectory : putObject(%s) failed. Here's why: %v", name, err) return err @@ -408,7 +413,12 @@ func (cl *Client) CreateDirectory(name string) error { } // CreateLink : Create a symlink in the bucket/virtual directory -func (cl *Client) CreateLink(source string, target string, isSymlink bool) error { +func (cl *Client) CreateLink( + ctx context.Context, + source string, + target string, + isSymlink bool, +) error { log.Trace("Client::CreateLink : %s -> %s", source, target) data := []byte(target) @@ -416,15 +426,15 @@ func (cl *Client) CreateLink(source string, target string, isSymlink bool) error if isSymlink { symlinkMap[symlinkKey] = new("true") } - return cl.WriteFromBuffer(source, symlinkMap, data) + return cl.WriteFromBuffer(ctx, source, symlinkMap, data) } // DeleteFile : Delete an object. // if the file does not exist, this returns an error (ENOENT). -func (cl *Client) DeleteFile(name string) error { +func (cl *Client) DeleteFile(ctx context.Context, name string) error { log.Trace("Client::DeleteFile : name %s", name) // first check if the object exists - attr, err := cl.getFileAttr(name) + attr, err := cl.getFileAttr(ctx, name) if err == syscall.ENOENT { log.Err("Client::DeleteFile : %s does not exist", name) return syscall.ENOENT @@ -440,7 +450,7 @@ func (cl *Client) DeleteFile(name string) error { isSymLink := attr.IsSymlink() // delete the object - err = cl.deleteObject(name, isSymLink, attr.IsDir()) + err = cl.deleteObject(ctx, name, isSymLink, attr.IsDir()) if err != nil { log.Err("Client::DeleteFile : Failed to delete object %s. Here's why: %v", name, err) return err @@ -452,7 +462,7 @@ func (cl *Client) DeleteFile(name string) error { // DeleteDirectory : Recursively delete all objects with the given prefix. 
// If name is given without a trailing slash, a slash will be added. // If the directory does not exist, no error will be returned. -func (cl *Client) DeleteDirectory(name string) error { +func (cl *Client) DeleteDirectory(ctx context.Context, name string) error { log.Trace("Client::DeleteDirectory : name %s", name) // make sure name has a trailing slash @@ -463,7 +473,7 @@ func (cl *Client) DeleteDirectory(name string) error { var err error for !done { // list all objects with the prefix - objects, marker, err := cl.List(name, marker, 0) + objects, marker, err := cl.List(ctx, name, marker, 0) if err != nil { log.Warn( "Client::DeleteDirectory : Failed to list object with prefix %s. Here's why: %v", @@ -488,7 +498,7 @@ func (cl *Client) DeleteDirectory(name string) error { var objectsToDelete []*internal.ObjAttr for _, object := range objects { if object.IsDir() { - err = cl.DeleteDirectory(object.Path) + err = cl.DeleteDirectory(ctx, object.Path) if err != nil { log.Err( "Client::DeleteDirectory : Failed to delete directory %s. Here's why: %v", @@ -504,7 +514,7 @@ func (cl *Client) DeleteDirectory(name string) error { } } // Delete the collected files - err = cl.deleteObjects(objectsToDelete) + err = cl.deleteObjects(ctx, objectsToDelete) if err != nil { log.Err( "Client::DeleteDirectory : deleteObjects() failed when called with %d objects. Here's why: %v", @@ -520,7 +530,7 @@ func (cl *Client) DeleteDirectory(name string) error { // Delete the current directory if cl.Config.enableDirMarker { - err = cl.deleteObject(name, false, true) + err = cl.deleteObject(ctx, name, false, true) if err != nil { log.Err( "Client::DeleteDirectory : Failed to delete directory %s. Here's why: %v", @@ -534,10 +544,16 @@ func (cl *Client) DeleteDirectory(name string) error { } // RenameFile : Rename the object (copy then delete). 
-func (cl *Client) RenameFile(source string, target string, isSymLink bool) error { +func (cl *Client) RenameFile( + ctx context.Context, + source string, + target string, + isSymLink bool, +) error { log.Trace("Client::RenameFile : %s -> %s", source, target) err := cl.renameObject( + ctx, renameObjectOptions{source: source, target: target, isSymLink: isSymLink}, ) if err != nil { @@ -553,7 +569,7 @@ func (cl *Client) RenameFile(source string, target string, isSymLink bool) error } // RenameDirectory : Rename the directory -func (cl *Client) RenameDirectory(source string, target string) error { +func (cl *Client) RenameDirectory(ctx context.Context, source string, target string) error { log.Trace("Client::RenameDirectory : %s -> %s", source, target) // TODO: should this fail when the target directory exists? @@ -567,7 +583,7 @@ func (cl *Client) RenameDirectory(source string, target string) error { var marker *string for !done { - sourceObjects, marker, err := cl.List(internal.ExtendDirName(source), marker, 0) + sourceObjects, marker, err := cl.List(ctx, internal.ExtendDirName(source), marker, 0) if err != nil { log.Err( "Client::RenameDirectory : Failed to list objects with prefix %s. 
Here's why: %v", @@ -581,9 +597,10 @@ func (cl *Client) RenameDirectory(source string, target string) error { srcPath := srcObject.Path dstPath := strings.Replace(srcPath, source, target, 1) if srcObject.IsDir() { - err = cl.RenameDirectory(srcPath, dstPath) + err = cl.RenameDirectory(ctx, srcPath, dstPath) } else { err = cl.RenameFile( + ctx, srcPath, dstPath, srcObject.IsSymlink(), @@ -604,6 +621,7 @@ func (cl *Client) RenameDirectory(source string, target string) error { // Rename the current directory if cl.Config.enableDirMarker { err := cl.renameObject( + ctx, renameObjectOptions{source: source, target: target, isDir: true}, ) if err != nil { @@ -623,7 +641,7 @@ func (cl *Client) RenameDirectory(source string, target string) error { // GetAttr : Get attributes for a given file or folder. // If name is a file, it should not have a trailing slash. // If name is a directory, the trailing slash is optional. -func (cl *Client) GetAttr(name string) (*internal.ObjAttr, error) { +func (cl *Client) GetAttr(ctx context.Context, name string) (*internal.ObjAttr, error) { log.Trace("Client::GetAttr : name %s", name) explicitDirLookup := len(name) > 0 && name[len(name)-1] == '/' dirName := internal.ExtendDirName(name) @@ -631,7 +649,7 @@ func (cl *Client) GetAttr(name string) (*internal.ObjAttr, error) { // first let's suppose the caller is looking for a file // so if this was called with a trailing slash, don't look for an object if !explicitDirLookup { - attr, err := cl.getFileAttr(name) + attr, err := cl.getFileAttr(ctx, name) if err == nil { return attr, err } @@ -644,30 +662,31 @@ func (cl *Client) GetAttr(name string) (*internal.ObjAttr, error) { } // now search for that as a directory - return cl.getDirectoryAttr(dirName, explicitDirLookup) + return cl.getDirectoryAttr(ctx, dirName, explicitDirLookup) } // Get attributes for the given file path. // Return ENOENT if there is no corresponding object in the bucket. 
// name should not have a trailing slash (or nothing will be found!). -func (cl *Client) getFileAttr(name string) (*internal.ObjAttr, error) { +func (cl *Client) getFileAttr(ctx context.Context, name string) (*internal.ObjAttr, error) { log.Trace("Client::getFileAttr : name %s", name) isSymlink := false - object, err := cl.headObject(name, isSymlink, false) + object, err := cl.headObject(ctx, name, isSymlink, false) if err == syscall.ENOENT && !cl.Config.disableSymlink { isSymlink = true - return cl.headObject(name, isSymlink, false) + return cl.headObject(ctx, name, isSymlink, false) } return object, err } func (cl *Client) getDirectoryAttr( + ctx context.Context, dirName string, explicitDirLookup bool, ) (*internal.ObjAttr, error) { log.Trace("Client::getDirectoryAttr : name %s", dirName) - objects, _, listErr := cl.List(dirName, nil, 1) + objects, _, listErr := cl.List(ctx, dirName, nil, 1) // Otherwise, the cloud does not support directory markers, or there is no // marker, so look for an object in the directory. @@ -685,7 +704,7 @@ func (cl *Client) getDirectoryAttr( // For file-like names, this saves one extra HeadObject // call on miss-heavy paths that are not directories. if cl.Config.enableDirMarker && shouldProbeDirMarker(dirName, explicitDirLookup) { - headAttr, headErr := cl.headObject(dirName, false, true) + headAttr, headErr := cl.headObject(ctx, dirName, false, true) if headErr == nil { return headAttr, nil } @@ -741,7 +760,13 @@ func shouldProbeDirMarker(dirName string, explicitDirLookup bool) bool { // Download object data to a file handle. // Read starting at a byte offset from the start of the object, with length in bytes = count. // count = 0 reads to the end of the object. 
-func (cl *Client) ReadToFile(name string, offset int64, count int64, fi *os.File) error { +func (cl *Client) ReadToFile( + ctx context.Context, + name string, + offset int64, + count int64, + fi *os.File, +) error { log.Trace( "Client::ReadToFile : name %s, offset : %d, count %d -> file %s", name, @@ -752,7 +777,7 @@ func (cl *Client) ReadToFile(name string, offset int64, count int64, fi *os.File // If we are reading the entire object, then we can use a multipart download if !cl.Config.disableConcurrentDownload && offset == 0 && count == 0 { - err := cl.getObjectMultipartDownload(name, fi) + err := cl.getObjectMultipartDownload(ctx, name, fi) if err != nil { log.Err( "Client::ReadToFile : getObjectMultipartDownload(%s) failed. Here's why: %v", @@ -766,6 +791,7 @@ func (cl *Client) ReadToFile(name string, offset int64, count int64, fi *os.File // get object data objectDataReader, err := cl.getObject( + ctx, getObjectOptions{name: name, offset: offset, count: count}, ) if err != nil { @@ -802,6 +828,7 @@ func (cl *Client) ReadToFile(name string, offset int64, count int64, fi *os.File // len = 0 reads to the end of the object. // name is the file path func (cl *Client) ReadBuffer( + ctx context.Context, name string, offset int64, length int64, @@ -810,6 +837,7 @@ func (cl *Client) ReadBuffer( log.Trace("Client::ReadBuffer : name %s (%d+%d)", name, offset, length) // get object data objectDataReader, err := cl.getObject( + ctx, getObjectOptions{name: name, offset: offset, count: length, isSymLink: isSymlink}, ) if err != nil { @@ -834,10 +862,17 @@ func (cl *Client) ReadBuffer( // Reads starting at a byte offset from the start of the object, with length in bytes = len. // len = 0 reads to the end of the object. // name is the file path. 
-func (cl *Client) ReadInBuffer(name string, offset int64, length int64, data []byte) error { +func (cl *Client) ReadInBuffer( + ctx context.Context, + name string, + offset int64, + length int64, + data []byte, +) error { log.Trace("Client::ReadInBuffer : name %s offset %d len %d", name, offset, length) // get object data objectDataReader, err := cl.getObject( + ctx, getObjectOptions{name: name, offset: offset, count: length}, ) if err != nil { @@ -857,7 +892,12 @@ func (cl *Client) ReadInBuffer(name string, offset int64, length int64, data []b // Upload from a file handle to an object. // The metadata parameter is not used. -func (cl *Client) WriteFromFile(name string, metadata map[string]*string, fi *os.File) error { +func (cl *Client) WriteFromFile( + ctx context.Context, + name string, + metadata map[string]*string, + fi *os.File, +) error { isSymlink := getSymlinkBool(metadata) log.Trace("Client::WriteFromFile : file %s -> name %s", fi.Name(), name) @@ -880,6 +920,7 @@ func (cl *Client) WriteFromFile(name string, metadata map[string]*string, fi *os // upload file data err = cl.putObject( + ctx, putObjectOptions{name: name, objectData: fi, size: stat.Size(), isSymLink: isSymlink}, ) if err != nil { @@ -905,7 +946,12 @@ func (cl *Client) WriteFromFile(name string, metadata map[string]*string, fi *os // WriteFromBuffer : Upload from a buffer to an object. // name is the file path. 
-func (cl *Client) WriteFromBuffer(name string, metadata map[string]*string, data []byte) error { +func (cl *Client) WriteFromBuffer( + ctx context.Context, + name string, + metadata map[string]*string, + data []byte, +) error { log.Trace("Client::WriteFromBuffer : name %s", name) isSymlink := getSymlinkBool(metadata) @@ -914,6 +960,7 @@ func (cl *Client) WriteFromBuffer(name string, metadata map[string]*string, data // upload data to object // TODO: handle metadata with S3 err := cl.putObject( + ctx, putObjectOptions{ name: name, objectData: dataReader, @@ -928,10 +975,13 @@ func (cl *Client) WriteFromBuffer(name string, metadata map[string]*string, data } // GetFileBlockOffsets: store blocks ids and corresponding offsets. -func (cl *Client) GetFileBlockOffsets(name string) (*common.BlockOffsetList, error) { +func (cl *Client) GetFileBlockOffsets( + ctx context.Context, + name string, +) (*common.BlockOffsetList, error) { log.Trace("Client::GetFileBlockOffsets : name %s", name) blockList := common.BlockOffsetList{} - result, err := cl.headObject(name, false, false) + result, err := cl.headObject(ctx, name, false, false) if err != nil { log.Err("Client::GetFileBlockOffsets : Unable to headObject with name %v", name) return &blockList, err @@ -977,11 +1027,11 @@ func (cl *Client) GetFileBlockOffsets(name string) (*common.BlockOffsetList, err // Truncate object to size in bytes. // name is the file path. -func (cl *Client) TruncateFile(name string, size int64) error { +func (cl *Client) TruncateFile(ctx context.Context, name string, size int64) error { log.Trace("Client::TruncateFile : Truncating %s to %dB.", name, size) // get object data - objectDataReader, err := cl.getObject(getObjectOptions{name: name}) + objectDataReader, err := cl.getObject(ctx, getObjectOptions{name: name}) if err != nil { log.Err("Client::TruncateFile : getObject(%s) failed. 
Here's why: %v", name, err) return err @@ -1017,6 +1067,7 @@ func (cl *Client) TruncateFile(name string, size int64) error { // overwrite the object with the truncated data truncatedDataReader := bytes.NewReader(objectData) err = cl.putObject( + ctx, putObjectOptions{name: name, objectData: truncatedDataReader, size: int64(len(objectData))}, ) if err != nil { @@ -1027,7 +1078,7 @@ func (cl *Client) TruncateFile(name string, size int64) error { } // Write : write data at given offset to an object -func (cl *Client) Write(options *internal.WriteFileOptions) error { +func (cl *Client) Write(ctx context.Context, options *internal.WriteFileOptions) error { name := options.Handle.Path offset := options.Offset data := options.Data @@ -1038,7 +1089,7 @@ func (cl *Client) Write(options *internal.WriteFileOptions) error { // tracks the case where our offset is great than our current file size (appending only - not modifying pre-existing data) var dataBuffer *[]byte - fileOffsets, err := cl.GetFileBlockOffsets(name) + fileOffsets, err := cl.GetFileBlockOffsets(ctx, name) if err != nil { return err } @@ -1049,7 +1100,7 @@ func (cl *Client) Write(options *internal.WriteFileOptions) error { // get the existing object data isSymlink := getSymlinkBool(options.Metadata) - oldData, _ := cl.ReadBuffer(name, 0, 0, isSymlink) + oldData, _ := cl.ReadBuffer(ctx, name, 0, 0, isSymlink) // update the data with the new data // if we're only overwriting existing data if int64(len(oldData)) >= offset+length { @@ -1076,7 +1127,7 @@ func (cl *Client) Write(options *internal.WriteFileOptions) error { } // WriteFromBuffer should be able to handle the case where now the block is too big and gets split into multiple parts - err := cl.WriteFromBuffer(name, options.Metadata, *dataBuffer) + err := cl.WriteFromBuffer(ctx, name, options.Metadata, *dataBuffer) if err != nil { log.Err("Client::Write : Failed to upload to object %s. 
Here's why: %v", name, err) return err @@ -1100,6 +1151,7 @@ func (cl *Client) Write(options *internal.WriteFileOptions) error { if !appendOnly { // fetch the parts that will be impacted by the new changes so we can overwrite them err = cl.ReadInBuffer( + ctx, name, fileOffsets.BlockList[index].StartIndex, oldDataSize, @@ -1116,7 +1168,7 @@ func (cl *Client) Write(options *internal.WriteFileOptions) error { // this gives us where the offset with respect to the buffer that holds our old data - so we can start writing the new data blockOffset := offset - fileOffsets.BlockList[index].StartIndex copy(oldDataBuffer[blockOffset:], data) - err := cl.stageAndCommitModifiedBlocks(name, oldDataBuffer, fileOffsets) + err := cl.stageAndCommitModifiedBlocks(ctx, name, oldDataBuffer, fileOffsets) return err } @@ -1155,6 +1207,7 @@ func (cl *Client) createNewBlocks(blockList *common.BlockOffsetList, offset, len } func (cl *Client) stageAndCommitModifiedBlocks( + ctx context.Context, name string, data []byte, offsetList *common.BlockOffsetList, @@ -1171,10 +1224,14 @@ func (cl *Client) stageAndCommitModifiedBlocks( } } - return cl.StageAndCommit(name, offsetList) + return cl.StageAndCommit(ctx, name, offsetList) } -func (cl *Client) StageAndCommit(name string, bol *common.BlockOffsetList) error { +func (cl *Client) StageAndCommit( + ctx context.Context, + name string, + bol *common.BlockOffsetList, +) error { // lock on the object name so that no stage and commit race condition occur causing failure objectMtx := cl.blockLocks.GetLock(name) objectMtx.Lock() @@ -1211,7 +1268,7 @@ func (cl *Client) StageAndCommit(name string, bol *common.BlockOffsetList) error var err error if combineBlocks { - bol.BlockList, err = cl.combineSmallBlocks(name, bol.BlockList) + bol.BlockList, err = cl.combineSmallBlocks(ctx, name, bol.BlockList) if err != nil { log.Err("Client::StageAndCommit : Failed to combine small blocks for %s: %v", name, err) return err @@ -1219,7 +1276,6 @@ func (cl *Client) 
StageAndCommit(name string, bol *common.BlockOffsetList) error } //struct for starting a multipart upload - ctx := context.Background() key := cl.getKey(name, false, false) //send command to start copy and get the upload id as it is needed later @@ -1342,7 +1398,7 @@ func (cl *Client) StageAndCommit(name string, bol *common.BlockOffsetList) error "Client::StageAndCommit : Attempting to abort upload due to error: %s", err.Error(), ) - abortErr := cl.abortMultipartUpload(key, uploadID) + abortErr := cl.abortMultipartUpload(ctx, key, uploadID) return errors.Join(err, abortErr) } @@ -1380,7 +1436,7 @@ func (cl *Client) StageAndCommit(name string, bol *common.BlockOffsetList) error "Client::StageAndCommit : Attempting to abort upload due to error: %s", err.Error(), ) - abortErr := cl.abortMultipartUpload(key, uploadID) + abortErr := cl.abortMultipartUpload(ctx, key, uploadID) return errors.Join(err, abortErr) } @@ -1391,6 +1447,7 @@ func (cl *Client) StageAndCommit(name string, bol *common.BlockOffsetList) error // than the smallest size for a part in AWS, which is 5 MB. Blocks smaller than 5MB will be combined with the // next block in the list. func (cl *Client) combineSmallBlocks( + ctx context.Context, name string, blockList []*common.Block, ) ([]*common.Block, error) { @@ -1415,6 +1472,7 @@ func (cl *Client) combineSmallBlocks( // If there is no data in the block and it is not truncated, we need to get it from the cloud. Otherwise we can just copy it. 
if len(blk.Data) == 0 && !blk.Truncated() { result, err := cl.getObject( + ctx, getObjectOptions{ name: name, offset: blk.StartIndex, @@ -1458,8 +1516,8 @@ func (cl *Client) combineSmallBlocks( return newBlockList, nil } -func (cl *Client) GetUsedSize() (uint64, error) { - headBucketOutput, err := cl.headBucket(cl.Config.AuthConfig.BucketName) +func (cl *Client) GetUsedSize(ctx context.Context) (uint64, error) { + headBucketOutput, err := cl.headBucket(ctx, cl.Config.AuthConfig.BucketName) if err != nil { return 0, err } @@ -1487,10 +1545,13 @@ func (cl *Client) GetUsedSize() (uint64, error) { return bucketSizeBytes, nil } -func (cl *Client) GetCommittedBlockList(name string) (*internal.CommittedBlockList, error) { +func (cl *Client) GetCommittedBlockList( + ctx context.Context, + name string, +) (*internal.CommittedBlockList, error) { log.Trace("Client::GetCommittedBlockList : name %s", name) blockList := make(internal.CommittedBlockList, 0) - result, err := cl.headObject(name, false, false) + result, err := cl.headObject(ctx, name, false, false) if err != nil { log.Err("Client::GetCommittedBlockList : Unable to headObject with name %v", name) return nil, err @@ -1533,11 +1594,10 @@ func (cl *Client) GetCommittedBlockList(name string) (*internal.CommittedBlockLi } // CommitBlocks : Initiates and completes an S3 multipart upload using locally cached blocks. 
-func (cl *Client) CommitBlocks(name string, blockList []string) error { +func (cl *Client) CommitBlocks(ctx context.Context, name string, blockList []string) error { log.Trace("Client::CommitBlocks: name %s, %d blocks", name, len(blockList)) //struct for starting a multipart upload - ctx := context.Background() key := cl.getKey(name, false, false) // Retrieve cached blocks for this file @@ -1551,6 +1611,7 @@ func (cl *Client) CommitBlocks(name string, blockList []string) error { name, ) return cl.putObject( + ctx, putObjectOptions{name: name, objectData: bytes.NewReader([]byte{}), size: 0}, ) } @@ -1672,7 +1733,7 @@ func (cl *Client) CommitBlocks(name string, blockList []string) error { name, uploadErr, ) - _ = cl.abortMultipartUpload(key, uploadID) // Attempt to clean up S3 + _ = cl.abortMultipartUpload(ctx, key, uploadID) // Attempt to clean up S3 cl.cleanupStagedBlocks(name) return uploadErr } @@ -1694,7 +1755,7 @@ func (cl *Client) CommitBlocks(name string, blockList []string) error { name, err, ) - _ = cl.abortMultipartUpload(key, uploadID) + _ = cl.abortMultipartUpload(ctx, key, uploadID) cl.cleanupStagedBlocks(name) return parseS3Err(err, fmt.Sprintf("CompleteMultipartUpload(%s)", name)) } diff --git a/component/s3storage/client_test.go b/component/s3storage/client_test.go index 97f46b308..72a99308b 100644 --- a/component/s3storage/client_test.go +++ b/component/s3storage/client_test.go @@ -565,7 +565,7 @@ func (s *clientTestSuite) TestGetRegionEndpoint() { func (s *clientTestSuite) TestListBuckets() { defer s.cleanupTest() // TODO: generalize this test by creating, listing, then destroying a bucket - buckets, err := s.client.ListBuckets() + buckets, err := s.client.ListBuckets(context.TODO()) s.assert.NoError(err) s.assert.Contains(buckets, storageTestConfigurationParameters.BucketName) } @@ -583,7 +583,7 @@ func (s *clientTestSuite) TestDefaultBucketName() { ) err := s.setupTestHelper(config, false) s.assert.NoError(err) - buckets, _ := 
s.client.ListBuckets() + buckets, _ := s.client.ListBuckets(ctx) s.assert.Contains(buckets, s.client.Config.AuthConfig.BucketName) } @@ -594,8 +594,8 @@ func (s *clientTestSuite) TestSetPrefixPath() { fileName := generateFileName() err := s.client.SetPrefixPath(prefix) - s.assert.NoError(err) //stub - err = s.client.CreateFile(fileName, os.FileMode(0)) // create file uses prefix + s.assert.NoError(err) //stub + err = s.client.CreateFile(ctx, fileName, os.FileMode(0)) // create file uses prefix s.assert.NoError(err) // object should be at prefix @@ -611,7 +611,7 @@ func (s *clientTestSuite) TestCreateFile() { // setup name := generateFileName() - err := s.client.CreateFile(name, os.FileMode(0)) + err := s.client.CreateFile(ctx, name, os.FileMode(0)) s.assert.NoError(err) // file should be in bucket @@ -627,7 +627,7 @@ func (s *clientTestSuite) TestCreateDirectory() { // setup name := generateDirectoryName() - err := s.client.CreateDirectory(name) + err := s.client.CreateDirectory(ctx, name) s.assert.NoError(err) } func (s *clientTestSuite) TestCreateLink() { @@ -644,7 +644,7 @@ func (s *clientTestSuite) TestCreateLink() { s.assert.NoError(err) source := generateFileName() - err = s.client.CreateLink(source, target, true) + err = s.client.CreateLink(ctx, source, target, true) s.assert.NoError(err) source = s.client.getKey(source, true, false) @@ -670,7 +670,7 @@ func (s *clientTestSuite) TestReadLink() { source := generateFileName() - err := s.client.CreateLink(source, target, true) + err := s.client.CreateLink(ctx, source, target, true) s.assert.NoError(err) source = s.client.getKey(source, true, false) @@ -698,7 +698,7 @@ func (s *clientTestSuite) TestDeleteLink() { source := generateFileName() - err := s.client.CreateLink(source, target, true) + err := s.client.CreateLink(ctx, source, target, true) s.assert.NoError(err) source = s.client.getKey(source, true, false) @@ -735,7 +735,7 @@ func (s *clientTestSuite) TestDeleteLinks() { sources[i] = generateFileName() 
targets[i] = generateFileName() - err := s.client.CreateLink(folder+sources[i], targets[i], true) + err := s.client.CreateLink(ctx, folder+sources[i], targets[i], true) s.assert.NoError(err) sources[i] = s.client.getKey(sources[i], true, false) @@ -797,7 +797,7 @@ func (s *clientTestSuite) TestDeleteFile() { }) s.assert.NoError(err) - err = s.client.DeleteFile(name) + err = s.client.DeleteFile(ctx, name) s.assert.NoError(err) // This is similar to the s3 bucket command, use getobject for now @@ -823,7 +823,7 @@ func (s *clientTestSuite) TestDeleteDirectory() { }) s.assert.NoError(err) - err = s.client.DeleteDirectory(dirName) + err = s.client.DeleteDirectory(ctx, dirName) s.assert.NoError(err) // file in directory should no longer be there @@ -847,7 +847,7 @@ func (s *clientTestSuite) TestRenameFile() { s.assert.NoError(err) dst := generateFileName() - err = s.client.RenameFile(src, dst, false) + err = s.client.RenameFile(ctx, src, dst, false) s.assert.NoError(err) // Src should not be in the account @@ -872,7 +872,7 @@ func (s *clientTestSuite) TestRenameFileError() { src := generateFileName() dst := generateFileName() - err := s.client.RenameFile(src, dst, false) + err := s.client.RenameFile(ctx, src, dst, false) s.assert.EqualError(err, syscall.ENOENT.Error()) // Src should not be in the account @@ -903,7 +903,7 @@ func (s *clientTestSuite) TestRenameDirectory() { s.assert.NoError(err) dstDir := generateDirectoryName() - err = s.client.RenameDirectory(srcDir, dstDir) + err = s.client.RenameDirectory(ctx, srcDir, dstDir) s.assert.NoError(err) // file in srcDir should no longer be there @@ -933,7 +933,7 @@ func (s *clientTestSuite) TestGetAttrDir() { }) s.assert.NoError(err) - attr, err := s.client.GetAttr(dirName) + attr, err := s.client.GetAttr(ctx, dirName) s.assert.NoError(err) s.assert.NotNil(attr) s.assert.True(attr.IsDir()) @@ -951,7 +951,7 @@ func (s *clientTestSuite) TestGetAttrDirWithOnlyFile() { }) s.assert.NoError(err) - attr, err := 
s.client.GetAttr(dirName) + attr, err := s.client.GetAttr(ctx, dirName) s.assert.NoError(err) s.assert.NotNil(attr) s.assert.True(attr.IsDir()) @@ -972,7 +972,7 @@ func (s *clientTestSuite) TestGetAttrFile() { }) s.assert.NoError(err) - before, err := s.client.GetAttr(name) + before, err := s.client.GetAttr(ctx, name) // file info s.assert.NoError(err) @@ -996,7 +996,7 @@ func (s *clientTestSuite) TestGetAttrFile() { }) s.assert.NoError(err) - after, err := s.client.GetAttr(name) + after, err := s.client.GetAttr(ctx, name) s.assert.NoError(err) s.assert.NotNil(after.Mtime) @@ -1008,7 +1008,7 @@ func (s *clientTestSuite) TestGetAttrError() { name := generateFileName() // non existent file should throw error - _, err := s.client.GetAttr(name) + _, err := s.client.GetAttr(ctx, name) s.assert.Error(err) s.assert.EqualValues(syscall.ENOENT, err) } @@ -1076,9 +1076,10 @@ func (s *clientTestSuite) TestList() { ChecksumAlgorithm: s.client.Config.checksumAlgorithm, }) s.assert.NoError(err) + ctx := context.Background() // a/c2 c2 := base + "/c2" - _, err = s.awsS3Client.PutObject(context.Background(), &s3.PutObjectInput{ + _, err = s.awsS3Client.PutObject(ctx, &s3.PutObjectInput{ Bucket: aws.String(s.client.Config.AuthConfig.BucketName), Key: aws.String(c2), ChecksumAlgorithm: s.client.Config.checksumAlgorithm, @@ -1086,7 +1087,7 @@ func (s *clientTestSuite) TestList() { s.assert.NoError(err) // ab/c1 abc1 := base + "b/c1" - _, err = s.awsS3Client.PutObject(context.Background(), &s3.PutObjectInput{ + _, err = s.awsS3Client.PutObject(ctx, &s3.PutObjectInput{ Bucket: aws.String(s.client.Config.AuthConfig.BucketName), Key: aws.String(abc1), ChecksumAlgorithm: s.client.Config.checksumAlgorithm, @@ -1094,7 +1095,7 @@ func (s *clientTestSuite) TestList() { s.assert.NoError(err) // ac ac := base + "c" - _, err = s.awsS3Client.PutObject(context.Background(), &s3.PutObjectInput{ + _, err = s.awsS3Client.PutObject(ctx, &s3.PutObjectInput{ Bucket: 
aws.String(s.client.Config.AuthConfig.BucketName), Key: aws.String(ac), ChecksumAlgorithm: s.client.Config.checksumAlgorithm, @@ -1103,7 +1104,7 @@ func (s *clientTestSuite) TestList() { // with trailing "/" should return only the directory c1 and the file c2 baseTrail := base + "/" - objects, _, err := s.client.List(baseTrail, nil, 0) + objects, _, err := s.client.List(ctx, baseTrail, nil, 0) s.assert.NoError(err) s.assert.NotNil(objects) s.assert.Len(objects, 2) @@ -1115,7 +1116,7 @@ func (s *clientTestSuite) TestList() { // without trailing "/" only get file ac // if not including the trailing "/", List will return any files with the given prefix // but no directories - objects, _, err = s.client.List(base, nil, 0) + objects, _, err = s.client.List(ctx, base, nil, 0) s.assert.NoError(err) s.assert.NotNil(objects) s.assert.Len(objects, 1) @@ -1123,7 +1124,7 @@ func (s *clientTestSuite) TestList() { s.assert.False(objects[0].IsDir()) // When listing the root, List should not include the root - objects, _, err = s.client.List("", nil, 0) + objects, _, err = s.client.List(ctx, "", nil, 0) s.assert.NoError(err) s.assert.NotNil(objects) s.assert.NotEmpty(objects) @@ -1151,7 +1152,7 @@ func (s *clientTestSuite) TestReadToFile() { s.assert.NoError(err) defer os.Remove(f.Name()) - err = s.client.ReadToFile(name, 0, 0, f) + err = s.client.ReadToFile(ctx, name, 0, 0, f) s.assert.NoError(err) // file content should match generated body @@ -1185,7 +1186,7 @@ func (s *clientTestSuite) TestReadToFileRanged() { s.assert.NoError(err) defer os.Remove(f.Name()) - err = s.client.ReadToFile(name, 0, int64(bodyLen), f) + err = s.client.ReadToFile(ctx, name, 0, int64(bodyLen), f) s.assert.NoError(err) // file content should match generated body @@ -1223,7 +1224,7 @@ func (s *clientTestSuite) TestReadToFileNoMultipart() { s.assert.NoError(err) defer os.Remove(f.Name()) - err = s.client.ReadToFile(name, 0, 0, f) + err = s.client.ReadToFile(ctx, name, 0, 0, f) s.assert.NoError(err) // 
file content should match generated body @@ -1253,7 +1254,7 @@ func (s *clientTestSuite) TestReadBuffer() { }) s.assert.NoError(err) - result, err := s.client.ReadBuffer(name, 0, int64(bodyLen), false) + result, err := s.client.ReadBuffer(ctx, name, 0, int64(bodyLen), false) // result should match generated body s.assert.NoError(err) @@ -1277,7 +1278,7 @@ func (s *clientTestSuite) TestReadInBuffer() { outputLen := rand.IntN(bodyLen-1) + 1 // minimum buffer length of 1 output := make([]byte, outputLen) - err = s.client.ReadInBuffer(name, 0, int64(outputLen), output) + err = s.client.ReadInBuffer(ctx, name, 0, int64(outputLen), output) // read in buffer should match first outputLen characters of generated body s.assert.NoError(err) @@ -1299,7 +1300,7 @@ func (s *clientTestSuite) TestWriteFromFile() { s.assert.Equal(bodyLen, outputLen) var options internal.WriteFileOptions //stub - err = s.client.WriteFromFile(name, options.Metadata, f) + err = s.client.WriteFromFile(ctx, name, options.Metadata, f) s.assert.NoError(err) f.Close() @@ -1330,7 +1331,7 @@ func (s *clientTestSuite) TestWriteFromBuffer() { var options internal.WriteFileOptions //stub - err := s.client.WriteFromBuffer(name, options.Metadata, body) + err := s.client.WriteFromBuffer(ctx, name, options.Metadata, body) s.assert.NoError(err) result, err := s.awsS3Client.GetObject(context.Background(), &s3.GetObjectInput{ @@ -1363,7 +1364,7 @@ func (s *clientTestSuite) TestTruncateFile() { s.assert.NoError(err) size := rand.IntN(bodyLen-1) + 1 // minimum size of 1 - err = s.client.TruncateFile(name, int64(size)) + err = s.client.TruncateFile(ctx, name, int64(size)) s.assert.NoError(err) result, err := s.awsS3Client.GetObject(context.Background(), &s3.GetObjectInput{ @@ -1399,6 +1400,7 @@ func (s *clientTestSuite) TestWrite() { newData := []byte(randomString(bodyLen - offset)) h := handlemap.NewHandle(name) err = s.client.Write( + ctx, &internal.WriteFileOptions{Handle: h, Offset: int64(offset), Data: newData}, ) 
s.assert.NoError(err) @@ -1424,10 +1426,10 @@ func (s *clientTestSuite) TestGetCommittedBlockListSmallFile() { bodyLen := 1024 body := []byte(randomString(bodyLen)) - err := s.client.WriteFromBuffer(name, nil, body) + err := s.client.WriteFromBuffer(ctx, name, nil, body) s.assert.NoError(err) - blockList, err := s.client.GetCommittedBlockList(name) + blockList, err := s.client.GetCommittedBlockList(ctx, name) s.assert.NoError(err) s.assert.NotNil(blockList) @@ -1441,10 +1443,10 @@ func (s *clientTestSuite) TestGetCommittedBlockListMultipartFile() { bodyLen := int(partSize*2 + partSize/2) body := randomString(bodyLen) - err := s.client.WriteFromBuffer(name, nil, []byte(body)) + err := s.client.WriteFromBuffer(ctx, name, nil, []byte(body)) s.assert.NoError(err) - blockList, err := s.client.GetCommittedBlockList(name) + blockList, err := s.client.GetCommittedBlockList(ctx, name) s.assert.NoError(err) s.assert.NotNil(blockList) @@ -1474,7 +1476,7 @@ func (s *clientTestSuite) TestGetCommittedBlockListNonExistentFile() { defer s.cleanupTest() name := generateFileName() - blockList, err := s.client.GetCommittedBlockList(name) + blockList, err := s.client.GetCommittedBlockList(ctx, name) s.assert.Error(err) s.assert.Equal(syscall.ENOENT, err) diff --git a/component/s3storage/config.go b/component/s3storage/config.go index 9b9f6bcb8..d79747e2e 100644 --- a/component/s3storage/config.go +++ b/component/s3storage/config.go @@ -28,6 +28,7 @@ package s3storage import ( "errors" "fmt" + "time" "github.com/Seagate/cloudfuse/common" "github.com/Seagate/cloudfuse/common/config" @@ -55,6 +56,7 @@ type Options struct { UsePathStyle bool `config:"use-path-style" yaml:"use-path-style,omitempty"` DisableUsage bool `config:"disable-usage" yaml:"disable-usage,omitempty"` EnableDirMarker bool `config:"enable-dir-marker" yaml:"enable-dir-marker,omitempty"` + HealthCheckIntervalSec int `config:"health-check-interval-sec" yaml:"health-check-interval-sec,omitempty"` } type ConfigSecrets struct 
{ @@ -62,6 +64,11 @@ type ConfigSecrets struct { SecretKey *memguard.Enclave } +const ( + defaultHealthCheckInterval = 10 * time.Second + maxHealthCheckInterval = 90 * time.Second +) + // ParseAndValidateConfig : Parse and validate config func ParseAndValidateConfig(s3 *S3Storage, opt Options, secrets ConfigSecrets) error { log.Trace("ParseAndValidateConfig : Parsing config") @@ -153,6 +160,27 @@ func ParseAndValidateConfig(s3 *S3Storage, opt Options, secrets ConfigSecrets) e } s3.stConfig.disableSymlink = !enableSymlinks + s3.stConfig.healthCheckInterval = defaultHealthCheckInterval + if config.IsSet("s3storage.health-check-interval-sec") { + specifiedInterval := time.Duration(opt.HealthCheckIntervalSec) * time.Second + switch { + case specifiedInterval < 1*time.Second: + log.Warn( + "S3storage : health-check-interval-sec=%d... using 1s instead", + opt.HealthCheckIntervalSec, + ) + s3.stConfig.healthCheckInterval = 1 * time.Second + case specifiedInterval > maxHealthCheckInterval: + log.Warn( + "S3storage : health-check-interval-sec=%d... 
using %.0fs instead", + opt.HealthCheckIntervalSec, + maxHealthCheckInterval.Seconds(), + ) + s3.stConfig.healthCheckInterval = maxHealthCheckInterval + default: + s3.stConfig.healthCheckInterval = specifiedInterval + } + } + // TODO: add more config options to customize AWS SDK behavior and import them here return nil diff --git a/component/s3storage/connection.go b/component/s3storage/connection.go index b959dacbc..3275f0967 100644 --- a/component/s3storage/connection.go +++ b/component/s3storage/connection.go @@ -26,8 +26,10 @@ package s3storage import ( + "context" "net/url" "os" + "time" "github.com/Seagate/cloudfuse/common" "github.com/Seagate/cloudfuse/internal" @@ -55,6 +57,7 @@ type Config struct { disableSymlink bool disableUsage bool enableDirMarker bool + healthCheckInterval time.Duration } // TODO: move s3AuthConfig to s3auth.go @@ -82,43 +85,60 @@ type S3Connection interface { Configure(cfg Config) error UpdateConfig(cfg Config) error - ListBuckets() ([]string, error) - ListAuthorizedBuckets() ([]string, error) + ConnectionOkay(ctx context.Context) error + ListBuckets(ctx context.Context) ([]string, error) + ListAuthorizedBuckets(ctx context.Context) ([]string, error) // This is just for test, shall not be used otherwise SetPrefixPath(string) error - CreateFile(name string, mode os.FileMode) error - CreateDirectory(name string) error - CreateLink(source string, target string, isSymlink bool) error + CreateFile(ctx context.Context, name string, mode os.FileMode) error + CreateDirectory(ctx context.Context, name string) error + CreateLink(ctx context.Context, source string, target string, isSymlink bool) error - DeleteFile(name string) error - DeleteDirectory(name string) error + DeleteFile(ctx context.Context, name string) error + DeleteDirectory(ctx context.Context, name string) error - RenameFile(string, string, bool) error - RenameDirectory(string, string) error + RenameFile(ctx context.Context, source string, target string, isSymLink bool) error + RenameDirectory(ctx context.Context, source string, target 
string) error - GetAttr(name string) (attr *internal.ObjAttr, err error) + GetAttr(ctx context.Context, name string) (attr *internal.ObjAttr, err error) // Standard operations to be supported by any account type - List(prefix string, marker *string, count int32) ([]*internal.ObjAttr, *string, error) - - ReadToFile(name string, offset int64, count int64, fi *os.File) error - ReadBuffer(name string, offset int64, length int64, isSymlink bool) ([]byte, error) - ReadInBuffer(name string, offset int64, length int64, data []byte) error - - WriteFromFile(name string, metadata map[string]*string, fi *os.File) error - WriteFromBuffer(name string, metadata map[string]*string, data []byte) error - Write(options *internal.WriteFileOptions) error - GetFileBlockOffsets(name string) (*common.BlockOffsetList, error) - - TruncateFile(string, int64) error - StageAndCommit(name string, bol *common.BlockOffsetList) error - - GetCommittedBlockList(string) (*internal.CommittedBlockList, error) - StageBlock(string, []byte, string) error - CommitBlocks(string, []string) error + List( + ctx context.Context, + prefix string, + marker *string, + count int32, + ) ([]*internal.ObjAttr, *string, error) + + ReadToFile(ctx context.Context, name string, offset int64, count int64, fi *os.File) error + ReadBuffer( + ctx context.Context, + name string, + offset int64, + length int64, + isSymlink bool, + ) ([]byte, error) + ReadInBuffer(ctx context.Context, name string, offset int64, length int64, data []byte) error + + WriteFromFile(ctx context.Context, name string, metadata map[string]*string, fi *os.File) error + WriteFromBuffer( + ctx context.Context, + name string, + metadata map[string]*string, + data []byte, + ) error + Write(ctx context.Context, options *internal.WriteFileOptions) error + GetFileBlockOffsets(ctx context.Context, name string) (*common.BlockOffsetList, error) + + TruncateFile(ctx context.Context, name string, size int64) error + StageAndCommit(ctx context.Context, name string, 
bol *common.BlockOffsetList) error + + GetCommittedBlockList(ctx context.Context, name string) (*internal.CommittedBlockList, error) + StageBlock(name string, data []byte, id string) error + CommitBlocks(ctx context.Context, name string, blockList []string) error NewCredentialKey(_, _ string) error - GetUsedSize() (uint64, error) + GetUsedSize(ctx context.Context) (uint64, error) } diff --git a/component/s3storage/s3storage.go b/component/s3storage/s3storage.go index 93f56711a..37c463e86 100644 --- a/component/s3storage/s3storage.go +++ b/component/s3storage/s3storage.go @@ -29,6 +29,7 @@ import ( "context" "errors" "fmt" + "sync" "sync/atomic" "syscall" "time" @@ -48,6 +49,16 @@ type S3Storage struct { internal.BaseComponent Storage S3Connection stConfig Config + state connectionState + ctx context.Context + cancelFn context.CancelFunc +} + +type connectionState struct { + sync.Mutex + lastConnectionAttempt *time.Time + firstOffline *time.Time + retryTicker *time.Ticker } const compName = "s3storage" @@ -123,6 +134,9 @@ func (s3 *S3Storage) Configure(isParent bool) error { log.Err("S3Storage::Configure : Failed to validate storage account [%s]", err.Error()) return err } + // first connection attempt is now + currentTime := time.Now() + s3.state.lastConnectionAttempt = ¤tTime return nil } @@ -167,6 +181,16 @@ func (s3 *S3Storage) Start(ctx context.Context) error { // create stats collector for s3storage s3StatsCollector = stats_manager.NewStatsCollector(s3.Name()) log.Debug("Starting s3 stats collector") + // create a shared context for all cloud operations, with ability to cancel + s3.ctx, s3.cancelFn = context.WithCancel(ctx) + // create the retry ticker + s3.state.retryTicker = time.NewTicker(s3.stConfig.healthCheckInterval) + s3.state.retryTicker.Stop() // stop it for now, we will start it when we are offline + go func() { + for range s3.state.retryTicker.C { + s3.CloudConnected() + } + }() return nil } @@ -178,13 +202,73 @@ func (s3 *S3Storage) Stop() error { 
return nil } +// Online check +func (s3 *S3Storage) CloudConnected() bool { + log.Trace("S3Storage::CloudConnected") + connected := s3.state.firstOffline == nil + // don't check the connection when it's up, or if we are not ready to retry + if connected || !s3.timeToRetry() { + return connected + } + // check connection + ctx, cancelFun := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancelFun() + err := s3.Storage.ConnectionOkay(ctx) + nowConnected := s3.updateConnectionState(err) + return nowConnected +} + +func (s3 *S3Storage) timeToRetry() bool { + timeSinceLastAttempt := time.Since(*s3.state.lastConnectionAttempt) + switch { + case timeSinceLastAttempt < s3.stConfig.healthCheckInterval: + // minimum delay before retrying + return false + case timeSinceLastAttempt > 90*time.Second: + // maximum delay + return true + default: + // when between the minimum and maximum delay, we use an exponential backoff + timeOfflineAtLastAttempt := s3.state.lastConnectionAttempt.Sub(*s3.state.firstOffline) + return timeSinceLastAttempt > timeOfflineAtLastAttempt + } +} + +func (s3 *S3Storage) updateConnectionState(err error) bool { + s3.state.Lock() + defer s3.state.Unlock() + currentTime := time.Now() + s3.state.lastConnectionAttempt = ¤tTime + connected := !errors.Is(err, &common.CloudUnreachableError{}) + wasConnected := s3.state.firstOffline == nil + stateChanged := connected != wasConnected + if stateChanged { + log.Warn("S3Storage::updateConnectionState : connected is now: %t", connected) + if connected { + s3.state.firstOffline = nil + // reset the context to allow new requests + s3.ctx, s3.cancelFn = context.WithCancel(context.Background()) + // stop the retry ticker + s3.state.retryTicker.Stop() + } else { + s3.state.firstOffline = ¤tTime + // cancel all outstanding requests + s3.cancelFn() + log.Warn("S3Storage::updateConnectionState : cancelled all outstanding requests") + // reset the ticker to retry the connection + 
s3.state.retryTicker.Reset(s3.stConfig.healthCheckInterval) + } + } + return connected +} + // ------------------------- Bucket listing ------------------------------------------- func (s3 *S3Storage) ListBuckets() ([]string, error) { - return s3.Storage.ListBuckets() + return s3.Storage.ListBuckets(s3.ctx) } func (s3 *S3Storage) ListAuthorizedBuckets() ([]string, error) { - return s3.Storage.ListAuthorizedBuckets() + return s3.Storage.ListAuthorizedBuckets(s3.ctx) } // ------------------------- Core Operations ------------------------------------------- @@ -193,7 +277,10 @@ func (s3 *S3Storage) ListAuthorizedBuckets() ([]string, error) { func (s3 *S3Storage) CreateDir(options internal.CreateDirOptions) error { log.Trace("S3Storage::CreateDir : %s", options.Name) - err := s3.Storage.CreateDirectory(internal.TruncateDirName(options.Name)) + err := s3.Storage.CreateDirectory(s3.ctx, internal.TruncateDirName(options.Name)) + if s3.stConfig.enableDirMarker { + s3.updateConnectionState(err) + } if err == nil { s3StatsCollector.PushEvents( @@ -210,7 +297,8 @@ func (s3 *S3Storage) CreateDir(options internal.CreateDirOptions) error { func (s3 *S3Storage) DeleteDir(options internal.DeleteDirOptions) error { log.Trace("S3Storage::DeleteDir : %s", options.Name) - err := s3.Storage.DeleteDirectory(internal.TruncateDirName(options.Name)) + err := s3.Storage.DeleteDirectory(s3.ctx, internal.TruncateDirName(options.Name)) + s3.updateConnectionState(err) if err == nil { s3StatsCollector.PushEvents(deleteDir, options.Name, nil) @@ -234,7 +322,8 @@ func formatListDirName(path string) string { func (s3 *S3Storage) IsDirEmpty(options internal.IsDirEmptyOptions) bool { log.Trace("S3Storage::IsDirEmpty : %s", options.Name) // List up to two objects, since one could be the directory with a trailing slash - list, _, err := s3.Storage.List(formatListDirName(options.Name), nil, 2) + list, _, err := s3.Storage.List(s3.ctx, formatListDirName(options.Name), nil, 2) + 
s3.updateConnectionState(err) if err != nil { log.Err("S3Storage::IsDirEmpty : error listing [%s]", err) return false @@ -263,7 +352,8 @@ func (s3 *S3Storage) StreamDir( entriesRemaining = maxResultsPerListCall } for entriesRemaining > 0 { - newList, nextMarker, err := s3.Storage.List(path, marker, entriesRemaining) + newList, nextMarker, err := s3.Storage.List(s3.ctx, path, marker, entriesRemaining) + s3.updateConnectionState(err) if err != nil { log.Err("S3Storage::StreamDir : %s Failed to read dir [%s]", options.Name, err) return objectList, "", err @@ -312,7 +402,8 @@ func (s3 *S3Storage) RenameDir(options internal.RenameDirOptions) error { options.Src = internal.TruncateDirName(options.Src) options.Dst = internal.TruncateDirName(options.Dst) - err := s3.Storage.RenameDirectory(options.Src, options.Dst) + err := s3.Storage.RenameDirectory(s3.ctx, options.Src, options.Dst) + s3.updateConnectionState(err) if err == nil { s3StatsCollector.PushEvents( @@ -337,7 +428,8 @@ func (s3 *S3Storage) CreateFile(options internal.CreateFileOptions) (*handlemap. return nil, syscall.EFAULT } - err := s3.Storage.CreateFile(options.Name, options.Mode) + err := s3.Storage.CreateFile(s3.ctx, options.Name, options.Mode) + s3.updateConnectionState(err) if err != nil { return nil, err } @@ -358,7 +450,8 @@ func (s3 *S3Storage) CreateFile(options internal.CreateFileOptions) (*handlemap. 
func (s3 *S3Storage) OpenFile(options internal.OpenFileOptions) (*handlemap.Handle, error) { log.Trace("S3Storage::OpenFile : %s", options.Name) - attr, err := s3.Storage.GetAttr(options.Name) + attr, err := s3.Storage.GetAttr(s3.ctx, options.Name) + s3.updateConnectionState(err) if err != nil { return nil, err } @@ -391,7 +484,8 @@ func (s3 *S3Storage) ReleaseFile(options internal.ReleaseFileOptions) error { func (s3 *S3Storage) DeleteFile(options internal.DeleteFileOptions) error { log.Trace("S3Storage::DeleteFile : %s", options.Name) - err := s3.Storage.DeleteFile(options.Name) + err := s3.Storage.DeleteFile(s3.ctx, options.Name) + s3.updateConnectionState(err) if err == nil { s3StatsCollector.PushEvents(deleteFile, options.Name, nil) @@ -405,8 +499,9 @@ func (s3 *S3Storage) RenameFile(options internal.RenameFileOptions) error { log.Trace("S3Storage::RenameFile : %s to %s", options.Src, options.Dst) isSymLink := options.SrcAttr != nil && options.SrcAttr.IsSymlink() - err := s3.Storage.RenameFile(options.Src, options.Dst, isSymLink) + err := s3.Storage.RenameFile(s3.ctx, options.Src, options.Dst, isSymLink) + s3.updateConnectionState(err) if err == nil { s3StatsCollector.PushEvents( renameFile, @@ -435,7 +530,14 @@ func (s3 *S3Storage) ReadInBuffer(options *internal.ReadInBufferOptions) (int, e return 0, nil } - err := s3.Storage.ReadInBuffer(options.Handle.Path, options.Offset, dataLen, options.Data) + err := s3.Storage.ReadInBuffer( + s3.ctx, + options.Handle.Path, + options.Offset, + dataLen, + options.Data, + ) + s3.updateConnectionState(err) if err != nil { log.Err( "S3Storage::ReadInBuffer : Failed to read %s [%s]", @@ -449,20 +551,22 @@ func (s3 *S3Storage) ReadInBuffer(options *internal.ReadInBufferOptions) (int, e } func (s3 *S3Storage) WriteFile(options *internal.WriteFileOptions) (int, error) { - err := s3.Storage.Write(options) + err := s3.Storage.Write(s3.ctx, options) + s3.updateConnectionState(err) return len(options.Data), err } func (s3 
*S3Storage) GetFileBlockOffsets( options internal.GetFileBlockOffsetsOptions, ) (*common.BlockOffsetList, error) { - return s3.Storage.GetFileBlockOffsets(options.Name) + return s3.Storage.GetFileBlockOffsets(s3.ctx, options.Name) } func (s3 *S3Storage) TruncateFile(options internal.TruncateFileOptions) error { log.Trace("S3Storage::TruncateFile : %s to %d bytes", options.Name, options.NewSize) - err := s3.Storage.TruncateFile(options.Name, options.NewSize) + err := s3.Storage.TruncateFile(s3.ctx, options.Name, options.NewSize) + s3.updateConnectionState(err) if err == nil { s3StatsCollector.PushEvents( @@ -477,12 +581,16 @@ func (s3 *S3Storage) TruncateFile(options internal.TruncateFileOptions) error { func (s3 *S3Storage) CopyToFile(options internal.CopyToFileOptions) error { log.Trace("S3Storage::CopyToFile : Read file %s", options.Name) - return s3.Storage.ReadToFile(options.Name, options.Offset, options.Count, options.File) + err := s3.Storage.ReadToFile(s3.ctx, options.Name, options.Offset, options.Count, options.File) + s3.updateConnectionState(err) + return err } func (s3 *S3Storage) CopyFromFile(options internal.CopyFromFileOptions) error { log.Trace("S3Storage::CopyFromFile : Upload file %s", options.Name) - return s3.Storage.WriteFromFile(options.Name, options.Metadata, options.File) + err := s3.Storage.WriteFromFile(s3.ctx, options.Name, options.Metadata, options.File) + s3.updateConnectionState(err) + return err } // Symlink operations @@ -496,8 +604,9 @@ func (s3 *S3Storage) CreateLink(options internal.CreateLinkOptions) error { return syscall.ENOTSUP } log.Trace("S3Storage::CreateLink : Create symlink %s -> %s", options.Name, options.Target) - err := s3.Storage.CreateLink(options.Name, options.Target, true) + err := s3.Storage.CreateLink(s3.ctx, options.Name, options.Target, true) + s3.updateConnectionState(err) if err == nil { s3StatsCollector.PushEvents( createLink, @@ -517,7 +626,8 @@ func (s3 *S3Storage) ReadLink(options internal.ReadLinkOptions) 
(string, error) } log.Trace("S3Storage::ReadLink : Read symlink %s", options.Name) - data, err := s3.Storage.ReadBuffer(options.Name, 0, 0, true) + data, err := s3.Storage.ReadBuffer(s3.ctx, options.Name, 0, 0, true) + s3.updateConnectionState(err) if err != nil { s3StatsCollector.PushEvents(readLink, options.Name, nil) @@ -530,7 +640,9 @@ func (s3 *S3Storage) ReadLink(options internal.ReadLinkOptions) (string, error) // Attribute operations func (s3 *S3Storage) GetAttr(options internal.GetAttrOptions) (*internal.ObjAttr, error) { //log.Trace("S3Storage::GetAttr : Get attributes of file %s", name) - return s3.Storage.GetAttr(options.Name) + attr, err := s3.Storage.GetAttr(s3.ctx, options.Name) + s3.updateConnectionState(err) + return attr, err } func (s3 *S3Storage) Chmod(options internal.ChmodOptions) error { @@ -558,11 +670,19 @@ func (s3 *S3Storage) Chown(options internal.ChownOptions) error { func (s3 *S3Storage) FlushFile(options internal.FlushFileOptions) error { log.Trace("S3Storage::FlushFile : Flush file %s", options.Handle.Path) - return s3.Storage.StageAndCommit(options.Handle.Path, options.Handle.CacheObj.BlockOffsetList) + err := s3.Storage.StageAndCommit( + s3.ctx, + options.Handle.Path, + options.Handle.CacheObj.BlockOffsetList, + ) + s3.updateConnectionState(err) + return err } func (s3 *S3Storage) GetCommittedBlockList(name string) (*internal.CommittedBlockList, error) { - return s3.Storage.GetCommittedBlockList(name) + cbl, err := s3.Storage.GetCommittedBlockList(s3.ctx, name) + s3.updateConnectionState(err) + return cbl, err } func (s3 *S3Storage) StageData(opt internal.StageDataOptions) error { @@ -570,7 +690,9 @@ func (s3 *S3Storage) StageData(opt internal.StageDataOptions) error { } func (s3 *S3Storage) CommitData(opt internal.CommitDataOptions) error { - return s3.Storage.CommitBlocks(opt.Name, opt.List) + err := s3.Storage.CommitBlocks(s3.ctx, opt.Name, opt.List) + s3.updateConnectionState(err) + return err } const blockSize = 4096 @@ -585,7 
+707,8 @@ func (s3 *S3Storage) StatFs() (*common.Statfs_t, bool, error) { // cache_size - used = f_frsize * f_bavail/1024 // cache_size - used = vfs.f_bfree * vfs.f_frsize / 1024 // if cache size is set to 0 then we have the root mount usage - sizeUsed, err := s3.Storage.GetUsedSize() + sizeUsed, err := s3.Storage.GetUsedSize(s3.ctx) + s3.updateConnectionState(err) if err != nil { // TODO: will returning EIO break any applications that depend on StatFs? return nil, true, err diff --git a/component/s3storage/s3storage_test.go b/component/s3storage/s3storage_test.go index 65eb1b624..6f86f496b 100644 --- a/component/s3storage/s3storage_test.go +++ b/component/s3storage/s3storage_test.go @@ -455,6 +455,37 @@ func (s *s3StorageTestSuite) TestListBuckets() { s.assert.Contains(buckets, storageTestConfigurationParameters.BucketName) } +func (s *s3StorageTestSuite) TestCloudConnected() { + defer s.cleanupTest() + s.assert.True(s.s3Storage.CloudConnected()) +} + +func (s *s3StorageTestSuite) TestUpdateConnectionState() { + defer s.cleanupTest() + connected := s.s3Storage.updateConnectionState(&common.CloudUnreachableError{}) + s.assert.False(connected) + s.assert.False(s.s3Storage.CloudConnected()) + connected = s.s3Storage.updateConnectionState(nil) + s.assert.True(connected) + s.assert.True(s.s3Storage.CloudConnected()) +} + +func (s *s3StorageTestSuite) TestCloudOfflineCached() { + defer s.cleanupTest() + s.s3Storage.updateConnectionState(&common.CloudUnreachableError{}) + s.assert.False(s.s3Storage.CloudConnected()) + s.s3Storage.updateConnectionState(nil) +} + +func (s *s3StorageTestSuite) TestCloudOfflineContext() { + defer s.cleanupTest() + s.s3Storage.updateConnectionState(&common.CloudUnreachableError{}) + h, err := s.s3Storage.CreateFile(internal.CreateFileOptions{Name: "file" + randomString(8)}) + s.assert.Nil(h) + s.assert.ErrorIs(err, &common.CloudUnreachableError{}) + s.s3Storage.updateConnectionState(nil) +} + func (s *s3StorageTestSuite) TestCreateDir() { 
defer s.cleanupTest() // Testing dir and dir/ @@ -3098,6 +3129,7 @@ func (s *s3StorageTestSuite) TestFlushFileUpdateChunkedFile() { _, _ = rand.Read(updatedBlock) h.CacheObj.BlockOffsetList.BlockList[1].Data = make([]byte, blockSizeBytes) err = s.s3Storage.Storage.ReadInBuffer( + context.Background(), name, int64(blockSizeBytes), int64(blockSizeBytes), @@ -3155,6 +3187,7 @@ func (s *s3StorageTestSuite) TestFlushFileTruncateUpdateChunkedFile() { h.CacheObj.BlockOffsetList.BlockList[1].Data = make([]byte, blockSizeBytes/2) h.CacheObj.BlockOffsetList.BlockList[1].EndIndex = int64(blockSizeBytes + blockSizeBytes/2) err = s.s3Storage.Storage.ReadInBuffer( + context.Background(), name, int64(blockSizeBytes), int64(blockSizeBytes)/2, diff --git a/component/s3storage/s3wrappers.go b/component/s3storage/s3wrappers.go index 810272815..3d95e0e13 100644 --- a/component/s3storage/s3wrappers.go +++ b/component/s3storage/s3wrappers.go @@ -84,9 +84,19 @@ type renameObjectOptions struct { const symlinkStr = ".rclonelink" const maxResultsPerListCall = 1000 +// check the connection to the S3 service by calling HeadBucket. +func (cl *Client) ConnectionOkay(ctx context.Context) error { + log.Trace("Client::ConnectionOkay : checking connection to S3 service") + _, err := cl.AwsS3Client.HeadBucket( + ctx, + &s3.HeadBucketInput{Bucket: aws.String(cl.Config.AuthConfig.BucketName)}, + ) + return parseS3Err(err, "HeadBucket "+cl.Config.AuthConfig.BucketName) +} + // getObjectMultipartDownload downloads an object to a file using multipart download // which can be much faster for large objects. 
-func (cl *Client) getObjectMultipartDownload(name string, fi *os.File) error { +func (cl *Client) getObjectMultipartDownload(ctx context.Context, name string, fi *os.File) error { key := cl.getKey(name, false, false) log.Trace("Client::getObjectMultipartDownload : get object %s", key) @@ -100,7 +110,7 @@ func (cl *Client) getObjectMultipartDownload(name string, fi *os.File) error { downloadInput.ChecksumMode = tmtypes.ChecksumModeEnabled } - _, err := cl.transferManager.DownloadObject(context.Background(), downloadInput) + _, err := cl.transferManager.DownloadObject(ctx, downloadInput) // check for errors if err != nil { attemptedAction := fmt.Sprintf("GetObject(%s)", key) @@ -112,7 +122,7 @@ func (cl *Client) getObjectMultipartDownload(name string, fi *os.File) error { // Wrapper for awsS3Client.GetObject. // Set count = 0 to read to the end of the object. // name is the path to the file. -func (cl *Client) getObject(options getObjectOptions) (io.ReadCloser, error) { +func (cl *Client) getObject(ctx context.Context, options getObjectOptions) (io.ReadCloser, error) { key := cl.getKey(options.name, options.isSymLink, options.isDir) log.Trace("Client::getObject : get object %s (%d+%d)", key, options.offset, options.count) @@ -148,7 +158,7 @@ func (cl *Client) getObject(options getObjectOptions) (io.ReadCloser, error) { getObjectInput.ChecksumMode = types.ChecksumModeEnabled } - result, err := cl.AwsS3Client.GetObject(context.Background(), getObjectInput) + result, err := cl.AwsS3Client.GetObject(ctx, getObjectInput) // check for errors if err != nil { @@ -163,10 +173,9 @@ func (cl *Client) getObject(options getObjectOptions) (io.ReadCloser, error) { // Wrapper for awsS3Client.PutObject. // Pass in the name of the file, an io.Reader with the object data, the size of the upload, // and whether the object is a symbolic link or not. 
-func (cl *Client) putObject(options putObjectOptions) error { +func (cl *Client) putObject(ctx context.Context, options putObjectOptions) error { key := cl.getKey(options.name, options.isSymLink, options.isDir) log.Trace("Client::putObject : putting object %s", key) - ctx := context.Background() // Handle nil body by providing an empty reader body := options.objectData @@ -195,11 +204,11 @@ func (cl *Client) putObject(options putObjectOptions) error { // Wrapper for awsS3Client.DeleteObject. // name is the path to the file. -func (cl *Client) deleteObject(name string, isSymLink bool, isDir bool) error { +func (cl *Client) deleteObject(ctx context.Context, name string, isSymLink bool, isDir bool) error { key := cl.getKey(name, isSymLink, isDir) log.Trace("Client::deleteObject : deleting object %s", key) - _, err := cl.AwsS3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + _, err := cl.AwsS3Client.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: aws.String(cl.Config.AuthConfig.BucketName), Key: aws.String(key), }) @@ -210,7 +219,7 @@ func (cl *Client) deleteObject(name string, isSymLink bool, isDir bool) error { // Wrapper for awsS3Client.DeleteObjects. // names is a list of paths to the objects. -func (cl *Client) deleteObjects(objects []*internal.ObjAttr) error { +func (cl *Client) deleteObjects(ctx context.Context, objects []*internal.ObjAttr) error { if objects == nil { return nil } @@ -224,7 +233,7 @@ func (cl *Client) deleteObjects(objects []*internal.ObjAttr) error { } } // send keyList for deletion - result, err := cl.AwsS3Client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ + result, err := cl.AwsS3Client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ Bucket: &cl.Config.AuthConfig.BucketName, Delete: &types.Delete{ Objects: keyList, @@ -253,11 +262,16 @@ func (cl *Client) deleteObjects(objects []*internal.ObjAttr) error { // HeadObject() acts just like GetObject, except no contents are returned. 
// So this is used to get metadata / attributes for an object. // name is the path to the file. -func (cl *Client) headObject(name string, isSymlink bool, isDir bool) (*internal.ObjAttr, error) { +func (cl *Client) headObject( + ctx context.Context, + name string, + isSymlink bool, + isDir bool, +) (*internal.ObjAttr, error) { key := cl.getKey(name, isSymlink, isDir) log.Trace("Client::headObject : object %s", key) - result, err := cl.AwsS3Client.HeadObject(context.Background(), &s3.HeadObjectInput{ + result, err := cl.AwsS3Client.HeadObject(ctx, &s3.HeadObjectInput{ Bucket: aws.String(cl.Config.AuthConfig.BucketName), Key: aws.String(key), }) @@ -279,15 +293,15 @@ func (cl *Client) headObject(name string, isSymlink bool, isDir bool) (*internal } // Wrapper for awsS3Client.HeadBucket -func (cl *Client) headBucket(bucketName string) (*s3.HeadBucketOutput, error) { - headBucketOutput, err := cl.AwsS3Client.HeadBucket(context.Background(), &s3.HeadBucketInput{ +func (cl *Client) headBucket(ctx context.Context, bucketName string) (*s3.HeadBucketOutput, error) { + headBucketOutput, err := cl.AwsS3Client.HeadBucket(ctx, &s3.HeadBucketInput{ Bucket: aws.String(bucketName), }) return headBucketOutput, parseS3Err(err, "HeadBucket "+bucketName) } // Wrapper for awsS3Client.CopyObject -func (cl *Client) copyObject(options copyObjectOptions) error { +func (cl *Client) copyObject(ctx context.Context, options copyObjectOptions) error { // copy the object to its new key sourceKey := cl.getKey(options.source, options.isSymLink, options.isDir) targetKey := cl.getKey(options.target, options.isSymLink, options.isDir) @@ -304,7 +318,7 @@ func (cl *Client) copyObject(options copyObjectOptions) error { copyObjectInput.ChecksumAlgorithm = cl.Config.checksumAlgorithm } - _, err := cl.AwsS3Client.CopyObject(context.Background(), copyObjectInput) + _, err := cl.AwsS3Client.CopyObject(ctx, copyObjectInput) // check for errors on copy if err != nil { attemptedAction := fmt.Sprintf("copy %s to 
%s", sourceKey, targetKey) @@ -314,10 +328,8 @@ func (cl *Client) copyObject(options copyObjectOptions) error { return err } -func (cl *Client) renameObject(options renameObjectOptions) error { - err := cl.copyObject( - copyObjectOptions(options), - ) //nolint +func (cl *Client) renameObject(ctx context.Context, options renameObjectOptions) error { + err := cl.copyObject(ctx, copyObjectOptions(options)) if err != nil { log.Err( "Client::renameObject : copyObject(%s->%s) failed. Here's why: %v", @@ -330,7 +342,7 @@ func (cl *Client) renameObject(options renameObjectOptions) error { // Copy of the file is done so now delete the older file // in this case we don't need to check if the file exists, so we use deleteObject, not DeleteFile // this is what S3's DeleteObject spec is meant for: to make sure the object doesn't exist anymore - err = cl.deleteObject(options.source, options.isSymLink, options.isDir) + err = cl.deleteObject(ctx, options.source, options.isSymLink, options.isDir) if err != nil { log.Err( "Client::renameObject : deleteObject(%s) failed. Here's why: %v", @@ -343,9 +355,9 @@ func (cl *Client) renameObject(options renameObjectOptions) error { } // abortMultipartUpload stops a multipart upload and verifys that the parts are deleted. 
-func (cl *Client) abortMultipartUpload(key string, uploadID string) error { +func (cl *Client) abortMultipartUpload(ctx context.Context, key string, uploadID string) error { _, abortErr := cl.AwsS3Client.AbortMultipartUpload( - context.Background(), + ctx, &s3.AbortMultipartUploadInput{ Bucket: aws.String(cl.Config.AuthConfig.BucketName), Key: aws.String(key), @@ -357,7 +369,7 @@ func (cl *Client) abortMultipartUpload(key string, uploadID string) error { } // AWS states you need to call listparts to verify that multipart upload was properly aborted - resp, listErr := cl.AwsS3Client.ListParts(context.Background(), &s3.ListPartsInput{ + resp, listErr := cl.AwsS3Client.ListParts(ctx, &s3.ListPartsInput{ Bucket: aws.String(cl.Config.AuthConfig.BucketName), Key: aws.String(key), UploadId: &uploadID, @@ -382,12 +394,12 @@ func (cl *Client) abortMultipartUpload(key string, uploadID string) error { } // Wrapper for awsS3Client.ListBuckets -func (cl *Client) ListBuckets() ([]string, error) { +func (cl *Client) ListBuckets(ctx context.Context) ([]string, error) { log.Trace("Client::ListBuckets : Listing buckets") cntList := make([]string, 0) - result, err := cl.AwsS3Client.ListBuckets(context.Background(), &s3.ListBucketsInput{}) + result, err := cl.AwsS3Client.ListBuckets(ctx, &s3.ListBucketsInput{}) if err != nil { log.Err("Client::ListBuckets : Failed to list buckets. Here's why: %v", err) @@ -409,6 +421,7 @@ func (cl *Client) ListBuckets() ([]string, error) { // If count=0 - fetch max entries. // the *string being returned is the token / marker and will be nil when the listing is complete. 
func (cl *Client) List( + ctx context.Context, prefix string, marker *string, count int32, @@ -467,15 +480,14 @@ func (cl *Client) List( // initialize list to be returned objectAttrList := make([]*internal.ObjAttr, 0) // fetch and process a single result page - output, err := paginator.NextPage(context.Background()) + output, err := paginator.NextPage(ctx) if err != nil { - log.Err( - "Client::List : Failed to list objects in bucket %v with prefix %v. Here's why: %v", - prefix, + attemptedAction := fmt.Sprintf( + "list objects in bucket %v with prefix %v", bucketName, - err, + prefix, ) - return objectAttrList, nil, err + return objectAttrList, nil, parseS3Err(err, attemptedAction) } if output.IsTruncated != nil && *output.IsTruncated { diff --git a/component/s3storage/utils.go b/component/s3storage/utils.go index 2b030ae89..49eaef2a8 100644 --- a/component/s3storage/utils.go +++ b/component/s3storage/utils.go @@ -26,6 +26,7 @@ package s3storage import ( + "context" "encoding/json" "errors" "fmt" @@ -39,6 +40,8 @@ import ( "github.com/Seagate/cloudfuse/common/log" "github.com/Seagate/cloudfuse/internal" + "github.com/aws/aws-sdk-go-v2/aws/ratelimit" + "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/smithy-go" ) @@ -152,7 +155,25 @@ func parseS3Err(err error, attemptedAction string) error { } } + var maErr *retry.MaxAttemptsError + qeErr := &ratelimit.QuotaExceededError{} + if errors.As(err, &maErr) || errors.As(err, qeErr) || errors.Is(err, context.Canceled) || + errors.Is(err, context.DeadlineExceeded) { + log.Err( + "%s : Failed to %s because cloud storage is unreachable", + functionName, + attemptedAction, + ) + return common.NewCloudUnreachableError(err) + } + // unrecognized error - parsing failed + // log error information to debug log + unwrappedErr := err + for unwrappedErr != nil { + log.Debug("Uncaught S3 error is of type \"%T\" and value %v.", unwrappedErr, unwrappedErr) + unwrappedErr = errors.Unwrap(unwrappedErr) + } // print and return the 
original error log.Err("%s : Failed to %s. Here's why: %v", functionName, attemptedAction, err) return err diff --git a/internal/base_component.go b/internal/base_component.go index f08b1f99f..79aef9b16 100644 --- a/internal/base_component.go +++ b/internal/base_component.go @@ -84,6 +84,13 @@ func (base *BaseComponent) Stop() error { return nil } +func (base *BaseComponent) CloudConnected() bool { + if base.next != nil { + return base.next.CloudConnected() + } + return false +} + // Directory operations func (base *BaseComponent) CreateDir(options CreateDirOptions) error { if base.next != nil { diff --git a/internal/component.go b/internal/component.go index cb5d6b82a..b77b3c141 100644 --- a/internal/component.go +++ b/internal/component.go @@ -71,6 +71,8 @@ type Component interface { Start(context.Context) error Stop() error + CloudConnected() bool + // Directory operations CreateDir(CreateDirOptions) error DeleteDir(DeleteDirOptions) error diff --git a/internal/mock_component.go b/internal/mock_component.go index 385da2608..b72b1a70b 100644 --- a/internal/mock_component.go +++ b/internal/mock_component.go @@ -121,6 +121,20 @@ func (mr *MockComponentMockRecorder) ReleaseFile(arg0 interface{}) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseFile", reflect.TypeOf((*MockComponent)(nil).ReleaseFile), arg0) } +// CloudConnected mocks base method. +func (m *MockComponent) CloudConnected() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloudConnected") + ret0, _ := ret[0].(bool) + return ret0 +} + +// CloudConnected indicates an expected call of CloudConnected. +func (mr *MockComponentMockRecorder) CloudConnected() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloudConnected", reflect.TypeOf((*MockComponent)(nil).CloudConnected)) +} + // Configure mocks base method. 
func (m *MockComponent) Configure(arg0 bool) error { m.ctrl.T.Helper() diff --git a/setup/advancedConfig.yaml b/setup/advancedConfig.yaml index 02c73521b..3f77b845d 100644 --- a/setup/advancedConfig.yaml +++ b/setup/advancedConfig.yaml @@ -212,6 +212,7 @@ s3storage: use-path-style: true|false disable-usage: true|false enable-dir-marker: true|false + health-check-interval-sec: <seconds> # Mount all configuration mountall: diff --git a/setup/baseConfig.yaml b/setup/baseConfig.yaml index 3e4ca5f72..ced9cae75 100644 --- a/setup/baseConfig.yaml +++ b/setup/baseConfig.yaml @@ -199,6 +199,7 @@ s3storage: use-path-style: true|false disable-usage: true|false enable-dir-marker: true|false + health-check-interval-sec: <seconds> # Mount all configuration mountall: