From ef0ca4a8a476eb6ed2af7715576380a297408f86 Mon Sep 17 00:00:00 2001 From: Enzo DJABALI Date: Wed, 28 Jan 2026 15:32:15 +0100 Subject: [PATCH] Implementing file search, pagination, bulk ops & URL S3 instance routing --- internal/app/s3manager/bucket_view.go | 193 +++++++++-- internal/app/s3manager/buckets_view.go | 25 +- internal/app/s3manager/bulk_operations.go | 142 ++++++++ internal/app/s3manager/instance_handlers.go | 37 -- internal/app/s3manager/manager_handlers.go | 331 ++++++++++++++++-- internal/app/s3manager/mocks/s3.go | 62 ++++ internal/app/s3manager/multi_s3_manager.go | 60 ++-- internal/app/s3manager/s3.go | 1 + main.go | 33 +- web/template/bucket.html.tmpl | 356 ++++++++++++++++++-- web/template/buckets.html.tmpl | 8 +- web/template/layout.html.tmpl | 35 +- 12 files changed, 1098 insertions(+), 185 deletions(-) create mode 100644 internal/app/s3manager/bulk_operations.go diff --git a/internal/app/s3manager/bucket_view.go b/internal/app/s3manager/bucket_view.go index e373e482..f8cef0f0 100644 --- a/internal/app/s3manager/bucket_view.go +++ b/internal/app/s3manager/bucket_view.go @@ -7,39 +7,85 @@ import ( "net/http" "path" "regexp" + "sort" + "strconv" "strings" "time" "github.com/minio/minio-go/v7" ) +// objectWithIcon represents an S3 object with additional display properties +type objectWithIcon struct { + Key string + Size int64 + LastModified time.Time + Owner string + Icon string + IsFolder bool + DisplayName string +} + // HandleBucketView shows the details page of a bucket. func HandleBucketView(s3 S3, templates fs.FS, allowDelete bool, listRecursive bool, rootURL string) http.HandlerFunc { - type objectWithIcon struct { - Key string - Size int64 - LastModified time.Time - Owner string - Icon string - IsFolder bool - DisplayName string - } - type pageData struct { - RootURL string - BucketName string - Objects []objectWithIcon - AllowDelete bool - Paths []string - CurrentPath string + RootURL string + BucketName string + Objects []objectWithIcon + AllowDelete bool + Paths []string + CurrentPath string + CurrentS3 *S3Instance + S3Instances []*S3Instance + HasError bool + ErrorMessage string + SortBy string + SortOrder string + Page int + PerPage int + TotalItems int + TotalPages int + HasPrevPage bool + HasNextPage bool + Search string } return func(w http.ResponseWriter, r *http.Request) { regex := regexp.MustCompile(`\/buckets\/([^\/]*)\/?(.*)`) - matches := regex.FindStringSubmatch(r.RequestURI) + matches := regex.FindStringSubmatch(r.URL.Path) bucketName := matches[1] path := matches[2] + // Get sorting parameters from query string + sortBy := r.URL.Query().Get("sortBy") + sortOrder := r.URL.Query().Get("sortOrder") + + // Default sorting + if sortBy == "" { + sortBy = "key" + } + if sortOrder == "" { + sortOrder = "asc" + } + + // Get pagination parameters + page := 1 + if pageStr := r.URL.Query().Get("page"); pageStr != "" { + if p, err := strconv.Atoi(pageStr); err == nil && p > 0 { + page = p + } + } + + perPage := 25 + if perPageStr := r.URL.Query().Get("perPage"); perPageStr != "" { + if pp, err := strconv.Atoi(perPageStr); err == nil && pp > 0 { + perPage = pp + } + } + + // Get search parameter + search := strings.TrimSpace(r.URL.Query().Get("search")) + var objs []objectWithIcon opts := minio.ListObjectsOptions{ Recursive: listRecursive, @@ -63,16 +109,91 @@ func HandleBucketView(s3 S3, templates fs.FS, allowDelete bool, listRecursive bo } objs = append(objs, obj) } + + // Filter objects based on search query + if search != "" { + searchLower := 
strings.ToLower(search) + filteredObjs := make([]objectWithIcon, 0) + for _, obj := range objs { + // Search in DisplayName and Key (case-insensitive) + if strings.Contains(strings.ToLower(obj.DisplayName), searchLower) || + strings.Contains(strings.ToLower(obj.Key), searchLower) { + filteredObjs = append(filteredObjs, obj) + } + } + objs = filteredObjs + } + + // Sort objects based on sortBy and sortOrder + sortObjects(objs, sortBy, sortOrder) + + // Calculate pagination + totalItems := len(objs) + totalPages := (totalItems + perPage - 1) / perPage + if totalPages == 0 { + totalPages = 1 + } + if page > totalPages { + page = totalPages + } + + // Paginate objects + start := (page - 1) * perPage + end := start + perPage + if start < 0 { + start = 0 + } + if end > totalItems { + end = totalItems + } + if start < totalItems { + objs = objs[start:end] + } else { + objs = []objectWithIcon{} + } + data := pageData{ - RootURL: rootURL, - BucketName: bucketName, - Objects: objs, - AllowDelete: allowDelete, - Paths: removeEmptyStrings(strings.Split(path, "/")), - CurrentPath: path, + RootURL: rootURL, + BucketName: bucketName, + Objects: objs, + AllowDelete: allowDelete, + Paths: removeEmptyStrings(strings.Split(path, "/")), + CurrentPath: path, + CurrentS3: nil, + S3Instances: nil, + HasError: false, + ErrorMessage: "", + SortBy: sortBy, + SortOrder: sortOrder, + Page: page, + PerPage: perPage, + TotalItems: totalItems, + TotalPages: totalPages, + HasPrevPage: page > 1, + HasNextPage: page < totalPages, + Search: search, + } + + funcMap := template.FuncMap{ + "add": func(a, b int) int { return a + b }, + "sub": func(a, b int) int { return a - b }, + "mul": func(a, b int) int { return a * b }, + "min": func(a, b int) int { + if a < b { + return a + } + return b + }, + "iterate": func(start, end int) []int { + result := make([]int, 0, end-start) + for i := start; i < end; i++ { + result = append(result, i) + } + return result + }, } - t, err := template.ParseFS(templates, "layout.html.tmpl", "bucket.html.tmpl") + t, err := template.New("").Funcs(funcMap).ParseFS(templates, "layout.html.tmpl", "bucket.html.tmpl") if err != nil { handleHTTPError(w, fmt.Errorf("error parsing template files: %w", err)) return @@ -114,3 +235,27 @@ func removeEmptyStrings(input []string) []string { } return result } + +// sortObjects sorts the objects based on the specified field and order +func sortObjects(objs []objectWithIcon, sortBy, sortOrder string) { + sort.Slice(objs, func(i, j int) bool { + var less bool + switch sortBy { + case "size": + less = objs[i].Size < objs[j].Size + case "owner": + less = strings.ToLower(objs[i].Owner) < strings.ToLower(objs[j].Owner) + case "lastModified": + less = objs[i].LastModified.Before(objs[j].LastModified) + case "key": + fallthrough + default: + less = strings.ToLower(objs[i].DisplayName) < strings.ToLower(objs[j].DisplayName) + } + + if sortOrder == "desc" { + return !less + } + return less + }) +} diff --git a/internal/app/s3manager/buckets_view.go b/internal/app/s3manager/buckets_view.go index 748228ec..a7e45a3b 100644 --- a/internal/app/s3manager/buckets_view.go +++ b/internal/app/s3manager/buckets_view.go @@ -5,30 +5,37 @@ import ( "html/template" "io/fs" "net/http" - - "github.com/minio/minio-go/v7" ) // HandleBucketsView renders all buckets on an HTML page. 
func HandleBucketsView(s3 S3, templates fs.FS, allowDelete bool, rootURL string) http.HandlerFunc { type pageData struct { - RootURL string - Buckets []minio.BucketInfo - AllowDelete bool + RootURL string + Buckets []interface{} + AllowDelete bool + CurrentS3 *S3Instance + S3Instances []*S3Instance + HasError bool + ErrorMessage string } return func(w http.ResponseWriter, r *http.Request) { buckets, err := s3.ListBuckets(r.Context()) + data := pageData{ + RootURL: rootURL, + AllowDelete: allowDelete, + HasError: false, + } + if err != nil { handleHTTPError(w, fmt.Errorf("error listing buckets: %w", err)) return } - data := pageData{ - RootURL: rootURL, - Buckets: buckets, - AllowDelete: allowDelete, + data.Buckets = make([]interface{}, len(buckets)) + for i, bucket := range buckets { + data.Buckets[i] = bucket } t, err := template.ParseFS(templates, "layout.html.tmpl", "buckets.html.tmpl") diff --git a/internal/app/s3manager/bulk_operations.go b/internal/app/s3manager/bulk_operations.go new file mode 100644 index 00000000..d7bd7880 --- /dev/null +++ b/internal/app/s3manager/bulk_operations.go @@ -0,0 +1,142 @@ +package s3manager + +import ( + "archive/zip" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/gorilla/mux" + "github.com/minio/minio-go/v7" +) + +// BulkDeleteRequest represents the request body for bulk delete +type BulkDeleteRequest struct { + Keys []string `json:"keys"` +} + +// BulkDownloadRequest represents the request body for bulk download +type BulkDownloadRequest struct { + Keys []string `json:"keys"` +} + +// HandleBulkDeleteObjects deletes multiple objects from a bucket. +func HandleBulkDeleteObjects(s3 S3) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + bucketName := mux.Vars(r)["bucketName"] + + var req BulkDeleteRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + handleHTTPError(w, fmt.Errorf("error parsing request: %w", err)) + return + } + + if len(req.Keys) == 0 { + http.Error(w, "no keys provided", http.StatusBadRequest) + return + } + + // Create a channel for objects to delete + objectsCh := make(chan minio.ObjectInfo) + + // Send object names to the channel + go func() { + defer close(objectsCh) + for _, key := range req.Keys { + objectsCh <- minio.ObjectInfo{Key: key} + } + }() + + // Remove objects + errorCh := s3.RemoveObjects(r.Context(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) + + // Check for errors + for err := range errorCh { + if err.Err != nil { + handleHTTPError(w, fmt.Errorf("error removing object %s: %w", err.ObjectName, err.Err)) + return + } + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if _, err := w.Write([]byte(`{"success": true}`)); err != nil { + // Response already sent, can only log the error + fmt.Printf("error writing response: %v\n", err) + } + } +} + +// HandleBulkDownloadObjects downloads multiple objects as a ZIP archive. 
+func HandleBulkDownloadObjects(s3 S3) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + bucketName := mux.Vars(r)["bucketName"] + + // Parse the form to get the keys + if err := r.ParseForm(); err != nil { + handleHTTPError(w, fmt.Errorf("error parsing form: %w", err)) + return + } + + keysJSON := r.FormValue("keys") + var keys []string + if err := json.Unmarshal([]byte(keysJSON), &keys); err != nil { + handleHTTPError(w, fmt.Errorf("error parsing keys: %w", err)) + return + } + + if len(keys) == 0 { + http.Error(w, "no keys provided", http.StatusBadRequest) + return + } + + // Set headers for ZIP download + timestamp := time.Now().Format("20060102-150405") + zipFilename := fmt.Sprintf("%s-%s.zip", bucketName, timestamp) + w.Header().Set("Content-Type", "application/zip") + w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", zipFilename)) + + // Create a new ZIP writer + zipWriter := zip.NewWriter(w) + defer func() { + if err := zipWriter.Close(); err != nil { + // Can't return HTTP error at this point, just log + fmt.Printf("error closing zip writer: %v\n", err) + } + }() + + // Add each object to the ZIP + for _, key := range keys { + // Get the object from S3 + object, err := s3.GetObject(r.Context(), bucketName, key, minio.GetObjectOptions{}) + if err != nil { + // Log error but continue with other files + continue + } + + // Get object info to check if it's valid + _, err = object.Stat() + if err != nil { + _ = object.Close() + continue + } + + // Create a file in the ZIP + zipFile, err := zipWriter.Create(key) + if err != nil { + _ = object.Close() + continue + } + + // Copy the object content to the ZIP file + _, err = io.Copy(zipFile, object) + _ = object.Close() + if err != nil { + // Error writing to ZIP, but we can't return HTTP error at this point + continue + } + } + } +} diff --git a/internal/app/s3manager/instance_handlers.go b/internal/app/s3manager/instance_handlers.go index 10a347c5..d4b988ac 100644 --- a/internal/app/s3manager/instance_handlers.go +++ b/internal/app/s3manager/instance_handlers.go @@ -4,8 +4,6 @@ import ( "encoding/json" "fmt" "net/http" - - "github.com/gorilla/mux" ) // S3InstanceInfo represents the information about an S3 instance for API responses @@ -18,14 +16,11 @@ type S3InstanceInfo struct { func HandleGetS3Instances(manager *MultiS3Manager) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { instances := manager.GetAllInstances() - current := manager.GetCurrentInstance() response := struct { Instances []S3InstanceInfo `json:"instances"` - Current string `json:"current"` }{ Instances: make([]S3InstanceInfo, len(instances)), - Current: current.ID, } for i, instance := range instances { @@ -43,35 +38,3 @@ func HandleGetS3Instances(manager *MultiS3Manager) http.HandlerFunc { } } } - -// HandleSwitchS3Instance switches to a specific S3 instance -func HandleSwitchS3Instance(manager *MultiS3Manager) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - instanceID := vars["instanceId"] - - if instanceID == "" { - http.Error(w, "instance ID is required", http.StatusBadRequest) - return - } - - err := manager.SetCurrentInstance(instanceID) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - - current := manager.GetCurrentInstance() - response := S3InstanceInfo{ - ID: current.ID, - Name: current.Name, - } - - w.Header().Set("Content-Type", "application/json") - err = json.NewEncoder(w).Encode(response) - if 
err != nil { - handleHTTPError(w, fmt.Errorf("error encoding JSON: %w", err)) - return - } - } -} diff --git a/internal/app/s3manager/manager_handlers.go b/internal/app/s3manager/manager_handlers.go index f5aa0787..cedc92e6 100644 --- a/internal/app/s3manager/manager_handlers.go +++ b/internal/app/s3manager/manager_handlers.go @@ -6,12 +6,27 @@ import ( "io/fs" "net/http" "regexp" + "sort" + "strconv" "strings" "time" + "github.com/gorilla/mux" "github.com/minio/minio-go/v7" ) +// objectWithIconExtended extends objectWithIcon with additional formatting fields +type objectWithIconExtended struct { + Key string + Size int64 + SizeDisplay string + LastModified time.Time + Owner string + Icon string + IsFolder bool + DisplayName string +} + // HandleBucketsViewWithManager renders all buckets on an HTML page using MultiS3Manager. func HandleBucketsViewWithManager(manager *MultiS3Manager, templates fs.FS, allowDelete bool, rootURL string) http.HandlerFunc { type pageData struct { @@ -25,8 +40,16 @@ func HandleBucketsViewWithManager(manager *MultiS3Manager, templates fs.FS, allo } return func(w http.ResponseWriter, r *http.Request) { - s3 := manager.GetCurrentClient() - current := manager.GetCurrentInstance() + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client instances := manager.GetAllInstances() buckets, err := s3.ListBuckets(r.Context()) @@ -67,8 +90,16 @@ func HandleBucketsViewWithManager(manager *MultiS3Manager, templates fs.FS, allo // HandleBucketViewWithManager shows the details page of a bucket using MultiS3Manager. func HandleBucketViewWithManager(manager *MultiS3Manager, templates fs.FS, allowDelete bool, listRecursive bool, rootURL string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - s3 := manager.GetCurrentClient() - current := manager.GetCurrentInstance() + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client instances := manager.GetAllInstances() // Create a modified handler that includes S3 instance data @@ -80,7 +111,16 @@ func HandleBucketViewWithManager(manager *MultiS3Manager, templates fs.FS, allow // HandleCreateBucketWithManager creates a new bucket using MultiS3Manager. func HandleCreateBucketWithManager(manager *MultiS3Manager) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - s3 := manager.GetCurrentClient() + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client // Delegate to the original handler with the current S3 client handler := HandleCreateBucket(s3) handler(w, r) @@ -90,7 +130,16 @@ func HandleCreateBucketWithManager(manager *MultiS3Manager) http.HandlerFunc { // HandleDeleteBucketWithManager deletes a bucket using MultiS3Manager. 
func HandleDeleteBucketWithManager(manager *MultiS3Manager) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - s3 := manager.GetCurrentClient() + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client // Delegate to the original handler with the current S3 client handler := HandleDeleteBucket(s3) handler(w, r) @@ -100,7 +149,16 @@ func HandleDeleteBucketWithManager(manager *MultiS3Manager) http.HandlerFunc { // HandleCreateObjectWithManager uploads a new object using MultiS3Manager. func HandleCreateObjectWithManager(manager *MultiS3Manager, sseInfo SSEType) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - s3 := manager.GetCurrentClient() + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client // Delegate to the original handler with the current S3 client handler := HandleCreateObject(s3, sseInfo) handler(w, r) @@ -110,7 +168,16 @@ func HandleCreateObjectWithManager(manager *MultiS3Manager, sseInfo SSEType) htt // HandleGenerateURLWithManager generates a presigned URL using MultiS3Manager. func HandleGenerateURLWithManager(manager *MultiS3Manager) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - s3 := manager.GetCurrentClient() + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client // Delegate to the original handler with the current S3 client handler := HandleGenerateURL(s3) handler(w, r) @@ -120,7 +187,16 @@ func HandleGenerateURLWithManager(manager *MultiS3Manager) http.HandlerFunc { // HandleGetObjectWithManager downloads an object to the client using MultiS3Manager. func HandleGetObjectWithManager(manager *MultiS3Manager, forceDownload bool) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - s3 := manager.GetCurrentClient() + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client // Delegate to the original handler with the current S3 client handler := HandleGetObject(s3, forceDownload) handler(w, r) @@ -130,7 +206,16 @@ func HandleGetObjectWithManager(manager *MultiS3Manager, forceDownload bool) htt // HandleDeleteObjectWithManager deletes an object using MultiS3Manager. 
func HandleDeleteObjectWithManager(manager *MultiS3Manager) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - s3 := manager.GetCurrentClient() + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client // Delegate to the original handler with the current S3 client handler := HandleDeleteObject(s3) handler(w, r) @@ -139,21 +224,10 @@ func HandleDeleteObjectWithManager(manager *MultiS3Manager) http.HandlerFunc { // createBucketViewWithS3Data creates a bucket view handler that includes S3 instance data func createBucketViewWithS3Data(s3 S3, templates fs.FS, allowDelete bool, listRecursive bool, rootURL string, current *S3Instance, instances []*S3Instance) http.HandlerFunc { - type objectWithIcon struct { - Key string - Size int64 - SizeDisplay string - LastModified time.Time - Owner string - Icon string - IsFolder bool - DisplayName string - } - type pageData struct { RootURL string BucketName string - Objects []objectWithIcon + Objects []objectWithIconExtended AllowDelete bool Paths []string CurrentPath string @@ -161,15 +235,67 @@ func createBucketViewWithS3Data(s3 S3, templates fs.FS, allowDelete bool, listRe S3Instances []*S3Instance HasError bool ErrorMessage string + SortBy string + SortOrder string + Page int + PerPage int + TotalItems int + TotalPages int + HasPrevPage bool + HasNextPage bool + Search string } return func(w http.ResponseWriter, r *http.Request) { - regex := regexp.MustCompile(`\/buckets\/([^\/]*)\/?(.*)`) - matches := regex.FindStringSubmatch(r.RequestURI) - bucketName := matches[1] - path := matches[2] + // Updated regex to handle instance in the URL path + regex := regexp.MustCompile(`\/([^\/]+)\/buckets\/([^\/]*)\/?(.*)`) + matches := regex.FindStringSubmatch(r.URL.Path) + if len(matches) < 3 { + handleHTTPError(w, fmt.Errorf("invalid URL path")) + return + } + bucketName := matches[2] + path := "" + if len(matches) > 3 { + path = matches[3] + } - var objs []objectWithIcon + // Get sorting parameters from query string + sortBy := r.URL.Query().Get("sortBy") + sortOrder := r.URL.Query().Get("sortOrder") + + // Default sorting + if sortBy == "" { + sortBy = "key" + } + if sortOrder == "" { + sortOrder = "asc" + } + + // Get pagination parameters + page := 1 + if pageStr := r.URL.Query().Get("page"); pageStr != "" { + if p, err := strconv.Atoi(pageStr); err == nil && p > 0 { + page = p + } + } + + perPage := 25 + showAll := false + if perPageStr := r.URL.Query().Get("perPage"); perPageStr != "" { + if pp, err := strconv.Atoi(perPageStr); err == nil { + if pp == 0 || pp == -1 { + showAll = true + } else if pp > 0 { + perPage = pp + } + } + } + + // Get search parameter + search := strings.TrimSpace(r.URL.Query().Get("search")) + + var objs []objectWithIconExtended hasError := false errorMessage := "" @@ -194,7 +320,7 @@ func createBucketViewWithS3Data(s3 S3, templates fs.FS, allowDelete bool, listRe sizeDisplay := FormatFileSize(object.Size) - obj := objectWithIcon{ + obj := objectWithIconExtended{ Key: object.Key, Size: object.Size, SizeDisplay: sizeDisplay, @@ -207,6 +333,63 @@ func createBucketViewWithS3Data(s3 S3, templates fs.FS, allowDelete bool, listRe objs = append(objs, obj) } + // Filter objects based on search query + if search != "" && !hasError { + searchLower := strings.ToLower(search) + filteredObjs := make([]objectWithIconExtended, 0) 
+ for _, obj := range objs { + // Search in DisplayName and Key (case-insensitive) + if strings.Contains(strings.ToLower(obj.DisplayName), searchLower) || + strings.Contains(strings.ToLower(obj.Key), searchLower) { + filteredObjs = append(filteredObjs, obj) + } + } + objs = filteredObjs + } + + // Sort objects based on sortBy and sortOrder + if !hasError { + sortObjectsWithSize(objs, sortBy, sortOrder) + } + + // Calculate pagination + totalItems := len(objs) + totalPages := 1 + if !showAll { + totalPages = (totalItems + perPage - 1) / perPage + if totalPages == 0 { + totalPages = 1 + } + if page > totalPages { + page = totalPages + } + } + + // Paginate objects + if showAll { + // Show all items - no pagination + perPage = totalItems + if perPage == 0 { + perPage = 1 // Avoid division by zero + } + page = 1 + } else { + // Apply pagination + start := (page - 1) * perPage + end := start + perPage + if start < 0 { + start = 0 + } + if end > totalItems { + end = totalItems + } + if start < totalItems && !hasError { + objs = objs[start:end] + } else if !hasError { + objs = []objectWithIconExtended{} + } + } + data := pageData{ RootURL: rootURL, BucketName: bucketName, @@ -218,9 +401,37 @@ func createBucketViewWithS3Data(s3 S3, templates fs.FS, allowDelete bool, listRe S3Instances: instances, HasError: hasError, ErrorMessage: errorMessage, + SortBy: sortBy, + SortOrder: sortOrder, + Page: page, + PerPage: perPage, + TotalItems: totalItems, + TotalPages: totalPages, + HasPrevPage: page > 1, + HasNextPage: page < totalPages, + Search: search, + } + + funcMap := template.FuncMap{ + "add": func(a, b int) int { return a + b }, + "sub": func(a, b int) int { return a - b }, + "mul": func(a, b int) int { return a * b }, + "min": func(a, b int) int { + if a < b { + return a + } + return b + }, + "iterate": func(start, end int) []int { + result := make([]int, 0, end-start) + for i := start; i < end; i++ { + result = append(result, i) + } + return result + }, } - t, err := template.ParseFS(templates, "layout.html.tmpl", "bucket.html.tmpl") + t, err := template.New("").Funcs(funcMap).ParseFS(templates, "layout.html.tmpl", "bucket.html.tmpl") if err != nil { handleHTTPError(w, fmt.Errorf("error parsing template files: %w", err)) return @@ -232,3 +443,65 @@ func createBucketViewWithS3Data(s3 S3, templates fs.FS, allowDelete bool, listRe } } } + +// sortObjectsWithSize sorts objects with SizeDisplay field based on the specified field and order +func sortObjectsWithSize(objs []objectWithIconExtended, sortBy, sortOrder string) { + sort.Slice(objs, func(i, j int) bool { + var less bool + switch sortBy { + case "size": + less = objs[i].Size < objs[j].Size + case "owner": + less = strings.ToLower(objs[i].Owner) < strings.ToLower(objs[j].Owner) + case "lastModified": + less = objs[i].LastModified.Before(objs[j].LastModified) + case "key": + fallthrough + default: + less = strings.ToLower(objs[i].DisplayName) < strings.ToLower(objs[j].DisplayName) + } + + if sortOrder == "desc" { + return !less + } + return less + }) +} + +// HandleBulkDeleteObjectsWithManager deletes multiple objects using MultiS3Manager. 
+func HandleBulkDeleteObjectsWithManager(manager *MultiS3Manager) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client + // Delegate to the bulk delete handler with the current S3 client + handler := HandleBulkDeleteObjects(s3) + handler(w, r) + } +} + +// HandleBulkDownloadObjectsWithManager downloads multiple objects as a ZIP using MultiS3Manager. +func HandleBulkDownloadObjectsWithManager(manager *MultiS3Manager) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + instanceName := vars["instance"] + + current, err := manager.GetInstance(instanceName) + if err != nil { + http.Error(w, fmt.Sprintf("Instance not found: %s", err.Error()), http.StatusNotFound) + return + } + + s3 := current.Client + // Delegate to the bulk download handler with the current S3 client + handler := HandleBulkDownloadObjects(s3) + handler(w, r) + } +} diff --git a/internal/app/s3manager/mocks/s3.go b/internal/app/s3manager/mocks/s3.go index 2c1cb382..93b65d57 100644 --- a/internal/app/s3manager/mocks/s3.go +++ b/internal/app/s3manager/mocks/s3.go @@ -47,6 +47,9 @@ var _ s3manager.S3 = &S3Mock{} // RemoveObjectFunc: func(ctx context.Context, bucketName string, objectName string, opts minio.RemoveObjectOptions) error { // panic("mock out the RemoveObject method") // }, +// RemoveObjectsFunc: func(ctx context.Context, bucketName string, objectsCh <-chan minio.ObjectInfo, opts minio.RemoveObjectsOptions) <-chan minio.RemoveObjectError { +// panic("mock out the RemoveObjects method") +// }, // } // // // use mockedS3 in code that requires s3manager.S3 @@ -78,6 +81,9 @@ type S3Mock struct { // RemoveObjectFunc mocks the RemoveObject method. RemoveObjectFunc func(ctx context.Context, bucketName string, objectName string, opts minio.RemoveObjectOptions) error + // RemoveObjectsFunc mocks the RemoveObjects method. + RemoveObjectsFunc func(ctx context.Context, bucketName string, objectsCh <-chan minio.ObjectInfo, opts minio.RemoveObjectsOptions) <-chan minio.RemoveObjectError + // calls tracks calls to the methods. calls struct { // GetObject holds details about calls to the GetObject method. @@ -160,6 +166,17 @@ type S3Mock struct { // Opts is the opts argument value. Opts minio.RemoveObjectOptions } + // RemoveObjects holds details about calls to the RemoveObjects method. + RemoveObjects []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // BucketName is the bucketName argument value. + BucketName string + // ObjectsCh is the objectsCh argument value. + ObjectsCh <-chan minio.ObjectInfo + // Opts is the opts argument value. + Opts minio.RemoveObjectsOptions + } } lockGetObject sync.RWMutex lockListBuckets sync.RWMutex @@ -169,6 +186,7 @@ type S3Mock struct { lockPutObject sync.RWMutex lockRemoveBucket sync.RWMutex lockRemoveObject sync.RWMutex + lockRemoveObjects sync.RWMutex } // GetObject calls GetObjectFunc. @@ -506,3 +524,47 @@ func (mock *S3Mock) RemoveObjectCalls() []struct { mock.lockRemoveObject.RUnlock() return calls } + +// RemoveObjects calls RemoveObjectsFunc. 
+func (mock *S3Mock) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan minio.ObjectInfo, opts minio.RemoveObjectsOptions) <-chan minio.RemoveObjectError { + if mock.RemoveObjectsFunc == nil { + panic("S3Mock.RemoveObjectsFunc: method is nil but S3.RemoveObjects was just called") + } + callInfo := struct { + Ctx context.Context + BucketName string + ObjectsCh <-chan minio.ObjectInfo + Opts minio.RemoveObjectsOptions + }{ + Ctx: ctx, + BucketName: bucketName, + ObjectsCh: objectsCh, + Opts: opts, + } + mock.lockRemoveObjects.Lock() + mock.calls.RemoveObjects = append(mock.calls.RemoveObjects, callInfo) + mock.lockRemoveObjects.Unlock() + return mock.RemoveObjectsFunc(ctx, bucketName, objectsCh, opts) +} + +// RemoveObjectsCalls gets all the calls that were made to RemoveObjects. +// Check the length with: +// +// len(mockedS3.RemoveObjectsCalls()) +func (mock *S3Mock) RemoveObjectsCalls() []struct { + Ctx context.Context + BucketName string + ObjectsCh <-chan minio.ObjectInfo + Opts minio.RemoveObjectsOptions +} { + var calls []struct { + Ctx context.Context + BucketName string + ObjectsCh <-chan minio.ObjectInfo + Opts minio.RemoveObjectsOptions + } + mock.lockRemoveObjects.RLock() + calls = mock.calls.RemoveObjects + mock.lockRemoveObjects.RUnlock() + return calls +} diff --git a/internal/app/s3manager/multi_s3_manager.go b/internal/app/s3manager/multi_s3_manager.go index 5239ab51..bf272c12 100644 --- a/internal/app/s3manager/multi_s3_manager.go +++ b/internal/app/s3manager/multi_s3_manager.go @@ -21,7 +21,6 @@ type S3Instance struct { // MultiS3Manager manages multiple S3 instances type MultiS3Manager struct { instances map[string]*S3Instance - currentID string instanceOrder []string mu sync.RWMutex } @@ -98,11 +97,6 @@ func NewMultiS3Manager(configs []S3InstanceConfig) (*MultiS3Manager, error) { manager.instances[instanceID] = instance manager.instanceOrder = append(manager.instanceOrder, instanceID) - - // Set the first instance as current - if i == 0 { - manager.currentID = instanceID - } } if len(manager.instances) == 0 { @@ -113,46 +107,56 @@ func NewMultiS3Manager(configs []S3InstanceConfig) (*MultiS3Manager, error) { return manager, nil } +// GetInstance returns an S3 instance by its ID or Name +func (m *MultiS3Manager) GetInstance(identifier string) (*S3Instance, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + // Try to find by ID first + if instance, exists := m.instances[identifier]; exists { + return instance, nil + } + + // Try to find by Name + for _, instance := range m.instances { + if instance.Name == identifier { + return instance, nil + } + } + + return nil, fmt.Errorf("S3 instance '%s' not found", identifier) +} + // GetCurrentClient returns the currently active S3 client +// Deprecated: Use GetInstance instead func (m *MultiS3Manager) GetCurrentClient() S3 { m.mu.RLock() defer m.mu.RUnlock() - instance, exists := m.instances[m.currentID] - if !exists && len(m.instances) > 0 { - // Fallback to first instance if current doesn't exist - m.currentID = m.instanceOrder[0] - instance = m.instances[m.currentID] + // Return first instance as fallback for backwards compatibility + if len(m.instanceOrder) > 0 { + return m.instances[m.instanceOrder[0]].Client } - return instance.Client + return nil } // GetCurrentInstance returns the currently active S3 instance info +// Deprecated: Use GetInstance instead func (m *MultiS3Manager) GetCurrentInstance() *S3Instance { m.mu.RLock() defer m.mu.RUnlock() - instance, exists := m.instances[m.currentID] - if !exists 
&& len(m.instances) > 0 { - // Fallback to first instance if current doesn't exist - m.currentID = m.instanceOrder[0] - instance = m.instances[m.currentID] + // Return first instance as fallback for backwards compatibility + if len(m.instanceOrder) > 0 { + return m.instances[m.instanceOrder[0]] } - return instance + return nil } // SetCurrentInstance switches to the specified S3 instance +// Deprecated: Instance selection is now URL-based func (m *MultiS3Manager) SetCurrentInstance(instanceID string) error { - m.mu.Lock() - defer m.mu.Unlock() - - if _, exists := m.instances[instanceID]; !exists { - return fmt.Errorf("S3 instance with ID %s not found", instanceID) - } - - m.currentID = instanceID - log.Printf("Switched to S3 instance: %s (%s)", instanceID, m.instances[instanceID].Name) - return nil + return fmt.Errorf("instance switching is no longer supported - use URL-based instance selection") } // GetAllInstances returns all available S3 instances diff --git a/internal/app/s3manager/s3.go b/internal/app/s3manager/s3.go index 75c22f33..a171cfdb 100644 --- a/internal/app/s3manager/s3.go +++ b/internal/app/s3manager/s3.go @@ -21,6 +21,7 @@ type S3 interface { PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, opts minio.PutObjectOptions) (minio.UploadInfo, error) RemoveBucket(ctx context.Context, bucketName string) error RemoveObject(ctx context.Context, bucketName, objectName string, opts minio.RemoveObjectOptions) error + RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan minio.ObjectInfo, opts minio.RemoveObjectsOptions) <-chan minio.RemoveObjectError } // SSEType describes a type of server side encryption. diff --git a/main.go b/main.go index 27345134..5d3c3cdd 100644 --- a/main.go +++ b/main.go @@ -190,26 +190,37 @@ func main() { // Set up router r := mux.NewRouter() - r.Handle("/", http.RedirectHandler(rootURL+"/buckets", http.StatusPermanentRedirect)).Methods(http.MethodGet) + + // Root redirects to first instance's buckets page + r.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + instances := s3Manager.GetAllInstances() + if len(instances) > 0 { + http.Redirect(w, r, rootURL+"/"+instances[0].Name+"/buckets", http.StatusPermanentRedirect) + } else { + http.Error(w, "No S3 instances configured", http.StatusInternalServerError) + } + })).Methods(http.MethodGet) + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.FS(statics)))).Methods(http.MethodGet) // S3 instance management endpoints r.Handle("/api/s3-instances", s3manager.HandleGetS3Instances(s3Manager)).Methods(http.MethodGet) - r.Handle("/api/s3-instances/{instanceId}/switch", s3manager.HandleSwitchS3Instance(s3Manager)).Methods(http.MethodPost) - // S3 management endpoints (using current instance) - r.Handle("/buckets", s3manager.HandleBucketsViewWithManager(s3Manager, templates, configuration.AllowDelete, rootURL)).Methods(http.MethodGet) - r.PathPrefix("/buckets/").Handler(s3manager.HandleBucketViewWithManager(s3Manager, templates, configuration.AllowDelete, configuration.ListRecursive, rootURL)).Methods(http.MethodGet) - r.Handle("/api/buckets", s3manager.HandleCreateBucketWithManager(s3Manager)).Methods(http.MethodPost) + // S3 management endpoints (with instance in URL) + r.Handle("/{instance}/buckets", s3manager.HandleBucketsViewWithManager(s3Manager, templates, configuration.AllowDelete, rootURL)).Methods(http.MethodGet) + 
r.PathPrefix("/{instance}/buckets/").Handler(s3manager.HandleBucketViewWithManager(s3Manager, templates, configuration.AllowDelete, configuration.ListRecursive, rootURL)).Methods(http.MethodGet) + r.Handle("/{instance}/api/buckets", s3manager.HandleCreateBucketWithManager(s3Manager)).Methods(http.MethodPost) if configuration.AllowDelete { - r.Handle("/api/buckets/{bucketName}", s3manager.HandleDeleteBucketWithManager(s3Manager)).Methods(http.MethodDelete) + r.Handle("/{instance}/api/buckets/{bucketName}", s3manager.HandleDeleteBucketWithManager(s3Manager)).Methods(http.MethodDelete) } - r.Handle("/api/buckets/{bucketName}/objects", s3manager.HandleCreateObjectWithManager(s3Manager, sseType)).Methods(http.MethodPost) - r.Handle("/api/buckets/{bucketName}/objects/{objectName:.*}/url", s3manager.HandleGenerateURLWithManager(s3Manager)).Methods(http.MethodGet) - r.Handle("/api/buckets/{bucketName}/objects/{objectName:.*}", s3manager.HandleGetObjectWithManager(s3Manager, configuration.ForceDownload)).Methods(http.MethodGet) + r.Handle("/{instance}/api/buckets/{bucketName}/objects", s3manager.HandleCreateObjectWithManager(s3Manager, sseType)).Methods(http.MethodPost) + r.Handle("/{instance}/api/buckets/{bucketName}/objects/{objectName:.*}/url", s3manager.HandleGenerateURLWithManager(s3Manager)).Methods(http.MethodGet) + r.Handle("/{instance}/api/buckets/{bucketName}/objects/{objectName:.*}", s3manager.HandleGetObjectWithManager(s3Manager, configuration.ForceDownload)).Methods(http.MethodGet) if configuration.AllowDelete { - r.Handle("/api/buckets/{bucketName}/objects/{objectName:.*}", s3manager.HandleDeleteObjectWithManager(s3Manager)).Methods(http.MethodDelete) + r.Handle("/{instance}/api/buckets/{bucketName}/objects/{objectName:.*}", s3manager.HandleDeleteObjectWithManager(s3Manager)).Methods(http.MethodDelete) + r.Handle("/{instance}/api/buckets/{bucketName}/objects/bulk-delete", s3manager.HandleBulkDeleteObjectsWithManager(s3Manager)).Methods(http.MethodPost) } + r.Handle("/{instance}/api/buckets/{bucketName}/objects/bulk-download", s3manager.HandleBulkDownloadObjectsWithManager(s3Manager)).Methods(http.MethodPost) lr := logging.Handler(os.Stdout)(r) srv := &http.Server{ diff --git a/web/template/bucket.html.tmpl b/web/template/bucket.html.tmpl index c54d1ee1..c3e0703d 100644 --- a/web/template/bucket.html.tmpl +++ b/web/template/bucket.html.tmpl @@ -1,4 +1,6 @@ {{ define "content" }} +{{ $instancePath := "" }} +{{ if $.CurrentS3 }}{{ $instancePath = printf "/%s" $.CurrentS3.Name }}{{ end }} -
+
{{ if .HasError }}
@@ -57,10 +71,10 @@ Connection Error

{{ .ErrorMessage }}

{{ if gt (len .S3Instances) 1 }} -

Tip: Use the instance switcher button (bottom-left) to try a different S3 instance.

+

Tip: Try switching to another S3 instance from the list below.

{{ end }} @@ -70,14 +84,97 @@
- {{ else if .Objects }} + {{ else }} + + +
+
+
+
+ search + + {{ if .Search }} + close + {{ end }} +
+
+
+ {{ if .Search }} +
+

+ info_outline + Showing results for "{{ .Search }}" + {{ if eq .TotalItems 0 }} + - No matches found + {{ end }} +

+
+ {{ end }} +
+ + {{ if .Objects }} + + + - - - - + + + + + @@ -85,9 +182,17 @@ {{ range $index, $object := .Objects }} +
KeySizeOwnerLast Modified + + + Key + {{ if eq .SortBy "key" }} + {{ if eq .SortOrder "asc" }} + arrow_upward + {{ else }} + arrow_downward + {{ end }} + {{ end }} + + Size + {{ if eq .SortBy "size" }} + {{ if eq .SortOrder "asc" }} + arrow_upward + {{ else }} + arrow_downward + {{ end }} + {{ end }} + + Owner + {{ if eq .SortBy "owner" }} + {{ if eq .SortOrder "asc" }} + arrow_upward + {{ else }} + arrow_downward + {{ end }} + {{ end }} + + Last Modified + {{ if eq .SortBy "lastModified" }} + {{ if eq .SortOrder "asc" }} + arrow_upward + {{ else }} + arrow_downward + {{ end }} + {{ end }} +
+ {{ if not $object.IsFolder }} + + {{ end }} + {{ $object.Icon }} {{ $object.DisplayName }} @@ -102,7 +207,7 @@
+ + +
+
+
+ + +
+

Showing {{ if gt .TotalItems 0 }}{{ add (mul (sub .Page 1) .PerPage) 1 }} - {{ min (mul .Page .PerPage) .TotalItems }}{{ else }}0{{ end }} of {{ .TotalItems }} items

+
+
+
    + {{ if .HasPrevPage }} +
  • first_page
  • +
  • chevron_left
  • + {{ else }} +
  • first_page
  • +
  • chevron_left
  • + {{ end }} + + {{ $currentPage := .Page }} + {{ $totalPages := .TotalPages }} + {{ $startPage := sub $currentPage 2 }} + {{ if lt $startPage 1 }}{{ $startPage = 1 }}{{ end }} + {{ $endPage := add $currentPage 2 }} + {{ if gt $endPage $totalPages }}{{ $endPage = $totalPages }}{{ end }} + + {{ if gt $startPage 1 }} +
  • 1
  • + {{ if gt $startPage 2 }} +
  • ...
  • + {{ end }} + {{ end }} + + {{ range $i := iterate $startPage (add $endPage 1) }} + {{ if eq $i $currentPage }} +
  • {{ $i }}
  • + {{ else }} +
  • {{ $i }}
  • + {{ end }} + {{ end }} + + {{ if lt $endPage $totalPages }} + {{ if lt $endPage (sub $totalPages 1) }} +
  • ...
  • + {{ end }} +
  • {{ $totalPages }}
  • + {{ end }} + + {{ if .HasNextPage }} +
  • chevron_right
  • +
  • last_page
  • + {{ else }} +
  • chevron_right
  • +
  • last_page
  • + {{ end }} +
+
+
+ {{ else }} -

No objects in {{ .BucketName }}/{{ .CurrentPath }} yet

+

+ {{ if .Search }} + No objects matching "{{ .Search }}" found in {{ .BucketName }}/{{ .CurrentPath }} + {{ else }} + No objects in {{ .BucketName }}/{{ .CurrentPath }} yet + {{ end }} +

+ {{ end }} {{ end }}
@@ -220,10 +399,67 @@
diff --git a/web/template/buckets.html.tmpl b/web/template/buckets.html.tmpl index 9b7131b8..b310d08d 100644 --- a/web/template/buckets.html.tmpl +++ b/web/template/buckets.html.tmpl @@ -1,4 +1,6 @@ {{ define "content" }} +{{ $instancePath := "" }} +{{ if $.CurrentS3 }}{{ $instancePath = printf "/%s" $.CurrentS3.Name }}{{ end }}
@@ -36,7 +38,7 @@ {{ else if .Buckets }} {{ range $bucket := .Buckets }}
- +
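Both bucket.html.tmpl and buckets.html.tmpl now derive an instance prefix before building links. A minimal, self-contained sketch of that pattern, assuming a hypothetical instance named "minio-dev" and a root URL of "/s3manager" (neither value comes from this patch):

    package main

    import (
    	"os"
    	"text/template"
    )

    type s3Instance struct{ Name string }

    type page struct {
    	RootURL   string
    	CurrentS3 *s3Instance
    }

    // Mirrors the $instancePath idiom from the templates: empty when no
    // instance is set, "/<instance name>" otherwise.
    const link = `{{ $instancePath := "" }}{{ if .CurrentS3 }}{{ $instancePath = printf "/%s" .CurrentS3.Name }}{{ end }}{{ .RootURL }}{{ $instancePath }}/buckets/my-bucket`

    func main() {
    	t := template.Must(template.New("link").Parse(link))
    	// Prints "/s3manager/minio-dev/buckets/my-bucket".
    	_ = t.Execute(os.Stdout, page{RootURL: "/s3manager", CurrentS3: &s3Instance{Name: "minio-dev"}})
    }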
diff --git a/web/template/layout.html.tmpl b/web/template/layout.html.tmpl
--- a/web/template/layout.html.tmpl
+++ b/web/template/layout.html.tmpl
@@ -97,34 +100,16 @@
     function toggleS3Dropdown() {
         const dropdown = document.getElementById('s3-instance-dropdown');
-        dropdown.style.display = dropdown.style.display === 'block' ? 'none' : 'block';
-    }
-
-    function switchS3Instance(instanceId, instanceName) {
-        $.ajax({
-            type: 'POST',
-            url: '{{$.RootURL}}/api/s3-instances/' + instanceId + '/switch',
-            success: function() {
-                // Show toast notification
-                M.toast({html: 'Switched to: ' + instanceName});
-                // Always redirect to buckets list when switching instances
-                setTimeout(function() {
-                    window.location.href = '{{$.RootURL}}/buckets';
-                }, 1000);
-            },
-            error: function() {
-                M.toast({html: 'Failed to switch S3 instance'});
-            }
-        });
-        // Hide dropdown
-        document.getElementById('s3-instance-dropdown').style.display = 'none';
+        if (dropdown) {
+            dropdown.style.display = dropdown.style.display === 'block' ? 'none' : 'block';
+        }
     }

     // Close dropdown when clicking outside
     document.addEventListener('click', function(event) {
         const switcher = document.querySelector('.s3-instance-switcher');
         const dropdown = document.getElementById('s3-instance-dropdown');
-        if (!switcher.contains(event.target)) {
+        if (switcher && dropdown && !switcher.contains(event.target)) {
             dropdown.style.display = 'none';
         }
     });
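For completeness, a rough sketch of how a client could exercise the new instance-scoped bulk-delete route registered in main.go (only available when configuration.AllowDelete is set). The listening address, instance name, bucket, and object keys below are placeholder assumptions; only the URL shape and the JSON body, which matches BulkDeleteRequest, come from the patch:

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    // bulkDelete posts {"keys": [...]} to
    // /{instance}/api/buckets/{bucketName}/objects/bulk-delete.
    func bulkDelete(baseURL, instance, bucket string, keys []string) error {
    	body, err := json.Marshal(map[string][]string{"keys": keys})
    	if err != nil {
    		return err
    	}
    	url := fmt.Sprintf("%s/%s/api/buckets/%s/objects/bulk-delete", baseURL, instance, bucket)
    	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
    	if err != nil {
    		return err
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode != http.StatusOK {
    		return fmt.Errorf("bulk delete failed: %s", resp.Status)
    	}
    	return nil
    }

    func main() {
    	// Placeholder values for a local run; adjust to a real deployment.
    	err := bulkDelete("http://localhost:8080", "minio-dev", "my-bucket",
    		[]string{"reports/2024.csv", "tmp/old.log"})
    	if err != nil {
    		fmt.Println(err)
    	}
    }

The bulk-download route, by contrast, expects a regular form POST with a "keys" field containing the same JSON array of object keys and streams back a ZIP archive.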