diff --git a/.env b/.env deleted file mode 100644 index 9befeb2..0000000 --- a/.env +++ /dev/null @@ -1,12 +0,0 @@ -LDAP_SERVER=placeholder.placeholder -PC_PORT=8080 -SECRET_KEY=placeholder -LDAP_BIND_DN=cn=ldap-reader,cn=users,dc=domain,dc=com -LDAP_BIND_PASSWORD=placeholder -LDAP_BASE_DN=dc=domain,dc=com -PROXMOX_SERVER=your.domain.com -PROXMOX_PORT=8006 -PROXMOX_TOKEN_ID=kaminosvc@pve!kamino-token -PROXMOX_TOKEN_SECRET=placeholder -PROXMOX_VERIFY_SSL="false" -PROXMOX_NODES=gonk,commando,gemini diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4cc65c9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,35 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Code coverage profiles and other test artifacts +*.out +coverage.* +*.coverprofile +profile.cov + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env file +*.env + +# Uploaded template images +uploads/ + +# Editor/IDE +# .idea/ +# .vscode/ \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 3d4fa9a..17605eb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,9 @@ -FROM golang:1.24 as builder +FROM golang:1.24 AS builder WORKDIR /app COPY . . RUN go mod download -RUN go build -o server . +RUN go build -o server ./cmd/api FROM debian:bookworm-slim WORKDIR /app diff --git a/auth/auth.go b/auth/auth.go deleted file mode 100644 index 7fa108c..0000000 --- a/auth/auth.go +++ /dev/null @@ -1,144 +0,0 @@ -package auth - -import ( - "fmt" - "log" - "net/http" - "strings" - - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - -// struct to hold username and password received from post request -type LoginRequest struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// called by /api/login post request -func LoginHandler(c *gin.Context) { - var req LoginRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request format"}) - return - } - - username := strings.TrimSpace(req.Username) - password := req.Password - - // return error if either username or password are empty - if username == "" || password == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username and password are required"}) - return - } - - // Connect to LDAP - ldapConn, err := ConnectToLDAP() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("LDAP connection failed: %v", err)}) - return - } - defer ldapConn.Close() - - // Authenticate user - _, groups, err := ldapConn.AuthenticateUser(username, password) - if err != nil { - c.JSON(http.StatusUnauthorized, gin.H{"error": err.Error()}) - return - } - - // Check if user is admin - isAdmin := CheckIfAdmin(groups) - - log.Println("logging user membership: ", groups) - for _, group := range groups { - log.Println("User is a member of: ", group) - } - - // create session - session := sessions.Default(c) - session.Set("authenticated", true) - session.Set("username", username) - session.Set("is_admin", isAdmin) - session.Save() - - c.JSON(http.StatusOK, gin.H{"message": "Login successful"}) -} - -// handle clearing session cookies -func LogoutHandler(c *gin.Context) { - session := sessions.Default(c) - session.Clear() - 
session.Save() - c.JSON(http.StatusOK, gin.H{"message": "Logged out"}) -} - -// check logged in profile -func ProfileHandler(c *gin.Context) { - session := sessions.Default(c) - username := session.Get("username") - isAdmin := session.Get("is_admin") - - if username == "" { - c.JSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "username": username, - "isAdmin": isAdmin, - }) -} - -// check if user is authenticated -func IsAuthenticated(c *gin.Context) (bool, string) { - session := sessions.Default(c) - auth, ok := session.Get("authenticated").(bool) - if !ok || !auth { - return false, "" - } - username, _ := session.Get("username").(string) - return true, username -} - -// check if user is in "Domain Admins" group -func isAdmin(c *gin.Context) bool { - session := sessions.Default(c) - isAdmin, _ := session.Get("is_admin").(bool) - return isAdmin -} - -// api endpoint that returns true if user is already authenticated -func SessionHandler(c *gin.Context) { - if ok, username := IsAuthenticated(c); ok { - is_admin := isAdmin(c) - c.JSON(http.StatusOK, gin.H{ - "authenticated": true, - "username": username, - "isAdmin": is_admin, - }) - } else { - c.JSON(http.StatusUnauthorized, gin.H{"authenticated": false}) - } -} - -// auth protected routes helper function -func AuthRequired(c *gin.Context) { - if ok, _ := IsAuthenticated(c); !ok { - c.JSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) - c.Abort() - return - } - c.Next() -} - -// admin protected routes helper function -func AdminRequired(c *gin.Context) { - if !isAdmin(c) { - c.JSON(http.StatusForbidden, gin.H{"error": "Forbidden"}) - c.Abort() - return - } - c.Next() -} diff --git a/auth/ldap.go b/auth/ldap.go deleted file mode 100644 index ebeaae0..0000000 --- a/auth/ldap.go +++ /dev/null @@ -1,185 +0,0 @@ -package auth - -import ( - "fmt" - "os" - "strings" - "time" - - "github.com/go-ldap/ldap/v3" -) - -// LDAPConnection holds the LDAP connection and configuration -type LDAPConnection struct { - conn *ldap.Conn - server string - baseDN string - bindDN string - bindPassword string -} - -// ConnectToLDAP creates a new LDAP connection and returns it -func ConnectToLDAP() (*LDAPConnection, error) { - // LDAP configuration from environment variables - ldapServer := os.Getenv("LDAP_SERVER") - baseDN := os.Getenv("LDAP_BASE_DN") - bindDN := os.Getenv("LDAP_BIND_DN") - bindPassword := os.Getenv("LDAP_BIND_PASSWORD") - - // check LDAP configuration - if ldapServer == "" || baseDN == "" || bindDN == "" || bindPassword == "" { - return nil, fmt.Errorf("LDAP configuration is missing") - } - - // connect to LDAP server - conn, err := ldap.DialURL("ldap://" + ldapServer + ":389") - if err != nil { - return nil, fmt.Errorf("LDAP connection failed: %v", err) - } - - // bind as service account - err = conn.Bind(bindDN, bindPassword) - if err != nil { - conn.Close() - return nil, fmt.Errorf("LDAP service account bind failed: %v", err) - } - - return &LDAPConnection{ - conn: conn, - server: ldapServer, - baseDN: baseDN, - bindDN: bindDN, - bindPassword: bindPassword, - }, nil -} - -// Close closes the LDAP connection -func (lc *LDAPConnection) Close() { - if lc.conn != nil { - lc.conn.Close() - } -} - -// AuthenticateUser authenticates a user against LDAP and returns user info -func (lc *LDAPConnection) AuthenticateUser(username, password string) (string, []string, error) { - // Define search request - searchRequest := ldap.NewSearchRequest( - lc.baseDN, - ldap.ScopeWholeSubtree, 
ldap.NeverDerefAliases, 0, 0, false, - fmt.Sprintf("(sAMAccountName=%s)", username), - []string{"dn", "memberOf"}, - nil, - ) - - // search for user - sr, err := lc.conn.Search(searchRequest) - if err != nil { - return "", nil, fmt.Errorf("user not found in LDAP: %v", err) - } - - // handle user not found - if len(sr.Entries) != 1 { - return "", nil, fmt.Errorf("user not found or multiple users found") - } - - userDN := sr.Entries[0].DN - groups := sr.Entries[0].GetAttributeValues("memberOf") - - // bind as user to verify password - err = lc.conn.Bind(userDN, password) - if err != nil { - return "", nil, fmt.Errorf("invalid credentials") - } - - // rebind as service account for further operations - err = lc.conn.Bind(lc.bindDN, lc.bindPassword) - if err != nil { - return "", nil, fmt.Errorf("failed to rebind as service account") - } - - return userDN, groups, nil -} - -// GetAllUsers fetches all users from Active Directory -func (lc *LDAPConnection) GetAllUsers() (*UserResponse, error) { - // search for all users - searchRequest := ldap.NewSearchRequest( - lc.baseDN, - ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, - "(&(objectClass=user)(objectCategory=person)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))", - []string{"sAMAccountName", "whenCreated", "memberOf"}, - nil, - ) - - // perform search - sr, err := lc.conn.Search(searchRequest) - if err != nil { - return nil, fmt.Errorf("LDAP search failed: %v", err) - } - - var userResponse UserResponse - userResponse.Users = make([]UserWithRoles, 0) - - // process each user entry - for _, entry := range sr.Entries { - username := entry.GetAttributeValue("sAMAccountName") - whenCreated := entry.GetAttributeValue("whenCreated") - groups := entry.GetAttributeValues("memberOf") - - // skip if no username - if username == "" { - continue - } - - // parse and format creation date - createdDate := "Unknown" - if whenCreated != "" { - // AD stores dates in GeneralizedTime format: YYYYMMDDHHMMSS.0Z - if parsedTime, err := time.Parse("20060102150405.0Z", whenCreated); err == nil { - createdDate = parsedTime.Format("2006-01-02 15:04:05") - } - } - - // check if user is admin - isAdmin := false - for _, group := range groups { - if strings.Contains(strings.ToLower(group), "cn=domain admins") || strings.Contains(strings.ToLower(group), "cn=kamino admin") { - isAdmin = true - break - } - } - - // clean up group names (extract CN values) - cleanGroups := make([]string, 0) - for _, group := range groups { - // extract CN from DN format - parts := strings.Split(group, ",") - if len(parts) > 0 && strings.HasPrefix(strings.ToLower(parts[0]), "cn=") { - groupName := strings.TrimPrefix(parts[0], "CN=") - groupName = strings.TrimPrefix(groupName, "cn=") - cleanGroups = append(cleanGroups, groupName) - } - } - - user := UserWithRoles{ - Username: username, - CreatedDate: createdDate, - IsAdmin: isAdmin, - Groups: cleanGroups, - } - - userResponse.Users = append(userResponse.Users, user) - } - - return &userResponse, nil -} - -// CheckIfAdmin checks if a user is in the Domain Admins group -func CheckIfAdmin(groups []string) bool { - for _, group := range groups { - if strings.Contains(strings.ToLower(group), "cn=domain admins") { - return true - } - } - return false -} diff --git a/auth/users.go b/auth/users.go deleted file mode 100644 index 734d1ab..0000000 --- a/auth/users.go +++ /dev/null @@ -1,73 +0,0 @@ -package auth - -import ( - "log" - "net/http" - - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - -type UserResponse 
struct { - Users []UserWithRoles `json:"users"` -} - -type UserWithRoles struct { - Username string `json:"username"` - CreatedDate string `json:"createdDate"` - IsAdmin bool `json:"isAdmin"` - Groups []string `json:"groups"` -} - -// helper function that fetches all users from Active Directory -func buildUserResponse() (*UserResponse, error) { - // Connect to LDAP - ldapConn, err := ConnectToLDAP() - if err != nil { - return nil, err - } - defer ldapConn.Close() - - // Get all users using the LDAP connection - return ldapConn.GetAllUsers() -} - -/* - * ===== ADMIN ENDPOINT ===== - * This function returns a list of - * all users and their roles in Active Directory - */ -func GetUsers(c *gin.Context) { - session := sessions.Default(c) - username := session.Get("username") - isAdmin := session.Get("is_admin") - - // make sure user is authenticated and is admin - if !isAdmin.(bool) { - log.Printf("Forbidden access attempt by user %s", username) - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only Admin users can see all domain users", - }) - return - } - - // fetch user response - userResponse, err := getAdminUserResponse() - - // if error, return error status - if err != nil { - log.Printf("Failed to fetch user list for admin %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to fetch user list from Active Directory", - "details": err.Error(), - }) - return - } - - log.Printf("Successfully fetched user list for admin %s", username) - c.JSON(http.StatusOK, userResponse) -} - -func getAdminUserResponse() (*UserResponse, error) { - return buildUserResponse() -} diff --git a/cmd/api/main.go b/cmd/api/main.go new file mode 100644 index 0000000..413549c --- /dev/null +++ b/cmd/api/main.go @@ -0,0 +1,76 @@ +package main + +import ( + "log" + + "github.com/cpp-cyber/proclone/internal/api/handlers" + "github.com/cpp-cyber/proclone/internal/api/middleware" + "github.com/cpp-cyber/proclone/internal/api/routes" + "github.com/gin-contrib/sessions" + "github.com/gin-contrib/sessions/cookie" + "github.com/gin-gonic/gin" + _ "github.com/go-sql-driver/mysql" + "github.com/joho/godotenv" + "github.com/kelseyhightower/envconfig" +) + +// Config holds all application configuration +type Config struct { + Port string `envconfig:"PORT" default:":8080"` + SessionSecret string `envconfig:"SESSION_SECRET" default:"default-secret-key"` + FrontendURL string `envconfig:"FRONTEND_URL" default:"http://localhost:3000"` +} + +// init the environment +func init() { + if err := godotenv.Load(); err != nil { + log.Println("No .env file found, using environment variables from system") + } else { + log.Println("Loaded configuration from .env file") + } +} + +func main() { + gin.SetMode(gin.ReleaseMode) + + // Load and parse configuration from environment variables + var config Config + if err := envconfig.Process("", &config); err != nil { + log.Fatalf("Failed to process environment configuration: %v", err) + } + + log.Printf("Starting server on port %s", config.Port) + + r := gin.Default() + r.Use(middleware.CORSMiddleware(config.FrontendURL)) + r.MaxMultipartMemory = 8 << 20 // 8MiB + r.SetTrustedProxies(nil) + + // Setup session middleware + store := cookie.NewStore([]byte(config.SessionSecret)) + store.Options(sessions.Options{ + MaxAge: 3600, + HttpOnly: true, + Secure: true, + }) + r.Use(sessions.Sessions("session", store)) + + // Initialize handlers + authHandler, err := handlers.NewAuthHandler() + if err != nil { + log.Fatalf("Failed to initialize auth handler: %v", err) + } + + 
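+	// Note: with Secure set to true in the session options above, browsers
+	// only send the session cookie over HTTPS, so logins made over plain
+	// HTTP (e.g. local development) will not persist. A minimal sketch of a
+	// toggle, assuming a hypothetical APP_ENV variable not defined in this
+	// patch:
+	//
+	//	store.Options(sessions.Options{
+	//		MaxAge:   3600,
+	//		HttpOnly: true,
+	//		Secure:   os.Getenv("APP_ENV") == "production", // hypothetical knob
+	//	})
+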
proxmoxHandler, err := handlers.NewProxmoxHandler() + if err != nil { + log.Fatalf("Failed to initialize Proxmox handler: %v", err) + } + + cloningHandler, err := handlers.NewCloningHandler() + if err != nil { + log.Fatalf("Failed to initialize cloning handler: %v", err) + } + + routes.RegisterRoutes(r, authHandler, proxmoxHandler, cloningHandler) + r.Run(config.Port) +} diff --git a/go.mod b/go.mod index 85491cf..432853f 100644 --- a/go.mod +++ b/go.mod @@ -1,22 +1,25 @@ -module github.com/P-E-D-L/proclone +module github.com/cpp-cyber/proclone go 1.24.1 +toolchain go1.24.6 + require ( - github.com/gin-contrib/sessions v1.0.3 - github.com/gin-gonic/gin v1.10.0 + github.com/gin-contrib/sessions v1.0.4 + github.com/gin-gonic/gin v1.10.1 github.com/go-ldap/ldap/v3 v3.4.11 + github.com/go-sql-driver/mysql v1.9.3 + github.com/google/uuid v1.6.0 github.com/joho/godotenv v1.5.1 + github.com/kelseyhightower/envconfig v1.4.0 ) require ( + filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/bsm/redislock v0.9.4 // indirect github.com/bytedance/sonic v1.13.2 // indirect github.com/bytedance/sonic/loader v0.2.4 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudwego/base64x v0.1.5 // indirect - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/gabriel-vasile/mimetype v1.4.8 // indirect github.com/gin-contrib/sse v1.0.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect @@ -24,7 +27,6 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.26.0 // indirect github.com/goccy/go-json v0.10.5 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/gorilla/context v1.1.2 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/gorilla/sessions v1.4.0 // indirect @@ -35,7 +37,6 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect - github.com/redis/go-redis/v9 v9.11.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect golang.org/x/arch v0.16.0 // indirect diff --git a/go.sum b/go.sum index 2e2eb65..a8df5d3 100644 --- a/go.sum +++ b/go.sum @@ -1,34 +1,28 @@ +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= -github.com/bsm/redislock v0.9.4 h1:X/Wse1DPpiQgHbVYRE9zv6m070UcKoOGekgvpNhiSvw= -github.com/bsm/redislock v0.9.4/go.mod h1:Epf7AJLiSFwLCiZcfi6pWFO/8eAYrYpQXFxEDPoDeAk= github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= 
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= -github.com/gin-contrib/sessions v1.0.3 h1:AZ4j0AalLsGqdrKNbbrKcXx9OJZqViirvNGsJTxcQps= -github.com/gin-contrib/sessions v1.0.3/go.mod h1:5i4XMx4KPtQihnzxEqG9u1K446lO3G19jAi2GtbfsAI= +github.com/gin-contrib/sessions v1.0.4 h1:ha6CNdpYiTOK/hTp05miJLbpTSNfOnFg5Jm2kbcqy8U= +github.com/gin-contrib/sessions v1.0.4/go.mod h1:ccmkrb2z6iU2osiAHZG3x3J4suJK+OU27oqzlWOqQgs= github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E= github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0= -github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= -github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= +github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo= github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU= @@ -41,6 +35,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= @@ -74,6 +70,8 @@ 
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= @@ -91,10 +89,6 @@ github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNH github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/redis/go-redis/v9 v9.0.3 h1:+7mmR26M0IvyLxGZUHxu4GiBkJkVDid0Un+j4ScYu4k= -github.com/redis/go-redis/v9 v9.0.3/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= -github.com/redis/go-redis/v9 v9.11.0 h1:E3S08Gl/nJNn5vkxd2i78wZxWAPNZgUNTp8WIJUAiIs= -github.com/redis/go-redis/v9 v9.11.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/internal/api/auth/auth_service.go b/internal/api/auth/auth_service.go new file mode 100644 index 0000000..6d93b28 --- /dev/null +++ b/internal/api/auth/auth_service.go @@ -0,0 +1,97 @@ +package auth + +import ( + "fmt" + "strings" + + "github.com/cpp-cyber/proclone/internal/ldap" +) + +func NewAuthService() (*AuthService, error) { + ldapService, err := ldap.NewLDAPService() + if err != nil { + return nil, fmt.Errorf("failed to create LDAP service: %w", err) + } + + return &AuthService{ + ldapService: ldapService, + }, nil +} + +func (s *AuthService) Authenticate(username string, password string) (bool, error) { + // Input validation + if username == "" || password == "" { + return false, nil // Invalid credentials, not an error + } + + // Get user DN first to validate user exists + userDN, err := s.ldapService.GetUserDN(username) + if err != nil { + return false, nil // User not found, not an error for security reasons + } + + // Create a temporary client for authentication to avoid privilege escalation + config, err := ldap.LoadConfig() + if err != nil { + return false, fmt.Errorf("failed to load LDAP config: %w", err) + } + + authClient := ldap.NewClient(config) + if err := authClient.Connect(); err != nil { + return false, fmt.Errorf("failed to connect to LDAP: %w", err) + } + defer authClient.Disconnect() + + // Try to bind as the user to verify password + if err := authClient.SimpleBind(userDN, password); err != nil { + return false, nil // Invalid credentials, not an error + } + + return true, nil +} + +func (s *AuthService) IsAdmin(username string) (bool, error) { + // Input validation + if username == "" { + return false, fmt.Errorf("username cannot be empty") + } + + // Get user DN + userDN, err := 
s.ldapService.GetUserDN(username)
+	if err != nil {
+		return false, fmt.Errorf("failed to get user DN: %w", err)
+	}
+
+	// Get user's groups
+	userGroups, err := s.ldapService.GetUserGroups(userDN)
+	if err != nil {
+		return false, fmt.Errorf("failed to get user groups: %w", err)
+	}
+
+	// Load LDAP config to get admin group DN
+	config, err := ldap.LoadConfig()
+	if err != nil {
+		return false, fmt.Errorf("failed to load LDAP config: %w", err)
+	}
+
+	if config.AdminGroupDN == "" {
+		return false, fmt.Errorf("admin group DN not configured")
+	}
+
+	// Check if user is in the configured admin group
+	for _, groupDN := range userGroups {
+		if strings.EqualFold(groupDN, config.AdminGroupDN) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (s *AuthService) HealthCheck() error {
+	return s.ldapService.HealthCheck()
+}
+
+func (s *AuthService) Reconnect() error {
+	return s.ldapService.Reconnect()
+}
diff --git a/internal/api/auth/types.go b/internal/api/auth/types.go
new file mode 100644
index 0000000..34420fa
--- /dev/null
+++ b/internal/api/auth/types.go
@@ -0,0 +1,31 @@
+package auth
+
+import (
+	"github.com/cpp-cyber/proclone/internal/ldap"
+)
+
+// =================================================
+// Auth Service Interface
+// =================================================
+
+type Service interface {
+	// Authentication
+	Authenticate(username, password string) (bool, error)
+	IsAdmin(username string) (bool, error)
+
+	// Health and Connection
+	HealthCheck() error
+	Reconnect() error
+}
+
+type AuthService struct {
+	ldapService ldap.Service
+}
+
+// =================================================
+// Types for Auth Service (re-exported from ldap)
+// =================================================
+
+type User = ldap.User
+type Group = ldap.Group
+type UserRegistrationInfo = ldap.UserRegistrationInfo
diff --git a/internal/api/handlers/auth_handler.go b/internal/api/handlers/auth_handler.go
new file mode 100644
index 0000000..3a15003
--- /dev/null
+++ b/internal/api/handlers/auth_handler.go
@@ -0,0 +1,478 @@
+package handlers
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+
+	"github.com/cpp-cyber/proclone/internal/api/auth"
+	"github.com/cpp-cyber/proclone/internal/ldap"
+	"github.com/cpp-cyber/proclone/internal/proxmox"
+	"github.com/gin-contrib/sessions"
+	"github.com/gin-gonic/gin"
+)
+
+// =================================================
+// Login / Logout / Session Handlers
+// =================================================
+
+// NewAuthHandler creates a new authentication handler
+func NewAuthHandler() (*AuthHandler, error) {
+	authService, err := auth.NewAuthService()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create auth service: %w", err)
+	}
+
+	ldapService, err := ldap.NewLDAPService()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create LDAP service: %w", err)
+	}
+
+	proxmoxService, err := proxmox.NewService()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create proxmox service: %w", err)
+	}
+
+	log.Println("Auth handler initialized")
+
+	return &AuthHandler{
+		authService:    authService,
+		ldapService:    ldapService,
+		proxmoxService: proxmoxService,
+	}, nil
+}
+
+// LoginHandler handles the login POST request
+func (h *AuthHandler) LoginHandler(c *gin.Context) {
+	var req UsernamePasswordRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	// Authenticate user
+	valid, err := h.authService.Authenticate(req.Username, req.Password)
+	if err != nil {
+		log.Printf("Authentication failed for user %s: %v", req.Username, err)
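+		// Note: reaching this branch indicates an infrastructure failure
+		// (e.g. the LDAP server was unreachable), not a bad password;
+		// Authenticate deliberately returns (false, nil) for invalid
+		// credentials, which is answered with a 401 below.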
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Authentication failed"})
+		return
+	}
+
+	if !valid {
+		c.JSON(http.StatusUnauthorized, gin.H{"error": "Invalid credentials"})
+		return
+	}
+
+	// Create session
+	session := sessions.Default(c)
+	session.Set("id", req.Username)
+
+	// Check if user is admin
+	isAdmin, err := h.authService.IsAdmin(req.Username)
+	if err != nil {
+		log.Printf("Error checking admin status for user %s: %v", req.Username, err)
+		isAdmin = false
+	}
+	session.Set("isAdmin", isAdmin)
+
+	if err := session.Save(); err != nil {
+		log.Printf("Failed to save session for user %s: %v", req.Username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to save session"})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"message": "Login successful",
+		"isAdmin": isAdmin,
+	})
+}
+
+// LogoutHandler handles user logout
+func (h *AuthHandler) LogoutHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	session.Clear()
+
+	if err := session.Save(); err != nil {
+		log.Printf("Failed to clear session: %v", err)
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to save session"})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{"message": "Successfully logged out"})
+}
+
+// SessionHandler returns current session information for authenticated users
+func (h *AuthHandler) SessionHandler(c *gin.Context) {
+	session := sessions.Default(c)
+
+	// Since this is under private routes, AuthRequired middleware ensures session exists
+	id := session.Get("id")
+	isAdmin := session.Get("isAdmin")
+
+	// Convert isAdmin to bool, defaulting to false if not set
+	adminStatus := false
+	if isAdmin != nil {
+		adminStatus = isAdmin.(bool)
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"authenticated": true,
+		"username":      id.(string),
+		"isAdmin":       adminStatus,
+	})
+}
+
+func (h *AuthHandler) RegisterHandler(c *gin.Context) {
+	var req UsernamePasswordRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	// Check if the username already exists; a lookup error here most likely
+	// just means the user does not exist, so it is deliberately ignored
+	userDN, _ := h.ldapService.GetUserDN(req.Username)
+	if userDN != "" {
+		log.Printf("Attempt to register existing username: %s", req.Username)
+		c.JSON(http.StatusConflict, gin.H{"error": "Username already exists"})
+		return
+	}
+
+	// Create user
+	if err := h.ldapService.CreateAndRegisterUser(ldap.UserRegistrationInfo(req)); err != nil {
+		log.Printf("Failed to create user %s: %v", req.Username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create user"})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{"message": "User registered successfully"})
+}
+
+// =================================================
+// User Handlers
+// =================================================
+
+// ADMIN: GetUsersHandler returns a list of all users
+func (h *AuthHandler) GetUsersHandler(c *gin.Context) {
+	users, err := h.ldapService.GetUsers()
+	if err != nil {
+		log.Printf("Failed to retrieve users: %v", err)
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve users"})
+		return
+	}
+
+	var adminCount = 0
+	var disabledCount = 0
+	for _, user := range users {
+		if user.IsAdmin {
+			adminCount++
+		}
+		if !user.Enabled {
+			disabledCount++
+		}
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"users":          users,
+		"count":          len(users),
+		"disabled_count": disabledCount,
+		"admin_count":    adminCount,
+	})
+}
+
+// ADMIN: CreateUsersHandler creates new user(s)
+func (h *AuthHandler) 
CreateUsersHandler(c *gin.Context) { + var req AdminCreateUserRequest + if !validateAndBind(c, &req) { + return + } + + var errors []error + + // Create users in AD + for _, user := range req.Users { + if err := h.ldapService.CreateAndRegisterUser(ldap.UserRegistrationInfo(user)); err != nil { + errors = append(errors, fmt.Errorf("failed to create user %s: %v", user.Username, err)) + } + } + + if len(errors) > 0 { + log.Printf("Failed to create users: %v", errors) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create users", "details": errors}) + return + } + + // Sync users to Proxmox + if err := h.proxmoxService.SyncUsers(); err != nil { + log.Printf("Failed to sync users with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync users with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusCreated, gin.H{"message": "Users created successfully"}) +} + +// ADMIN: DeleteUsersHandler deletes existing user(s) +func (h *AuthHandler) DeleteUsersHandler(c *gin.Context) { + var req UsersRequest + if !validateAndBind(c, &req) { + return + } + + var errors []error + + // Delete users in AD + for _, username := range req.Usernames { + if err := h.ldapService.DeleteUser(username); err != nil { + errors = append(errors, fmt.Errorf("failed to delete user %s: %v", username, err)) + } + } + + if len(errors) > 0 { + log.Printf("Failed to delete users: %v", errors) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete users", "details": errors}) + return + } + + // Sync users to Proxmox + if err := h.proxmoxService.SyncUsers(); err != nil { + log.Printf("Failed to sync users with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync users with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Users deleted successfully"}) +} + +// ADMIN: EnableUsersHandler enables existing user(s) +func (h *AuthHandler) EnableUsersHandler(c *gin.Context) { + var req UsersRequest + if !validateAndBind(c, &req) { + return + } + + var errors []error + + for _, username := range req.Usernames { + if err := h.ldapService.EnableUserAccount(username); err != nil { + errors = append(errors, fmt.Errorf("failed to enable user %s: %v", username, err)) + } + } + + if len(errors) > 0 { + log.Printf("Failed to enable users: %v", errors) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to enable users", "details": errors}) + return + } + + // Sync users to Proxmox + if err := h.proxmoxService.SyncUsers(); err != nil { + log.Printf("Failed to sync users with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync users with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Users enabled successfully"}) +} + +// ADMIN: DisableUsersHandler disables existing user(s) +func (h *AuthHandler) DisableUsersHandler(c *gin.Context) { + var req UsersRequest + if !validateAndBind(c, &req) { + return + } + + var errors []error + + for _, username := range req.Usernames { + if err := h.ldapService.DisableUserAccount(username); err != nil { + errors = append(errors, fmt.Errorf("failed to disable user %s: %v", username, err)) + } + } + + if len(errors) > 0 { + log.Printf("Failed to disable users: %v", errors) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to disable users", "details": errors}) + return + } + + // Sync users to Proxmox + if err := h.proxmoxService.SyncUsers(); 
err != nil { + log.Printf("Failed to sync users with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync users with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Users disabled successfully"}) +} + +// ================================================= +// Group Handlers +// ================================================= + +// ADMIN: SetUserGroupsHandler sets the groups for an existing user +func (h *AuthHandler) SetUserGroupsHandler(c *gin.Context) { + var req SetUserGroupsRequest + if !validateAndBind(c, &req) { + return + } + + if err := h.ldapService.SetUserGroups(req.Username, req.Groups); err != nil { + log.Printf("Failed to set groups for user %s: %v", req.Username, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to set user groups", "details": err.Error()}) + return + } + + // Sync groups to Proxmox + if err := h.proxmoxService.SyncGroups(); err != nil { + log.Printf("Failed to sync groups with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync groups with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "User groups updated successfully"}) +} + +func (h *AuthHandler) GetGroupsHandler(c *gin.Context) { + groups, err := h.ldapService.GetGroups() + if err != nil { + log.Printf("Failed to retrieve groups: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve groups"}) + return + } + + c.JSON(http.StatusOK, gin.H{ + "groups": groups, + "count": len(groups), + }) +} + +// ADMIN: CreateGroupsHandler creates new group(s) +func (h *AuthHandler) CreateGroupsHandler(c *gin.Context) { + var req GroupsRequest + if !validateAndBind(c, &req) { + return + } + + var errors []error + + // Create groups in AD + for _, group := range req.Groups { + if err := h.ldapService.CreateGroup(group); err != nil { + errors = append(errors, fmt.Errorf("failed to create group %s: %v", group, err)) + } + } + + if len(errors) > 0 { + log.Printf("Failed to create groups: %v", errors) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create groups", "details": errors}) + return + } + + // Sync groups to Proxmox + if err := h.proxmoxService.SyncGroups(); err != nil { + log.Printf("Failed to sync groups with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync groups with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusCreated, gin.H{"message": "Groups created successfully"}) +} + +func (h *AuthHandler) RenameGroupHandler(c *gin.Context) { + var req RenameGroupRequest + if !validateAndBind(c, &req) { + return + } + + if err := h.ldapService.RenameGroup(req.OldName, req.NewName); err != nil { + log.Printf("Failed to rename group %s to %s: %v", req.OldName, req.NewName, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to rename group", "details": err.Error()}) + return + } + + // Sync groups to Proxmox + if err := h.proxmoxService.SyncGroups(); err != nil { + log.Printf("Failed to sync groups with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync groups with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Group renamed successfully"}) +} + +func (h *AuthHandler) DeleteGroupsHandler(c *gin.Context) { + var req GroupsRequest + if !validateAndBind(c, &req) { + return + } + + var errors []error + + // Delete groups in AD + for 
_, group := range req.Groups { + if err := h.ldapService.DeleteGroup(group); err != nil { + errors = append(errors, fmt.Errorf("failed to delete group %s: %v", group, err)) + } + } + + if len(errors) > 0 { + log.Printf("Failed to delete groups: %v", errors) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete groups", "details": errors}) + return + } + + // Sync groups to Proxmox + if err := h.proxmoxService.SyncGroups(); err != nil { + log.Printf("Failed to sync groups with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync groups with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Groups deleted successfully"}) +} + +func (h *AuthHandler) AddUsersHandler(c *gin.Context) { + var req ModifyGroupMembersRequest + if !validateAndBind(c, &req) { + return + } + + if err := h.ldapService.AddUsersToGroup(req.Group, req.Usernames); err != nil { + log.Printf("Failed to add users to group %s: %v", req.Group, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to add users to group", "details": err.Error()}) + return + } + + // Sync groups to Proxmox + if err := h.proxmoxService.SyncGroups(); err != nil { + log.Printf("Failed to sync groups with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync groups with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Users added to group successfully"}) +} + +func (h *AuthHandler) RemoveUsersHandler(c *gin.Context) { + var req ModifyGroupMembersRequest + if !validateAndBind(c, &req) { + return + } + + if err := h.ldapService.RemoveUsersFromGroup(req.Group, req.Usernames); err != nil { + log.Printf("Failed to remove users from group %s: %v", req.Group, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to remove users from group", "details": err.Error()}) + return + } + + // Sync groups to Proxmox + if err := h.proxmoxService.SyncGroups(); err != nil { + log.Printf("Failed to sync groups with Proxmox: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to sync groups with Proxmox", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Users removed from group successfully"}) +} diff --git a/internal/api/handlers/cloning_handler.go b/internal/api/handlers/cloning_handler.go new file mode 100644 index 0000000..f765213 --- /dev/null +++ b/internal/api/handlers/cloning_handler.go @@ -0,0 +1,447 @@ +package handlers + +import ( + "fmt" + "log" + "net/http" + "path/filepath" + "strings" + + "github.com/cpp-cyber/proclone/internal/cloning" + "github.com/cpp-cyber/proclone/internal/ldap" + "github.com/cpp-cyber/proclone/internal/proxmox" + "github.com/cpp-cyber/proclone/internal/tools" + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" +) + +// NewCloningHandler creates a new cloning handler, loading dependencies internally +func NewCloningHandler() (*CloningHandler, error) { + // Initialize database connection + dbClient, err := tools.NewDBClient() + if err != nil { + return nil, fmt.Errorf("failed to initialize database client: %w", err) + } + + // Initialize Proxmox service + proxmoxService, err := proxmox.NewService() + if err != nil { + return nil, fmt.Errorf("failed to create Proxmox service: %w", err) + } + + // Initialize LDAP service + ldapService, err := ldap.NewLDAPService() + if err != nil { + return nil, fmt.Errorf("failed to create LDAP service: %w", err) + } + + // 
Initialize cloning service
+	cloningService, err := cloning.NewCloningService(proxmoxService, dbClient.DB(), ldapService)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize cloning service: %w", err)
+	}
+	log.Println("Cloning service initialized")
+
+	return &CloningHandler{
+		Service:  cloningService,
+		dbClient: dbClient,
+	}, nil
+}
+
+// CloneTemplateHandler handles requests to clone a template pool for a user or group
+func (ch *CloningHandler) CloneTemplateHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	var req CloneRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	log.Printf("User %s requested cloning of template %s", username, req.Template)
+
+	// Create the cloning request using the new format
+	cloneReq := cloning.CloneRequest{
+		Template:                 req.Template,
+		CheckExistingDeployments: true, // Check for existing deployments for single user clones
+		Targets: []cloning.CloneTarget{
+			{
+				Name:    username,
+				IsGroup: false,
+			},
+		},
+	}
+
+	if err := ch.Service.CloneTemplate(cloneReq); err != nil {
+		log.Printf("Error cloning template: %v", err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to clone template",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	log.Printf("Template %s cloned successfully for user %s", req.Template, username)
+	c.JSON(http.StatusOK, gin.H{"success": true})
+}
+
+// ADMIN: AdminCloneTemplateHandler handles POST requests for cloning a template for multiple users and groups
+func (ch *CloningHandler) AdminCloneTemplateHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	var req AdminCloneRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	log.Printf("%s requested bulk cloning of template %s", username, req.Template)
+
+	// Build targets slice from usernames and groups
+	var targets []cloning.CloneTarget
+
+	// Add users as targets
+	for _, user := range req.Usernames {
+		targets = append(targets, cloning.CloneTarget{
+			Name:    user,
+			IsGroup: false,
+		})
+	}
+
+	// Add groups as targets
+	for _, group := range req.Groups {
+		targets = append(targets, cloning.CloneTarget{
+			Name:    group,
+			IsGroup: true,
+		})
+	}
+
+	// Create clone request
+	cloneReq := cloning.CloneRequest{
+		Template:                 req.Template,
+		Targets:                  targets,
+		CheckExistingDeployments: false,
+	}
+
+	// Perform clone operation
+	err := ch.Service.CloneTemplate(cloneReq)
+	if err != nil {
+		log.Printf("Admin %s encountered error while bulk cloning template: %v", username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to clone templates",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"success": true,
+		"message": "Templates cloned successfully",
+	})
+}
+
+// DeletePodHandler handles requests to delete a pod
+func (ch *CloningHandler) DeletePodHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	var req DeletePodRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	log.Printf("User %s requested deletion of pod %s", username, req.Pod)
+
+	// Check if the pod belongs to the user (maybe allow users to delete group pods in the future?)
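+	// Note: substring containment is a coarse ownership test; a request from
+	// user "ali" would also match a pod belonging to "alice". A stricter
+	// sketch, assuming pod names end in "_<username>" (the same convention
+	// GetPodsHandler strips below):
+	//
+	//	if !strings.HasSuffix(req.Pod, "_"+username) { /* reject */ }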
+	if !strings.Contains(req.Pod, username) {
+		c.JSON(http.StatusForbidden, gin.H{
+			"error":   "You do not have permission to delete this pod",
+			"details": fmt.Sprintf("Pod %s does not belong to user %s", req.Pod, username),
+		})
+		return
+	}
+
+	err := ch.Service.DeletePod(req.Pod)
+	if err != nil {
+		log.Printf("Error deleting %s pod: %v", req.Pod, err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to delete pod",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{"message": "Pod deleted successfully"})
+}
+
+func (ch *CloningHandler) AdminDeletePodHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	var req AdminDeletePodRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	log.Printf("Admin %s requested deletion of pods: %v", username, req.Pods)
+
+	var errors []error
+	for _, pod := range req.Pods {
+		err := ch.Service.DeletePod(pod)
+		if err != nil {
+			errors = append(errors, fmt.Errorf("failed to delete pod %s: %v", pod, err))
+		}
+	}
+
+	if len(errors) > 0 {
+		log.Printf("Admin %s encountered errors while deleting pods: %v", username, errors)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to delete pods",
+			"details": errors,
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{"message": "Pods deleted successfully"})
+}
+
+func (ch *CloningHandler) GetUnpublishedTemplatesHandler(c *gin.Context) {
+	templates, err := ch.Service.GetUnpublishedTemplates()
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to retrieve unpublished templates",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"templates": templates,
+		"count":     len(templates),
+	})
+}
+
+// PRIVATE: GetPodsHandler handles GET requests for retrieving a user's pods
+func (ch *CloningHandler) GetPodsHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	pods, err := ch.Service.GetPods(username)
+	if err != nil {
+		log.Printf("Error retrieving pods for user %s: %v", username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve pods", "details": err.Error()})
+		return
+	}
+
+	// Loop through the user's deployed pods and add template information.
+	// Pod names are assumed to follow "NNNN_<template>_<username>": drop the
+	// five-character pod ID prefix and the "_<username>" suffix to recover
+	// the template name.
+	for i := range pods {
+		templateName := strings.Replace(pods[i].Name[5:], fmt.Sprintf("_%s", username), "", 1)
+		templateInfo, err := ch.Service.DatabaseService.GetTemplateInfo(templateName)
+		if err != nil {
+			log.Printf("Error retrieving template info for pod %s: %v", pods[i].Name, err)
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve template info for pod", "details": err.Error()})
+			return
+		}
+		pods[i].Template = templateInfo
+	}
+
+	c.JSON(http.StatusOK, gin.H{"pods": pods})
+}
+
+// ADMIN: AdminGetPodsHandler handles GET requests for retrieving all pods
+func (ch *CloningHandler) AdminGetPodsHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	pods, err := ch.Service.AdminGetPods()
+	if err != nil {
+		log.Printf("Error retrieving all pods for admin %s: %v", username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve pods", "details": err.Error()})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{"pods": pods})
+}
+
+// PRIVATE: GetTemplatesHandler handles GET requests for retrieving templates
+func (ch *CloningHandler) GetTemplatesHandler(c *gin.Context) {
+	templates, err := ch.Service.DatabaseService.GetTemplates()
+	if err != nil {
+		log.Printf("Error retrieving templates: %v", err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to retrieve templates",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"templates": templates,
+		"count":     len(templates),
+	})
+}
+
+// ADMIN: AdminGetTemplatesHandler handles GET requests for retrieving all published templates
+func (ch *CloningHandler) AdminGetTemplatesHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	templates, err := ch.Service.DatabaseService.GetPublishedTemplates()
+	if err != nil {
+		log.Printf("Error retrieving all templates for admin %s: %v", username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to retrieve all templates",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"templates": templates,
+		"count":     len(templates),
+	})
+}
+
+// PRIVATE: GetTemplateImageHandler handles GET requests for retrieving a template's image
+func (ch *CloningHandler) GetTemplateImageHandler(c *gin.Context) {
+	// Strip any path components from the user-supplied name so a crafted
+	// filename like "../../etc/passwd" cannot escape the upload directory
+	filename := filepath.Base(c.Param("filename"))
+	config := ch.Service.DatabaseService.GetTemplateConfig()
+	filePath := filepath.Join(config.UploadDir, filename)
+
+	// Serve the file
+	c.File(filePath)
+}
+
+// ADMIN: PublishTemplateHandler handles POST requests for publishing a template
+func (ch *CloningHandler) PublishTemplateHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	var req PublishTemplateRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	log.Printf("Admin %s requested publishing of template %s", username, req.Template.Name)
+
+	if err := ch.Service.PublishTemplate(req.Template); err != nil {
+		log.Printf("Error publishing template for admin %s: %v", username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to publish template",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"message": "Template published successfully",
+	})
+}
+
+// ADMIN: EditTemplateHandler handles POST requests for editing a published template
+func (ch *CloningHandler) EditTemplateHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	var req PublishTemplateRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	log.Printf("Admin %s requested editing of template %s", username, req.Template.Name)
+
+	if err := ch.Service.DatabaseService.EditTemplate(req.Template); err != nil {
+		log.Printf("Error editing template for admin %s: %v", username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to edit template",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"message": "Template edited successfully",
+	})
+}
+
+// ADMIN: DeleteTemplateHandler handles POST requests for deleting a template
+func (ch *CloningHandler) DeleteTemplateHandler(c *gin.Context) {
+	session := sessions.Default(c)
+	username := session.Get("id").(string)
+
+	var req TemplateRequest
+	if !validateAndBind(c, &req) {
+		return
+	}
+
+	log.Printf("Admin %s requested deletion of template %s", username, req.Template)
+
+	if err := ch.Service.DatabaseService.DeleteTemplate(req.Template); err != nil {
+		log.Printf("Error deleting template for admin %s: %v", username, err)
+		c.JSON(http.StatusInternalServerError, gin.H{
+			"error":   "Failed to delete template",
+			"details": err.Error(),
+		})
+		return
+	}
+
+	
c.JSON(http.StatusOK, gin.H{ + "message": "Template deleted successfully", + }) +} + +// ADMIN: ToggleTemplateVisibilityHandler handles POST requests for toggling a template's visibility +func (ch *CloningHandler) ToggleTemplateVisibilityHandler(c *gin.Context) { + session := sessions.Default(c) + username := session.Get("id").(string) + + var req TemplateRequest + if !validateAndBind(c, &req) { + return + } + + log.Printf("Admin %s requested toggling visibility of template %s", username, req.Template) + + if err := ch.Service.DatabaseService.ToggleTemplateVisibility(req.Template); err != nil { + log.Printf("Error toggling template visibility for admin %s: %v", username, err) + c.JSON(http.StatusInternalServerError, gin.H{ + "error": "Failed to toggle template visibility", + "details": err.Error(), + }) + return + } + + c.JSON(http.StatusOK, gin.H{ + "message": "Template visibility toggled successfully", + }) +} + +// ADMIN: UploadTemplateImageHandler handles POST requests for uploading a template's image +func (ch *CloningHandler) UploadTemplateImageHandler(c *gin.Context) { + session := sessions.Default(c) + username := session.Get("id").(string) + + log.Printf("Admin %s requested uploading a template image", username) + + result, err := ch.Service.DatabaseService.UploadTemplateImage(c) + if err != nil { + log.Printf("Error uploading template image for admin %s: %v", username, err) + c.JSON(http.StatusInternalServerError, gin.H{ + "error": "Failed to upload template image", + "details": err.Error(), + }) + return + } + + c.JSON(http.StatusOK, result) +} + +// HealthCheck checks the database connection health +func (ch *CloningHandler) HealthCheck() error { + return ch.dbClient.HealthCheck() +} + +// Reconnect attempts to reconnect to the database +func (ch *CloningHandler) Reconnect() error { + return ch.dbClient.Connect() +} diff --git a/internal/api/handlers/dashboard_handler.go b/internal/api/handlers/dashboard_handler.go new file mode 100644 index 0000000..9a058a1 --- /dev/null +++ b/internal/api/handlers/dashboard_handler.go @@ -0,0 +1,73 @@ +package handlers + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +// NewDashboardHandler creates a new dashboard handler +func NewDashboardHandler(authHandler *AuthHandler, proxmoxHandler *ProxmoxHandler, cloningHandler *CloningHandler) *DashboardHandler { + return &DashboardHandler{ + authHandler: authHandler, + proxmoxHandler: proxmoxHandler, + cloningHandler: cloningHandler, + } +} + +// ADMIN: GetDashboardStatsHandler retrieves all dashboard statistics in a single request +func (dh *DashboardHandler) GetDashboardStatsHandler(c *gin.Context) { + stats := DashboardStats{} + + // Get user count + users, err := dh.authHandler.ldapService.GetUsers() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve user count", "details": err.Error()}) + return + } + stats.UserCount = len(users) + + // Get group count + groups, err := dh.authHandler.ldapService.GetGroups() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve group count", "details": err.Error()}) + return + } + stats.GroupCount = len(groups) + + // Get published template count + publishedTemplates, err := dh.cloningHandler.Service.DatabaseService.GetPublishedTemplates() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve published template count", "details": err.Error()}) + return + } + stats.PublishedTemplateCount = len(publishedTemplates) + + // Get 
deployed pod count + pods, err := dh.cloningHandler.Service.AdminGetPods() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve deployed pod count", "details": err.Error()}) + return + } + stats.DeployedPodCount = len(pods) + + // Get virtual machine count + vms, err := dh.proxmoxHandler.service.GetVMs() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve virtual machine count", "details": err.Error()}) + return + } + stats.VirtualMachineCount = len(vms) + + // Get cluster resource usage + clusterUsage, err := dh.proxmoxHandler.service.GetClusterResourceUsage() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve cluster resource usage", "details": err.Error()}) + return + } + stats.ClusterResourceUsage = clusterUsage + + c.JSON(http.StatusOK, gin.H{ + "stats": stats, + }) +} diff --git a/internal/api/handlers/health_handler.go b/internal/api/handlers/health_handler.go new file mode 100644 index 0000000..0410237 --- /dev/null +++ b/internal/api/handlers/health_handler.go @@ -0,0 +1,51 @@ +package handlers + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +// PUBLIC: HealthCheckHandler handles GET requests for health checks with detailed service status +func HealthCheckHandler(authHandler *AuthHandler, cloningHandler *CloningHandler) gin.HandlerFunc { + return func(c *gin.Context) { + healthStatus := gin.H{ + "status": "healthy", + "services": gin.H{ + "api": "healthy", + }, + } + + statusCode := http.StatusOK + + // Check LDAP connection + if authHandler != nil && authHandler.authService != nil { + if err := authHandler.authService.HealthCheck(); err != nil { + healthStatus["services"].(gin.H)["ldap"] = gin.H{ + "status": "unhealthy", + "error": err.Error(), + } + healthStatus["status"] = "degraded" + statusCode = http.StatusServiceUnavailable + } else { + healthStatus["services"].(gin.H)["ldap"] = "healthy" + } + } + + // Check database connection (via cloning handler) + if cloningHandler != nil { + if err := cloningHandler.HealthCheck(); err != nil { + healthStatus["services"].(gin.H)["database"] = gin.H{ + "status": "unhealthy", + "error": err.Error(), + } + healthStatus["status"] = "degraded" + statusCode = http.StatusServiceUnavailable + } else { + healthStatus["services"].(gin.H)["database"] = "healthy" + } + } + + c.JSON(statusCode, healthStatus) + } +} diff --git a/internal/api/handlers/proxmox_handler.go b/internal/api/handlers/proxmox_handler.go new file mode 100644 index 0000000..0bc8676 --- /dev/null +++ b/internal/api/handlers/proxmox_handler.go @@ -0,0 +1,98 @@ +package handlers + +import ( + "fmt" + "log" + "net/http" + + "github.com/cpp-cyber/proclone/internal/proxmox" + "github.com/gin-gonic/gin" +) + +// NewProxmoxHandler creates a new Proxmox handler, loading configuration internally +func NewProxmoxHandler() (*ProxmoxHandler, error) { + proxmoxService, err := proxmox.NewService() + if err != nil { + return nil, fmt.Errorf("failed to create Proxmox service: %w", err) + } + + log.Println("Proxmox handler initialized") + + return &ProxmoxHandler{ + service: proxmoxService, + }, nil +} + +// ADMIN: GetClusterResourceUsageHandler retrieves and formats the total cluster resource usage in addition to each individual node's usage +func (ph *ProxmoxHandler) GetClusterResourceUsageHandler(c *gin.Context) { + response, err := ph.service.GetClusterResourceUsage() + if err != nil { + log.Printf("Error retrieving cluster resource usage: %v", err) + 
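+		// Note: returning "details": err.Error() to API clients (a pattern
+		// repeated across these handlers) can leak internal information such
+		// as node names or connection strings; since the error is already
+		// logged above, a generic message is the safer default, e.g.:
+		//
+		//	c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve cluster resource usage"})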
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve cluster resource usage", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{ + "cluster": response, + }) +} + +// ADMIN: GetVMsHandler handles GET requests for retrieving all VMs on Proxmox +func (ph *ProxmoxHandler) GetVMsHandler(c *gin.Context) { + vms, err := ph.service.GetVMs() + if err != nil { + log.Printf("Error retrieving VMs: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve VMs", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"vms": vms}) +} + +// ADMIN: StartVMHandler handles POST requests for starting a VM on Proxmox +func (ph *ProxmoxHandler) StartVMHandler(c *gin.Context) { + var req VMActionRequest + if !validateAndBind(c, &req) { + return + } + + if err := ph.service.StartVM(req.Node, req.VMID); err != nil { + log.Printf("Error starting VM %d on node %s: %v", req.VMID, req.Node, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to start VM", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "VM started"}) +} + +// ADMIN: ShutdownVMHandler handles POST requests for shutting down a VM on Proxmox +func (ph *ProxmoxHandler) ShutdownVMHandler(c *gin.Context) { + var req VMActionRequest + if !validateAndBind(c, &req) { + return + } + + if err := ph.service.ShutdownVM(req.Node, req.VMID); err != nil { + log.Printf("Error shutting down VM %d on node %s: %v", req.VMID, req.Node, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to shutdown VM", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "VM shutdown"}) +} + +// ADMIN: RebootVMHandler handles POST requests for rebooting a VM on Proxmox +func (ph *ProxmoxHandler) RebootVMHandler(c *gin.Context) { + var req VMActionRequest + if !validateAndBind(c, &req) { + return + } + + if err := ph.service.RebootVM(req.Node, req.VMID); err != nil { + log.Printf("Error rebooting VM %d on node %s: %v", req.VMID, req.Node, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to reboot VM", "details": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "VM rebooted"}) +} diff --git a/internal/api/handlers/types.go b/internal/api/handlers/types.go new file mode 100644 index 0000000..2fc47a0 --- /dev/null +++ b/internal/api/handlers/types.go @@ -0,0 +1,132 @@ +package handlers + +import ( + "net/http" + + "github.com/cpp-cyber/proclone/internal/api/auth" + "github.com/cpp-cyber/proclone/internal/cloning" + "github.com/cpp-cyber/proclone/internal/ldap" + "github.com/cpp-cyber/proclone/internal/proxmox" + "github.com/cpp-cyber/proclone/internal/tools" + "github.com/gin-gonic/gin" +) + +// ================================================= +// Handler Types +// ================================================= + +// AuthHandler handles HTTP authentication requests +type AuthHandler struct { + authService auth.Service + ldapService ldap.Service + proxmoxService proxmox.Service +} + +// CloningHandler holds the cloning service +type CloningHandler struct { + Service *cloning.CloningService + dbClient *tools.DBClient +} + +// DashboardHandler handles HTTP requests for dashboard operations +type DashboardHandler struct { + authHandler *AuthHandler + proxmoxHandler *ProxmoxHandler + cloningHandler *CloningHandler +} + +// ProxmoxHandler handles HTTP requests for Proxmox operations +type ProxmoxHandler struct { + service proxmox.Service +} + +// 
================================================= +// API Request Types +// ================================================= + +type VMActionRequest struct { + Node string `json:"node" binding:"required,min=1,max=100" validate:"alphanum"` + VMID int `json:"vmid" binding:"required,min=100,max=999999"` +} + +type TemplateRequest struct { + Template string `json:"template" binding:"required,min=1,max=100" validate:"alphanum,ascii"` +} + +type PublishTemplateRequest struct { + Template cloning.KaminoTemplate `json:"template" binding:"required"` +} + +type CloneRequest struct { + Template string `json:"template" binding:"required,min=1,max=100" validate:"alphanum,ascii"` +} + +type GroupsRequest struct { + Groups []string `json:"groups" binding:"required,min=1,dive,min=1,max=100" validate:"dive,alphanum,ascii"` +} + +type AdminCloneRequest struct { + Template string `json:"template" binding:"required,min=1,max=100" validate:"alphanum,ascii"` + Usernames []string `json:"usernames" binding:"omitempty,dive,min=1,max=100" validate:"dive,alphanum,ascii"` + Groups []string `json:"groups" binding:"omitempty,dive,min=1,max=100" validate:"dive,alphanum,ascii"` +} + +type DeletePodRequest struct { + Pod string `json:"pod" binding:"required,min=1,max=100" validate:"alphanum,ascii"` +} + +type AdminDeletePodRequest struct { + Pods []string `json:"pods" binding:"required,min=1,dive,min=1,max=100" validate:"dive,alphanum,ascii"` +} + +type UsernamePasswordRequest struct { + Username string `json:"username" binding:"required,min=3,max=20" validate:"alphanum,ascii"` + Password string `json:"password" binding:"required,min=8,max=128"` +} + +type AdminCreateUserRequest struct { + Users []UsernamePasswordRequest `json:"users" binding:"required,min=1,max=100,dive"` +} + +type UsersRequest struct { + Usernames []string `json:"usernames" binding:"required,min=1,dive,min=1,max=50" validate:"dive,alphanum,ascii"` +} + +type ModifyGroupMembersRequest struct { + Group string `json:"group" binding:"required,min=1,max=100" validate:"alphanum,ascii"` + Usernames []string `json:"usernames" binding:"required,min=1,dive,min=1,max=50" validate:"dive,alphanum,ascii"` +} + +type SetUserGroupsRequest struct { + Username string `json:"username" binding:"required,min=3,max=20" validate:"alphanum,ascii"` + Groups []string `json:"groups" binding:"required,min=1,dive,min=1,max=100" validate:"dive,alphanum,ascii"` +} + +type RenameGroupRequest struct { + OldName string `json:"old_name" binding:"required,min=1,max=100" validate:"alphanum,ascii"` + NewName string `json:"new_name" binding:"required,min=1,max=100" validate:"alphanum,ascii"` +} + +type DashboardStats struct { + UserCount int `json:"users"` + GroupCount int `json:"groups"` + PublishedTemplateCount int `json:"published_templates"` + DeployedPodCount int `json:"deployed_pods"` + VirtualMachineCount int `json:"vms"` + ClusterResourceUsage any `json:"cluster"` +} + +// ================================================= +// Private Functions +// ================================================= + +func validateAndBind(c *gin.Context, obj any) bool { + if err := c.ShouldBindJSON(obj); err != nil { + c.JSON(http.StatusBadRequest, gin.H{ + "error": "Validation failed", + "details": "Invalid request format or missing required fields", + }) + return false + } + return true +} diff --git a/internal/api/middleware/authorization.go b/internal/api/middleware/authorization.go new file mode 100644 index 0000000..e173d39 --- /dev/null +++ b/internal/api/middleware/authorization.go @@ -0,0 
+1,79 @@ +package middleware + +import ( + "net/http" + + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" +) + +// AuthRequired provides authentication middleware for ensuring that a user is logged in. +func AuthRequired(c *gin.Context) { + session := sessions.Default(c) + id := session.Get("id") + if id == nil { + c.String(http.StatusUnauthorized, "Unauthorized") + c.Abort() + return + } + c.Next() +} + +// AdminRequired ensures the session belongs to a logged-in user with admin privileges. +func AdminRequired(c *gin.Context) { + session := sessions.Default(c) + id := session.Get("id") + if id == nil { + c.String(http.StatusUnauthorized, "Unauthorized") + c.Abort() + return + } + + // comma-ok assertion: a missing or non-bool value is treated as "not admin" rather than panicking + isAdmin, ok := session.Get("isAdmin").(bool) + if !ok || !isAdmin { + c.String(http.StatusForbidden, "Admin access required") + c.Abort() + return + } + + c.Next() +} + +// GetUser returns the username stored in the session, or "" when no user is logged in. +func GetUser(c *gin.Context) string { + if username, ok := sessions.Default(c).Get("id").(string); ok { + return username + } + return "" +} + +// Logout deletes the session's "id" key, logging the user out. +func Logout(c *gin.Context) { + session := sessions.Default(c) + id := session.Get("id") + if id == nil { + c.JSON(http.StatusOK, gin.H{"message": "No session."}) + return + } + session.Delete("id") + if err := session.Save(); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to save session"}) + return + } + c.JSON(http.StatusOK, gin.H{"message": "Successfully logged out!"}) +} + +// CORSMiddleware sets CORS headers for the configured frontend origin and answers OPTIONS preflight requests directly. +func CORSMiddleware(fqdn string) gin.HandlerFunc { + return func(c *gin.Context) { + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.Header().Set("Access-Control-Allow-Origin", fqdn) + c.Writer.Header().Set("Access-Control-Max-Age", "86400") + c.Writer.Header().Set("Access-Control-Allow-Credentials", "true") + c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") + c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, Origin") + + if c.Request.Method == "OPTIONS" { + c.AbortWithStatus(200) + return + } + + c.Next() + } +} diff --git a/internal/api/routes/admin_routes.go b/internal/api/routes/admin_routes.go new file mode 100644 index 0000000..1bb4eb7 --- /dev/null +++ b/internal/api/routes/admin_routes.go @@ -0,0 +1,44 @@ +package routes + +import ( + "github.com/cpp-cyber/proclone/internal/api/handlers" + "github.com/gin-gonic/gin" +) + +// registerAdminRoutes defines all routes accessible to admin users +func registerAdminRoutes(g *gin.RouterGroup, authHandler *handlers.AuthHandler, proxmoxHandler *handlers.ProxmoxHandler, cloningHandler *handlers.CloningHandler) { + // Create dashboard handler + dashboardHandler := handlers.NewDashboardHandler(authHandler, proxmoxHandler, cloningHandler) + + // GET Requests + g.GET("/dashboard", dashboardHandler.GetDashboardStatsHandler) + g.GET("/cluster", proxmoxHandler.GetClusterResourceUsageHandler) + g.GET("/users", authHandler.GetUsersHandler) + g.GET("/groups", authHandler.GetGroupsHandler) + g.GET("/vms", proxmoxHandler.GetVMsHandler) + g.GET("/pods", cloningHandler.AdminGetPodsHandler) + g.GET("/templates", cloningHandler.AdminGetTemplatesHandler) + g.GET("/templates/unpublished", cloningHandler.GetUnpublishedTemplatesHandler) + + // POST Requests + g.POST("/users/create", authHandler.CreateUsersHandler) + g.POST("/users/delete", authHandler.DeleteUsersHandler) + g.POST("/users/enable", authHandler.EnableUsersHandler) + g.POST("/users/disable", authHandler.DisableUsersHandler) + g.POST("/user/groups", authHandler.SetUserGroupsHandler) + g.POST("/groups/create", authHandler.CreateGroupsHandler)
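+ // For illustration, a call to the route above could look like this (hypothetical host and session cookie; the body shape follows the GroupsRequest binding in handlers/types.go): + // curl -X POST https://kamino.example.com/api/v1/admin/groups/create \ + // -H "Content-Type: application/json" --cookie "session=<cookie>" \ + // -d '{"groups":["blue-team-01"]}'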
+ g.POST("/group/members/add", authHandler.AddUsersHandler) + g.POST("/group/members/remove", authHandler.RemoveUsersHandler) + g.POST("/group/rename", authHandler.RenameGroupHandler) + g.POST("/groups/delete", authHandler.DeleteGroupsHandler) + g.POST("/vm/start", proxmoxHandler.StartVMHandler) + g.POST("/vm/shutdown", proxmoxHandler.ShutdownVMHandler) + g.POST("/vm/reboot", proxmoxHandler.RebootVMHandler) + g.POST("/pods/delete", cloningHandler.AdminDeletePodHandler) + g.POST("/template/publish", cloningHandler.PublishTemplateHandler) + g.POST("/template/edit", cloningHandler.EditTemplateHandler) + g.POST("/template/delete", cloningHandler.DeleteTemplateHandler) + g.POST("/template/visibility", cloningHandler.ToggleTemplateVisibilityHandler) + g.POST("/template/image/upload", cloningHandler.UploadTemplateImageHandler) + g.POST("/templates/clone", cloningHandler.AdminCloneTemplateHandler) +} diff --git a/internal/api/routes/private_routes.go b/internal/api/routes/private_routes.go new file mode 100644 index 0000000..d9eac10 --- /dev/null +++ b/internal/api/routes/private_routes.go @@ -0,0 +1,20 @@ +package routes + +import ( + "github.com/cpp-cyber/proclone/internal/api/handlers" + "github.com/gin-gonic/gin" +) + +// registerPrivateRoutes defines all routes accessible to authenticated users +func registerPrivateRoutes(g *gin.RouterGroup, authHandler *handlers.AuthHandler, proxmoxHandler *handlers.ProxmoxHandler, cloningHandler *handlers.CloningHandler) { + // GET Requests + g.GET("/session", authHandler.SessionHandler) + g.GET("/pods", cloningHandler.GetPodsHandler) + g.GET("/templates", cloningHandler.GetTemplatesHandler) + g.GET("/template/image/:filename", cloningHandler.GetTemplateImageHandler) + + // POST Requests + g.POST("/logout", authHandler.LogoutHandler) + g.POST("/pod/delete", cloningHandler.DeletePodHandler) + g.POST("/template/clone", cloningHandler.CloneTemplateHandler) +} diff --git a/internal/api/routes/public_routes.go b/internal/api/routes/public_routes.go new file mode 100644 index 0000000..ea72952 --- /dev/null +++ b/internal/api/routes/public_routes.go @@ -0,0 +1,13 @@ +package routes + +import ( + "github.com/cpp-cyber/proclone/internal/api/handlers" + "github.com/gin-gonic/gin" +) + +// registerPublicRoutes defines all routes accessible without authentication +func registerPublicRoutes(g *gin.RouterGroup, authHandler *handlers.AuthHandler, cloningHandler *handlers.CloningHandler) { + // GET Requests + g.GET("/health", handlers.HealthCheckHandler(authHandler, cloningHandler)) + + // POST Requests + g.POST("/login", authHandler.LoginHandler) +} diff --git a/internal/api/routes/routes.go b/internal/api/routes/routes.go new file mode 100644 index 0000000..f29a73d --- /dev/null +++ b/internal/api/routes/routes.go @@ -0,0 +1,24 @@ +package routes + +import ( + "github.com/cpp-cyber/proclone/internal/api/handlers" + "github.com/cpp-cyber/proclone/internal/api/middleware" + "github.com/gin-gonic/gin" +) + +// RegisterRoutes sets up all API routes with their respective middleware and handlers +func RegisterRoutes(r *gin.Engine, authHandler *handlers.AuthHandler, proxmoxHandler *handlers.ProxmoxHandler, cloningHandler *handlers.CloningHandler) { + // Public routes (no authentication required) + public := r.Group("/api/v1") + registerPublicRoutes(public, authHandler, cloningHandler) + + // Private routes (authentication required) + private := r.Group("/api/v1") + private.Use(middleware.AuthRequired) + registerPrivateRoutes(private, authHandler, proxmoxHandler, cloningHandler) + + // Admin routes 
(authentication + admin privileges required) + admin := r.Group("/api/v1/admin") + admin.Use(middleware.AdminRequired) + registerAdminRoutes(admin, authHandler, proxmoxHandler, cloningHandler) +} diff --git a/internal/cloning/cloning_service.go b/internal/cloning/cloning_service.go new file mode 100644 index 0000000..9073004 --- /dev/null +++ b/internal/cloning/cloning_service.go @@ -0,0 +1,410 @@ +package cloning + +import ( + "database/sql" + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/cpp-cyber/proclone/internal/ldap" + "github.com/cpp-cyber/proclone/internal/proxmox" + "github.com/kelseyhightower/envconfig" +) + +// LoadCloningConfig loads and validates cloning configuration from environment variables +func LoadCloningConfig() (*Config, error) { + var config Config + if err := envconfig.Process("", &config); err != nil { + return nil, fmt.Errorf("failed to process cloning configuration: %w", err) + } + return &config, nil +} + +func NewTemplateClient(db *sql.DB) *TemplateClient { + return &TemplateClient{ + DB: db, + TemplateConfig: &TemplateConfig{ + UploadDir: os.Getenv("UPLOAD_DIR"), + }, + } +} + +func NewDatabaseService(db *sql.DB) DatabaseService { + return NewTemplateClient(db) +} + +func (c *TemplateClient) GetTemplateConfig() *TemplateConfig { + return c.TemplateConfig +} + +func NewCloningService(proxmoxService proxmox.Service, db *sql.DB, ldapService ldap.Service) (*CloningService, error) { + config, err := LoadCloningConfig() + if err != nil { + return nil, fmt.Errorf("failed to load cloning configuration: %w", err) + } + + if config.RouterVMID == 0 || config.RouterNode == "" { + return nil, fmt.Errorf("incomplete cloning configuration: ROUTER_VMID and ROUTER_NODE must be set") + } + + return &CloningService{ + ProxmoxService: proxmoxService, + DatabaseService: NewDatabaseService(db), + LDAPService: ldapService, + Config: config, + }, nil +} + +func (cs *CloningService) CloneTemplate(req CloneRequest) error { + var errors []string + var createdPools []string + var clonedRouters []RouterInfo + + // 1. Get the template pool and its VMs + templatePool, err := cs.ProxmoxService.GetPoolVMs("kamino_template_" + req.Template) + if err != nil { + return fmt.Errorf("failed to get template pool: %w", err) + } + + // 2. Check if any template is already deployed (if requested) + if req.CheckExistingDeployments { + for _, target := range req.Targets { + targetPoolName := fmt.Sprintf("%s_%s", req.Template, target.Name) + isDeployed, err := cs.IsDeployed(targetPoolName) + if err != nil { + return fmt.Errorf("failed to check if template is deployed for %s: %w", target.Name, err) + } + if isDeployed { + return fmt.Errorf("template %s is already deployed or currently being deployed for %s", req.Template, target.Name) + } + } + } + + // 3. Identify router and other VMs + var router *proxmox.VM + var templateVMs []proxmox.VM + + for _, vm := range templatePool { + // Check to see if this VM is the router + lowerVMName := strings.ToLower(vm.Name) + if strings.Contains(lowerVMName, "router") || strings.Contains(lowerVMName, "pfsense") { + router = &proxmox.VM{ + Name: vm.Name, + Node: vm.NodeName, + VMID: vm.VmId, + } + } else { + templateVMs = append(templateVMs, proxmox.VM{ + Name: vm.Name, + Node: vm.NodeName, + VMID: vm.VmId, + }) + } + } + + // If no router was found in the template, use the default router template + if router == nil { + router = &proxmox.VM{ + Name: cs.Config.RouterName, + Node: cs.Config.RouterNode, + VMID: cs.Config.RouterVMID, + } + }
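+ + // NOTE: a pod only gets a dedicated router when its template pool contains a VM whose name includes "router" or "pfsense"; otherwise the shared default router template (ROUTER_NAME/ROUTER_NODE/ROUTER_VMID in Config) is cloned instead. + + // 4. 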
Verify that the pool is not empty + if len(templateVMs) == 0 { + return fmt.Errorf("template pool %s contains no VMs", req.Template) + } + + // 5. Get pod IDs, Numbers, and VMIDs and assign them to targets + numVMsPerTarget := len(templateVMs) + 1 // +1 for router + log.Printf("Number of VMs per target (including router): %d", numVMsPerTarget) + + podIDs, podNumbers, err := cs.ProxmoxService.GetNextPodIDs(cs.Config.MinPodID, cs.Config.MaxPodID, len(req.Targets)) + if err != nil { + return fmt.Errorf("failed to get next pod IDs: %w", err) + } + + vmIDs, err := cs.ProxmoxService.GetNextVMIDs(len(req.Targets) * numVMsPerTarget) + if err != nil { + return fmt.Errorf("failed to get next VM IDs: %w", err) + } + + for i := range req.Targets { + req.Targets[i].PoolName = fmt.Sprintf("%s_%s_%s", podIDs[i], req.Template, req.Targets[i].Name) + req.Targets[i].PodID = podIDs[i] + req.Targets[i].PodNumber = podNumbers[i] + req.Targets[i].VMIDs = vmIDs[i*(numVMsPerTarget) : (i+1)*(numVMsPerTarget)] + + log.Printf("Target %s: PodID=%s, PodNumber=%d, VMIDs=%v", + req.Targets[i].Name, req.Targets[i].PodID, req.Targets[i].PodNumber, req.Targets[i].VMIDs) + } + + // 6. Create new pool for each target + for _, target := range req.Targets { + err = cs.ProxmoxService.CreateNewPool(target.PoolName) + if err != nil { + cs.cleanupFailedClones(createdPools) + return fmt.Errorf("failed to create new pool for %s: %w", target.Name, err) + } + createdPools = append(createdPools, target.PoolName) + } + + // 7. Clone targets to proxmox + for _, target := range req.Targets { + // Find best node per target + bestNode, err := cs.ProxmoxService.FindBestNode() + if err != nil { + errors = append(errors, fmt.Sprintf("failed to find best node for %s: %v", target.Name, err)) + continue + } + + // Clone router + routerCloneReq := proxmox.VMCloneRequest{ + SourceVM: *router, + PoolName: target.PoolName, + PodID: target.PodID, + NewVMID: target.VMIDs[0], + TargetNode: bestNode, + } + err = cs.ProxmoxService.CloneVMWithConfig(routerCloneReq) + if err != nil { + errors = append(errors, fmt.Sprintf("failed to clone router VM for %s: %v", target.Name, err)) + } else { + // Store router info for later operations + clonedRouters = append(clonedRouters, RouterInfo{ + TargetName: target.Name, + PodNumber: target.PodNumber, + Node: bestNode, + VMID: target.VMIDs[0], + }) + } + + // Clone each VM to new pool + for i, vm := range templateVMs { + vmCloneReq := proxmox.VMCloneRequest{ + SourceVM: vm, + PoolName: target.PoolName, + PodID: target.PodID, + NewVMID: target.VMIDs[i+1], + TargetNode: bestNode, + } + err := cs.ProxmoxService.CloneVMWithConfig(vmCloneReq) + if err != nil { + errors = append(errors, fmt.Sprintf("failed to clone VM %s for %s: %v", vm.Name, target.Name, err)) + } + } + } + + // 8. 
Wait for all VM clone operations to complete before configuring VNets + log.Printf("Waiting for clone operations to complete for %d targets", len(req.Targets)) + for _, target := range req.Targets { + // Wait for all VMs in the pool to be properly cloned + log.Printf("Waiting for VMs in pool %s to be available", target.PoolName) + time.Sleep(2 * time.Second) + + // Check if pool has the expected number of VMs + for retries := range 30 { + poolVMs, err := cs.ProxmoxService.GetPoolVMs(target.PoolName) + if err != nil { + time.Sleep(2 * time.Second) + continue + } + + if len(poolVMs) >= numVMsPerTarget { + log.Printf("Pool %s has %d VMs (expected %d) - clone operations complete", target.PoolName, len(poolVMs), numVMsPerTarget) + break + } + + log.Printf("Pool %s has %d VMs, waiting for %d (retry %d/30)", target.PoolName, len(poolVMs), numVMsPerTarget, retries+1) + time.Sleep(2 * time.Second) + } + } + + // 9. Configure VNet of all VMs + log.Printf("Configuring VNets for %d targets", len(req.Targets)) + for _, target := range req.Targets { + vnetName := fmt.Sprintf("kamino%d", target.PodNumber) + log.Printf("Setting VNet %s for pool %s (target: %s)", vnetName, target.PoolName, target.Name) + err = cs.SetPodVnet(target.PoolName, vnetName) + if err != nil { + errors = append(errors, fmt.Sprintf("failed to update pod vnet for %s: %v", target.Name, err)) + } + } + + // 10. Start all routers and wait for them to be running + log.Printf("Starting %d routers", len(clonedRouters)) + for _, routerInfo := range clonedRouters { + // Wait for router disk to be available + log.Printf("Waiting for router disk to be available for %s (VMID: %d)", routerInfo.TargetName, routerInfo.VMID) + err = cs.ProxmoxService.WaitForDisk(routerInfo.Node, routerInfo.VMID, cs.Config.RouterWaitTimeout) + if err != nil { + errors = append(errors, fmt.Sprintf("router disk unavailable for %s: %v", routerInfo.TargetName, err)) + continue + } + + // Start the router + log.Printf("Starting router VM for %s (VMID: %d)", routerInfo.TargetName, routerInfo.VMID) + err = cs.ProxmoxService.StartVM(routerInfo.Node, routerInfo.VMID) + if err != nil { + errors = append(errors, fmt.Sprintf("failed to start router VM for %s: %v", routerInfo.TargetName, err)) + continue + } + + // Wait for router to be running + routerVM := proxmox.VM{ + Node: routerInfo.Node, + VMID: routerInfo.VMID, + } + log.Printf("Waiting for router VM to be running for %s (VMID: %d)", routerInfo.TargetName, routerInfo.VMID) + err = cs.ProxmoxService.WaitForRunning(routerVM) + if err != nil { + errors = append(errors, fmt.Sprintf("failed to start router VM for %s: %v", routerInfo.TargetName, err)) + } + } + + // 11. 
Configure all pod routers (separate step after all routers are running) + log.Printf("Configuring %d pod routers", len(clonedRouters)) + for _, routerInfo := range clonedRouters { + // Only configure routers that successfully started + routerVM := proxmox.VM{ + Node: routerInfo.Node, + VMID: routerInfo.VMID, + } + + // Double-check that router is still running before configuration + err = cs.ProxmoxService.WaitForRunning(routerVM) + if err != nil { + errors = append(errors, fmt.Sprintf("router not running before configuration for %s: %v", routerInfo.TargetName, err)) + continue + } + + log.Printf("Configuring pod router for %s (Pod: %d, VMID: %d)", routerInfo.TargetName, routerInfo.PodNumber, routerInfo.VMID) + err = cs.configurePodRouter(routerInfo.PodNumber, routerInfo.Node, routerInfo.VMID) + if err != nil { + errors = append(errors, fmt.Sprintf("failed to configure pod router for %s: %v", routerInfo.TargetName, err)) + } + } + + // 12. Set permissions on the pool to the user/group + for _, target := range req.Targets { + err = cs.ProxmoxService.SetPoolPermission(target.PoolName, target.Name, target.IsGroup) + if err != nil { + errors = append(errors, fmt.Sprintf("failed to update pool permissions for %s: %v", target.Name, err)) + } + } + + // 13. Add deployments to the templates database + err = cs.DatabaseService.AddDeployment(req.Template, len(req.Targets)) + if err != nil { + errors = append(errors, fmt.Sprintf("failed to increment template deployments for %s: %v", req.Template, err)) + } + + // Handle errors and cleanup if necessary + if len(errors) > 0 { + cs.cleanupFailedClones(createdPools) + return fmt.Errorf("bulk clone operation completed with errors: %v", errors) + } + + return nil +} + +func (cs *CloningService) DeletePod(pod string) error { + // 1. Check if pool is already empty + isEmpty, err := cs.ProxmoxService.IsPoolEmpty(pod) + if err != nil { + return fmt.Errorf("failed to check if pool %s is empty: %w", pod, err) + } + + if isEmpty { + if err := cs.ProxmoxService.DeletePool(pod); err != nil { + return fmt.Errorf("failed to delete empty pool %s: %w", pod, err) + } + return nil + } + + // 2. Get all virtual machines in the pool + poolVMs, err := cs.ProxmoxService.GetPoolVMs(pod) + if err != nil { + return fmt.Errorf("failed to get pool VMs for %s: %w", pod, err) + } + + // 3. Stop all VMs and wait for them to be stopped + var runningVMs []proxmox.VM + + for _, vm := range poolVMs { + if vm.Type == "qemu" { + // Only stop if VM is running + if vm.RunningStatus == "running" { + err := cs.ProxmoxService.StopVM(vm.NodeName, vm.VmId) + if err != nil { + return fmt.Errorf("failed to stop VM %s: %w", vm.Name, err) + } + + // Only add to wait list if it was actually running + runningVMs = append(runningVMs, proxmox.VM{ + Node: vm.NodeName, + VMID: vm.VmId, + }) + } + } + } + + // Wait for all previously running VMs to be stopped + for _, vm := range runningVMs { + if err := cs.ProxmoxService.WaitForStopped(vm); err != nil { + // Continue with deletion even if we can't confirm the VM is stopped + } + } + + // 4. Delete all VMs + for _, vm := range poolVMs { + if vm.Type == "qemu" { + err := cs.ProxmoxService.DeleteVM(vm.NodeName, vm.VmId) + if err != nil { + return fmt.Errorf("failed to delete VM %s: %w", vm.Name, err) + } + } + }
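+ + // The stop-then-delete ordering above matters: Proxmox will normally refuse to destroy a VM that is still running, so every running VM is stopped and awaited before deletion begins. + + // 5. 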
Wait for all VMs to be deleted and pool to become empty + err = cs.ProxmoxService.WaitForPoolEmpty(pod, 5*time.Minute) + if err != nil { + // Continue with pool deletion even if we can't confirm all VMs are gone + } + + // 6. Delete the pool + err = cs.ProxmoxService.DeletePool(pod) + if err != nil { + return fmt.Errorf("failed to delete pool %s: %w", pod, err) + } + + return nil +} + +func (cs *CloningService) cleanupFailedClones(createdPools []string) { + for _, poolName := range createdPools { + // Check if pool has any VMs + poolVMs, err := cs.ProxmoxService.GetPoolVMs(poolName) + if err != nil { + continue // Skip if we can't check + } + + // If pool is empty, delete it + if len(poolVMs) == 0 { + _ = cs.ProxmoxService.DeletePool(poolName) + } + } +} diff --git a/internal/cloning/networking.go b/internal/cloning/networking.go new file mode 100644 index 0000000..0cc56f3 --- /dev/null +++ b/internal/cloning/networking.go @@ -0,0 +1,135 @@ +package cloning + +import ( + "fmt" + "log" + "math" + "regexp" + "time" + + "github.com/cpp-cyber/proclone/internal/tools" +) + +// configurePodRouter configures the pod router with proper networking settings +func (cs *CloningService) configurePodRouter(podNumber int, node string, vmid int) error { + // Wait for router agent to be pingable + statusReq := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/agent/ping", node, vmid), + } + + backoff := time.Second + maxBackoff := 30 * time.Second + timeout := 5 * time.Minute + startTime := time.Now() + + for { + if time.Since(startTime) > timeout { + return fmt.Errorf("router qemu agent timed out") + } + + _, err := cs.ProxmoxService.GetRequestHelper().MakeRequest(statusReq) + if err == nil { + break // Agent is responding + } + + time.Sleep(backoff) + backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff))) + } + + // Configure router WAN IP to have correct third octet using qemu agent API call + reqBody := map[string]any{ + "command": []string{ + cs.Config.WANScriptPath, + fmt.Sprintf("%s%d.1", cs.Config.WANIPBase, podNumber), + }, + } + + execReq := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid), + RequestBody: reqBody, + } + + _, err := cs.ProxmoxService.GetRequestHelper().MakeRequest(execReq) + if err != nil { + return fmt.Errorf("failed to make IP change request: %v", err) + } + + // Send agent exec request to change VIP subnet + vipReqBody := map[string]any{ + "command": []string{ + cs.Config.VIPScriptPath, + fmt.Sprintf("%s%d.0", cs.Config.WANIPBase, podNumber), + }, + } + + vipExecReq := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/agent/exec", node, vmid), + RequestBody: vipReqBody, + } + + _, err = cs.ProxmoxService.GetRequestHelper().MakeRequest(vipExecReq) + if err != nil { + return fmt.Errorf("failed to make VIP change request: %v", err) + } + + return nil +} + +func (cs *CloningService) SetPodVnet(poolName string, vnetName string) error { + // Get all VMs in the pool + vms, err := cs.ProxmoxService.GetPoolVMs(poolName) + if err != nil { + return fmt.Errorf("failed to get pool VMs for pool %s: %w", poolName, err) + } + + if len(vms) == 0 { + return fmt.Errorf("pool %s contains no VMs", poolName) + } + + log.Printf("Setting VNet %s for %d VMs in pool %s", vnetName, len(vms), poolName) + + routerRegex := regexp.MustCompile(`(?i).*(router|pfsense).*`) + var errors []string + + for _, vm := range vms { + vnet := "net0" + + // Detect if VM 
is a router based on its name (lazy way but requires fewer API calls) + if routerRegex.MatchString(vm.Name) { + vnet = "net1" + log.Printf("Detected router VM %s (VMID: %d), using %s interface", vm.Name, vm.VmId, vnet) + } else { + log.Printf("Setting VNet for VM %s (VMID: %d), using %s interface", vm.Name, vm.VmId, vnet) + } + + // Update VM network configuration + reqBody := map[string]string{ + vnet: fmt.Sprintf("virtio,bridge=%s,firewall=1", vnetName), + } + + req := tools.ProxmoxAPIRequest{ + Method: "PUT", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/config", vm.NodeName, vm.VmId), + RequestBody: reqBody, + } + + _, err := cs.ProxmoxService.GetRequestHelper().MakeRequest(req) + if err != nil { + errorMsg := fmt.Sprintf("failed to update network for VM %s (VMID: %d): %v", vm.Name, vm.VmId, err) + log.Printf("ERROR: %s", errorMsg) + errors = append(errors, errorMsg) + } else { + log.Printf("Successfully updated VNet for VM %s (VMID: %d) to %s", vm.Name, vm.VmId, vnetName) + } + } + + if len(errors) > 0 { + return fmt.Errorf("VNet configuration completed with errors: %v", errors) + } + + log.Printf("Successfully configured VNet %s for all %d VMs in pool %s", vnetName, len(vms), poolName) + return nil +} diff --git a/internal/cloning/pods.go b/internal/cloning/pods.go new file mode 100644 index 0000000..e97b1b7 --- /dev/null +++ b/internal/cloning/pods.go @@ -0,0 +1,92 @@ +package cloning + +import ( + "fmt" + "regexp" + "strings" + + "github.com/cpp-cyber/proclone/internal/proxmox" +) + +func (cs *CloningService) GetPods(username string) ([]Pod, error) { + // Get User DN + userDN, err := cs.LDAPService.GetUserDN(username) + if err != nil { + return nil, fmt.Errorf("failed to get user DN: %w", err) + } + + // Get user's groups + groups, err := cs.LDAPService.GetUserGroups(userDN) + if err != nil { + return nil, fmt.Errorf("failed to get user groups: %w", err) + } + + // Build a regex pattern matching pools owned by the user or any of their groups. + // Each name is escaped with regexp.QuoteMeta so regex metacharacters in a name + // cannot break or widen the pattern; this also avoids the empty alternative + // that an empty group list would otherwise produce. + owners := make([]string, 0, len(groups)+1) + owners = append(owners, regexp.QuoteMeta(username)) + for _, group := range groups { + owners = append(owners, regexp.QuoteMeta(group)) + } + regexPattern := fmt.Sprintf(`1[0-9]{3}_.*_(%s)`, strings.Join(owners, "|")) + + // Get pods based on regex pattern + pods, err := cs.MapVirtualResourcesToPods(regexPattern) + if err != nil { + return nil, err + } + return pods, nil +} + +func (cs *CloningService) AdminGetPods() ([]Pod, error) { + pods, err := cs.MapVirtualResourcesToPods(`1[0-9]{3}_.*`) + if err != nil { + return nil, err + } + return pods, nil +} + +func (cs *CloningService) MapVirtualResourcesToPods(regex string) ([]Pod, error) { + // Get cluster resources + resources, err := cs.ProxmoxService.GetClusterResources("") + if err != nil { + return nil, err + } + + podMap := make(map[string]*Pod) + reg := regexp.MustCompile(regex) + + // Iterate over cluster resources, this works because proxmox displays pools before VMs + for _, r := range resources { + if r.Type == "pool" && reg.MatchString(r.ResourcePool) { + name := r.ResourcePool + podMap[name] = &Pod{ + Name: name, + VMs: []proxmox.VirtualResource{}, + } + } + if r.Type == "qemu" && reg.MatchString(r.ResourcePool) { + if pod, ok := podMap[r.ResourcePool]; ok { + pod.VMs = append(pod.VMs, r) + } + } + } + + // Convert map to slice + var pods []Pod + for _, pod := range podMap { + pods = append(pods, *pod) + } + + return pods, nil +} + +func (cs *CloningService) IsDeployed(templateName string) (bool, error) { + podPools, err := cs.AdminGetPods() + if err != nil { + return false, fmt.Errorf("failed to get pod pools: %w", err) + } + + for _, pod := range podPools { + // Pool names look like "1234_Template_Target": strip the five-character + // pod ID prefix ("1234_") before comparing against "Template_Target" + if pod.Name[5:] 
== templateName { + return true, nil + } + } + + return false, nil +} diff --git a/internal/cloning/templates.go b/internal/cloning/templates.go new file mode 100644 index 0000000..c895b2e --- /dev/null +++ b/internal/cloning/templates.go @@ -0,0 +1,354 @@ +package cloning + +import ( + "database/sql" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +// ================================================= +// Template Database Operations +// ================================================= + +func (c *TemplateClient) GetTemplates() ([]KaminoTemplate, error) { + query := "SELECT * FROM templates WHERE template_visible = true ORDER BY created_at DESC" + rows, err := c.DB.Query(query) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + defer rows.Close() + + return c.buildTemplates(rows) +} + +func (c *TemplateClient) GetPublishedTemplates() ([]KaminoTemplate, error) { + query := "SELECT * FROM templates" + rows, err := c.DB.Query(query) + if err != nil { + return nil, fmt.Errorf("failed to execute query: %w", err) + } + defer rows.Close() + + return c.buildTemplates(rows) +} + +func (c *TemplateClient) DeleteTemplate(templateName string) error { + // Get template image path and delete the image + template, err := c.GetTemplateInfo(templateName) + if err != nil { + return fmt.Errorf("failed to get template info: %w", err) + } + + // Only attempt to delete image if there's an image path + if template.ImagePath != "" { + err = c.DeleteImage(template.ImagePath) + if err != nil { + return fmt.Errorf("failed to delete template image: %w", err) + } + } + + // Delete template from database + query := "DELETE FROM templates WHERE name = ?" + result, err := c.DB.Exec(query, templateName) + if err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + + // Check if any rows were affected + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } + if rowsAffected == 0 { + return fmt.Errorf("template not found: %s", templateName) + } + + return nil +} + +func (c *TemplateClient) ToggleTemplateVisibility(templateName string) error { + query := "UPDATE templates SET template_visible = NOT template_visible WHERE name = ?" 
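+ // The flip happens atomically inside SQL via NOT template_visible, avoiding a read-modify-write race; the "?" placeholder style assumes a MySQL-compatible driver.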
+ _, err := c.DB.Exec(query, templateName) + if err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + + return nil +} + +func (c *TemplateClient) GetAllTemplateNames() ([]string, error) { + templates, err := c.GetPublishedTemplates() + if err != nil { + return nil, err + } + + var templateNames []string + for _, template := range templates { + templateNames = append(templateNames, template.Name) + } + + return templateNames, nil +} + +func (c *TemplateClient) InsertTemplate(template KaminoTemplate) error { + query := "INSERT INTO templates (name, description, image_path, authors, template_visible, vm_count) VALUES (?, ?, ?, ?, ?, ?)" + _, err := c.DB.Exec(query, template.Name, template.Description, template.ImagePath, template.Authors, template.TemplateVisible, template.VMCount) + if err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + + return nil +} + +func (c *TemplateClient) EditTemplate(template KaminoTemplate) error { + setParts := []string{} + args := []any{} + + // Always update description + setParts = append(setParts, "description = ?") + args = append(args, template.Description) + + // Only update image_path if it's not empty + if template.ImagePath != "" { + setParts = append(setParts, "image_path = ?") + args = append(args, template.ImagePath) + } + + // Always update authors + setParts = append(setParts, "authors = ?") + args = append(args, template.Authors) + + // Always update vm_count + setParts = append(setParts, "vm_count = ?") + args = append(args, template.VMCount) + + // Always update template_visible + setParts = append(setParts, "template_visible = ?") + args = append(args, template.TemplateVisible) + + // Build and execute the query + query := fmt.Sprintf("UPDATE templates SET %s WHERE name = ?", strings.Join(setParts, ", ")) + args = append(args, template.Name) + + _, err := c.DB.Exec(query, args...) + if err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + + return nil +} + +func (c *TemplateClient) AddDeployment(templateName string, num int) error { + query := "UPDATE templates SET deployments = deployments + ? WHERE name = ?" + _, err := c.DB.Exec(query, num, templateName) + if err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + + return nil +} + +func (c *TemplateClient) GetTemplateInfo(templateName string) (KaminoTemplate, error) { + query := "SELECT * FROM templates WHERE name = ?" 
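+ // SELECT * ties this query to the table's physical column order: the Scan call below (like buildTemplates) must list struct fields in exactly that order.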
+ row := c.DB.QueryRow(query, templateName) + + var template KaminoTemplate + err := row.Scan( + &template.Name, + &template.Description, + &template.ImagePath, + &template.Authors, + &template.TemplateVisible, + &template.PodVisible, + &template.VMsVisible, + &template.VMCount, + &template.Deployments, + &template.CreatedAt, + ) + if err != nil { + // database/sql returns the sentinel sql.ErrNoRows for an empty result; + // comparing against it is more robust than matching the error string + if err == sql.ErrNoRows { + return KaminoTemplate{}, nil // No error, but template not found + } + return KaminoTemplate{}, fmt.Errorf("failed to scan row: %w", err) + } + + return template, nil +} + +func (cs *CloningService) GetUnpublishedTemplates() ([]string, error) { + // Gets published templates from the database + publishedTemplates, err := cs.DatabaseService.GetPublishedTemplates() + if err != nil { + return nil, fmt.Errorf("failed to get published templates: %w", err) + } + + // Gets pools that start with "kamino_template_" in Proxmox + proxmoxTemplates, err := cs.ProxmoxService.GetTemplatePools() + if err != nil { + return nil, fmt.Errorf("failed to get Proxmox templates: %w", err) + } + + var unpublished = []string{} + for _, template := range proxmoxTemplates { + trimmedTemplateName := strings.TrimPrefix(template, "kamino_template_") + + found := false + for _, pubTemplate := range publishedTemplates { + if pubTemplate.Name == trimmedTemplateName { + found = true + break + } + } + + if !found { + unpublished = append(unpublished, trimmedTemplateName) + } + } + + return unpublished, nil +} + +func (cs *CloningService) PublishTemplate(template KaminoTemplate) error { + // Insert template information into database + if err := cs.DatabaseService.InsertTemplate(template); err != nil { + return fmt.Errorf("failed to publish template: %w", err) + } + + // Get all VMs in pool + vms, err := cs.ProxmoxService.GetPoolVMs("kamino_template_" + template.Name) + if err != nil { + log.Printf("Error retrieving VMs in pool: %v", err) + return fmt.Errorf("failed to get VMs in pool: %w", err) + } + + // Convert all VMs to templates + for _, vm := range vms { + if err := cs.ProxmoxService.ConvertVMToTemplate(vm.NodeName, vm.VmId); err != nil { + log.Printf("Error converting VM %d to template: %v", vm.VmId, err) + return fmt.Errorf("failed to convert VM to template: %w", err) + } + } + + return nil +} + +// ================================================= +// Template Image Operations +// ================================================= + +func (cl *TemplateClient) UploadTemplateImage(c *gin.Context) (*UploadResult, error) { + // Check header for multipart/form-data + if !strings.HasPrefix(c.Request.Header.Get("Content-Type"), "multipart/form-data") { + return nil, fmt.Errorf("invalid content type") + } + + // Parse the multipart form + file, header, err := c.Request.FormFile("image") + if err != nil { + return nil, fmt.Errorf("image field is required") + } + defer file.Close() + + // Basic check: Is file size 0? 
+ if header.Size == 0 { + return nil, fmt.Errorf("uploaded file is empty") + } + + // Block unsupported file types + filetype, err := detectMIME(file) + if err != nil { + return nil, fmt.Errorf("failed to detect file type") + } + if _, ok := allowedMIMEs[filetype]; !ok { + return nil, fmt.Errorf("unsupported file type: %s", filetype) + } + + // Reset file pointer back to beginning + if _, err := file.Seek(0, io.SeekStart); err != nil { + return nil, fmt.Errorf("failed to reset file reader") + } + + // Sanitize the client-supplied file name: strip any directory components and replace spaces + filename := filepath.Base(header.Filename) + filename = filepath.Clean(filename) + filename = strings.ReplaceAll(filename, " ", "_") + + // Prefix with a UUID so concurrent uploads of the same file name cannot collide + newFilename := fmt.Sprintf("%s-%s", uuid.NewString(), filename) + outPath := filepath.Join(cl.TemplateConfig.UploadDir, newFilename) + + // Save file using Gin utility + if err := c.SaveUploadedFile(header, outPath); err != nil { + return nil, fmt.Errorf("unable to save file: %w", err) + } + + result := &UploadResult{ + Message: "file uploaded successfully", + Filename: newFilename, + MimeType: filetype, + Path: outPath, + } + + return result, nil +} + +func (c *TemplateClient) DeleteImage(imagePath string) error { + if imagePath == "" { + return fmt.Errorf("image path is empty") + } + + fullPath := filepath.Join(c.TemplateConfig.UploadDir, imagePath) + if err := os.Remove(fullPath); err != nil { + return fmt.Errorf("failed to delete image: %w", err) + } + return nil +} + +// ================================================= +// Private Functions +// ================================================= + +func (c *TemplateClient) buildTemplates(rows *sql.Rows) ([]KaminoTemplate, error) { + templates := []KaminoTemplate{} + + for rows.Next() { + var template KaminoTemplate + err := rows.Scan( + &template.Name, + &template.Description, + &template.ImagePath, + &template.Authors, + &template.TemplateVisible, + &template.PodVisible, + &template.VMsVisible, + &template.VMCount, + &template.Deployments, + &template.CreatedAt, + ) + if err != nil { + return nil, fmt.Errorf("failed to scan row: %w", err) + } + templates = append(templates, template) + } + + // Surface any iteration error that terminated the loop early + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("failed to iterate rows: %w", err) + } + + return templates, nil +} + +// detectMIME reads a small buffer to determine the file's MIME type +func detectMIME(f multipart.File) (string, error) { + buffer := make([]byte, 512) + if _, err := f.Read(buffer); err != nil && err != io.EOF { + return "", err + } + return http.DetectContentType(buffer), nil +} diff --git a/internal/cloning/types.go b/internal/cloning/types.go new file mode 100644 index 0000000..ae88cc7 --- /dev/null +++ b/internal/cloning/types.go @@ -0,0 +1,123 @@ +package cloning + +import ( + "database/sql" + "time" + + "github.com/cpp-cyber/proclone/internal/ldap" + "github.com/cpp-cyber/proclone/internal/proxmox" + "github.com/gin-gonic/gin" +) + +// Config holds the configuration for cloning operations +type Config struct { + RouterName string `envconfig:"ROUTER_NAME" default:"1-1NAT-pfsense"` + RouterVMID int `envconfig:"ROUTER_VMID"` + RouterNode string `envconfig:"ROUTER_NODE"` + MinPodID int `envconfig:"MIN_POD_ID" default:"1001"` + MaxPodID int `envconfig:"MAX_POD_ID" default:"1250"` + CloneTimeout time.Duration `envconfig:"CLONE_TIMEOUT" default:"3m"` + RouterWaitTimeout time.Duration `envconfig:"ROUTER_WAIT_TIMEOUT" default:"120s"` + SDNApplyTimeout time.Duration 
`envconfig:"SDN_APPLY_TIMEOUT" default:"30s"` + WANScriptPath string `envconfig:"WAN_SCRIPT_PATH" default:"/home/update-wan-ip.sh"` + VIPScriptPath string `envconfig:"VIP_SCRIPT_PATH" default:"/home/update-wan-vip.sh"` + WANIPBase string `envconfig:"WAN_IP_BASE" default:"172.16."` +} + +// KaminoTemplate represents a template in the system +type KaminoTemplate struct { + Name string `json:"name" binding:"required,min=1,max=100" validate:"alphanum,ascii"` + Description string `json:"description" binding:"required,min=1,max=5000"` + ImagePath string `json:"image_path" binding:"omitempty,max=255" validate:"omitempty,file"` + Authors string `json:"authors" binding:"omitempty,max=255"` + TemplateVisible bool `json:"template_visible"` + PodVisible bool `json:"pod_visible"` + VMsVisible bool `json:"vms_visible"` + VMCount int `json:"vm_count" binding:"min=0,max=100"` + Deployments int `json:"deployments" binding:"min=0"` + CreatedAt string `json:"created_at" binding:"omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` +} + +// DatabaseService interface defines the methods for template operations +type DatabaseService interface { + GetTemplates() ([]KaminoTemplate, error) + GetPublishedTemplates() ([]KaminoTemplate, error) + InsertTemplate(template KaminoTemplate) error + DeleteTemplate(templateName string) error + ToggleTemplateVisibility(templateName string) error + UploadTemplateImage(c *gin.Context) (*UploadResult, error) + GetTemplateConfig() *TemplateConfig + GetTemplateInfo(templateName string) (KaminoTemplate, error) + AddDeployment(templateName string, num int) error + EditTemplate(template KaminoTemplate) error + GetAllTemplateNames() ([]string, error) + DeleteImage(imagePath string) error +} + +// TemplateConfig holds template configuration +type TemplateConfig struct { + UploadDir string +} + +// UploadResult holds the result of a file upload +type UploadResult struct { + Message string `json:"message"` + Filename string `json:"filename"` + MimeType string `json:"mime_type"` + Path string `json:"path"` +} + +// TemplateClient implements the DatabaseService interface for template operations +type TemplateClient struct { + DB *sql.DB + TemplateConfig *TemplateConfig +} + +// CloningService combines Proxmox service and templates database functionality +// for handling VM cloning operations +type CloningService struct { + ProxmoxService proxmox.Service + DatabaseService DatabaseService + LDAPService ldap.Service + Config *Config +} + +// PodResponse represents the response structure for pod operations +type PodResponse struct { + Pods []Pod `json:"pods"` +} + +// Pod represents a pod containing VMs and template information +type Pod struct { + Name string `json:"name"` + VMs []proxmox.VirtualResource `json:"vms"` + Template KaminoTemplate `json:"template,omitempty"` +} + +var allowedMIMEs = map[string]struct{}{ + "image/jpeg": {}, + "image/png": {}, +} + +type CloneTarget struct { + Name string + IsGroup bool + Node string + PoolName string + PodID string + PodNumber int + VMIDs []int +} + +type CloneRequest struct { + Template string + Targets []CloneTarget + CheckExistingDeployments bool // Whether to check if templates are already deployed +} + +type RouterInfo struct { + TargetName string + PodNumber int + Node string + VMID int +} diff --git a/internal/ldap/groups.go b/internal/ldap/groups.go new file mode 100644 index 0000000..5f7359d --- /dev/null +++ b/internal/ldap/groups.go @@ -0,0 +1,326 @@ +package ldap + +import ( + "fmt" + "regexp" + "strings" + "time" + + 
ldapv3 "github.com/go-ldap/ldap/v3" +) + +// ================================================= +// Public Functions +// ================================================= + +func (s *LDAPService) GetGroups() ([]Group, error) { + // Search for all groups in the KaminoGroups OU + kaminoGroupsOU := "OU=KaminoGroups," + s.client.config.BaseDN + req := ldapv3.NewSearchRequest( + kaminoGroupsOU, + ldapv3.ScopeWholeSubtree, ldapv3.NeverDerefAliases, 0, 0, false, + "(objectClass=group)", + []string{"cn", "whenCreated", "member"}, + nil, + ) + + searchResult, err := s.client.Search(req) + if err != nil { + return nil, fmt.Errorf("failed to search for groups: %v", err) + } + + var groups []Group + for _, entry := range searchResult.Entries { + cn := entry.GetAttributeValue("cn") + + // Check if the group is protected + protectedGroup, err := isProtectedGroup(cn) + if err != nil { + return nil, fmt.Errorf("failed to determine if the group %s is protected: %v", cn, err) + } + + group := Group{ + Name: cn, + CanModify: !protectedGroup, + UserCount: len(entry.GetAttributeValues("member")), + } + + // Add creation date if available and convert it + whenCreated := entry.GetAttributeValue("whenCreated") + if whenCreated != "" { + // AD stores dates in GeneralizedTime format: YYYYMMDDHHMMSS.0Z + if parsedTime, err := time.Parse("20060102150405.0Z", whenCreated); err == nil { + group.CreatedAt = parsedTime.Format("2006-01-02 15:04:05") + } + } + + groups = append(groups, group) + } + + return groups, nil +} + +func (s *LDAPService) CreateGroup(groupName string) error { + // Validate group name + if err := validateGroupName(groupName); err != nil { + return fmt.Errorf("invalid group name: %v", err) + } + + // Check if group already exists + _, err := s.getGroupDN(groupName) + if err == nil { + return fmt.Errorf("group already exists: %s", groupName) + } + + // Construct the DN for the new group + groupDN := fmt.Sprintf("CN=%s,OU=KaminoGroups,%s", groupName, s.client.config.BaseDN) + + // Create the add request + addReq := ldapv3.NewAddRequest(groupDN, nil) + addReq.Attribute("objectClass", []string{"top", "group"}) + addReq.Attribute("cn", []string{groupName}) + addReq.Attribute("sAMAccountName", []string{groupName}) + addReq.Attribute("groupType", []string{"-2147483646"}) + + // Execute the add request + err = s.client.Add(addReq) + if err != nil { + return fmt.Errorf("failed to create group: %v", err) + } + + return nil +} + +func (s *LDAPService) RenameGroup(oldGroupName string, newGroupName string) error { + // Validate new group name + if err := validateGroupName(newGroupName); err != nil { + return fmt.Errorf("invalid new group name: %v", err) + } + + // Check if old group exists + oldGroupDN, err := s.getGroupDN(oldGroupName) + if err != nil { + return fmt.Errorf("old group not found: %v", err) + } + + // Check if new group already exists + _, err = s.getGroupDN(newGroupName) + if err == nil { + return fmt.Errorf("new group name already exists: %s", newGroupName) + } + + // Create modify DN request + newRDN := fmt.Sprintf("CN=%s", newGroupName) + modifyDNReq := ldapv3.NewModifyDNRequest(oldGroupDN, newRDN, true, "") + + // Execute the modify DN request + err = s.client.ModifyDN(modifyDNReq) + if err != nil { + return fmt.Errorf("failed to rename group: %v", err) + } + + return nil +} + +func (s *LDAPService) DeleteGroup(groupName string) error { + // Check if group is protected + protected, err := isProtectedGroup(groupName) + if err != nil { + return fmt.Errorf("failed to check if group is protected: 
%v", err) + } + if protected { + return fmt.Errorf("cannot delete protected group: %s", groupName) + } + + // Get group DN + groupDN, err := s.getGroupDN(groupName) + if err != nil { + return fmt.Errorf("group not found: %v", err) + } + + // Create delete request + delReq := ldapv3.NewDelRequest(groupDN, nil) + + // Execute the delete request + err = s.client.Del(delReq) + if err != nil { + return fmt.Errorf("failed to delete group: %v", err) + } + + return nil +} + +func (s *LDAPService) GetGroupMembers(groupName string) ([]User, error) { + groupDN, err := s.getGroupDN(groupName) + if err != nil { + return nil, fmt.Errorf("group not found: %v", err) + } + + // Search for the group and get its members + req := ldapv3.NewSearchRequest( + groupDN, + ldapv3.ScopeBaseObject, ldapv3.NeverDerefAliases, 0, 0, false, + "(objectClass=group)", + []string{"member"}, + nil, + ) + + searchResult, err := s.client.Search(req) + if err != nil { + return nil, fmt.Errorf("failed to search for group: %v", err) + } + + if len(searchResult.Entries) == 0 { + return []User{}, nil + } + + memberDNs := searchResult.Entries[0].GetAttributeValues("member") + var users []User + + for _, memberDN := range memberDNs { + // Get user details from DN + userReq := ldapv3.NewSearchRequest( + memberDN, + ldapv3.ScopeBaseObject, ldapv3.NeverDerefAliases, 0, 0, false, + "(objectClass=user)", + []string{"sAMAccountName", "cn", "whenCreated", "userAccountControl"}, + nil, + ) + + userResult, err := s.client.Search(userReq) + if err != nil { + continue // Skip this user if there's an error + } + + if len(userResult.Entries) > 0 { + entry := userResult.Entries[0] + user := User{ + Name: entry.GetAttributeValue("sAMAccountName"), + CreatedAt: entry.GetAttributeValue("whenCreated"), + Enabled: true, // Default, will be updated based on userAccountControl + } + + // userAccountControl is a decimal bit field; the account is disabled + // when the UF_ACCOUNTDISABLE bit (0x2) is set + userAccountControl := entry.GetAttributeValue("userAccountControl") + if userAccountControl != "" { + var uac int64 + if _, err := fmt.Sscanf(userAccountControl, "%d", &uac); err == nil && uac&0x2 != 0 { + user.Enabled = false + } + } + + users = append(users, user) + } + } + + return users, nil +} + +func (s *LDAPService) AddUsersToGroup(groupName string, usernames []string) error { + groupDN, err := s.getGroupDN(groupName) + if err != nil { + return fmt.Errorf("group not found: %v", err) + } + + // Add users one by one to handle cases where some users might already be in the group + for _, username := range usernames { + userDN, err := s.GetUserDN(username) + if err != nil { + return fmt.Errorf("user %s not found: %v", username, err) + } + + if err := s.AddToGroup(userDN, groupDN); err != nil { + return fmt.Errorf("failed to add user %s to group: %v", username, err) + } + } + + return nil +} + +func (s *LDAPService) RemoveUsersFromGroup(groupName string, usernames []string) error { + groupDN, err := s.getGroupDN(groupName) + if err != nil { + return fmt.Errorf("group not found: %v", err) + } + + // Remove users one by one to handle cases where some users might not be in the group + for _, username := range usernames { + userDN, err := s.GetUserDN(username) + if err != nil { + return fmt.Errorf("user %s not found: %v", username, err) + } + + if err := s.RemoveFromGroup(userDN, groupDN); err != nil { + return fmt.Errorf("failed to remove user %s from group: %v", username, err) + } + } + + return nil +}
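+ +// For reference, common userAccountControl values in AD (decimal): 512 = NORMAL_ACCOUNT (enabled); 514 = 512|0x2 (disabled); 66048 = 512|DONT_EXPIRE_PASSWD (enabled); 66050 = 66048|0x2 (disabled). Worked check: 514 & 0x2 = 2, so 514 decodes as disabled, while 512 & 0x2 = 0 decodes as enabled. + +// ================================================= +// Private Functions +// 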
================================================= + +func (s *LDAPService) getGroupDN(groupName string) (string, error) { + kaminoGroupsOU := "OU=KaminoGroups," + s.client.config.BaseDN + req := ldapv3.NewSearchRequest( + kaminoGroupsOU, + ldapv3.ScopeWholeSubtree, ldapv3.NeverDerefAliases, 1, 30, false, + fmt.Sprintf("(&(objectClass=group)(cn=%s))", ldapv3.EscapeFilter(groupName)), + []string{"dn"}, + nil, + ) + + searchResult, err := s.client.Search(req) + if err != nil { + return "", fmt.Errorf("failed to search for group: %v", err) + } + + if len(searchResult.Entries) == 0 { + return "", fmt.Errorf("group %s not found", groupName) + } + + return searchResult.Entries[0].DN, nil +} + +func validateGroupName(groupName string) error { + if groupName == "" { + return fmt.Errorf("group name cannot be empty") + } + + if len(groupName) >= 64 { + return fmt.Errorf("group name must be less than 64 characters") + } + + regex := regexp.MustCompile("^[a-zA-Z0-9-_]*$") + if !regex.MatchString(groupName) { + return fmt.Errorf("group name must only contain letters, numbers, hyphens, and underscores") + } + + return nil +} + +func isProtectedGroup(groupName string) (bool, error) { + protectedGroups := []string{ + "Domain Admins", + "Domain Users", + "Domain Guests", + "Schema Admins", + "Enterprise Admins", + "Administrators", + "Users", + "Guests", + "Proxmox-Admins", + "KaminoUsers", + } + + for _, protectedGroup := range protectedGroups { + if strings.EqualFold(groupName, protectedGroup) { + return true, nil + } + } + + return false, nil +} diff --git a/internal/ldap/ldap_client.go b/internal/ldap/ldap_client.go new file mode 100644 index 0000000..e43582d --- /dev/null +++ b/internal/ldap/ldap_client.go @@ -0,0 +1,412 @@ +package ldap + +import ( + "crypto/tls" + "fmt" + "strings" + "time" + + "github.com/go-ldap/ldap/v3" + "github.com/kelseyhightower/envconfig" +) + +func NewClient(config *Config) *Client { + return &Client{config: config} +} + +func LoadConfig() (*Config, error) { + var config Config + if err := envconfig.Process("", &config); err != nil { + return nil, fmt.Errorf("failed to process LDAP configuration: %w", err) + } + return &config, nil +} + +func (c *Client) Connect() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + conn, err := c.dial() + if err != nil { + c.connected = false + return fmt.Errorf("failed to connect to LDAP server: %v", err) + } + + if c.config.BindUser != "" { + err = conn.Bind(c.config.BindUser, c.config.BindPassword) + if err != nil { + conn.Close() + c.connected = false + return fmt.Errorf("failed to bind to LDAP server: %v", err) + } + } + + c.conn = conn + c.connected = true + return nil +} + +func (c *Client) dial() (ldap.Client, error) { + if strings.HasPrefix(c.config.URL, "ldaps://") { + return ldap.DialURL(c.config.URL, ldap.DialWithTLSConfig(&tls.Config{InsecureSkipVerify: c.config.SkipTLSVerify})) + } + return nil, fmt.Errorf("unsupported LDAP URL scheme: %s", c.config.URL) +} + +func (c *Client) Disconnect() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + if c.conn != nil { + c.conn.Close() + c.conn = nil + } + c.connected = false + return nil +} + +// isConnectionError checks if an error indicates a connection problem +func (c *Client) isConnectionError(err error) bool { + if err == nil { + return false + } + + errorMsg := strings.ToLower(err.Error()) + return strings.Contains(errorMsg, "connection closed") || + strings.Contains(errorMsg, "network error") || + strings.Contains(errorMsg, "connection reset") || + strings.Contains(errorMsg, "broken pipe") || + strings.Contains(errorMsg, "connection refused") || + strings.Contains(errorMsg, "timeout") || + strings.Contains(errorMsg, "eof") || + strings.Contains(errorMsg, "operations error") || + strings.Contains(errorMsg, "successful bind must be completed") || + strings.Contains(errorMsg, "ldap result code 1") +} + +// reconnect attempts to reconnect to the LDAP server +func (c *Client) reconnect() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + // Close existing connection if any + if c.conn != nil { + c.conn.Close() + } + c.connected = false + + // Wait a moment before retrying + time.Sleep(100 * time.Millisecond) + + // Attempt reconnection + conn, err := c.dial() + if err != nil { + return fmt.Errorf("failed to reconnect to LDAP server: %v", err) + } + + if c.config.BindUser != "" { + err = conn.Bind(c.config.BindUser, c.config.BindPassword) + if err != nil { + conn.Close() + return fmt.Errorf("failed to bind after reconnection: %v", err) + } + } + + c.conn = conn + c.connected = true + return nil +} + +// validateBind checks if the current bind is still valid by performing a simple operation +func (c *Client) validateBind() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no connection available") + } + + // Try a simple search to validate the bind + req := ldap.NewSearchRequest( + c.config.BaseDN, + ldap.ScopeBaseObject, ldap.NeverDerefAliases, 1, 0, false, + "(objectClass=*)", + []string{"dn"}, + nil, + ) + + _, err := conn.Search(req) + return err +} + +// executeWithRetry executes an LDAP operation with automatic retry on connection errors +func (c *Client) executeWithRetry(operation func() error, maxRetries int) error { + var lastErr error + + for attempt := 0; attempt <= maxRetries; attempt++ { + c.mutex.RLock() + connected := c.connected + conn := c.conn + c.mutex.RUnlock() + + // Check if we need to reconnect + if !connected || conn == nil { + if reconnectErr := c.reconnect(); reconnectErr != nil { + lastErr = reconnectErr + continue + } + } else { + // Validate that the bind is still active + if bindErr := c.validateBind(); bindErr != nil { + if c.isConnectionError(bindErr) { + if reconnectErr := c.reconnect(); reconnectErr != nil { + lastErr = reconnectErr + continue + } + } + } + } + + err := operation() + if err == nil { + return nil + } + + lastErr = err + + // If it's not a connection error, don't retry + if !c.isConnectionError(err) { + return err + } + + // Mark as disconnected and try to reconnect + c.mutex.Lock() + c.connected = false + c.mutex.Unlock() + + // Don't reconnect on the last attempt + if attempt < maxRetries { + if reconnectErr := c.reconnect(); reconnectErr != nil { + lastErr = reconnectErr + } + } + } + + return fmt.Errorf("operation failed after %d attempts, last error: %v", maxRetries+1, lastErr) +} + +func (c *Client) HealthCheck() error { + req := ldap.NewSearchRequest( + c.config.BaseDN, + ldap.ScopeBaseObject, ldap.NeverDerefAliases, 1, 0, false, + "(objectClass=*)", + []string{"dn"}, + nil, + ) + + err := c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + _, searchErr := conn.Search(req) + return searchErr + }, 2) + + return err +} + +func (c *Client) IsConnected() bool { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.connected +}
*Client) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) { + var result *ldap.SearchResult + + err := c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + res, err := conn.Search(searchRequest) + if err != nil { + return fmt.Errorf("failed to search: %v", err) + } + result = res + return nil + }, 2) // Retry up to 2 times + + if err != nil { + return nil, err + } + + return result, nil +} + +// SearchEntry performs an LDAP search and returns the first entry +func (c *Client) SearchEntry(req *ldap.SearchRequest) (*ldap.Entry, error) { + var result *ldap.SearchResult + + err := c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + res, err := conn.Search(req) + if err != nil { + return fmt.Errorf("failed to search entry: %v", err) + } + result = res + return nil + }, 2) // Retry up to 2 times + + if err != nil { + return nil, err + } + + if len(result.Entries) == 0 { + return nil, nil + } + return result.Entries[0], nil +} + +func (c *Client) Add(addRequest *ldap.AddRequest) error { + return c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + return conn.Add(addRequest) + }, 2) // Retry up to 2 times +} + +func (c *Client) Modify(modifyRequest *ldap.ModifyRequest) error { + return c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + return conn.Modify(modifyRequest) + }, 2) // Retry up to 2 times +} + +func (c *Client) Del(delRequest *ldap.DelRequest) error { + return c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + return conn.Del(delRequest) + }, 2) // Retry up to 2 times +} + +func (c *Client) ModifyDN(modifyDNRequest *ldap.ModifyDNRequest) error { + return c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + return conn.ModifyDN(modifyDNRequest) + }, 2) // Retry up to 2 times +} + +func (c *Client) Bind(username, password string) error { + err := c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + bindErr := conn.Bind(username, password) + if bindErr != nil { + } else { + } + return bindErr + }, 2) // Retry up to 2 times + + if err != nil { + } + return err +} + +func (c *Client) SimpleBind(username, password string) error { + return c.executeWithRetry(func() error { + c.mutex.RLock() + conn := c.conn + c.mutex.RUnlock() + + if conn == nil { + return fmt.Errorf("no LDAP connection available") + } + + return conn.Bind(username, password) + }, 2) // Retry up to 2 times +} + +func (s *LDAPService) GetUserDN(username string) (string, error) { + if username == "" { + return "", fmt.Errorf("username cannot be empty") + } + + req := ldap.NewSearchRequest( + s.client.config.BaseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=user)(sAMAccountName=%s))", username), + []string{"dn"}, + nil, + ) + + entry, err := 
s.client.SearchEntry(req) + if err != nil { + return "", fmt.Errorf("failed to search for user: %v", err) + } + + if entry == nil { + return "", fmt.Errorf("user not found") + } + + return entry.DN, nil +} diff --git a/internal/ldap/ldap_service.go b/internal/ldap/ldap_service.go new file mode 100644 index 0000000..b3bae99 --- /dev/null +++ b/internal/ldap/ldap_service.go @@ -0,0 +1,44 @@ +package ldap + +import "fmt" + +func NewLDAPService() (*LDAPService, error) { + config, err := LoadConfig() + if err != nil { + return nil, fmt.Errorf("failed to load LDAP configuration: %w", err) + } + + client := NewClient(config) + if err := client.Connect(); err != nil { + return nil, fmt.Errorf("failed to connect to LDAP: %w", err) + } + + return &LDAPService{ + client: client, + }, nil +} + +func (s *LDAPService) Close() error { + err := s.client.Disconnect() + if err != nil { + return err + } + return nil +} + +func (s *LDAPService) HealthCheck() error { + err := s.client.HealthCheck() + if err != nil { + return err + } + + return nil +} + +func (s *LDAPService) Reconnect() error { + err := s.client.Connect() + if err != nil { + return err + } + return nil +} diff --git a/internal/ldap/types.go b/internal/ldap/types.go new file mode 100644 index 0000000..5de4949 --- /dev/null +++ b/internal/ldap/types.go @@ -0,0 +1,95 @@ +package ldap + +import ( + "sync" + + "github.com/go-ldap/ldap/v3" +) + +// ================================================= +// LDAP Service Interface +// ================================================= + +type Service interface { + // User Management + GetUsers() ([]User, error) + CreateAndRegisterUser(userInfo UserRegistrationInfo) error + DeleteUser(username string) error + AddUserToGroup(username string, groupName string) error + SetUserGroups(username string, groups []string) error + EnableUserAccount(username string) error + DisableUserAccount(username string) error + GetUserGroups(userDN string) ([]string, error) + GetUserDN(username string) (string, error) + + // Group Management + CreateGroup(groupName string) error + GetGroups() ([]Group, error) + RenameGroup(oldGroupName string, newGroupName string) error + DeleteGroup(groupName string) error + GetGroupMembers(groupName string) ([]User, error) + RemoveUserFromGroup(username string, groupName string) error + AddUsersToGroup(groupName string, usernames []string) error + RemoveUsersFromGroup(groupName string, usernames []string) error + + // Connection Management + HealthCheck() error + Reconnect() error + Close() error +} + +type LDAPService struct { + client *Client +} + +// ================================================= +// LDAP Client +// ================================================= + +type Config struct { + URL string `envconfig:"LDAP_URL" default:"ldaps://localhost:636"` + BindUser string `envconfig:"LDAP_BIND_USER"` + BindPassword string `envconfig:"LDAP_BIND_PASSWORD"` + SkipTLSVerify bool `envconfig:"LDAP_SKIP_TLS_VERIFY" default:"false"` + AdminGroupDN string `envconfig:"LDAP_ADMIN_GROUP_DN"` + BaseDN string `envconfig:"LDAP_BASE_DN"` +} + +type Client struct { + conn ldap.Client + config *Config + mutex sync.RWMutex + connected bool +} + +// ================================================= +// Groups +// ================================================= + +type CreateRequest struct { + Group string `json:"group"` +} + +type Group struct { + Name string `json:"name"` + CanModify bool `json:"can_modify"` + CreatedAt string `json:"created_at,omitempty"` + UserCount int `json:"user_count,omitempty"` +} 
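+
+// Illustrative usage (editor's sketch, not part of this change): a caller
+// would typically construct the service once and program against the
+// Service interface above, e.g.
+//
+//	svc, err := NewLDAPService()
+//	if err != nil {
+//		log.Fatalf("ldap: %v", err)
+//	}
+//	defer svc.Close()
+//
+//	groups, err := svc.GetGroups()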
+ +// ================================================= +// Users +// ================================================= + +type User struct { + Name string `json:"name"` + CreatedAt string `json:"created_at"` + Enabled bool `json:"enabled"` + IsAdmin bool `json:"is_admin"` + Groups []Group `json:"groups"` +} + +type UserRegistrationInfo struct { + Username string `json:"username" validate:"required,min=1,max=20"` + Password string `json:"password" validate:"required,min=8,max=128"` +} diff --git a/internal/ldap/users.go b/internal/ldap/users.go new file mode 100644 index 0000000..c20587b --- /dev/null +++ b/internal/ldap/users.go @@ -0,0 +1,423 @@ +package ldap + +import ( + "encoding/binary" + "fmt" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf16" + + ldapv3 "github.com/go-ldap/ldap/v3" +) + +// ================================================= +// Public Functions +// ================================================= + +func (s *LDAPService) GetUsers() ([]User, error) { + kaminoUsersGroupDN := "CN=KaminoUsers,OU=KaminoGroups," + s.client.config.BaseDN + searchRequest := ldapv3.NewSearchRequest( + s.client.config.BaseDN, ldapv3.ScopeWholeSubtree, ldapv3.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=user)(sAMAccountName=*)(memberOf=%s))", kaminoUsersGroupDN), // Filter for users in KaminoUsers group + []string{"sAMAccountName", "dn", "whenCreated", "memberOf", "userAccountControl"}, // Attributes to retrieve + nil, + ) + + searchResult, err := s.client.Search(searchRequest) + if err != nil { + return nil, fmt.Errorf("failed to search for users: %v", err) + } + + var users = []User{} + for _, entry := range searchResult.Entries { + user := User{ + Name: entry.GetAttributeValue("sAMAccountName"), + } + + whenCreated := entry.GetAttributeValue("whenCreated") + if whenCreated != "" { + // AD stores dates in GeneralizedTime format: YYYYMMDDHHMMSS.0Z + if parsedTime, err := time.Parse("20060102150405.0Z", whenCreated); err == nil { + user.CreatedAt = parsedTime.Format("2006-01-02 15:04:05") + } + } + + // Check if user is enabled + userAccountControl := entry.GetAttributeValue("userAccountControl") + if userAccountControl != "" { + uac, err := strconv.Atoi(userAccountControl) + if err == nil { + // UF_ACCOUNTDISABLE = 0x02 + user.Enabled = (uac & 0x02) == 0 + } + } + + // Check if user is admin + memberOfValues := entry.GetAttributeValues("memberOf") + for _, memberOf := range memberOfValues { + if strings.Contains(memberOf, s.client.config.AdminGroupDN) { + user.IsAdmin = true + break + } + } + + // Get user groups + groups, err := getUserGroupsFromMemberOf(memberOfValues) + if err == nil { + user.Groups = groups + } + + users = append(users, user) + } + + return users, nil +} + +func (s *LDAPService) CreateUser(userInfo UserRegistrationInfo) (string, error) { + // Create DN for new user in Users container + // TODO: Static + userDN := fmt.Sprintf("CN=%s,OU=KaminoUsers,%s", userInfo.Username, s.client.config.BaseDN) + + // Create add request for new user + addReq := ldapv3.NewAddRequest(userDN, nil) + + // Add required object classes + addReq.Attribute("objectClass", []string{"top", "person", "organizationalPerson", "user"}) + + // Add basic attributes + addReq.Attribute("cn", []string{userInfo.Username}) + addReq.Attribute("sAMAccountName", []string{userInfo.Username}) + addReq.Attribute("userPrincipalName", []string{fmt.Sprintf("%s@%s", userInfo.Username, extractDomainFromDN(s.client.config.BaseDN))}) + + // Set account control flags - account disabled initially 
(will be enabled after password is set)
+	addReq.Attribute("userAccountControl", []string{"546"}) // 512 (NORMAL_ACCOUNT) + 32 (PASSWD_NOTREQD) + 2 (ACCOUNTDISABLE)
+
+	// Perform the add operation
+	err := s.client.Add(addReq)
+	if err != nil {
+		return "", fmt.Errorf("failed to create user: %v", err)
+	}
+
+	return userDN, nil
+}
+
+func (s *LDAPService) SetUserPassword(userDN string, password string) error {
+	// For Active Directory, passwords must be set via the unicodePwd attribute
+	// (AD only accepts this over a secure connection); the value must be
+	// UTF-16LE encoded and quoted
+	utf16Password := encodePasswordForAD(password)
+
+	// Create modify request to set password
+	modifyReq := ldapv3.NewModifyRequest(userDN, nil)
+	modifyReq.Replace("unicodePwd", []string{utf16Password})
+
+	err := s.client.Modify(modifyReq)
+	if err != nil {
+		return fmt.Errorf("failed to set password: %v", err)
+	}
+
+	return nil
+}
+
+func (s *LDAPService) EnableUserAccountByDN(userDN string) error {
+	modifyRequest := ldapv3.NewModifyRequest(userDN, nil)
+	modifyRequest.Replace("userAccountControl", []string{"512"}) // Normal account
+
+	err := s.client.Modify(modifyRequest)
+	if err != nil {
+		return fmt.Errorf("failed to enable user account: %v", err)
+	}
+
+	return nil
+}
+
+// DisableUserAccountByDN disables a user account by DN
+func (s *LDAPService) DisableUserAccountByDN(userDN string) error {
+	modifyRequest := ldapv3.NewModifyRequest(userDN, nil)
+	modifyRequest.Replace("userAccountControl", []string{"514"}) // 512 + 2 (ACCOUNTDISABLE)
+
+	err := s.client.Modify(modifyRequest)
+	if err != nil {
+		return fmt.Errorf("failed to disable user account: %v", err)
+	}
+
+	return nil
+}
+
+func (s *LDAPService) AddToGroup(userDN string, groupDN string) error {
+	modifyRequest := ldapv3.NewModifyRequest(groupDN, nil)
+	modifyRequest.Add("member", []string{userDN})
+
+	err := s.client.Modify(modifyRequest)
+	if err != nil {
+		// Check if the error is because the user is already in the group
+		if strings.Contains(strings.ToLower(err.Error()), "already exists") ||
+			strings.Contains(strings.ToLower(err.Error()), "attribute or value exists") {
+			return nil // Not an error if user is already in group
+		}
+		return fmt.Errorf("failed to add user to group: %v", err)
+	}
+
+	return nil
+}
+
+func (s *LDAPService) RemoveFromGroup(userDN string, groupDN string) error {
+	modifyRequest := ldapv3.NewModifyRequest(groupDN, nil)
+	modifyRequest.Delete("member", []string{userDN})
+
+	err := s.client.Modify(modifyRequest)
+	if err != nil {
+		// Check if the error is because the user is not in the group
+		if strings.Contains(strings.ToLower(err.Error()), "no such attribute") ||
+			strings.Contains(strings.ToLower(err.Error()), "unwilling to perform") ||
+			strings.Contains(strings.ToLower(err.Error()), "no such object") {
+			return nil // Not an error if user is not in group
+		}
+		return fmt.Errorf("failed to remove user from group: %v", err)
+	}
+
+	return nil
+}
+
+func (s *LDAPService) CreateAndRegisterUser(userInfo UserRegistrationInfo) error {
+	// Validate username
+	if !isValidUsername(userInfo.Username) {
+		return fmt.Errorf("invalid username: must be alphanumeric and 1-20 characters long")
+	}
+
+	// Validate password strength
+	if len(userInfo.Password) < 8 || len(userInfo.Password) > 128 {
+		return fmt.Errorf("password must be between 8 and 128 characters long")
+	}
+
+	userDN, err := s.CreateUser(userInfo)
+	if err != nil {
+		return fmt.Errorf("failed to create user: %v", err)
+	}
+
+	// Set password
+	err = s.SetUserPassword(userDN, userInfo.Password)
+	if err != nil {
+		// Clean up created user if
password setting fails + delRequest := ldapv3.NewDelRequest(userDN, nil) + s.client.Del(delRequest) + return fmt.Errorf("failed to set user password: %v", err) + } + + // Enable account + err = s.EnableUserAccountByDN(userDN) + if err != nil { + return fmt.Errorf("failed to enable user account: %v", err) + } + + // Add user to KaminoUsers group + kaminoUsersGroupDN := "CN=KaminoUsers,OU=KaminoGroups," + s.client.config.BaseDN + err = s.AddToGroup(userDN, kaminoUsersGroupDN) + if err != nil { + return fmt.Errorf("failed to add user to KaminoUsers group: %v", err) + } + + return nil +} + +func (s *LDAPService) AddUserToGroup(username string, groupName string) error { + userDN, err := s.GetUserDN(username) + if err != nil { + return fmt.Errorf("failed to get user DN: %v", err) + } + + groupDN, err := s.getGroupDN(groupName) + if err != nil { + return fmt.Errorf("failed to get group DN: %v", err) + } + + return s.AddToGroup(userDN, groupDN) +} + +func (s *LDAPService) RemoveUserFromGroup(username string, groupName string) error { + userDN, err := s.GetUserDN(username) + if err != nil { + return fmt.Errorf("failed to get user DN: %v", err) + } + + groupDN, err := s.getGroupDN(groupName) + if err != nil { + return fmt.Errorf("failed to get group DN: %v", err) + } + + modifyRequest := ldapv3.NewModifyRequest(groupDN, nil) + modifyRequest.Delete("member", []string{userDN}) + + err = s.client.Modify(modifyRequest) + if err != nil { + return fmt.Errorf("failed to remove user from group: %v", err) + } + + return nil +} + +func (s *LDAPService) DeleteUser(username string) error { + userDN, err := s.GetUserDN(username) + if err != nil { + return fmt.Errorf("failed to get user DN: %v", err) + } + + delRequest := ldapv3.NewDelRequest(userDN, nil) + err = s.client.Del(delRequest) + if err != nil { + return fmt.Errorf("failed to delete user: %v", err) + } + + return nil +} + +func (s *LDAPService) DeleteUsers(usernames []string) []error { + var errors []error + for _, username := range usernames { + err := s.DeleteUser(username) + if err != nil { + errors = append(errors, fmt.Errorf("failed to delete user %s: %v", username, err)) + } + } + return errors +} + +func (s *LDAPService) GetUserGroups(userDN string) ([]string, error) { + searchRequest := ldapv3.NewSearchRequest( + userDN, + ldapv3.ScopeBaseObject, + ldapv3.NeverDerefAliases, + 1, + 30, + false, + "(objectClass=*)", + []string{"memberOf"}, + nil, + ) + + searchResult, err := s.client.Search(searchRequest) + if err != nil { + return nil, fmt.Errorf("failed to search for user groups: %v", err) + } + + if len(searchResult.Entries) == 0 { + return []string{}, nil + } + + memberOfValues := searchResult.Entries[0].GetAttributeValues("memberOf") + var groups []string + for _, memberOf := range memberOfValues { + // Extract CN from DN + parts := strings.Split(memberOf, ",") + if len(parts) > 0 && strings.HasPrefix(parts[0], "CN=") { + groupName := strings.TrimPrefix(parts[0], "CN=") + groups = append(groups, groupName) + } + } + + return groups, nil +} + +func (s *LDAPService) EnableUserAccount(username string) error { + userDN, err := s.GetUserDN(username) + if err != nil { + return fmt.Errorf("failed to get user DN: %v", err) + } + + return s.EnableUserAccountByDN(userDN) +} + +func (s *LDAPService) DisableUserAccount(username string) error { + userDN, err := s.GetUserDN(username) + if err != nil { + return fmt.Errorf("failed to get user DN: %v", err) + } + + return s.DisableUserAccountByDN(userDN) +} + +func (s *LDAPService) SetUserGroups(username 
string, groups []string) error { + userDN, err := s.GetUserDN(username) + if err != nil { + return fmt.Errorf("failed to get user DN: %v", err) + } + + // Get current groups + currentGroups, err := s.GetUserGroups(userDN) + if err != nil { + return fmt.Errorf("failed to get current user groups: %v", err) + } + + // Remove from current groups + for _, group := range currentGroups { + err = s.RemoveUserFromGroup(username, group) + if err != nil { + return fmt.Errorf("failed to remove user from group %s: %v", group, err) + } + } + + // Add to new groups + for _, group := range groups { + err = s.AddUserToGroup(username, group) + if err != nil { + return fmt.Errorf("failed to add user to group %s: %v", group, err) + } + } + + return nil +} + +// ================================================= +// Private Functions +// ================================================= + +func getUserGroupsFromMemberOf(memberOfValues []string) ([]Group, error) { + var groups []Group + for _, memberOf := range memberOfValues { + // Extract CN from DN + parts := strings.Split(memberOf, ",") + if len(parts) > 0 && strings.HasPrefix(parts[0], "CN=") { + groupName := strings.TrimPrefix(parts[0], "CN=") + groups = append(groups, Group{Name: groupName}) + } + } + return groups, nil +} + +func isValidUsername(username string) bool { + if len(username) < 1 || len(username) > 20 { + return false + } + matched, _ := regexp.MatchString("^[a-zA-Z0-9]+$", username) + return matched +} + +func extractDomainFromDN(dn string) string { + // Convert DN like "DC=example,DC=com" to "example.com" + parts := strings.Split(strings.ToLower(dn), ",") + var domainParts []string + + for _, part := range parts { + part = strings.TrimSpace(part) + if strings.HasPrefix(part, "dc=") { + domainParts = append(domainParts, strings.TrimPrefix(part, "dc=")) + } + } + + return strings.Join(domainParts, ".") +} + +func encodePasswordForAD(password string) string { + // AD requires password to be UTF-16LE encoded and surrounded by quotes + quotedPassword := fmt.Sprintf("\"%s\"", password) + utf16Encoded := utf16.Encode([]rune(quotedPassword)) + + // Convert to bytes in little-endian format + bytes := make([]byte, len(utf16Encoded)*2) + for i, r := range utf16Encoded { + binary.LittleEndian.PutUint16(bytes[i*2:], r) + } + + return string(bytes) +} diff --git a/internal/proxmox/cluster.go b/internal/proxmox/cluster.go new file mode 100644 index 0000000..9de7983 --- /dev/null +++ b/internal/proxmox/cluster.go @@ -0,0 +1,237 @@ +package proxmox + +import ( + "fmt" + "log" + + "github.com/cpp-cyber/proclone/internal/tools" +) + +// ================================================= +// Public Functions +// ================================================= + +// GetNodeStatus retrieves detailed status for a specific node +func (s *ProxmoxService) GetNodeStatus(nodeName string) (*ProxmoxNodeStatus, error) { + req := tools.ProxmoxAPIRequest{ + Method: "GET", + Endpoint: fmt.Sprintf("/nodes/%s/status", nodeName), + } + + var nodeStatus ProxmoxNodeStatus + if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &nodeStatus); err != nil { + return nil, fmt.Errorf("failed to get node status for %s: %w", nodeName, err) + } + + return &nodeStatus, nil +} + +// GetClusterResources retrieves all cluster resources from the Proxmox cluster +func (s *ProxmoxService) GetClusterResources(getParams string) ([]VirtualResource, error) { + req := tools.ProxmoxAPIRequest{ + Method: "GET", + Endpoint: fmt.Sprintf("/cluster/resources?%s", getParams), + } + + var resources 
[]VirtualResource + if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &resources); err != nil { + return nil, fmt.Errorf("failed to get cluster resources: %w", err) + } + + return resources, nil +} + +// GetClusterResourceUsage retrieves resource usage for the Proxmox cluster +func (s *ProxmoxService) GetClusterResourceUsage() (*ClusterResourceUsageResponse, error) { + resources, err := s.GetClusterResources("") + if err != nil { + return nil, fmt.Errorf("failed to get cluster resources: %w", err) + } + + nodes, errors := s.collectNodeResourceUsage(resources) + cluster := s.aggregateClusterResourceUsage(nodes, resources) + + response := &ClusterResourceUsageResponse{ + Nodes: nodes, + Total: cluster, + Errors: errors, + } + + // Return error if all nodes failed + if len(errors) > 0 && len(nodes) == 0 { + return nil, fmt.Errorf("failed to fetch resource usage for all nodes: %v", errors) + } + + return response, nil +} + +// FindBestNode finds the node with the most available resources +func (s *ProxmoxService) FindBestNode() (string, error) { + req := tools.ProxmoxAPIRequest{ + Method: "GET", + Endpoint: "/nodes", + } + + var nodesResponse []struct { + Node string `json:"node"` + Status string `json:"status"` + CPU float64 `json:"cpu"` + MaxCPU int `json:"maxcpu"` + Mem int64 `json:"mem"` + MaxMem int64 `json:"maxmem"` + } + + if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &nodesResponse); err != nil { + return "", fmt.Errorf("failed to get nodes: %w", err) + } + + var bestNode string + var lowestLoad float64 = 1.0 + + for _, node := range nodesResponse { + if node.Status == "online" { + // Calculate combined load (CPU + Memory) + cpuLoad := node.CPU + memLoad := float64(node.Mem) / float64(node.MaxMem) + combinedLoad := (cpuLoad + memLoad) / 2 + + if combinedLoad < lowestLoad { + lowestLoad = combinedLoad + bestNode = node.Node + } + } + } + + if bestNode == "" { + return "", fmt.Errorf("no online nodes available") + } + + return bestNode, nil +} + +func (s *ProxmoxService) SyncUsers() error { + return s.syncRealm("users") +} + +func (s *ProxmoxService) SyncGroups() error { + return s.syncRealm("groups") +} + +// ================================================= +// Private Functions +// ================================================= + +// collectNodeResourceUsage gathers resource usage data for all configured nodes +func (s *ProxmoxService) collectNodeResourceUsage(resources []VirtualResource) ([]NodeResourceUsage, []string) { + var nodes []NodeResourceUsage + var errors []string + + for _, nodeName := range s.Config.Nodes { + nodeUsage, err := s.getNodeResourceUsage(nodeName, resources) + if err != nil { + errorMsg := fmt.Sprintf("Error fetching status for node %s: %v", nodeName, err) + log.Printf("%s", errorMsg) + errors = append(errors, errorMsg) + continue + } + nodes = append(nodes, nodeUsage) + } + + return nodes, errors +} + +// getNodeResourceUsage retrieves resource usage for a single node +func (s *ProxmoxService) getNodeResourceUsage(nodeName string, resources []VirtualResource) (NodeResourceUsage, error) { + status, err := s.GetNodeStatus(nodeName) + if err != nil { + return NodeResourceUsage{}, fmt.Errorf("failed to get node status: %w", err) + } + + usedStorage, totalStorage := getNodeStorage(&resources, nodeName) + + return NodeResourceUsage{ + Name: nodeName, + Resources: ResourceUsage{ + CPUUsage: status.CPU, + MemoryTotal: status.Memory.Total, + MemoryUsed: status.Memory.Used, + StorageTotal: int64(totalStorage), + StorageUsed: int64(usedStorage), + }, + 
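// Storage figures cover only this node's local and local-lvm pools
+		// (see getNodeStorage); shared storage is added once, cluster-wide,
+		// in aggregateClusterResourceUsage
+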
}, nil +} + +// aggregateClusterResourceUsage calculates cluster-wide resource totals and averages +func (s *ProxmoxService) aggregateClusterResourceUsage(nodes []NodeResourceUsage, resources []VirtualResource) ResourceUsage { + cluster := ResourceUsage{} + + // Aggregate node resources + for _, node := range nodes { + cluster.MemoryTotal += node.Resources.MemoryTotal + cluster.MemoryUsed += node.Resources.MemoryUsed + cluster.StorageTotal += node.Resources.StorageTotal + cluster.StorageUsed += node.Resources.StorageUsed + cluster.CPUUsage += node.Resources.CPUUsage + } + + // Add shared storage (NAS) + nasUsed, nasTotal := getStorage(&resources, "mufasa-proxmox") + cluster.StorageTotal += int64(nasTotal) + cluster.StorageUsed += int64(nasUsed) + + // Calculate average CPU usage + if len(nodes) > 0 { + cluster.CPUUsage /= float64(len(nodes)) + } + + return cluster +} + +func getNodeStorage(resources *[]VirtualResource, node string) (Used int64, Total int64) { + var used int64 = 0 + var total int64 = 0 + + for _, r := range *resources { + if r.Type == "storage" && r.NodeName == node && + (r.Storage == "local" || r.Storage == "local-lvm") && + r.RunningStatus == "available" { + used += r.Disk + total += r.MaxDisk + } + } + + return used, total +} + +func getStorage(resources *[]VirtualResource, storage string) (Used int64, Total int64) { + var used int64 = 0 + var total int64 = 0 + + for _, r := range *resources { + if r.Type == "storage" && r.Storage == storage && r.RunningStatus == "available" { + used = r.Disk + total = r.MaxDisk + break + } + } + + return used, total +} + +func (s *ProxmoxService) syncRealm(scope string) error { + req := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/access/domains/%s/sync", s.Config.Realm), + RequestBody: map[string]string{ + "scope": scope, // Either "users" or "groups" + "remove-vanished": "acl;properties;entry", // Delete any users/groups that no longer exist in AD + }, + } + + _, err := s.RequestHelper.MakeRequest(req) + if err != nil { + return fmt.Errorf("failed to sync realm: %w", err) + } + + return nil +} diff --git a/internal/proxmox/pools.go b/internal/proxmox/pools.go new file mode 100644 index 0000000..dee9ffc --- /dev/null +++ b/internal/proxmox/pools.go @@ -0,0 +1,251 @@ +package proxmox + +import ( + "fmt" + "log" + "math" + "slices" + "sort" + "strconv" + "strings" + "time" + + "github.com/cpp-cyber/proclone/internal/tools" +) + +func (s *ProxmoxService) GetPoolVMs(poolName string) ([]VirtualResource, error) { + req := tools.ProxmoxAPIRequest{ + Method: "GET", + Endpoint: fmt.Sprintf("/pools/%s", poolName), + } + + var poolResponse struct { + Members []VirtualResource `json:"members"` + } + if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &poolResponse); err != nil { + return nil, fmt.Errorf("failed to get pool VMs: %w", err) + } + + // Filter for VMs only (type=qemu) + var vms []VirtualResource + for _, member := range poolResponse.Members { + if member.Type == "qemu" { + vms = append(vms, member) + } + } + + return vms, nil +} + +func (s *ProxmoxService) CreateNewPool(poolName string) error { + reqBody := map[string]string{ + "poolid": poolName, + } + + req := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: "/pools", + RequestBody: reqBody, + } + + _, err := s.RequestHelper.MakeRequest(req) + if err != nil { + return fmt.Errorf("failed to create pool %s: %w", poolName, err) + } + + return nil +} + +func (s *ProxmoxService) SetPoolPermission(poolName string, targetName string, isGroup bool) error { + 
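// Proxmox identifies realm-synced groups as "<group>-<realm>" and realm
+	// users as "<user>@<realm>", hence the two formats assembled below
+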
reqBody := map[string]any{ + "path": fmt.Sprintf("/pool/%s", poolName), + "roles": "PVEVMUser,PVEPoolUser", + "propagate": true, + } + + if isGroup { + reqBody["groups"] = fmt.Sprintf("%s-%s", targetName, s.Config.Realm) + } else { + reqBody["users"] = fmt.Sprintf("%s@%s", targetName, s.Config.Realm) + } + + req := tools.ProxmoxAPIRequest{ + Method: "PUT", + Endpoint: "/access/acl", + RequestBody: reqBody, + } + + _, err := s.RequestHelper.MakeRequest(req) + if err != nil { + return fmt.Errorf("failed to set pool permissions: %w", err) + } + + return nil +} + +func (s *ProxmoxService) DeletePool(poolName string) error { + req := tools.ProxmoxAPIRequest{ + Method: "DELETE", + Endpoint: fmt.Sprintf("/pools/%s", poolName), + } + + _, err := s.RequestHelper.MakeRequest(req) + if err != nil { + return fmt.Errorf("failed to delete pool %s: %w", poolName, err) + } + + log.Printf("Successfully deleted pool: %s", poolName) + return nil +} + +func (s *ProxmoxService) GetTemplatePools() ([]string, error) { + req := tools.ProxmoxAPIRequest{ + Method: "GET", + Endpoint: "/pools", + } + + var poolResponse []struct { + Name string `json:"poolid"` + } + if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &poolResponse); err != nil { + return nil, fmt.Errorf("failed to get template pools: %w", err) + } + + var templatePools []string + for _, pool := range poolResponse { + if strings.HasPrefix(pool.Name, "kamino_template_") { + templatePools = append(templatePools, pool.Name) + } + } + + return templatePools, nil +} + +func (s *ProxmoxService) IsPoolEmpty(poolName string) (bool, error) { + poolVMs, err := s.GetPoolVMs(poolName) + if err != nil { + return false, fmt.Errorf("failed to check if pool %s is empty: %w", poolName, err) + } + + // Count only QEMU VMs (ignore other resource types) + vmCount := 0 + for _, vm := range poolVMs { + if vm.Type == "qemu" { + vmCount++ + } + } + + return vmCount == 0, nil +} + +func (s *ProxmoxService) WaitForPoolEmpty(poolName string, timeout time.Duration) error { + start := time.Now() + backoff := 2 * time.Second + maxBackoff := 30 * time.Second + + for time.Since(start) < timeout { + poolVMs, err := s.GetPoolVMs(poolName) + if err != nil { + // If we can't get pool VMs, pool might be deleted or empty + log.Printf("Error checking pool %s (might be deleted): %v", poolName, err) + return nil + } + + if len(poolVMs) == 0 { + log.Printf("Pool %s is now empty", poolName) + return nil + } + + log.Printf("Pool %s still contains %d VMs, waiting...", poolName, len(poolVMs)) + time.Sleep(backoff) + backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff))) + } + + return fmt.Errorf("timeout waiting for pool %s to become empty after %v", poolName, timeout) +} + +func (s *ProxmoxService) GetNextPodID(minPodID int, maxPodID int) (string, int, error) { + // Get all existing pools + req := tools.ProxmoxAPIRequest{ + Method: "GET", + Endpoint: "/pools", + } + + var poolsResponse []struct { + PoolID string `json:"poolid"` + } + if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &poolsResponse); err != nil { + return "", 0, fmt.Errorf("failed to get existing pools: %w", err) + } + + // Extract pod IDs from existing pools + var usedIDs []int + for _, pool := range poolsResponse { + if len(pool.PoolID) >= 4 { + if id, err := strconv.Atoi(pool.PoolID[:4]); err == nil { + if id >= minPodID && id <= maxPodID { + usedIDs = append(usedIDs, id) + } + } + } + } + + sort.Ints(usedIDs) + + // Find first available ID + for i := minPodID; i <= maxPodID; i++ { + found := 
slices.Contains(usedIDs, i)
+		if !found {
+			return fmt.Sprintf("%04d", i), i - 1000, nil // adjusted ID is relative to the 1000 base
+		}
+	}
+
+	return "", 0, fmt.Errorf("no available pod IDs in range %d-%d", minPodID, maxPodID)
+}
+
+func (s *ProxmoxService) GetNextPodIDs(minPodID int, maxPodID int, num int) ([]string, []int, error) {
+	// Get all existing pools
+	req := tools.ProxmoxAPIRequest{
+		Method:   "GET",
+		Endpoint: "/pools",
+	}
+
+	var poolsResponse []struct {
+		PoolID string `json:"poolid"`
+	}
+	if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &poolsResponse); err != nil {
+		return nil, nil, fmt.Errorf("failed to get existing pools: %w", err)
+	}
+
+	// Extract pod IDs from existing pools
+	var usedIDs []int
+	for _, pool := range poolsResponse {
+		if len(pool.PoolID) >= 4 {
+			if id, err := strconv.Atoi(pool.PoolID[:4]); err == nil {
+				if id >= minPodID && id <= maxPodID {
+					usedIDs = append(usedIDs, id)
+				}
+			}
+		}
+	}
+
+	sort.Ints(usedIDs)
+
+	// Find available IDs
+	var podIDs []string
+	var adjustedIDs []int
+
+	for i := minPodID; i <= maxPodID && len(podIDs) < num; i++ {
+		found := slices.Contains(usedIDs, i)
+		if !found {
+			podIDs = append(podIDs, fmt.Sprintf("%04d", i))
+			adjustedIDs = append(adjustedIDs, i-1000) // adjusted ID is relative to the 1000 base
+		}
+	}
+
+	if len(podIDs) < num {
+		return nil, nil, fmt.Errorf("only found %d available pod IDs out of %d requested in range %d-%d", len(podIDs), num, minPodID, maxPodID)
+	}
+
+	return podIDs, adjustedIDs, nil
+}
diff --git a/internal/proxmox/proxmox_service.go b/internal/proxmox/proxmox_service.go
new file mode 100644
index 0000000..b78fa3c
--- /dev/null
+++ b/internal/proxmox/proxmox_service.go
@@ -0,0 +1,73 @@
+package proxmox
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/cpp-cyber/proclone/internal/tools"
+	"github.com/kelseyhightower/envconfig"
+)
+
+// NewProxmoxService creates a new Proxmox service with the given configuration
+func NewProxmoxService(config ProxmoxConfig) *ProxmoxService {
+	// Create HTTP client with appropriate TLS settings
+	transport := &http.Transport{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: !config.VerifySSL,
+		},
+	}
+
+	client := &http.Client{
+		Transport: transport,
+		Timeout:   30 * time.Second,
+	}
+
+	baseURL := fmt.Sprintf("https://%s:%s/api2/json", config.Host, config.Port)
+
+	// Initialize the request helper
+	requestHelper := tools.NewProxmoxRequestHelper(baseURL, config.APIToken, client)
+
+	return &ProxmoxService{
+		Config:        &config,
+		HTTPClient:    client,
+		BaseURL:       baseURL,
+		RequestHelper: requestHelper,
+	}
+}
+
+func NewService() (Service, error) {
+	config, err := LoadProxmoxConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to load Proxmox configuration: %w", err)
+	}
+
+	return NewProxmoxService(*config), nil
+}
+
+func (s *ProxmoxService) GetRequestHelper() *tools.ProxmoxRequestHelper {
+	return s.RequestHelper
+}
+
+func LoadProxmoxConfig() (*ProxmoxConfig, error) {
+	var config ProxmoxConfig
+	if err := envconfig.Process("", &config); err != nil {
+		return nil, fmt.Errorf("failed to process Proxmox configuration: %w", err)
+	}
+
+	// Build API token from ID and secret
+	config.APIToken = fmt.Sprintf("%s=%s", config.TokenID, config.TokenSecret)
+
+	// Parse nodes list if provided
+	if config.NodesStr != "" {
+		config.Nodes = strings.Split(config.NodesStr, ",")
+		// Trim whitespace from each node
+		for i, node := range config.Nodes {
+			config.Nodes[i] = strings.TrimSpace(node)
+		}
+	}
+
+	return &config, nil
+}
diff --git a/internal/proxmox/types.go b/internal/proxmox/types.go
new file mode 100644
index 0000000..6fbcaec --- /dev/null +++ b/internal/proxmox/types.go @@ -0,0 +1,160 @@ +package proxmox + +import ( + "net/http" + "time" + + "github.com/cpp-cyber/proclone/internal/tools" +) + +// ProxmoxConfig holds the configuration for Proxmox API +type ProxmoxConfig struct { + Host string `envconfig:"PROXMOX_HOST" required:"true"` + Port string `envconfig:"PROXMOX_PORT" default:"8006"` + TokenID string `envconfig:"PROXMOX_TOKEN_ID" required:"true"` + TokenSecret string `envconfig:"PROXMOX_TOKEN_SECRET" required:"true"` + VerifySSL bool `envconfig:"PROXMOX_VERIFY_SSL" default:"false"` + CriticalPool string `envconfig:"PROXMOX_CRITICAL_POOL"` + Realm string `envconfig:"REALM"` + NodesStr string `envconfig:"PROXMOX_NODES"` + StorageID string `envconfig:"STORAGE_ID" default:"local-lvm"` + Nodes []string // Parsed from NodesStr + APIToken string // Computed from TokenID and TokenSecret +} + +// Service interface defines the methods for Proxmox operations +type Service interface { + // Cluster and Resource Management + GetClusterResourceUsage() (*ClusterResourceUsageResponse, error) + GetClusterResources(getParams string) ([]VirtualResource, error) + GetNodeStatus(nodeName string) (*ProxmoxNodeStatus, error) + FindBestNode() (string, error) + SyncUsers() error + SyncGroups() error + + // Pod Management + GetNextPodIDs(minPodID int, maxPodID int, num int) ([]string, []int, error) + + // VM Management + GetVMs() ([]VirtualResource, error) + GetNextVMIDs(num int) ([]int, error) + StartVM(node string, vmID int) error + ShutdownVM(node string, vmID int) error + RebootVM(node string, vmID int) error + StopVM(node string, vmID int) error + DeleteVM(node string, vmID int) error + ConvertVMToTemplate(node string, vmID int) error + CloneVMWithConfig(req VMCloneRequest) error + WaitForCloneCompletion(vm *VM, timeout time.Duration) error + WaitForDisk(node string, vmid int, maxWait time.Duration) error + WaitForRunning(vm VM) error + WaitForStopped(vm VM) error + + // Pool Management + GetPoolVMs(poolName string) ([]VirtualResource, error) + CreateNewPool(poolName string) error + SetPoolPermission(poolName string, targetName string, isGroup bool) error + DeletePool(poolName string) error + IsPoolEmpty(poolName string) (bool, error) + WaitForPoolEmpty(poolName string, timeout time.Duration) error + + // Template Management + GetTemplatePools() ([]string, error) + + // Internal access for router functionality + GetRequestHelper() *tools.ProxmoxRequestHelper +} + +// ProxmoxService implements the Service interface for Proxmox operations +type ProxmoxService struct { + Config *ProxmoxConfig + HTTPClient *http.Client + BaseURL string + RequestHelper *tools.ProxmoxRequestHelper +} + +type ProxmoxNode struct { + Node string `json:"node"` + Status string `json:"status"` +} + +type ProxmoxNodeStatus struct { + CPU float64 `json:"cpu"` + Memory struct { + Total int64 `json:"total"` + Used int64 `json:"used"` + } `json:"memory"` + Uptime int64 `json:"uptime"` +} + +type VirtualResourceConfig struct { + HardDisk string `json:"scsi0"` + Lock string `json:"lock,omitempty"` + Net0 string `json:"net0"` + Net1 string `json:"net1,omitempty"` +} + +type VirtualResourceStatus struct { + Status string `json:"status"` +} + +type VNetResponse []struct { + VNet string `json:"vnet"` +} + +type VM struct { + Name string `json:"name,omitempty"` + Node string `json:"node"` + VMID int `json:"vmid"` +} + +type VMCloneRequest struct { + SourceVM VM + PoolName string + PodID string + NewVMID int + TargetNode string +} + +type 
VirtualResource struct { + CPU float64 `json:"cpu,omitempty"` + MaxCPU int `json:"maxcpu,omitempty"` + Mem int `json:"mem,omitempty"` + MaxMem int `json:"maxmem,omitempty"` + Type string `json:"type,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + NodeName string `json:"node,omitempty"` + ResourcePool string `json:"pool,omitempty"` + RunningStatus string `json:"status,omitempty"` + Uptime int `json:"uptime,omitempty"` + VmId int `json:"vmid,omitempty"` + Storage string `json:"storage,omitempty"` + Disk int64 `json:"disk,omitempty"` + MaxDisk int64 `json:"maxdisk,omitempty"` + Template int `json:"template,omitempty"` +} + +type ResourceUsage struct { + CPUUsage float64 `json:"cpu_usage"` // CPU usage percentage + MemoryUsed int64 `json:"memory_used"` // Used memory in bytes + MemoryTotal int64 `json:"memory_total"` // Total memory in bytes + StorageUsed int64 `json:"storage_used"` // Used storage in bytes + StorageTotal int64 `json:"storage_total"` // Total storage in bytes +} + +type NodeResourceUsage struct { + Name string `json:"name"` + Resources ResourceUsage `json:"resources"` +} + +type ClusterResourceUsageResponse struct { + Total ResourceUsage `json:"total"` + Nodes []NodeResourceUsage `json:"nodes"` + Errors []string `json:"errors,omitempty"` +} + +type PendingDiskResponse struct { + Used int64 `json:"used"` + Size int64 `json:"size"` +} diff --git a/internal/proxmox/vms.go b/internal/proxmox/vms.go new file mode 100644 index 0000000..a5322d9 --- /dev/null +++ b/internal/proxmox/vms.go @@ -0,0 +1,360 @@ +package proxmox + +import ( + "fmt" + "log" + "math" + "strconv" + "strings" + "time" + + "github.com/cpp-cyber/proclone/internal/tools" +) + +// ================================================= +// Public Functions +// ================================================= + +func (s *ProxmoxService) GetVMs() ([]VirtualResource, error) { + vms, err := s.GetClusterResources("type=vm") + if err != nil { + return nil, err + } + return vms, nil +} + +func (s *ProxmoxService) StartVM(node string, vmID int) error { + return s.vmAction("start", node, vmID) +} + +func (s *ProxmoxService) StopVM(node string, vmID int) error { + return s.vmAction("stop", node, vmID) +} + +func (s *ProxmoxService) ShutdownVM(node string, vmID int) error { + return s.vmAction("shutdown", node, vmID) +} + +func (s *ProxmoxService) RebootVM(node string, vmID int) error { + return s.vmAction("reboot", node, vmID) +} + +func (s *ProxmoxService) DeleteVM(node string, vmID int) error { + if err := s.validateVMID(vmID); err != nil { + return err + } + + req := tools.ProxmoxAPIRequest{ + Method: "DELETE", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d", node, vmID), + } + + _, err := s.RequestHelper.MakeRequest(req) + if err != nil { + return fmt.Errorf("failed to delete VM: %w", err) + } + + return nil +} + +func (s *ProxmoxService) ConvertVMToTemplate(node string, vmID int) error { + if err := s.validateVMID(vmID); err != nil { + return err + } + + req := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/template", node, vmID), + } + + _, err := s.RequestHelper.MakeRequest(req) + if err != nil { + if !strings.Contains(err.Error(), "you can't convert a template to a template") { + return fmt.Errorf("failed to convert VM to template: %w", err) + } + } + + return nil +} + +func (s *ProxmoxService) CloneVM(sourceVM VM, newPoolName string) (*VM, error) { + // Get next available VMID + req := tools.ProxmoxAPIRequest{ + Method: "GET", + Endpoint: 
"/cluster/nextid", + } + + var nextIDStr string + if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &nextIDStr); err != nil { + return nil, fmt.Errorf("failed to get next VMID: %w", err) + } + + newVMID, err := strconv.Atoi(nextIDStr) + if err != nil { + return nil, fmt.Errorf("invalid VMID received: %w", err) + } + + // Find best node for cloning + bestNode, err := s.FindBestNode() + if err != nil { + return nil, fmt.Errorf("failed to find best node: %w", err) + } + + // Clone VM + cloneBody := map[string]any{ + "newid": newVMID, + "name": sourceVM.Name, + "pool": newPoolName, + "full": 0, // Linked clone + "target": bestNode, + } + + cloneReq := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/clone", sourceVM.Node, sourceVM.VMID), + RequestBody: cloneBody, + } + + _, err = s.RequestHelper.MakeRequest(cloneReq) + if err != nil { + return nil, fmt.Errorf("failed to initiate VM clone: %w", err) + } + + // Wait for clone to complete + newVM := &VM{ + Node: bestNode, + VMID: newVMID, + } + + err = s.WaitForCloneCompletion(newVM, 5*time.Minute) // CLONE_TIMEOUT + if err != nil { + return nil, fmt.Errorf("clone operation failed: %w", err) + } + + return newVM, nil +} + +func (s *ProxmoxService) CloneVMWithConfig(req VMCloneRequest) error { + // Clone VM + cloneBody := map[string]any{ + "newid": req.NewVMID, + "name": req.SourceVM.Name, + "pool": req.PoolName, + "full": 0, // Linked clone + "target": req.TargetNode, + } + + cloneReq := tools.ProxmoxAPIRequest{ + Method: "POST", + Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/clone", req.SourceVM.Node, req.SourceVM.VMID), + RequestBody: cloneBody, + } + + _, err := s.RequestHelper.MakeRequest(cloneReq) + if err != nil { + return fmt.Errorf("failed to initiate VM clone: %w", err) + } + + return nil +} + +func (s *ProxmoxService) WaitForCloneCompletion(vm *VM, timeout time.Duration) error { + start := time.Now() + backoff := time.Second + maxBackoff := 30 * time.Second + + for time.Since(start) < timeout { + // Check VM status + status, err := s.getVMStatus(vm.Node, vm.VMID) + if err != nil { + time.Sleep(backoff) + backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff))) + continue + } + + if status == "running" || status == "stopped" { + // Check if VM is locked (clone in progress) + configResp, err := s.getVMConfig(vm.Node, vm.VMID) + if err != nil { + time.Sleep(backoff) + backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff))) + continue + } + + if configResp.Lock == "" { + return nil // Clone is complete and VM is not locked + } + } + + time.Sleep(backoff) + backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff))) + } + + return fmt.Errorf("clone operation timed out after %v", timeout) +} + +func (s *ProxmoxService) WaitForDisk(node string, vmid int, maxWait time.Duration) error { + start := time.Now() + + for time.Since(start) < maxWait { + time.Sleep(2 * time.Second) + + configResp, err := s.getVMConfig(node, vmid) + if err != nil { + continue + } + + if configResp.HardDisk != "" { + pendingReq := tools.ProxmoxAPIRequest{ + Method: "GET", + Endpoint: fmt.Sprintf("/nodes/%s/storage/%s/content?vmid=%d", node, s.Config.StorageID, vmid), + } + + var diskResponse []PendingDiskResponse + err := s.RequestHelper.MakeRequestAndUnmarshal(pendingReq, &diskResponse) + if err != nil || len(diskResponse) == 0 { + log.Printf("Error retrieving pending disk info for VMID %d on node %s: %v", vmid, node, err) + continue + } + + // Iterate through all disks, if all 
have valid (non-zero) Used and Size values, consider them available
+			allAvailable := true
+			for _, disk := range diskResponse {
+				if disk.Used == 0 || disk.Size == 0 {
+					allAvailable = false
+					break
+				}
+			}
+
+			if allAvailable {
+				return nil // Disk is available
+			}
+		}
+	}
+
+	return fmt.Errorf("timeout waiting for VM disks to become available")
+}
+
+func (s *ProxmoxService) WaitForStopped(vm VM) error {
+	return s.waitForStatus("stopped", vm)
+}
+
+func (s *ProxmoxService) WaitForRunning(vm VM) error {
+	return s.waitForStatus("running", vm)
+}
+
+func (s *ProxmoxService) GetNextVMIDs(num int) ([]int, error) {
+	// Get VMs
+	resources, err := s.GetClusterResources("type=vm")
+	if err != nil {
+		return nil, fmt.Errorf("failed to get cluster resources: %w", err)
+	}
+
+	// Iterate through and find the highest VMID under 4000
+	highestID := 100
+	for _, res := range resources {
+		if res.VmId > highestID && res.VmId < 4000 {
+			highestID = res.VmId
+		}
+	}
+
+	// Generate the next num VMIDs
+	var vmIDs []int
+	for i := 1; i <= num; i++ {
+		vmIDs = append(vmIDs, highestID+i)
+	}
+
+	return vmIDs, nil
+}
+
+// =================================================
+// Private Functions
+// =================================================
+
+func (s *ProxmoxService) vmAction(action string, node string, vmID int) error {
+	if err := s.validateVMID(vmID); err != nil {
+		return err
+	}
+
+	req := tools.ProxmoxAPIRequest{
+		Method:   "POST",
+		Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/status/%s", node, vmID, action),
+	}
+
+	_, err := s.RequestHelper.MakeRequest(req)
+	if err != nil {
+		return fmt.Errorf("failed to %s VM: %w", action, err)
+	}
+
+	return nil
+}
+
+func (s *ProxmoxService) waitForStatus(targetStatus string, vm VM) error {
+	timeout := 2 * time.Minute
+	start := time.Now()
+
+	for time.Since(start) < timeout {
+		currentStatus, err := s.getVMStatus(vm.Node, vm.VMID)
+		if err != nil {
+			time.Sleep(5 * time.Second)
+			continue
+		}
+
+		if currentStatus == targetStatus {
+			return nil
+		}
+
+		time.Sleep(5 * time.Second)
+	}
+
+	return fmt.Errorf("timeout waiting for VM to be %s", targetStatus)
+}
+
+func (s *ProxmoxService) validateVMID(vmID int) error {
+	// Get VMs
+	vms, err := s.GetClusterResources("type=vm")
+	if err != nil {
+		return err
+	}
+
+	// Check if VMID exists
+	for _, vm := range vms {
+		if vm.VmId == vmID {
+			// Check if VM is in critical pool
+			if vm.ResourcePool == s.Config.CriticalPool {
+				return fmt.Errorf("VMID %d is in critical pool", vmID)
+			}
+			return nil
+		}
+	}
+
+	return fmt.Errorf("VMID %d not found", vmID)
+}
+
+func (s *ProxmoxService) getVMConfig(node string, VMID int) (*VirtualResourceConfig, error) {
+	configReq := tools.ProxmoxAPIRequest{
+		Method:   "GET",
+		Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/config", node, VMID),
+	}
+
+	var config VirtualResourceConfig
+	if err := s.RequestHelper.MakeRequestAndUnmarshal(configReq, &config); err != nil {
+		return nil, fmt.Errorf("failed to get VM config: %w", err)
+	}
+
+	return &config, nil
+}
+
+func (s *ProxmoxService) getVMStatus(node string, VMID int) (string, error) {
+	req := tools.ProxmoxAPIRequest{
+		Method:   "GET",
+		Endpoint: fmt.Sprintf("/nodes/%s/qemu/%d/status/current", node, VMID),
+	}
+
+	var response VirtualResourceStatus
+	if err := s.RequestHelper.MakeRequestAndUnmarshal(req, &response); err != nil {
+		return "", fmt.Errorf("failed to get VM status: %w", err)
+	}
+
+	return response.Status, nil
+}
diff --git a/internal/tools/database_client.go b/internal/tools/database_client.go
new file mode 100644
index 0000000..940b97a
--- /dev/null
+++ b/internal/tools/database_client.go
@@ -0,0 +1,294 @@
+package tools
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/kelseyhightower/envconfig"
+
+	// Blank import registers the "mysql" driver used by sql.Open below
+	// (assumed to be go-sql-driver/mysql, which matches the tcp(...) DSN
+	// format used in this file)
+	_ "github.com/go-sql-driver/mysql"
+)
+
+// DatabaseConfig holds database configuration
+type DatabaseConfig struct {
+	Host     string `envconfig:"DB_HOST" required:"true"`
+	Port     string `envconfig:"DB_PORT" required:"true"`
+	User     string `envconfig:"DB_USER" required:"true"`
+	Password string `envconfig:"DB_PASSWORD" required:"true"`
+	Name     string `envconfig:"DB_NAME" required:"true"`
+}
+
+// DBClient wraps database connection and provides reconnection capabilities
+type DBClient struct {
+	db        *sql.DB
+	config    *DatabaseConfig
+	mutex     sync.RWMutex
+	connected bool
+}
+
+// NewDBClient creates a new database client with reconnection capabilities
+func NewDBClient() (*DBClient, error) {
+	var dbConfig DatabaseConfig
+	if err := envconfig.Process("", &dbConfig); err != nil {
+		return nil, fmt.Errorf("failed to process database configuration: %w", err)
+	}
+
+	client := &DBClient{
+		config: &dbConfig,
+	}
+
+	if err := client.Connect(); err != nil {
+		return nil, fmt.Errorf("failed to connect to database: %w", err)
+	}
+
+	return client, nil
+}
+
+// Connect establishes connection to the database
+func (c *DBClient) Connect() error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	// Build the Data Source Name (DSN)
+	dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?parseTime=true",
+		c.config.User, c.config.Password, c.config.Host, c.config.Port, c.config.Name)
+
+	// Open database connection
+	db, err := sql.Open("mysql", dsn)
+	if err != nil {
+		c.connected = false
+		return fmt.Errorf("failed to open database connection: %w", err)
+	}
+
+	// Test the connection
+	err = db.Ping()
+	if err != nil {
+		db.Close()
+		c.connected = false
+		return fmt.Errorf("failed to ping database: %w", err)
+	}
+
+	// Configure connection pool settings
+	db.SetMaxOpenConns(25)   // Maximum number of open connections
+	db.SetMaxIdleConns(25)   // Maximum number of idle connections
+	db.SetConnMaxLifetime(0) // Maximum connection lifetime (0 = unlimited)
+
+	c.db = db
+	c.connected = true
+	log.Printf("Successfully connected to MariaDB database: %s", c.config.Name)
+	return nil
+}
+
+// Disconnect closes the database connection
+func (c *DBClient) Disconnect() error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if c.db == nil {
+		c.connected = false
+		return nil
+	}
+
+	err := c.db.Close()
+	c.connected = false
+	return err
+}
+
+// isConnectionError checks if an error indicates a connection problem
+func (c *DBClient) isConnectionError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	errorMsg := strings.ToLower(err.Error())
+	return strings.Contains(errorMsg, "connection") ||
+		strings.Contains(errorMsg, "broken pipe") ||
+		strings.Contains(errorMsg, "network") ||
+		strings.Contains(errorMsg, "timeout") ||
+		strings.Contains(errorMsg, "eof") ||
+		strings.Contains(errorMsg, "invalid connection") ||
+		strings.Contains(errorMsg, "connection refused") ||
+		strings.Contains(errorMsg, "server has gone away")
+}
+
+// reconnect attempts to reconnect to the database
+func (c *DBClient) reconnect() error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	// Close existing connection if any
+	if c.db != nil {
+		c.db.Close()
+	}
+	c.connected = false
+
+	// Wait a moment before retrying
+	time.Sleep(100 * time.Millisecond)
+
+	// Build the Data Source Name (DSN)
+	dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?parseTime=true",
+		c.config.User, c.config.Password, c.config.Host, c.config.Port, c.config.Name)
+
+	// Open database connection
+	db, err := sql.Open("mysql", dsn)
+	if err != nil {
+		return fmt.Errorf("failed to reconnect to database: %w", err)
+	}
+
+	// Test the connection
+	err = db.Ping()
+	if err != nil {
+		db.Close()
+		return fmt.Errorf("failed to ping database after reconnection: %w", err)
+	}
+
+	// Configure connection pool settings
+	db.SetMaxOpenConns(25)
+	db.SetMaxIdleConns(25)
+	db.SetConnMaxLifetime(0)
+
+	c.db = db
+	c.connected = true
+	return nil
+}
+
+// executeWithRetry executes a database operation with automatic retry on connection errors
+func (c *DBClient) executeWithRetry(operation func(*sql.DB) error, maxRetries int) error {
+	var lastErr error
+
+	for attempt := 0; attempt <= maxRetries; attempt++ {
+		c.mutex.RLock()
+		if !c.connected && c.db != nil {
+			c.mutex.RUnlock()
+			if reconnectErr := c.reconnect(); reconnectErr != nil {
+				lastErr = reconnectErr
+				continue
+			}
+			c.mutex.RLock()
+		}
+		db := c.db
+		c.mutex.RUnlock()
+
+		if db == nil {
+			lastErr = fmt.Errorf("no database connection available")
+			if attempt < maxRetries {
+				if reconnectErr := c.reconnect(); reconnectErr != nil {
+					lastErr = reconnectErr
+				}
+			}
+			continue
+		}
+
+		err := operation(db)
+		if err == nil {
+			return nil
+		}
+
+		lastErr = err
+
+		// If it's not a connection error, don't retry
+		if !c.isConnectionError(err) {
+			return err
+		}
+
+		// Mark as disconnected and try to reconnect
+		c.mutex.Lock()
+		c.connected = false
+		c.mutex.Unlock()
+
+		// Don't reconnect on the last attempt
+		if attempt < maxRetries {
+			if reconnectErr := c.reconnect(); reconnectErr != nil {
+				lastErr = reconnectErr
+			}
+		}
+	}
+
+	return fmt.Errorf("database operation failed after %d attempts, last error: %v", maxRetries+1, lastErr)
+}
+
+// DB returns the underlying sql.DB handle (note: no retry wrapping)
+func (c *DBClient) DB() *sql.DB {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+	return c.db
+}
+
+// Exec executes a query with retry mechanism
+func (c *DBClient) Exec(query string, args ...interface{}) (sql.Result, error) {
+	var result sql.Result
+	err := c.executeWithRetry(func(db *sql.DB) error {
+		res, err := db.Exec(query, args...)
+		if err != nil {
+			return err
+		}
+		result = res
+		return nil
+	}, 2)
+	return result, err
+}
+
+// Query executes a query that returns rows with retry mechanism
+func (c *DBClient) Query(query string, args ...interface{}) (*sql.Rows, error) {
+	var rows *sql.Rows
+	err := c.executeWithRetry(func(db *sql.DB) error {
+		res, err := db.Query(query, args...)
+		if err != nil {
+			return err
+		}
+		rows = res
+		return nil
+	}, 2)
+	return rows, err
+}
+
+// QueryRow executes a query that returns at most one row (no retry mechanism)
+func (c *DBClient) QueryRow(query string, args ...interface{}) *sql.Row {
+	c.mutex.RLock()
+	db := c.db
+	c.mutex.RUnlock()
+
+	if db == nil {
+		// No connection: a zero-value Row is a stopgap — its Scan cannot report
+		// this condition cleanly, so callers should prefer Query, which retries
+		return &sql.Row{}
+	}
+
+	return db.QueryRow(query, args...)
+ +// Ping checks if the database connection is alive +func (c *DBClient) Ping() error { + return c.executeWithRetry(func(db *sql.DB) error { + return db.Ping() + }, 2) +} +
+// IsConnected returns the current connection status +func (c *DBClient) IsConnected() bool { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.connected +} +
+// HealthCheck performs a simple query to verify the connection is working +func (c *DBClient) HealthCheck() error { + return c.executeWithRetry(func(db *sql.DB) error { + var result int + return db.QueryRow("SELECT 1").Scan(&result) + }, 2) +} +
+// Connect to the MariaDB database (legacy function for backward compatibility) +// Note: the returned handle bypasses the reconnection wrapper; prefer NewDBClient for new code +func InitDB() (*sql.DB, error) { + client, err := NewDBClient() + if err != nil { + return nil, err + } + return client.DB(), nil +}
diff --git a/internal/tools/requests.go b/internal/tools/requests.go new file mode 100644 index 0000000..9db67ba --- /dev/null +++ b/internal/tools/requests.go @@ -0,0 +1,116 @@ +package tools + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" +) +
+// ProxmoxAPIRequest represents a request to the Proxmox API +type ProxmoxAPIRequest struct { + Method string // GET, POST, PUT, DELETE + Endpoint string // The API endpoint (e.g., "/nodes", "/nodes/node1/status") + RequestBody any // Optional request body for POST/PUT requests +} +
+// ProxmoxAPIResponse represents the generic Proxmox API response structure +type ProxmoxAPIResponse struct { + Data json.RawMessage `json:"data"` +} +
+// ProxmoxRequestHelper provides a helper for making HTTP requests to Proxmox API +type ProxmoxRequestHelper struct { + BaseURL string + APIToken string + HTTPClient *http.Client +} +
+// NewProxmoxRequestHelper creates a new Proxmox request helper +func NewProxmoxRequestHelper(baseURL, apiToken string, httpClient *http.Client) *ProxmoxRequestHelper { + return &ProxmoxRequestHelper{ + BaseURL: baseURL, + APIToken: apiToken, + HTTPClient: httpClient, + } +} +
+// MakeRequest performs an HTTP request to the Proxmox API and returns the raw response data +func (prh *ProxmoxRequestHelper) MakeRequest(req ProxmoxAPIRequest) (json.RawMessage, error) { + var reqBody io.Reader + + // Prepare request body for POST/PUT requests + if req.Method == "POST" || req.Method == "PUT" { + var bodyData any + if req.RequestBody != nil { + bodyData = req.RequestBody + } else { + bodyData = map[string]any{} + } + + jsonData, err := json.Marshal(bodyData) + if err != nil { + return nil, fmt.Errorf("failed to marshal request body: %w", err) + } + reqBody = bytes.NewBuffer(jsonData) + } + + // Create the full URL + url := prh.BaseURL + req.Endpoint + + // Create HTTP request + httpReq, err := http.NewRequest(req.Method, url, reqBody) + if err != nil { + return nil, fmt.Errorf("failed to create %s request to %s: %w", req.Method, req.Endpoint, err) + } + + // Set headers + httpReq.Header.Add("Authorization", "PVEAPIToken="+prh.APIToken) + httpReq.Header.Add("Content-Type", "application/json") + + // Execute the request + resp, err := prh.HTTPClient.Do(httpReq) + if err != nil { + return nil, fmt.Errorf("failed to execute %s request to %s: %w", req.Method, req.Endpoint, err) + } + defer resp.Body.Close() + + // Read response body first for better error reporting + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body from %s %s: %w", req.Method, req.Endpoint, err) + } + + // Check response status + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("proxmox API returned status %d for %s %s, response: %s", resp.StatusCode, req.Method, req.Endpoint, string(bodyBytes)) + } + + // DELETE responses carry no data payload to parse, so skip the ProxmoxAPIResponse + // envelope and return a valid JSON null that MakeRequestAndUnmarshal callers can + // unmarshal without error + if req.Method == "DELETE" { + return json.RawMessage("null"), nil + } + + // Decode the API response for other methods + var apiResponse ProxmoxAPIResponse + if err := json.Unmarshal(bodyBytes, &apiResponse); err != nil { + return nil, fmt.Errorf("failed to decode response from %s %s: %w", req.Method, req.Endpoint, err) + } + + return apiResponse.Data, nil +}
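A quick sketch of the helper in use, same package — the host, token, and timeout are placeholders, /nodes is the standard Proxmox VE node-listing endpoint, and time would need adding to the import set:

func listNodes() error {
	helper := NewProxmoxRequestHelper(
		"https://proxmox.example.com:8006/api2/json", // placeholder base URL
		"user@pve!mytoken=placeholder-secret",        // full PVEAPIToken value: tokenID=secret
		&http.Client{Timeout: 30 * time.Second},
	)

	// GET /nodes returns one entry per cluster node
	data, err := helper.MakeRequest(ProxmoxAPIRequest{Method: "GET", Endpoint: "/nodes"})
	if err != nil {
		return err
	}

	var nodes []struct {
		Node   string `json:"node"`
		Status string `json:"status"`
	}
	if err := json.Unmarshal(data, &nodes); err != nil {
		return err
	}
	for _, n := range nodes {
		fmt.Printf("%s: %s\n", n.Node, n.Status)
	}
	return nil
}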
fmt.Errorf("proxmox API returned status %d for %s %s, response: %s", resp.StatusCode, req.Method, req.Endpoint, string(bodyBytes)) + } + + // Don't try to parse into ProxmoxAPIResponse structure for DELETE operations + if req.Method == "DELETE" { + return json.RawMessage("nil"), nil + } + + // Decode the API response for other methods + var apiResponse ProxmoxAPIResponse + if err := json.Unmarshal(bodyBytes, &apiResponse); err != nil { + return nil, fmt.Errorf("failed to decode response from %s %s: %w", req.Method, req.Endpoint, err) + } + + return apiResponse.Data, nil +} + +// MakeRequestAndUnmarshal performs an HTTP request and unmarshals the response into the provided interface +func (prh *ProxmoxRequestHelper) MakeRequestAndUnmarshal(req ProxmoxAPIRequest, target any) error { + data, err := prh.MakeRequest(req) + if err != nil { + return err + } + + if err := json.Unmarshal(data, target); err != nil { + return fmt.Errorf("failed to unmarshal response data from %s %s: %w", req.Method, req.Endpoint, err) + } + + return nil +} diff --git a/internal/tools/telemetry.go b/internal/tools/telemetry.go new file mode 100644 index 0000000..0aaa040 --- /dev/null +++ b/internal/tools/telemetry.go @@ -0,0 +1,3 @@ +package tools + +// TODO: Implement telemetry logging diff --git a/main.go b/main.go deleted file mode 100644 index 03ed0c1..0000000 --- a/main.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "log" - "os" - - "github.com/P-E-D-L/proclone/auth" - "github.com/P-E-D-L/proclone/proxmox" - "github.com/P-E-D-L/proclone/proxmox/cloning" - "github.com/gin-contrib/sessions" - "github.com/gin-contrib/sessions/cookie" - "github.com/gin-gonic/gin" - "github.com/joho/godotenv" -) - -// init the environment -func init() { - _ = godotenv.Load() -} - -func main() { - r := gin.Default() - - // store session cookie - // **IN PROD USE REAL SECURE KEY** - store := cookie.NewStore([]byte(os.Getenv("SECRET_KEY"))) - - // further cookie security - store.Options(sessions.Options{ - MaxAge: 3600, - HttpOnly: true, - Secure: true, - }) - - r.Use(sessions.Sessions("mysession", store)) - - // export public route - r.POST("/api/login", auth.LoginHandler) - - // authenticated routes - user := r.Group("/api") - user.Use(auth.AuthRequired) - user.GET("/profile", auth.ProfileHandler) - user.GET("/session", auth.SessionHandler) - user.POST("/logout", auth.LogoutHandler) - - // Proxmox User Template endpoints - user.GET("/proxmox/templates", cloning.GetAvailableTemplates) - user.POST("/proxmox/templates/clone", cloning.CloneTemplateToPod) - user.POST("/proxmox/pods/delete", cloning.DeletePod) - - // Proxmox Pod endpoints - user.GET("/proxmox/pods", cloning.GetUserPods) - - // admin routes - admin := user.Group("/admin") - admin.Use(auth.AdminRequired) - - // Proxmox VM endpoints - admin.GET("/proxmox/virtualmachines", proxmox.GetVirtualMachines) - admin.POST("/proxmox/virtualmachines/shutdown", proxmox.PowerOffVirtualMachine) - admin.POST("/proxmox/virtualmachines/start", proxmox.PowerOnVirtualMachine) - - // Proxmox resource monitoring endpoint - admin.GET("/proxmox/resources", proxmox.GetProxmoxResources) - - // Proxmox Admin Pod endpoints - admin.GET("/proxmox/pods/all", cloning.GetPods) - - // Active Directory User endpoints - admin.GET("/users", auth.GetUsers) - - // get port to run server on via. 
PC_PORT env variable - port := os.Getenv("PC_PORT") - if port == "" { - port = "8080" - } - - if err := r.Run(":" + port); err != nil { - log.Fatalf("failed to run server: %v", err) - } -} diff --git a/proxmox/cloning/cloning.go b/proxmox/cloning/cloning.go deleted file mode 100644 index 04e3223..0000000 --- a/proxmox/cloning/cloning.go +++ /dev/null @@ -1,614 +0,0 @@ -package cloning - -import ( - "context" - "crypto/tls" - "encoding/json" - "fmt" - "log" - "math" - "net/http" - "os" - "sort" - "strconv" - "time" - - "github.com/P-E-D-L/proclone/auth" - "github.com/P-E-D-L/proclone/proxmox" - "github.com/P-E-D-L/proclone/proxmox/cloning/locking" - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - -const KAMINO_TEMP_POOL string = "0100_Kamino_Templates" -const ROUTER_NAME string = "1-1NAT-pfsense" - -var STORAGE_ID string = os.Getenv("STORAGE_ID") - -type CloneRequest struct { - TemplateName string `json:"template_name" binding:"required"` -} - -type NewPoolResponse struct { - Success int `json:"success,omitempty"` -} - -type CloneResponse struct { - Success int `json:"success"` - PodName string `json:"pod_name"` - Errors []string `json:"errors,omitempty"` -} - -type NextIDResponse struct { - Data string `json:"data"` -} - -type StorageResponse struct { - Data []Disk `json:"data"` -} -type Disk struct { - Id string `json:"volid"` - Size int64 `json:"size,omitempty"` - Used int64 `json:"used,omitempty"` -} - -/* - * ===== CLONE VMS FROM TEMPLATE POOL TO POD POOL ===== - */ -func CloneTemplateToPod(c *gin.Context) { - session := sessions.Default(c) - username := session.Get("username") - var errors []string - - // Make sure user is authenticated - isAuth, _ := auth.IsAuthenticated(c) - if !isAuth { - log.Printf("Unauthorized access attempt") - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only authenticated users can access pod data", - }) - return - } - - // Parse request body - var req CloneRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{ - "error": "Invalid request format", - "details": err.Error(), - }) - return - } - - templatePool := "kamino_template_" + req.TemplateName - - // Load Proxmox configuration - config, err := proxmox.LoadProxmoxConfig() - if err != nil { - log.Printf("Configuration error for user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": fmt.Sprintf("failed to load Proxmox configuration: %v", err), - }) - return - } - - // Get all virtual resources - apiResp, err := proxmox.GetVirtualResources(config) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "failed to fetch virtual resources", - "details": err.Error(), - }) - return - } - - // Find VMs in template pool - var templateVMs []proxmox.VirtualResource - var routerTemplate proxmox.VirtualResource - for _, r := range *apiResp { - - // if VM is a member of target pool, add it to list - if r.Type == "qemu" && r.ResourcePool == templatePool { - templateVMs = append(templateVMs, r) - } - - // if vm is pod router template, save that to variable - if r.Name == ROUTER_NAME && r.ResourcePool == KAMINO_TEMP_POOL { - routerTemplate = r - } - } - - // handle case where template is empty and should not be cloned - if len(templateVMs) == 0 { - c.JSON(http.StatusNotFound, gin.H{ - "error": fmt.Sprintf("No VMs found in template pool: %s", templatePool), - }) - return - } - - // get next avaialble pod ID - NewPodID, newPodNumber, err := nextPodID(config, c) - - if err != nil { - 
c.JSON(http.StatusInternalServerError, gin.H{ - "error": "failed to get a pod ID", - "details": err.Error(), - }) - return - } - - // create new pod resource pool with ID - NewPodPool, err := createNewPodPool(username.(string), NewPodID, req.TemplateName, config) - - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "failed to create new pod resource pool", - "details": err.Error(), - }) - return - } - - /* Clone 1:1 NAT router from template - * - */ - newRouter, err := cloneVM(config, routerTemplate, NewPodPool) - if err != nil { - errors = append(errors, fmt.Sprintf("failed to clone router VM: %v", err)) - } - - // Clone each VM to new pool - for _, vm := range templateVMs { - _, err := cloneVM(config, vm, NewPodPool) - if err != nil { - errors = append(errors, fmt.Sprintf("failed to clone VM %s: %v", vm.Name, err)) - } - } - - // Check if vnet exists, if not, create it - vnetExists, err := checkForVnet(config, newPodNumber) - var vnetName string - - if err != nil { - errors = append(errors, fmt.Sprintf("failed to check current vnets: %v", err)) - } - - if !vnetExists { - vnetName, err = addVNetObject(config, newPodNumber) - if err != nil { - errors = append(errors, fmt.Sprintf("failed to create new vnet object: %v", err)) - } - - err = applySDNChanges(config) - if err != nil { - errors = append(errors, fmt.Sprintf("failed to apply new sdn changes: %v", err)) - } - } else { - vnetName = fmt.Sprintf("kamino%d", newPodNumber) - } - - // Configure VNet of all VMs - err = setPodVnet(config, NewPodPool, vnetName) - if err != nil { - errors = append(errors, fmt.Sprintf("failed to update pod vnet: %v", err)) - } - - // Turn on router - err = waitForDiskAvailability(config, newRouter.Node, newRouter.VMID, 120*time.Second) - if err != nil { - errors = append(errors, fmt.Sprintf("router disk unavailable: %v", err)) - } - _, err = proxmox.PowerOnRequest(config, *newRouter) - - if err != nil { - errors = append(errors, fmt.Sprintf("failed to start router VM: %v", err)) - } - - // Wait for router to be running - err = proxmox.WaitForRunning(config, *newRouter) - if err != nil { - errors = append(errors, fmt.Sprintf("failed to start router VM: %v", err)) - } else { - err = configurePodRouter(config, newPodNumber, newRouter.Node, newRouter.VMID) - if err != nil { - errors = append(errors, fmt.Sprintf("failed to configure pod router: %v", err)) - } - } - - // automatically give user who cloned the pod access - err = setPoolPermission(config, NewPodPool, username.(string)) - if err != nil { - errors = append(errors, fmt.Sprintf("failed to update pool permissions for %s: %v", username, err)) - } - - var success int = 0 - if len(errors) == 0 { - success = 1 - } - - response := CloneResponse{ - Success: success, - PodName: NewPodPool, - Errors: errors, - } - - if len(errors) > 0 { - // if an error has occured, count # of successfully cloned VMs - var clonedVMs []proxmox.VirtualResource - for _, r := range *apiResp { - if r.Type == "qemu" && r.ResourcePool == NewPodPool { - clonedVMs = append(templateVMs, r) - } - } - - // if there are no cloned VMs in the resource pool, clean up the resource pool - if len(clonedVMs) == 0 { - cleanupFailedPodPool(config, NewPodPool) - } - - // send response :) - c.JSON(http.StatusPartialContent, response) - } else { - c.JSON(http.StatusOK, response) - } -} - -// assign a user to be a VM user for a resource pool -func setPoolPermission(config *proxmox.ProxmoxConfig, pool string, user string) error { - // define json data holding new pool name - 
jsonString := fmt.Sprintf("{\"path\":\"/pool/%s\", \"users\":\"%s@SDC\", \"roles\":\"PVEVMUser,PVEPoolUser\", \"propagate\": true }", pool, user) - jsonData := []byte(jsonString) - - statusCode, _, err := proxmox.MakeRequest(config, "api2/json/access/acl", "PUT", jsonData, nil) - if err != nil { - return err - } - if statusCode < 200 || statusCode >= 300 { - return fmt.Errorf("failed to assign pool permissions, status code: %d", statusCode) - } - return nil -} - -func cleanupClone(config *proxmox.ProxmoxConfig, nodeName string, vmid int) error { - /* - * ----- IF RUNNING, WAIT FOR VM TO BE TURNED OFF ----- - */ - // assign values to VM struct - var vm proxmox.VM - vm.Node = nodeName - vm.VMID = vmid - - // make request to turn off VM - _, err := proxmox.StopRequest(config, vm) - - if err != nil { - // will error if the VM is alr off so just ignore - } - - // Wait for VM to be "stopped" before continuing - err = proxmox.WaitForStopped(config, vm) - if err != nil { - return fmt.Errorf("stopping vm failed: %v", err) - } - - /* - * ----- HANDLE DELETING VM ----- - */ - - // Prepare request path - path := fmt.Sprintf("api2/json/nodes/%s/qemu/%d", nodeName, vmid) - - statusCode, body, err := proxmox.MakeRequest(config, path, "DELETE", nil, nil) - if err != nil { - return fmt.Errorf("vm delete request failed: %v", err) - } - - if statusCode != http.StatusOK { - return fmt.Errorf("failed to cleanup VM: %s", string(body)) - } - - return nil -} - -// !! Need to refactor to use MakeRequest, idk why I wrote it like this :( -func cloneVM(config *proxmox.ProxmoxConfig, vm proxmox.VirtualResource, newPool string) (newVm *proxmox.VM, err error) { - // Create a single HTTP client for all requests - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: !config.VerifySSL}, - } - client := &http.Client{Transport: tr} - - bestNode, newVMID, err := makeCloneRequest(config, vm, newPool) - if err != nil { - return nil, err - } - - statusURL := fmt.Sprintf("https://%s:%s/api2/json/nodes/%s/qemu/%d/status/current", - config.Host, config.Port, bestNode, newVMID) - - backoff := time.Second - maxBackoff := 30 * time.Second - timeout := 5 * time.Minute - startTime := time.Now() - - for { - if time.Since(startTime) > timeout { - if err := cleanupClone(config, vm.NodeName, newVMID); err != nil { - return nil, fmt.Errorf("clone timed out and cleanup failed: %v", err) - } - return nil, fmt.Errorf("clone operation timed out after %v", timeout) - } - - req, err := http.NewRequest("GET", statusURL, nil) - if err != nil { - return nil, fmt.Errorf("failed to create status check request: %v", err) - } - req.Header.Set("Authorization", fmt.Sprintf("PVEAPIToken=%s", config.APIToken)) - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("failed to check clone status: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusOK { - // Verify the VM is actually cloned - var statusResponse struct { - Data struct { - Status string `json:"status"` - } `json:"data"` - } - if err := json.NewDecoder(resp.Body).Decode(&statusResponse); err != nil { - return nil, fmt.Errorf("failed to decode status response: %v", err) - } - if statusResponse.Data.Status == "running" || statusResponse.Data.Status == "stopped" { - lockURL := fmt.Sprintf("https://%s:%s/api2/json/nodes/%s/qemu/%d/config", - config.Host, config.Port, bestNode, newVMID) - lockReq, err := http.NewRequest("GET", lockURL, nil) - if err != nil { - return nil, fmt.Errorf("failed to create lock check request: %v", err) - } - 
lockReq.Header.Set("Authorization", fmt.Sprintf("PVEAPIToken=%s", config.APIToken)) - - lockResp, err := client.Do(lockReq) - if err != nil { - return nil, fmt.Errorf("failed to check lock status: %v", err) - } - defer lockResp.Body.Close() - - var configResp struct { - Data struct { - Lock string `json:"lock"` - } `json:"data"` - } - if err := json.NewDecoder(lockResp.Body).Decode(&configResp); err != nil { - return nil, fmt.Errorf("failed to decode lock status: %v", err) - } - if configResp.Data.Lock == "" { - var newVM proxmox.VM - newVM.VMID = newVMID - - // once node optimization is done must be replaced with new node !!! - newVM.Node = bestNode - - return &newVM, nil // Clone is complete and VM is not locked - } - } - } - - time.Sleep(backoff) - backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff))) - } -} - -func makeCloneRequest(config *proxmox.ProxmoxConfig, vm proxmox.VirtualResource, newPool string) (node string, vmid int, err error) { - - // lock VMID to prevent race conditions - - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - - lock, err := locking.TryAcquireLockWithBackoff(ctx, "lock:vmid", 30*time.Second, 5, 500*time.Millisecond) - if err != nil { - return "", 0, fmt.Errorf("failed to acquire vmid lock: %v", err) - } - defer lock.Release(ctx) - - // Get next available VMID - statusCode, body, err := proxmox.MakeRequest(config, "api2/json/cluster/nextid", "GET", nil, nil) - if err != nil { - return "", 0, fmt.Errorf("failed to get next VMID: %v", err) - } - - if statusCode != http.StatusOK { - return "", 0, fmt.Errorf("failed to get next VMID: %s", string(body)) - } - - var nextID NextIDResponse - if err := json.Unmarshal(body, &nextID); err != nil { - return "", 0, fmt.Errorf("failed to decode VMID response: %v", err) - } - - newVMID, err := strconv.Atoi(nextID.Data) - if err != nil { - return "", 0, fmt.Errorf("invalid VMID received: %v", err) - } - - // find optimal node - bestNode, err := findBestNode(config) - if err != nil { - return "", 0, fmt.Errorf("failed to calculate optimal compute node: %v", err) - } - - // clone VM - cloneBody := map[string]interface{}{ - "newid": newVMID, - "name": fmt.Sprintf("%s-clone", vm.Name), - "pool": newPool, - "target": bestNode, - } - - jsonBody, err := json.Marshal(cloneBody) - if err != nil { - return "", 0, fmt.Errorf("failed to create request body: %v", err) - } - - clonePath := fmt.Sprintf("api2/json/nodes/%s/qemu/%d/clone", vm.NodeName, vm.VmId) - statusCode, body, err = proxmox.MakeRequest(config, clonePath, "POST", jsonBody, nil) - if err != nil { - return "", 0, fmt.Errorf("failed to clone VM: %v", err) - } - if statusCode != http.StatusOK { - return "", 0, fmt.Errorf("failed to clone VM: %s", string(body)) - } - - return bestNode, newVMID, nil -} - -// finds lowest available POD ID between 1001 - 1255 -func nextPodID(config *proxmox.ProxmoxConfig, c *gin.Context) (string, int, error) { - podResponse, err := getAdminPodResponse(config) - - // if error, return error status - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "failed to fetch pod list from proxmox cluster", - "details": err, - }) - return "", 0, err - } - - pods := podResponse.Pods - var ids []int - - // for each pod name, get id from name and append to int array - for _, pod := range pods { - id, _ := strconv.Atoi(pod.Name[:4]) - ids = append(ids, id) - } - - sort.Ints(ids) - - var nextId int - var gapFound bool = false - - // find first id available starting from 1001 - 
for i := 1001; i <= 1000+len(ids); i++ { - nextId = i - if ids[i-1001] != i { - gapFound = true - break - } - } - - if !gapFound { - nextId = 1001 + len(ids) - } - - // if no ids available between 0 - 255 return error - if nextId == 1256 { - err = fmt.Errorf("no pod ids available") - return "", 0, err - } - - return strconv.Itoa(nextId), nextId - 1000, nil -} - -func cleanupFailedPodPool(config *proxmox.ProxmoxConfig, poolName string) error { - poolDeletePath := fmt.Sprintf("api2/json/pools/%s", poolName) - - statusCode, body, err := proxmox.MakeRequest(config, poolDeletePath, "DELETE", nil, nil) - if err != nil { - return fmt.Errorf("pool delete request failed: %v", err) - } - - if statusCode != http.StatusOK { - return fmt.Errorf("pool delete request failed: %s", string(body)) - } - - return nil -} - -func createNewPodPool(username string, newPodID string, templateName string, config *proxmox.ProxmoxConfig) (string, error) { - newPoolName := newPodID + "_" + templateName + "_" + username - - poolPath := "api2/extjs/pools" - - // define json data holding new pool name - jsonString := fmt.Sprintf("{\"poolid\":\"%s\"}", newPoolName) - jsonData := []byte(jsonString) - - _, body, err := proxmox.MakeRequest(config, poolPath, "POST", jsonData, nil) - if err != nil { - return "", fmt.Errorf("pool create request failed: %v", err) - } - - // Parse response - var newPoolResponse NewPoolResponse - if err := json.Unmarshal(body, &newPoolResponse); err != nil { - return "", fmt.Errorf("failed to parse new pool response: %v", err) - } - - return newPoolName, nil -} - -func waitForDiskAvailability(config *proxmox.ProxmoxConfig, node string, vmid int, maxWait time.Duration) error { - start := time.Now() - var status *ConfigResponse - var err error - for { - time.Sleep(2 * time.Second) - if time.Since(start) > maxWait { - return fmt.Errorf("timeout waiting for VM disks to become available") - } - - status, err = getVMConfig(config, node, vmid) - if err != nil { - continue - } - - if status.Data.HardDisk == "" { - continue - } else { - time.Sleep(5 * time.Second) - return nil - } - - /*imageId := strings.Split(status.Data.HardDisk, ",")[0] - - disks, err := getStorageContent(config, node, STORAGE_ID) - if err != nil { - log.Printf("%v", err) - continue - } - - for _, d := range *disks { - if d.Id == imageId && d.Used > 0 { - return nil - } - } */ - } -} - -/* -func getStorageContent(config *proxmox.ProxmoxConfig, node string, storage string) (response *[]Disk, err error) { - - contentPath := fmt.Sprintf("api2/json/nodes/%s/storage/%s/content", node, storage) - log.Printf("%s", contentPath) - - statusCode, body, err := proxmox.MakeRequest(config, contentPath, "GET", nil, nil) - if err != nil { - return nil, fmt.Errorf("%s storage content request failed: %v", node, err) - } - - if statusCode != http.StatusOK { - return nil, fmt.Errorf("storage content request failed: %s", string(body)) - } - - var apiResp StorageResponse - if err := json.Unmarshal(body, &apiResp); err != nil { - return nil, fmt.Errorf("failed to parse storage content response: %v", err) - } - - return &apiResp.Data, nil -} -*/ diff --git a/proxmox/cloning/deleting.go b/proxmox/cloning/deleting.go deleted file mode 100644 index f2bb981..0000000 --- a/proxmox/cloning/deleting.go +++ /dev/null @@ -1,186 +0,0 @@ -package cloning - -import ( - "fmt" - "log" - "math" - "net/http" - "strings" - "time" - - "github.com/P-E-D-L/proclone/auth" - "github.com/P-E-D-L/proclone/proxmox" - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - 
-type DeleteRequest struct { - PodName string `json:"pod_id"` // full pod name i.e. 1015_Some_Template_Administrator -} - -type DeleteResponse = CloneResponse - -/* - * ===== DELETE CLONED VM POD ===== - */ -func DeletePod(c *gin.Context) { - session := sessions.Default(c) - username := session.Get("username") - isAdmin := session.Get("is_admin") - - // Make sure user is authenticated - isAuth, _ := auth.IsAuthenticated(c) - if !isAuth { - log.Printf("Unauthorized access attempt") - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only authenticated users can delete pods", - }) - return - } - - // Parse request body - var req DeleteRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{ - "error": "Invalid request format", - "details": err.Error(), - }) - return - } - - // Check if a non-admin user is trying to delete someone else's pod - if !isAdmin.(bool) { - // handle edge-case where username is longer than entire pod name - if len(req.PodName) < len(username.(string)) { - log.Printf("User %s attempted to delete pod %s.", username, req.PodName) - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only admin users can administer other users' pods", - }) - return - } - if !strings.HasSuffix(req.PodName, username.(string)) { - log.Printf("User %s attempted to delete pod %s.", username, req.PodName) - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only admin users can administer other users' pods", - }) - return - } - } - - // Load Proxmox configuration - config, err := proxmox.LoadProxmoxConfig() - if err != nil { - log.Printf("Configuration error for user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": fmt.Sprintf("Failed to load Proxmox configuration: %v", err), - }) - return - } - - // Get all virtual resources - apiResp, err := proxmox.GetVirtualResources(config) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to fetch virtual resources", - "details": err.Error(), - }) - return - } - - // Check if resource pool actually exists - var poolExists = false - for _, r := range *apiResp { - if r.Type == "pool" && r.ResourcePool == req.PodName { - poolExists = true - } - } - - if !poolExists { - log.Printf("User %s attempted to delete pod %s, but the resource pool doesn't exist.", username, req.PodName) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Resource pool does not exist", - }) - return - } - - // Find all vms in resource pool - podVMs, err := proxmox.GetPoolMembers(config, req.PodName) - - if err != nil { - log.Printf("attempted to enumerate pod %s members, but error: %v", req.PodName, err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Resource pool does not exist", - "details": err.Error(), - }) - return - } - - var errors []string - - // for each vm in the pool - for _, vm := range podVMs { - // clean up VM (turn off & remove) - err := cleanupClone(config, vm.NodeName, vm.VmId) - if err != nil { - errors = append(errors, fmt.Sprintf("Failed to delete VM %s: %v", vm.Name, err)) - } - } - - // wait until all vms have been deleted - err = waitForEmptyPool(config, req.PodName) - - if err != nil { - errors = append(errors, fmt.Sprintf("waiting for empty pool returned error: %v", err)) - log.Printf("attempted to enumerate pod %s members, but the resource pool doesn't exist.", req.PodName) - } - - // delete resource pool - err = cleanupFailedPodPool(config, req.PodName) - - if err != nil { - errors = append(errors, fmt.Sprintf("Failed to delete pod pool %s: 
%v", req.PodName, err)) - } - - var success int = 0 - if len(errors) == 0 { - success = 1 - } - - response := DeleteResponse{ - Success: success, - PodName: req.PodName, - Errors: errors, - } - - if len(errors) > 0 { - c.JSON(http.StatusPartialContent, response) - } else { - c.JSON(http.StatusOK, response) - } -} - -func waitForEmptyPool(config *proxmox.ProxmoxConfig, poolid string) error { - backoff := time.Second - maxBackoff := 30 * time.Second - timeout := 5 * time.Minute - startTime := time.Now() - - for { - if time.Since(startTime) > timeout { - return fmt.Errorf("failed to delete all resource pool members: timeout") - } else { - poolMembers, err := proxmox.GetPoolMembers(config, poolid) - - if err != nil { - return fmt.Errorf("failed to get resource pool members: %v", err) - } - - if len(poolMembers) == 0 { - log.Printf("%s contains no members, proceeding with pool deletion.", poolid) - return nil - } - time.Sleep(backoff) - backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff))) - } - } -} diff --git a/proxmox/cloning/locking/mutexLock.go b/proxmox/cloning/locking/mutexLock.go deleted file mode 100644 index adcbcc6..0000000 --- a/proxmox/cloning/locking/mutexLock.go +++ /dev/null @@ -1,45 +0,0 @@ -package locking - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/bsm/redislock" - "github.com/redis/go-redis/v9" -) - -var ( - rdb = redis.NewClient(&redis.Options{ - Addr: os.Getenv("REDIS_ADDR"), - Password: os.Getenv("REDIS_PASSWORD"), - DB: 0, - }) -) - -// try to acquire a redis mutex lock for an allotted amount of time with specified backoff -func TryAcquireLockWithBackoff(ctx context.Context, lockKey string, ttl time.Duration, maxAttempts int, initialBackoff time.Duration) (*redislock.Lock, error) { - locker := redislock.New(rdb) - backoff := initialBackoff - - for attempt := 1; attempt <= maxAttempts; attempt++ { - lock, err := locker.Obtain(ctx, lockKey, ttl, nil) - if err == nil { - return lock, nil - } - - if err == redislock.ErrNotObtained { - if attempt == maxAttempts { - break - } - time.Sleep(backoff) - backoff *= 2 - continue - } - - return nil, fmt.Errorf("unexpected error while acquiring lock: %v", err) - } - - return nil, fmt.Errorf("could not obtain lock %q after %d attempts", lockKey, maxAttempts) -} diff --git a/proxmox/cloning/networking.go b/proxmox/cloning/networking.go deleted file mode 100644 index 7a89f26..0000000 --- a/proxmox/cloning/networking.go +++ /dev/null @@ -1,337 +0,0 @@ -package cloning - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "math" - "net/http" - "regexp" - "time" - - "github.com/P-E-D-L/proclone/proxmox" -) - -type VNetResponse struct { - VnetArray []VNet `json:"data"` -} - -type VNet struct { - Type string `json:"type"` - Name string `json:"vnet"` - Tag int `json:"tag,omitempty"` - Alias string `json:"alias,omitempty"` - Zone string `json:"zone"` - VlanAware int `json:"vlanaware,omitempty"` -} - -type Config struct { - HardDisk string `json:"scsi0"` - Net0 string `json:"net0"` - Net1 string `json:"net1,omitempty"` -} - -type ConfigResponse struct { - Data Config `json:"data"` - Success int `json:"success"` -} - -const POD_VLAN_BASE int = 1800 -const SDN_ZONE string = "MainZone" -const WAN_SCRIPT_PATH string = "/home/update-wan-ip.sh" -const VIP_SCRIPT_PATH string = "/home/update-wan-vip.sh" -const WAN_IP_BASE string = "172.16." 
- -/* - * ----- SETS THE WAN IP ADDRESS OF A POD ROUTER ----- - * depends on the pfSense router template having a qemu agent installed and enabled - */ -func configurePodRouter(config *proxmox.ProxmoxConfig, podNum int, node string, vmid int) error { - // Create HTTP client with SSL verification based on config - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: !config.VerifySSL}, - } - client := &http.Client{Transport: tr} - - // wait for router agent to be pingable - - statusPath := fmt.Sprintf("api2/json/nodes/%s/qemu/%d/agent/ping", node, vmid) - - backoff := time.Second - maxBackoff := 30 * time.Second - timeout := 5 * time.Minute - startTime := time.Now() - - for { - if time.Since(startTime) > timeout { - return fmt.Errorf("router qemu agent timed out") - } - - statusCode, _, err := proxmox.MakeRequest(config, statusPath, "POST", nil, client) - if err != nil { - return fmt.Errorf("") - } - - if statusCode == http.StatusOK { - break - } - - time.Sleep(backoff) - backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff))) - } - - // configure router WAN ip to have correct third octet using qemu agent api call - - execPath := fmt.Sprintf("api2/json/nodes/%s/qemu/%d/agent/exec", node, vmid) - - // define json data holding new WAN IP - reqBody := map[string]interface{}{ - "command": []string{ - WAN_SCRIPT_PATH, - fmt.Sprintf("%s%d.1", WAN_IP_BASE, podNum), - }, - } - - jsonBody, err := json.Marshal(reqBody) - if err != nil { - return fmt.Errorf("failed to create ip request body: %v", err) - } - - statusCode, body, err := proxmox.MakeRequest(config, execPath, "POST", jsonBody, client) - if err != nil { - return fmt.Errorf("failed to make IP change request: %v", err) - } - - // handle response and return - if statusCode != http.StatusOK { - return fmt.Errorf("qemu agent failed to execute ip change script on router: %s", string(body)) - } - - // SEND AGENT EXEC REQUEST TO CHANGE VIP SUBNET - - // define json data holding new VIP subnet - reqBody = map[string]interface{}{ - "command": []string{ - VIP_SCRIPT_PATH, - fmt.Sprintf("%s%d.0", WAN_IP_BASE, podNum), - }, - } - - jsonBody, err = json.Marshal(reqBody) - if err != nil { - return fmt.Errorf("failed to create vip request body: %v", err) - } - - statusCode, body, err = proxmox.MakeRequest(config, execPath, "POST", jsonBody, client) - if err != nil { - return fmt.Errorf("failed to make VIP change request: %v", err) - } - - // handle response and return - if statusCode != http.StatusOK { - return fmt.Errorf("qemu agent failed to execute vip change script on router: %s", string(body)) - } - - return nil -} - -/* - * ----- CHECK BY NAME FOR VNET ALREADY IN CLUSTER ----- - */ -func checkForVnet(config *proxmox.ProxmoxConfig, podID int) (exists bool, err error) { - vnetPath := "api2/json/cluster/sdn/vnets" - - _, body, err := proxmox.MakeRequest(config, vnetPath, "GET", nil, nil) - if err != nil { - return false, fmt.Errorf("failed to request vnets: %v", err) - } - - // Parse response into VMResponse struct - var apiResp VNetResponse - if err := json.Unmarshal(body, &apiResp); err != nil { - return false, fmt.Errorf("failed to parse vnet response: %v", err) - } - - // iterate through list of vnets and compare with desired vnet name - vnetName := fmt.Sprintf("kamino%d", podID) - - for _, vnet := range apiResp.VnetArray { - if vnet.Name == vnetName { - return true, nil - } - } - - return false, nil -} - -/* - * ----- CREATE NEW VNET OBJECT IN THE CLUSTER SDN ----- - * SDN must be refreshed for new vnet to be 
used by pods - */ -func addVNetObject(config *proxmox.ProxmoxConfig, podID int) (vnet string, err error) { - - // Prepare VNet URL - vnetPath := "api2/json/cluster/sdn/vnets" - - podVlan := POD_VLAN_BASE + podID - - // define json data holding new VNet parameters - reqBody := map[string]interface{}{ - "vnet": fmt.Sprintf("kamino%d", podID), - "zone": SDN_ZONE, - "alias": fmt.Sprintf("%d_pod-vnet", podVlan), - "tag": podVlan, - "vlanaware": true, - } - - jsonBody, err := json.Marshal(reqBody) - if err != nil { - return "", fmt.Errorf("failed to create request body: %v", err) - } - - statusCode, body, err := proxmox.MakeRequest(config, vnetPath, "POST", jsonBody, nil) - if err != nil { - return "", fmt.Errorf("vnet create request failed: %v", err) - } - - // handle response and return - if statusCode != http.StatusOK { - return "", fmt.Errorf("failed to create new vnet object: %s", string(body)) - } else { - return fmt.Sprintf("kamino%d", podID), nil - } -} - -/* - * ----- APPLIES SDN CHANGES ----- - * should be called after adding or removing vnet objects - */ -func applySDNChanges(config *proxmox.ProxmoxConfig) error { - sdnPath := "api2/json/cluster/sdn" - - statusCode, body, err := proxmox.MakeRequest(config, sdnPath, "PUT", nil, nil) - if err != nil { - return fmt.Errorf("failed to apply sdn changes: %v", err) - } - - // return based on response - if statusCode != http.StatusOK { - return fmt.Errorf("failed to apply changes to sdn: %s", string(body)) - } else { - return nil - } -} - -/* - * ----- CONFIGURES NETWORK BRIDGE (VNET) FOR ALL VMS IN A POD ----- - */ -func setPodVnet(config *proxmox.ProxmoxConfig, podName string, vnet string) error { - - // Prepare VNet URL - poolPath := fmt.Sprintf("api2/json/pools/%s", podName) - - _, body, err := proxmox.MakeRequest(config, poolPath, "GET", nil, nil) - if err != nil { - return fmt.Errorf("failed to get pod pool: %v", err) - } - - var apiResp proxmox.PoolResponse - if err := json.Unmarshal(body, &apiResp); err != nil { - return fmt.Errorf("failed to parse pool response: %v", err) - } - - for _, vm := range apiResp.Data.Members { - err = updateVNet(config, &vm, vnet) - if err != nil { - return fmt.Errorf("failed to update VNet: %v", err) - } - } - - return nil -} - -// Gets config of a specific vm -func getVMConfig(config *proxmox.ProxmoxConfig, node string, vmid int) (response *ConfigResponse, err error) { - - // Prepare config URL - configPath := fmt.Sprintf("api2/extjs/nodes/%s/qemu/%d/config", node, vmid) - - _, body, err := proxmox.MakeRequest(config, configPath, "GET", nil, nil) - if err != nil { - return nil, fmt.Errorf("failed to get vm config: %v", err) - } - - // Parse response body - var apiResp ConfigResponse - if err := json.Unmarshal(body, &apiResp); err != nil { - return nil, fmt.Errorf("failed to parse vm config response: %v", err) - } - - return &apiResp, nil -} - -/* - * ----- CONFIGURE NETWORK BRIDGE FOR A SINGLE VM ----- - * automatically handles configuration of normal vms and routers - */ -func updateVNet(config *proxmox.ProxmoxConfig, vm *proxmox.VirtualResource, newBridge string) error { - // ----- get current network config ----- - - apiResp, err := getVMConfig(config, vm.NodeName, vm.VmId) - if err != nil { - return err - } - - // Handle vms with two interfaces (routers) seperately from vms with one interface - if apiResp.Data.Net1 == "" { - newConfig := replaceBridge(apiResp.Data.Net0, newBridge) - err := setNetworkBridge(config, vm, "net0", newConfig) - if err != nil { - return err - } - } else { - newConfig := 
replaceBridge(apiResp.Data.Net1, newBridge) - err := setNetworkBridge(config, vm, "net1", newConfig) - if err != nil { - return err - } - } - - return nil -} - -// helper function to replace the network bridge in a vm config using regex -func replaceBridge(netStr string, newBridge string) string { - re := regexp.MustCompile(`bridge=[^,]+`) - return re.ReplaceAllString(netStr, "bridge="+newBridge) -} - -/* - * ----- SET NETWORK BRIDGE FOR A SINGLE VM ----- - * automatically handles configuration of normal vms and routers - */ -func setNetworkBridge(config *proxmox.ProxmoxConfig, vm *proxmox.VirtualResource, net string, newConfig string) error { - // ----- set network config ----- - configPath := fmt.Sprintf("api2/extjs/nodes/%s/qemu/%d/config", vm.NodeName, vm.VmId) - - // define json data holding new VNet parameters - reqBody := map[string]interface{}{ - net: newConfig, - } - - jsonBody, err := json.Marshal(reqBody) - if err != nil { - return fmt.Errorf("failed to create request body: %v", err) - } - - statusCode, body, err := proxmox.MakeRequest(config, configPath, "PUT", jsonBody, nil) - if err != nil { - return fmt.Errorf("failed to set network bridge in vm config: %v", err) - } - - if statusCode != http.StatusOK { - return fmt.Errorf("failed to set vm config: %s", string(body)) - } - - return nil -} diff --git a/proxmox/cloning/optimization.go b/proxmox/cloning/optimization.go deleted file mode 100644 index bc015f4..0000000 --- a/proxmox/cloning/optimization.go +++ /dev/null @@ -1,82 +0,0 @@ -package cloning - -import ( - "fmt" - "math" - - "github.com/P-E-D-L/proclone/proxmox" -) - -/* - * ----- FIND OPTIMAL COMPUTE NODE FOR NEXT VM CLONE ----- - * this function factors in current memory & cpu utilization, - * total memory allocation, and vm density to decide which node - * has the best resource availability for new VMs - */ -func findBestNode(config *proxmox.ProxmoxConfig) (node string, err error) { - - // define variables and data structures to hold structured values relevant to calculating optimal node - var totalVms int - vmDensityMap := make(map[string]int) - allocatedMemoryMap := make(map[string]int64) - totalMemoryMap := make(map[string]int64) - currentMemoryMap := make(map[string]int64) - cpuUtilizationMap := make(map[string]float64) - - virtualMachines, err := proxmox.GetVirtualMachineResponse(config) - - if err != nil { - return "", fmt.Errorf("failed to get cluster resources: %v", err) - } - - // increment density and allocated memory values per vm - for _, machine := range *virtualMachines { - if machine.Template != 1 { - allocatedMemoryMap[machine.NodeName] += int64(machine.MaxMem) - vmDensityMap[machine.NodeName] += 1 - totalVms += 1 - } - } - - // set default topScore to lowest possible float32 value - var topScore float32 = -1 * math.MaxFloat32 - var bestNode string - - for _, node := range config.Nodes { - nodeStatus, err := proxmox.GetNodeStatus(config, node) - if err != nil { - return "", fmt.Errorf("failed to get node status of %s: %v", node, err) - } - - // set total and current memory values, and cpu utilization values for eaach node - totalMemoryMap[node] = nodeStatus.Memory.Total - currentMemoryMap[node] = nodeStatus.Memory.Used - cpuUtilizationMap[node] = nodeStatus.CPU - - // fraction of node memory that is currently free - freeMemRatio := 1 - float32(currentMemoryMap[node])/float32(totalMemoryMap[node]) - - // fraction of free node cpu resources - freeCpuRatio := 1 - float32(cpuUtilizationMap[node]) - - // fraction of node memory that is currently 
unallocated - unallocatedMemRatio := 1 - float32(allocatedMemoryMap[node])/float32(totalMemoryMap[node]) - - // inverse vm density value (higher is better) - inverseVmDensity := 1 - float32(vmDensityMap[node])/float32(totalVms) - - // calculate node score (higher is better) - score := - 0.40*freeMemRatio + - 0.25*freeCpuRatio + - 0.30*unallocatedMemRatio + - 0.05*inverseVmDensity - - // if node score is higher than current bestNode, update bestNode - if score > topScore { - topScore = score - bestNode = node - } - } - return bestNode, nil -} diff --git a/proxmox/cloning/pods.go b/proxmox/cloning/pods.go deleted file mode 100644 index 441ec65..0000000 --- a/proxmox/cloning/pods.go +++ /dev/null @@ -1,191 +0,0 @@ -package cloning - -import ( - "fmt" - "log" - "net/http" - "regexp" - - "github.com/P-E-D-L/proclone/auth" - "github.com/P-E-D-L/proclone/proxmox" - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - -type PodResponse struct { - Pods []PodWithVMs `json:"pods"` -} - -type PodWithVMs struct { - Name string `json:"name"` - VMs []proxmox.VirtualResource `json:"vms"` -} - -// helper function that builds a maps pod names to their VMs based on the provided regex pattern -func buildPodResponse(config *proxmox.ProxmoxConfig, regexPattern string) (*PodResponse, error) { - // get all virtual resources from proxmox - apiResp, err := proxmox.GetVirtualResources(config) - - // if error, return error - if err != nil { - return nil, err - } - - // map pod pools to their VMs - resources := apiResp - podMap := make(map[string]*PodWithVMs) - reg := regexp.MustCompile(regexPattern) - - // first pass: find all pools that are pods - for _, r := range *resources { - if r.Type == "pool" && reg.MatchString(r.ResourcePool) { - name := r.ResourcePool - podMap[name] = &PodWithVMs{ - Name: name, - VMs: []proxmox.VirtualResource{}, - } - } - } - - // second pass: map VMs to their pod pool - for _, r := range *resources { - if r.Type == "qemu" && reg.MatchString(r.ResourcePool) { - name := r.ResourcePool - if pod, ok := podMap[name]; ok { - pod.VMs = append(pod.VMs, r) - } - } - } - - // build response - var podResponse PodResponse - for _, pod := range podMap { - podResponse.Pods = append(podResponse.Pods, *pod) - } - - return &podResponse, nil -} - -/* - * ===== ADMIN ENDPOINT ===== - * This function returns a list of - * all currently deployed pods - */ -func GetPods(c *gin.Context) { - session := sessions.Default(c) - username := session.Get("username") - isAdmin := session.Get("is_admin") - - // Make sure user is authenticated (redundant) - if !isAdmin.(bool) { - log.Printf("Forbidden access attempt") - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only Admin users can see all deployed pods", - }) - return - } - - // store proxmox config - var config *proxmox.ProxmoxConfig - var err error - config, err = proxmox.LoadProxmoxConfig() - if err != nil { - log.Printf("Configuration error for user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": fmt.Sprintf("Failed to load Proxmox configuration: %v", err), - }) - return - } - - // If no proxmox host specified, return empty repsonse - if config.Host == "" { - log.Printf("No proxmox server configured") - c.JSON(http.StatusOK, proxmox.VirtualMachineResponse{VirtualMachines: []proxmox.VirtualResource{}}) - return - } - - // fetch template reponse - var podResponse *PodResponse - var error error - - // get Pod list and assign response - podResponse, error = getAdminPodResponse(config) - - // if error, return error 
status - if error != nil { - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to fetch pod list from proxmox cluster", - "details": error, - }) - return - } - - log.Printf("Successfully fetched full pod list for user %s", username) - c.JSON(http.StatusOK, podResponse) -} - -func getAdminPodResponse(config *proxmox.ProxmoxConfig) (*PodResponse, error) { - return buildPodResponse(config, `1[0-9]{3}_.*`) -} - -/* - * ===== USER ENDPOINT ===== - * This function returns a list of - * this user's deployed pods - */ -func GetUserPods(c *gin.Context) { - session := sessions.Default(c) - username := session.Get("username") - - // Make sure user is authenticated (redundant) - isAuth, _ := auth.IsAuthenticated(c) - if !isAuth { - log.Printf("Unauthorized access attempt") - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only authenticated users can see their deployed pods", - }) - return - } - - // store proxmox config - var config *proxmox.ProxmoxConfig - var err error - config, err = proxmox.LoadProxmoxConfig() - if err != nil { - log.Printf("Configuration error for user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": fmt.Sprintf("Failed to load Proxmox configuration: %v", err), - }) - return - } - - // If no proxmox host specified, return empty repsonse - if config.Host == "" { - log.Printf("No proxmox server configured") - c.JSON(http.StatusOK, proxmox.VirtualMachineResponse{VirtualMachines: []proxmox.VirtualResource{}}) - return - } - - // fetch template reponse - var podResponse *PodResponse - var error error - - // get Pod list and assign response - podResponse, error = getUserPodResponse(username.(string), config) - - // if error, return error status - if error != nil { - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to fetch user's pod list from proxmox cluster", - "details": error, - }) - return - } - - log.Printf("Successfully fetched pod list for user %s", username) - c.JSON(http.StatusOK, podResponse) -} - -func getUserPodResponse(user string, config *proxmox.ProxmoxConfig) (*PodResponse, error) { - return buildPodResponse(config, fmt.Sprintf(`1[0-9]{3}_.*_%s`, user)) -} diff --git a/proxmox/cloning/templates.go b/proxmox/cloning/templates.go deleted file mode 100644 index cc4fe81..0000000 --- a/proxmox/cloning/templates.go +++ /dev/null @@ -1,116 +0,0 @@ -package cloning - -import ( - "fmt" - "log" - "net/http" - "regexp" - - "github.com/P-E-D-L/proclone/auth" - "github.com/P-E-D-L/proclone/proxmox" - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - -type TemplateResponse struct { - Templates []TemplateWithVMs `json:"templates"` -} - -type TemplateWithVMs struct { - Name string `json:"name"` - Deployments int `json:"deployments"` - VMs []proxmox.VirtualResource `json:"vms"` -} - -/* - * ===== GET ALL CURRENT POD TEMPLATES ===== - */ -func GetAvailableTemplates(c *gin.Context) { - session := sessions.Default(c) - username := session.Get("username") - - // Make sure user is authenticated (redundant) - isAuth, _ := auth.IsAuthenticated(c) - if !isAuth { - log.Printf("Unauthorized access attempt") - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only authenticated users can access template data", - }) - return - } - - // store proxmox config - var config *proxmox.ProxmoxConfig - var err error - config, err = proxmox.LoadProxmoxConfig() - if err != nil { - log.Printf("Configuration error for user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": fmt.Sprintf("Failed to 
load Proxmox configuration: %v", err), - }) - return - } - - // If no proxmox host specified, return empty repsonse - if config.Host == "" { - log.Printf("No proxmox server configured") - c.JSON(http.StatusOK, proxmox.VirtualMachineResponse{VirtualMachines: []proxmox.VirtualResource{}}) - return - } - - // fetch template response - templateResponse, err := getTemplateResponse(config) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to fetch template list from proxmox cluster", - "details": err, - }) - return - } - - log.Printf("Successfully fetched template list for user %s", username) - c.JSON(http.StatusOK, templateResponse) -} - -func getTemplateResponse(config *proxmox.ProxmoxConfig) (*TemplateResponse, error) { - - // get all virtual resources from proxmox - resources, err := proxmox.GetVirtualResources(config) - if err != nil { - return nil, err - } - - // map template pools to their VMs - templateMap := make(map[string]*TemplateWithVMs) - reg := regexp.MustCompile(`kamino_template_.*`) - - // first pass: find all pools that are templates - for _, r := range *resources { - if r.Type == "pool" && reg.MatchString(r.ResourcePool) { - name := r.ResourcePool[16:] - templateMap[name] = &TemplateWithVMs{ - Name: name, - Deployments: 0, - VMs: []proxmox.VirtualResource{}, - } - } - } - - // second pass: map VMs to their template pool - for _, r := range *resources { - if r.Type == "qemu" && reg.MatchString(r.ResourcePool) { - name := r.ResourcePool[16:] - if template, ok := templateMap[name]; ok { - template.VMs = append(template.VMs, r) - } - } - } - - // build response - var templateResponse TemplateResponse - for _, template := range templateMap { - templateResponse.Templates = append(templateResponse.Templates, *template) - } - - return &templateResponse, nil -} diff --git a/proxmox/proxmox.go b/proxmox/proxmox.go deleted file mode 100644 index c7abc87..0000000 --- a/proxmox/proxmox.go +++ /dev/null @@ -1,93 +0,0 @@ -package proxmox - -import ( - "encoding/json" - "fmt" - "os" - "strings" -) - -// ProxmoxConfig holds the configuration for Proxmox API -type ProxmoxConfig struct { - Host string - Port string - APIToken string // API token for authentication - VerifySSL bool - Nodes []string -} - -// ProxmoxAPIResponse represents the generic Proxmox API response structure -type ProxmoxAPIResponse struct { - Data json.RawMessage `json:"data"` -} - -// ProxmoxNodeStatus represents the status response from a Proxmox node -type ProxmoxNodeStatus struct { - CPU float64 `json:"cpu"` - Memory struct { - Total int64 `json:"total"` - Used int64 `json:"used"` - } `json:"memory"` -} - -// LoadProxmoxConfig loads and validates Proxmox configuration from environment variables -func LoadProxmoxConfig() (*ProxmoxConfig, error) { - tokenID := os.Getenv("PROXMOX_TOKEN_ID") // The token ID including user and realm - tokenSecret := os.Getenv("PROXMOX_TOKEN_SECRET") // The secret part of the token - - if tokenID == "" { - return nil, fmt.Errorf("PROXMOX_TOKEN_ID is required") - } - if tokenSecret == "" { - return nil, fmt.Errorf("PROXMOX_TOKEN_SECRET is required") - } - - config := &ProxmoxConfig{ - Host: os.Getenv("PROXMOX_SERVER"), - Port: os.Getenv("PROXMOX_PORT"), - APIToken: fmt.Sprintf("%s=%s", tokenID, tokenSecret), - VerifySSL: os.Getenv("PROXMOX_VERIFY_SSL") == "true", - } - - // Validate required fields - if config.Host == "" { - return nil, fmt.Errorf("PROXMOX_SERVER is required") - } - if config.Port == "" { - config.Port = "443" // Default port - } - - // Parse 
nodes list - nodesStr := os.Getenv("PROXMOX_NODES") - if nodesStr != "" { - config.Nodes = strings.Split(nodesStr, ",") - } - - return config, nil -} - -// GetNodeStatus fetches the status of a single Proxmox node -func GetNodeStatus(config *ProxmoxConfig, nodeName string) (*ProxmoxNodeStatus, error) { - - // Prepare status endpoint path - path := fmt.Sprintf("api2/json/nodes/%s/status", nodeName) - - _, body, err := MakeRequest(config, path, "GET", nil, nil) - if err != nil { - return nil, fmt.Errorf("proxmox node status request failed: %v", err) - } - - // Parse response - var apiResp ProxmoxAPIResponse - if err := json.Unmarshal(body, &apiResp); err != nil { - return nil, fmt.Errorf("failed to parse status response: %v", err) - } - - // Extract status from response - var status ProxmoxNodeStatus - if err := json.Unmarshal(apiResp.Data, &status); err != nil { - return nil, fmt.Errorf("failed to extract status from response: %v", err) - } - - return &status, nil -} diff --git a/proxmox/requests.go b/proxmox/requests.go deleted file mode 100644 index 96a7ee9..0000000 --- a/proxmox/requests.go +++ /dev/null @@ -1,59 +0,0 @@ -package proxmox - -import ( - "bytes" - "crypto/tls" - "fmt" - "io" - "net/http" -) - -// kind should be "GET", "DELETE", "POST", or "PUT", jsonData and httpClient can be nil -func MakeRequest(config *ProxmoxConfig, path string, kind string, jsonData []byte, httpClient *http.Client) (int, []byte, error) { - if !(kind == "GET" || kind == "DELETE" || kind == "POST" || kind == "PUT") { - return 0, nil, fmt.Errorf("invalid REST method passed: %s", kind) - } - - var client *http.Client = nil - if httpClient == nil { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: !config.VerifySSL}, - } - client = &http.Client{Transport: tr} - } else { - client = httpClient - } - - reqURL := fmt.Sprintf("https://%s:%s/%s", config.Host, config.Port, path) - - var bodyReader io.Reader = nil - - if jsonData != nil { - bodyReader = bytes.NewBuffer(jsonData) - } - - req, err := http.NewRequest(kind, reqURL, bodyReader) - if err != nil { - return 0, nil, fmt.Errorf("failed to create %s request: %v", kind, err) - } - - if jsonData != nil { - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Accept", "application/json") - } - - req.Header.Set("Authorization", fmt.Sprintf("PVEAPIToken=%s", config.APIToken)) - - resp, err := client.Do(req) - if err != nil { - return 0, nil, fmt.Errorf("failed to make request: %v", err) - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return resp.StatusCode, nil, fmt.Errorf("failed to read response body: %v", err) - } - - return resp.StatusCode, body, nil -} diff --git a/proxmox/resources.go b/proxmox/resources.go deleted file mode 100644 index c2217fa..0000000 --- a/proxmox/resources.go +++ /dev/null @@ -1,173 +0,0 @@ -package proxmox - -import ( - "fmt" - "log" - "net/http" - - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - -// NodeResourceUsage represents the resource usage metrics for a single node -type NodeResourceUsage struct { - NodeName string `json:"node_name"` - CPUUsage float64 `json:"cpu_usage"` // CPU usage percentage - MemoryTotal int64 `json:"memory_total"` // Total memory in bytes - MemoryUsed int64 `json:"memory_used"` // Used memory in bytes - StorageTotal int64 `json:"storage_total"` // Total storage in bytes - StorageUsed int64 `json:"storage_used"` // Used storage in bytes -} - -// ResourceUsageResponse represents the API response containing 
resource usage for all nodes -type ResourceUsageResponse struct { - Nodes []NodeResourceUsage `json:"nodes"` - Cluster struct { - TotalCPUUsage float64 `json:"total_cpu_usage"` // Average CPU usage across all nodes - TotalMemoryTotal int64 `json:"total_memory_total"` // Total memory across all nodes - TotalMemoryUsed int64 `json:"total_memory_used"` // Total used memory across all nodes - TotalStorageTotal int64 `json:"total_storage_total"` // Total storage across all nodes - TotalStorageUsed int64 `json:"total_storage_used"` // Total used storage across all nodes - } `json:"cluster"` - Errors []string `json:"errors,omitempty"` -} - -func GetProxmoxResources(c *gin.Context) { - // Get session - session := sessions.Default(c) - username := session.Get("username") - isAdmin := session.Get("is_admin") - - // Double check admin status (although middleware should have caught this) - if !isAdmin.(bool) { - log.Printf("Unauthorized access attempt by user %s", username) - c.JSON(http.StatusForbidden, gin.H{ - "error": "Only admin users can access resource usage data", - }) - return - } - - // Load Proxmox configuration - config, err := LoadProxmoxConfig() - if err != nil { - log.Printf("Configuration error for user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": fmt.Sprintf("Failed to load Proxmox configuration: %v", err), - }) - return - } - - // If no nodes specified, return empty response - if len(config.Nodes) == 0 { - log.Printf("No nodes configured for user %s", username) - c.JSON(http.StatusOK, ResourceUsageResponse{Nodes: []NodeResourceUsage{}}) - return - } - - // Fetch status for each node - var nodes []NodeResourceUsage - var errors []string - response := ResourceUsageResponse{} - - VirtualResources, err := GetVirtualResources(config) - - if err != nil { - log.Printf("Failed to get proxmox cluster resources: %v", err) - c.JSON(http.StatusInternalServerError, gin.H{ - "error": fmt.Sprintf("Failed to get proxmox cluster resources: %v", err), - }) - return - } - - for _, nodeName := range config.Nodes { - status, err := GetNodeStatus(config, nodeName) - if err != nil { - errorMsg := fmt.Sprintf("Error fetching status for node %s: %v", nodeName, err) - log.Printf("%s", errorMsg) - errors = append(errors, errorMsg) - continue - } - - usedStorage, totalStorage := getNodeStorage(VirtualResources, nodeName) - - nodes = append(nodes, NodeResourceUsage{ - NodeName: nodeName, - CPUUsage: status.CPU, - MemoryTotal: status.Memory.Total, - MemoryUsed: status.Memory.Used, - StorageTotal: int64(totalStorage), - StorageUsed: int64(usedStorage), - }) - - // Add to cluster totals - response.Cluster.TotalMemoryTotal += status.Memory.Total - response.Cluster.TotalMemoryUsed += status.Memory.Used - response.Cluster.TotalStorageTotal += int64(totalStorage) - response.Cluster.TotalStorageUsed += int64(usedStorage) - response.Cluster.TotalCPUUsage += status.CPU - } - - // Get NAS storage and add that to cluster capacity - usedStorage, totalStorage := getStorage(VirtualResources, "mufasa-proxmox") - - response.Cluster.TotalStorageTotal += int64(totalStorage) - response.Cluster.TotalStorageUsed += int64(usedStorage) - - // Calculate average CPU usage for the cluster - if len(nodes) > 0 { - response.Cluster.TotalCPUUsage /= float64(len(nodes)) - } - - response.Nodes = nodes - response.Errors = errors - - // If we have any errors but also some successful responses, include errors in response - if len(errors) > 0 && len(nodes) > 0 { - c.JSON(http.StatusPartialContent, response) - 
-
-func GetProxmoxResources(c *gin.Context) {
-	// Get session
-	session := sessions.Default(c)
-	username := session.Get("username")
-	isAdmin := session.Get("is_admin")
-
-	// Double-check admin status (middleware should already have caught this);
-	// the ok-check also guards against a missing session value
-	if admin, ok := isAdmin.(bool); !ok || !admin {
-		log.Printf("Unauthorized access attempt by user %s", username)
-		c.JSON(http.StatusForbidden, gin.H{
-			"error": "Only admin users can access resource usage data",
-		})
-		return
-	}
-
-	// Load Proxmox configuration
-	config, err := LoadProxmoxConfig()
-	if err != nil {
-		log.Printf("Configuration error for user %s: %v", username, err)
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error": fmt.Sprintf("Failed to load Proxmox configuration: %v", err),
-		})
-		return
-	}
-
-	// If no nodes are specified, return an empty response
-	if len(config.Nodes) == 0 {
-		log.Printf("No nodes configured for user %s", username)
-		c.JSON(http.StatusOK, ResourceUsageResponse{Nodes: []NodeResourceUsage{}})
-		return
-	}
-
-	// Fetch status for each node
-	var nodes []NodeResourceUsage
-	var errors []string
-	response := ResourceUsageResponse{}
-
-	virtualResources, err := GetVirtualResources(config)
-	if err != nil {
-		log.Printf("Failed to get proxmox cluster resources: %v", err)
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error": fmt.Sprintf("Failed to get proxmox cluster resources: %v", err),
-		})
-		return
-	}
-
-	for _, nodeName := range config.Nodes {
-		status, err := GetNodeStatus(config, nodeName)
-		if err != nil {
-			errorMsg := fmt.Sprintf("Error fetching status for node %s: %v", nodeName, err)
-			log.Printf("%s", errorMsg)
-			errors = append(errors, errorMsg)
-			continue
-		}
-
-		usedStorage, totalStorage := getNodeStorage(virtualResources, nodeName)
-
-		nodes = append(nodes, NodeResourceUsage{
-			NodeName:     nodeName,
-			CPUUsage:     status.CPU,
-			MemoryTotal:  status.Memory.Total,
-			MemoryUsed:   status.Memory.Used,
-			StorageTotal: totalStorage,
-			StorageUsed:  usedStorage,
-		})
-
-		// Add to cluster totals
-		response.Cluster.TotalMemoryTotal += status.Memory.Total
-		response.Cluster.TotalMemoryUsed += status.Memory.Used
-		response.Cluster.TotalStorageTotal += totalStorage
-		response.Cluster.TotalStorageUsed += usedStorage
-		response.Cluster.TotalCPUUsage += status.CPU
-	}
-
-	// Get NAS storage and add it to cluster capacity
-	// NOTE: the shared NAS storage ID is hardcoded here
-	usedStorage, totalStorage := getStorage(virtualResources, "mufasa-proxmox")
-	response.Cluster.TotalStorageTotal += totalStorage
-	response.Cluster.TotalStorageUsed += usedStorage
-
-	// Calculate average CPU usage for the cluster
-	if len(nodes) > 0 {
-		response.Cluster.TotalCPUUsage /= float64(len(nodes))
-	}
-
-	response.Nodes = nodes
-	response.Errors = errors
-
-	// If we have some errors but also some successful responses, return 206 with the errors included
-	if len(errors) > 0 && len(nodes) > 0 {
-		c.JSON(http.StatusPartialContent, response)
-		return
-	}
-
-	// If we have only errors, return an error status
-	if len(errors) > 0 {
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error":   "Failed to fetch resource usage for any nodes",
-			"details": errors,
-		})
-		return
-	}
-
-	// Success case
-	log.Printf("Successfully fetched resource usage for user %s", username)
-	c.JSON(http.StatusOK, response)
-}
-
-// getNodeStorage sums the local storage ("local" and "local-lvm") reported for a node
-func getNodeStorage(resources *[]VirtualResource, node string) (used int64, total int64) {
-	for _, r := range *resources {
-		if r.Type == "storage" && r.NodeName == node &&
-			(r.Storage == "local" || r.Storage == "local-lvm") &&
-			r.RunningStatus == "available" {
-			used += r.Disk
-			total += r.MaxDisk
-		}
-	}
-	log.Printf("%s has used %d of its %d local storage", node, used, total)
-	return used, total
-}
-
-// getStorage returns usage for a single named storage anywhere in the cluster
-func getStorage(resources *[]VirtualResource, storage string) (used int64, total int64) {
-	for _, r := range *resources {
-		if r.Type == "storage" && r.Storage == storage && r.RunningStatus == "available" {
-			used = r.Disk
-			total = r.MaxDisk
-			break
-		}
-	}
-	log.Printf("The cluster has used %d of its %d total storage", used, total)
-	return used, total
-}
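These are plain gin handlers, but this diff does not show how they were mounted. A sketch of plausible wiring that reuses the auth package's middleware (the route paths, session name, secret handling, and module path are all assumptions; the VM handlers come from vms.go below):

```go
package main

import (
	"github.com/gin-contrib/sessions"
	"github.com/gin-contrib/sessions/cookie"
	"github.com/gin-gonic/gin"

	"example.com/kamino/auth" // hypothetical module path
	"example.com/kamino/proxmox"
)

func main() {
	r := gin.Default()
	// In practice the store key should come from SECRET_KEY, not a literal.
	r.Use(sessions.Sessions("session", cookie.NewStore([]byte("change-me"))))

	admin := r.Group("/api", auth.AuthRequired, auth.AdminRequired)
	admin.GET("/resources", proxmox.GetProxmoxResources)
	admin.GET("/vms", proxmox.GetVirtualMachines)
	admin.POST("/vms/start", proxmox.PowerOnVirtualMachine)
	admin.POST("/vms/shutdown", proxmox.PowerOffVirtualMachine)

	r.Run(":8080")
}
```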
diff --git a/proxmox/vms.go b/proxmox/vms.go
deleted file mode 100644
index d9b65dc..0000000
--- a/proxmox/vms.go
+++ /dev/null
@@ -1,560 +0,0 @@
-package proxmox
-
-import (
-	"crypto/tls"
-	"encoding/json"
-	"fmt"
-	"log"
-	"math"
-	"net/http"
-	"strconv"
-	"time"
-
-	"github.com/gin-contrib/sessions"
-	"github.com/gin-gonic/gin"
-)
-
-const CRITICAL_POOL string = "0030_Critical"
-
-type VMResponse struct {
-	Data []VirtualResource `json:"data"`
-}
-
-type VirtualResource struct {
-	CPU           float64 `json:"cpu,omitempty"`
-	MaxCPU        int     `json:"maxcpu,omitempty"`
-	Mem           int     `json:"mem,omitempty"`
-	MaxMem        int     `json:"maxmem,omitempty"`
-	Type          string  `json:"type,omitempty"`
-	Id            string  `json:"id,omitempty"`
-	Name          string  `json:"name,omitempty"`
-	NodeName      string  `json:"node,omitempty"`
-	ResourcePool  string  `json:"pool,omitempty"`
-	RunningStatus string  `json:"status,omitempty"`
-	Uptime        int     `json:"uptime,omitempty"`
-	VmId          int     `json:"vmid,omitempty"`
-	Storage       string  `json:"storage,omitempty"`
-	Disk          int64   `json:"disk,omitempty"`
-	MaxDisk       int64   `json:"maxdisk,omitempty"`
-	Template      int     `json:"template,omitempty"`
-}
-
-type VirtualMachineResponse struct {
-	VirtualMachines     []VirtualResource `json:"virtual_machines"`
-	VirtualMachineCount int               `json:"virtual_machine_count"`
-	RunningCount        int               `json:"running_count"`
-}
-
-type VM struct {
-	VMID int    `json:"vmid" binding:"required"`
-	Node string `json:"node" binding:"required"`
-}
-
-type VMPower struct {
-	Success int    `json:"success"`
-	Data    string `json:"data"`
-}
-
-type VMPowerResponse struct {
-	Success int `json:"success"`
-}
-
-type PoolResponse struct {
-	Data Pool `json:"data"`
-}
-
-type Pool struct {
-	Poolid  string            `json:"poolid"`
-	Members []VirtualResource `json:"members"`
-}
-
-/*
- * ===== GET ALL VIRTUAL MACHINES =====
- */
-func GetVirtualMachines(c *gin.Context) {
-	session := sessions.Default(c)
-	username := session.Get("username")
-	isAdmin := session.Get("is_admin")
-
-	// Make sure the user is an admin (redundant with middleware)
-	if admin, ok := isAdmin.(bool); !ok || !admin {
-		log.Printf("Unauthorized access attempt by user %s", username)
-		c.JSON(http.StatusForbidden, gin.H{
-			"error": "Only admin users can access vm data",
-		})
-		return
-	}
-
-	// Load Proxmox configuration
-	config, err := LoadProxmoxConfig()
-	if err != nil {
-		log.Printf("Configuration error for user %s: %v", username, err)
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error": fmt.Sprintf("Failed to load Proxmox configuration: %v", err),
-		})
-		return
-	}
-
-	// If no Proxmox host is specified, return an empty response
-	if config.Host == "" {
-		log.Printf("No proxmox server configured")
-		c.JSON(http.StatusOK, VirtualMachineResponse{VirtualMachines: []VirtualResource{}})
-		return
-	}
-
-	// Fetch all virtual machines and include them in the response
-	response := VirtualMachineResponse{}
-	virtualMachines, err := GetVirtualMachineResponse(config)
-	if err != nil {
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error":   "Failed to fetch vm list from proxmox cluster",
-			"details": err.Error(),
-		})
-		return
-	}
-	response.VirtualMachines = *virtualMachines
-
-	// Include the total number of virtual machines in the response
-	response.VirtualMachineCount = len(*virtualMachines)
-
-	// Include the number of running virtual machines in the response
-	for _, vm := range *virtualMachines {
-		if vm.RunningStatus == "running" {
-			response.RunningCount++
-		}
-	}
-
-	log.Printf("Successfully fetched vm list for user %s", username)
-	c.JSON(http.StatusOK, response)
-}
-
-// GetVirtualResources fetches every resource (VMs, storage, nodes) on the proxmox cluster
-func GetVirtualResources(config *ProxmoxConfig) (*[]VirtualResource, error) {
-	path := "api2/json/cluster/resources"
-
-	_, body, err := MakeRequest(config, path, "GET", nil, nil)
-	if err != nil {
-		return nil, fmt.Errorf("proxmox cluster resource request failed: %v", err)
-	}
-
-	// Parse response into VMResponse struct
-	var apiResp VMResponse
-	if err := json.Unmarshal(body, &apiResp); err != nil {
-		return nil, fmt.Errorf("failed to parse status response: %v", err)
-	}
-
-	return &apiResp.Data, nil
-}
-
-// GetVirtualMachineResponse returns all VMs except those in the critical resource pool
-func GetVirtualMachineResponse(config *ProxmoxConfig) (*[]VirtualResource, error) {
-	// Get all virtual resources from proxmox
-	apiResp, err := GetVirtualResources(config)
-	if err != nil {
-		return nil, err
-	}
-
-	// Extract virtual machines from the response
-	var vms []VirtualResource
-	for _, r := range *apiResp {
-		// Don't return VMs in the critical resource pool, for security
-		if r.Type == "qemu" && r.ResourcePool != CRITICAL_POOL {
-			vms = append(vms, r)
-		}
-	}
-
-	return &vms, nil
-}
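Since `VirtualResource` mirrors entries from Proxmox's `cluster/resources` endpoint, the struct tags can be sanity-checked against a trimmed, hand-written payload. All values below are invented for illustration:

```go
package proxmox

import (
	"encoding/json"
	"fmt"
)

// decodeSample unmarshals a trimmed, hand-written cluster/resources payload
// into VMResponse to exercise the struct tags above.
func decodeSample() error {
	const sample = `{"data":[
		{"type":"qemu","vmid":101,"node":"node1","name":"web01","status":"running","pool":"0010_General","maxmem":4294967296},
		{"type":"storage","node":"node1","storage":"local-lvm","status":"available","disk":107374182400,"maxdisk":536870912000}
	]}`

	var resp VMResponse
	if err := json.Unmarshal([]byte(sample), &resp); err != nil {
		return fmt.Errorf("decode sample: %w", err)
	}
	for _, r := range resp.Data {
		fmt.Println(r.Type, r.Name, r.RunningStatus)
	}
	return nil
}
```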
-
-/*
- * ====== POWERING OFF VIRTUAL MACHINES ======
- * POST requires "vmid" and "node" fields
- */
-func PowerOffVirtualMachine(c *gin.Context) {
-	session := sessions.Default(c)
-	username := session.Get("username")
-	isAdmin := session.Get("is_admin")
-
-	// Make sure the user is an admin (redundant with middleware)
-	if admin, ok := isAdmin.(bool); !ok || !admin {
-		log.Printf("Unauthorized access attempt by user %s", username)
-		c.JSON(http.StatusForbidden, gin.H{
-			"error": "Only admin users can access vm data",
-		})
-		return
-	}
-
-	// Load Proxmox configuration
-	config, err := LoadProxmoxConfig()
-	if err != nil {
-		log.Printf("Configuration error for user %s: %v", username, err)
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error": fmt.Sprintf("Failed to load Proxmox configuration: %v", err),
-		})
-		return
-	}
-
-	// A power request cannot proceed without a configured host and nodes
-	if config.Host == "" || len(config.Nodes) == 0 {
-		log.Printf("No proxmox server or nodes configured")
-		c.JSON(http.StatusInternalServerError, gin.H{"error": "Proxmox host/nodes not configured"})
-		return
-	}
-
-	// Bind req.VMID and req.Node from the request body
-	var req VM
-	if err := c.ShouldBindJSON(&req); err != nil {
-		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: must include 'vmid' and 'node'"})
-		return
-	}
-
-	// Log the request on the backend
-	log.Printf("User %s requested to power off VM %d on node %s", username, req.VMID, req.Node)
-
-	response, err := PowerOffRequest(config, req)
-	if err != nil {
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error":   "Failed to power off virtual machine",
-			"details": err.Error(),
-		})
-		return
-	}
-
-	if response.Success == 1 {
-		log.Printf("Successfully powered down VMID %d for %s", req.VMID, username)
-	} else {
-		log.Printf("Failed to power down VMID %d for %s", req.VMID, username)
-	}
-	c.JSON(http.StatusOK, response)
-}
-
-func PowerOffRequest(config *ProxmoxConfig, vm VM) (*VMPower, error) {
-	// ----- SECURITY CHECK -----
-	// Make sure the VM is not in the critical pool
-	criticalMembers, err := GetPoolMembers(config, CRITICAL_POOL)
-	if err != nil {
-		return nil, fmt.Errorf("could not verify members of critical pool: %v", err)
-	}
-
-	// Refuse if the check errors or the VM is critical
-	if isCritical, err := isVmCritical(vm, &criticalMembers); err != nil || isCritical {
-		return nil, fmt.Errorf("not authorized to power off VMID %d: %v", vm.VMID, err)
-	}
-
-	path := fmt.Sprintf("api2/extjs/nodes/%s/qemu/%s/status/shutdown", vm.Node, strconv.Itoa(vm.VMID))
-
-	_, body, err := MakeRequest(config, path, "POST", nil, nil)
-	if err != nil {
-		return nil, fmt.Errorf("vm power off request failed: %v", err)
-	}
-
-	// Parse response
-	var apiResp VMPower
-	if err := json.Unmarshal(body, &apiResp); err != nil {
-		return nil, fmt.Errorf("failed to parse VM shutdown response: %v", err)
-	}
-
-	return &apiResp, nil
-}
-
-// StopRequest hard-stops a VM immediately rather than requesting a guest
-// shutdown; note it performs no critical-pool check of its own
-func StopRequest(config *ProxmoxConfig, vm VM) (*VMPower, error) {
-	path := fmt.Sprintf("api2/extjs/nodes/%s/qemu/%s/status/stop", vm.Node, strconv.Itoa(vm.VMID))
-
-	_, body, err := MakeRequest(config, path, "POST", nil, nil)
-	if err != nil {
-		return nil, fmt.Errorf("vm stop request failed: %v", err)
-	}
-
-	// Parse response
-	var apiResp VMPower
-	if err := json.Unmarshal(body, &apiResp); err != nil {
-		return nil, fmt.Errorf("failed to parse VM stop response: %v", err)
-	}
-
-	return &apiResp, nil
-}
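`PowerOffRequest` issues a guest shutdown, which an unresponsive guest can ignore, and `StopRequest` carries no critical-pool check of its own. One way a caller might combine them with `WaitForStopped` (defined further down) into an escalating power-off; this helper is a sketch, not part of the original file:

```go
package proxmox

import "log"

// shutdownOrStop asks the guest to shut down, waits for it to stop, and
// hard-stops it if it is still running. Because PowerOffRequest already
// performed the critical-pool check, the fallback stop is safe to issue.
func shutdownOrStop(config *ProxmoxConfig, vm VM) error {
	if _, err := PowerOffRequest(config, vm); err != nil {
		return err
	}
	if err := WaitForStopped(config, vm); err != nil {
		log.Printf("VM %d did not stop cleanly, forcing stop: %v", vm.VMID, err)
		_, err = StopRequest(config, vm)
		return err
	}
	return nil
}
```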
-
-/*
- * ====== POWERING ON VIRTUAL MACHINES ======
- * POST requires "vmid" and "node" fields
- */
-func PowerOnVirtualMachine(c *gin.Context) {
-	session := sessions.Default(c)
-	username := session.Get("username")
-	isAdmin := session.Get("is_admin")
-
-	// Make sure the user is an admin (redundant with middleware)
-	if admin, ok := isAdmin.(bool); !ok || !admin {
-		log.Printf("Unauthorized access attempt by user %s", username)
-		c.JSON(http.StatusForbidden, gin.H{
-			"error": "Only admin users can access vm data",
-		})
-		return
-	}
-
-	// Load Proxmox configuration
-	config, err := LoadProxmoxConfig()
-	if err != nil {
-		log.Printf("Configuration error for user %s: %v", username, err)
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error": fmt.Sprintf("Failed to load Proxmox configuration: %v", err),
-		})
-		return
-	}
-
-	// A power request cannot proceed without a configured host and nodes
-	if config.Host == "" || len(config.Nodes) == 0 {
-		log.Printf("No proxmox server or nodes configured")
-		c.JSON(http.StatusInternalServerError, gin.H{"error": "Proxmox host/nodes not configured"})
-		return
-	}
-
-	// Bind req.VMID and req.Node from the request body
-	var req VM
-	if err := c.ShouldBindJSON(&req); err != nil {
-		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: must include 'vmid' and 'node'"})
-		return
-	}
-
-	// Log the request on the backend
-	log.Printf("User %s requested to power on VM %d on node %s", username, req.VMID, req.Node)
-
-	response, err := PowerOnRequest(config, req)
-	if err != nil {
-		c.JSON(http.StatusInternalServerError, gin.H{
-			"error":   "Failed to power on virtual machine",
-			"details": err.Error(),
-		})
-		return
-	}
-
-	if response.Success == 1 {
-		log.Printf("Successfully powered on VMID %d for %s", req.VMID, username)
-	} else {
-		log.Printf("Failed to power on VMID %d for %s", req.VMID, username)
-	}
-	c.JSON(http.StatusOK, response)
-}
-
-func PowerOnRequest(config *ProxmoxConfig, vm VM) (*VMPower, error) {
-	// ----- SECURITY CHECK -----
-	// Make sure the VM is not in the critical pool
-	criticalMembers, err := GetPoolMembers(config, CRITICAL_POOL)
-	if err != nil {
-		return nil, fmt.Errorf("could not verify members of critical pool: %v", err)
-	}
-
-	// Refuse if the check errors or the VM is critical
-	if isCritical, err := isVmCritical(vm, &criticalMembers); err != nil || isCritical {
-		return nil, fmt.Errorf("not authorized to power on VMID %d: %v", vm.VMID, err)
-	}
-
-	path := fmt.Sprintf("api2/extjs/nodes/%s/qemu/%s/status/start", vm.Node, strconv.Itoa(vm.VMID))
-
-	_, body, err := MakeRequest(config, path, "POST", nil, nil)
-	if err != nil {
-		return nil, fmt.Errorf("vm start request failed: %v", err)
-	}
-
-	// Parse response
-	var apiResp VMPower
-	if err := json.Unmarshal(body, &apiResp); err != nil {
-		return nil, fmt.Errorf("failed to parse VM start response: %v", err)
-	}
-
-	return &apiResp, nil
-}
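Like the shutdown path, `PowerOnRequest` only confirms that Proxmox accepted the task. Pairing it with `WaitForRunning` (defined just below) blocks until the guest actually reports `running`; again a sketch rather than original code:

```go
package proxmox

// powerOnAndWait starts a VM and polls status/current until the guest
// reports "running" or WaitForRunning's timeout elapses.
func powerOnAndWait(config *ProxmoxConfig, vm VM) error {
	if _, err := PowerOnRequest(config, vm); err != nil {
		return err
	}
	return WaitForRunning(config, vm)
}
```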
-
-// TODO: refactor to use MakeRequest; making MakeRequest variadic would avoid
-// recreating the HTTP client on every polling iteration
-func WaitForRunning(config *ProxmoxConfig, vm VM) error {
-	// Create a single HTTP client for all requests
-	tr := &http.Transport{
-		TLSClientConfig: &tls.Config{InsecureSkipVerify: !config.VerifySSL},
-	}
-	client := &http.Client{Transport: tr}
-
-	// Wait for status "running" with exponential backoff
-	statusURL := fmt.Sprintf("https://%s:%s/api2/json/nodes/%s/qemu/%d/status/current",
-		config.Host, config.Port, vm.Node, vm.VMID)
-
-	backoff := time.Second
-	maxBackoff := 30 * time.Second
-	timeout := 3 * time.Minute
-	startTime := time.Now()
-
-	for {
-		if time.Since(startTime) > timeout {
-			return fmt.Errorf("vm failed to start within %v", timeout)
-		}
-
-		req, err := http.NewRequest("GET", statusURL, nil)
-		if err != nil {
-			return fmt.Errorf("failed to create status check request: %v", err)
-		}
-		req.Header.Set("Authorization", fmt.Sprintf("PVEAPIToken=%s", config.APIToken))
-
-		resp, err := client.Do(req)
-		if err != nil {
-			return fmt.Errorf("failed to check vm status: %v", err)
-		}
-
-		if resp.StatusCode == http.StatusOK {
-			// Verify the VM is actually running
-			var statusResponse struct {
-				Data struct {
-					Status string `json:"status"`
-				} `json:"data"`
-			}
-			err := json.NewDecoder(resp.Body).Decode(&statusResponse)
-			// Close the body here instead of deferring: a defer inside the
-			// loop would keep every response open until the function returns
-			resp.Body.Close()
-			if err != nil {
-				return fmt.Errorf("failed to decode status response: %v", err)
-			}
-			if statusResponse.Data.Status == "running" {
-				return nil
-			}
-		} else {
-			resp.Body.Close()
-		}
-
-		time.Sleep(backoff)
-		backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff)))
-	}
-}
-
-// TODO: refactor to use MakeRequest; making MakeRequest variadic would avoid
-// recreating the HTTP client on every polling iteration
-func WaitForStopped(config *ProxmoxConfig, vm VM) error {
-	// Create a single HTTP client for all requests
-	tr := &http.Transport{
-		TLSClientConfig: &tls.Config{InsecureSkipVerify: !config.VerifySSL},
-	}
-	client := &http.Client{Transport: tr}
-
-	// Wait for status "stopped" with exponential backoff
-	statusURL := fmt.Sprintf("https://%s:%s/api2/json/nodes/%s/qemu/%d/status/current",
-		config.Host, config.Port, vm.Node, vm.VMID)
-
-	backoff := time.Second
-	maxBackoff := 30 * time.Second
-	timeout := 3 * time.Minute
-	startTime := time.Now()
-
-	for {
-		if time.Since(startTime) > timeout {
-			return fmt.Errorf("vm failed to stop within %v", timeout)
-		}
-
-		req, err := http.NewRequest("GET", statusURL, nil)
-		if err != nil {
-			return fmt.Errorf("failed to create status check request: %v", err)
-		}
-		req.Header.Set("Authorization", fmt.Sprintf("PVEAPIToken=%s", config.APIToken))
-
-		resp, err := client.Do(req)
-		if err != nil {
-			return fmt.Errorf("failed to check vm status: %v", err)
-		}
-
-		if resp.StatusCode == http.StatusOK {
-			// Verify the VM is actually stopped
-			var statusResponse struct {
-				Data struct {
-					Status string `json:"status"`
-				} `json:"data"`
-			}
-			err := json.NewDecoder(resp.Body).Decode(&statusResponse)
-			// Close the body here instead of deferring: a defer inside the
-			// loop would keep every response open until the function returns
-			resp.Body.Close()
-			if err != nil {
-				return fmt.Errorf("failed to decode status response: %v", err)
-			}
-			if statusResponse.Data.Status == "stopped" {
-				return nil
-			}
-		} else {
-			resp.Body.Close()
-		}
-
-		time.Sleep(backoff)
-		backoff = time.Duration(math.Min(float64(backoff*2), float64(maxBackoff)))
-	}
-}
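Both wait loops carry the same TODO: reuse `MakeRequest` instead of hand-rolling the HTTP calls. One possible shape, as the comments suggest, is a variadic client parameter so pollers can pass one long-lived `*http.Client`. A sketch of that refactor under the same `ProxmoxConfig` fields, not a drop-in from the original code:

```go
package proxmox

import (
	"bytes"
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

// MakeRequestV2 is MakeRequest with an optional client: pass none to get a
// fresh client per call, or pass one to reuse it across polling iterations.
func MakeRequestV2(config *ProxmoxConfig, path, kind string, jsonData []byte, clients ...*http.Client) (int, []byte, error) {
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: !config.VerifySSL},
	}}
	if len(clients) > 0 && clients[0] != nil {
		client = clients[0]
	}

	var body io.Reader
	if jsonData != nil {
		body = bytes.NewBuffer(jsonData)
	}
	req, err := http.NewRequest(kind, fmt.Sprintf("https://%s:%s/%s", config.Host, config.Port, path), body)
	if err != nil {
		return 0, nil, err
	}
	req.Header.Set("Authorization", fmt.Sprintf("PVEAPIToken=%s", config.APIToken))

	resp, err := client.Do(req)
	if err != nil {
		return 0, nil, err
	}
	defer resp.Body.Close()
	respBody, err := io.ReadAll(resp.Body)
	return resp.StatusCode, respBody, err
}
```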
-
-// isVmCritical reports whether a VM appears in the given resource pool member list
-func isVmCritical(vm VM, poolMembers *[]VirtualResource) (isInCritical bool, err error) {
-	for _, poolVm := range *poolMembers {
-		if poolVm.VmId == vm.VMID {
-			return true, nil
-		}
-	}
-
-	return false, nil
-}
-
-// GetPoolMembers returns the members of a named proxmox resource pool
-func GetPoolMembers(config *ProxmoxConfig, pool string) (members []VirtualResource, err error) {
-	// Prepare proxmox pool GET path
-	poolPath := fmt.Sprintf("api2/json/pools/%s", pool)
-
-	_, body, err := MakeRequest(config, poolPath, "GET", nil, nil)
-	if err != nil {
-		return nil, fmt.Errorf("failed to request resource pool: %v", err)
-	}
-
-	// Parse response into PoolResponse struct
-	var apiResp PoolResponse
-	if err := json.Unmarshal(body, &apiResp); err != nil {
-		return nil, fmt.Errorf("failed to parse status response: %v", err)
-	}
-
-	// Return the array of resource pool members
-	return apiResp.Data.Members, nil
-}
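The critical-pool check is the security boundary for both power endpoints, so it is worth pinning its behavior down with a test. A minimal illustrative sketch (not present in the original repository):

```go
package proxmox

import "testing"

func TestIsVmCritical(t *testing.T) {
	members := []VirtualResource{{VmId: 100}, {VmId: 200}}

	if ok, _ := isVmCritical(VM{VMID: 100, Node: "node1"}, &members); !ok {
		t.Error("VM 100 is in the pool and should be reported critical")
	}
	if ok, _ := isVmCritical(VM{VMID: 999, Node: "node1"}, &members); ok {
		t.Error("VM 999 is not in the pool and should not be reported critical")
	}
}
```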