diff --git a/pkg/transport/proxy/manager.go b/pkg/transport/proxy/manager.go
deleted file mode 100644
index 270e768e4..000000000
--- a/pkg/transport/proxy/manager.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Package proxy contains code for managing proxy processes.
-package proxy
-
-import (
-	"github.com/stacklok/toolhive/pkg/logger"
-	"github.com/stacklok/toolhive/pkg/process"
-)
-
-// We may want to move these operations behind an interface. For now, they
-// have been moved to this package to keep proxy-related logic grouped together.
-
-// StopProcess stops the proxy process associated with the container
-func StopProcess(containerBaseName string) {
-	if containerBaseName == "" {
-		logger.Warnf("Warning: Could not find base container name in labels")
-		return
-	}
-
-	// Try to read the PID file and kill the process
-	pid, err := process.ReadPIDFile(containerBaseName)
-	if err != nil {
-		logger.Errorf("No PID file found for %s, proxy may not be running in detached mode", containerBaseName)
-		return
-	}
-
-	// PID file found, try to kill the process
-	logger.Infof("Stopping proxy process (PID: %d)...", pid)
-	if err := process.KillProcess(pid); err != nil {
-		logger.Warnf("Warning: Failed to kill proxy process: %v", err)
-	} else {
-		logger.Info("Proxy process stopped")
-	}
-
-	// Remove the PID file
-	if err := process.RemovePIDFile(containerBaseName); err != nil {
-		logger.Warnf("Warning: Failed to remove PID file: %v", err)
-	}
-}
-
-// IsRunning checks if the proxy process is running
-func IsRunning(containerBaseName string) bool {
-	logger.Debugf("Checking if proxy process is running for container %s", containerBaseName)
-	if containerBaseName == "" {
-		logger.Warnf("Warning: Could not find base container name in labels")
-		return false
-	}
-
-	// Try to read the PID file
-	logger.Debugf("Reading PID file for container %s", containerBaseName)
-	pid, err := process.ReadPIDFile(containerBaseName)
-	if err != nil {
-		logger.Debugf("No PID file found for container %s", containerBaseName)
-		return false
-	}
-
-	// Check if the process exists and is running
-	logger.Debugf("Checking if process with PID %d is running", pid)
-	isRunning, err := process.FindProcess(pid)
-	if err != nil {
-		logger.Warnf("Warning: Error checking process: %v", err)
-		return false
-	}
-
-	return isRunning
-}
diff --git a/pkg/workloads/manager.go b/pkg/workloads/manager.go
index 66d6aa027..ae73f7ff8 100644
--- a/pkg/workloads/manager.go
+++ b/pkg/workloads/manager.go
@@ -26,7 +26,6 @@ import (
 	"github.com/stacklok/toolhive/pkg/secrets"
 	"github.com/stacklok/toolhive/pkg/state"
 	"github.com/stacklok/toolhive/pkg/transport"
-	"github.com/stacklok/toolhive/pkg/transport/proxy"
 	"github.com/stacklok/toolhive/pkg/workloads/statuses"
 	"github.com/stacklok/toolhive/pkg/workloads/types"
 )
@@ -575,16 +574,39 @@ func (d *defaultManager) getWorkloadContainer(ctx context.Context, name string)
 	return &container, nil
 }
 
+// stopProcess stops the proxy process associated with the container
+func (d *defaultManager) stopProcess(ctx context.Context, name string) {
+	if name == "" {
+		logger.Warnf("Warning: Could not find base container name in labels")
+		return
+	}
+
+	// Try to read the PID and kill the process
+	pid, err := d.statuses.GetWorkloadPID(ctx, name)
+	if err != nil {
+		logger.Errorf("No PID file found for %s, proxy may not be running in detached mode", name)
+		return
+	}
+
+	// PID file found, try to kill the process
+	logger.Infof("Stopping proxy process (PID: %d)...", pid)
+	if err := process.KillProcess(pid); err != nil {
+		logger.Warnf("Warning: Failed to kill proxy process: %v", err)
+	} else {
+		logger.Info("Proxy process stopped")
+	}
+
+	// Clean up PID file after successful kill
+	if err := process.RemovePIDFile(name); err != nil {
+		logger.Warnf("Warning: Failed to remove PID file: %v", err)
+	}
+}
+
 // stopProxyIfNeeded stops the proxy process if the workload has a base name
 func (d *defaultManager) stopProxyIfNeeded(ctx context.Context, name, baseName string) {
 	logger.Infof("Removing proxy process for %s...", name)
 	if baseName != "" {
-		proxy.StopProcess(baseName)
-		// TODO: refactor the StopProcess function to stop dealing explicitly with PID files.
-		// Note that this is not a blocker for k8s since this code path is not called there.
-		if err := d.statuses.ResetWorkloadPID(ctx, baseName); err != nil {
-			logger.Warnf("Warning: Failed to reset workload %s PID: %v", name, err)
-		}
+		d.stopProcess(ctx, baseName)
 	}
 }
 
@@ -798,32 +820,40 @@ func (d *defaultManager) restartContainerWorkload(ctx context.Context, name stri
 		workloadName = name
 	}
 
-	// Get workload state information using the original name
-	workloadState, err := d.getWorkloadState(ctx, name)
-	if err != nil {
+	// Get workload status using the status manager
+	workload, err := d.statuses.GetWorkload(ctx, name)
+	if err != nil && !errors.Is(err, rt.ErrWorkloadNotFound) {
 		return err
 	}
 
-	// Check if already running - use container name for this check
-	if d.isWorkloadAlreadyRunning(containerName, workloadState) {
+	// Check if already running - compare status to WorkloadStatusRunning
+	if err == nil && workload.Status == rt.WorkloadStatusRunning {
+		logger.Infof("Container %s is already running", containerName)
 		return nil
 	}
 
 	// Load runner configuration from state
-	mcpRunner, err := d.loadRunnerFromState(ctx, workloadState.BaseName)
+	mcpRunner, err := d.loadRunnerFromState(ctx, workloadName)
 	if err != nil {
-		return fmt.Errorf("failed to load state for %s: %v", workloadState.BaseName, err)
+		return fmt.Errorf("failed to load state for %s: %v", workloadName, err)
 	}
 
 	// Set workload status to starting - use the workload name for status operations
 	if err := d.statuses.SetWorkloadStatus(ctx, workloadName, rt.WorkloadStatusStarting, ""); err != nil {
 		logger.Warnf("Failed to set workload %s status to starting: %v", workloadName, err)
 	}
-	logger.Infof("Loaded configuration from state for %s", workloadState.BaseName)
+	logger.Infof("Loaded configuration from state for %s", workloadName)
 
-	// Stop container if running but proxy is not - use the container name for runtime operations
-	if err := d.stopContainerIfNeeded(ctx, containerName, workloadName, workloadState); err != nil {
-		return err
+	// Stop container if needed - since workload is not in running status, check if container needs stopping
+	if container.IsRunning() {
+		logger.Infof("Container %s is running but workload is not in running state. Stopping container...", containerName)
+		if err := d.runtime.StopWorkload(ctx, containerName); err != nil {
+			if statusErr := d.statuses.SetWorkloadStatus(ctx, workloadName, rt.WorkloadStatusError, ""); statusErr != nil {
+				logger.Warnf("Failed to set workload %s status to error: %v", workloadName, statusErr)
+			}
+			return fmt.Errorf("failed to stop container %s: %v", containerName, err)
+		}
+		logger.Infof("Container %s stopped", containerName)
 	}
 
 	// Start the workload with background context to avoid timeout cancellation
@@ -840,34 +870,6 @@ type workloadState struct {
 	ProxyRunning bool
 }
 
-// getWorkloadState retrieves the current state of a workload
-func (d *defaultManager) getWorkloadState(ctx context.Context, name string) (*workloadState, error) {
-	workloadSt := &workloadState{}
-
-	// Try to find the container
-	container, err := d.runtime.GetWorkloadInfo(ctx, name)
-	if err != nil {
-		if errors.Is(err, rt.ErrWorkloadNotFound) {
-			logger.Warnf("Warning: Failed to find container: %v", err)
-			logger.Warnf("Trying to find state with name %s directly...", name)
-			// Try to use the provided name as the base name
-			workloadSt.BaseName = name
-			workloadSt.Running = false
-		} else {
-			return nil, fmt.Errorf("failed to find workload %s: %v", name, err)
-		}
-	} else {
-		// Container found, check if it's running and get the base name
-		workloadSt.Running = container.IsRunning()
-		workloadSt.BaseName = labels.GetContainerBaseName(container.Labels)
-	}
-
-	// Check if the proxy process is running
-	workloadSt.ProxyRunning = proxy.IsRunning(workloadSt.BaseName)
-
-	return workloadSt, nil
-}
-
 // getRemoteWorkloadState retrieves the current state of a remote workload
 func (d *defaultManager) getRemoteWorkloadState(ctx context.Context, name, baseName string) *workloadState {
 	workloadSt := &workloadState{
@@ -884,9 +886,6 @@ func (d *defaultManager) getRemoteWorkloadState(ctx context.Context, name, baseN
 		workloadSt.Running = workload.Status == rt.WorkloadStatusRunning
 	}
 
-	// Check if the detached process is actually running
-	workloadSt.ProxyRunning = proxy.IsRunning(baseName)
-
 	return workloadSt
 }
 
@@ -899,25 +898,6 @@ func (*defaultManager) isWorkloadAlreadyRunning(name string, workloadSt *workloa
 	return false
 }
 
-// stopContainerIfNeeded stops the container if it's running but proxy is not
-func (d *defaultManager) stopContainerIfNeeded(
-	ctx context.Context, containerName, workloadName string, workloadSt *workloadState,
-) error {
-	if !workloadSt.Running {
-		return nil
-	}
-
-	logger.Infof("Container %s is running but proxy is not. Stopping container...", containerName)
-	if err := d.runtime.StopWorkload(ctx, containerName); err != nil {
-		if statusErr := d.statuses.SetWorkloadStatus(ctx, workloadName, rt.WorkloadStatusError, ""); statusErr != nil {
-			logger.Warnf("Failed to set workload %s status to error: %v", workloadName, statusErr)
-		}
-		return fmt.Errorf("failed to stop container %s: %v", containerName, err)
-	}
-	logger.Infof("Container %s stopped", containerName)
-	return nil
-}
-
 // startWorkload starts the workload in either foreground or background mode
 func (d *defaultManager) startWorkload(ctx context.Context, name string, mcpRunner *runner.Runner, foreground bool) error {
 	logger.Infof("Starting tooling server %s...", name)
@@ -1024,8 +1004,9 @@ func (d *defaultManager) stopSingleContainerWorkload(ctx context.Context, worklo
 	if labels.IsAuxiliaryWorkload(workload.Labels) {
 		logger.Debugf("Skipping proxy stop for auxiliary workload %s", name)
 	} else {
-		proxy.StopProcess(name)
+		d.stopProcess(ctx, name)
 	}
+
 	// TODO: refactor the StopProcess function to stop dealing explicitly with PID files.
 	// Note that this is not a blocker for k8s since this code path is not called there.
 	if err := d.statuses.ResetWorkloadPID(ctx, name); err != nil {
diff --git a/pkg/workloads/manager_test.go b/pkg/workloads/manager_test.go
index 6d4bce730..1d551d483 100644
--- a/pkg/workloads/manager_test.go
+++ b/pkg/workloads/manager_test.go
@@ -1252,6 +1252,8 @@ func TestDefaultManager_updateSingleWorkload(t *testing.T) {
 					State:  "running",
 					Labels: map[string]string{"toolhive-basename": "test-workload"},
 				}, nil)
+				// Mock GetWorkloadPID call from stopProcess
+				sm.EXPECT().GetWorkloadPID(gomock.Any(), "test-workload").Return(1234, nil)
 				rt.EXPECT().StopWorkload(gomock.Any(), "test-workload").Return(nil)
 				sm.EXPECT().ResetWorkloadPID(gomock.Any(), "test-workload").Return(nil)
 
@@ -1288,6 +1290,8 @@ func TestDefaultManager_updateSingleWorkload(t *testing.T) {
 					State:  "running",
 					Labels: map[string]string{"toolhive-basename": "test-workload"},
 				}, nil)
+				// Mock GetWorkloadPID call from stopProcess
+				sm.EXPECT().GetWorkloadPID(gomock.Any(), "test-workload").Return(1234, nil)
 				rt.EXPECT().StopWorkload(gomock.Any(), "test-workload").Return(nil)
 				sm.EXPECT().ResetWorkloadPID(gomock.Any(), "test-workload").Return(nil)
 
diff --git a/pkg/workloads/statuses/file_status.go b/pkg/workloads/statuses/file_status.go
index 3b36c5dc1..49a205db9 100644
--- a/pkg/workloads/statuses/file_status.go
+++ b/pkg/workloads/statuses/file_status.go
@@ -19,7 +19,6 @@ import (
 	"github.com/stacklok/toolhive/pkg/logger"
 	"github.com/stacklok/toolhive/pkg/process"
 	"github.com/stacklok/toolhive/pkg/state"
-	"github.com/stacklok/toolhive/pkg/transport/proxy"
 	"github.com/stacklok/toolhive/pkg/workloads/types"
 )
 
@@ -114,6 +113,7 @@ type workloadStatusFile struct {
 
 // GetWorkload retrieves the status of a workload by its name.
 func (f *fileStatusManager) GetWorkload(ctx context.Context, workloadName string) (core.Workload, error) {
+	var pid int
 	result := core.Workload{Name: workloadName}
 	fileFound := false
 
@@ -152,6 +152,8 @@
 			}
 		}
 
+		pid = statusFile.ProcessID
+
 		return nil
 	})
 	if err != nil {
@@ -172,7 +174,7 @@ func (f *fileStatusManager) GetWorkload(ctx context.Context, workloadName string
 
 	// If workload is running, validate against runtime
 	if result.Status == rt.WorkloadStatusRunning {
-		return f.validateRunningWorkload(ctx, workloadName, result)
+		return f.validateRunningWorkload(ctx, workloadName, result, pid)
 	}
 
 	// Return file data
@@ -197,7 +199,7 @@ func (f *fileStatusManager) ListWorkloads(ctx context.Context, listAll bool, lab
 	}
 
 	// Get workloads from files
-	fileWorkloads, err := f.getWorkloadsFromFiles()
+	fileWorkloadsWithPID, err := f.getWorkloadsFromFiles()
 	if err != nil {
 		return nil, fmt.Errorf("failed to get workloads from files: %w", err)
 	}
@@ -205,14 +207,14 @@
 
 	// TODO: Fetch the runconfig if present to populate additional fields like package, tool type, group etc.
 	// There's currently an import cycle between this package and the runconfig package
-	for _, fileWorkload := range fileWorkloads {
-		if fileWorkload.Remote { // Remote workloads are not managed by the container runtime
-			delete(fileWorkloads, fileWorkload.Name) // Skip remote workloads here, we add them in workload manager
+	for _, fileWorkload := range fileWorkloadsWithPID {
+		if fileWorkload.workload.Remote { // Remote workloads are not managed by the container runtime
+			delete(fileWorkloadsWithPID, fileWorkload.workload.Name) // Skip remote workloads here, we add them in workload manager
 		}
 	}
 
 	// Create a map of runtime workloads by name for easy lookup
-	workloadMap := f.mergeRuntimeAndFileWorkloads(ctx, runtimeContainers, fileWorkloads)
+	workloadMap := f.mergeRuntimeAndFileWorkloads(ctx, runtimeContainers, fileWorkloadsWithPID)
 
 	// Convert map to slice and apply filters
 	var workloads []core.Workload
@@ -370,6 +372,37 @@
 	return f.SetWorkloadPID(ctx, workloadName, 0)
 }
 
+// GetWorkloadPID retrieves the PID of a workload from its status file.
+func (f *fileStatusManager) GetWorkloadPID(ctx context.Context, workloadName string) (int, error) {
+	var pid int
+
+	err := f.withFileReadLock(ctx, workloadName, func(statusFilePath string) error {
+		// Check if file exists
+		if _, err := os.Stat(statusFilePath); os.IsNotExist(err) {
+			// File doesn't exist, return 0
+			pid = 0
+			return nil
+		} else if err != nil {
+			return fmt.Errorf("failed to check status file for workload %s: %w", workloadName, err)
+		}
+
+		statusFile, err := f.readStatusFile(statusFilePath)
+		if err != nil {
+			return fmt.Errorf("failed to read status file for workload %s: %w", workloadName, err)
+		}
+
+		pid = statusFile.ProcessID
+		return nil
+	})
+
+	if err != nil {
+		return 0, err
+	}
+
+	logger.Debugf("workload %s PID retrieved: %d", workloadName, pid)
+	return pid, nil
+}
+
 // migratePIDFromFile migrates PID from legacy PID file to status file if needed.
 // This is called when the status is running and ProcessID is 0.
 // Returns (migratedPID, wasUpdated) where wasUpdated indicates if the PID was successfully migrated
@@ -553,8 +586,14 @@ func (f *fileStatusManager) getWorkloadFromRuntime(ctx context.Context, workload
 	return types.WorkloadFromContainerInfo(&info)
 }
 
+// workloadWithPID holds a workload and its associated PID for internal processing
+type workloadWithPID struct {
+	workload core.Workload
+	pid      int
+}
+
 // getWorkloadsFromFiles retrieves all workloads from status files.
-func (f *fileStatusManager) getWorkloadsFromFiles() (map[string]core.Workload, error) {
+func (f *fileStatusManager) getWorkloadsFromFiles() (map[string]workloadWithPID, error) {
 	// Ensure base directory exists
 	if err := f.ensureBaseDir(); err != nil {
 		return nil, fmt.Errorf("failed to ensure base directory: %w", err)
@@ -566,7 +605,7 @@
 		return nil, fmt.Errorf("failed to list status files: %w", err)
 	}
 
-	workloads := make(map[string]core.Workload)
+	workloads := make(map[string]workloadWithPID)
 	ctx := context.Background() // Create context for file locking
 
 	for _, file := range files {
@@ -614,6 +653,7 @@
 		}
 
 		// Check if PID migration is needed
+		pid := statusFile.ProcessID
 		if statusFile.Status == rt.WorkloadStatusRunning && statusFile.ProcessID == 0 {
 			// Try PID migration - the migration function will handle cases
 			// where container info is not available gracefully
@@ -621,6 +661,7 @@
 			// Update the status file with the migrated PID
 			statusFile.ProcessID = migratedPID
 			statusFile.UpdatedAt = time.Now()
+			pid = migratedPID
 			if err := f.writeStatusFile(statusFilePath, *statusFile); err != nil {
 				logger.Warnf("failed to write migrated PID for workload %s: %v", workloadName, err)
 			} else {
@@ -629,7 +670,10 @@
 			}
 		}
 
-		workloads[workloadName] = workload
+		workloads[workloadName] = workloadWithPID{
+			workload: workload,
+			pid:      pid,
+		}
 
 		return nil
 	})
@@ -647,7 +691,7 @@
 // validateRunningWorkload validates that a workload marked as running in the file
 // is actually running in the runtime and has a healthy proxy process if applicable.
 func (f *fileStatusManager) validateRunningWorkload(
-	ctx context.Context, workloadName string, result core.Workload,
+	ctx context.Context, workloadName string, result core.Workload, pid int,
 ) (core.Workload, error) {
 	// For remote workloads, we don't need to validate against the container runtime
 	// since they don't have containers
@@ -667,7 +711,7 @@
 	}
 
 	// Check if proxy process is running when workload is running
-	if unhealthyWorkload, isUnhealthy := f.checkProxyHealth(ctx, workloadName, result, containerInfo); isUnhealthy {
+	if unhealthyWorkload, isUnhealthy := f.isProxyUnhealthy(ctx, workloadName, result, containerInfo, pid); isUnhealthy {
 		return unhealthyWorkload, nil
 	}
 
@@ -722,10 +766,10 @@
 	return fileWorkload, nil
 }
 
-// checkProxyHealth checks if the proxy process is running for the workload.
+// isProxyUnhealthy checks if the proxy process is running for the workload.
 // Returns (unhealthyWorkload, true) if proxy is not running, (emptyWorkload, false) if proxy is healthy or not applicable.
-func (f *fileStatusManager) checkProxyHealth(
-	ctx context.Context, workloadName string, result core.Workload, containerInfo rt.ContainerInfo,
+func (f *fileStatusManager) isProxyUnhealthy(
+	ctx context.Context, workloadName string, result core.Workload, containerInfo rt.ContainerInfo, pid int,
 ) (core.Workload, bool) {
 	// Use original container labels (before filtering) to get base name
 	baseName := labels.GetContainerBaseName(containerInfo.Labels)
@@ -733,8 +777,10 @@
 		return core.Workload{}, false // No proxy check needed
 	}
 
-	proxyRunning := proxy.IsRunning(baseName)
-	if proxyRunning {
+	proxyRunning, err := process.FindProcess(pid)
+	if err != nil {
+		logger.Warnf("unable to find process %d: %v", pid, err)
+	} else if proxyRunning {
 		return core.Workload{}, false // Proxy is healthy
 	}
 
@@ -775,7 +821,7 @@
 // validateWorkloadInList validates a workload during list operations, similar to validateRunningWorkload
 // but with different error handling to avoid disrupting the entire list operation.
 func (f *fileStatusManager) validateWorkloadInList(
-	ctx context.Context, workloadName string, fileWorkload core.Workload, containerInfo rt.ContainerInfo,
+	ctx context.Context, workloadName string, fileWorkload core.Workload, containerInfo rt.ContainerInfo, pid int,
 ) (core.Workload, error) {
 	// Only validate if file shows running status
 	if fileWorkload.Status != rt.WorkloadStatusRunning {
@@ -797,7 +843,7 @@
 	}
 
 	// Check if proxy process is running when workload is running
-	if unhealthyWorkload, isUnhealthy := f.checkProxyHealth(ctx, workloadName, fileWorkload, containerInfo); isUnhealthy {
+	if unhealthyWorkload, isUnhealthy := f.isProxyUnhealthy(ctx, workloadName, fileWorkload, containerInfo, pid); isUnhealthy {
 		return unhealthyWorkload, nil
 	}
 
@@ -809,7 +855,7 @@
 func (f *fileStatusManager) mergeRuntimeAndFileWorkloads(
 	ctx context.Context,
 	runtimeContainers []rt.ContainerInfo,
-	fileWorkloads map[string]core.Workload,
+	fileWorkloadsWithPID map[string]workloadWithPID,
 ) map[string]core.Workload {
 	runtimeWorkloadMap := make(map[string]rt.ContainerInfo)
 	for _, container := range runtimeContainers {
@@ -840,14 +886,16 @@
 	}
 
 	// Then, merge with file workloads, validating running workloads
-	for name, fileWorkload := range fileWorkloads {
+	for name, fileWorkloadWithPID := range fileWorkloadsWithPID {
+		fileWorkload := fileWorkloadWithPID.workload
+		pid := fileWorkloadWithPID.pid
 		if fileWorkload.Remote { // Remote workloads are not managed by the container runtime
 			continue // Skip remote workloads here, we add them in workload manager
 		}
 
 		if runtimeContainer, exists := runtimeWorkloadMap[name]; exists {
 			// Validate running workloads similar to GetWorkload
-			validatedWorkload, err := f.validateWorkloadInList(ctx, name, fileWorkload, runtimeContainer)
+			validatedWorkload, err := f.validateWorkloadInList(ctx, name, fileWorkload, runtimeContainer, pid)
 			if err != nil {
 				logger.Warnf("failed to validate workload %s in list: %v", name, err)
 				// Fall back to basic merge without validation
@@ -869,8 +917,9 @@
 			if err != nil {
 				logger.Warnf("failed to handle missing runtime for workload %s: %v", name, err)
 				workloadMap[name] = fileWorkload
+			} else {
+				workloadMap[name] = updatedWorkload
 			}
-			workloadMap[name] = updatedWorkload
 		}
 	}
 	return workloadMap
diff --git a/pkg/workloads/statuses/file_status_test.go b/pkg/workloads/statuses/file_status_test.go
index 9da2b10c3..ba2004cee 100644
--- a/pkg/workloads/statuses/file_status_test.go
+++ b/pkg/workloads/statuses/file_status_test.go
@@ -529,8 +529,8 @@ func TestFileStatusManager_ValidateRunningWorkload_Remote(t *testing.T) {
 	// Mock runtime should NOT be called for remote workloads
 	// (no expectations set, so any call would fail the test)
 
-	// Validate the remote workload
-	result, err := manager.validateRunningWorkload(ctx, "remote-test", remoteWorkload)
+	// Validate the remote workload (PID is irrelevant for remote workloads)
+	result, err := manager.validateRunningWorkload(ctx, "remote-test", remoteWorkload, 0)
 	require.NoError(t, err)
 
 	// Should return the workload unchanged without calling runtime
diff --git a/pkg/workloads/statuses/mocks/mock_status_manager.go b/pkg/workloads/statuses/mocks/mock_status_manager.go
index 850f47c32..bccabe767 100644
--- a/pkg/workloads/statuses/mocks/mock_status_manager.go
+++ b/pkg/workloads/statuses/mocks/mock_status_manager.go
@@ -71,6 +71,21 @@ func (mr *MockStatusManagerMockRecorder) GetWorkload(ctx, workloadName any) *gom
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkload", reflect.TypeOf((*MockStatusManager)(nil).GetWorkload), ctx, workloadName)
 }
 
+// GetWorkloadPID mocks base method.
+func (m *MockStatusManager) GetWorkloadPID(ctx context.Context, workloadName string) (int, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetWorkloadPID", ctx, workloadName)
+	ret0, _ := ret[0].(int)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetWorkloadPID indicates an expected call of GetWorkloadPID.
+func (mr *MockStatusManagerMockRecorder) GetWorkloadPID(ctx, workloadName any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkloadPID", reflect.TypeOf((*MockStatusManager)(nil).GetWorkloadPID), ctx, workloadName)
+}
+
 // ListWorkloads mocks base method.
 func (m *MockStatusManager) ListWorkloads(ctx context.Context, listAll bool, labelFilters []string) ([]core.Workload, error) {
 	m.ctrl.T.Helper()
diff --git a/pkg/workloads/statuses/status.go b/pkg/workloads/statuses/status.go
index 23cc2d082..dd32dac16 100644
--- a/pkg/workloads/statuses/status.go
+++ b/pkg/workloads/statuses/status.go
@@ -33,6 +33,9 @@ type StatusManager interface {
 	// ResetWorkloadPID resets the PID of a workload to 0.
 	// This method will do nothing if the workload does not exist.
 	ResetWorkloadPID(ctx context.Context, workloadName string) error
+	// GetWorkloadPID retrieves the PID of a workload by its name.
+	// Returns 0 if the workload does not exist or if PID is not available.
+	GetWorkloadPID(ctx context.Context, workloadName string) (int, error)
 }
 
 // NewStatusManagerFromRuntime creates a new instance of StatusManager from an existing runtime.
@@ -139,3 +142,9 @@ func (*runtimeStatusManager) ResetWorkloadPID(_ context.Context, workloadName st
 	logger.Debugf("workload %s PID reset (noop for runtime status manager)", workloadName)
 	return nil
 }
+
+func (*runtimeStatusManager) GetWorkloadPID(_ context.Context, workloadName string) (int, error) {
+	// Noop for runtime status manager - always return 0
+	logger.Debugf("workload %s PID requested (noop for runtime status manager, returning 0)", workloadName)
+	return 0, nil
+}
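
Usage note (not part of the patch): a minimal sketch of how a caller could consume the new StatusManager.GetWorkloadPID method together with the existing process helpers, mirroring the call sequence of defaultManager.stopProcess above. The function name stopProxyViaStatuses and its placement in package workloads are hypothetical.

package workloads

import (
	"context"

	"github.com/stacklok/toolhive/pkg/logger"
	"github.com/stacklok/toolhive/pkg/process"
	"github.com/stacklok/toolhive/pkg/workloads/statuses"
)

// stopProxyViaStatuses is an illustrative, hypothetical helper: it resolves the
// proxy PID through the StatusManager instead of a legacy PID file, kills the
// process, then resets the stored PID.
func stopProxyViaStatuses(ctx context.Context, sm statuses.StatusManager, name string) {
	pid, err := sm.GetWorkloadPID(ctx, name)
	if err != nil || pid == 0 {
		// GetWorkloadPID returns 0 when no PID is recorded for the workload.
		logger.Debugf("no PID recorded for %s; proxy may not be running", name)
		return
	}
	if err := process.KillProcess(pid); err != nil {
		logger.Warnf("failed to kill proxy process %d: %v", pid, err)
		return
	}
	if err := sm.ResetWorkloadPID(ctx, name); err != nil {
		logger.Warnf("failed to reset PID for workload %s: %v", name, err)
	}
}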