From 365b7915f15c452695c67225da0cf5c4718f3476 Mon Sep 17 00:00:00 2001 From: Craig Lurey Date: Tue, 3 Feb 2026 23:48:25 -0800 Subject: [PATCH 01/16] Bug fixes in search and cc, and supershell refactor (#1806) * Fixed search functions and completed pending supershell refactoring - Fix lsf command returning "No shared folders found" by making search_shared_folders() return all items when search string is empty - Apply same fix to search_teams() for consistency - Fix cc and ls commands to use regex search (use_regex=True) to restore previous behavior broken by token-based search default change * Fixed unit test --- keepercommander/api.py | 26 +- keepercommander/commands/record.py | 5 +- keepercommander/commands/supershell/README.md | 13 +- .../commands/supershell/__init__.py | 25 +- .../app.py} | 910 +----------------- .../commands/supershell/command.py | 161 ++++ keepercommander/commands/supershell/debug.py | 53 + .../commands/supershell/handlers/keyboard.py | 29 +- .../commands/supershell/themes/css.py | 89 +- .../commands/supershell/widgets/__init__.py | 5 + .../supershell/widgets/auto_copy_textarea.py | 222 +++++ .../widgets/shell_input_textarea.py | 127 +++ unit-tests/test_api.py | 6 +- 13 files changed, 755 insertions(+), 916 deletions(-) rename keepercommander/commands/{_supershell_impl.py => supershell/app.py} (87%) create mode 100644 keepercommander/commands/supershell/command.py create mode 100644 keepercommander/commands/supershell/debug.py create mode 100644 keepercommander/commands/supershell/widgets/auto_copy_textarea.py create mode 100644 keepercommander/commands/supershell/widgets/shell_input_textarea.py diff --git a/keepercommander/api.py b/keepercommander/api.py index af0e1b3e9..392c57730 100644 --- a/keepercommander/api.py +++ b/keepercommander/api.py @@ -493,21 +493,24 @@ def search_shared_folders(params, searchstring, use_regex=False): params: KeeperParams searchstring: Search string (tokens or regex depending on use_regex) use_regex: If True, 
treat as regex. If False (default), token-based search. + If searchstring is empty, returns all shared folders. """ search_results = [] if not searchstring: - return search_results - - if use_regex: + # No search string - return all shared folders + match_func = lambda target: True + elif use_regex: p = re.compile(searchstring.lower()) match_func = lambda target: p.search(target) else: # Token-based search: all tokens must match tokens = [t.lower() for t in searchstring.split() if t.strip()] if not tokens: - return search_results - match_func = lambda target: all(token in target for token in tokens) + # No valid tokens - return all shared folders + match_func = lambda target: True + else: + match_func = lambda target: all(token in target for token in tokens) for shared_folder_uid in params.shared_folder_cache: @@ -529,21 +532,24 @@ def search_teams(params, searchstring, use_regex=False): params: KeeperParams searchstring: Search string (tokens or regex depending on use_regex) use_regex: If True, treat as regex. If False (default), token-based search. + If searchstring is empty, returns all teams. 
""" search_results = [] if not searchstring: - return search_results - - if use_regex: + # No search string - return all teams + match_func = lambda target: True + elif use_regex: p = re.compile(searchstring.lower()) match_func = lambda target: p.search(target) else: # Token-based search: all tokens must match tokens = [t.lower() for t in searchstring.split() if t.strip()] if not tokens: - return search_results - match_func = lambda target: all(token in target for token in tokens) + # No valid tokens - return all teams + match_func = lambda target: True + else: + match_func = lambda target: all(token in target for token in tokens) for team_uid in params.team_cache: team = get_team(params, team_uid) diff --git a/keepercommander/commands/record.py b/keepercommander/commands/record.py index 6fcc91329..87d863f85 100644 --- a/keepercommander/commands/record.py +++ b/keepercommander/commands/record.py @@ -1263,7 +1263,8 @@ def execute(self, params, **kwargs): pattern, record_type=record_type, record_version=record_version, - search_fields=search_fields)] + search_fields=search_fields, + use_regex=True)] if any(records): headers = ['record_uid', 'type', 'title', 'description', 'shared'] if fmt == 'table': @@ -2386,7 +2387,7 @@ def execute(self, params, **kwargs): if record_uid is None: records = [] # type: List[vault.KeeperRecord] - for r in vault_extensions.find_records(params, record_name): + for r in vault_extensions.find_records(params, record_name, use_regex=True): if isinstance(r, (vault.PasswordRecord, vault.TypedRecord)): if user_pattern: login = '' diff --git a/keepercommander/commands/supershell/README.md b/keepercommander/commands/supershell/README.md index 26f236835..51ba9824a 100644 --- a/keepercommander/commands/supershell/README.md +++ b/keepercommander/commands/supershell/README.md @@ -7,13 +7,16 @@ SuperShell is a full-screen terminal UI (TUI) for browsing and managing Keeper v ``` supershell/ ├── __init__.py # Main exports and package interface +├── 
app.py # SuperShellApp - main Textual application +├── command.py # SuperShellCommand - CLI entry point ├── constants.py # Configuration constants +├── debug.py # Debug logging utilities ├── utils.py # Utility functions (preferences, ANSI stripping) │ ├── themes/ # Visual theming │ ├── __init__.py │ ├── colors.py # COLOR_THEMES dict with 5 color schemes -│ └── css.py # Textual CSS stylesheet +│ └── css.py # Textual CSS stylesheet (BASE_CSS) │ ├── screens/ # Modal screens │ ├── __init__.py @@ -24,7 +27,9 @@ supershell/ │ ├── __init__.py │ ├── clickable_line.py # ClickableDetailLine - copy-on-click text │ ├── clickable_field.py # ClickableField - labeled copy-on-click -│ └── clickable_uid.py # ClickableRecordUID - UID with navigation +│ ├── clickable_uid.py # ClickableRecordUID - UID with navigation +│ ├── auto_copy_textarea.py # AutoCopyTextArea - auto-copy on select +│ └── shell_input_textarea.py # ShellInputTextArea - shell command input │ ├── state/ # State management dataclasses │ ├── __init__.py @@ -50,9 +55,9 @@ supershell/ ## Key Components -### Main Application (`_supershell_impl.py`) +### Main Application (`app.py`) -The `SuperShellApp` class (in the parent directory) is the main Textual application. It: +The `SuperShellApp` class is the main Textual application. It: - Composes the UI layout (tree, detail pane, search bar, shell pane) - Manages application state - Handles tree node selection events diff --git a/keepercommander/commands/supershell/__init__.py b/keepercommander/commands/supershell/__init__.py index bd78ee783..65750f692 100644 --- a/keepercommander/commands/supershell/__init__.py +++ b/keepercommander/commands/supershell/__init__.py @@ -3,19 +3,25 @@ This package provides a modern TUI interface with vim-style navigation for browsing and managing Keeper vault records. - -During refactoring, the main implementation is in _supershell_impl.py. -This will be gradually migrated into this package structure. 
""" -# Re-export from implementation file for backward compatibility -from .._supershell_impl import SuperShellCommand, SuperShellApp +# Re-export main classes +from .command import SuperShellCommand +from .app import SuperShellApp # Export theme and utility modules +from .debug import debug_log, DEBUG_EVENTS, close_debug_log from .themes import COLOR_THEMES from .screens import PreferencesScreen, HelpScreen from .utils import load_preferences, save_preferences, strip_ansi_codes -from .widgets import ClickableDetailLine, ClickableField, ClickableRecordUID +from .widgets import ( + ClickableDetailLine, + ClickableField, + ClickableRecordUID, + AutoCopyTextArea, + safe_copy_to_clipboard, + ShellInputTextArea, +) from .state import VaultData, UIState, ThemeState, SelectionState from .renderers import ( is_sensitive_field, @@ -35,6 +41,10 @@ # Main classes 'SuperShellCommand', 'SuperShellApp', + # Debug + 'debug_log', + 'DEBUG_EVENTS', + 'close_debug_log', # Themes 'COLOR_THEMES', # Screens @@ -48,6 +58,9 @@ 'ClickableDetailLine', 'ClickableField', 'ClickableRecordUID', + 'AutoCopyTextArea', + 'safe_copy_to_clipboard', + 'ShellInputTextArea', # State 'VaultData', 'UIState', diff --git a/keepercommander/commands/_supershell_impl.py b/keepercommander/commands/supershell/app.py similarity index 87% rename from keepercommander/commands/_supershell_impl.py rename to keepercommander/commands/supershell/app.py index 7c0676c68..42f7a3f93 100644 --- a/keepercommander/commands/_supershell_impl.py +++ b/keepercommander/commands/supershell/app.py @@ -1,8 +1,8 @@ """ Keeper SuperShell - A full-screen terminal UI for Keeper vault -This is the implementation file during refactoring. Code is being -gradually migrated to the supershell/ package. +This module contains the main SuperShellApp class, which is the Textual +application for the vault browser TUI. 
""" import logging @@ -16,33 +16,24 @@ import os from pathlib import Path from typing import Optional, List, Dict, Any -import pyperclip -from pyperclip import PyperclipException from rich.markup import escape as rich_escape - -def safe_copy_to_clipboard(text: str) -> tuple[bool, str]: - """Safely copy text to clipboard, handling missing clipboard on remote/headless systems. - - Returns: - (True, "") on success - (False, error_message) on failure - """ - try: - pyperclip.copy(text) - return True, "" - except PyperclipException: - return False, "Clipboard not available (no X11/Wayland)" - except Exception as e: - return False, str(e) - -# Import from refactored modules -from .supershell.themes import COLOR_THEMES -from .supershell.utils import load_preferences, save_preferences, strip_ansi_codes -from .supershell.widgets import ClickableDetailLine, ClickableField, ClickableRecordUID -from .supershell.state import VaultData, UIState, ThemeState, SelectionState -from .supershell.data import load_vault_data, search_records -from .supershell.renderers import ( +# Import from package modules +from .debug import debug_log as _debug_log +from .themes import COLOR_THEMES +from .themes.css import BASE_CSS +from .utils import load_preferences, save_preferences, strip_ansi_codes +from .widgets import ( + ClickableDetailLine, + ClickableField, + ClickableRecordUID, + AutoCopyTextArea, + safe_copy_to_clipboard, + ShellInputTextArea, +) +from .state import VaultData, UIState, ThemeState, SelectionState +from .data import load_vault_data, search_records +from .renderers import ( is_sensitive_field as is_sensitive_field_name, mask_passwords_in_json, strip_field_type_prefix, @@ -51,8 +42,8 @@ def safe_copy_to_clipboard(text: str) -> tuple[bool, str]: FIELD_TYPE_PREFIXES, TYPE_FRIENDLY_NAMES, ) -from .supershell.handlers import keyboard_dispatcher -from .supershell.screens import PreferencesScreen, HelpScreen +from .handlers import keyboard_dispatcher +from .screens import 
PreferencesScreen, HelpScreen from textual.app import App, ComposeResult from textual.containers import Container, Horizontal, Vertical, VerticalScroll, Center, Middle @@ -66,344 +57,11 @@ def safe_copy_to_clipboard(text: str) -> tuple[bool, str]: from rich.text import Text from textual.events import Click, MouseDown, MouseUp, MouseMove, Paste -# === DEBUG EVENT LOGGING === -# Set to True to log all mouse/keyboard events to /tmp/supershell_debug.log -# tail -f /tmp/supershell_debug.log to watch events in real-time -DEBUG_EVENTS = False -_debug_log_file = None - -def _debug_log(msg: str): - """Log debug message to /tmp/supershell_debug.log if DEBUG_EVENTS is True.""" - if not DEBUG_EVENTS: - return - global _debug_log_file - try: - if _debug_log_file is None: - _debug_log_file = open('/tmp/supershell_debug.log', 'a') - import datetime - timestamp = datetime.datetime.now().strftime('%H:%M:%S.%f')[:-3] - _debug_log_file.write(f"[{timestamp}] {msg}\n") - _debug_log_file.flush() - except Exception as e: - pass # Silently fail if logging fails -# === END DEBUG EVENT LOGGING === - - -class AutoCopyTextArea(TextArea): - """TextArea that auto-copies selected text to clipboard on mouse release. 
- - Behavior matches standard Linux terminal: - - Click and drag to select text - - Double-click to select word, drag to extend from word boundaries - - On mouse up, automatically copy selection to clipboard - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - import time - self._last_click_time = 0.0 - self._last_click_pos = (0, 0) - self._word_select_mode = False - self._word_anchor_start = None # (row, col) - self._word_anchor_end = None # (row, col) - - async def _on_mouse_down(self, event: MouseDown) -> None: - """Handle mouse down - detect double-click for word selection.""" - import time - current_time = time.time() - current_pos = (event.x, event.y) - - # Check for double-click (within 500ms and reasonably close position) - time_ok = (current_time - self._last_click_time) < 0.5 - pos_ok = (abs(current_pos[0] - self._last_click_pos[0]) <= 10 and - abs(current_pos[1] - self._last_click_pos[1]) <= 5) - is_double_click = time_ok and pos_ok - - # Update click tracking - self._last_click_time = current_time - self._last_click_pos = current_pos - - if is_double_click: - # Double-click: select word and prepare for drag - self._select_word_at_position(event) - else: - # Single click: reset word mode and do normal selection - self._word_select_mode = False - self._word_anchor_start = None - self._word_anchor_end = None - await super()._on_mouse_down(event) - - def _select_word_at_position(self, event: MouseDown) -> None: - """Select the word at the mouse position.""" - try: - location = self.get_target_document_location(event) - row, col = location - - lines = self.text.split('\n') - if row >= len(lines): - return - line = lines[row] - if col > len(line): - col = len(line) - - # Find word boundaries (whitespace-delimited) - start = col - while start > 0 and not line[start - 1].isspace(): - start -= 1 - - end = col - while end < len(line) and not line[end].isspace(): - end += 1 - - if start == end: - # No word at this position - return 
- - # Store anchors for potential drag extension - self._word_anchor_start = (row, start) - self._word_anchor_end = (row, end) - self._word_select_mode = True - - # Select the word - from textual.widgets.text_area import Selection - self.selection = Selection((row, start), (row, end)) - - # Set up for potential drag (like parent's _on_mouse_down) - self._selecting = True - self.capture_mouse() - self._pause_blink(visible=False) - self.history.checkpoint() - - except Exception as e: - _debug_log(f"AutoCopyTextArea._select_word_at_position error: {e}") - # On error, fall back to normal behavior - self._word_select_mode = False - - async def _on_mouse_move(self, event: MouseMove) -> None: - """Handle mouse move - extend selection if dragging.""" - if not self._selecting: - return - - try: - target = self.get_target_document_location(event) - from textual.widgets.text_area import Selection - - if self._word_select_mode and self._word_anchor_start: - # Word-select mode: anchor to original word boundaries - anchor_start = self._word_anchor_start - anchor_end = self._word_anchor_end - - if target < anchor_start: - self.selection = Selection(target, anchor_end) - elif target > anchor_end: - self.selection = Selection(anchor_start, target) - else: - self.selection = Selection(anchor_start, anchor_end) - else: - # Normal drag: extend from original click position - selection_start, _ = self.selection - self.selection = Selection(selection_start, target) - except Exception: - pass - - async def _on_mouse_up(self, event: MouseUp) -> None: - """Handle mouse up - finalize selection and copy.""" - # Clean up word select state - self._word_select_mode = False - - # Let parent finalize selection mode - self._end_mouse_selection() - - # Always try to copy - _auto_copy_if_selected checks if there's actual selection - self._auto_copy_if_selected() - - def _on_click(self, event: Click) -> None: - """Handle click events - double-click selects and copies word.""" - # Double-click: select 
word and copy (backup for mouse_down detection) - if event.chain >= 2: - try: - location = self.get_target_document_location(event) - row, col = location - - lines = self.text.split('\n') - if row < len(lines): - line = lines[row] - if col > len(line): - col = len(line) - - # Find word boundaries - start = col - while start > 0 and not line[start - 1].isspace(): - start -= 1 - end = col - while end < len(line) and not line[end].isspace(): - end += 1 - - if start < end: - word = line[start:end] - # Select and copy the word - from textual.widgets.text_area import Selection - self.selection = Selection((row, start), (row, end)) - # Copy immediately - success, err = safe_copy_to_clipboard(word) - if success: - preview = word[:40] + ('...' if len(word) > 40 else '') - self.app.notify(f"Copied: {preview}", severity="information") - else: - self.app.notify(f"⚠️ {err}", severity="warning") - except Exception: - pass - event.stop() - return - # Let parent handle single clicks - super()._on_click(event) - - def _auto_copy_if_selected(self) -> None: - """Copy selected text to clipboard if any.""" - try: - selected = self.selected_text - _debug_log(f"AutoCopyTextArea: selected_text={selected!r}") - if selected and selected.strip(): - success, err = safe_copy_to_clipboard(selected) - if success: - preview = selected[:40] + ('...' if len(selected) > 40 else '') - preview = preview.replace('\n', ' ') - # Use app.notify() instead of widget's notify() - self.app.notify(f"Copied: {preview}", severity="information") - _debug_log(f"AutoCopyTextArea: Copied to clipboard") - else: - self.app.notify(f"⚠️ {err}", severity="warning") - except Exception as e: - _debug_log(f"AutoCopyTextArea: Error: {e}") - - -class ShellInputTextArea(TextArea): - """TextArea for shell command input with Enter-to-execute behavior. 
- - Features: - - Enter executes command instead of inserting newline - - Soft wrapping for long commands - - Multi-line display - - Integrates with shell history navigation - """ - - def __init__(self, app_ref: 'SuperShellApp', *args, **kwargs): - # Set defaults for shell input behavior - kwargs.setdefault('soft_wrap', True) - kwargs.setdefault('show_line_numbers', False) - kwargs.setdefault('tab_behavior', 'focus') # Tab cycles focus, not inserts tab - super().__init__(*args, **kwargs) - self._app_ref = app_ref - - async def _on_key(self, event) -> None: - """Intercept keys for shell-specific behavior.""" - # Enter executes command instead of inserting newline - if event.key == "enter": - command = self.text.strip() - self.clear() # Clear immediately for responsiveness - if command: - # Execute asynchronously with loading indicator - self._app_ref._execute_shell_command_async(command) - event.prevent_default() - event.stop() - return - - # Up arrow navigates history - if event.key == "up": - if self._app_ref.shell_command_history: - if self._app_ref.shell_history_index < len(self._app_ref.shell_command_history) - 1: - self._app_ref.shell_history_index += 1 - history_cmd = self._app_ref.shell_command_history[-(self._app_ref.shell_history_index + 1)] - self.clear() - self.insert(history_cmd) - event.prevent_default() - event.stop() - return - - # Down arrow navigates history - if event.key == "down": - if self._app_ref.shell_history_index > 0: - self._app_ref.shell_history_index -= 1 - history_cmd = self._app_ref.shell_command_history[-(self._app_ref.shell_history_index + 1)] - self.clear() - self.insert(history_cmd) - elif self._app_ref.shell_history_index == 0: - self._app_ref.shell_history_index = -1 - self.clear() - event.prevent_default() - event.stop() - return - - # Ctrl+U clears the input (bash-like) - if event.key == "ctrl+u": - self.clear() - self._app_ref.shell_history_index = -1 - event.prevent_default() - event.stop() - return - - # Ctrl+D closes shell 
pane - if event.key == "ctrl+d": - self._app_ref._close_shell_pane() - event.prevent_default() - event.stop() - return - - # Escape unfocuses the input - if event.key == "escape": - self._app_ref.shell_input_active = False - tree = self._app_ref.query_one("#folder_tree") - tree.focus() - self._app_ref._update_status("Shell open | Tab to cycle | press Enter in shell to run commands") - event.prevent_default() - event.stop() - return - - # Tab cycles to search mode - if event.key == "tab": - from textual.widgets import Tree - self._app_ref.shell_input_active = False - self._app_ref.search_input_active = True - tree = self._app_ref.query_one("#folder_tree", Tree) - tree.add_class("search-input-active") - search_bar = self._app_ref.query_one("#search_bar") - search_bar.add_class("search-active") - tree.focus() # Search mode works with tree focused - self._app_ref._update_search_display(perform_search=False) - self._app_ref._update_status("Type to search | Tab to tree | Ctrl+U to clear") - event.prevent_default() - event.stop() - return - - # Shift+Tab cycles to shell output pane - if event.key == "shift+tab": - from textual.widgets import TextArea - self._app_ref.shell_input_active = False - try: - shell_output = self._app_ref.query_one("#shell_output_content", TextArea) - shell_output.focus() - except Exception: - pass - self._app_ref._update_status("Shell output | j/k to scroll | Tab to input | Shift+Tab to detail") - event.prevent_default() - event.stop() - return - - # Let parent TextArea handle all other keys (typing, backspace, cursor movement, etc.) - await super()._on_key(event) - - -from ..commands.base import Command - -# Widget classes are now imported from .supershell.widgets at the top of this file - -from ..commands.record import RecordGetUidCommand, ClipboardCommand -from ..display import bcolors -from .. import vault -from .. 
import utils - - -# Screen classes imported from .supershell.screens +from ..base import Command +from ..record import RecordGetUidCommand, ClipboardCommand +from ...display import bcolors +from ... import vault +from ... import utils class SuperShellApp(App): """The Keeper SuperShell TUI application""" @@ -415,333 +73,8 @@ class SuperShellApp(App): PAGE_DOWN_NODES = 10 # Number of nodes to move for half-page down PAGE_DOWN_FULL_NODES = 20 # Number of nodes to move for full-page down - # _strip_ansi_codes is now imported from .supershell.utils - - CSS = """ - Screen { - background: #000000; - } - - Input { - background: #111111; - color: #ffffff; - } - - Input > .input--content { - color: #ffffff; - } - - Input > .input--placeholder { - color: #666666; - } - - Input > .input--cursor { - color: #ffffff; - text-style: reverse; - } - - Input:focus { - border: solid #888888; - } - - Input:focus > .input--content { - color: #ffffff; - } - - #search_bar { - dock: top; - height: 3; - width: 100%; - background: #222222; - border: solid #666666; - } - - #search_display { - width: 35%; - background: #222222; - color: #ffffff; - padding: 0 2; - height: 3; - } - - #search_results_label { - width: 15%; - color: #aaaaaa; - text-align: right; - padding: 0 2; - height: 3; - background: #222222; - } - - #user_info { - width: auto; - height: 3; - background: #222222; - color: #888888; - padding: 0 1; - } - - #device_status_info { - width: auto; - height: 3; - background: #222222; - color: #888888; - padding: 0 2; - text-align: right; - } - - .clickable-info:hover { - background: #333333; - } - - #main_container { - height: 100%; - background: #000000; - } - - #folder_panel { - width: 50%; - border-right: thick #666666; - padding: 1; - background: #000000; - } - - #folder_tree { - height: 100%; - background: #000000; - } - - #record_panel { - width: 50%; - padding: 1; - background: #000000; - } - - #record_detail { - height: 100%; - overflow-y: auto; - padding: 1; - background: 
#000000; - } - - #detail_content { - background: #000000; - color: #ffffff; - } - - Tree { - background: #000000; - color: #ffffff; - } - - Tree > .tree--guides { - color: #444444; - } - - Tree > .tree--toggle { - /* Hide expand/collapse icons - nodes still expand/collapse on click */ - width: 0; - } - - Tree > .tree--cursor { - /* Selected row - neutral background that works with all color themes */ - background: #333333; - text-style: bold; - } - - Tree > .tree--highlight { - /* Hover row - subtle background, different from selection */ - background: #1a1a1a; - } - - Tree > .tree--highlight-line { - background: #1a1a1a; - } - - /* Hide tree selection when search input is active */ - Tree.search-input-active > .tree--cursor { - background: transparent; - text-style: none; - } - - Tree.search-input-active > .tree--highlight { - background: transparent; - } - - DataTable { - background: #000000; - color: #ffffff; - } - - DataTable > .datatable--cursor { - background: #444444; - color: #ffffff; - text-style: bold; - } - - DataTable > .datatable--header { - background: #222222; - color: #ffffff; - text-style: bold; - } - - Static { - background: #000000; - color: #ffffff; - } - - VerticalScroll { - background: #000000; - } - - #record_detail:focus { - background: #0a0a0a; - } - - #record_detail:focus-within { - background: #0a0a0a; - } - - /* Focus indicators - green left border shows which pane is active */ - #folder_panel:focus-within { - border-left: solid #00cc00; - } - - #record_panel:focus-within { - border-left: solid #00cc00; - } - - #shell_output_content:focus { - border-left: solid #00cc00; - } - - #shell_input_container:focus-within { - border-left: solid #00cc00; - } - - #search_bar.search-active { - border-left: solid #00cc00; - } - - #status_bar { - dock: bottom; - height: 1; - background: #000000; - color: #aaaaaa; - padding: 0 2; - } - - #shortcuts_bar { - dock: bottom; - height: 2; - background: #111111; - color: #888888; - padding: 0 1; - border-top: 
solid #333333; - } - - /* Content area wrapper for shell pane visibility control */ - #content_area { - height: 100%; - width: 100%; - } - - /* When shell is visible, compress main container to top half */ - #content_area.shell-visible #main_container { - height: 50%; - } - - /* Shell pane - hidden by default */ - #shell_pane { - display: none; - height: 50%; - width: 100%; - border-top: thick #666666; - background: #000000; - } - - /* Show shell pane when class is added */ - #content_area.shell-visible #shell_pane { - display: block; - } - - #shell_header { - height: 1; - background: #222222; - color: #00ff00; - padding: 0 1; - border-bottom: solid #333333; - } - - #shell_output_content { - height: 1fr; - background: #000000; - color: #ffffff; - border: none; - padding: 0 1; - } - - /* Theme-specific selection colors for shell output */ - #shell_output_content.theme-green .text-area--selection { - background: #004400; - } - #shell_output_content.theme-blue .text-area--selection { - background: #002244; - } - #shell_output_content.theme-magenta .text-area--selection { - background: #330033; - } - #shell_output_content.theme-yellow .text-area--selection { - background: #333300; - } - #shell_output_content.theme-white .text-area--selection { - background: #444444; - } - /* Default fallback */ - #shell_output_content .text-area--selection { - background: #004400; - } - - /* Shell input container with prompt and TextArea */ - #shell_input_container { - height: auto; - min-height: 3; - max-height: 6; - background: #000000; - border-top: solid #333333; - border-bottom: solid #333333; - padding: 0 1; - } - - #shell_prompt { - width: 2; - height: 100%; - background: #000000; - color: #00ff00; - padding: 0; - } - - /* Shell input area - multi-line TextArea for command entry */ - #shell_input_area { - width: 1fr; - height: auto; - min-height: 1; - max-height: 5; - background: #000000; - color: #00ff00; - border: none; - padding: 0; - } - - #shell_input_area:focus { - 
background: #000000; - } - - #shell_input_area .text-area--cursor { - color: #00ff00; - background: #00ff00; - } - """ + # CSS is imported from supershell.themes.css + CSS = BASE_CSS BINDINGS = [ Binding("ctrl+q", "quit", "Quit", show=False), @@ -994,7 +327,7 @@ async def on_mount(self): # Sync vault data if needed if not hasattr(self.params, 'record_cache') or not self.params.record_cache: - from .utils import SyncDownCommand + from ..utils import SyncDownCommand try: logging.debug("Syncing vault data...") SyncDownCommand().execute(self.params) @@ -1274,7 +607,7 @@ def _load_vault_data(self): def _load_device_info(self): """Load device info using the 'this-device' command""" try: - from .utils import ThisDeviceCommand + from ..utils import ThisDeviceCommand # Call get_device_info directly - returns dict without printing return ThisDeviceCommand.get_device_info(self.params) @@ -1286,8 +619,8 @@ def _load_device_info(self): def _load_whoami_info(self): """Load whoami info using the 'whoami' command""" try: - from .utils import WhoamiCommand - from .. import constants + from ..utils import WhoamiCommand + from ... import constants import datetime # Call get_whoami_info directly - returns dict without printing @@ -3308,7 +2641,7 @@ def _display_secrets_manager_app(self, app_uid: str): t = self.theme_colors try: - from ..proto import APIRequest_pb2, enterprise_pb2 + from ...proto import APIRequest_pb2, enterprise_pb2 from .. import api, utils import json @@ -3603,7 +2936,9 @@ def on_tree_node_selected(self, event: Tree.NodeSelected): # Update search display to remove cursor search_display = self.query_one("#search_display", Static) if self.search_input_text: - search_display.update(rich_escape(self.search_input_text)) + # Escape all brackets (rich_escape only escapes matched pairs) + escaped = self.search_input_text.replace('\\', '\\\\').replace('[', '\\[').replace(']', '\\]') + search_display.update(escaped) else: search_display.update("[dim]Search... 
(Tab or /)[/dim]") @@ -3696,10 +3031,9 @@ def _update_search_display(self, perform_search=True): # Update display with blinking cursor at end if self.search_input_text: # Show text with blinking cursor (escape special chars for Rich markup) - escaped_text = rich_escape(self.search_input_text) - # Double trailing backslash so it doesn't escape the [blink] tag - if escaped_text.endswith('\\'): - escaped_text += '\\' + # Note: rich_escape() only escapes matched bracket pairs [x], not standalone [ + # So we manually escape all brackets to be safe + escaped_text = self.search_input_text.replace('\\', '\\\\').replace('[', '\\[').replace(']', '\\]') display_text = f"> {escaped_text}[blink]▎[/blink]" else: # Show prompt with blinking cursor (ready to type) @@ -3731,7 +3065,9 @@ def _update_search_display(self, perform_search=True): except Exception as e: logging.error(f"Error in _update_search_display: {e}", exc_info=True) - self._update_status(f"ERROR: {str(e)}") + # Escape the error message to avoid Rich markup parsing issues + error_msg = str(e).replace('[', '\\[').replace(']', '\\]') + self._update_status(f"ERROR: {error_msg}") def on_key(self, event): """Handle keyboard events using the dispatcher pattern. 
@@ -3994,7 +3330,7 @@ def _execute_shell_command_worker(self, command: str): sys.stderr = stderr_buffer # Execute via cli.do_command - from ..cli import do_command + from ...cli import do_command result = do_command(self.params, command) # Some commands return output (e.g., JSON format) instead of printing if result is not None: @@ -4088,7 +3424,7 @@ def _execute_shell_command(self, command: str): sys.stderr = stderr_buffer # Execute via cli.do_command - from ..cli import do_command + from ...cli import do_command result = do_command(self.params, command) # Some commands return output (e.g., JSON format) instead of printing if result is not None: @@ -4282,13 +3618,9 @@ def action_copy_password(self): """Copy password of selected record to clipboard using clipboard-copy command (generates audit event)""" if self.selected_record and self.selected_record in self.records: # First check if clipboard is available (to distinguish from "no password" errors) - try: - pyperclip.copy("") # Test clipboard availability - except PyperclipException: - self.notify("⚠️ Clipboard not available (no X11/Wayland)", severity="warning") - return - except Exception as e: - self.notify(f"⚠️ {e}", severity="warning") + success, err = safe_copy_to_clipboard("") + if not success: + self.notify(f"⚠️ {err}", severity="warning") return try: @@ -4359,7 +3691,7 @@ def action_copy_record(self): # Check if it's a Secrets Manager app record if self.selected_record in self.app_record_uids: # For Secrets Manager apps, copy the app data in JSON format - from ..proto import APIRequest_pb2, enterprise_pb2 + from ...proto import APIRequest_pb2, enterprise_pb2 from .. 
import api, utils record = self.records[self.selected_record] @@ -4522,7 +3854,7 @@ def action_sync_vault(self): try: # Run sync-down command - from .utils import SyncDownCommand + from ..utils import SyncDownCommand SyncDownCommand().execute(self.params) # Run enterprise-down if available (enterprise users) @@ -4712,149 +4044,3 @@ def action_quit(self): """Quit the application""" self._stop_totp_timer() self.exit() - - -class SuperShellCommand(Command): - """Command to launch the SuperShell TUI""" - - def get_parser(self): - from argparse import ArgumentParser - parser = ArgumentParser(prog='supershell', description='Launch full terminal vault UI with vim navigation') - # -h/--help is automatically added by ArgumentParser - return parser - - def is_authorised(self): - """Don't require pre-authentication - TUI handles all auth""" - return False - - def execute(self, params, **kwargs): - """Launch the SuperShell TUI - handles login if needed""" - from .. import display - from ..cli import debug_manager - - # Show government warning for GOV environments when entering SuperShell - if params.server and 'govcloud' in params.server.lower(): - display.show_government_warning() - - # Disable debug mode for SuperShell to prevent log output from messing up the TUI - saved_debug = getattr(params, 'debug', False) - saved_log_level = logging.getLogger().level - if saved_debug or logging.getLogger().level == logging.DEBUG: - params.debug = False - debug_manager.set_console_debug(False, params.batch_mode) - # Also set root logger level to suppress all debug output - logging.getLogger().setLevel(logging.WARNING) - - try: - self._execute_supershell(params, **kwargs) - finally: - # Restore debug state when SuperShell exits - if saved_debug: - params.debug = saved_debug - debug_manager.set_console_debug(True, params.batch_mode) - logging.getLogger().setLevel(saved_log_level) - - def _execute_supershell(self, params, **kwargs): - """Internal method to run SuperShell""" - import 
threading - import time - import sys - - class Spinner: - """Animated spinner that runs in a background thread""" - def __init__(self, message="Loading..."): - self.message = message - self.running = False - self.thread = None - self.chars = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'] - self.colors = ['\033[36m', '\033[32m', '\033[33m', '\033[35m'] - - def _spin(self): - i = 0 - while self.running: - color = self.colors[i % len(self.colors)] - char = self.chars[i % len(self.chars)] - # Check running again before writing to avoid race condition - if not self.running: - break - sys.stdout.write(f"\r {color}{char}\033[0m {self.message}") - sys.stdout.flush() - time.sleep(0.1) - i += 1 - - def start(self): - self.running = True - self.thread = threading.Thread(target=self._spin, daemon=True) - self.thread.start() - - def stop(self, success_message=None): - self.running = False - if self.thread: - self.thread.join(timeout=0.5) - # Small delay to ensure thread has stopped writing - time.sleep(0.15) - # Clear spinner line (do it twice to handle any race condition) - sys.stdout.write("\r\033[K") - sys.stdout.write("\r\033[K") - sys.stdout.flush() - if success_message: - print(f" \033[32m✓\033[0m {success_message}") - - def update(self, message): - self.message = message - - # Check if authentication is needed - if not params.session_token: - from .utils import LoginCommand - try: - # Run login (no spinner - login may prompt for 2FA, password, etc.) 
- # show_help=False to suppress the batch mode help text - LoginCommand().execute(params, email=params.user, password=params.password, new_login=False, show_help=False) - - if not params.session_token: - logging.error("\nLogin failed or was cancelled.") - return - - # Sync vault data with spinner (no success message - TUI will load immediately) - sync_spinner = Spinner("Syncing vault data...") - sync_spinner.start() - try: - from .utils import SyncDownCommand - SyncDownCommand().execute(params) - sync_spinner.stop() # No success message - TUI loads immediately - except Exception as e: - sync_spinner.stop() - raise - - except KeyboardInterrupt: - print("\n\nLogin cancelled.") - return - except Exception as e: - logging.error(f"\nLogin failed: {e}") - return - - # Launch the TUI app - try: - app = SuperShellApp(params) - result = app.run() - - # If user pressed '!' to exit to shell, start the Keeper shell - if result and "Exited to Keeper shell" in str(result): - print(result) # Show the exit message - # Check if we were in batch mode BEFORE modifying it - was_batch_mode = params.batch_mode - # Clear batch mode and pending commands so the shell runs interactively - params.batch_mode = False - params.commands = [c for c in params.commands if c.lower() not in ('q', 'quit')] - # Only start a new shell if we were in batch mode (ran 'keeper supershell' directly) - # Otherwise, just return to the existing interactive shell - if was_batch_mode: - from ..cli import loop as shell_loop - shell_loop(params, skip_init=True, suppress_goodbye=True) - # When the inner shell exits, queue 'q' so the outer batch-mode loop also exits - params.commands.append('q') - except KeyboardInterrupt: - logging.debug("SuperShell interrupted") - except Exception as e: - logging.error(f"Error running SuperShell: {e}") - raise diff --git a/keepercommander/commands/supershell/command.py b/keepercommander/commands/supershell/command.py new file mode 100644 index 000000000..9d5188ec8 --- /dev/null +++ 
b/keepercommander/commands/supershell/command.py @@ -0,0 +1,161 @@ +""" +SuperShell command - launches the full-screen TUI + +This module contains the command that launches the SuperShell TUI interface. +""" + +import logging +from typing import TYPE_CHECKING + +from ...commands.base import Command + +if TYPE_CHECKING: + from ...params import KeeperParams + + +class SuperShellCommand(Command): + """Command to launch the SuperShell TUI""" + + def get_parser(self): + from argparse import ArgumentParser + parser = ArgumentParser(prog='supershell', description='Launch full terminal vault UI with vim navigation') + # -h/--help is automatically added by ArgumentParser + return parser + + def is_authorised(self): + """Don't require pre-authentication - TUI handles all auth""" + return False + + def execute(self, params: 'KeeperParams', **kwargs): + """Launch the SuperShell TUI - handles login if needed""" + from ... import display + from ...cli import debug_manager + + # Show government warning for GOV environments when entering SuperShell + if params.server and 'govcloud' in params.server.lower(): + display.show_government_warning() + + # Disable debug mode for SuperShell to prevent log output from messing up the TUI + saved_debug = getattr(params, 'debug', False) + saved_log_level = logging.getLogger().level + if saved_debug or logging.getLogger().level == logging.DEBUG: + params.debug = False + debug_manager.set_console_debug(False, params.batch_mode) + # Also set root logger level to suppress all debug output + logging.getLogger().setLevel(logging.WARNING) + + try: + self._execute_supershell(params, **kwargs) + finally: + # Restore debug state when SuperShell exits + if saved_debug: + params.debug = saved_debug + debug_manager.set_console_debug(True, params.batch_mode) + logging.getLogger().setLevel(saved_log_level) + + def _execute_supershell(self, params: 'KeeperParams', **kwargs): + """Internal method to run SuperShell""" + import threading + import time + import 
sys + + class Spinner: + """Animated spinner that runs in a background thread""" + def __init__(self, message="Loading..."): + self.message = message + self.running = False + self.thread = None + self.chars = ['\u280b', '\u2819', '\u2839', '\u2838', '\u283c', '\u2834', '\u2826', '\u2827', '\u2807', '\u280f'] + self.colors = ['\033[36m', '\033[32m', '\033[33m', '\033[35m'] + + def _spin(self): + i = 0 + while self.running: + color = self.colors[i % len(self.colors)] + char = self.chars[i % len(self.chars)] + # Check running again before writing to avoid race condition + if not self.running: + break + sys.stdout.write(f"\r {color}{char}\033[0m {self.message}") + sys.stdout.flush() + time.sleep(0.1) + i += 1 + + def start(self): + self.running = True + self.thread = threading.Thread(target=self._spin, daemon=True) + self.thread.start() + + def stop(self, success_message=None): + self.running = False + if self.thread: + self.thread.join(timeout=0.5) + # Small delay to ensure thread has stopped writing + time.sleep(0.15) + # Clear spinner line (do it twice to handle any race condition) + sys.stdout.write("\r\033[K") + sys.stdout.write("\r\033[K") + sys.stdout.flush() + if success_message: + print(f" \033[32m\u2713\033[0m {success_message}") + + def update(self, message): + self.message = message + + # Check if authentication is needed + if not params.session_token: + from ..utils import LoginCommand + try: + # Run login (no spinner - login may prompt for 2FA, password, etc.) 
+ # show_help=False to suppress the batch mode help text + LoginCommand().execute(params, email=params.user, password=params.password, new_login=False, show_help=False) + + if not params.session_token: + logging.error("\nLogin failed or was cancelled.") + return + + # Sync vault data with spinner (no success message - TUI will load immediately) + sync_spinner = Spinner("Syncing vault data...") + sync_spinner.start() + try: + from ..utils import SyncDownCommand + SyncDownCommand().execute(params) + sync_spinner.stop() # No success message - TUI loads immediately + except Exception as e: + sync_spinner.stop() + raise + + except KeyboardInterrupt: + print("\n\nLogin cancelled.") + return + except Exception as e: + logging.error(f"\nLogin failed: {e}") + return + + # Launch the TUI app - import here to avoid circular import + from .app import SuperShellApp + + try: + app = SuperShellApp(params) + result = app.run() + + # If user pressed '!' to exit to shell, start the Keeper shell + if result and "Exited to Keeper shell" in str(result): + print(result) # Show the exit message + # Check if we were in batch mode BEFORE modifying it + was_batch_mode = params.batch_mode + # Clear batch mode and pending commands so the shell runs interactively + params.batch_mode = False + params.commands = [c for c in params.commands if c.lower() not in ('q', 'quit')] + # Only start a new shell if we were in batch mode (ran 'keeper supershell' directly) + # Otherwise, just return to the existing interactive shell + if was_batch_mode: + from ...cli import loop as shell_loop + shell_loop(params, skip_init=True, suppress_goodbye=True) + # When the inner shell exits, queue 'q' so the outer batch-mode loop also exits + params.commands.append('q') + except KeyboardInterrupt: + logging.debug("SuperShell interrupted") + except Exception as e: + logging.error(f"Error running SuperShell: {e}") + raise diff --git a/keepercommander/commands/supershell/debug.py 
b/keepercommander/commands/supershell/debug.py new file mode 100644 index 000000000..2c92bb38f --- /dev/null +++ b/keepercommander/commands/supershell/debug.py @@ -0,0 +1,53 @@ +""" +SuperShell debug logging utilities + +Shared debug logging infrastructure for SuperShell components. +Set DEBUG_EVENTS to True to log events to /tmp/supershell_debug.log. + +Usage: + from .debug import debug_log, DEBUG_EVENTS + + debug_log("Key pressed: j") + +To watch events in real-time: + tail -f /tmp/supershell_debug.log +""" + +# Set to True to log all mouse/keyboard events to /tmp/supershell_debug.log +DEBUG_EVENTS = False + +_debug_log_file = None + + +def debug_log(msg: str) -> None: + """Log debug message to /tmp/supershell_debug.log if DEBUG_EVENTS is True. + + Args: + msg: The message to log. Will be prefixed with timestamp. + """ + if not DEBUG_EVENTS: + return + global _debug_log_file + try: + if _debug_log_file is None: + _debug_log_file = open('/tmp/supershell_debug.log', 'a') + import datetime + timestamp = datetime.datetime.now().strftime('%H:%M:%S.%f')[:-3] + _debug_log_file.write(f"[{timestamp}] {msg}\n") + _debug_log_file.flush() + except Exception: + pass # Silently fail if logging fails + + +def close_debug_log() -> None: + """Close the debug log file if open. + + Call this when the application exits to ensure clean shutdown. 
+ """ + global _debug_log_file + if _debug_log_file is not None: + try: + _debug_log_file.close() + except Exception: + pass + _debug_log_file = None diff --git a/keepercommander/commands/supershell/handlers/keyboard.py b/keepercommander/commands/supershell/handlers/keyboard.py index f83394827..11d502944 100644 --- a/keepercommander/commands/supershell/handlers/keyboard.py +++ b/keepercommander/commands/supershell/handlers/keyboard.py @@ -10,29 +10,12 @@ from rich.markup import escape as rich_escape -# Debug logging - writes to /tmp/supershell_debug.log when enabled -DEBUG_EVENTS = False -_debug_log_file = None - -def _debug_log(msg: str): - """Log debug message to /tmp/supershell_debug.log if DEBUG_EVENTS is True.""" - if not DEBUG_EVENTS: - return - global _debug_log_file - try: - if _debug_log_file is None: - _debug_log_file = open('/tmp/supershell_debug.log', 'a') - import datetime - timestamp = datetime.datetime.now().strftime('%H:%M:%S.%f')[:-3] - _debug_log_file.write(f"[{timestamp}] {msg}\n") - _debug_log_file.flush() - except Exception: - pass +from ..debug import debug_log as _debug_log if TYPE_CHECKING: from textual.events import Key from textual.widgets import Tree - from .._supershell_impl import SuperShellApp + from ..app import SuperShellApp class KeyHandler(ABC): @@ -406,7 +389,9 @@ def handle(self, event: 'Key', app: 'SuperShellApp') -> bool: search_bar.remove_class("search-active") if app.search_input_text: - search_display.update(rich_escape(app.search_input_text)) + # Escape all brackets (rich_escape only escapes matched pairs) + escaped = app.search_input_text.replace('\\', '\\\\').replace('[', '\\[').replace(']', '\\]') + search_display.update(escaped) else: search_display.update("[dim]Search...[/dim]") @@ -649,7 +634,9 @@ def handle(self, event: 'Key', app: 'SuperShellApp') -> bool: search_display = app.query_one("#search_display") if app.search_input_text: - search_display.update(rich_escape(app.search_input_text)) + # Escape all brackets 
(rich_escape only escapes matched pairs) + escaped = app.search_input_text.replace('\\', '\\\\').replace('[', '\\[').replace(']', '\\]') + search_display.update(escaped) else: search_display.update("[dim]Search...[/dim]") diff --git a/keepercommander/commands/supershell/themes/css.py b/keepercommander/commands/supershell/themes/css.py index dc4be9eb5..4ffbc72fc 100644 --- a/keepercommander/commands/supershell/themes/css.py +++ b/keepercommander/commands/supershell/themes/css.py @@ -186,13 +186,33 @@ #record_detail:focus { background: #0a0a0a; - border: solid #333333; } #record_detail:focus-within { background: #0a0a0a; } +/* Focus indicators - green left border shows which pane is active */ +#folder_panel:focus-within { + border-left: solid #00cc00; +} + +#record_panel:focus-within { + border-left: solid #00cc00; +} + +#shell_output_content:focus { + border-left: solid #00cc00; +} + +#shell_input_container:focus-within { + border-left: solid #00cc00; +} + +#search_bar.search-active { + border-left: solid #00cc00; +} + #status_bar { dock: bottom; height: 1; @@ -252,21 +272,72 @@ } #shell_output_content { + height: 1fr; background: #000000; color: #ffffff; - width: 100%; + border: none; + padding: 0 1; } -#shell_input_line { - height: 2; - background: #111111; - color: #00ff00; - padding: 1 1 0 1; +/* Theme-specific selection colors for shell output */ +#shell_output_content.theme-green .text-area--selection { + background: #004400; +} +#shell_output_content.theme-blue .text-area--selection { + background: #002244; +} +#shell_output_content.theme-magenta .text-area--selection { + background: #330033; +} +#shell_output_content.theme-yellow .text-area--selection { + background: #333300; +} +#shell_output_content.theme-white .text-area--selection { + background: #444444; +} +/* Default fallback */ +#shell_output_content .text-area--selection { + background: #004400; +} + +/* Shell input container with prompt and TextArea */ +#shell_input_container { + height: auto; + 
min-height: 3; + max-height: 6; + background: #000000; border-top: solid #333333; + border-bottom: solid #333333; + padding: 0 1; +} + +#shell_prompt { + width: 2; + height: 100%; + background: #000000; + color: #00ff00; + padding: 0; +} + +/* Shell input area - multi-line TextArea for command entry */ +#shell_input_area { + width: 1fr; + height: auto; + min-height: 1; + max-height: 5; + background: #000000; + color: #00ff00; + border: none; + padding: 0; } -#shell_pane:focus-within #shell_input_line { - background: #1a1a2e; +#shell_input_area:focus { + background: #000000; +} + +#shell_input_area .text-area--cursor { + color: #00ff00; + background: #00ff00; } """ diff --git a/keepercommander/commands/supershell/widgets/__init__.py b/keepercommander/commands/supershell/widgets/__init__.py index 8887cf33d..e7d4f4f59 100644 --- a/keepercommander/commands/supershell/widgets/__init__.py +++ b/keepercommander/commands/supershell/widgets/__init__.py @@ -7,9 +7,14 @@ from .clickable_line import ClickableDetailLine from .clickable_field import ClickableField from .clickable_uid import ClickableRecordUID +from .auto_copy_textarea import AutoCopyTextArea, safe_copy_to_clipboard +from .shell_input_textarea import ShellInputTextArea __all__ = [ 'ClickableDetailLine', 'ClickableField', 'ClickableRecordUID', + 'AutoCopyTextArea', + 'safe_copy_to_clipboard', + 'ShellInputTextArea', ] diff --git a/keepercommander/commands/supershell/widgets/auto_copy_textarea.py b/keepercommander/commands/supershell/widgets/auto_copy_textarea.py new file mode 100644 index 000000000..358e3e5dd --- /dev/null +++ b/keepercommander/commands/supershell/widgets/auto_copy_textarea.py @@ -0,0 +1,222 @@ +""" +Auto-copy TextArea widget for SuperShell + +A TextArea that automatically copies selected text to clipboard on mouse release, +similar to Linux terminal behavior. 
+""" + +from typing import TYPE_CHECKING + +from textual.widgets import TextArea +from textual.events import Click, MouseDown, MouseUp, MouseMove + +from ..debug import debug_log as _debug_log + +if TYPE_CHECKING: + from textual.widgets.text_area import Selection + + +def safe_copy_to_clipboard(text: str) -> tuple[bool, str]: + """Safely copy text to clipboard, handling missing clipboard on remote/headless systems. + + Returns: + (True, "") on success + (False, error_message) on failure + """ + try: + import pyperclip + from pyperclip import PyperclipException + pyperclip.copy(text) + return True, "" + except Exception as e: + if 'PyperclipException' in str(type(e)): + return False, "Clipboard not available (no X11/Wayland)" + return False, str(e) + + +class AutoCopyTextArea(TextArea): + """TextArea that auto-copies selected text to clipboard on mouse release. + + Behavior matches standard Linux terminal: + - Click and drag to select text + - Double-click to select word, drag to extend from word boundaries + - On mouse up, automatically copy selection to clipboard + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + import time + self._last_click_time = 0.0 + self._last_click_pos = (0, 0) + self._word_select_mode = False + self._word_anchor_start = None # (row, col) + self._word_anchor_end = None # (row, col) + + async def _on_mouse_down(self, event: MouseDown) -> None: + """Handle mouse down - detect double-click for word selection.""" + import time + current_time = time.time() + current_pos = (event.x, event.y) + + # Check for double-click (within 500ms and reasonably close position) + time_ok = (current_time - self._last_click_time) < 0.5 + pos_ok = (abs(current_pos[0] - self._last_click_pos[0]) <= 10 and + abs(current_pos[1] - self._last_click_pos[1]) <= 5) + is_double_click = time_ok and pos_ok + + # Update click tracking + self._last_click_time = current_time + self._last_click_pos = current_pos + + if is_double_click: + # 
Double-click: select word and prepare for drag + self._select_word_at_position(event) + else: + # Single click: reset word mode and do normal selection + self._word_select_mode = False + self._word_anchor_start = None + self._word_anchor_end = None + await super()._on_mouse_down(event) + + def _select_word_at_position(self, event: MouseDown) -> None: + """Select the word at the mouse position.""" + try: + location = self.get_target_document_location(event) + row, col = location + + lines = self.text.split('\n') + if row >= len(lines): + return + line = lines[row] + if col > len(line): + col = len(line) + + # Find word boundaries (whitespace-delimited) + start = col + while start > 0 and not line[start - 1].isspace(): + start -= 1 + + end = col + while end < len(line) and not line[end].isspace(): + end += 1 + + if start == end: + # No word at this position + return + + # Store anchors for potential drag extension + self._word_anchor_start = (row, start) + self._word_anchor_end = (row, end) + self._word_select_mode = True + + # Select the word + from textual.widgets.text_area import Selection + self.selection = Selection((row, start), (row, end)) + + # Set up for potential drag (like parent's _on_mouse_down) + self._selecting = True + self.capture_mouse() + self._pause_blink(visible=False) + self.history.checkpoint() + + except Exception as e: + _debug_log(f"AutoCopyTextArea._select_word_at_position error: {e}") + # On error, fall back to normal behavior + self._word_select_mode = False + + async def _on_mouse_move(self, event: MouseMove) -> None: + """Handle mouse move - extend selection if dragging.""" + if not self._selecting: + return + + try: + target = self.get_target_document_location(event) + from textual.widgets.text_area import Selection + + if self._word_select_mode and self._word_anchor_start: + # Word-select mode: anchor to original word boundaries + anchor_start = self._word_anchor_start + anchor_end = self._word_anchor_end + + if target < anchor_start: 
+ self.selection = Selection(target, anchor_end) + elif target > anchor_end: + self.selection = Selection(anchor_start, target) + else: + self.selection = Selection(anchor_start, anchor_end) + else: + # Normal drag: extend from original click position + selection_start, _ = self.selection + self.selection = Selection(selection_start, target) + except Exception: + pass + + async def _on_mouse_up(self, event: MouseUp) -> None: + """Handle mouse up - finalize selection and copy.""" + # Clean up word select state + self._word_select_mode = False + + # Let parent finalize selection mode + self._end_mouse_selection() + + # Always try to copy - _auto_copy_if_selected checks if there's actual selection + self._auto_copy_if_selected() + + def _on_click(self, event: Click) -> None: + """Handle click events - double-click selects and copies word.""" + # Double-click: select word and copy (backup for mouse_down detection) + if event.chain >= 2: + try: + location = self.get_target_document_location(event) + row, col = location + + lines = self.text.split('\n') + if row < len(lines): + line = lines[row] + if col > len(line): + col = len(line) + + # Find word boundaries + start = col + while start > 0 and not line[start - 1].isspace(): + start -= 1 + end = col + while end < len(line) and not line[end].isspace(): + end += 1 + + if start < end: + word = line[start:end] + # Select and copy the word + from textual.widgets.text_area import Selection + self.selection = Selection((row, start), (row, end)) + # Copy immediately + success, err = safe_copy_to_clipboard(word) + if success: + preview = word[:40] + ('...' 
if len(word) > 40 else '') + self.app.notify(f"Copied: {preview}", severity="information") + else: + self.app.notify(f" {err}", severity="warning") + except Exception: + pass + event.stop() + return + # Let parent handle single clicks + super()._on_click(event) + + def _auto_copy_if_selected(self) -> None: + """Copy selected text to clipboard if any.""" + try: + selected = self.selected_text + _debug_log(f"AutoCopyTextArea: selected_text={selected!r}") + if selected and selected.strip(): + success, err = safe_copy_to_clipboard(selected) + if success: + preview = selected[:40] + ('...' if len(selected) > 40 else '') + preview = preview.replace('\n', ' ') + # Use app.notify() instead of widget's notify() + self.app.notify(f"Copied: {preview}", severity="information") + _debug_log(f"AutoCopyTextArea: Copied to clipboard") + else: + self.app.notify(f" {err}", severity="warning") + except Exception as e: + _debug_log(f"AutoCopyTextArea: Error: {e}") diff --git a/keepercommander/commands/supershell/widgets/shell_input_textarea.py b/keepercommander/commands/supershell/widgets/shell_input_textarea.py new file mode 100644 index 000000000..e91881b2a --- /dev/null +++ b/keepercommander/commands/supershell/widgets/shell_input_textarea.py @@ -0,0 +1,127 @@ +""" +Shell input TextArea widget for SuperShell + +A TextArea specialized for shell command input with Enter-to-execute behavior +and shell history navigation. +""" + +from typing import TYPE_CHECKING + +from textual.widgets import TextArea, Tree + +if TYPE_CHECKING: + from ..app import SuperShellApp + + +class ShellInputTextArea(TextArea): + """TextArea for shell command input with Enter-to-execute behavior. 
+ + Features: + - Enter executes command instead of inserting newline + - Soft wrapping for long commands + - Multi-line display + - Integrates with shell history navigation + """ + + def __init__(self, app_ref: 'SuperShellApp', *args, **kwargs): + # Set defaults for shell input behavior + kwargs.setdefault('soft_wrap', True) + kwargs.setdefault('show_line_numbers', False) + kwargs.setdefault('tab_behavior', 'focus') # Tab cycles focus, not inserts tab + super().__init__(*args, **kwargs) + self._app_ref = app_ref + + async def _on_key(self, event) -> None: + """Intercept keys for shell-specific behavior.""" + # Enter executes command instead of inserting newline + if event.key == "enter": + command = self.text.strip() + self.clear() # Clear immediately for responsiveness + if command: + # Execute asynchronously with loading indicator + self._app_ref._execute_shell_command_async(command) + event.prevent_default() + event.stop() + return + + # Up arrow navigates history + if event.key == "up": + if self._app_ref.shell_command_history: + if self._app_ref.shell_history_index < len(self._app_ref.shell_command_history) - 1: + self._app_ref.shell_history_index += 1 + history_cmd = self._app_ref.shell_command_history[-(self._app_ref.shell_history_index + 1)] + self.clear() + self.insert(history_cmd) + event.prevent_default() + event.stop() + return + + # Down arrow navigates history + if event.key == "down": + if self._app_ref.shell_history_index > 0: + self._app_ref.shell_history_index -= 1 + history_cmd = self._app_ref.shell_command_history[-(self._app_ref.shell_history_index + 1)] + self.clear() + self.insert(history_cmd) + elif self._app_ref.shell_history_index == 0: + self._app_ref.shell_history_index = -1 + self.clear() + event.prevent_default() + event.stop() + return + + # Ctrl+U clears the input (bash-like) + if event.key == "ctrl+u": + self.clear() + self._app_ref.shell_history_index = -1 + event.prevent_default() + event.stop() + return + + # Ctrl+D closes shell 
pane + if event.key == "ctrl+d": + self._app_ref._close_shell_pane() + event.prevent_default() + event.stop() + return + + # Escape unfocuses the input + if event.key == "escape": + self._app_ref.shell_input_active = False + tree = self._app_ref.query_one("#folder_tree") + tree.focus() + self._app_ref._update_status("Shell open | Tab to cycle | press Enter in shell to run commands") + event.prevent_default() + event.stop() + return + + # Tab cycles to search mode + if event.key == "tab": + self._app_ref.shell_input_active = False + self._app_ref.search_input_active = True + tree = self._app_ref.query_one("#folder_tree", Tree) + tree.add_class("search-input-active") + search_bar = self._app_ref.query_one("#search_bar") + search_bar.add_class("search-active") + tree.focus() # Search mode works with tree focused + self._app_ref._update_search_display(perform_search=False) + self._app_ref._update_status("Type to search | Tab to tree | Ctrl+U to clear") + event.prevent_default() + event.stop() + return + + # Shift+Tab cycles to shell output pane + if event.key == "shift+tab": + self._app_ref.shell_input_active = False + try: + shell_output = self._app_ref.query_one("#shell_output_content", TextArea) + shell_output.focus() + except Exception: + pass + self._app_ref._update_status("Shell output | j/k to scroll | Tab to input | Shift+Tab to detail") + event.prevent_default() + event.stop() + return + + # Let parent TextArea handle all other keys (typing, backspace, cursor movement, etc.) 
+ await super()._on_key(event) diff --git a/unit-tests/test_api.py b/unit-tests/test_api.py index fca96387f..7601c1926 100644 --- a/unit-tests/test_api.py +++ b/unit-tests/test_api.py @@ -48,8 +48,9 @@ def test_search_records(self): def test_search_shared_folders(self): params = get_synced_params() + # Empty search returns all shared folders sfs = api.search_shared_folders(params, '') - self.assertEqual(len(sfs), 0) + self.assertEqual(len(sfs), len(params.shared_folder_cache)) sfs = api.search_shared_folders(params, 'folder') self.assertEqual(len(sfs), len(params.shared_folder_cache)) @@ -63,8 +64,9 @@ def test_search_shared_folders(self): def test_search_teams(self): params = get_synced_params() + # Empty search returns all teams teams = api.search_teams(params, '') - self.assertEqual(len(teams), 0) + self.assertEqual(len(teams), len(params.team_cache)) teams = api.search_shared_folders(params, 'team') self.assertEqual(len(teams), len(params.shared_folder_cache)) From 1bcde72f79895eb1d0de446b1d4f619e55e7c512 Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Wed, 4 Feb 2026 13:46:03 +0530 Subject: [PATCH 02/16] Add Enforcement and Managed Node Details to enterprise-info Roles and Prevent PAM records for One-Time Shares (#1798) (#1805) --- keepercommander/commands/enterprise.py | 98 +++++++++++++++++++++++++- keepercommander/commands/register.py | 7 ++ 2 files changed, 103 insertions(+), 2 deletions(-) diff --git a/keepercommander/commands/enterprise.py b/keepercommander/commands/enterprise.py index 09c4218ee..41a073a55 100644 --- a/keepercommander/commands/enterprise.py +++ b/keepercommander/commands/enterprise.py @@ -104,7 +104,8 @@ def register_command_info(aliases, command_info): SUPPORTED_USER_COLUMNS = ['name', 'status', 'transfer_status', 'node', 'team_count', 'teams', 'role_count', 'roles', 'alias', '2fa_enabled'] SUPPORTED_TEAM_COLUMNS = ['restricts', 'node', 'user_count', 'users', 'queued_user_count', 'queued_users', 'role_count', 'roles'] 
-SUPPORTED_ROLE_COLUMNS = ['visible_below', 'default_role', 'admin', 'node', 'user_count', 'users', 'team_count', 'teams'] +SUPPORTED_ROLE_COLUMNS = ['visible_below', 'default_role', 'admin', 'node', 'user_count', 'users', 'team_count', 'teams', + 'enforcement_count', 'enforcements', 'managed_node_count', 'managed_nodes', 'managed_nodes_permissions'] enterprise_data_parser = argparse.ArgumentParser(prog='enterprise-down', description='Download & decrypt enterprise data.') @@ -662,7 +663,15 @@ def tree_node(node): else: columns = set() if kwargs.get('columns'): - columns.update((x.strip() for x in kwargs.get('columns').split(','))) + raw_columns = kwargs.get('columns') + # Handle role(...) or roles(...) syntax by stripping the prefix and suffix + for prefix in ['roles(', 'role(', 'teams(', 'team(', 'users(', 'user(', 'nodes(', 'node(']: + if raw_columns.startswith(prefix): + raw_columns = raw_columns[len(prefix):] + if raw_columns.endswith(')'): + raw_columns = raw_columns[:-1] + break + columns.update((x.strip() for x in raw_columns.split(','))) pattern = (kwargs.get('pattern') or '').lower() if show_nodes: supported_columns = SUPPORTED_NODE_COLUMNS @@ -928,9 +937,54 @@ def tree_node(node): displayed_columns = [x for x in supported_columns if x in columns] + role_enforcements = {} # type: Dict[int, dict] + if 'role_enforcements' in params.enterprise: + for re_entry in params.enterprise['role_enforcements']: + role_id = re_entry.get('role_id') + if role_id: + role_enforcements[role_id] = re_entry.get('enforcements', {}) + + role_managed_nodes = {} # type: Dict[int, List[dict]] + if 'managed_nodes' in params.enterprise: + node_names = {x['node_id']: x['data'].get('displayname') or params.enterprise['enterprise_name'] + for x in params.enterprise['nodes']} + for mn in params.enterprise['managed_nodes']: + role_id = mn['role_id'] + if role_id not in role_managed_nodes: + role_managed_nodes[role_id] = [] + node_id = mn['managed_node_id'] + 
role_managed_nodes[role_id].append({ + 'node_id': node_id, + 'node_name': node_names.get(node_id, str(node_id)), + 'cascade': mn.get('cascade_node_management', False) + }) + + role_privileges = {} # type: Dict[int, Dict[int, List[str]]] + is_msp = EnterpriseCommand.is_msp(params) + supported_privileges = {x[1].lower(): x[2] for x in constants.ROLE_PRIVILEGES} + if 'role_privileges' in params.enterprise: + for rp in params.enterprise['role_privileges']: + privilege = rp['privilege'].lower() + if privilege not in supported_privileges: + continue + if supported_privileges[privilege] == constants.PrivilegeScope.Hidden: + continue + if supported_privileges[privilege] == constants.PrivilegeScope.MSP and not is_msp: + continue + role_id = rp['role_id'] + node_id = rp['managed_node_id'] + if role_id not in role_privileges: + role_privileges[role_id] = {} + if node_id not in role_privileges[role_id]: + role_privileges[role_id][node_id] = [] + role_privileges[role_id][node_id].append(privilege) + rows = [] for r in roles.values(): row = [r['id'], r['name']] + role_id = r['id'] + managed_nodes_list = role_managed_nodes.get(role_id, []) + for column in displayed_columns: if column == 'visible_below': row.append(r['visible_below']) @@ -949,6 +1003,46 @@ def tree_node(node): elif column == 'teams': team_names = [teams[team_uid]['name'] for team_uid in r['teams'] if team_uid in teams] row.append(team_names) + elif column == 'enforcement_count': + enforcements = role_enforcements.get(role_id, {}) + row.append(len(enforcements)) + elif column == 'enforcements': + enforcements = role_enforcements.get(role_id, {}) + row.append(list(enforcements.keys())) + elif column == 'managed_node_count': + row.append(len(managed_nodes_list)) + elif column == 'managed_nodes': + managed_node_info = [] + for mn in managed_nodes_list: + node_name = mn.get('node_name', '') + managed_node_info.append(node_name) + row.append(managed_node_info) + elif column == 'managed_nodes_permissions': + 
privileges_for_role = role_privileges.get(role_id, {}) + is_json = kwargs.get('format') == 'json' + permissions_info = [] + + for mn in managed_nodes_list: + node_id = mn.get('node_id') + node_name = mn.get('node_name', '') + cascade = mn.get('cascade', False) + privs = privileges_for_role.get(node_id, []) + + if is_json: + permissions_info.append({ + 'node_name': node_name, + 'node_id': node_id, + 'cascade': cascade, + 'privileges': privs + }) + else: + if privs: + permissions_info.append(f"{node_name} (cascade: {cascade}):") + for priv in privs: + permissions_info.append(f" {priv}") + else: + permissions_info.append(f"{node_name} (cascade: {cascade}): none") + row.append(permissions_info) if pattern: if not any(1 for x in row if x and str(x).lower().find(pattern) >= 0): continue diff --git a/keepercommander/commands/register.py b/keepercommander/commands/register.py index a2c662e06..1c1f5ed7b 100644 --- a/keepercommander/commands/register.py +++ b/keepercommander/commands/register.py @@ -2398,6 +2398,13 @@ def execute(self, params, **kwargs): urls = {} # type: Dict[str, str] for record_name in record_names: record_uid = OneTimeShareCommand.resolve_record(params, record_name) + + record = api.get_record(params, record_uid) + if record and hasattr(record, 'record_type'): + pam_record_types = {'pamDatabase', 'pamDirectory', 'pamMachine', 'pamUser', 'pamRemoteBrowser'} + if record.record_type in pam_record_types: + raise CommandError('one-time-share', 'One-Time Shares are currently not available for PAM records.') + record_key = params.record_cache[record_uid]['record_key_unencrypted'] client_key = utils.generate_aes_key() From 70a2415ffd0808a60424ece7d3527a709daf515e Mon Sep 17 00:00:00 2001 From: John Walstra Date: Wed, 4 Feb 2026 13:36:38 -0600 Subject: [PATCH 03/16] Configuration has no parent, skip getting ACL if configuration record. 
--- keepercommander/commands/pam_debug/graph.py | 25 ++++++++++++--------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/keepercommander/commands/pam_debug/graph.py b/keepercommander/commands/pam_debug/graph.py index 7b1fbfc0b..5f04313b6 100644 --- a/keepercommander/commands/pam_debug/graph.py +++ b/keepercommander/commands/pam_debug/graph.py @@ -339,17 +339,20 @@ def _handle(current_vertex: DAGVertex, parent_vertex: Optional[DAGVertex] = None return acl_text = "" - acl = user_service.get_acl(parent_vertex, current_vertex) - if acl is not None: - acl_text = self._f("None") - acl_parts = [] - if acl.is_service: - acl_parts.append(self._bl("Service")) - if acl.is_task: - acl_parts.append(self._bl("Task")) - if len(acl_parts) > 0: - acl_text = ", ".join(acl_parts) - acl_text = f"- {acl_text}" + if parent_vertex is not None: + acl = user_service.get_acl(resource_uid=parent_vertex.uid, user_uid=current_vertex.uid) + if acl is not None: + acl_text = self._f("No Services") + acl_parts = [] + if acl.is_service: + acl_parts.append(self._bl("Service")) + if acl.is_task: + acl_parts.append(self._bl("Task")) + if acl.is_iis_pool: + acl_parts.append(self._bl("Task")) + if len(acl_parts) > 0: + acl_text = ", ".join(acl_parts) + acl_text = f" -> {acl_text}" print(f"{pad}{record.record_type}, {record.title}, {record.record_uid}{acl_text}") From 28bb689e1a62ae2243dd69cd338bcbb638eee8be Mon Sep 17 00:00:00 2001 From: amangalampalli-ks Date: Sun, 8 Feb 2026 08:34:31 +0530 Subject: [PATCH 04/16] Fix invalid base64 cases (#1808) --- RECORD_ADD_DOCUMENTATION.md | 16 ++++++++--- keepercommander/commands/record_edit.py | 36 ++++++++++++++----------- keepercommander/rest_api.py | 17 +++++++++--- 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/RECORD_ADD_DOCUMENTATION.md b/RECORD_ADD_DOCUMENTATION.md index 68955359f..99372f7c2 100644 --- a/RECORD_ADD_DOCUMENTATION.md +++ b/RECORD_ADD_DOCUMENTATION.md @@ -36,6 +36,7 @@ record-add --title "Record Title" 
--record-type "RECORD_TYPE" [OPTIONS] [FIELDS. **Special Value Syntax:** - `$JSON:{"key": "value"}` - For complex object fields - `$GEN` - Generate passwords, TOTP codes, or key pairs +- `$BASE64:` - Decode base64-encoded values for any string field - `file=@filename` - File attachments ## Record Types @@ -511,10 +512,11 @@ echo "Emergency database access: $URL" | secure-send user@company.com 1. **Use single-line commands for copy-paste** to avoid trailing space issues 2. **Quote JSON values** to prevent shell interpretation 3. **Use $GEN for passwords** instead of hardcoding them -4. **Test with simple records first** before creating complex ones -5. **Use custom fields (c.) for non-standard data** -6. **Organize records in folders** using the `--folder` parameter -7. **Add meaningful notes** with `--notes` for context +4. **Use $BASE64: for complex passwords** with special characters to avoid shell escaping issues +5. **Test with simple records first** before creating complex ones +6. **Use custom fields (c.) for non-standard data** +7. **Organize records in folders** using the `--folder` parameter +8. **Add meaningful notes** with `--notes` for context ## Troubleshooting @@ -538,6 +540,12 @@ echo "Emergency database access: $URL" | secure-send user@company.com - Ensure file path is accessible - Use absolute paths to avoid confusion +**Base64 decoding errors** +- Ensure the base64 string is valid (test with `echo | base64 -d`) +- Use the `$BASE64:` prefix: `password='$BASE64:UEBzc3cwcmQh'` +- Remove any newlines or spaces from the base64 string +- Check that the decoded value is valid UTF-8 text + ## Record-Update vs Record-Add While `record-add` creates new records, `record-update` modifies existing records. 
Here's how they compare: diff --git a/keepercommander/commands/record_edit.py b/keepercommander/commands/record_edit.py index 3cf81c329..f8fa052d1 100644 --- a/keepercommander/commands/record_edit.py +++ b/keepercommander/commands/record_edit.py @@ -290,8 +290,6 @@ def assign_legacy_fields(self, record, fields): elif self.is_base64_value(parsed_field.value, action_params): if action_params: record.password = action_params[0] - else: - logging.warning('Base64 decoding failed for password field') else: record.password = parsed_field.value elif parsed_field.type == 'url': @@ -322,11 +320,13 @@ def is_json_value(self, value, parameters): # type: (str, List[Any]) -> Option value = value[5:] if value.startswith(':'): j_str = value[1:] - if j_str and isinstance(parameters, list): + if not j_str: + self.on_warning('JSON value cannot be empty. Format: $JSON:') + elif isinstance(parameters, list): try: parameters.append(json.loads(j_str)) except Exception as e: - self.on_warning(f'Invalid JSON value: {j_str}: {e}') + self.on_warning(f'Invalid JSON value: {e}') return True @staticmethod @@ -339,19 +339,27 @@ def is_generate_value(value, parameters): # type: (str, List[str]) -> Optiona parameters.extend((x.strip() for x in gen_parameters.split(','))) return True - @staticmethod - def is_base64_value(value, parameters): # type: (str, List[str]) -> Optional[bool] + def is_base64_value(self, value, parameters): # type: (str, List[str]) -> Optional[bool] """Check if value is base64-encoded and decode it.""" if value.startswith("$BASE64:"): encoded_value = value[8:] # Skip "$BASE64:" - if encoded_value and isinstance(parameters, list): + + # Validate and provide helpful error messages + if not encoded_value: + self.on_warning('Base64 value cannot be empty. 
Format: $BASE64:') + elif isinstance(parameters, list): try: - decoded_bytes = base64.b64decode(encoded_value) - decoded_str = decoded_bytes.decode('utf-8') - parameters.append(decoded_str) - return True + decoded_bytes = base64.b64decode(encoded_value, validate=True) + if not decoded_bytes: + self.on_warning('Base64 decoded to empty value') + else: + decoded_str = decoded_bytes.decode('utf-8') + if not decoded_str: + self.on_warning('Base64 decoded to empty string') + else: + parameters.append(decoded_str) except Exception as e: - logging.warning(f'Failed to decode base64 value: {e}') + self.on_warning(f'Invalid base64 value: {e}') return True return False @@ -599,10 +607,8 @@ def assign_typed_fields(self, record, fields): else: self.on_warning(f'Cannot generate a value for a \"{record_field.type}\" field.') elif self.is_base64_value(parsed_field.value, action_params): - if len(action_params) > 0: + if action_params: value = action_params[0] - else: - self.on_warning(f'Base64 decoding failed for field \"{record_field.type}\".') elif self.is_json_value(parsed_field.value, action_params): if len(action_params) > 0: value = self.validate_json_value(record_field.type, action_params[0]) diff --git a/keepercommander/rest_api.py b/keepercommander/rest_api.py index e0b170edd..4e1a4d0a3 100644 --- a/keepercommander/rest_api.py +++ b/keepercommander/rest_api.py @@ -226,14 +226,23 @@ def execute_rest(context, endpoint, payload): server_key_id = failure['key_id'] if 'qrc_ec_key_id' in failure: qrc_ec_key_id = failure['qrc_ec_key_id'] - if context.server_key_id != qrc_ec_key_id: + # Defensive check: qrc_ec_key_id must be EC key (7-18) + if not (7 <= qrc_ec_key_id <= 18): + logging.warning(f"Server returned invalid qrc_ec_key_id={qrc_ec_key_id} (expected EC key 7-18). 
Falling back to EC-only encryption.") + context.disable_qrc() + elif context.server_key_id != qrc_ec_key_id: # EC key mismatch: update and retry with QRC logging.debug(f"QRC EC key mismatch: updating from {context.server_key_id} to {qrc_ec_key_id}") context.server_key_id = qrc_ec_key_id - run_request = True - continue + run_request = True + continue elif server_key_id != context.server_key_id: - context.server_key_id = server_key_id + # If server returns non-EC key without qrc_ec_key_id, disable QRC + if not (7 <= server_key_id <= 18): + logging.warning(f"Server returned non-EC key_id={server_key_id} without qrc_ec_key_id. Falling back to EC-only encryption.") + context.disable_qrc() + else: + context.server_key_id = server_key_id run_request = True continue elif rs.status_code == 403: From 30e721e5b52b64ea0fd26db126a703f35399249c Mon Sep 17 00:00:00 2001 From: tbjones-ks Date: Thu, 12 Feb 2026 09:15:09 -0800 Subject: [PATCH 05/16] Commander | 45-add-keeper-db-proxy-option Change List: - Add --keeper-db-proxy (-kdbp) to pam tunnel edit for pamDatabase records - Add launch credential check when enabling DB proxy (--keeper-db-proxy=on) - Add --launch-user (-lu) flag to pam connection edit for setting launch credentials - Add check_if_resource_has_launch_credential() to TunnelDAG - Add is_launch_credential support to link_user/link_user_to_resource in TunnelDAG - Validate --launch-user record is pamUser type --- .../tunnel/port_forward/TunnelGraph.py | 21 ++++++-- .../commands/tunnel_and_connections.py | 54 +++++++++++++++++++ 2 files changed, 72 insertions(+), 3 deletions(-) diff --git a/keepercommander/commands/tunnel/port_forward/TunnelGraph.py b/keepercommander/commands/tunnel/port_forward/TunnelGraph.py index 44242b782..ca95b9f0e 100644 --- a/keepercommander/commands/tunnel/port_forward/TunnelGraph.py +++ b/keepercommander/commands/tunnel/port_forward/TunnelGraph.py @@ -325,15 +325,16 @@ def unlink_user_from_resource(self, user_uid, resource_uid) -> bool: return 
False - def link_user_to_resource(self, user_uid, resource_uid, is_admin=None, belongs_to=None): + def link_user_to_resource(self, user_uid, resource_uid, is_admin=None, belongs_to=None, is_launch_credential=None): resource_vertex = self.linking_dag.get_vertex(resource_uid) if resource_vertex is None or not self.resource_belongs_to_config(resource_uid): print(f"{bcolors.FAIL}Resource {resource_uid} does not belong to the configuration{bcolors.ENDC}") return False - self.link_user(user_uid, resource_vertex, is_admin, belongs_to) + self.link_user(user_uid, resource_vertex, is_admin, belongs_to, is_launch_credential=is_launch_credential) return None - def link_user(self, user_uid, source_vertex: DAGVertex, is_admin=None, belongs_to=None, is_iam_user=None): + def link_user(self, user_uid, source_vertex: DAGVertex, is_admin=None, belongs_to=None, is_iam_user=None, + is_launch_credential=None): user_vertex = self.linking_dag.get_vertex(user_uid) if user_vertex is None: @@ -347,6 +348,8 @@ def link_user(self, user_uid, source_vertex: DAGVertex, is_admin=None, belongs_t content["is_admin"] = bool(is_admin) if is_iam_user is not None: content["is_iam_user"] = bool(is_iam_user) + if is_launch_credential is not None: + content["is_launch_credential"] = bool(is_launch_credential) if user_vertex.vertex_type != RefType.PAM_USER: user_vertex.vertex_type = RefType.PAM_USER @@ -395,6 +398,18 @@ def check_if_resource_has_admin(self, resource_uid): return user_vertex.uid return False + def check_if_resource_has_launch_credential(self, resource_uid): + resource_vertex = self.linking_dag.get_vertex(resource_uid) + if resource_vertex is None: + return False + for user_vertex in resource_vertex.has_vertices(EdgeType.ACL): + acl_edge = user_vertex.get_edge(resource_vertex, EdgeType.ACL) + if acl_edge: + content = acl_edge.content_as_dict + if content.get('is_launch_credential'): + return user_vertex.uid + return False + def check_if_resource_allowed(self, resource_uid, setting): 
resource_vertex = self.linking_dag.get_vertex(resource_uid) content = get_vertex_content(resource_vertex) diff --git a/keepercommander/commands/tunnel_and_connections.py b/keepercommander/commands/tunnel_and_connections.py index 0f07017fd..48cd8b2dd 100644 --- a/keepercommander/commands/tunnel_and_connections.py +++ b/keepercommander/commands/tunnel_and_connections.py @@ -274,6 +274,9 @@ class PAMTunnelEditCommand(Command): pam_cmd_parser.add_argument('--remove-tunneling-override-port', '-rtop', required=False, dest='remove_tunneling_override_port', action='store_true', help='Remove tunneling override port') + pam_cmd_parser.add_argument('--keeper-db-proxy', '-kdbp', required=False, dest='keeper_db_proxy', + choices=['on', 'off', 'default'], + help='Enable/disable Keeper Database Proxy for pamDatabase records (on/off/default)') def get_parser(self): return PAMTunnelEditCommand.pam_cmd_parser @@ -422,8 +425,42 @@ def execute(self, params, **kwargs): if _tunneling is not None and tmp_dag.check_if_resource_allowed(record_uid, "portForwards") != _tunneling: dirty = True + # Handle --keeper-db-proxy option for database proxy routing (pamDatabase records only) + keeper_db_proxy = kwargs.get('keeper_db_proxy') + if keeper_db_proxy: + if record_type != 'pamDatabase': + raise CommandError('pam tunnel edit', + f'{bcolors.FAIL}--keeper-db-proxy is only supported for pamDatabase records. ' + f'Record "{record_name}" is of type "{record_type}".{bcolors.ENDC}') + if keeper_db_proxy == 'on' and not tmp_dag.check_if_resource_has_launch_credential(record_uid): + raise CommandError('', + f'{bcolors.FAIL}No Launch Credentials assigned to record "{record_uid}". 
' + f'Please assign launch credentials to the record before enabling ' + f'the database proxy.\n' + f'Use: {bcolors.OKBLUE}pam connection edit ' + f'--launch-user (-lu) {bcolors.ENDC}') + if not pam_settings: + pam_settings = vault.TypedField.new_field('pamSettings', {"connection": {}, "portForward": {}}, "") + record.custom.append(pam_settings) + if not pam_settings.value: + pam_settings.value.append({"connection": {}, "portForward": {}}) + if "connection" not in pam_settings.value[0]: + pam_settings.value[0]["connection"] = {} + current_value = pam_settings.value[0]["connection"].get('allowKeeperDBProxy') + if keeper_db_proxy == 'on' and current_value is not True: + pam_settings.value[0]["connection"]["allowKeeperDBProxy"] = True + dirty = True + elif keeper_db_proxy == 'off' and current_value is not False: + pam_settings.value[0]["connection"]["allowKeeperDBProxy"] = False + dirty = True + elif keeper_db_proxy == 'default' and current_value is not None: + pam_settings.value[0]["connection"].pop('allowKeeperDBProxy', None) + dirty = True + if dirty: tmp_dag.set_resource_allowed(resource_uid=record_uid, tunneling=_tunneling, allowed_settings_name=allowed_settings_name) + record_management.update_record(params, record) + api.sync_down(params) # Print out the tunnel settings if not kwargs.get('silent'): @@ -770,6 +807,9 @@ class PAMConnectionEditCommand(Command): parser.add_argument('--admin-user', '-a', required=False, dest='admin', action='store', help='The record path or UID of the PAM User record to configure the admin ' 'credential on the PAM Resource') + parser.add_argument('--launch-user', '-lu', required=False, dest='launch_user', action='store', + help='The record path or UID of the PAM User record to configure as the launch ' + 'credential on the PAM Resource') parser.add_argument('--protocol', '-p', dest='protocol', choices=protocols, help='Set connection protocol') parser.add_argument('--connections', '-cn', dest='connections', choices=choices, @@ -994,6 
+1034,20 @@ def execute(self, params, **kwargs): tdag.link_user_to_resource(admin_uid, record_uid, is_admin=True, belongs_to=True) # tdag.link_user_to_config(admin_uid) # is_iam_user=True + # launch-user parameter sets the launch credential on the resource + launch_user_name = kwargs.get('launch_user') + if launch_user_name: + launch_rec = RecordMixin.resolve_single_record(params, launch_user_name) + if not launch_rec: + raise CommandError('', + f'{bcolors.FAIL}Launch user record "{launch_user_name}" not found.{bcolors.ENDC}') + if not isinstance(launch_rec, vault.TypedRecord) or launch_rec.record_type != 'pamUser': + raise CommandError('', + f'{bcolors.FAIL}Launch user record must be a pamUser record type.{bcolors.ENDC}') + launch_uid = launch_rec.record_uid + if record_type in ("pamDatabase", "pamDirectory", "pamMachine"): + tdag.link_user_to_resource(launch_uid, record_uid, is_launch_credential=True, belongs_to=True) + # Print out PAM Settings if not kwargs.get("silent", False): tdag.print_tunneling_config(record_uid, record.get_typed_field('pamSettings'), config_uid) From 8384a4c4aae569445bf552d63eccdf4ffdd52ef0 Mon Sep 17 00:00:00 2001 From: idimov-keeper <78815270+idimov-keeper@users.noreply.github.com> Date: Thu, 12 Feb 2026 13:05:33 -0600 Subject: [PATCH 06/16] fixes trickle ICE for pam launch command (#1811) --- keepercommander/commands/pam/router_helper.py | 39 +++--- .../pam_launch/terminal_connection.py | 78 +++++++++--- .../tunnel/port_forward/tunnel_helpers.py | 115 +++++++++++++++--- 3 files changed, 187 insertions(+), 45 deletions(-) diff --git a/keepercommander/commands/pam/router_helper.py b/keepercommander/commands/pam/router_helper.py index 2d0badcc8..1cae08992 100644 --- a/keepercommander/commands/pam/router_helper.py +++ b/keepercommander/commands/pam/router_helper.py @@ -194,7 +194,7 @@ def _post_request_to_router(params, path, rq_proto=None, rs_type=None, method='p def router_send_action_to_gateway(params, gateway_action: GatewayAction, 
message_type, is_streaming, destination_gateway_uid_str=None, gateway_timeout=15000, transmission_key=None, - encrypted_transmission_key=None, encrypted_session_token=None): + encrypted_transmission_key=None, encrypted_session_token=None, http_session=None): # Default time out how long the response from the Gateway should be krouter_host = get_router_url(params) @@ -257,7 +257,8 @@ def router_send_action_to_gateway(params, gateway_action: GatewayAction, message transmission_key=transmission_key, rq_proto=rq, encrypted_transmission_key=encrypted_transmission_key, - encrypted_session_token=encrypted_session_token) + encrypted_session_token=encrypted_session_token, + http_session=http_session) rs_body = response.content @@ -306,8 +307,10 @@ def router_send_action_to_gateway(params, gateway_action: GatewayAction, message def router_send_message_to_gateway(params, transmission_key, rq_proto, - encrypted_transmission_key=None, encrypted_session_token=None): - + encrypted_transmission_key=None, encrypted_session_token=None, + http_session=None): + """Send controller message to gateway. 
When http_session is provided (for streaming/ALB affinity), + the request uses that session so cookies match the WebSocket connection.""" krouter_host = get_router_url(params) if not encrypted_transmission_key: @@ -325,16 +328,24 @@ def router_send_message_to_gateway(params, transmission_key, rq_proto, if not encrypted_session_token: encrypted_session_token = crypto.encrypt_aes_v2(utils.base64_url_decode(params.session_token), transmission_key) - rs = requests.post( - krouter_host+"/api/user/send_controller_message", - verify=VERIFY_SSL, - - headers={ - 'TransmissionKey': bytes_to_base64(encrypted_transmission_key), - 'Authorization': f'KeeperUser {bytes_to_base64(encrypted_session_token)}', - }, - data=encrypted_payload if rq_proto else None - ) + headers = { + 'TransmissionKey': bytes_to_base64(encrypted_transmission_key), + 'Authorization': f'KeeperUser {bytes_to_base64(encrypted_session_token)}', + } + if http_session is not None: + rs = http_session.post( + krouter_host + "/api/user/send_controller_message", + verify=VERIFY_SSL, + headers=headers, + data=encrypted_payload if rq_proto else None + ) + else: + rs = requests.post( + krouter_host + "/api/user/send_controller_message", + verify=VERIFY_SSL, + headers=headers, + data=encrypted_payload if rq_proto else None + ) if rs.status_code >= 300: raise Exception(str(rs.status_code) + ': error: ' + rs.reason + ', message: ' + rs.text) diff --git a/keepercommander/commands/pam_launch/terminal_connection.py b/keepercommander/commands/pam_launch/terminal_connection.py index 57dcf4d85..90c896f00 100644 --- a/keepercommander/commands/pam_launch/terminal_connection.py +++ b/keepercommander/commands/pam_launch/terminal_connection.py @@ -22,6 +22,10 @@ import sys import base64 import json +import secrets +import shutil +import time +import uuid from typing import TYPE_CHECKING, Optional, Dict, Any import requests @@ -61,6 +65,7 @@ ) from ...proto import pam_pb2 from ...display import bcolors +from .python_handler import 
create_python_handler if TYPE_CHECKING: from ...params import KeeperParams @@ -933,7 +938,6 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, seed = base64_to_bytes(seed) else: # Generate a random seed if not present - import secrets seed = secrets.token_bytes(32) logging.debug("No trafficEncryptionSeed found, using generated seed") @@ -1001,7 +1005,6 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, # Register the encryption key in the global conversation store register_conversation_key(conversation_id, symmetric_key) # Create a temporary tunnel session - import uuid temp_tube_id = str(uuid.uuid4()) # Pre-create tunnel session to buffer early ICE candidates @@ -1024,6 +1027,23 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, no_trickle_ice = kwargs.get('no_trickle_ice', False) trickle_ice = not no_trickle_ice + # For trickle ICE, use shared tokens and bind_to_controller for ALB stickiness (same worker for WebSocket + POST) + router_tokens = None + http_session = None + cookie_header = None + if trickle_ice: + router_tokens = get_keeper_tokens(params) + http_session = requests.Session() + krouter_host = get_router_url(params) + try: + bind_url = krouter_host + "/api/user/bind_to_controller/" + gateway_uid + http_session.get(bind_url, verify=VERIFY_SSL, timeout=10) + except Exception as e: + logging.debug("bind_to_controller GET failed (continuing): %s", e) + if http_session.cookies: + cookie_header = "; ".join(f"{c.name}={c.value}" for c in http_session.cookies) + logging.debug("Bound to controller for ALB stickiness (WebSocket and streaming HTTP will use same backend)") + # Create signal handler for Rust events signal_handler = TunnelSignalHandler( params=params, @@ -1034,7 +1054,9 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, conversation_id=conversation_id, tube_registry=tube_registry, tube_id=temp_tube_id, - trickle_ice=trickle_ice + trickle_ice=trickle_ice, + router_tokens=router_tokens, + http_session=http_session ) # 
Store signal handler reference @@ -1052,9 +1074,6 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, handler_callback = None if use_python_handler: - # Import and create PythonHandler for simplified Guacamole protocol handling - from .python_handler import create_python_handler - logging.debug("Using PythonHandler mode - Rust handles control frames automatically") # Set conversationType to "python_handler" to enable PythonHandler protocol mode in Rust @@ -1160,12 +1179,33 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, logging.debug(f"Registered encryption key for conversation: {conversation_id}") logging.debug(f"Expecting WebSocket responses for conversation ID: {conversation_id}") - # Start WebSocket listener - websocket_thread = start_websocket_listener(params, tube_registry, timeout=300, gateway_uid=gateway_uid, tunnel_session=tunnel_session) + # Start WebSocket listener (pass cookie_header for ALB stickiness when trickle ICE) + websocket_thread = start_websocket_listener( + params, tube_registry, timeout=300, gateway_uid=gateway_uid, + tunnel_session=tunnel_session, + router_tokens=router_tokens, + cookie_header=cookie_header + ) - # Wait a moment for WebSocket to establish connection - import time - time.sleep(1.5) + # Wait for WebSocket to be ready before sending offer (same as pam tunnel start). + # Use event.wait() when available so we proceed as soon as ready; fallback to short sleep. 
+ max_wait = 15.0 + # Same backend registration delay as when event is present (router/gateway need time to register) + backend_delay = float(os.environ.get('WEBSOCKET_BACKEND_DELAY', '2.0')) + if tunnel_session.websocket_ready_event: + logging.debug(f"Waiting for dedicated WebSocket to connect (max {max_wait}s)...") + websocket_ready = tunnel_session.websocket_ready_event.wait(timeout=max_wait) + if not websocket_ready: + logging.error(f"Dedicated WebSocket did not become ready within {max_wait}s") + signal_handler.cleanup() + unregister_tunnel_session(commander_tube_id) + return {"success": False, "error": "WebSocket connection timeout"} + logging.debug("Dedicated WebSocket connection established and ready for streaming") + logging.debug(f"Waiting {backend_delay}s for backend to register conversation...") + time.sleep(backend_delay) + else: + logging.warning("No WebSocket ready event for tunnel, using backend delay %.1fs", backend_delay) + time.sleep(backend_delay) # Send offer to gateway via HTTP POST logging.debug(f"{bcolors.OKBLUE}Sending {protocol} connection offer to gateway...{bcolors.ENDC}") @@ -1173,8 +1213,6 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, # Prepare the offer data with terminal-specific parameters # Match webvault format: host, size, audio, video, image (for guacd configuration) # These parameters are needed by Gateway to configure guacd BEFORE OpenConnection - import shutil - raw_columns = DEFAULT_TERMINAL_COLUMNS raw_rows = DEFAULT_TERMINAL_ROWS # Get terminal size for Guacamole size parameter @@ -1306,7 +1344,16 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, # Two paths: streaming vs non-streaming if trickle_ice: - # Streaming path: Response will come via WebSocket + # Streaming path: Response will come via WebSocket (use same tokens and session as WebSocket for ALB stickiness) + offer_kwargs = {} + if router_tokens and len(router_tokens) >= 3: + offer_kwargs = { + "transmission_key": router_tokens[2], + 
"encrypted_transmission_key": router_tokens[1], + "encrypted_session_token": router_tokens[0], + } + if http_session is not None: + offer_kwargs["http_session"] = http_session router_response = router_send_action_to_gateway( params=params, destination_gateway_uid_str=gateway_uid, @@ -1317,7 +1364,8 @@ def _open_terminal_webrtc_tunnel(params: KeeperParams, ), message_type=pam_pb2.CMT_CONNECT, is_streaming=True, # Response will come via WebSocket - gateway_timeout=30000 + gateway_timeout=30000, + **offer_kwargs ) logging.debug(f"{bcolors.OKGREEN}Offer sent to gateway (streaming mode){bcolors.ENDC}") diff --git a/keepercommander/commands/tunnel/port_forward/tunnel_helpers.py b/keepercommander/commands/tunnel/port_forward/tunnel_helpers.py index 84930ff40..0e67930e7 100644 --- a/keepercommander/commands/tunnel/port_forward/tunnel_helpers.py +++ b/keepercommander/commands/tunnel/port_forward/tunnel_helpers.py @@ -10,6 +10,7 @@ import time import ssl import asyncio +import requests from keeper_secrets_manager_core.utils import string_to_bytes, bytes_to_string, url_safe_str_to_bytes from cryptography.hazmat.backends import default_backend @@ -23,7 +24,7 @@ from ....commands.pam.config_helper import configuration_controller_get from ....commands.pam.pam_dto import GatewayAction, GatewayActionWebRTCSession from ....commands.pam.router_helper import router_get_relay_access_creds, get_dag_leafs, \ - get_router_ws_url, router_send_action_to_gateway + get_router_ws_url, get_router_url, router_send_action_to_gateway from ....display import bcolors from ....error import CommandError from ....subfolder import try_resolve_path @@ -934,7 +935,7 @@ async def connect_websocket_with_fallback(ws_endpoint, headers, ssl_context, tub # Each tunnel's ready_event is now signaled directly in connect_websocket_with_fallback() -async def handle_websocket_responses(params, tube_registry, timeout=60, gateway_uid=None, ready_event=None, stop_event=None): +async def 
handle_websocket_responses(params, tube_registry, timeout=60, gateway_uid=None, ready_event=None, stop_event=None, router_tokens=None, cookie_header=None): """ Direct WebSocket handler that connects, listens for responses, and routes them to Rust. Uses global conversation key store to support multiple concurrent tunnels. @@ -946,6 +947,9 @@ async def handle_websocket_responses(params, tube_registry, timeout=60, gateway_ gateway_uid: Gateway UID (optional, for filtering) ready_event: threading.Event to signal when WebSocket is connected stop_event: threading.Event to signal when WebSocket should close + router_tokens: Optional (encrypted_session_token, encrypted_transmission_key, transmission_key) + so WebSocket uses same auth as streaming HTTP (required for trickle ICE) + cookie_header: Optional Cookie header from bind_to_controller for ALB stickiness """ if not WEBSOCKETS_AVAILABLE: raise Exception("WebSocket library not available - install with: pip install websockets") @@ -956,12 +960,17 @@ async def handle_websocket_responses(params, tube_registry, timeout=60, gateway_ logging.debug(f"Connecting to WebSocket: {ws_endpoint}") - # Prepare headers using the same pattern as HTTP - encrypted_session_token, encrypted_transmission_key, _ = get_keeper_tokens(params) + # Use shared router tokens when provided (trickle ICE) so router associates this socket with streaming HTTP + if router_tokens and len(router_tokens) >= 2: + encrypted_session_token, encrypted_transmission_key = router_tokens[0], router_tokens[1] + else: + encrypted_session_token, encrypted_transmission_key, _ = get_keeper_tokens(params) headers = { 'TransmissionKey': bytes_to_base64(encrypted_transmission_key), 'Authorization': f'KeeperUser {bytes_to_base64(encrypted_session_token)}', } + if cookie_header: + headers['Cookie'] = cookie_header # Set up SSL context ssl_context = None if ws_endpoint.startswith('wss://'): @@ -1239,6 +1248,15 @@ def route_message_to_rust(response_item, tube_registry): # Send 
answer back to Gateway via HTTP POST logging.debug(f"Sending ICE restart answer to Gateway for tube {tube_id}") + answer_kwargs = {} + if signal_handler.trickle_ice and getattr(signal_handler, "_router_transmission_key", None) is not None: + answer_kwargs = { + "transmission_key": signal_handler._router_transmission_key, + "encrypted_transmission_key": signal_handler._router_encrypted_transmission_key, + "encrypted_session_token": signal_handler._router_encrypted_session_token, + } + if getattr(signal_handler, "_http_session", None) is not None: + answer_kwargs["http_session"] = signal_handler._http_session router_response = router_send_action_to_gateway( params=signal_handler.params, destination_gateway_uid_str=session.gateway_uid, @@ -1256,7 +1274,8 @@ def route_message_to_rust(response_item, tube_registry): ), message_type=pam_pb2.CMT_CONNECT, is_streaming=signal_handler.trickle_ice, # Streaming only for trickle ICE - gateway_timeout=GATEWAY_TIMEOUT + gateway_timeout=GATEWAY_TIMEOUT, + **answer_kwargs ) logging.debug(f"ICE restart answer sent for tube {tube_id}") @@ -1322,7 +1341,7 @@ def route_message_to_rust(response_item, tube_registry): logging.error(f"Full traceback: {traceback.format_exc()}") -def start_websocket_listener(params, tube_registry, timeout=60, gateway_uid=None, tunnel_session=None): +def start_websocket_listener(params, tube_registry, timeout=60, gateway_uid=None, tunnel_session=None, router_tokens=None, cookie_header=None): """ Start WebSocket listener in a background thread. 
@@ -1335,6 +1354,9 @@ def start_websocket_listener(params, tube_registry, timeout=60, gateway_uid=None timeout: Maximum time to listen for messages (seconds) gateway_uid: Gateway UID (optional) tunnel_session: TunnelSession instance for dedicated WebSocket (required) + router_tokens: Optional (encrypted_session_token, encrypted_transmission_key, transmission_key) + for trickle ICE so WebSocket uses same auth as streaming HTTP requests + cookie_header: Optional Cookie header from bind_to_controller for ALB stickiness Returns: (thread, is_reused) tuple - is_reused is always False (each tunnel gets its own WebSocket) @@ -1356,7 +1378,9 @@ def run_dedicated_websocket(): loop.run_until_complete(handle_websocket_responses( params, tube_registry, timeout, gateway_uid, ready_event=tunnel_session.websocket_ready_event, - stop_event=tunnel_session.websocket_stop_event + stop_event=tunnel_session.websocket_stop_event, + router_tokens=router_tokens, + cookie_header=cookie_header )) except Exception as e: logging.error(f"Dedicated WebSocket listener error for tunnel {tunnel_session.tube_id}: {e}") @@ -1412,7 +1436,7 @@ class TunnelSignalHandler: def __init__(self, params, record_uid, gateway_uid, symmetric_key, base64_nonce, conversation_id, tube_registry, tube_id=None, trickle_ice=False, websocket_router=None, - conversation_type='tunnel'): + conversation_type='tunnel', router_tokens=None, http_session=None): self.params = params self.record_uid = record_uid self.gateway_uid = gateway_uid @@ -1431,6 +1455,13 @@ def __init__(self, params, record_uid, gateway_uid, symmetric_key, base64_nonce, self.websocket_router = websocket_router # For key cleanup self.offer_sent = False # Track if offer has been sent to gateway self.buffered_ice_candidates = [] # Buffer ICE candidates until offer is sent + # Shared router auth for streaming: (encrypted_session_token, encrypted_transmission_key, transmission_key) + self._router_transmission_key = None + self._router_encrypted_transmission_key 
= None + self._router_encrypted_session_token = None + if router_tokens and len(router_tokens) >= 3: + self._router_encrypted_session_token, self._router_encrypted_transmission_key, self._router_transmission_key = router_tokens[0], router_tokens[1], router_tokens[2] + self._http_session = http_session # Shared session for ALB stickiness (bind_to_controller cookie) # WebSocket routing is handled automatically - no setup needed if trickle_ice and not WEBSOCKETS_AVAILABLE: @@ -1788,7 +1819,16 @@ def _send_ice_candidate_immediately(self, candidate_data, tube_id=None): logging.debug(f"Sending ICE candidate to gateway immediately") - # Send an ICE candidate via HTTP POST with streamResponse=True + # Use same router tokens and session as WebSocket when streaming (ALB stickiness) + ice_kwargs = {} + if self.trickle_ice and self._router_transmission_key is not None: + ice_kwargs = { + "transmission_key": self._router_transmission_key, + "encrypted_transmission_key": self._router_encrypted_transmission_key, + "encrypted_session_token": self._router_encrypted_session_token, + } + if self.trickle_ice and getattr(self, "_http_session", None) is not None: + ice_kwargs["http_session"] = self._http_session router_response = router_send_action_to_gateway( params=self.params, destination_gateway_uid_str=self.gateway_uid, @@ -1806,7 +1846,8 @@ def _send_ice_candidate_immediately(self, candidate_data, tube_id=None): ), message_type=pam_pb2.CMT_CONNECT, is_streaming=self.trickle_ice, # Streaming only for trickle ICE - gateway_timeout=GATEWAY_TIMEOUT + gateway_timeout=GATEWAY_TIMEOUT, + **ice_kwargs ) if self.trickle_ice: @@ -1848,7 +1889,15 @@ def _send_restart_offer(self, restart_sdp, tube_id): logging.debug(f"Sending ICE restart offer to gateway for tube {tube_id}") - # Send ICE restart offer via HTTP POST with streamResponse=True + restart_kwargs = {} + if self.trickle_ice and self._router_transmission_key is not None: + restart_kwargs = { + "transmission_key": 
self._router_transmission_key, + "encrypted_transmission_key": self._router_encrypted_transmission_key, + "encrypted_session_token": self._router_encrypted_session_token, + } + if self.trickle_ice and getattr(self, "_http_session", None) is not None: + restart_kwargs["http_session"] = self._http_session router_response = router_send_action_to_gateway( params=self.params, destination_gateway_uid_str=self.gateway_uid, @@ -1866,7 +1915,8 @@ def _send_restart_offer(self, restart_sdp, tube_id): ), message_type=pam_pb2.CMT_CONNECT, is_streaming=self.trickle_ice, # Streaming only for trickle ICE - gateway_timeout=GATEWAY_TIMEOUT + gateway_timeout=GATEWAY_TIMEOUT, + **restart_kwargs ) if self.trickle_ice: @@ -2052,6 +2102,25 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, # Register the temporary session so ICE candidates can be buffered immediately register_tunnel_session(temp_tube_id, tunnel_session) + # For trickle ICE, use shared tokens and bind_to_controller for ALB stickiness (same worker for WebSocket + POST) + router_tokens = None + http_session = None + cookie_header = None + if trickle_ice: + router_tokens = get_keeper_tokens(params) + logging.debug("Using shared router tokens for WebSocket and streaming HTTP") + http_session = requests.Session() + krouter_host = get_router_url(params) + verify_ssl = bool(os.environ.get("VERIFY_SSL", "TRUE").upper() == "TRUE") + try: + bind_url = krouter_host + "/api/user/bind_to_controller/" + gateway_uid + http_session.get(bind_url, verify=verify_ssl, timeout=10) + except Exception as e: + logging.debug(f"bind_to_controller GET failed (continuing): %s", e) + if http_session.cookies: + cookie_header = "; ".join(f"{c.name}={c.value}" for c in http_session.cookies) + logging.debug("Bound to controller for ALB stickiness (WebSocket and streaming HTTP will use same backend)") + # Create the tube to get the WebRTC offer with trickle ICE logging.debug("Creating WebRTC offer with trickle ICE gathering") @@ -2066,7 
+2135,9 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, tube_registry=tube_registry, tube_id=temp_tube_id, # Use temp ID initially trickle_ice=trickle_ice, - conversation_type=conversation_type + conversation_type=conversation_type, + router_tokens=router_tokens, + http_session=http_session ) # Store signal handler reference so we can send buffered candidates later @@ -2126,8 +2197,10 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, # Start DEDICATED WebSocket listener for this tunnel # Each tunnel gets its own WebSocket connection - no sharing, no contention! websocket_thread, is_websocket_reused = start_websocket_listener( - params, tube_registry, timeout=300, gateway_uid=gateway_uid, - tunnel_session=tunnel_session # Pass tunnel_session for dedicated WebSocket + params, tube_registry, timeout=300, gateway_uid=gateway_uid, + tunnel_session=tunnel_session, + router_tokens=router_tokens, + cookie_header=cookie_header ) # Wait for WebSocket to establish connection before sending streaming requests @@ -2202,6 +2275,15 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, logging.debug(f"Retry attempt {attempt}/{max_retries} after {retry_delay}s delay...") time.sleep(retry_delay) + offer_kwargs = {} + if trickle_ice and router_tokens: + offer_kwargs = { + "transmission_key": router_tokens[2], + "encrypted_transmission_key": router_tokens[1], + "encrypted_session_token": router_tokens[0], + } + if trickle_ice and http_session is not None: + offer_kwargs["http_session"] = http_session router_response = router_send_action_to_gateway( params=params, destination_gateway_uid_str=gateway_uid, @@ -2219,7 +2301,8 @@ def start_rust_tunnel(params, record_uid, gateway_uid, host, port, ), message_type=pam_pb2.CMT_CONNECT, is_streaming=trickle_ice, # Streaming only for trickle ICE - gateway_timeout=GATEWAY_TIMEOUT + gateway_timeout=GATEWAY_TIMEOUT, + **offer_kwargs ) # Success! 
Break out of retry loop From 72c81b41f7447bbb486d54cbdba9ff38a7ee4f0e Mon Sep 17 00:00:00 2001 From: idimov-keeper <78815270+idimov-keeper@users.noreply.github.com> Date: Thu, 12 Feb 2026 13:08:22 -0600 Subject: [PATCH 07/16] KC-1121 Add pam project extend command (#1810) * Merged with upstream changes (incl. refactoring into base.py) extend.py still WIP * Initial implementation of pam project extend command * Fixed RBI handlers (RBI has no JIT nor AI settings) --- .gitignore | 1 + keepercommander/commands/discoveryrotation.py | 2 +- keepercommander/commands/pam_import/README.md | 17 +- keepercommander/commands/pam_import/base.py | 3334 ++++++++++++++++ .../commands/pam_import/commands.py | 20 + keepercommander/commands/pam_import/edit.py | 3336 +---------------- keepercommander/commands/pam_import/extend.py | 1351 +++++++ 7 files changed, 4772 insertions(+), 3289 deletions(-) create mode 100644 keepercommander/commands/pam_import/base.py create mode 100644 keepercommander/commands/pam_import/commands.py create mode 100644 keepercommander/commands/pam_import/extend.py diff --git a/.gitignore b/.gitignore index 7bc716bf0..02c4882b3 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ .DS_Store *.iml .cache/ +.cursor/ venv/ *.json *.pyc diff --git a/keepercommander/commands/discoveryrotation.py b/keepercommander/commands/discoveryrotation.py index 11802a5a4..627099419 100644 --- a/keepercommander/commands/discoveryrotation.py +++ b/keepercommander/commands/discoveryrotation.py @@ -73,7 +73,7 @@ from .pam_debug.rotation_setting import PAMDebugRotationSettingsCommand from .pam_debug.link import PAMDebugLinkCommand from .pam_debug.vertex import PAMDebugVertexCommand -from .pam_import.edit import PAMProjectCommand +from .pam_import.commands import PAMProjectCommand from .pam_launch.launch import PAMLaunchCommand from .pam_service.list import PAMActionServiceListCommand from .pam_service.add import PAMActionServiceAddCommand diff --git 
a/keepercommander/commands/pam_import/README.md b/keepercommander/commands/pam_import/README.md index 97db7271b..ed922152a 100644 --- a/keepercommander/commands/pam_import/README.md +++ b/keepercommander/commands/pam_import/README.md @@ -1,15 +1,28 @@ -## PAM Import Command +## PAM Project Import Commands PAM Import command helps customers with thousands of managed companies to automate the creation of folders, gateways, machines, users, connections, tunnels and (optionally) rotations. ### Command line options -`pam project import --name=project1 --filename=/path/to/import.json --dry-run` +Initial Import. +`pam project import --name=project1 --filename=/path/to/import.json [--dry-run]` - `--name`, `-n` → Project name _(overrides `"project":""` from JSON)_ - `--filename`, `-f` → JSON file to load import data from. - `--dry-run`, `-d` → Test import without modifying vault. +Adding new PAM resources and users to an existing PAM configuration from an import file. The command validates folders and records, then creates only new items (match by title, existing records are skipped). The import JSON format is the same. +`pam project extend --config=CONFIG --filename=/path/to/import.json [--dry-run]` + +- `--config`, `-c` → PAM Configuration record UID or title. +- `--filename`, `-f` → JSON file to load import data from. + +- `--dry-run`, `-d` → Test import without modifying vault. + +> **Notes:** +> - Use **`--dry-run`** to preview what would be created and to see detailed validation output without changing the vault. +> - If the command reports errors, run it again with **`--dry-run`** for more detailed error messages. + + ### JSON format details Text UI (TUI) elements (a.k.a. JSON Keys) match their Web UI counterparts so you can create the corresponding record type in your web vault to help you visualize all options and possible values. 
diff --git a/keepercommander/commands/pam_import/base.py b/keepercommander/commands/pam_import/base.py new file mode 100644 index 000000000..54a903825 --- /dev/null +++ b/keepercommander/commands/pam_import/base.py @@ -0,0 +1,3334 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' None: + self._initialize() + settings = settings if isinstance(settings, dict) else {} + environment_type = str(environment_type).strip() + if environment_type not in PAM_ENVIRONMENT_TYPES: + environment_type = str(settings.get("environment", "")).strip() + if environment_type not in PAM_ENVIRONMENT_TYPES: + logging.warning("Unrecognized environment type " + f"""{bcolors.WARNING}"{environment_type}"{bcolors.ENDC} """ + f"""must be one of {PAM_ENVIRONMENT_TYPES} - switching to "local" """) + environment_type = "local" + self.environment = environment_type + + # common properties shared across all PAM config types: + self.pam_resources = { + "controllerUid": controller_uid, + "folderUid": folder_uid + # "resourceRef": "" - unused/legacy + } + val = settings.get("title", None) + if isinstance(val, str): self.title = val + + # gateway_name, ksm_app_name used externally during gw creation, use controllerUid here + + choices = ("on", "off", "default") + val = settings.get("connections", None) + if isinstance(val, str) and val in choices: self.connections = val + val = settings.get("rotation", None) + if isinstance(val, str) and val in choices: self.rotation = val + val = settings.get("tunneling", None) + if isinstance(val, str) and val in choices: self.tunneling = val + val = settings.get("remote_browser_isolation", None) + if isinstance(val, str) and val in choices: self.remote_browser_isolation = val + val = settings.get("graphical_session_recording", None) + if isinstance(val, str) and val in choices: self.graphical_session_recording = val + val = settings.get("text_session_recording", None) + if isinstance(val, str) and val in choices: self.text_session_recording = val + val = 
settings.get("ai_threat_detection", None) + if isinstance(val, str) and val in choices: self.ai_threat_detection = val + val = settings.get("ai_terminate_session_on_detection", None) + if isinstance(val, str) and val in choices: self.ai_terminate_session_on_detection = val + + val = settings.get("port_mapping", None) # multiline + if isinstance(val, str): val = [val] + if (isinstance(val, list) and all(isinstance(x, str) and x != "" for x in val)): + self.port_mapping = val + elif val is not None: + logging.warning("Unrecognized port_mapping values (skipped) - expecting list of strings,"\ + """ ex. ["2222=ssh", "33060=mysql"]""") + + # {"type": "on-demand"} or {"type": "CRON", "cron": "30 18 * * *", "tz": "America/Chicago" } + val = settings.get("default_rotation_schedule", None) + if isinstance(val, dict): + schedule_type = str(val.get("type", "")).lower() + schedule_type = {"on-demand": "ON_DEMAND", "cron": "CRON"}.get(schedule_type, "") + if schedule_type != "": + if schedule_type == "ON_DEMAND": + self.default_rotation_schedule = { "type": "ON_DEMAND" } + elif schedule_type == "CRON": + cron = str(val.get("cron", "")).strip() + if cron: + self.default_rotation_schedule = { "type": "CRON", "cron": cron } + tz = str(val.get("tz", "")).strip() + if tz: self.default_rotation_schedule["tz"] = tz + else: + logging.warning("Skipped unrecognized CRON settings in default_rotation_schedule") + else: + logging.warning("Skipped unrecognized default_rotation_schedule type") + + self.scripts = PamScriptsObject.load(settings.get("scripts", None)) + self.attachments = PamAttachmentsObject.load(settings.get("attachments", None)) + + # Local Network + if environment_type == "local": + val = settings.get("network_id", None) + if isinstance(val, str): self.network_id = val + val = settings.get("network_cidr", None) + if isinstance(val, str): self.network_cidr = val + elif environment_type == "aws": + val = settings.get("aws_id", None) # required + if isinstance(val, str): 
self.aws_id = val + val = settings.get("aws_access_key_id", None) + if isinstance(val, str): self.aws_access_key_id = val + val = settings.get("aws_secret_access_key", None) + if isinstance(val, str): self.aws_secret_access_key = val + + val = settings.get("aws_region_names", None) # multiline + if isinstance(val, str): val = [val] + if (isinstance(val, list) and all(isinstance(x, str) and x != "" for x in val)): + self.aws_region_names = val + elif val is not None: + logging.warning("Unrecognized aws_region_names values (skipped) - expecting list of strings") + elif environment_type == "azure": + val = settings.get("az_entra_id", None) # required + if isinstance(val, str): self.az_entra_id = val + val = settings.get("az_client_id", None) # required + if isinstance(val, str): self.az_client_id = val + val = settings.get("az_client_secret", None) # required + if isinstance(val, str): self.az_client_secret = val + val = settings.get("az_subscription_id", None) # required + if isinstance(val, str): self.az_subscription_id = val + val = settings.get("az_tenant_id", None) # required + if isinstance(val, str): self.az_tenant_id = val + val = settings.get("az_resource_groups", None) # multiline + if isinstance(val, str): val = [val] + if (isinstance(val, list) and all(isinstance(x, str) and x != "" for x in val)): + self.az_resource_groups = val + elif val is not None: + logging.warning("Unrecognized az_resource_groups values (skipped) - expecting list of strings") + elif environment_type == "domain": + val = settings.get("dom_domain_id", None) # required + if isinstance(val, str): self.dom_domain_id = val + val = settings.get("dom_hostname", None) # required + if isinstance(val, str): self.dom_hostname = val + val = settings.get("dom_port", None) # required + if isinstance(val, int) and 0 <= val <= 65535: val = str(val) + if isinstance(val, str): self.dom_port = val + val = utils.value_to_boolean(settings.get("dom_use_ssl")) # required, bool + if isinstance(val, bool): 
self.dom_use_ssl = val + val = utils.value_to_boolean(settings.get("dom_scan_dc_cidr")) # optional, bool + if isinstance(val, bool): self.dom_scan_dc_cidr = val + val = settings.get("dom_network_cidr", None) # optional + if isinstance(val, str): self.dom_network_cidr = val + val = settings.get("dom_administrative_credential", None) # required, existing pamUser + if isinstance(val, str): self.dom_administrative_credential = val + # self.admin_credential_ref - will be resolved from dom_administrative_credential (later) + elif environment_type == "gcp": + val = settings.get("gcp_id", None) # required + if isinstance(val, str): self.gcp_id = val + # --service-account-key accepts only JSON.stringify(value) anyways + val = settings.get("gcp_service_account_key", None) # required + if isinstance(val, str): self.gcp_service_account_key = val + val = settings.get("gcp_google_admin_email", None) # required + if isinstance(val, str): self.gcp_google_admin_email = val + val = settings.get("gcp_region_names", None) # required, multiline + if isinstance(val, str): val = [val] + if (isinstance(val, list) and all(isinstance(x, str) and x != "" for x in val)): + self.gcp_region_names = val + elif val is not None: + logging.warning("Unrecognized gcp_region_names values (skipped) - expecting list of strings") + elif environment_type == "oci": + val = settings.get("oci_id", None) # required + if isinstance(val, str): self.oci_id = val + val = settings.get("oci_admin_id", None) # required + if isinstance(val, str): self.oci_admin_id = val + val = settings.get("oci_admin_public_key", None) # required + if isinstance(val, str): self.oci_admin_public_key = val + val = settings.get("oci_admin_private_key", None) # required + if isinstance(val, str): self.oci_admin_private_key = val + val = settings.get("oci_tenancy", None) # required + if isinstance(val, str): self.oci_tenancy = val + val = settings.get("oci_region", None) # required + if isinstance(val, str): self.oci_region = val + + 
+class PamScriptsObject(): + def __init__(self): + self.scripts: List[PamScriptObject] = [] + + @classmethod + def load(cls, data: Optional[Union[str, list]]) -> PamScriptsObject: + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Pam Scripts failed to load from: {str(data)[:80]}...") + if not(data and isinstance(data, list)): return obj + + for s in data: + so = PamScriptObject.load(s) + if so.validate(): + obj.scripts.append(so) + else: + logging.warning(f"""Script file not found (skipped): "{str(so.file)}" """) + if not obj.scripts: logging.warning("Skipped empty scripts section") + return obj + + # def to_json(self): pass # File upload will create the JSON format + + +class PamScriptObject(): + def __init__(self): + self.file: str = "" + self.script_command: str = "" + self.additional_credentials: List[str] = [] + self.file_ref: str = "" # fileRef generated by file upload + self.record_refs: List[str] = [] # "recordRef":["uid1","uid2"] from additional_credentials + + def validate(self): + valid = isinstance(self.file, str) + valid = valid and Path(self.file).resolve().exists() + return valid + + @classmethod + def load(cls, data: Union[str, dict]) -> PamScriptObject: + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"PAM script failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + # TUI: "script": { "script_command": "pwsh.exe", "file": "path/file.ext", "additional_credentials": ["admin1", "user2"] }, + # JSON: "script": [{"command":"", "fileRef":"path/file.ext", "recordRef": ["uid1", "uid2"]}] + # use file upload to attach to existing record and get UIDs + cmd = data.get("script_command", None) + if isinstance(cmd, str) and cmd.strip() != "": obj.script_command = cmd.strip() + file = data.get("file", None) + if isinstance(file, str) and file.strip() != "": obj.file = file.strip() + # before use call validate() which 
also checks if file exists + + # NB! If script has additional_credentials these must be added later, + # after pamUser creation + acs = data.get("additional_credentials", None) + if isinstance(acs, str): acs = [acs] + if isinstance(acs, list) and acs: obj.additional_credentials = acs + + return obj + + # def to_json(self): pass # File upload will create the JSON format + + +class PamAttachmentsObject(): + def __init__(self): + self.attachments: List[PamAttachmentObject] = [] + # self.file_ref: List[str] # fileRef: [] populated by file upload + + @classmethod + def load(cls, data: Optional[Union[str, list]]) -> PamAttachmentsObject: + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"PAM Attachments failed to load from: {str(data)[:80]}...") + if not(data and isinstance(data, list)): return obj + + for a in data: + if isinstance(a, str): a = { "file": a } + ao = PamAttachmentObject.load(a) + if ao.validate(): + obj.attachments.append(ao) + else: + logging.warning(f"""File attachment not found (skipped): "{str(ao.file)}" """) + if not obj.attachments: logging.warning("Skipped empty file attachments section") + return obj + + # def to_json(self): pass # File upload will create the JSON format + + +class PamAttachmentObject(): + def __init__(self): + self.file: str = "" + self.title: str = "" + self.file_ref: str = "" # fileRef generated by file upload + + def validate(self): + valid = isinstance(self.file, str) + valid = valid and Path(self.file).resolve().exists() + return valid + + @classmethod + def load(cls, data: Union[str, dict]) -> PamAttachmentObject: + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Failed to load file attachment from: {str(data)[:80]}") + if isinstance(data, str): data = {"file": data} + if not isinstance(data, dict): return obj + + # TUI: "attachments": [{ "file": "path/file.ext", "title": "File1" }] + # TUI: "attachments": 
["path/file1", "file2"] - currently / title=filename + # JSON: "fileRef": ["uid1", "uid2"] # file upload generated + # use file upload to attach to existing record and get UIDs + title = data.get("title", None) + if isinstance(title, str) and title.strip() != "": obj.title = title.strip() + file = data.get("file", None) + if isinstance(file, str) and file.strip() != "": obj.file = file.strip() + # before use call validate() which also checks if file exists + + return obj + + # def to_json(self): pass # File upload will create the JSON format + + +class PamRotationScheduleObject(): + def __init__(self): + self.type: str = "" # on-demand|CRON + self.cron: str = "" # ex. "cron": "30 18 * * *" + self.tz: str = "" # timezone - default = "Etc/UTC" + # {"type": "on-demand"}|{"type": "CRON", "cron": "30 18 * * *"} + # http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html#examples + + @classmethod + def load(cls, data: Union[str, dict]) -> PamRotationScheduleObject: + schedule_types = ("on-demand", "cron") + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Failed to load rotation schedule from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + type = data.get("type", None) + if type and isinstance(type, str) and type.strip().lower() in schedule_types: + obj.type = type.strip().lower() + elif type: + logging.error(f"""Schedule type "{str(type)[:80]}" is unknown - must be one of {schedule_types}""") + + if obj.type.lower() == "cron": + cron = data.get("cron", None) + if isinstance(cron, str) and cron.strip() != "": obj.cron = cron.strip() + if obj.cron: # validate + try: + parsed_cron = vault.TypedField.import_schedule_field(obj.cron) + except: + parsed_cron = {} + if not (parsed_cron and parsed_cron.get("time", "")): + logging.error(f"Failed to load CRON from: {obj.cron}") + tz = data.get("tz", None) + if isinstance(tz, str) and tz.strip() != "": obj.tz = tz.strip() + + 
return obj + +class PamRotationParams(): + def __init__(self, configUid: str, profiles: dict): + self.configUid: str = configUid # iam_user|scripts_only=NOOP + self.ownerUid: str = "" # general - pamMachine rec UID + self.ownerTitle: str = "" # general - pamMachine rec title + self.rotation_profiles: dict = profiles or {} + +class PamRotationSettingsObject(): + def __init__(self): + self.rotation: str = "" # general|iam_user|scripts_only=NOOP + self.resource: str = "" # general:MachineTitle, IAM/Scripts:skip - auto/PamConfig + self.enabled: str = "" # on|off|default + self.schedule = None # {"type": "on-demand"}|{"type": "CRON", "cron": "30 18 * * *"} + self.password_complexity: str = "" # "32,5,5,5,5" + self.resourceUid: str = "" # general:machineUID, iam_user,scripts_only:PamConfigUID + + @classmethod + def load(cls, data: Optional[Union[str, dict]], rotation_params: Optional[PamRotationParams] = None) -> PamRotationSettingsObject: + rotation_types = ("general", "iam_user", "scripts_only") + enabled_types = ("on", "off", "default") + rx_complexity = r"^(\d+,\d+,\d+,\d+,\d+)$" + obj = cls() + + # autodetect profile name (and load from rotation_profiles section) + if isinstance(data, str) and rotation_params and isinstance(rotation_params.rotation_profiles, dict): + profile = rotation_params.rotation_profiles.get(data, None) + if profile and isinstance(profile, dict): + data = profile + + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Failed to load rotation settings from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + rotation = data.get("rotation", None) + if rotation and isinstance(rotation, str) and rotation.strip().lower() in rotation_types: + obj.rotation = rotation.strip().lower() + elif rotation: + logging.error(f"""Rotation type "{str(rotation)[:80]}" is unknown - must be one of {rotation_types}""") + + # type: iam_user|scripts_only=NOOP - automatically pick up current PAM Config + # type: 
general - automatically picks owner record (uid by title) + if obj.rotation == "general": + resource = data.get("resource", None) + if isinstance(resource, str) and resource.strip() != "": + obj.resource = resource.strip() + if rotation_params and rotation_params.ownerTitle: + if obj.resource and obj.resource.lower() != rotation_params.ownerTitle.lower(): + logging.warning("Rotation record owner must be its parent - replacing " + f"""configured owner "resource":"{obj.resource}" """ + f"""with actual parent "{rotation_params.ownerTitle}" """) + obj.resource = rotation_params.ownerTitle + elif obj.rotation in ("iam_user", "scripts_only"): + if rotation_params and rotation_params.configUid: + obj.resource = rotation_params.configUid + + enabled = data.get("enabled", None) + if enabled and isinstance(enabled, str) and enabled.strip().lower() in enabled_types: + obj.enabled = enabled.strip().lower() + elif enabled: + logging.error(f"""Unknown rotation enablement type "{str(enabled)[:80]}" - must be one of {enabled_types}""") + + obj.schedule = PamRotationScheduleObject.load(data.get("schedule", None) or "") + complexity = data.get("password_complexity", None) + if complexity and isinstance(complexity, str): + if re.fullmatch(rx_complexity, complexity): + obj.password_complexity = complexity.strip() + if complexity and not obj.password_complexity: + logging.error(f"""Invalid password complexity "{str(enabled)[:20]}" - must be in csv format, ex. 
"32,5,5,5,5" """) + # pwd_complexity_rule_list = {} populated by password_complexity + + return obj + + +class DagOptionValue(Enum): + ON = "on" + OFF = "off" + DEFAULT = "default" + + @classmethod + def map(cls, dag_option: str): + try: return cls(str(dag_option).lower()) + except ValueError: return None + +class DagSettingsObject(): + def __init__(self): + self.pam_resource: Optional[str] = None + self.rotation: Optional[DagOptionValue] = None + self.connections: Optional[DagOptionValue] = None + self.tunneling: Optional[DagOptionValue] = None + self.remote_browser_isolation: Optional[DagOptionValue] = None + self.graphical_session_recording: Optional[DagOptionValue] = None + self.text_session_recording: Optional[DagOptionValue] = None + self.ai_threat_detection: Optional[DagOptionValue] = None + self.ai_terminate_session_on_detection: Optional[DagOptionValue] = None + # NB! PAM User has its own rotation_settings: {}, cannot enable con/tun on user anyways + # remote_browser_isolation uses rbi, pam_resource, graphical_session_recording + # rotation uses only pam_resource, rotation + # machine/db/dir uses all + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"DAG settings failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + val = data.get("resource", None) + if isinstance(val, str): obj.pam_resource = val + obj.rotation = DagOptionValue.map(data.get("rotation", None) or "") + obj.connections = DagOptionValue.map(data.get("connections", None) or "") + obj.tunneling = DagOptionValue.map(data.get("tunneling", None) or "") + obj.remote_browser_isolation = DagOptionValue.map(data.get("remote_browser_isolation", None) or "") + obj.graphical_session_recording = DagOptionValue.map(data.get("graphical_session_recording", None) or "") + obj.text_session_recording = DagOptionValue.map(data.get("text_session_recording", None) or 
class DagJitSettingsObject():
    """JIT (just-in-time) ephemeral-account settings parsed from PAM import JSON."""
    def __init__(self):
        self.create_ephemeral: bool = False
        self.elevate: bool = False
        self.elevation_method: str = "group"
        self.elevation_string: str = ""
        self.base_distinguished_name: str = ""
        self.ephemeral_account_type: Optional[str] = None  # Omit if missing
        self.pam_directory_record: Optional[str] = None  # Title of pamDirectory from pam_data.resources[], resolved to UID
        self.pam_directory_uid: Optional[str] = None  # Resolved pamDirectory record UID (set in process_data)

    @classmethod
    def validate_enum_value(cls, value: str, allowed_values: List[str], field_name: str) -> Optional[str]:
        """Validate value against predefined list. Returns validated value or None if invalid."""
        if not value or value == "":
            return None  # Empty string not allowed for enum fields
        value_lower = value.lower()
        allowed_lower = [v.lower() for v in allowed_values]
        if value_lower in allowed_lower:
            # Return original case from allowed_values
            idx = allowed_lower.index(value_lower)
            return allowed_values[idx]
        logging.warning(f"Invalid {field_name} value '{value}'. Allowed: {allowed_values}. Skipping.")
        return None

    @classmethod
    def load(cls, data: Union[str, dict]) -> Optional['DagJitSettingsObject']:
        """Load JIT settings from JSON. Returns None if data is missing/empty."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # was a bare except
            logging.error(f"JIT settings failed to load from: {str(data)[:80]}")
            return None

        if not isinstance(data, dict):
            return None

        # Check if object is empty (no valid fields)
        has_valid_fields = False

        # Parse boolean fields with defaults
        create_ephemeral = utils.value_to_boolean(data.get("create_ephemeral", None))
        if create_ephemeral is not None:
            obj.create_ephemeral = create_ephemeral
            has_valid_fields = True

        elevate = utils.value_to_boolean(data.get("elevate", None))
        if elevate is not None:
            obj.elevate = elevate
            has_valid_fields = True

        # Parse elevation_method with validation (defaults to "group" if missing or invalid)
        elevation_method = data.get("elevation_method", None)
        if elevation_method is not None:
            validated = cls.validate_enum_value(str(elevation_method), ["group", "role"], "elevation_method")
            if validated:
                obj.elevation_method = validated
                has_valid_fields = True
            # If validation fails, keep the default "group" - still include in DAG JSON
        # If missing, keep the default "group" - still include in DAG JSON

        # Parse string fields
        elevation_string = data.get("elevation_string", None)
        if elevation_string is not None and str(elevation_string).strip():
            obj.elevation_string = str(elevation_string).strip()
            has_valid_fields = True

        base_distinguished_name = data.get("base_distinguished_name", None)
        if base_distinguished_name is not None and str(base_distinguished_name).strip():
            obj.base_distinguished_name = str(base_distinguished_name).strip()
            has_valid_fields = True

        # Parse ephemeral_account_type with validation (omit if missing)
        ephemeral_account_type = data.get("ephemeral_account_type", None)
        if ephemeral_account_type is not None:
            validated = cls.validate_enum_value(
                str(ephemeral_account_type),
                ["linux", "mac", "windows", "domain"],
                "ephemeral_account_type"
            )
            if validated:
                obj.ephemeral_account_type = validated
                has_valid_fields = True

        # Parse pam_directory_record (title of pamDirectory from pam_data.resources[]; resolved to pam_directory_uid later)
        pam_directory_record = data.get("pam_directory_record", None)
        if pam_directory_record is not None and str(pam_directory_record).strip():
            obj.pam_directory_record = str(pam_directory_record).strip()
            has_valid_fields = True

        # Silently ignore any other unknown fields (permissive parsing)

        # Return None if no valid fields were found (empty object)
        return obj if has_valid_fields else None

    def to_dag_dict(self) -> Dict[str, Any]:
        """Convert to DAG JSON format (camelCase)."""
        result = {
            "createEphemeral": self.create_ephemeral,
            "elevate": self.elevate,
            "elevationMethod": self.elevation_method,  # Always included (defaults to "group" if missing/invalid)
            "elevationString": self.elevation_string,
            "baseDistinguishedName": self.base_distinguished_name
        }
        # Only include ephemeralAccountType if it was set (omit if missing/invalid)
        if self.ephemeral_account_type:
            result["ephemeralAccountType"] = self.ephemeral_account_type
        return result
class DagAiSettingsObject():
    """AI threat-detection settings per risk level, parsed from PAM import JSON."""
    def __init__(self):
        self.version: str = "v1.0.0"
        self.risk_levels: Dict[str, Dict[str, Any]] = {}

    @classmethod
    def _parse_tag_list(cls, items: Any) -> List[str]:
        """Extract non-empty tag strings from a list of strings or {"tag": ...} dicts."""
        tags: List[str] = []
        if not isinstance(items, list):
            return tags
        for item in items:
            tag = ""
            if isinstance(item, str):
                tag = item.strip()
            elif isinstance(item, dict):
                tag = str(item.get("tag", "")).strip()
            if tag:
                tags.append(tag)
        return tags

    @classmethod
    def load(cls, data: Union[str, dict]) -> Optional['DagAiSettingsObject']:
        """Load AI settings from JSON. Returns None if data is missing/empty."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # was a bare except
            logging.error(f"AI settings failed to load from: {str(data)[:80]}")
            return None

        if not isinstance(data, dict):
            return None

        risk_levels = data.get("risk_levels", None)
        if not isinstance(risk_levels, dict):
            return None

        for level in ["critical", "high", "medium", "low"]:
            level_data = risk_levels.get(level, None)
            if not isinstance(level_data, dict):
                continue

            ai_session_terminate = utils.value_to_boolean(level_data.get("ai_session_terminate", None))
            activities = level_data.get("activities", None) or {}
            if not isinstance(activities, dict):
                activities = {}

            allow_tags = cls._parse_tag_list(activities.get("allow", []))
            deny_tags = cls._parse_tag_list(activities.get("deny", []))

            # Skip levels that carry no usable data at all
            if ai_session_terminate is None and not allow_tags and not deny_tags:
                continue

            obj.risk_levels[level] = {
                "ai_session_terminate": ai_session_terminate,
                "allow": allow_tags,
                "deny": deny_tags
            }

        return obj if obj.risk_levels else None

    def _build_tag_entries(self, tags: List[str], action: str, user_id: str) -> List[Dict[str, Any]]:
        """Wrap each tag in the DAG entry shape with a one-element auditLog."""
        entries: List[Dict[str, Any]] = []
        for tag in tags:
            if not tag:
                continue
            entries.append({
                "tag": tag,
                "auditLog": [{
                    "date": utils.current_milli_time(),
                    "userId": user_id,
                    "action": action
                }]
            })
        return entries

    def to_dag_dict(self, user_id: str) -> Optional[Dict[str, Any]]:
        """Convert to DAG JSON format (camelCase). Returns None when nothing to emit."""
        if not self.risk_levels:
            return None

        if not user_id:
            logging.warning("AI settings auditLog userId is missing; auditLog will have empty userId.")
            user_id = ""

        risk_levels: Dict[str, Any] = {}
        for level, data in self.risk_levels.items():
            level_out: Dict[str, Any] = {}

            if data.get("ai_session_terminate") is not None:
                level_out["aiSessionTerminate"] = data["ai_session_terminate"]

            tags_out: Dict[str, Any] = {}
            allow_entries = self._build_tag_entries(data.get("allow", []), "added_to_allow", user_id)
            if allow_entries:
                tags_out["allow"] = allow_entries
            deny_entries = self._build_tag_entries(data.get("deny", []), "added_to_deny", user_id)
            if deny_entries:
                tags_out["deny"] = deny_entries

            if tags_out:
                level_out["tags"] = tags_out

            if level_out:
                risk_levels[level] = level_out

        if not risk_levels:
            return None

        return {
            "version": self.version,
            "riskLevels": risk_levels
        }
class PamUserObject():
    """A pamUser record parsed from PAM import JSON."""
    def __init__(self):
        self.folder_path = None  # pam extend only
        self.uid_imported = None  # pam extend only - lookup by 1) uid 2) folder_path/title
        self.uid = ""
        self.type = "pamUser"
        self.title = None
        self.notes = None
        self.login = None
        self.password = None
        self.privatePEMKey = None
        self.distinguishedName = None
        self.connectDatabase = None
        self.managed = None
        self.oneTimeCode = None
        self.attachments = None  # fileRef
        self.scripts = None  # script
        self.rotation_settings = None  # DAG: rotation settings

    @classmethod
    def load(cls, data: Union[str, dict], rotation_params: Optional['PamRotationParams'] = None):
        """Parse a pamUser record from a JSON string or dict. Always returns an object."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # was a bare except
            logging.error(f"PAM User failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        dtype = str(data["type"]) if "type" in data else "pamUser"
        if dtype and dtype.lower() != "pamUser".lower():
            logging.warning(f"""PAM User data using wrong type "pamUser" != "{dtype[:80]}" """)

        obj.type = "pamUser"
        obj.title = str(data["title"]) if "title" in data else None
        obj.notes = str(data["notes"]) if "notes" in data else None

        obj.login = str(data["login"]) if "login" in data else None
        obj.password = str(data["password"]) if "password" in data else None
        obj.privatePEMKey = str(data["private_pem_key"]) if "private_pem_key" in data else None
        obj.distinguishedName = str(data["distinguished_name"]) if "distinguished_name" in data else None
        obj.connectDatabase = str(data["connect_database"]) if "connect_database" in data else None
        obj.managed = utils.value_to_boolean(data["managed"]) if "managed" in data else None
        obj.oneTimeCode = str(data["otp"]) if "otp" in data else None

        obj.attachments = PamAttachmentsObject.load(data.get("attachments", None))
        obj.scripts = PamScriptsObject.load(data.get("scripts", None))
        rso = PamRotationSettingsObject.load(data.get("rotation_settings", None), rotation_params)
        if not is_blank_instance(rso):
            obj.rotation_settings = rso

        obj.folder_path = str(data["folder_path"]) if "folder_path" in data else None
        obj.uid_imported = str(data["uid"]) if "uid" in data else None

        # Default the title from the login when no explicit title was provided
        if (obj.title is None or not obj.title.strip()) and obj.login and obj.login.strip():
            obj.title = f"PAM User - {str(obj.login).strip()}"

        obj.validate_record()

        return obj

    def create_record(self, params, folder_uid):
        """Create the pamUser record in the vault; returns the new record UID (or None)."""
        args = {
            "force": True,
            "folder": folder_uid,
            "record_type": self.type
        }
        if self.uid: args["record_uid"] = self.uid
        if self.title: args["title"] = self.title
        if self.notes: args["notes"] = self.notes

        fields = []
        if self.login: fields.append(f"f.login={self.login}")
        if self.password: fields.append(f"f.password={self.password}")
        if self.privatePEMKey: fields.append(f"f.secret.privatePEMKey={self.privatePEMKey}")
        if self.distinguishedName: fields.append(f"f.text.distinguishedName={self.distinguishedName}")
        if self.connectDatabase: fields.append(f"f.text.connectDatabase={self.connectDatabase}")

        managed = utils.value_to_boolean(self.managed)
        if managed is not None: fields.append(f"f.checkbox.managed={str(managed).lower()}")

        if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}")

        files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else []
        if files and isinstance(files, list):
            for x in files:
                if x and isinstance(x, PamAttachmentObject) and x.file:
                    fields.append(f"file=@{x.file}")

        if fields: args["fields"] = fields
        uid = RecordEditAddCommand().execute(params, **args)
        if uid and isinstance(uid, str):
            self.uid = uid

        # after record creation add PAM scripts
        if uid and self.scripts and self.scripts.scripts:
            add_pam_scripts(params, uid, self.scripts.scripts)

        # DAG: after record creation - self.scripts, self.rotation_settings
        return uid

    def validate_record(self):
        """Log warnings/errors for missing or invalid required data (never raises)."""
        # Fix: the warning is about `login`, so the check must be on login
        # (the original tested self.password, so a user with a password but
        # no login produced no warning, and one with login but no password
        # produced a misleading one).
        if not self.login:
            logging.warning("PAM User is missing required field `login`")
        if not self.rotation_settings:
            logging.debug("PAM User is missing rotation settings")
        if isinstance(self.rotation_settings, PamRotationSettingsObject):
            if (str(self.rotation_settings.rotation).lower() == "general" and
                    not self.rotation_settings.resource):
                logging.warning("PAM User with rotation type=general is missing required machine `resource=xxx`")
        if self.uid_imported is not None and (not isinstance(self.uid_imported, str) or not RecordV3.is_valid_ref_uid(self.uid_imported)):
            logging.error(f"PAM User uid_imported is not a valid UID: {self.uid_imported}")
class LoginUserObject():
    """A plain `login` record parsed from PAM import JSON."""
    def __init__(self):
        self.folder_path = None  # pam extend only
        self.uid_imported = None  # pam extend only - lookup by 1) uid 2) folder_path/title
        self.uid = ""
        self.type = "login"
        self.title = None
        self.notes = None
        self.login = None
        self.password = None
        self.url = None
        self.oneTimeCode = None
        self.attachments = None

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse a `login` record from a JSON string or dict. Always returns an object."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # was a bare except
            logging.error(f"Record type `login` failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        dtype = str(data["type"]) if "type" in data else "login"
        if dtype.lower() != "login".lower():
            logging.warning(f"""User data using wrong type "login" != "{dtype[:80]}" """)

        obj.uid = ""
        obj.type = "login"
        obj.title = str(data["title"]) if "title" in data else None
        obj.notes = str(data["notes"]) if "notes" in data else None

        obj.login = str(data["login"]) if "login" in data else None
        obj.password = str(data["password"]) if "password" in data else None
        obj.url = str(data["url"]) if "url" in data else None
        obj.oneTimeCode = str(data["otp"]) if "otp" in data else None
        obj.attachments = PamAttachmentsObject.load(data.get("attachments", None))

        obj.folder_path = str(data["folder_path"]) if "folder_path" in data else None
        obj.uid_imported = str(data["uid"]) if "uid" in data else None

        obj.validate_record()

        return obj

    def create_record(self, params, folder_uid):
        """Create the login record in the vault; returns the new record UID (or None)."""
        args = {
            "force": True,
            "folder": folder_uid,
            "record_type": self.type
        }
        if self.uid: args["record_uid"] = self.uid
        if self.title: args["title"] = self.title
        if self.notes: args["notes"] = self.notes

        fields = []
        if self.login: fields.append(f"f.login={self.login}")
        if self.password: fields.append(f"f.password={self.password}")
        if self.url: fields.append(f"f.url={self.url}")
        if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}")

        files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else []
        if files and isinstance(files, list):
            for x in files:
                if x and isinstance(x, PamAttachmentObject) and x.file:
                    fields.append(f"file=@{x.file}")

        if fields: args["fields"] = fields
        uid = RecordEditAddCommand().execute(params, **args)
        if uid and isinstance(uid, str):
            self.uid = uid
        return uid

    def validate_record(self):
        """Log an error when uid_imported is present but not a valid record UID."""
        if self.uid_imported is not None and (not isinstance(self.uid_imported, str) or not RecordV3.is_valid_ref_uid(self.uid_imported)):
            logging.error(f"Login User uid_imported is not a valid UID: {self.uid_imported}")
class PamBaseMachineParser():
    """Shared field parser for pamMachine / pamDatabase / pamDirectory import data.

    load() only extracts fields; the concrete Pam*Object classes pick the
    subset relevant to their record type.
    """
    def __init__(self):
        self.folder_path = None  # pam extend only
        self.uid_imported = None  # pam extend only - lookup by 1) uid 2) folder_path/title
        self.type = ""
        self.title = None
        self.notes = None
        self.host = None
        self.port = None
        self.sslVerification = None
        self.providerGroup = None
        self.providerRegion = None
        self.oneTimeCode = None
        self.attachments = None
        self.scripts = None
        self.pam_settings: Optional['PamSettingsFieldData'] = None  # quoted: forward ref, avoids eval at init

        # pamMachine
        self.operatingSystem = None
        self.instanceName = None
        self.instanceId = None
        # Warning! Unused, split into linked pamUser record
        self.login = None
        self.password = None
        self.privatePEMKey = None

        # pamDatabase
        self.useSSL = None
        self.databaseId = None
        self.databaseType = None  # postgresql|postgresql-flexible|mysql|mysql-flexible|mariadb|mariadb-flexible|mssql|oracle|mongodb

        # pamDirectory
        self.domainName = None
        self.alternativeIPs = None
        self.directoryId = None
        self.directoryType = None  # active_directory|openldap
        self.userMatch = None

    @classmethod
    def load(cls, record_type: str, data: Union[str, dict]):
        """Parse raw machine-like record data of the expected record_type. Always returns an object."""
        pam_machine_types = ("pamMachine", "pamDatabase", "pamDirectory")
        pam_db_types = ("postgresql", "postgresql-flexible", "mysql", "mysql-flexible", "mariadb", "mariadb-flexible", "mssql", "oracle", "mongodb")
        pam_dir_types = ("active_directory", "openldap")

        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # was a bare except
            logging.error(f"""Record type "{record_type}" failed to load from: {str(data)[:80]}""")
        if not isinstance(data, dict):
            return obj

        # Fix: str(data.get("type", None)) produced the literal string "None"
        # when the key was absent, polluting the log messages below.
        dtype = str(data.get("type") or "")
        data_type = next((s for s in pam_machine_types if s.lower() == dtype.lower()), None)
        rec_type = next((s for s in pam_machine_types if s.lower() == str(record_type).lower()), None)
        if rec_type and data_type and rec_type != data_type:
            logging.warning(f"""Expected machine record type "{rec_type}" != data record type "{data_type}" - Parsing as "{rec_type}" """)
        if rec_type is None:
            msg = f"""Unknown expected record type "{record_type}". """
            if data_type is None:
                msg = msg + f"""Unknown data record type "{dtype}" - Parsing it as generic pamMachine."""
            else:
                msg = msg + f"""Using data record type "{data_type}"."""
            logging.error(f"""{msg} Expected record types "{pam_machine_types}" """)

        obj.type = rec_type or data_type or "pamMachine"
        obj.title = str(data["title"]) if "title" in data else None
        obj.notes = str(data["notes"]) if "notes" in data else None
        obj.host = str(data["host"]) if "host" in data else None
        obj.port = str(data["port"]) if "port" in data else None
        obj.sslVerification = utils.value_to_boolean(data["ssl_verification"]) if "ssl_verification" in data else None
        obj.providerGroup = str(data["provider_group"]) if "provider_group" in data else None
        obj.providerRegion = str(data["provider_region"]) if "provider_region" in data else None
        obj.oneTimeCode = str(data["otp"]) if "otp" in data else None
        obj.attachments = PamAttachmentsObject.load(data.get("attachments", None))
        obj.scripts = PamScriptsObject.load(data.get("scripts", None))

        psd = data.get("pam_settings", None)
        if psd:
            obj.pam_settings = PamSettingsFieldData.load(psd)
            if not obj.pam_settings:
                logging.error(f"""{obj.type}: failed to load PAM Settings from "{str(data)[:80]}" """)

        # pamMachine
        obj.operatingSystem = str(data["operating_system"]) if "operating_system" in data else None
        obj.instanceName = str(data["instance_name"]) if "instance_name" in data else None
        obj.instanceId = str(data["instance_id"]) if "instance_id" in data else None
        # Warning! Unused, split into linked pamUser record
        obj.login = str(data["login"]) if "login" in data else None
        obj.password = str(data["password"]) if "password" in data else None
        obj.privatePEMKey = str(data["private_pem_key"]) if "private_pem_key" in data else None

        # pamDatabase
        obj.useSSL = utils.value_to_boolean(data["use_ssl"]) if "use_ssl" in data else None
        obj.databaseId = str(data["database_id"]) if "database_id" in data else None

        dbtype = str(data["database_type"]) if "database_type" in data else None
        pamdbt = next((s for s in pam_db_types if s.lower() == str(dbtype).lower()), None)
        if dbtype and not pamdbt:
            logging.error(f"""Unexpected DB type "{dbtype}" - should be one of the known DB types "{pam_db_types}" """)
            pamdbt = dbtype.lower()  # use provided db type "as-is"
        if not pamdbt and obj.type == "pamDatabase":
            logging.debug(f"""pamDatabase - unable to determine DB type: database_type should be one of "{pam_db_types}" """)
        obj.databaseType = pamdbt

        # pamDirectory
        obj.domainName = str(data["domain_name"]) if "domain_name" in data else None
        obj.alternativeIPs = multiline_to_str(parse_multiline(data, "alternative_ips", "Error parsing alternative_ips"))
        obj.directoryId = str(data["directory_id"]) if "directory_id" in data else None
        obj.userMatch = str(data["user_match"]) if "user_match" in data else None

        dt = str(data["directory_type"]) if "directory_type" in data else None
        pamdt = next((s for s in pam_dir_types if s.lower() == str(dt).lower()), None)
        if dt and not pamdt:
            logging.error(f"""Unexpected Directory type "{dt}" - should be one of "{pam_dir_types}" """)
            pamdt = dt.lower()  # use provided directory type "as-is"
        if not pamdt and obj.type == "pamDirectory":
            logging.debug(f"""pamDirectory - unable to determine Directory type: directory_type should be one of "{pam_dir_types}" """)
        obj.directoryType = pamdt  # active_directory|openldap

        obj.folder_path = str(data["folder_path"]) if "folder_path" in data else None
        obj.uid_imported = str(data["uid"]) if "uid" in data else None

        return obj
class PamMachineObject():
    """A pamMachine record (plus its linked pamUser records) parsed from PAM import JSON."""
    def __init__(self):
        self.folder_path = None  # pam extend only
        self.uid_imported = None  # pam extend only - lookup by 1) uid 2) folder_path/title
        self.uid = ""
        self.type = "pamMachine"
        self.title = None
        self.notes = None
        self.host = None  # pamHostname
        self.port = None  # pamHostname
        self.sslVerification = None
        self.operatingSystem = None
        self.instanceName = None
        self.instanceId = None
        self.providerGroup = None
        self.providerRegion = None
        self.oneTimeCode = None
        self.attachments = None  # fileRef
        self.scripts = None  # script

        # Warning! unused - use users[] to link users, rotation scripts etc.
        self.login = None
        self.password = None
        self.privatePEMKey = None

        self.pam_settings: Optional['PamSettingsFieldData'] = None  # quoted: forward ref, avoids eval at init
        self.users = None  # List[PamUserObject] - one is admin(istrative credential)

        self.is_admin_external: bool = False  # (True<=>found:pamDirectory#Title.pamUser#Title)
        self.administrative_credentials_uid: str = ""  # external or internal user UID

    @classmethod
    def load(cls, data: Union[str, dict], rotation_params: Optional['PamRotationParams'] = None):
        """Parse a pamMachine record from a JSON string or dict. Always returns an object."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # was a bare except
            logging.error(f"""Record type "pamMachine" failed to load from: {str(data)[:80]}""")
        if not isinstance(data, dict):
            return obj

        bmp = PamBaseMachineParser.load("pamMachine", data)

        if bmp and bmp.type.lower() != "pamMachine".lower():
            logging.warning(f"""PAM Machine data using wrong type "pamMachine" != "{bmp.type}" """)

        obj.type = "pamMachine"
        obj.title = bmp.title
        obj.notes = bmp.notes
        obj.host = bmp.host
        obj.port = bmp.port
        obj.sslVerification = bmp.sslVerification
        obj.operatingSystem = bmp.operatingSystem
        obj.instanceName = bmp.instanceName
        obj.instanceId = bmp.instanceId
        obj.providerGroup = bmp.providerGroup
        obj.providerRegion = bmp.providerRegion
        obj.oneTimeCode = bmp.oneTimeCode
        obj.attachments = bmp.attachments
        obj.scripts = bmp.scripts
        obj.pam_settings = bmp.pam_settings
        obj.folder_path = bmp.folder_path
        obj.uid_imported = bmp.uid_imported

        # Warning! unused - use users[] to link users, rotation scripts etc.
        obj.login = bmp.login
        obj.password = bmp.password
        obj.privatePEMKey = bmp.privatePEMKey

        if (obj.title is None or not obj.title.strip()) and obj.login and obj.login.strip():
            obj.title = f"PAM Machine - {str(obj.login).strip()}"
        if rotation_params:
            rotation_params.ownerTitle = obj.title or ""

        obj.users = []
        users = data.get("users", None)
        if users:
            for user in users:
                rt = str(user.get("type", "")) if isinstance(user, dict) else ""
                if not rt: rt = "pamUser"  # pamMachine user list is pamUser recs only
                if rt.lower() != "pamUser".lower():
                    logging.error(f"""{obj.title}:{obj.type}.users[] Expected record type pamUser, got "{rt}" - skipped.""")
                    continue
                usr = PamUserObject.load(user, rotation_params)
                if usr:
                    obj.users.append(usr)
        else:
            logging.warning(f"""Warning: PAM Machine "{obj.title}" with empty users section.""")

        obj.validate_record()

        return obj

    def create_record(self, params, folder_uid):
        """Create the pamMachine record in the vault; returns the new record UID (or None)."""
        args = {
            "force": True,
            "folder": folder_uid,
            "record_type": self.type
        }
        if self.uid: args["record_uid"] = self.uid
        if self.title: args["title"] = self.title
        if self.notes: args["notes"] = self.notes

        fields = []
        hostname = self.host.strip() if isinstance(self.host, str) and self.host.strip() else ""
        port = self.port.strip() if isinstance(self.port, str) and self.port.strip() else ""
        if hostname or port:
            val = json.dumps({"hostName": hostname, "port": port})
            fields.append(f"f.pamHostname=$JSON:{val}")

        sslv = utils.value_to_boolean(self.sslVerification)
        # NOTE(review): other checkbox fields use the "f." prefix (e.g. f.checkbox.managed,
        # f.checkbox.useSSL) - confirm whether the missing "f." here is intentional.
        if sslv is not None: fields.append(f"checkbox.sslVerification={str(sslv).lower()}")
        if self.operatingSystem: fields.append(f"f.text.operatingSystem={self.operatingSystem}")
        if self.instanceName: fields.append(f"f.text.instanceName={self.instanceName}")
        if self.instanceId: fields.append(f"f.text.instanceId={self.instanceId}")
        if self.providerGroup: fields.append(f"f.text.providerGroup={self.providerGroup}")
        if self.providerRegion: fields.append(f"f.text.providerRegion={self.providerRegion}")

        # Warning! unused - use users[] to link users, rotation scripts etc.
        # if self.login: fields.append(f"f.login={self.login}")
        # if self.password: fields.append(f"f.password={self.password}")
        # if self.privatePEMKey: fields.append(f"f.secret.privatePEMKey={self.privatePEMKey}")

        if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}")

        files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else []
        if files and isinstance(files, list):
            for x in files:
                if x and isinstance(x, PamAttachmentObject) and x.file:
                    fields.append(f"file=@{x.file}")

        # pam_settings port_forward/connection belong to the record
        if self.pam_settings and isinstance(self.pam_settings, PamSettingsFieldData):
            allowSupplyHost = True if self.pam_settings.allowSupplyHost is True else False
            portForward = self.pam_settings.portForward.to_record_dict() if self.pam_settings.portForward else {}
            connection = self.pam_settings.connection.to_record_dict() if self.pam_settings.connection else {}
            if portForward or connection or allowSupplyHost:
                val = json.dumps({"allowSupplyHost": allowSupplyHost, "portForward": portForward or {}, "connection": connection or {}})
                fields.append(f"c.pamSettings=$JSON:{val}")
                # switch to f.* once RT definition(s) update w/ pamSettings field

        if fields: args["fields"] = fields
        uid = RecordEditAddCommand().execute(params, **args)
        if uid and isinstance(uid, str):
            self.uid = uid

        # after record creation add PAM scripts
        if uid and self.scripts and self.scripts.scripts:
            add_pam_scripts(params, uid, self.scripts.scripts)

        # DAG: after record creation - self.scripts, self.pam_settings.options
        return uid

    def validate_record(self):
        """Log warnings/errors for legacy or missing data (never raises)."""
        # Warning! unused - use users[] to link users, rotation scripts etc.
        if self.login or self.password or self.privatePEMKey:
            logging.warning(f"""PAM Machine "{self.title}" detected legacy format - """
                            "please create separate pamUser record with login, password, privatePEMKey")
        if not (self.host or self.port):
            logging.warning(f"""PAM Machine "{self.title}" is missing required field `pamHostname` data (host/port)""")
        errmsg = validate_pam_connection(self.pam_settings.connection, "pamMachine") if self.pam_settings else ""
        if errmsg:
            logging.warning(f"""PAM Machine "{self.title}" has incorrect connection setup: {errmsg}""")
        if self.uid_imported is not None and (not isinstance(self.uid_imported, str) or not RecordV3.is_valid_ref_uid(self.uid_imported)):
            logging.error(f"PAM Machine uid_imported is not a valid UID: {self.uid_imported}")
def validate_pam_connection(connection, record_type):
    """Return "" when *connection* is compatible with *record_type*, otherwise a
    human-readable description of the mismatch (also logged at debug level).
    """
    errmsg = ""
    if connection:
        # Apparently all machine types allow connections using ANY protocol
        # ex. pamDatabase allowing SSH/RDP or pamMachine allowing proto: mysql
        known_conn_types = PamSettingsFieldData.pam_connection_classes + [ConnectionSettingsHTTP]
        known_mach_types = PamSettingsFieldData.pam_connection_classes
        known_db_types = known_mach_types
        known_rbi_types = [ConnectionSettingsHTTP]

        known_mach_proto = [x.protocol.value.lower() for x in known_mach_types]  # pylint: disable=E1101
        known_db_proto = [x.protocol.value.lower() for x in known_db_types]  # pylint: disable=E1101
        known_rbi_proto = [x.protocol.value.lower() for x in known_rbi_types]  # pylint: disable=E1101

        # record type -> (compatible connection classes, allowed protocols, display label)
        compatibility = {
            "pammachine": (known_mach_types, known_mach_proto, "PAM Machine"),
            "pamdatabase": (known_db_types, known_db_proto, "PAM Database"),
            "pamdirectory": (known_mach_types, known_mach_proto, "PAM Directory"),
            "pamremotebrowser": (known_rbi_types, known_rbi_proto, "PAM Remote Browser"),
        }

        rt = str(record_type).lower().strip()
        if type(connection) not in known_conn_types:
            errmsg = f"""PAM Connection of unknown type "{type(connection).__name__}" """
        elif rt in compatibility:
            allowed_types, allowed_proto, label = compatibility[rt]
            if type(connection) not in allowed_types:
                errmsg = f"""PAM Connection of type "{type(connection).__name__}" is incompatible with "{record_type}" """
            if (isinstance(getattr(connection, "protocol", ""), ConnectionProtocol) and
                    connection.protocol.value.lower() not in allowed_proto):
                errmsg = errmsg + f""" Unexpected {label} connection protocol "{connection.protocol.value}" """
    if errmsg:
        logging.debug(errmsg)
    return errmsg
class PamDatabaseObject():
    """A pamDatabase record (plus its linked pamUser records) parsed from PAM import JSON."""
    def __init__(self):
        self.folder_path = None  # pam extend only
        self.uid_imported = None  # pam extend only - lookup by 1) uid 2) folder_path/title
        self.uid = ""
        self.type = "pamDatabase"
        self.title = None
        self.notes = None
        self.host = None  # pamHostname
        self.port = None  # pamHostname
        self.useSSL = None
        self.databaseId = None
        self.databaseType = None
        self.providerGroup = None
        self.providerRegion = None
        self.oneTimeCode = None
        self.attachments = None  # fileRef
        self.scripts = None  # script

        self.trafficEncryptionSeed = None
        self.pam_settings: Optional['PamSettingsFieldData'] = None  # quoted: forward ref, avoids eval at init
        self.users = None  # List[PamUserObject] - one is admin(istrative credential)

    @classmethod
    def load(cls, data: Union[str, dict], rotation_params: Optional['PamRotationParams'] = None):
        """Parse a pamDatabase record from a JSON string or dict. Always returns an object."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # was a bare except
            logging.error(f"""Record type "pamDatabase" failed to load from: {str(data)[:80]}""")
        if not isinstance(data, dict):
            return obj

        bmp = PamBaseMachineParser.load("pamDatabase", data)

        if bmp and bmp.type.lower() != "pamDatabase".lower():
            logging.warning(f"""PAM Database data using wrong type "pamDatabase" != "{bmp.type}" """)

        obj.type = "pamDatabase"
        obj.title = bmp.title
        obj.notes = bmp.notes
        obj.host = bmp.host
        obj.port = bmp.port
        obj.useSSL = bmp.useSSL
        obj.databaseId = bmp.databaseId
        obj.databaseType = bmp.databaseType
        obj.providerGroup = bmp.providerGroup
        obj.providerRegion = bmp.providerRegion
        obj.oneTimeCode = bmp.oneTimeCode
        obj.attachments = bmp.attachments
        obj.scripts = bmp.scripts
        obj.pam_settings = bmp.pam_settings
        obj.folder_path = bmp.folder_path
        obj.uid_imported = bmp.uid_imported

        if (obj.title is None or not obj.title.strip()) and obj.databaseId and obj.databaseId.strip():
            obj.title = f"PAM Database - {str(obj.databaseId).strip()}"
        if rotation_params:
            rotation_params.ownerTitle = obj.title or ""

        obj.users = []
        users = data.get("users", None)
        if users:
            for user in users:
                rt = str(user.get("type", "")) if isinstance(user, dict) else ""
                if not rt: rt = "pamUser"  # pamDatabase user list is pamUser recs only
                if rt.lower() != "pamUser".lower():
                    logging.error(f"""{obj.title}:{obj.type}.users[] Expected record type pamUser, got "{rt}" - skipped.""")
                    continue
                usr = PamUserObject.load(user, rotation_params)
                if usr:
                    obj.users.append(usr)
        else:
            logging.warning(f"""Warning: PAM Database "{obj.title}" with empty users section.""")

        obj.validate_record()

        return obj

    def create_record(self, params, folder_uid):
        """Create the pamDatabase record in the vault; returns the new record UID (or None)."""
        args = {
            "force": True,
            "folder": folder_uid,
            "record_type": self.type
        }
        if self.uid: args["record_uid"] = self.uid
        if self.title: args["title"] = self.title
        if self.notes: args["notes"] = self.notes

        fields = []
        hostname = self.host.strip() if isinstance(self.host, str) and self.host.strip() else ""
        port = self.port.strip() if isinstance(self.port, str) and self.port.strip() else ""
        if hostname or port:
            val = json.dumps({"hostName": hostname, "port": port})
            fields.append(f"f.pamHostname=$JSON:{val}")

        ssl = utils.value_to_boolean(self.useSSL)
        if ssl is not None: fields.append(f"f.checkbox.useSSL={str(ssl).lower()}")
        if self.databaseId: fields.append(f"f.text.databaseId={self.databaseId}")
        if self.databaseType: fields.append(f"f.databaseType={self.databaseType}")
        if self.providerGroup: fields.append(f"f.text.providerGroup={self.providerGroup}")
        if self.providerRegion: fields.append(f"f.text.providerRegion={self.providerRegion}")

        if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}")

        files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else []
        if files and isinstance(files, list):
            for x in files:
                if x and isinstance(x, PamAttachmentObject) and x.file:
                    fields.append(f"file=@{x.file}")

        # pam_settings port_forward/connection belong to the record
        if self.pam_settings and isinstance(self.pam_settings, PamSettingsFieldData):
            allowSupplyHost = True if self.pam_settings.allowSupplyHost is True else False
            portForward = self.pam_settings.portForward.to_record_dict() if self.pam_settings.portForward else {}
            connection = self.pam_settings.connection.to_record_dict() if self.pam_settings.connection else {}
            if portForward or connection or allowSupplyHost:
                val = json.dumps({"allowSupplyHost": allowSupplyHost, "portForward": portForward or {}, "connection": connection or {}})
                fields.append(f"c.pamSettings=$JSON:{val}")
                # switch to f.* once RT definition(s) update w/ pamSettings field

        if fields: args["fields"] = fields
        uid = RecordEditAddCommand().execute(params, **args)
        if uid and isinstance(uid, str):
            self.uid = uid

        # after record creation add PAM scripts
        if uid and self.scripts and self.scripts.scripts:
            add_pam_scripts(params, uid, self.scripts.scripts)

        # DAG: after record creation - self.scripts, self.pam_settings.options
        return uid

    def validate_record(self):
        """Log warnings/errors for missing or invalid required data (never raises)."""
        if not (self.host or self.port):
            logging.warning(f"""PAM Database "{self.title}" is missing required field "pamHostname" data (host/port)""")
        errmsg = validate_pam_connection(self.pam_settings.connection, "pamDatabase") if self.pam_settings else ""
        if errmsg:
            logging.warning(f"""PAM Database "{self.title}" has incorrect connection setup: {errmsg}""")
        if self.uid_imported is not None and (not isinstance(self.uid_imported, str) or not RecordV3.is_valid_ref_uid(self.uid_imported)):
            logging.error(f"PAM Database uid_imported is not a valid UID: {self.uid_imported}")
self.pam_settings.options + return uid + + def validate_record(self): + if not (self.host or self.port): + logging.warning(f"""PAM Database "{self.title}" is missing required field "pamHostname" data (host/port)""") + errmsg = validate_pam_connection(self.pam_settings.connection, "pamDatabase") if self.pam_settings else "" + if errmsg: + logging.warning(f"""PAM Database "{self.title}" has incorrect connection setup: {errmsg}""") + if self.uid_imported is not None and (not isinstance(self.uid_imported, str) or not RecordV3.is_valid_ref_uid(self.uid_imported)): + logging.error(f"PAM Database uid_imported is not a valid UID: {self.uid_imported}") + +class PamDirectoryObject(): + def __init__(self): + self.folder_path = None # pam extend only + self.uid_imported = None # pam extend only - lookup by 1) uid 2) folder_path/title + self.uid = "" + self.type = "pamDirectory" + self.title = None + self.notes = None + self.host = None # pamHostname + self.port = None # pamHostname + self.useSSL = None + self.domainName = None + self.alternativeIPs = None + self.directoryId = None + self.directoryType = None # " + self.userMatch = None + self.providerGroup = None + self.providerRegion = None + self.oneTimeCode = None + self.attachments = None # fileRef + self.scripts = None # script + + self.pam_settings : Optional[PamSettingsFieldData] = None + self.users = None # List[PamUserObject] - one is admin(istrative credential) + + @classmethod + def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParams] = None): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"""Record type "pamDirectory" failed to load from: {str(data)[:80]}""") + if not isinstance(data, dict): return obj + + bmp = PamBaseMachineParser.load("pamDirectory", data) + + if bmp and bmp.type.lower() != "pamDirectory".lower(): + logging.warning(f"""PAM Directory data using wrong type "pamDirectory" != "{bmp.type}" """) + + obj.type = 
"pamDirectory" + obj.title = bmp.title + obj.notes = bmp.notes + obj.host = bmp.host + obj.port = bmp.port + obj.useSSL = bmp.useSSL + obj.domainName = bmp.domainName + obj.alternativeIPs = bmp.alternativeIPs + obj.directoryId = bmp.directoryId + obj.directoryType = bmp.directoryType + obj.userMatch = bmp.userMatch + obj.providerGroup = bmp.providerGroup + obj.providerRegion = bmp.providerRegion + obj.oneTimeCode = bmp.oneTimeCode + obj.attachments = bmp.attachments + obj.scripts = bmp.scripts + obj.pam_settings = bmp.pam_settings + obj.folder_path = bmp.folder_path + obj.uid_imported = bmp.uid_imported + + if (obj.title is None or not obj.title.strip()) and obj.domainName and obj.domainName.strip(): + obj.title = f"PAM Directory - {str(obj.domainName).strip()}" + if rotation_params: + rotation_params.ownerTitle = obj.title or "" + + obj.users = [] + users = data.get("users", None) + if users: + for user in users: + rt = str(user.get("type", "")) if isinstance(user, dict) else "" + if not rt: rt = "pamUser" # pamDirectory user list is pamUser recs only + if rt.lower() != "pamUser".lower(): + logging.error(f"""{obj.title}:{obj.type}.users[] Expected record type pamUser, got "{rt}" - skipped.""") + continue + usr = PamUserObject.load(user, rotation_params) + if usr: + obj.users.append(usr) + else: + logging.warning(f"""Warning: PAM Directory "{obj.title}" with empty users section.""") + + obj.validate_record() + + return obj + + def create_record(self, params, folder_uid): + args = { + "force": True, + "folder": folder_uid, + "record_type": self.type + } + if self.uid: args["record_uid"] = self.uid + if self.title: args["title"] = self.title + if self.notes: args["notes"] = self.notes + + fields = [] + hostname = self.host.strip() if isinstance(self.host, str) and self.host.strip() else "" + port = self.port.strip() if isinstance(self.port, str) and self.port.strip() else "" + if hostname or port: + val = json.dumps({"hostName": hostname, "port": port}) + 
fields.append(f"f.pamHostname=$JSON:{val}") + + ssl = utils.value_to_boolean(self.useSSL) + if ssl is not None: fields.append(f"f.checkbox.useSSL={str(ssl).lower()}") + if self.domainName: fields.append(f"f.text.domainName={self.domainName}") + if self.alternativeIPs: fields.append(f"f.multiline.alternativeIPs={self.alternativeIPs}") + if self.directoryId: fields.append(f"f.text.directoryId={self.directoryId}") + if self.directoryType: fields.append(f"f.directoryType={self.directoryType}") + if self.userMatch: fields.append(f"f.text.userMatch={self.userMatch}") + if self.providerGroup: fields.append(f"f.text.providerGroup={self.providerGroup}") + if self.providerRegion: fields.append(f"f.text.providerRegion={self.providerRegion}") + + if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}") + + files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else [] + if files and isinstance(files, list): + for x in files: + if x and isinstance(x, PamAttachmentObject) and x.file: + fields.append(f"file=@{x.file}") + + # pam_settings port_forward/connection belong to the record + if self.pam_settings and isinstance(self.pam_settings, PamSettingsFieldData): + allowSupplyHost = True if self.pam_settings.allowSupplyHost is True else False + portForward = self.pam_settings.portForward.to_record_dict() if self.pam_settings.portForward else {} + connection = self.pam_settings.connection.to_record_dict() if self.pam_settings.connection else {} + if portForward or connection or allowSupplyHost: + val = json.dumps({"allowSupplyHost": allowSupplyHost, "portForward": portForward or {}, "connection": connection or {}}) + fields.append(f"c.pamSettings=$JSON:{val}") + # switch to f.* once RT definition(s) update w/ pamSettings field + + if fields: args["fields"] = fields + uid = RecordEditAddCommand().execute(params, **args) + if uid and isinstance(uid, str): + self.uid = uid + + # after record creation add PAM scripts 
# (closing statements of PamDirectoryObject.create_record/validate_record
#  belong to that class, defined in the previous chunk)

class PamRemoteBrowserObject():
    """Import-side model of a pamRemoteBrowser (RBI) record."""

    def __init__(self):
        self.folder_path = None   # pam extend only
        self.uid_imported = None  # pam extend only - lookup by 1) uid 2) folder_path/title
        self.uid = ""
        self.type = "pamRemoteBrowser"
        self.title = None
        self.notes = None
        self.rbiUrl = None
        self.oneTimeCode = None
        self.attachments = None  # fileRef

        self.rbi_settings: Optional[PamRemoteBrowserSettings] = None  # ft: pamRemoteBrowserSettings
        # Use httpCredentialsUid <- resolved from autofill_credentials (ref rt:Login in pam_data.users[])

    @classmethod
    def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParams] = None):
        """Build a PamRemoteBrowserObject from a JSON string or dict; always returns an instance."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except:
            logging.error(f"""Record type "pamRemoteBrowser" failed to load from: {str(data)[:80]}""")
        if not isinstance(data, dict):
            return obj

        dtype = data.get("type", None)
        if dtype and str(dtype).lower() != "pamRemoteBrowser".lower():
            logging.warning(f"""PAM RBI data using wrong type "pamRemoteBrowser" != "{dtype}" """)

        obj.type = "pamRemoteBrowser"
        obj.title = str(data["title"]) if "title" in data else None
        obj.notes = str(data["notes"]) if "notes" in data else None
        obj.rbiUrl = str(data["url"]) if "url" in data else None
        obj.oneTimeCode = str(data["otp"]) if "otp" in data else None
        obj.attachments = PamAttachmentsObject.load(data.get("attachments", None))

        psd = data.get("pam_settings", None)
        rbi_settings = PamRemoteBrowserSettings.load(psd)
        obj.rbi_settings = None if is_empty_instance(rbi_settings) else rbi_settings
        if psd and not obj.rbi_settings:
            logging.error(f"""{obj.type}: failed to load RBI Settings from "{str(psd)[:80]}" """)

        # No usable title: synthesize one from the URL's host part.
        if (obj.title is None or not obj.title.strip()) and obj.rbiUrl and str(obj.rbiUrl).strip():
            hostname = str(obj.rbiUrl).lower()
            hostname = re.sub(r"^\s*https?://", "", hostname, flags=re.IGNORECASE)
            hostname = hostname.split("/", 1)[0]
            if hostname:
                obj.title = f"PAM RBI - {hostname}"

        obj.folder_path = str(data["folder_path"]) if "folder_path" in data else None
        obj.uid_imported = str(data["uid"]) if "uid" in data else None

        obj.validate_record()

        return obj

    def create_record(self, params, folder_uid):
        """Create this pamRemoteBrowser record in the vault and return its UID."""
        args = {
            "force": True,
            "folder": folder_uid,
            "record_type": self.type,
        }
        if self.uid:
            args["record_uid"] = self.uid
        if self.title:
            args["title"] = self.title
        if self.notes:
            args["notes"] = self.notes

        fields = []
        if self.rbiUrl:
            fields.append(f"rbiUrl={self.rbiUrl}")

        if self.oneTimeCode:
            fields.append(f"oneTimeCode={self.oneTimeCode}")

        # Attachments upload via file=@<path> arguments.
        files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else []
        if files and isinstance(files, list):
            for x in files:
                if x and isinstance(x, PamAttachmentObject) and x.file:
                    fields.append(f"file=@{x.file}")

        # pam_settings connection belongs to the record
        connection = {}
        if self.rbi_settings and isinstance(self.rbi_settings, PamRemoteBrowserSettings):
            if self.rbi_settings.connection:
                connection = self.rbi_settings.connection.to_record_dict()
        if connection:
            val = json.dumps({"connection": connection or {}})
            fields.append(f"pamRemoteBrowserSettings=$JSON:{val}")
            # switch to f.* once RT definition(s) update w/ pamRemoteBrowserSettings field

        if fields:
            args["fields"] = fields
        uid = RecordEditAddCommand().execute(params, **args)
        if uid and isinstance(uid, str):
            self.uid = uid

        # DAG: after record creation - self.pam_settings.options
        return uid

    def validate_record(self):
        """Log diagnostics for a bad connection setup or an invalid imported UID."""
        errmsg = validate_pam_connection(self.rbi_settings.connection, "pamRemoteBrowser") if self.rbi_settings else ""
        if errmsg:
            logging.warning(f"""PAM RBI "{self.title}" has incorrect connection setup: {errmsg}""")
        if self.uid_imported is not None and (not isinstance(self.uid_imported, str) or not RecordV3.is_valid_ref_uid(self.uid_imported)):
            logging.error(f"PAM RBI uid_imported is not a valid UID: {self.uid_imported}")


# PAM Settings field data
FONT_SIZES = (8, 9, 10, 11, 12, 14, 18, 24, 30, 36, 48, 60, 72, 96)


class ConnectionProtocol(Enum):
    """Wire values for the Guacamole-style connection protocol."""
    RDP = "rdp"
    VNC = "vnc"
    TELNET = "telnet"
    SSH = "ssh"
    KUBERNETES = "kubernetes"
    SQLSERVER = "sql-server"
    POSTGRESQL = "postgresql"
    MYSQL = "mysql"
    HTTP = "http"


class RDPSecurity(Enum):
    """RDP security mode values."""
    ANY = "any"
    NLA = "nla"
    TLS = "tls"
    VMCONNECT = "vmconnect"
    RDP = "rdp"

    @classmethod
    def map(cls, rdp_security: str):
        """Case-insensitive string -> member; None when unrecognized."""
        try:
            return cls(str(rdp_security).lower())
        except ValueError:
            return None


class TerminalThemes(Enum):
    """Terminal color-scheme presets."""
    BLACK_WHITE = "black-white"  # Black on white
    GRAY_BLACK = "gray-black"    # Gray on black
    GREEN_BLACK = "green-black"  # Green on black
    WHITE_BLACK = "white-black"  # White on black
    CUSTOM = "custom"            # Not a valid value to send to guac
    # example custom color scheme:
    # "colorScheme": "background: rgb:00/3D/FC;\nforeground: rgb:74/1A/1A;\ncolor0: rgb:00/00/00;\ncolor1: rgb:99/3E/3E;\ncolor2: rgb:3E/99/3E;\ncolor3: rgb:99/99/3E;\ncolor4: rgb:3E/3E/99;\ncolor5: rgb:99/3E/99;\ncolor6: rgb:3E/99/99;\ncolor7: rgb:99/99/99;\ncolor8: rgb:3E/3E/3E;\ncolor9: rgb:FF/67/67;\ncolor10: rgb:67/FF/67;\ncolor11: rgb:FF/FF/67;\ncolor12: rgb:67/67/FF;\ncolor13: rgb:FF/67/FF;\ncolor14: rgb:67/FF/FF;\ncolor15: rgb:FF/FF/FF;"

    @classmethod
    def map(cls, tty_theme: str):
        """Case-insensitive string -> member; None when unrecognized."""
        try:
            return cls(str(tty_theme).lower())
        except ValueError:
            return None
# (the custom color-scheme example comment and the map() classmethod above
#  belong to the TerminalThemes enum, defined in the previous chunk)

def parse_multiline(data: dict, key: str, message: str = "") -> Optional[List[str]]:
    """Read a multiline field from *data* as a list of non-empty strings.

    Accepts either a list of strings ("multiline": ["line1", "line2"]) or a
    single string (wrapped into a one-element list). Returns None - logging a
    warning built from *message* - when any entry is empty or not a string,
    or when the key/value is missing or malformed.
    """
    if not (data and isinstance(data, dict) and key and isinstance(key, str)):
        return None
    val = data.get(key, None)  # "multiline": ["line1" "line2"]
    if isinstance(val, str):
        val = [val]  # allow for "multiline": "line1"
    if not (val and isinstance(val, list)):
        return None
    if any(not isinstance(x, str) or x == "" for x in val):
        if isinstance(message, str) and message != "":
            logging.warning(f"{message} - value: {val[:24]}")
        else:
            logging.warning("Error parsing multiline value (skipped): "
                            f"found empty or non string values - value: {val[:24]}")
        return None
    return val


def multiline_to_str(lines: Optional[List[str]]) -> Optional[str]:
    """Join a non-empty list of lines with newlines; None for anything else."""
    if not (lines and isinstance(lines, list)):
        return None
    return "\n".join(lines)


def multiline_stringify(lines: Optional[List[str]]) -> Optional[str]:
    """JSON-escape the joined lines and drop the surrounding quotes.

    Produces a string safe to embed in a field expression; None when *lines*
    is empty or not a list.
    """
    if not (lines and isinstance(lines, list)):
        return None
    # nb! strip() may remove more quotes esp. at end of string
    encoded = json.dumps("\n".join(lines))
    if encoded and encoded.startswith("\"") and encoded.endswith("\""):
        encoded = encoded[1:-1]
    return encoded
# (the body of multiline_stringify above belongs with its def in the
#  previous chunk)

def parse_dag_option(option: Optional[str]) -> Optional[str]:
    """Normalize a DAG option to one of "on"/"off"/"default"; None otherwise."""
    key = str(option).lower()
    if key in ("on", "off", "default"):
        return key
    return None


class ClipboardConnectionSettings:
    """Clipboard copy/paste restrictions shared by several connection types."""

    def __init__(self, disableCopy: Optional[bool] = None, disablePaste: Optional[bool] = None):
        self.disableCopy = disableCopy
        self.disablePaste = disablePaste

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Build from a JSON string or dict; returns defaults on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except:
            logging.error(f"Clipboard Connection Settings failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        obj.disableCopy = utils.value_to_boolean(data.get("disable_copy", None))
        obj.disablePaste = utils.value_to_boolean(data.get("disable_paste", None))
        return obj


def clipboard_connection_settings(connection_settings: Union[PamConnectionSettings, ConnectionSettingsHTTP]) -> Optional[ClipboardConnectionSettings]:
    """Extract clipboard settings for protocols that support them; None otherwise."""
    if connection_settings and connection_settings.protocol and connection_settings.protocol in (
        ConnectionProtocol.RDP,
        ConnectionProtocol.VNC,
        ConnectionProtocol.TELNET,
        ConnectionProtocol.SSH,
        ConnectionProtocol.SQLSERVER,
        ConnectionProtocol.MYSQL,
        ConnectionProtocol.POSTGRESQL,
        ConnectionProtocol.HTTP
    ):
        disableCopy = getattr(connection_settings, "disableCopy", None)
        disablePaste = getattr(connection_settings, "disablePaste", None)
        return ClipboardConnectionSettings(disableCopy, disablePaste)
    return None


class SFTPRootDirectorySettings:
    """SFTP enablement plus the server-side root directory."""

    def __init__(self, enableSftp: Optional[bool] = None, sftpRootDirectory: Optional[str] = None):
        self.enableSftp = enableSftp
        self.sftpRootDirectory = sftpRootDirectory

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Build from a JSON string or dict; returns defaults on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except:
            logging.error(f"SFTP Root Directory Settings failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        obj.enableSftp = utils.value_to_boolean(data.get("enable_sftp", None))
        val = data.get("sftp_root_directory", None)
        if isinstance(val, str):
            obj.sftpRootDirectory = val
        return obj

    def to_dict(self):
        """Serialize only well-typed, non-empty values for the record payload."""
        # Fix: local was named `dict`, shadowing the builtin - renamed.
        result: Dict[str, Any] = {}
        if self.enableSftp is not None and isinstance(self.enableSftp, bool):
            result["enableSftp"] = self.enableSftp
        if self.sftpRootDirectory and isinstance(self.sftpRootDirectory, str) and self.sftpRootDirectory.strip():
            result["sftpRootDirectory"] = self.sftpRootDirectory.strip()

        return result
# (the tail of SFTPRootDirectorySettings.load/to_dict above belongs with
#  that class, defined in the previous chunk)

class SFTPConnectionSettings(SFTPRootDirectorySettings):
    """Full SFTP connection settings: root directory plus resource/user refs and options."""

    def __init__(
        self,
        enableSftp: Optional[bool] = None,
        sftpRootDirectory: Optional[str] = None,
        sftpResource: Optional[List[str]] = None,
        sftpUser: Optional[List[str]] = None,
        sftpDirectory: Optional[str] = None,
        sftpServerAliveInterval: Optional[int] = None
    ):
        super().__init__(enableSftp, sftpRootDirectory)
        self.sftpResource = sftpResource
        self.sftpUser = sftpUser
        self.sftpDirectory = sftpDirectory
        self.sftpServerAliveInterval = sftpServerAliveInterval
        self.sftpResourceUid = None  # resolve from sftpResource
        self.sftpUserUid = None      # resolve from sftpUser

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Build from a JSON string or dict; returns defaults on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except:
            logging.error(f"SFTP Connection Settings failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        rds = SFTPRootDirectorySettings.load(data)
        if rds:
            obj.enableSftp = rds.enableSftp
            obj.sftpRootDirectory = rds.sftpRootDirectory

        # which is the resource record (not yet in web UI)
        obj.sftpResource = parse_multiline(data, "sftp_resource", "Error parsing sftp_resource")
        obj.sftpUser = parse_multiline(data, "sftp_user_credentials", "Error parsing sftp_user_credentials")
        val = data.get("sftp_upload_directory", None)
        if isinstance(val, str):
            obj.sftpDirectory = val
        # keepalive accepts an int or a decimal string; negatives are normalized
        val = data.get("sftp_keepalive_interval", None)
        if type(val) is int:
            obj.sftpServerAliveInterval = abs(val)
        elif val and str(val).isdecimal():
            obj.sftpServerAliveInterval = int(val)

        return obj

    def to_dict(self):
        """Serialize only well-typed, non-empty values for the record payload."""
        # Fix: local was named `dict`, shadowing the builtin - renamed.
        result: Dict[str, Any] = {}
        if self.sftpRootDirectory and isinstance(self.sftpRootDirectory, str) and self.sftpRootDirectory.strip():
            result["sftpRootDirectory"] = self.sftpRootDirectory.strip()
        if self.enableSftp is not None and isinstance(self.enableSftp, bool):
            result["enableSftp"] = self.enableSftp

        # if resolved from sftpResource
        if self.sftpResourceUid and isinstance(self.sftpResourceUid, str) and self.sftpResourceUid.strip():
            result["sftpResourceUid"] = self.sftpResourceUid.strip()
        # if resolved from sftpUser
        if self.sftpUserUid and isinstance(self.sftpUserUid, str) and self.sftpUserUid.strip():
            result["sftpUserUid"] = self.sftpUserUid.strip()

        if self.sftpDirectory and isinstance(self.sftpDirectory, str) and self.sftpDirectory.strip():
            result["sftpDirectory"] = self.sftpDirectory.strip()
        if self.sftpServerAliveInterval and type(self.sftpServerAliveInterval) is int and abs(self.sftpServerAliveInterval) > 0:
            result["sftpServerAliveInterval"] = abs(self.sftpServerAliveInterval)

        return result
# (the tail of SFTPConnectionSettings.load/to_dict above belongs with that
#  class, defined in the previous chunk)

def sftp_enabled(connection_settings: Union[PamConnectionSettings, ConnectionSettingsHTTP]) -> Optional[bool]:
    """Return the enableSftp flag for protocols that support SFTP; None otherwise."""
    if not (connection_settings and connection_settings.protocol):
        return None
    if connection_settings.protocol not in (
        ConnectionProtocol.RDP,
        ConnectionProtocol.VNC,
        ConnectionProtocol.SSH
    ):
        return None
    sftp = getattr(connection_settings, "sftp", None)
    if sftp:
        return getattr(sftp, "enableSftp", None)
    return None


class TerminalDisplayConnectionSettings:
    """Terminal display options: color scheme and (clamped) font size."""
    fontSizes: List[int] = [8,9,10,11,12,14,18,24,30,36,48,60,72,96]

    def __init__(self, colorScheme: Optional[str] = None, fontSize: Optional[int] = None):
        self.colorScheme = colorScheme
        self.fontSize = fontSize

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Build from a JSON string or dict; snaps font_size to the nearest supported size."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except:
            logging.error(f"Terminal Display Connection Settings failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        val = data.get("color_scheme", None)
        if isinstance(val, str):
            obj.colorScheme = val

        val = data.get("font_size", None)
        if type(val) is int:
            obj.fontSize = val
        elif val and str(val).isdecimal():
            obj.fontSize = int(val)

        # Snap unsupported sizes to the closest entry in fontSizes.
        if obj.fontSize and type(obj.fontSize) is int:
            font_size: int = obj.fontSize
            closest_number = min(obj.fontSizes, key=lambda x: abs(x - font_size))
            if closest_number != font_size:
                logging.error(f"Terminal Display Connection Settings - adjusted invalid font_size from: {obj.fontSize} to: {closest_number}")
                obj.fontSize = closest_number
        return obj


class BaseConnectionSettings:
    """Settings shared by all connection protocols (port override, user refs, recording)."""

    def __init__(self, port: Optional[str] = None, allowSupplyUser: Optional[bool] = None, userRecords: Optional[List[str]] = None, recordingIncludeKeys: Optional[bool] = None):
        self.port = port  # Override port from host
        self.allowSupplyUser = allowSupplyUser
        self.recordingIncludeKeys = recordingIncludeKeys
        self.userRecords = userRecords
        self.userRecordUid = None  # resolved from userRecords

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Build from a JSON string or dict; returns defaults on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except:
            logging.error(f"Base Connection Settings failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        val = data.get("port", None)  # Override port from host
        if isinstance(val, str) or str(val).isdecimal():
            obj.port = str(val)

        obj.allowSupplyUser = utils.value_to_boolean(data.get("allow_supply_user", None))
        obj.userRecords = parse_multiline(data, "administrative_credentials", "Error parsing administrative_credentials")
        obj.recordingIncludeKeys = utils.value_to_boolean(data.get("recording_include_keys", None))
        return obj
# (the closing statements of BaseConnectionSettings.load above belong with
#  that class, defined in the previous chunk)

class ConnectionSettingsRDP(BaseConnectionSettings, ClipboardConnectionSettings):
    """RDP connection settings (security mode, preconnection data, SFTP, display options)."""
    protocol = ConnectionProtocol.RDP

    def __init__(
        self,
        port: Optional[str] = None,  # Override port from host
        allowSupplyUser: Optional[bool] = None,
        userRecords: Optional[List[str]] = None,
        recordingIncludeKeys: Optional[bool] = None,
        disableCopy: Optional[bool] = None,
        disablePaste: Optional[bool] = None,
        security: Optional[RDPSecurity] = None,
        disableAuth: Optional[bool] = None,
        ignoreCert: Optional[bool] = None,
        loadBalanceInfo: Optional[str] = None,
        preconnectionId: Optional[str] = None,
        preconnectionBlob: Optional[str] = None,
        sftp: Optional[SFTPConnectionSettings] = None,
        disableAudio: Optional[bool] = None,
        resizeMethod: Optional[str] = None,
        enableWallpaper: Optional[bool] = None,
        enableFullWindowDrag: Optional[bool] = None
    ):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste)
        self.security = security if isinstance(security, RDPSecurity) else None
        self.disableAuth = disableAuth
        self.ignoreCert = ignoreCert
        self.loadBalanceInfo = loadBalanceInfo
        self.preconnectionId = preconnectionId
        self.preconnectionBlob = preconnectionBlob
        self.sftp = sftp if isinstance(sftp, SFTPConnectionSettings) else None
        self.disableAudio = disableAudio
        self.resizeMethod = resizeMethod  # disable_dynamic_resizing ? "" : "display-update"
        # Performance Properties
        self.enableWallpaper = enableWallpaper
        self.enableFullWindowDrag = enableFullWindowDrag

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Build from a JSON string or dict; returns defaults on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except:
            logging.error(f"Connection Settings RDP failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            obj.port = bcs.port
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        ccs = ClipboardConnectionSettings.load(data)
        if ccs:
            obj.disableCopy = ccs.disableCopy
            obj.disablePaste = ccs.disablePaste

        val = data.get("security", None)
        if isinstance(val, str):
            obj.security = RDPSecurity.map(val)
        obj.disableAuth = utils.value_to_boolean(data.get("disable_authentication", None))
        obj.ignoreCert = utils.value_to_boolean(data.get("ignore_server_cert", None))

        val = data.get("load_balance_info", None)
        if isinstance(val, str):
            obj.loadBalanceInfo = val  # LoadBalance Info/Cookie
        val = data.get("preconnection_id", None)
        if isinstance(val, str):
            obj.preconnectionId = val
        val = data.get("preconnection_blob", None)
        if isinstance(val, str):
            obj.preconnectionBlob = val
        sftp = data.get("sftp", None)
        if isinstance(sftp, dict):
            obj.sftp = SFTPConnectionSettings.load(sftp)

        obj.disableAudio = utils.value_to_boolean(data.get("disable_audio", None))
        obj.enableWallpaper = utils.value_to_boolean(data.get("enable_wallpaper", None))
        obj.enableFullWindowDrag = utils.value_to_boolean(data.get("enable_full_window_drag", None))

        # disable_dynamic_resizing ? "" : "display-update"
        val = utils.value_to_boolean(data.get("disable_dynamic_resizing", None))
        if val is not True:
            obj.resizeMethod = "display-update"

        return obj

    def to_record_dict(self):
        """Serialize to the record's connection payload, omitting unset values."""
        kvp: Dict[str, Any] = { "protocol": ConnectionProtocol.RDP.value }  # pylint: disable=E1101

        # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create)
        recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["userRecords"] = uids

        if self.port and isinstance(self.port, str) and self.port.strip():
            kvp["port"] = self.port.strip()
        if self.allowSupplyUser is not None and isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if self.recordingIncludeKeys is not None and isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if self.disableCopy is not None and isinstance(self.disableCopy, bool):
            kvp["disableCopy"] = self.disableCopy
        if self.disablePaste is not None and isinstance(self.disablePaste, bool):
            kvp["disablePaste"] = self.disablePaste
        if isinstance(self.security, RDPSecurity):
            kvp["security"] = self.security.value.lower()

        if self.disableAuth is not None and isinstance(self.disableAuth, bool):
            kvp["disableAuth"] = self.disableAuth
        if self.ignoreCert is not None and isinstance(self.ignoreCert, bool):
            kvp["ignoreCert"] = self.ignoreCert

        if self.loadBalanceInfo and isinstance(self.loadBalanceInfo, str) and self.loadBalanceInfo.strip():
            kvp["loadBalanceInfo"] = self.loadBalanceInfo.strip()
        if self.preconnectionId and isinstance(self.preconnectionId, str) and self.preconnectionId.strip():
            kvp["preconnectionId"] = self.preconnectionId.strip()
        if self.preconnectionBlob and isinstance(self.preconnectionBlob, str) and self.preconnectionBlob.strip():
            kvp["preconnectionBlob"] = self.preconnectionBlob.strip()

        if self.disableAudio is not None and isinstance(self.disableAudio, bool):
            kvp["disableAudio"] = self.disableAudio
        if self.enableFullWindowDrag is not None and isinstance(self.enableFullWindowDrag, bool):
            kvp["enableFullWindowDrag"] = self.enableFullWindowDrag
        if self.enableWallpaper is not None and isinstance(self.enableWallpaper, bool):
            kvp["enableWallpaper"] = self.enableWallpaper

        # populated on load - "resizeMethod": disable_dynamic_resizing ? "" : "display-update"
        if str(self.resizeMethod) == "display-update":
            kvp["resizeMethod"] = self.resizeMethod

        if isinstance(self.sftp, SFTPConnectionSettings):
            sftp = self.sftp.to_dict()
            if sftp:
                kvp["sftp"] = sftp

        return kvp

    def to_record_json(self):
        """JSON form of to_record_dict()."""
        # Fix: local was named `dict`, shadowing the builtin - renamed.
        payload = self.to_record_dict() or {}
        rec_json = json.dumps(payload)
        return rec_json
# (the closing statements of ConnectionSettingsRDP.to_record_dict/to_record_json
#  above belong with that class, defined in the previous chunk)

# field type: pamRemoteBrowserSettings
class ConnectionSettingsHTTP(BaseConnectionSettings, ClipboardConnectionSettings):
    """HTTP (RBI) connection settings: URL policies and autofill credentials."""
    protocol = ConnectionProtocol.HTTP

    def __init__(
        self,
        port: Optional[str] = None,  # Override port from host
        allowSupplyUser: Optional[bool] = None,
        userRecords: Optional[List[str]] = None,
        recordingIncludeKeys: Optional[bool] = None,
        disableCopy: Optional[bool] = None,
        disablePaste: Optional[bool] = None,
        allowUrlManipulation: Optional[bool] = None,
        allowedUrlPatterns: Optional[str] = None,
        allowedResourceUrlPatterns: Optional[str] = None,
        httpCredentials: Optional[List[str]] = None,  # autofill_credentials: login|pamUser
        autofillConfiguration: Optional[str] = None,
        ignoreInitialSslCert: Optional[bool] = None
    ):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste)
        self.allowUrlManipulation = allowUrlManipulation
        self.allowedUrlPatterns = allowedUrlPatterns
        self.allowedResourceUrlPatterns = allowedResourceUrlPatterns
        self.httpCredentials = httpCredentials  # autofill_credentials: login|pamUser
        self.autofillConfiguration = autofillConfiguration
        self.ignoreInitialSslCert = ignoreInitialSslCert
        self.httpCredentialsUid = None  # resolved from httpCredentials

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Build from a JSON string or dict; returns defaults on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except:
            logging.error(f"Connection Settings HTTP failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            # obj.port = bcs.port # not yet in web UI of RBI
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        ccs = ClipboardConnectionSettings.load(data)
        if ccs:
            obj.disableCopy = ccs.disableCopy
            obj.disablePaste = ccs.disablePaste

        obj.allowUrlManipulation = utils.value_to_boolean(data.get("allow_url_manipulation", None))
        obj.allowedUrlPatterns = multiline_to_str(parse_multiline(data, "allowed_url_patterns", "Error parsing allowed_url_patterns"))
        obj.allowedResourceUrlPatterns = multiline_to_str(parse_multiline(data, "allowed_resource_url_patterns", "Error parsing allowed_resource_url_patterns"))
        obj.httpCredentials = parse_multiline(data, "autofill_credentials", "Error parsing autofill_credentials")
        obj.autofillConfiguration = multiline_to_str(parse_multiline(data, "autofill_targets", "Error parsing autofill_targets"))
        obj.ignoreInitialSslCert = utils.value_to_boolean(data.get("ignore_server_cert", None))

        return obj

    def to_record_dict(self):
        """Serialize to the record's connection payload, omitting unset values."""
        kvp: Dict[str, Any] = { "protocol": ConnectionProtocol.HTTP.value }  # pylint: disable=E1101

        # if resolved (autofill_credentials->httpCredentialsUid) login|pamUser
        recs: list = self.httpCredentialsUid if self.httpCredentialsUid and isinstance(self.httpCredentialsUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["httpCredentialsUid"] = uids[0]  # single credential

        # port - unused for RBI
        # if self.port and isinstance(self.port, str) and self.port.strip():
        #     kvp["port"] = self.port.strip()
        if self.allowSupplyUser is not None and isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if self.recordingIncludeKeys is not None and isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if self.disableCopy is not None and isinstance(self.disableCopy, bool):
            kvp["disableCopy"] = self.disableCopy
        if self.disablePaste is not None and isinstance(self.disablePaste, bool):
            kvp["disablePaste"] = self.disablePaste

        if self.allowUrlManipulation is not None and isinstance(self.allowUrlManipulation, bool):
            kvp["allowUrlManipulation"] = self.allowUrlManipulation
        if self.allowedUrlPatterns and isinstance(self.allowedUrlPatterns, str) and self.allowedUrlPatterns.strip():
            kvp["allowedUrlPatterns"] = self.allowedUrlPatterns.strip()
        if self.allowedResourceUrlPatterns and isinstance(self.allowedResourceUrlPatterns, str) and self.allowedResourceUrlPatterns.strip():
            kvp["allowedResourceUrlPatterns"] = self.allowedResourceUrlPatterns.strip()
        if self.autofillConfiguration and isinstance(self.autofillConfiguration, str) and self.autofillConfiguration.strip():
            kvp["autofillConfiguration"] = self.autofillConfiguration.strip()
        if self.ignoreInitialSslCert is not None and isinstance(self.ignoreInitialSslCert, bool):
            kvp["ignoreInitialSslCert"] = self.ignoreInitialSslCert

        return kvp

    def to_record_json(self):
        """JSON form of to_record_dict()."""
        # Fix: local was named `dict`, shadowing the builtin - renamed.
        payload = self.to_record_dict() or {}
        rec_json = json.dumps(payload)
        return rec_json

# NOTE(review): class ConnectionSettingsVNC(BaseConnectionSettings,
# ClipboardConnectionSettings) begins here and its definition continues past
# this chunk boundary.
userRecords: Optional[List[str]] = None, + recordingIncludeKeys: Optional[bool] = None, + disableCopy: Optional[bool] = None, + disablePaste: Optional[bool] = None, + destHost: Optional[str] = None, + destPort: Optional[str] = None, + sftp: Optional[SFTPConnectionSettings] = None + ): + BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys) + ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste) + self.destHost = destHost + self.destPort = destPort + self.sftp = sftp if isinstance(sftp, SFTPConnectionSettings) else None + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Connection Settings VNC failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + bcs = BaseConnectionSettings.load(data) + if bcs: + obj.port = bcs.port + obj.allowSupplyUser = bcs.allowSupplyUser + obj.userRecords = bcs.userRecords + obj.recordingIncludeKeys = bcs.recordingIncludeKeys + + ccs = ClipboardConnectionSettings.load(data) + if ccs: + obj.disableCopy = ccs.disableCopy + obj.disablePaste = ccs.disablePaste + + val = data.get("destination_host", None) + if isinstance(val, str): obj.destHost = val + val = data.get("destination_port", None) + if isinstance(val, str): obj.destPort = val + + sftp = data.get("sftp", None) + if isinstance(sftp, dict): obj.sftp = SFTPConnectionSettings.load(sftp) + + return obj + + def to_record_dict(self): + kvp: Dict[str, Any] = { "protocol": ConnectionProtocol.VNC.value } # pylint: disable=E1101 + + # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create) + recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else [] + uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""] + if uids: + kvp["userRecords"] = uids + + if self.port and isinstance(self.port, str) 
and self.port.strip(): + kvp["port"] = self.port.strip() + if self.allowSupplyUser is not None and isinstance(self.allowSupplyUser, bool): + kvp["allowSupplyUser"] = self.allowSupplyUser + if self.recordingIncludeKeys is not None and isinstance(self.recordingIncludeKeys, bool): + kvp["recordingIncludeKeys"] = self.recordingIncludeKeys + if self.disableCopy is not None and isinstance(self.disableCopy, bool): + kvp["disableCopy"] = self.disableCopy + if self.disablePaste is not None and isinstance(self.disablePaste, bool): + kvp["disablePaste"] = self.disablePaste + + if self.destHost and isinstance(self.destHost, str) and self.destHost.strip(): + kvp["destHost"] = self.destHost.strip() + if self.destPort and isinstance(self.destPort, str) and self.destPort.strip(): + kvp["destPort"] = self.destPort.strip() + + if isinstance(self.sftp, SFTPConnectionSettings): + sftp = self.sftp.to_dict() + if sftp: + kvp["sftp"] = sftp + + return kvp + + def to_record_json(self): + dict = self.to_record_dict() or {} + rec_json = json.dumps(dict) + return rec_json + +class ConnectionSettingsTelnet(BaseConnectionSettings, ClipboardConnectionSettings, TerminalDisplayConnectionSettings): + protocol = ConnectionProtocol.TELNET + def __init__( # pylint: disable=R0917 + self, + port: Optional[str] = None, # Override port from host + allowSupplyUser: Optional[bool] = None, + userRecords: Optional[List[str]] = None, + recordingIncludeKeys: Optional[bool] = None, + disableCopy: Optional[bool] = None, + disablePaste: Optional[bool] = None, + colorScheme: Optional[str] = None, + fontSize: Optional[int] = None, + usernameRegex: Optional[str] = None, + passwordRegex: Optional[str] = None, + loginSuccessRegex: Optional[str] = None, + loginFailureRegex: Optional[str] = None + ): + BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys) + ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste) + TerminalDisplayConnectionSettings.__init__(self, 
colorScheme, fontSize) + self.usernameRegex = usernameRegex + self.passwordRegex = passwordRegex + self.loginSuccessRegex = loginSuccessRegex + self.loginFailureRegex = loginFailureRegex + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Connection Settings Telnet failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + bcs = BaseConnectionSettings.load(data) + if bcs: + obj.port = bcs.port + obj.allowSupplyUser = bcs.allowSupplyUser + obj.userRecords = bcs.userRecords + obj.recordingIncludeKeys = bcs.recordingIncludeKeys + + ccs = ClipboardConnectionSettings.load(data) + if ccs: + obj.disableCopy = ccs.disableCopy + obj.disablePaste = ccs.disablePaste + + tcs = TerminalDisplayConnectionSettings.load(data) + if tcs: + obj.colorScheme = tcs.colorScheme + obj.fontSize = tcs.fontSize + + val = data.get("username_regex", None) + if isinstance(val, str): obj.usernameRegex = val + val = data.get("password_regex", None) + if isinstance(val, str): obj.passwordRegex = val + val = data.get("login_success_regex", None) + if isinstance(val, str): obj.loginSuccessRegex = val + val = data.get("login_failure_regex", None) + if isinstance(val, str): obj.loginFailureRegex = val + + return obj + + def to_record_dict(self): + kvp: Dict[str, Any] = { "protocol": ConnectionProtocol.TELNET.value } # pylint: disable=E1101 + + # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create) + recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else [] + uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""] + if uids: + kvp["userRecords"] = uids + + if self.port and isinstance(self.port, str) and self.port.strip(): + kvp["port"] = self.port.strip() + if self.allowSupplyUser is not None and isinstance(self.allowSupplyUser, bool): + 
kvp["allowSupplyUser"] = self.allowSupplyUser + if self.recordingIncludeKeys is not None and isinstance(self.recordingIncludeKeys, bool): + kvp["recordingIncludeKeys"] = self.recordingIncludeKeys + if self.disableCopy is not None and isinstance(self.disableCopy, bool): + kvp["disableCopy"] = self.disableCopy + if self.disablePaste is not None and isinstance(self.disablePaste, bool): + kvp["disablePaste"] = self.disablePaste + + if self.colorScheme and isinstance(self.colorScheme, str) and self.colorScheme.strip(): + kvp["colorScheme"] = self.colorScheme.strip() + if self.fontSize and type(self.fontSize) is int and self.fontSize > 4: + kvp["fontSize"] = str(self.fontSize) + if self.usernameRegex and isinstance(self.usernameRegex, str) and self.usernameRegex.strip(): + kvp["usernameRegex"] = self.usernameRegex.strip() + if self.passwordRegex and isinstance(self.passwordRegex, str) and self.passwordRegex.strip(): + kvp["passwordRegex"] = self.passwordRegex.strip() + if self.loginSuccessRegex and isinstance(self.loginSuccessRegex, str) and self.loginSuccessRegex.strip(): + kvp["loginSuccessRegex"] = self.loginSuccessRegex.strip() + if self.loginFailureRegex and isinstance(self.loginFailureRegex, str) and self.loginFailureRegex.strip(): + kvp["loginFailureRegex"] = self.loginFailureRegex.strip() + + return kvp + + def to_record_json(self): + dict = self.to_record_dict() or {} + rec_json = json.dumps(dict) + return rec_json + +class ConnectionSettingsSSH(BaseConnectionSettings, ClipboardConnectionSettings, TerminalDisplayConnectionSettings): + protocol = ConnectionProtocol.SSH + def __init__( # pylint: disable=R0917 + self, + port: Optional[str] = None, # Override port from host + allowSupplyUser: Optional[bool] = None, + userRecords: Optional[List[str]] = None, + recordingIncludeKeys: Optional[bool] = None, + disableCopy: Optional[bool] = None, + disablePaste: Optional[bool] = None, + colorScheme: Optional[str] = None, + fontSize: Optional[int] = None, + hostKey: 
Optional[str] = None, + command: Optional[str] = None, + sftp: Optional[SFTPRootDirectorySettings] = None + ): + BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys) + ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste) + TerminalDisplayConnectionSettings.__init__(self, colorScheme, fontSize) + self.hostKey = hostKey + self.command = command + self.sftp = sftp if isinstance(sftp, SFTPRootDirectorySettings) else None + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Connection Settings SSH failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + bcs = BaseConnectionSettings.load(data) + if bcs: + obj.port = bcs.port + obj.allowSupplyUser = bcs.allowSupplyUser + obj.userRecords = bcs.userRecords + obj.recordingIncludeKeys = bcs.recordingIncludeKeys + + ccs = ClipboardConnectionSettings.load(data) + if ccs: + obj.disableCopy = ccs.disableCopy + obj.disablePaste = ccs.disablePaste + + tcs = TerminalDisplayConnectionSettings.load(data) + if tcs: + obj.colorScheme = tcs.colorScheme + obj.fontSize = tcs.fontSize + + val = data.get("public_host_key", None) + if isinstance(val, str): obj.hostKey = val + val = data.get("command", None) + if isinstance(val, str): obj.command = val + sftp = data.get("sftp", None) + if isinstance(sftp, dict): obj.sftp = SFTPRootDirectorySettings.load(sftp) + + return obj + + def to_record_dict(self): + kvp: Dict[str, Any] = { "protocol": ConnectionProtocol.SSH.value } # pylint: disable=E1101 + + # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create) + recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else [] + uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""] + if uids: + kvp["userRecords"] = uids + + if self.port and 
isinstance(self.port, str) and self.port.strip(): + kvp["port"] = self.port.strip() + if self.allowSupplyUser is not None and isinstance(self.allowSupplyUser, bool): + kvp["allowSupplyUser"] = self.allowSupplyUser + if self.recordingIncludeKeys is not None and isinstance(self.recordingIncludeKeys, bool): + kvp["recordingIncludeKeys"] = self.recordingIncludeKeys + if self.disableCopy is not None and isinstance(self.disableCopy, bool): + kvp["disableCopy"] = self.disableCopy + if self.disablePaste is not None and isinstance(self.disablePaste, bool): + kvp["disablePaste"] = self.disablePaste + + if self.colorScheme and isinstance(self.colorScheme, str) and self.colorScheme.strip(): + kvp["colorScheme"] = self.colorScheme.strip() + if self.fontSize and type(self.fontSize) is int and self.fontSize > 4: + kvp["fontSize"] = str(self.fontSize) + if self.hostKey and isinstance(self.hostKey, str) and self.hostKey.strip(): + kvp["hostKey"] = self.hostKey.strip() + if self.command and isinstance(self.command, str) and self.command.strip(): + kvp["command"] = self.command.strip() + + if isinstance(self.sftp, SFTPRootDirectorySettings): + srds = self.sftp.to_dict() + if srds: + kvp["sftp"] = srds + + return kvp + + def to_record_json(self): + dict = self.to_record_dict() or {} + rec_json = json.dumps(dict) + return rec_json + +class ConnectionSettingsKubernetes(BaseConnectionSettings, TerminalDisplayConnectionSettings): + protocol = ConnectionProtocol.KUBERNETES + def __init__( # pylint: disable=R0917 + self, + port: Optional[str] = None, # Override port from host + allowSupplyUser: Optional[bool] = None, + userRecords: Optional[List[str]] = None, + recordingIncludeKeys: Optional[bool] = None, + colorScheme: Optional[str] = None, + fontSize: Optional[int] = None, + ignoreCert: Optional[bool] = None, + caCert: Optional[str] = None, + namespace: Optional[str] = None, + pod: Optional[str] = None, + container: Optional[str] = None, + clientCert: Optional[str] = None, + clientKey: 
Optional[str] = None + ): + BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys) + TerminalDisplayConnectionSettings.__init__(self, colorScheme, fontSize) + self.ignoreCert = ignoreCert + self.caCert = caCert + self.namespace = namespace + self.pod = pod + self.container = container + self.clientCert = clientCert + self.clientKey = clientKey + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Connection Settings K8S failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + bcs = BaseConnectionSettings.load(data) + if bcs: + obj.port = bcs.port + obj.allowSupplyUser = bcs.allowSupplyUser + obj.userRecords = bcs.userRecords + obj.recordingIncludeKeys = bcs.recordingIncludeKeys + + tcs = TerminalDisplayConnectionSettings.load(data) + if tcs: + obj.colorScheme = tcs.colorScheme + obj.fontSize = tcs.fontSize + + val = data.get("namespace", None) + if isinstance(val, str): obj.namespace = val + val = data.get("pod_name", None) + if isinstance(val, str): obj.pod = val + val = data.get("container", None) + if isinstance(val, str): obj.container = val + obj.ignoreCert = utils.value_to_boolean(data.get("ignore_server_cert", None)) + obj.caCert = multiline_to_str(parse_multiline(data, "ca_certificate", "Error parsing ca_certificate")) + obj.clientCert = multiline_to_str(parse_multiline(data, "client_certificate", "Error parsing client_certificate")) + obj.clientKey = multiline_to_str(parse_multiline(data, "client_key", "Error parsing client_key")) + + return obj + + def to_record_dict(self): + kvp: Dict[str, Any] = { "protocol": ConnectionProtocol.KUBERNETES.value } # pylint: disable=E1101 + + # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create) + recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else 
[] + uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""] + if uids: + kvp["userRecords"] = uids + + if self.port and isinstance(self.port, str) and self.port.strip(): + kvp["port"] = self.port.strip() + if self.allowSupplyUser is not None and isinstance(self.allowSupplyUser, bool): + kvp["allowSupplyUser"] = self.allowSupplyUser + if self.recordingIncludeKeys is not None and isinstance(self.recordingIncludeKeys, bool): + kvp["recordingIncludeKeys"] = self.recordingIncludeKeys + if self.colorScheme and isinstance(self.colorScheme, str) and self.colorScheme.strip(): + kvp["colorScheme"] = self.colorScheme.strip() + if self.fontSize and type(self.fontSize) is int and self.fontSize > 4: + kvp["fontSize"] = str(self.fontSize) + if self.namespace and isinstance(self.namespace, str) and self.namespace.strip(): + kvp["namespace"] = self.namespace.strip() + if self.pod and isinstance(self.pod, str) and self.pod.strip(): + kvp["pod"] = self.pod.strip() + + if self.container and isinstance(self.container, str) and self.container.strip(): + kvp["container"] = self.container.strip() + if self.ignoreCert is not None and isinstance(self.ignoreCert, bool): + kvp["ignoreCert"] = self.ignoreCert + if self.caCert and isinstance(self.caCert, str) and self.caCert.strip(): + kvp["caCert"] = self.caCert.strip() + if self.clientCert and isinstance(self.clientCert, str) and self.clientCert.strip(): + kvp["clientCert"] = self.clientCert.strip() + if self.clientKey and isinstance(self.clientKey, str) and self.clientKey.strip(): + kvp["clientKey"] = self.clientKey.strip() + + return kvp + + def to_record_json(self): + dict = self.to_record_dict() or {} + rec_json = json.dumps(dict) + return rec_json + +class BaseDatabaseConnectionSettings(BaseConnectionSettings, ClipboardConnectionSettings): + def __init__( # pylint: disable=R0917 + self, + port: Optional[str] = None, # Override port from host + allowSupplyUser: Optional[bool] = None, + userRecords: Optional[List[str]] 
= None, + recordingIncludeKeys: Optional[bool] = None, + disableCopy: Optional[bool] = None, + disablePaste: Optional[bool] = None, + database: Optional[str] = None, + disableCsvExport: Optional[bool] = None, + disableCsvImport: Optional[bool] = None + ): + BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys) + ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste) + self.database = database + self.disableCsvExport = disableCsvExport + self.disableCsvImport = disableCsvImport + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Database Connection Settings failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + bcs = BaseConnectionSettings.load(data) + if bcs: + obj.port = bcs.port + obj.allowSupplyUser = bcs.allowSupplyUser + obj.userRecords = bcs.userRecords + obj.recordingIncludeKeys = bcs.recordingIncludeKeys + + ccs = ClipboardConnectionSettings.load(data) + if ccs: + obj.disableCopy = ccs.disableCopy + obj.disablePaste = ccs.disablePaste + + val = data.get("default_database", None) + if isinstance(val, str): obj.database = val + obj.disableCsvExport = utils.value_to_boolean(data.get("disable_csv_export", None)) + obj.disableCsvImport = utils.value_to_boolean(data.get("disable_csv_import", None)) + + return obj + + def to_record_dict(self): + kvp: Dict[str, Any] = {} + + # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create) + recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else [] + uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""] + if uids: + kvp["userRecords"] = uids + + if self.port and isinstance(self.port, str) and self.port.strip(): + kvp["port"] = self.port.strip() + if self.allowSupplyUser is not None and 
isinstance(self.allowSupplyUser, bool): + kvp["allowSupplyUser"] = self.allowSupplyUser + if self.recordingIncludeKeys is not None and isinstance(self.recordingIncludeKeys, bool): + kvp["recordingIncludeKeys"] = self.recordingIncludeKeys + if self.disableCopy is not None and isinstance(self.disableCopy, bool): + kvp["disableCopy"] = self.disableCopy + if self.disablePaste is not None and isinstance(self.disablePaste, bool): + kvp["disablePaste"] = self.disablePaste + if self.disableCsvExport is not None and isinstance(self.disableCsvExport, bool): + kvp["disableCsvExport"] = self.disableCsvExport + if self.disableCsvImport is not None and isinstance(self.disableCsvImport, bool): + kvp["disableCsvImport"] = self.disableCsvImport + if self.database and isinstance(self.database, str) and self.database.strip(): + kvp["database"] = self.database.strip() + + return kvp + + def to_record_json(self): + dict = self.to_record_dict() or {} + rec_json = json.dumps(dict) + return rec_json + +class ConnectionSettingsSqlServer(BaseDatabaseConnectionSettings): + protocol = ConnectionProtocol.SQLSERVER + def __init__( # pylint: disable=W0246 + self, + port: Optional[str] = None, # Override port from host + allowSupplyUser: Optional[bool] = None, + userRecords: Optional[List[str]] = None, + recordingIncludeKeys: Optional[bool] = None, + disableCopy: Optional[bool] = None, + disablePaste: Optional[bool] = None, + database: Optional[str] = None, + disableCsvExport: Optional[bool] = None, + disableCsvImport: Optional[bool] = None + ): + super().__init__(port, allowSupplyUser, userRecords, recordingIncludeKeys, + disableCopy, disablePaste, database, + disableCsvExport, disableCsvImport) + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"SQLServer Connection Settings failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + bdcs = 
BaseDatabaseConnectionSettings.load(data) + if bdcs: + obj.port = bdcs.port + obj.allowSupplyUser = bdcs.allowSupplyUser + obj.userRecords = bdcs.userRecords + obj.recordingIncludeKeys = bdcs.recordingIncludeKeys + obj.disableCopy = bdcs.disableCopy + obj.disablePaste = bdcs.disablePaste + obj.database = bdcs.database + obj.disableCsvExport = bdcs.disableCsvExport + obj.disableCsvImport = bdcs.disableCsvImport + + return obj + + def to_record_dict(self): + dict = super().to_record_dict() + dict["protocol"] = ConnectionProtocol.SQLSERVER.value # pylint: disable=E1101 + return dict + +class ConnectionSettingsPostgreSQL(BaseDatabaseConnectionSettings): + protocol = ConnectionProtocol.POSTGRESQL + def __init__( # pylint: disable=W0246,R0917 + self, + port: Optional[str] = None, # Override port from host + allowSupplyUser: Optional[bool] = None, + userRecords: Optional[List[str]] = None, + recordingIncludeKeys: Optional[bool] = None, + disableCopy: Optional[bool] = None, + disablePaste: Optional[bool] = None, + database: Optional[str] = None, + disableCsvExport: Optional[bool] = None, + disableCsvImport: Optional[bool] = None + ): + super().__init__(port, allowSupplyUser, userRecords, recordingIncludeKeys, + disableCopy, disablePaste, database, + disableCsvExport, disableCsvImport) + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"PostgreSQL Connection Settings failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + bdcs = BaseDatabaseConnectionSettings.load(data) + if bdcs: + obj.port = bdcs.port + obj.allowSupplyUser = bdcs.allowSupplyUser + obj.userRecords = bdcs.userRecords + obj.recordingIncludeKeys = bdcs.recordingIncludeKeys + obj.disableCopy = bdcs.disableCopy + obj.disablePaste = bdcs.disablePaste + obj.database = bdcs.database + obj.disableCsvExport = bdcs.disableCsvExport + obj.disableCsvImport = 
bdcs.disableCsvImport + + return obj + + def to_record_dict(self): + dict = super().to_record_dict() + dict["protocol"] = ConnectionProtocol.POSTGRESQL.value # pylint: disable=E1101 + return dict + +class ConnectionSettingsMySQL(BaseDatabaseConnectionSettings): + protocol = ConnectionProtocol.MYSQL + def __init__( # pylint: disable=W0246,R0917 + self, + port: Optional[str] = None, # Override port from host + allowSupplyUser: Optional[bool] = None, + userRecords: Optional[List[str]] = None, + recordingIncludeKeys: Optional[bool] = None, + disableCopy: Optional[bool] = None, + disablePaste: Optional[bool] = None, + database: Optional[str] = None, + disableCsvExport: Optional[bool] = None, + disableCsvImport: Optional[bool] = None + ): + super().__init__(port, allowSupplyUser, userRecords, recordingIncludeKeys, + disableCopy, disablePaste, database, + disableCsvExport, disableCsvImport) + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"MySQL Connection Settings failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + bdcs = BaseDatabaseConnectionSettings.load(data) + if bdcs: + obj.port = bdcs.port + obj.allowSupplyUser = bdcs.allowSupplyUser + obj.userRecords = bdcs.userRecords + obj.recordingIncludeKeys = bdcs.recordingIncludeKeys + obj.disableCopy = bdcs.disableCopy + obj.disablePaste = bdcs.disablePaste + obj.database = bdcs.database + obj.disableCsvExport = bdcs.disableCsvExport + obj.disableCsvImport = bdcs.disableCsvImport + + return obj + + def to_record_dict(self): + dict = super().to_record_dict() + dict["protocol"] = ConnectionProtocol.MYSQL.value # pylint: disable=E1101 + return dict + +PamConnectionSettings = Optional[ + Union[ + ConnectionSettingsRDP, + ConnectionSettingsVNC, + ConnectionSettingsTelnet, + ConnectionSettingsSSH, + ConnectionSettingsKubernetes, + ConnectionSettingsSqlServer, + 
ConnectionSettingsPostgreSQL, + ConnectionSettingsMySQL + ] +] + +class PamPortForwardSettings: + def __init__(self, port: Optional[str] = None, reusePort: Optional[bool] = None): + self.port = port # Override Port from host + self.reusePort = reusePort # Attempt to use the last connected port if available + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"Port Forward Settings failed to load from: {str(data)[:80]}") + if not isinstance(data, dict): return obj + + obj.port = data.get("port", None) + obj.reusePort = utils.value_to_boolean(data.get("reuse_port", None)) + return obj + + def to_record_dict(self): + dict = {} + if self.port and isinstance(self.port, str) and self.port.strip(): + dict["port"] = self.port.strip() + if self.reusePort is not None and isinstance(self.reusePort, bool): + dict["reusePort"] = self.reusePort + return dict + + def to_record_json(self): + dict = self.to_record_dict() or {} + rec_json = json.dumps(dict) + return rec_json + +class PamRemoteBrowserSettings: + def __init__( + self, + options: Optional[DagSettingsObject] = None, + connection: Optional[ConnectionSettingsHTTP] = None + ): + self.options = options + self.connection = connection + + @classmethod + def load(cls, data: Optional[Union[str, dict]]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"PAM RBI Settings field failed to load from: {str(data)[:80]}...") + if not isinstance(data, dict): return obj + + options = DagSettingsObject.load(data.get("options", {})) + if not is_empty_instance(options): + obj.options = options + + cdata = data.get("connection", {}) + # TO DO: if isinstance(cdata, str): lookup_by_name(pam_data.connections) + if not isinstance(cdata, dict): + logging.warning(f"""PAM RBI Settings: Connection must be a JSON object - skipping... 
"{str(cdata)[:24]}" """) + if cdata and isinstance(cdata, dict): + proto = cdata.get("protocol", "") + if proto and isinstance(proto, str): + if proto.lower() == "http": + conn = ConnectionSettingsHTTP.load(cdata) + if not is_empty_instance(conn): + obj.connection = conn + else: + logging.warning(f"""Connection skipped: unknown protocol "{str(proto)[:24]}" """) + + if not obj.connection and cdata and isinstance(cdata, dict): + logging.error(f"PAM RBI Settings failed to load from: {str(cdata)[:80]}...") + + return obj + +class PamSettingsFieldData: + def __init__( + self, + allowSupplyHost: Optional[bool] = None, + connection: PamConnectionSettings = None, # Optional[PamConnectionSettings] + portForward: Optional[PamPortForwardSettings] = None, + options: Optional[DagSettingsObject] = None, + jit_settings: Optional[DagJitSettingsObject] = None, + ai_settings: Optional[DagAiSettingsObject] = None, + ): + self.allowSupplyHost = allowSupplyHost + self.connection = connection + self.portForward = portForward + self.options = options + self.jit_settings = jit_settings + self.ai_settings = ai_settings + + # PamConnectionSettings excludes ConnectionSettingsHTTP + pam_connection_classes = [ + ConnectionSettingsRDP, + ConnectionSettingsVNC, + ConnectionSettingsTelnet, + ConnectionSettingsSSH, + ConnectionSettingsKubernetes, + ConnectionSettingsSqlServer, + ConnectionSettingsPostgreSQL, + ConnectionSettingsMySQL + ] + + @classmethod + def get_connection_class(cls, cdata: dict): + if cdata and isinstance(cdata, dict): + proto = cdata.get("protocol", "") + if proto and isinstance(proto, str): + proto = proto.lower() + for con in cls.pam_connection_classes: + pr = getattr(con, "protocol", "") + if isinstance(pr, ConnectionProtocol) and pr.value.lower() == proto: # pylint: disable=E1101 + return con.load(cdata) + logging.warning(f"""Connection skipped: unknown protocol "{str(proto)[:24]}" """) + return None + + def is_empty(self): + empty = is_empty_instance(self.options) + empty 
= empty and is_empty_instance(self.portForward) + empty = empty and is_empty_instance(self.connection, ["protocol"]) + # NB! JIT and AI settings are in import json but not in record json (just DAG json) + empty = empty and self.jit_settings is None and self.ai_settings is None + return empty + + @classmethod + def load(cls, data: Union[str, dict]): + obj = cls() + try: data = json.loads(data) if isinstance(data, str) else data + except: logging.error(f"PAM Settings Field failed to load from: {str(data)[:80]}...") + if not isinstance(data, dict): return obj + + obj.allowSupplyHost = utils.value_to_boolean(data.get("allow_supply_host", None)) + options_dict = data.get("options", {}) or {} + options = DagSettingsObject.load(options_dict) + if not is_empty_instance(options): + obj.options = options + if isinstance(options_dict, dict): + jit_value = options_dict.get("jit_settings", None) + if jit_value is not None: + jit_settings = DagJitSettingsObject.load(jit_value) + if jit_settings: + obj.jit_settings = jit_settings + ai_value = options_dict.get("ai_settings", None) + if ai_value is not None: + ai_settings = DagAiSettingsObject.load(ai_value) + if ai_settings: + obj.ai_settings = ai_settings + + portForward = PamPortForwardSettings.load(data.get("port_forward", {})) + if not is_empty_instance(portForward): + obj.portForward = portForward + + cdata = data.get("connection", {}) + # TO DO: if isinstance(cdata, str): lookup_by_name(pam_data.connections) + if not isinstance(cdata, dict): + logging.warning(f"""PAM Settings: Connection must be a JSON object - skipping... 
"{str(cdata)[:24]}" """) + obj.connection = cls.get_connection_class(cdata) + if not obj.connection and cdata and isinstance(cdata, dict): + logging.error(f"PAM Settings failed to load from: {str(cdata)[:80]}...") + + return obj + +def is_empty_instance(obj, skiplist: Optional[List[str]] = None): + """ Checks if all attributes (not on skiplist) are None """ + if not obj: return True + if not isinstance(skiplist, list): skiplist= [] + for attr, value in vars(obj).items(): + if not (attr in skiplist or value is None): + return False + return True + +def is_blank_instance(obj, skiplist: Optional[List[str]] = None): + """ Checks if all attributes (not on skiplist) are None or empty """ + if not obj: return True + if not isinstance(skiplist, list): skiplist= [] + for attr, value in vars(obj).items(): + if not (attr in skiplist or not value): + return False + return True + +def get_sftp_attribute(obj, name: str) -> str: + # Get one of pam_settings.connection.sftp.{sftpResource,sftpResourceUid,sftpUser,sftpUserUid} + value: str = "" + if (name and obj and + hasattr(obj, "pam_settings") and + hasattr(obj.pam_settings, "connection") and + hasattr(obj.pam_settings.connection, "sftp")): + if name == "sftpResource" and hasattr(obj.pam_settings.connection.sftp, "sftpResource"): + value = obj.pam_settings.connection.sftp.sftpResource or "" + elif name == "sftpResourceUid" and hasattr(obj.pam_settings.connection.sftp, "sftpResourceUid"): + value = obj.pam_settings.connection.sftp.sftpResourceUid or "" + elif name == "sftpUser" and hasattr(obj.pam_settings.connection.sftp, "sftpUser"): + value = obj.pam_settings.connection.sftp.sftpUser or "" + elif name == "sftpUserUid" and hasattr(obj.pam_settings.connection.sftp, "sftpUserUid"): + value = obj.pam_settings.connection.sftp.sftpUserUid or "" + else: + logging.debug(f"""Unknown sftp attribute "{name}" (skipped)""") + value = value[0] if isinstance(value, list) else value + value = value if isinstance(value, str) else "" + return 
value + +def set_sftp_uid(obj, name: str, uid: str) -> bool: + if not(obj and name): + return False + if not(uid and isinstance(uid, str) and RecordV3.is_valid_ref_uid(uid)): + logging.debug(f"""Invalid sftp UID "{uid}" (skipped)""") + return False + if (hasattr(obj, "pam_settings") and + hasattr(obj.pam_settings, "connection") and + hasattr(obj.pam_settings.connection, "sftp")): + if name == "sftpResourceUid" and hasattr(obj.pam_settings.connection.sftp, "sftpResourceUid"): + obj.pam_settings.connection.sftp.sftpResourceUid = uid + return True + elif name == "sftpUserUid" and hasattr(obj.pam_settings.connection.sftp, "sftpUserUid"): + obj.pam_settings.connection.sftp.sftpUserUid = uid + return True + else: + logging.debug(f"""Unknown sftp UID attribute "{name}" (skipped)""") + return False + +def is_admin_external(mach) -> bool: + res = False + if (mach and hasattr(mach, "is_admin_external") and mach.is_admin_external is True): + res = True + return res + +def get_admin_credential(obj, uid:bool=False) -> str: + # Get one of pam_settings.connection.{userRecords,userRecordUid} + value: str = "" + if (obj and hasattr(obj, "pam_settings") and + hasattr(obj.pam_settings, "connection") and + ((uid and hasattr(obj.pam_settings.connection, "userRecordUid")) or + (not uid and hasattr(obj.pam_settings.connection, "userRecords")))): + if uid and obj.pam_settings.connection.userRecordUid: + value = obj.pam_settings.connection.userRecordUid + elif not uid and obj.pam_settings.connection.userRecords: + value = obj.pam_settings.connection.userRecords + value = value[0] if isinstance(value, list) else value + value = value if isinstance(value, str) else "" + return value + +def set_user_record_uid(obj, uid: str, is_external: bool = False) -> bool: + if not(uid and isinstance(uid, str) and RecordV3.is_valid_ref_uid(uid)): + logging.debug(f"""Invalid userRecordUid "{uid}" (skipped)""") + return False + + if (uid and obj and hasattr(obj, "pam_settings") and + 
hasattr(obj.pam_settings, "connection") and + hasattr(obj.pam_settings.connection, "userRecordUid")): + obj.pam_settings.connection.userRecordUid = uid + if is_external is True: + if hasattr(obj, "is_admin_external"): + obj.is_admin_external = True + if hasattr(obj, "administrative_credentials_uid"): + obj.administrative_credentials_uid = uid + return True + else: + logging.debug("""Object has no attribute "userRecordUid" (skipped)""") + return False + +def find_external_user(mach, machines, title: str) -> list: + # Local pamMachine could reference pamDirectory AD user as its admin + res = [] + if title and machines and mach.type == "pamMachine": + mu = title.split(".", 1) # machine/user titles + mname = mu[0] if len(mu) > 1 else "" + uname = mu[1] if len(mu) > 1 else mu[0] + for m in machines: + if m.type == "pamDirectory" and (not mname or mname == m.title): + res.extend(search_machine(m, uname) or []) + return res + +def find_user(mach, users, title: str) -> list: + if not isinstance(mach, list): + res = search_machine(mach, title) or search_users(users, title) + else: + res = search_users(users, title) + for m in mach: + res = res or search_machine(m, title) + if res: break + return res or [] + +def search_users(users, user: str) -> list: + res = [] + if isinstance(users, list): + res = [x for x in users if getattr(x, "title", None) == user] + res = res or [x for x in users if getattr(x, "login", None) == user] + return res + +def search_machine(mach, user: str) -> list: + if mach and hasattr(mach, "users") and isinstance(mach.users, list): + return search_users(mach.users, user) + return [] + +def parse_command_options(obj, enable:bool) -> dict: + # Parse command options from DagSettingsObject (pam_resource - skiped/external) + args = {} + if not obj: return args + choices = {"on": True, "off": False} + record_key = "record" if enable else "resource_uid" + args[record_key] = obj.uid + opts = None + if isinstance(obj, PamRemoteBrowserObject): + opts = 
obj.rbi_settings.options if obj.rbi_settings and obj.rbi_settings.options else None + elif isinstance(obj, PamUserObject): + logging.warning("Trying to parse DAG settings from PAM User (skipped)") # PamUserObject.rotation_settings are different + elif not isinstance(obj, LoginUserObject): + opts = obj.pam_settings.options if obj.pam_settings and obj.pam_settings.options else None + if opts: + if enable: # PAMTunnelEditCommand.execute format enable_rotation=True/disable_rotation=True + val = opts.rotation.value if opts.rotation else "" + key = "enable_rotation" if val == "on" else "disable_rotation" if val == "off" else None + if key is not None: args[key] = True + val = opts.connections.value if opts.connections else "" + key = "enable_connections" if val == "on" else "disable_connections" if val == "off" else None + if key is not None: args[key] = True + val = opts.tunneling.value if opts.tunneling else "" + key = "enable_tunneling" if val == "on" else "disable_tunneling" if val == "off" else None + if key is not None: args[key] = True + val = opts.text_session_recording.value if opts.text_session_recording else "" + key = "enable_typescript_recording" if val == "on" else "disable_typescript_recording" if val == "off" else None + if key is not None: + args[key] = True + args[key.replace("_typescript_", "_typescripts_")] = True # legacy compat. 
+ val = opts.graphical_session_recording.value if opts.graphical_session_recording else "" + key = "enable_connections_recording" if val == "on" else "disable_connections_recording" if val == "off" else None + if key is not None: args[key] = True + val = opts.remote_browser_isolation.value if opts.remote_browser_isolation else "" + key = "enable_remote_browser_isolation" if val == "on" else "disable_remote_browser_isolation" if val == "off" else None + if key is not None: args[key] = True + # AI and JIT settings don't apply to RBI records + if not isinstance(obj, PamRemoteBrowserObject): + val = opts.ai_threat_detection.value if opts.ai_threat_detection else "" + key = "enable_ai_threat_detection" if val == "on" else "disable_ai_threat_detection" if val == "off" else None + if key is not None: args[key] = True + val = opts.ai_terminate_session_on_detection.value if opts.ai_terminate_session_on_detection else "" + key = "enable_ai_terminate_session_on_detection" if val == "on" else "disable_ai_terminate_session_on_detection" if val == "off" else None + if key is not None: args[key] = True + else: # TunnelDAG.set_resource_allowed format rotation=True/False + if opts.rotation and opts.rotation.value in ("on", "off"): + args["rotation"] = choices[opts.rotation.value] + if opts.connections and opts.connections.value in ("on", "off"): + args["connections"] = choices[opts.connections.value] + if opts.tunneling and opts.tunneling.value in ("on", "off"): + args["tunneling"] = choices[opts.tunneling.value] + if opts.text_session_recording and opts.text_session_recording.value in ("on", "off"): + # args["typescriptrecording"] = choices[opts.text_session_recording.value] + args["typescript_recording"] = choices[opts.text_session_recording.value] + if opts.graphical_session_recording and opts.graphical_session_recording.value in ("on", "off"): + # args["recording"] = choices[opts.graphical_session_recording.value] + args["session_recording"] = 
choices[opts.graphical_session_recording.value] + if opts.remote_browser_isolation and opts.remote_browser_isolation.value in ("on", "off"): + args["remote_browser_isolation"] = choices[opts.remote_browser_isolation.value] + # AI and JIT settings don't apply to RBI records + if not isinstance(obj, PamRemoteBrowserObject): + if opts.ai_threat_detection and opts.ai_threat_detection.value in ("on", "off"): + args["ai_enabled"] = choices[opts.ai_threat_detection.value] + if opts.ai_terminate_session_on_detection and opts.ai_terminate_session_on_detection.value in ("on", "off"): + args["ai_session_terminate"] = choices[opts.ai_terminate_session_on_detection.value] + + return args + +def resolve_domain_admin(pce, users): + if not(users and isinstance(users, list)): + return + if (pce and hasattr(pce, "dom_administrative_credential") and pce.dom_administrative_credential and + hasattr(pce, "admin_credential_ref")): + dac = pce.dom_administrative_credential + res = {"titles": set(), "logins": set()} + for obj in users: + uid = getattr(obj, "uid", "") or "" + title = getattr(obj, "title", "") or "" + login = getattr(obj, "login", "") or "" + if not uid: # cannot resolve script credential to an empty UID + logging.debug(f"""Unable to resolve domain admin creds from rec without UID - "{title}:{login}" (skipped)""") + continue + if title and title == dac: + res["titles"].add(uid) + elif login and login == dac: + res["logins"].add(uid) + num_unique_uids = len(res["titles"] | res["logins"]) + if num_unique_uids != 1: + logging.debug(f"{num_unique_uids} matches while resolving domain admin creds for '{dac}' ") + if res["titles"]: + pce.admin_credential_ref = next(iter(res["titles"])) + elif res["logins"]: + pce.admin_credential_ref = next(iter(res["logins"])) + if pce.admin_credential_ref: + logging.debug(f"Domain admin credential '{dac}' resolved to '{pce.admin_credential_ref}' ") + +def resolve_script_creds(rec, users, resources): + creds = set() + if (rec and hasattr(rec, 
"scripts") and rec.scripts and + hasattr(rec.scripts, "scripts") and rec.scripts.scripts): + creds = set(chain.from_iterable( + (x.additional_credentials for x in rec.scripts.scripts if x.additional_credentials)) + ) + if not creds: # nothing to resolve + return + res = {x: {"titles":[], "logins":[]} for x in creds} + for obj in chain(users, resources): + uid = getattr(obj, "uid", "") or "" + title = getattr(obj, "title", "") or "" + login = getattr(obj, "login", "") or "" + if not uid: # cannot resolve script credential to an empty UID + logging.debug(f"""Unable to resolve script creds from rec without UID - "{title}:{login}" (skipped)""") + continue + if title and title in creds: + res[title]["titles"].append(uid) + elif login and login in creds: + res[login]["logins"].append(login) + + # recursive search in machine users + if hasattr(obj, "users") and obj.users and isinstance(obj.users, list): + for usr in obj.users: + uid = getattr(usr, "uid", "") or "" + title = getattr(usr, "title", "") or "" + login = getattr(usr, "login", "") or "" + if not uid: # cannot resolve script credential to an empty UID + logging.debug(f"""Unable to resolve script creds from rec without UID - "{title}:{login}" (skipped)""") + continue + if title and title in creds: + res[title]["titles"].append(uid) + elif login and login in creds: + res[login]["logins"].append(login) + + if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: + for k, v in res.items(): + tlen = len(v.get("titles", [])) + llen = len(v.get("login", [])) + if tlen+llen != 1: + logging.debug(f"{tlen+llen} matches while resolving script creds for {k}") + + for script in (x for x in rec.scripts.scripts if x.additional_credentials): + for cred in script.additional_credentials: + matches = res.get(cred) or {} + match = next(chain(matches.get("titles") or [], matches.get("logins") or []), None) + if match: + script.record_refs.append(match) + else: + title = getattr(rec, "title", "") or "" + login = getattr(rec, 
"login", "") or "" + logging.warning(f"""Unable to resolve script creds "{cred}" from "{title}:{login}" """) + if script.record_refs: + script.record_refs = list(set(script.record_refs)) + +def add_pam_scripts(params, record, scripts): + """Add post-rotation script(s) to a rotation record""" + if not (isinstance(record, str) and record != "" + and isinstance(scripts, list) and len(scripts) > 0): + return # nothing to do - no record or no script(s) + + ruid = record if record in params.record_cache else "" + if not ruid: + records = list(vault_extensions.find_records( + params, search_str=record, record_version=(3, 6), + record_type=PAM_ROTATION_TYPES + PAM_CONFIG_TYPES)) + if len(records) == 0: + logging.warning(f"""{bcolors.WARNING}Warning: {bcolors.ENDC} Add rotation script - Record "{record}" not found!""") + elif len(records) > 1: + logging.warning(f"""{bcolors.WARNING}Warning: {bcolors.ENDC} Add rotation script - Record "{record}" is not unique. Use record UID!""") + else: + ruid = records[0].record_uid + rec = vault.KeeperRecord.load(params, ruid) if ruid else None + if rec and isinstance(rec, vault.TypedRecord): + if rec.version not in (3, 6): + logging.warning(f"""{bcolors.WARNING}Warning: {bcolors.ENDC} Add rotation script - Record "{rec.record_uid}" is not a rotation record (skipped).""") + return + + script_field = next((x for x in rec.fields if x.type == "script"), None) + if not script_field: + script_field = vault.TypedField.new_field("script", [], "rotationScripts") + rec.fields.append(script_field) + for script in scripts: + file_name = script.file + full_name = os.path.abspath(os.path.expanduser(file_name)) + if not os.path.isfile(full_name): + logging.warning(f"""{bcolors.WARNING}Warning: {bcolors.ENDC} Add rotation script - File "{file_name}" not found (skipped).""") + continue + facade = record_facades.FileRefRecordFacade() + facade.record = rec + pre = set(facade.file_ref) + upload_task = attachment.FileUploadTask(full_name) + 
attachment.upload_attachments(params, rec, [upload_task]) + post = set(facade.file_ref) + df = post.difference(pre) + if len(df) == 1: + file_uid = df.pop() + facade.file_ref.remove(file_uid) + script_value = { + "fileRef": file_uid, + "recordRef": [], + "command": "", + } + # command and recordRef are optional + if script.script_command: + script_value["command"] = script.script_command + if script.record_refs: + for ref in script.record_refs: + script_value["recordRef"].append(ref) + if ref not in params.record_cache: + logging.debug(f"{bcolors.WARNING}Warning: {bcolors.ENDC} " + "Add rotation script - Additional Credentials Record " + f""" "{ref}" not found (recordRef added)!""") + script_field.value.append(script_value) # type: ignore + + record_management.update_record(params, rec) + api.sync_down(params) + params.sync_data = True diff --git a/keepercommander/commands/pam_import/commands.py b/keepercommander/commands/pam_import/commands.py new file mode 100644 index 000000000..63ac5f78a --- /dev/null +++ b/keepercommander/commands/pam_import/commands.py @@ -0,0 +1,20 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' pam_directory_uid (pamDirectory in pam_data.resources by title) - if (mach.pam_settings and mach.pam_settings.jit_settings and - getattr(mach.pam_settings.jit_settings, "pam_directory_record", None)): - jit = mach.pam_settings.jit_settings + # RBI has rbi_settings only (no pam_settings.jit_settings) + ps = getattr(mach, "pam_settings", None) + jit = getattr(ps, "jit_settings", None) if ps else None + if jit and getattr(jit, "pam_directory_record", None): ref = (jit.pam_directory_record or "").strip() if ref: matches = [x for x in pam_directories if getattr(x, "title", None) == ref] @@ -1683,31 +1629,34 @@ def process_data(self, params, project): tdag.set_resource_allowed(**args) # After setting allowedSettings, save JIT settings if present - # JIT settings don't apply to RBI records (only machine/db/directory) - if mach.pam_settings and 
mach.pam_settings.jit_settings: - jit_dag_dict = mach.pam_settings.jit_settings.to_dag_dict() + # JIT settings don't apply to RBI records (only machine/db/directory); RBI has rbi_settings, no pam_settings.jit_settings + ps = getattr(mach, "pam_settings", None) + jit = getattr(ps, "jit_settings", None) if ps else None + ai = getattr(ps, "ai_settings", None) if ps else None + if jit: + jit_dag_dict = jit.to_dag_dict() if jit_dag_dict: # Only save if not empty set_resource_jit_settings(params, mach.uid, jit_dag_dict, pam_cfg_uid) # After setting allowedSettings, save AI settings if present # AI settings don't apply to RBI records (only machine/db/directory) - if mach.pam_settings and mach.pam_settings.ai_settings: + if ai: user_id = "" if getattr(params, "account_uid_bytes", None): user_id = utils.base64_url_encode(params.account_uid_bytes) elif getattr(params, "user", ""): user_id = params.user - ai_dag_dict = mach.pam_settings.ai_settings.to_dag_dict(user_id=user_id) + ai_dag_dict = ai.to_dag_dict(user_id=user_id) if ai_dag_dict: # Only save if not empty set_resource_keeper_ai_settings(params, mach.uid, ai_dag_dict, pam_cfg_uid) # Web vault UI visualizer shows only latest and meta is most wanted path. # Note: DAG may take a while to sync in web vault # Dummy update to meta so it is latest among DATA (after jit/ai). - if mach.pam_settings and (mach.pam_settings.jit_settings or mach.pam_settings.ai_settings): + if jit or ai: refresh_meta_to_latest(params, mach.uid, pam_cfg_uid) # Bump LINK to config only when AI is present (AI adds the encryption KEY). 
- if mach.pam_settings and mach.pam_settings.ai_settings: + if ai: refresh_link_to_config_to_latest(params, mach.uid, pam_cfg_uid) # Machine - create its users (if any) @@ -1740,11 +1689,13 @@ def process_data(self, params, project): if resources: print(f"{len(resources)}/{len(resources)}\n") # link machine -> pamDirectory (LINK, path=domain) for jit_settings.pam_directory_uid + # RBI has rbi_settings only (no pam_settings.jit_settings) jit_domain_links_added = False for mach in resources: - if not (mach and mach.pam_settings and mach.pam_settings.jit_settings): + ps = getattr(mach, "pam_settings", None) + jit = getattr(ps, "jit_settings", None) if ps else None + if not (mach and jit): continue - jit = mach.pam_settings.jit_settings dir_uid = getattr(jit, "pam_directory_uid", None) if not dir_uid: continue @@ -1776,3200 +1727,13 @@ def process_data(self, params, project): prf = vault.TypedField.new_field('pamResources', {}) pcrec.fields.append(prf) prf.value = prf.value or [{}] - prf.value[0]["adminCredentialRef"] = pce.admin_credential_ref - record_management.update_record(params, pcrec) - tdag.link_user_to_config_with_options(pce.admin_credential_ref, is_admin='on') + if isinstance(prf.value[0], dict): + prf.value[0]["adminCredentialRef"] = pce.admin_credential_ref + record_management.update_record(params, pcrec) + tdag.link_user_to_config_with_options(pce.admin_credential_ref, is_admin='on') + else: + logging.error(f"Unable to add adminCredentialRef - bad pamResources field in PAM Config {pcuid}") else: logging.debug(f"Unable to resolve domain admin '{pce.dom_administrative_credential}' for PAM Domain configuration.") logging.debug("Done processing project data.") - - -class PamConfigEnvironment(): - def _initialize(self): - self.uid: str = "" # known after creation - self.environment: str = "" # local|aws|azure|domain|gcp|oci - self.title: str = "" - # self.gateway_name: str = "" # used externally, use controllerUid here - # self.ksm_app_name: str = "" # used 
externally, use controllerUid here - # self.application_folder_uid: str = "" # auto (Users folder) in pam_resources - - # default settings - self.connections: str = "on" - self.rotation: str = "on" - self.tunneling: str = "on" - self.remote_browser_isolation: str = "on" - self.graphical_session_recording: str = "on" - self.text_session_recording: str = "on" - self.ai_threat_detection: str = "off" - self.ai_terminate_session_on_detection: str = "off" - - self.port_mapping: List[str] = [] # ex. ["2222=ssh", "33306=mysql"] for discovery, rotation etc. - self.default_rotation_schedule: dict = {} # "type": "On-Demand|CRON" - self.scripts = None # PamScriptsObject - PAM Config scripts run on gateway after every rotation - self.attachments = None # PamAttachmentsObject - - # common settings (shared across all config types) - self.pam_resources = {} # {"folderUid": "", "controllerUid": ""} - "resourceRef": unused/legacy - - # Local environment: pamNetworkConfiguration - self.network_id: str = "" # required, text:networkId prefix for naming resources during discovery - self.network_cidr: str = "" # required, text:networkCIDR network CIDR used for discovery - - # AWS environment: pamAwsConfiguration - self.aws_id: str = "" # required, text:awsId - self.aws_access_key_id: str = "" # required, secret:accessKeyId - self.aws_secret_access_key: str = "" # required, secret:accessSecretKey - self.aws_region_names: List[str] = [] # optional, multiline:regionNames - - # Azure environment: pamAzureConfiguration - self.az_entra_id: str = "" # required, text:azureId - self.az_client_id: str = "" # required, secret:clientId - self.az_client_secret: str = "" # required, secret:clientSecret - self.az_subscription_id: str = "" # required, secret:subscriptionId - self.az_tenant_id: str = "" # required, secret:tenantId - self.az_resource_groups: List[str] = [] # optional, multiline:resourceGroups - - # Domain environment: pamDomainConfiguration - self.dom_domain_id: str = "" # required, 
text:pamDomainId - self.dom_hostname: str = "" # required, pamHostname: - self.dom_port: str = "" # required, pamHostname: - self.dom_use_ssl: bool = False # required, checkbox:useSSL - self.dom_scan_dc_cidr: bool = False # optional, checkbox:scanDCCIDR - self.dom_network_cidr: str = "" # optional, text:networkCIDR - self.dom_administrative_credential: str = "" # required, existing pamUser: pamResources.value[0][adminCredentialRef] - self.admin_credential_ref: str = "" # UID resolved from dom_administrative_credential by record title - # Domain Administrator User: pamUser record should have an ACL edge to the pamDomainConfiguration record with is_admin = True - # Domain users are the equivalent to cloud users, IAM/Azure users. The parent of the pamUser is the configuration record. - # The user does not belong to a machine, database or directory resource. - - # Google Cloud Platform (GCP) environment: pamGcpConfiguration - self.gcp_id: str = "" # required, text:pamGcpId - self.gcp_service_account_key: str = "" # required, json:pamServiceAccountKey - self.gcp_google_admin_email: str = "" # required, email:pamGoogleAdminEmail - self.gcp_region_names: List[str] = [] # required, multiline:pamGcpRegionName - - # Oracle Cloud Infrastructure (OCI) environment: pamOciConfiguration - # NB! 
OCI settings subject to change: - self.oci_id: str = "" # required, text:pamOciId - self.oci_admin_id: str = "" # required, secret:adminOcid - self.oci_admin_public_key: str = "" # required, secret:adminPublicKey - self.oci_admin_private_key: str = "" # required, secret:adminPrivateKey - self.oci_tenancy: str = "" # required, text:tenancyOci - self.oci_region: str = "" # required, text:regionOci - - def __init__(self, environment_type:str, settings:dict, controller_uid:str, folder_uid:str) -> None: - self._initialize() - settings = settings if isinstance(settings, dict) else {} - environment_type = str(environment_type).strip() - if environment_type not in PAM_ENVIRONMENT_TYPES: - environment_type = str(settings.get("environment", "")).strip() - if environment_type not in PAM_ENVIRONMENT_TYPES: - logging.warning("Unrecognized environment type " - f"""{bcolors.WARNING}"{environment_type}"{bcolors.ENDC} """ - f"""must be one of {PAM_ENVIRONMENT_TYPES} - switching to "local" """) - environment_type = "local" - self.environment = environment_type - - # common properties shared across all PAM config types: - self.pam_resources = { - "controllerUid": controller_uid, - "folderUid": folder_uid - # "resourceRef": "" - unused/legacy - } - val = settings.get("title", None) - if isinstance(val, str): self.title = val - - # gateway_name, ksm_app_name used externally during gw creation, use controllerUid here - - choices = ("on", "off", "default") - val = settings.get("connections", None) - if isinstance(val, str) and val in choices: self.connections = val - val = settings.get("rotation", None) - if isinstance(val, str) and val in choices: self.rotation = val - val = settings.get("tunneling", None) - if isinstance(val, str) and val in choices: self.tunneling = val - val = settings.get("remote_browser_isolation", None) - if isinstance(val, str) and val in choices: self.remote_browser_isolation = val - val = settings.get("graphical_session_recording", None) - if isinstance(val, 
str) and val in choices: self.graphical_session_recording = val - val = settings.get("text_session_recording", None) - if isinstance(val, str) and val in choices: self.text_session_recording = val - val = settings.get("ai_threat_detection", None) - if isinstance(val, str) and val in choices: self.ai_threat_detection = val - val = settings.get("ai_terminate_session_on_detection", None) - if isinstance(val, str) and val in choices: self.ai_terminate_session_on_detection = val - - val = settings.get("port_mapping", None) # multiline - if isinstance(val, str): val = [val] - if (isinstance(val, list) and all(isinstance(x, str) and x != "" for x in val)): - self.port_mapping = val - elif val is not None: - logging.warning("Unrecognized port_mapping values (skipped) - expecting list of strings,"\ - """ ex. ["2222=ssh", "33060=mysql"]""") - - # {"type": "on-demand"} or {"type": "CRON", "cron": "30 18 * * *", "tz": "America/Chicago" } - val = settings.get("default_rotation_schedule", None) - if isinstance(val, dict): - schedule_type = str(val.get("type", "")).lower() - schedule_type = {"on-demand": "ON_DEMAND", "cron": "CRON"}.get(schedule_type, "") - if schedule_type != "": - if schedule_type == "ON_DEMAND": - self.default_rotation_schedule = { "type": "ON_DEMAND" } - elif schedule_type == "CRON": - cron = str(val.get("cron", "")).strip() - if cron: - self.default_rotation_schedule = { "type": "CRON", "cron": cron } - tz = str(val.get("tz", "")).strip() - if tz: self.default_rotation_schedule["tz"] = tz - else: - logging.warning("Skipped unrecognized CRON settings in default_rotation_schedule") - else: - logging.warning("Skipped unrecognized default_rotation_schedule type") - - self.scripts = PamScriptsObject.load(settings.get("scripts", None)) - self.attachments = PamAttachmentsObject.load(settings.get("attachments", None)) - - # Local Network - if environment_type == "local": - val = settings.get("network_id", None) - if isinstance(val, str): self.network_id = val - val 
= settings.get("network_cidr", None) - if isinstance(val, str): self.network_cidr = val - elif environment_type == "aws": - val = settings.get("aws_id", None) # required - if isinstance(val, str): self.aws_id = val - val = settings.get("aws_access_key_id", None) - if isinstance(val, str): self.aws_access_key_id = val - val = settings.get("aws_secret_access_key", None) - if isinstance(val, str): self.aws_secret_access_key = val - - val = settings.get("aws_region_names", None) # multiline - if isinstance(val, str): val = [val] - if (isinstance(val, list) and all(isinstance(x, str) and x != "" for x in val)): - self.aws_region_names = val - elif val is not None: - logging.warning("Unrecognized aws_region_names values (skipped) - expecting list of strings") - elif environment_type == "azure": - val = settings.get("az_entra_id", None) # required - if isinstance(val, str): self.az_entra_id = val - val = settings.get("az_client_id", None) # required - if isinstance(val, str): self.az_client_id = val - val = settings.get("az_client_secret", None) # required - if isinstance(val, str): self.az_client_secret = val - val = settings.get("az_subscription_id", None) # required - if isinstance(val, str): self.az_subscription_id = val - val = settings.get("az_tenant_id", None) # required - if isinstance(val, str): self.az_tenant_id = val - val = settings.get("az_resource_groups", None) # multiline - if isinstance(val, str): val = [val] - if (isinstance(val, list) and all(isinstance(x, str) and x != "" for x in val)): - self.az_resource_groups = val - elif val is not None: - logging.warning("Unrecognized az_resource_groups values (skipped) - expecting list of strings") - elif environment_type == "domain": - val = settings.get("dom_domain_id", None) # required - if isinstance(val, str): self.dom_domain_id = val - val = settings.get("dom_hostname", None) # required - if isinstance(val, str): self.dom_hostname = val - val = settings.get("dom_port", None) # required - if isinstance(val, 
int) and 0 <= val <= 65535: val = str(val) - if isinstance(val, str): self.dom_port = val - val = utils.value_to_boolean(settings.get("dom_use_ssl")) # required, bool - if isinstance(val, bool): self.dom_use_ssl = val - val = utils.value_to_boolean(settings.get("dom_scan_dc_cidr")) # optional, bool - if isinstance(val, bool): self.dom_scan_dc_cidr = val - val = settings.get("dom_network_cidr", None) # optional - if isinstance(val, str): self.dom_network_cidr = val - val = settings.get("dom_administrative_credential", None) # required, existing pamUser - if isinstance(val, str): self.dom_administrative_credential = val - # self.admin_credential_ref - will be resolved from dom_administrative_credential (later) - elif environment_type == "gcp": - val = settings.get("gcp_id", None) # required - if isinstance(val, str): self.gcp_id = val - # --service-account-key accepts only JSON.stringify(value) anyways - val = settings.get("gcp_service_account_key", None) # required - if isinstance(val, str): self.gcp_service_account_key = val - val = settings.get("gcp_google_admin_email", None) # required - if isinstance(val, str): self.gcp_google_admin_email = val - val = settings.get("gcp_region_names", None) # required, multiline - if isinstance(val, str): val = [val] - if (isinstance(val, list) and all(isinstance(x, str) and x != "" for x in val)): - self.gcp_region_names = val - elif val is not None: - logging.warning("Unrecognized gcp_region_names values (skipped) - expecting list of strings") - elif environment_type == "oci": - val = settings.get("oci_id", None) # required - if isinstance(val, str): self.oci_id = val - val = settings.get("oci_admin_id", None) # required - if isinstance(val, str): self.oci_admin_id = val - val = settings.get("oci_admin_public_key", None) # required - if isinstance(val, str): self.oci_admin_public_key = val - val = settings.get("oci_admin_private_key", None) # required - if isinstance(val, str): self.oci_admin_private_key = val - val = 
settings.get("oci_tenancy", None) # required - if isinstance(val, str): self.oci_tenancy = val - val = settings.get("oci_region", None) # required - if isinstance(val, str): self.oci_region = val - - -class PamScriptsObject(): - def __init__(self): - self.scripts: List[PamScriptObject] = [] - - @classmethod - def load(cls, data: Optional[Union[str, list]]) -> PamScriptsObject: - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"Pam Scripts failed to load from: {str(data)[:80]}...") - if not(data and isinstance(data, list)): return obj - - for s in data: - so = PamScriptObject.load(s) - if so.validate(): - obj.scripts.append(so) - else: - logging.warning(f"""Script file not found (skipped): "{str(so.file)}" """) - if not obj.scripts: logging.warning("Skipped empty scripts section") - return obj - - # def to_json(self): pass # File upload will create the JSON format - - -class PamScriptObject(): - def __init__(self): - self.file: str = "" - self.script_command: str = "" - self.additional_credentials: List[str] = [] - self.file_ref: str = "" # fileRef generated by file upload - self.record_refs: List[str] = [] # "recordRef":["uid1","uid2"] from additional_credentials - - def validate(self): - valid = isinstance(self.file, str) - valid = valid and Path(self.file).resolve().exists() - return valid - - @classmethod - def load(cls, data: Union[str, dict]) -> PamScriptObject: - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"PAM script failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - # TUI: "script": { "script_command": "pwsh.exe", "file": "path/file.ext", "additional_credentials": ["admin1", "user2"] }, - # JSON: "script": [{"command":"", "fileRef":"path/file.ext", "recordRef": ["uid1", "uid2"]}] - # use file upload to attach to existing record and get UIDs - cmd = data.get("script_command", None) - if isinstance(cmd, str) 
and cmd.strip() != "": obj.script_command = cmd.strip() - file = data.get("file", None) - if isinstance(file, str) and file.strip() != "": obj.file = file.strip() - # before use call validate() which also checks if file exists - - # NB! If script has additional_credentials these must be added later, - # after pamUser creation - acs = data.get("additional_credentials", None) - if isinstance(acs, str): acs = [acs] - if isinstance(acs, list) and acs: obj.additional_credentials = acs - - return obj - - # def to_json(self): pass # File upload will create the JSON format - - -class PamAttachmentsObject(): - def __init__(self): - self.attachments: List[PamAttachmentObject] = [] - # self.file_ref: List[str] # fileRef: [] populated by file upload - - @classmethod - def load(cls, data: Optional[Union[str, list]]) -> PamAttachmentsObject: - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"PAM Attachments failed to load from: {str(data)[:80]}...") - if not(data and isinstance(data, list)): return obj - - for a in data: - if isinstance(a, str): a = { "file": a } - ao = PamAttachmentObject.load(a) - if ao.validate(): - obj.attachments.append(ao) - else: - logging.warning(f"""File attachment not found (skipped): "{str(ao.file)}" """) - if not obj.attachments: logging.warning("Skipped empty file attachments section") - return obj - - # def to_json(self): pass # File upload will create the JSON format - - -class PamAttachmentObject(): - def __init__(self): - self.file: str = "" - self.title: str = "" - self.file_ref: str = "" # fileRef generated by file upload - - def validate(self): - valid = isinstance(self.file, str) - valid = valid and Path(self.file).resolve().exists() - return valid - - @classmethod - def load(cls, data: Union[str, dict]) -> PamAttachmentObject: - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"Failed to load file attachment from: {str(data)[:80]}") 
- if isinstance(data, str): data = {"file": data} - if not isinstance(data, dict): return obj - - # TUI: "attachments": [{ "file": "path/file.ext", "title": "File1" }] - # TUI: "attachments": ["path/file1", "file2"] - currently / title=filename - # JSON: "fileRef": ["uid1", "uid2"] # file upload generated - # use file upload to attach to existing record and get UIDs - title = data.get("title", None) - if isinstance(title, str) and title.strip() != "": obj.title = title.strip() - file = data.get("file", None) - if isinstance(file, str) and file.strip() != "": obj.file = file.strip() - # before use call validate() which also checks if file exists - - return obj - - # def to_json(self): pass # File upload will create the JSON format - - -class PamRotationScheduleObject(): - def __init__(self): - self.type: str = "" # on-demand|CRON - self.cron: str = "" # ex. "cron": "30 18 * * *" - self.tz: str = "" # timezone - default = "Etc/UTC" - # {"type": "on-demand"}|{"type": "CRON", "cron": "30 18 * * *"} - # http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html#examples - - @classmethod - def load(cls, data: Union[str, dict]) -> PamRotationScheduleObject: - schedule_types = ("on-demand", "cron") - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"Failed to load rotation schedule from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - type = data.get("type", None) - if type and isinstance(type, str) and type.strip().lower() in schedule_types: - obj.type = type.strip().lower() - elif type: - logging.error(f"""Schedule type "{str(type)[:80]}" is unknown - must be one of {schedule_types}""") - - if obj.type.lower() == "cron": - cron = data.get("cron", None) - if isinstance(cron, str) and cron.strip() != "": obj.cron = cron.strip() - if obj.cron: # validate - try: - parsed_cron = vault.TypedField.import_schedule_field(obj.cron) - except: - parsed_cron = {} - if not (parsed_cron 
and parsed_cron.get("time", "")): - logging.error(f"Failed to load CRON from: {obj.cron}") - tz = data.get("tz", None) - if isinstance(tz, str) and tz.strip() != "": obj.tz = tz.strip() - - return obj - -class PamRotationParams(): - def __init__(self, configUid: str, profiles: dict): - self.configUid: str = configUid # iam_user|scripts_only=NOOP - self.ownerUid: str = "" # general - pamMachine rec UID - self.ownerTitle: str = "" # general - pamMachine rec title - self.rotation_profiles: dict = profiles or {} - -class PamRotationSettingsObject(): - def __init__(self): - self.rotation: str = "" # general|iam_user|scripts_only=NOOP - self.resource: str = "" # general:MachineTitle, IAM/Scripts:skip - auto/PamConfig - self.enabled: str = "" # on|off|default - self.schedule = None # {"type": "on-demand"}|{"type": "CRON", "cron": "30 18 * * *"} - self.password_complexity: str = "" # "32,5,5,5,5" - self.resourceUid: str = "" # general:machineUID, iam_user,scripts_only:PamConfigUID - - @classmethod - def load(cls, data: Optional[Union[str, dict]], rotation_params: Optional[PamRotationParams] = None) -> PamRotationSettingsObject: - rotation_types = ("general", "iam_user", "scripts_only") - enabled_types = ("on", "off", "default") - rx_complexity = r"^(\d+,\d+,\d+,\d+,\d+)$" - obj = cls() - - # autodetect profile name (and load from rotation_profiles section) - if isinstance(data, str) and rotation_params and isinstance(rotation_params.rotation_profiles, dict): - profile = rotation_params.rotation_profiles.get(data, None) - if profile and isinstance(profile, dict): - data = profile - - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"Failed to load rotation settings from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - rotation = data.get("rotation", None) - if rotation and isinstance(rotation, str) and rotation.strip().lower() in rotation_types: - obj.rotation = rotation.strip().lower() - elif rotation: - 
logging.error(f"""Rotation type "{str(rotation)[:80]}" is unknown - must be one of {rotation_types}""") - - # type: iam_user|scripts_only=NOOP - automatically pick up current PAM Config - # type: general - automatically picks owner record (uid by title) - if obj.rotation == "general": - resource = data.get("resource", None) - if isinstance(resource, str) and resource.strip() != "": - obj.resource = resource.strip() - if rotation_params and rotation_params.ownerTitle: - if obj.resource and obj.resource.lower() != rotation_params.ownerTitle.lower(): - logging.warning("Rotation record owner must be its parent - replacing " - f"""configured owner "resource":"{obj.resource}" """ - f"""with actual parent "{rotation_params.ownerTitle}" """) - obj.resource = rotation_params.ownerTitle - elif obj.rotation in ("iam_user", "scripts_only"): - if rotation_params and rotation_params.configUid: - obj.resource = rotation_params.configUid - - enabled = data.get("enabled", None) - if enabled and isinstance(enabled, str) and enabled.strip().lower() in enabled_types: - obj.enabled = enabled.strip().lower() - elif enabled: - logging.error(f"""Unknown rotation enablement type "{str(enabled)[:80]}" - must be one of {enabled_types}""") - - obj.schedule = PamRotationScheduleObject.load(data.get("schedule", None) or "") - complexity = data.get("password_complexity", None) - if complexity and isinstance(complexity, str): - if re.fullmatch(rx_complexity, complexity): - obj.password_complexity = complexity.strip() - if complexity and not obj.password_complexity: - logging.error(f"""Invalid password complexity "{str(enabled)[:20]}" - must be in csv format, ex. 
"32,5,5,5,5" """) - # pwd_complexity_rule_list = {} populated by password_complexity - - return obj - - -class DagOptionValue(Enum): - ON = "on" - OFF = "off" - DEFAULT = "default" - - @classmethod - def map(cls, dag_option: str): - try: return cls(str(dag_option).lower()) - except ValueError: return None - -class DagSettingsObject(): - def __init__(self): - self.pam_resource: Optional[str] = None - self.rotation: Optional[DagOptionValue] = None - self.connections: Optional[DagOptionValue] = None - self.tunneling: Optional[DagOptionValue] = None - self.remote_browser_isolation: Optional[DagOptionValue] = None - self.graphical_session_recording: Optional[DagOptionValue] = None - self.text_session_recording: Optional[DagOptionValue] = None - self.ai_threat_detection: Optional[DagOptionValue] = None - self.ai_terminate_session_on_detection: Optional[DagOptionValue] = None - # NB! PAM User has its own rotation_settings: {}, cannot enable con/tun on user anyways - # remote_browser_isolation uses rbi, pam_resource, graphical_session_recording - # rotation uses only pam_resource, rotation - # machine/db/dir uses all - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"DAG settings failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - val = data.get("resource", None) - if isinstance(val, str): obj.pam_resource = val - obj.rotation = DagOptionValue.map(data.get("rotation", None) or "") - obj.connections = DagOptionValue.map(data.get("connections", None) or "") - obj.tunneling = DagOptionValue.map(data.get("tunneling", None) or "") - obj.remote_browser_isolation = DagOptionValue.map(data.get("remote_browser_isolation", None) or "") - obj.graphical_session_recording = DagOptionValue.map(data.get("graphical_session_recording", None) or "") - obj.text_session_recording = DagOptionValue.map(data.get("text_session_recording", None) or 
"") - obj.ai_threat_detection = DagOptionValue.map(data.get("ai_threat_detection", None) or "") - obj.ai_terminate_session_on_detection = DagOptionValue.map(data.get("ai_terminate_session_on_detection", None) or "") - - return obj - - -class DagJitSettingsObject(): - def __init__(self): - self.create_ephemeral: bool = False - self.elevate: bool = False - self.elevation_method: str = "group" - self.elevation_string: str = "" - self.base_distinguished_name: str = "" - self.ephemeral_account_type: Optional[str] = None # Omit if missing - self.pam_directory_record: Optional[str] = None # Title of pamDirectory from pam_data.resources[], resolved to UID - self.pam_directory_uid: Optional[str] = None # Resolved pamDirectory record UID (set in process_data) - - @classmethod - def validate_enum_value(cls, value: str, allowed_values: List[str], field_name: str) -> Optional[str]: - """Validate value against predefined list. Returns validated value or None if invalid.""" - if not value or value == "": - return None # Empty string not allowed for enum fields - value_lower = value.lower() - allowed_lower = [v.lower() for v in allowed_values] - if value_lower in allowed_lower: - # Return original case from allowed_values - idx = allowed_lower.index(value_lower) - return allowed_values[idx] - logging.warning(f"Invalid {field_name} value '{value}'. Allowed: {allowed_values}. Skipping.") - return None - - @classmethod - def load(cls, data: Union[str, dict]) -> Optional['DagJitSettingsObject']: - """Load JIT settings from JSON. 
Returns None if data is missing/empty.""" - obj = cls() - try: - data = json.loads(data) if isinstance(data, str) else data - except: - logging.error(f"JIT settings failed to load from: {str(data)[:80]}") - return None - - if not isinstance(data, dict): - return None - - # Check if object is empty (no valid fields) - has_valid_fields = False - - # Parse boolean fields with defaults - create_ephemeral = utils.value_to_boolean(data.get("create_ephemeral", None)) - if create_ephemeral is not None: - obj.create_ephemeral = create_ephemeral - has_valid_fields = True - - elevate = utils.value_to_boolean(data.get("elevate", None)) - if elevate is not None: - obj.elevate = elevate - has_valid_fields = True - - # Parse elevation_method with validation (defaults to "group" if missing or invalid) - elevation_method = data.get("elevation_method", None) - if elevation_method is not None: - validated = cls.validate_enum_value(str(elevation_method), ["group", "role"], "elevation_method") - if validated: - obj.elevation_method = validated - has_valid_fields = True - # If validation fails, keep default "group" from __init__() - still include in DAG JSON - # If missing, keep default "group" from __init__() - still include in DAG JSON - - # Parse string fields - elevation_string = data.get("elevation_string", None) - if elevation_string is not None and str(elevation_string).strip(): - obj.elevation_string = str(elevation_string).strip() - has_valid_fields = True - - base_distinguished_name = data.get("base_distinguished_name", None) - if base_distinguished_name is not None and str(base_distinguished_name).strip(): - obj.base_distinguished_name = str(base_distinguished_name).strip() - has_valid_fields = True - - # Parse ephemeral_account_type with validation (omit if missing) - ephemeral_account_type = data.get("ephemeral_account_type", None) - if ephemeral_account_type is not None: - validated = cls.validate_enum_value( - str(ephemeral_account_type), - ["linux", "mac", "windows", 
"domain"], - "ephemeral_account_type" - ) - if validated: - obj.ephemeral_account_type = validated - has_valid_fields = True - - # Parse pam_directory_record (title of pamDirectory from pam_data.resources[]; resolved to pam_directory_uid later) - pam_directory_record = data.get("pam_directory_record", None) - if pam_directory_record is not None and str(pam_directory_record).strip(): - obj.pam_directory_record = str(pam_directory_record).strip() - has_valid_fields = True - - # Silently ignore any other unknown fields (permissive parsing) - - # Return None if no valid fields were found (empty object) - return obj if has_valid_fields else None - - def to_dag_dict(self) -> Dict[str, Any]: - """Convert to DAG JSON format (camelCase).""" - result = { - "createEphemeral": self.create_ephemeral, - "elevate": self.elevate, - "elevationMethod": self.elevation_method, # Always included (defaults to "group" if missing/invalid) - "elevationString": self.elevation_string, - "baseDistinguishedName": self.base_distinguished_name - } - # Only include ephemeralAccountType if it was set (omit if missing/invalid) - if self.ephemeral_account_type: - result["ephemeralAccountType"] = self.ephemeral_account_type - return result - - -class DagAiSettingsObject(): - def __init__(self): - self.version: str = "v1.0.0" - self.risk_levels: Dict[str, Dict[str, Any]] = {} - - @classmethod - def _parse_tag_list(cls, items: Any) -> List[str]: - tags: List[str] = [] - if not isinstance(items, list): - return tags - for item in items: - tag = "" - if isinstance(item, str): - tag = item.strip() - elif isinstance(item, dict): - tag = str(item.get("tag", "")).strip() - if tag: - tags.append(tag) - return tags - - @classmethod - def load(cls, data: Union[str, dict]) -> Optional['DagAiSettingsObject']: - """Load AI settings from JSON. 
Returns None if data is missing/empty.""" - obj = cls() - try: - data = json.loads(data) if isinstance(data, str) else data - except: - logging.error(f"AI settings failed to load from: {str(data)[:80]}") - return None - - if not isinstance(data, dict): - return None - - risk_levels = data.get("risk_levels", None) - if not isinstance(risk_levels, dict): - return None - - for level in ["critical", "high", "medium", "low"]: - level_data = risk_levels.get(level, None) - if not isinstance(level_data, dict): - continue - - ai_session_terminate = utils.value_to_boolean(level_data.get("ai_session_terminate", None)) - activities = level_data.get("activities", None) or {} - if not isinstance(activities, dict): - activities = {} - - allow_tags = cls._parse_tag_list(activities.get("allow", [])) - deny_tags = cls._parse_tag_list(activities.get("deny", [])) - - if ai_session_terminate is None and not allow_tags and not deny_tags: - continue - - obj.risk_levels[level] = { - "ai_session_terminate": ai_session_terminate, - "allow": allow_tags, - "deny": deny_tags - } - - return obj if obj.risk_levels else None - - def _build_tag_entries(self, tags: List[str], action: str, user_id: str) -> List[Dict[str, Any]]: - entries: List[Dict[str, Any]] = [] - for tag in tags: - if not tag: - continue - entries.append({ - "tag": tag, - "auditLog": [{ - "date": utils.current_milli_time(), - "userId": user_id, - "action": action - }] - }) - return entries - - def to_dag_dict(self, user_id: str) -> Optional[Dict[str, Any]]: - if not self.risk_levels: - return None - - if not user_id: - logging.warning("AI settings auditLog userId is missing; auditLog will have empty userId.") - user_id = "" - - risk_levels: Dict[str, Any] = {} - for level, data in self.risk_levels.items(): - level_out: Dict[str, Any] = {} - - if data.get("ai_session_terminate") is not None: - level_out["aiSessionTerminate"] = data["ai_session_terminate"] - - tags_out: Dict[str, Any] = {} - allow_entries = 
self._build_tag_entries(data.get("allow", []), "added_to_allow", user_id) - if allow_entries: - tags_out["allow"] = allow_entries - deny_entries = self._build_tag_entries(data.get("deny", []), "added_to_deny", user_id) - if deny_entries: - tags_out["deny"] = deny_entries - - if tags_out: - level_out["tags"] = tags_out - - if level_out: - risk_levels[level] = level_out - - if not risk_levels: - return None - - return { - "version": self.version, - "riskLevels": risk_levels - } - - -class PamUserObject(): - def __init__(self): - self.uid = "" - self.type = "pamUser" - self.title = None - self.notes = None - self.login = None - self.password = None - self.privatePEMKey = None - self.distinguishedName = None - self.connectDatabase = None - self.managed = None - self.oneTimeCode = None - self.attachments = None # fileRef - self.scripts = None # script - self.rotation_settings = None # DAG: rotation settings - - @classmethod - def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParams] = None): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"PAM User failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - dtype = str(data["type"]) if "type" in data else "pamUser" - if dtype and dtype.lower() != "pamUser".lower(): - logging.warning(f"""PAM User data using wrong type "pamUser" != "{dtype[:80]}" """) - - obj.type = "pamUser" - obj.title = str(data["title"]) if "title" in data else None - obj.notes = str(data["notes"]) if "notes" in data else None - - obj.login = str(data["login"]) if "login" in data else None - obj.password = str(data["password"]) if "password" in data else None - obj.privatePEMKey = str(data["private_pem_key"]) if "private_pem_key" in data else None - obj.distinguishedName = str(data["distinguished_name"]) if "distinguished_name" in data else None - obj.connectDatabase = str(data["connect_database"]) if "connect_database" in data else None - 
obj.managed = utils.value_to_boolean(data["managed"]) if "managed" in data else None - obj.oneTimeCode = str(data["otp"]) if "otp" in data else None - - obj.attachments = PamAttachmentsObject.load(data.get("attachments", None)) - obj.scripts = PamScriptsObject.load(data.get("scripts", None)) - rso = PamRotationSettingsObject.load(data.get("rotation_settings", None), rotation_params) - if not is_blank_instance(rso): - obj.rotation_settings = rso - - if (obj.title is None or not obj.title.strip()) and obj.login and obj.login.strip(): - obj.title = f"PAM User - {str(obj.login).strip()}" - - obj.validate_record() - - return obj - - def create_record(self, params, folder_uid): - args = { - "force": True, - "folder": folder_uid, - "record_type": self.type - } - if self.uid: args["record_uid"] = self.uid - if self.title: args["title"] = self.title - if self.notes: args["notes"] = self.notes - - fields = [] - if self.login: fields.append(f"f.login={self.login}") - if self.password: fields.append(f"f.password={self.password}") - if self.privatePEMKey: fields.append(f"f.secret.privatePEMKey={self.privatePEMKey}") - if self.distinguishedName: fields.append(f"f.text.distinguishedName={self.distinguishedName}") - if self.connectDatabase: fields.append(f"f.text.connectDatabase={self.connectDatabase}") - - managed = utils.value_to_boolean(self.managed) - if managed is not None: fields.append(f"f.checkbox.managed={str(managed).lower()}") - - if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}") - - files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else [] - if files and isinstance(files, list): - for x in files: - if x and isinstance(x, PamAttachmentObject) and x.file: - fields.append(f"file=@{x.file}") - - if fields: args["fields"] = fields - uid = RecordEditAddCommand().execute(params, **args) - if uid and isinstance(uid, str): - self.uid = uid - - # after record creation add PAM scripts - if uid and 
self.scripts and self.scripts.scripts: - add_pam_scripts(params, uid, self.scripts.scripts) - - # DAG: after record creation - self.scripts, self.rotation_settings - return uid - - def validate_record(self): - if not self.password: - logging.warning("PAM User is missing required field `login`") - if not self.rotation_settings: - logging.debug("PAM User is missing rotation settings") - if isinstance(self.rotation_settings, PamRotationSettingsObject): - if (str(self.rotation_settings.rotation).lower() == "general" and - not self.rotation_settings.resource): - logging.warning("PAM User with rotation type=general is missing required machine `resource=xxx`") - - -class LoginUserObject(): - def __init__(self): - self.uid = "" - self.type = "login" - self.title = None - self.notes = None - self.login = None - self.password = None - self.url = None - self.oneTimeCode = None - self.attachments = None - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"Record type `login` failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - dtype = str(data["type"]) if "type" in data else "login" - if dtype.lower() != "login".lower(): - logging.warning(f"""User data using wrong type "login" != "{dtype[:80]}" """) - - obj.uid = "" - obj.type = "login" - obj.title = str(data["title"]) if "title" in data else None - obj.notes = str(data["notes"]) if "notes" in data else None - - obj.login = str(data["login"]) if "login" in data else None - obj.password = str(data["password"]) if "password" in data else None - obj.url = str(data["url"]) if "url" in data else None - obj.oneTimeCode = str(data["otp"]) if "otp" in data else None - obj.attachments = PamAttachmentsObject.load(data.get("attachments", None)) - - return obj - - def create_record(self, params, folder_uid): - args = { - "force": True, - "folder": folder_uid, - "record_type": self.type - } - if 
self.uid: args["record_uid"] = self.uid - if self.title: args["title"] = self.title - if self.notes: args["notes"] = self.notes - - fields = [] - if self.login: fields.append(f"f.login={self.login}") - if self.password: fields.append(f"f.password={self.password}") - if self.url: fields.append(f"f.url={self.url}") - if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}") - - files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else [] - if files and isinstance(files, list): - for x in files: - if x and isinstance(x, PamAttachmentObject) and x.file: - fields.append(f"file=@{x.file}") - - if fields: args["fields"] = fields - uid = RecordEditAddCommand().execute(params, **args) - if uid and isinstance(uid, str): - self.uid = uid - return uid - -class PamBaseMachineParser(): - def __init__(self): - self.type = "" - self.title = None - self.notes = None - self.host = None - self.port = None - self.sslVerification = None - self.providerGroup = None - self.providerRegion = None - self.oneTimeCode = None - self.attachments = None - self.scripts = None - self.pam_settings : Optional[PamSettingsFieldData] = None - - # pamMachine - self.operatingSystem = None - self.instanceName = None - self.instanceId = None - # Warning! 
Unused, split into linked pamUser record - self.login = None - self.password = None - self.privatePEMKey = None - - # pamDatabase - self.useSSL = None - self.databaseId = None - self.databaseType = None # postgresql|postgresql-flexible|mysql|mysql-flexible|mariadb|mariadb-flexible|mssql|oracle|mongodb - - # pamDirectory - self.domainName = None - self.alternativeIPs = None - self.directoryId = None - self.directoryType = None # active_directory|openldap - self.userMatch = None - - @classmethod - def load(cls, record_type: str, data: Union[str, dict]): - pam_machine_types = ("pamMachine", "pamDatabase", "pamDirectory") - pam_db_types = ("postgresql", "postgresql-flexible", "mysql", "mysql-flexible", "mariadb", "mariadb-flexible", "mssql", "oracle", "mongodb") - pam_dir_types = ("active_directory", "openldap") - - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"""Record type "{record_type}" failed to load from: {str(data)[:80]}""") - if not isinstance(data, dict): return obj - - dtype = str(data.get("type", None)) - data_type = next((s for s in pam_machine_types if s.lower() == dtype.lower()), None) - rec_type = next((s for s in pam_machine_types if s.lower() == str(record_type).lower()), None) - if rec_type and data_type and rec_type != data_type: - logging.warning(f"""Expected machine record type "{rec_type}" != data record type "{data_type}" - Parsing as "{rec_type}" """) - if rec_type is None: - msg = f"""Unknown expected record type "{record_type}". 
""" - if data_type is None: - msg = msg + f"""Unknown data record type "{dtype}" - Parsing it as generic pamMachine.""" - else: - msg = msg + f"""Using data record type "{data_type}".""" - logging.error(f"""{msg} Expected record types "{pam_machine_types}" """) - - obj.type = rec_type or data_type or "pamMachine" - obj.title = str(data["title"]) if "title" in data else None - obj.notes = str(data["notes"]) if "notes" in data else None - obj.host = str(data["host"]) if "host" in data else None - obj.port = str(data["port"]) if "port" in data else None - obj.sslVerification = utils.value_to_boolean(data["ssl_verification"]) if "ssl_verification" in data else None - obj.providerGroup = str(data["provider_group"]) if "provider_group" in data else None - obj.providerRegion = str(data["provider_region"]) if "provider_region" in data else None - obj.oneTimeCode = str(data["otp"]) if "otp" in data else None - obj.attachments = PamAttachmentsObject.load(data.get("attachments", None)) - obj.scripts = PamScriptsObject.load(data.get("scripts", None)) - - psd = data.get("pam_settings", None) - if psd: - obj.pam_settings = PamSettingsFieldData.load(psd) - if not obj.pam_settings: - logging.error(f"""{obj.type}: failed to load PAM Settings from "{str(data)[:80]}" """) - - # pamMachine - obj.operatingSystem = str(data["operating_system"]) if "operating_system" in data else None - obj.instanceName = str(data["instance_name"]) if "instance_name" in data else None - obj.instanceId = str(data["instance_id"]) if "instance_id" in data else None - # Warning! 
Unused, split into linked pamUser record - obj.login = str(data["login"]) if "login" in data else None - obj.password = str(data["password"]) if "password" in data else None - obj.privatePEMKey = str(data["private_pem_key"]) if "private_pem_key" in data else None - - # pamDatabase - obj.useSSL = utils.value_to_boolean(data["use_ssl"]) if "use_ssl" in data else None - obj.databaseId = str(data["database_id"]) if "database_id" in data else None - - dbtype = str(data["database_type"]) if "database_type" in data else None - pamdbt = next((s for s in pam_db_types if s.lower() == str(dbtype).lower()), None) - if dbtype and not pamdbt: - logging.error(f"""Unexpected DB type "{dbtype}" - should be one of the known DB types "{pam_db_types}" """) - pamdbt = dbtype.lower() # use provided db type "as-is" - if not pamdbt and obj.type == "pamDatabase": - logging.debug(f"""pamDatabase - unable to determine DB type: database_type should be one of "{pam_db_types}" """) - obj.databaseType = pamdbt - - # pamDirectory - obj.domainName = str(data["domain_name"]) if "domain_name" in data else None - obj.alternativeIPs = multiline_to_str(parse_multiline(data, "alternative_ips", "Error parsing alternative_ips")) - obj.directoryId = str(data["directory_id"]) if "directory_id" in data else None - obj.userMatch = str(data["user_match"]) if "user_match" in data else None - - dt = str(data["directory_type"]) if "directory_type" in data else None - pamdt = next((s for s in pam_dir_types if s.lower() == str(dt).lower()), None) - if dt and not pamdt: - logging.error(f"""Unexpected Directory type "{dt}" - should be one of "{pam_dir_types}" """) - pamdt = dt.lower() # use provided directory type "as-is" - if not pamdt and obj.type == "pamDirectory": - logging.debug(f"""pamDirectory - unable to determine Directory type: directory_type should be one of "{pam_dir_types}" """) - obj.directoryType = pamdt # active_directory|openldap - - return obj - -class PamMachineObject(): - def __init__(self): - 
self.uid = "" - self.type = "pamMachine" - self.title = None - self.notes = None - self.host = None # pamHostname - self.port = None # pamHostname - self.sslVerification = None - self.operatingSystem = None - self.instanceName = None - self.instanceId = None - self.providerGroup = None - self.providerRegion = None - self.oneTimeCode = None - self.attachments = None # fileRef - self.scripts = None # script - - # Warning! unused - use users[] to link users, rotation scripts etc. - self.login = None - self.password = None - self.privatePEMKey = None - - self.pam_settings : Optional[PamSettingsFieldData] = None - self.users = None # List[PamUserObject] - one is admin(istrative credential) - - self.is_admin_external: bool = False # (True<=>found:pamDirectory#Title.pamUser#Title) - self.administrative_credentials_uid: str = "" # external or internal user UID - - @classmethod - def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParams] = None): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"""Record type "pamMachine" failed to load from: {str(data)[:80]}""") - if not isinstance(data, dict): return obj - - bmp = PamBaseMachineParser.load("pamMachine", data) - - if bmp and bmp.type.lower() != "pamMachine".lower(): - logging.warning(f"""PAM Machine data using wrong type "pamMachine" != "{bmp.type}" """) - - obj.type = "pamMachine" - obj.title = bmp.title - obj.notes = bmp.notes - obj.host = bmp.host - obj.port = bmp.port - obj.sslVerification = bmp.sslVerification - obj.operatingSystem = bmp.operatingSystem - obj.instanceName = bmp.instanceName - obj.instanceId = bmp.instanceId - obj.providerGroup = bmp.providerGroup - obj.providerRegion = bmp.providerRegion - obj.oneTimeCode = bmp.oneTimeCode - obj.attachments = bmp.attachments - obj.scripts = bmp.scripts - obj.pam_settings = bmp.pam_settings - - # Warning! unused - use users[] to link users, rotation scripts etc. 
- obj.login = bmp.login - obj.password = bmp.password - obj.privatePEMKey = bmp.privatePEMKey - - if (obj.title is None or not obj.title.strip()) and obj.login and obj.login.strip(): - obj.title = f"PAM Machine - {str(obj.login).strip()}" - if rotation_params: - rotation_params.ownerTitle = obj.title or "" - - obj.users = [] - users = data.get("users", None) - if users: - for user in users: - rt = str(user.get("type", "")) if isinstance(user, dict) else "" - if not rt: rt = "pamUser" # pamMachine user list is pamUser recs only - if rt.lower() != "pamUser".lower(): - logging.error(f"""{obj.title}:{obj.type}.users[] Expected record type pamUser, got "{rt}" - skipped.""") - continue - usr = PamUserObject.load(user, rotation_params) - if usr: - obj.users.append(usr) - else: - logging.warning(f"""Warning: PAM Machine "{obj.title}" with empty users section.""") - - obj.validate_record() - - return obj - - def create_record(self, params, folder_uid): - args = { - "force": True, - "folder": folder_uid, - "record_type": self.type - } - if self.uid: args["record_uid"] = self.uid - if self.title: args["title"] = self.title - if self.notes: args["notes"] = self.notes - - fields = [] - hostname = self.host.strip() if isinstance(self.host, str) and self.host.strip() else "" - port = self.port.strip() if isinstance(self.port, str) and self.port.strip() else "" - if hostname or port: - val = json.dumps({"hostName": hostname, "port": port}) - fields.append(f"f.pamHostname=$JSON:{val}") - - sslv = utils.value_to_boolean(self.sslVerification) - if sslv is not None: fields.append(f"checkbox.sslVerification={str(sslv).lower()}") - if self.operatingSystem: fields.append(f"f.text.operatingSystem={self.operatingSystem}") - if self.instanceName: fields.append(f"f.text.instanceName={self.instanceName}") - if self.instanceId: fields.append(f"f.text.instanceId={self.instanceId}") - if self.providerGroup: fields.append(f"f.text.providerGroup={self.providerGroup}") - if self.providerRegion: 
fields.append(f"f.text.providerRegion={self.providerRegion}") - - # Warning! unused - use users[] to link users, rotation scripts etc. - # if self.login: fields.append(f"f.login={self.login}") - # if self.password: fields.append(f"f.password={self.password}") - # if self.privatePEMKey: fields.append(f"f.secret.privatePEMKey={self.privatePEMKey}") - - if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}") - - files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else [] - if files and isinstance(files, list): - for x in files: - if x and isinstance(x, PamAttachmentObject) and x.file: - fields.append(f"file=@{x.file}") - - # pam_settings port_forward/connection belong to the record - if self.pam_settings and isinstance(self.pam_settings, PamSettingsFieldData): - allowSupplyHost = True if self.pam_settings.allowSupplyHost is True else False - portForward = self.pam_settings.portForward.to_record_dict() if self.pam_settings.portForward else {} - connection = self.pam_settings.connection.to_record_dict() if self.pam_settings.connection else {} - if portForward or connection or allowSupplyHost: - val = json.dumps({"allowSupplyHost": allowSupplyHost, "portForward": portForward or {}, "connection": connection or {}}) - fields.append(f"c.pamSettings=$JSON:{val}") - # switch to f.* once RT definition(s) update w/ pamSettings field - - if fields: args["fields"] = fields - uid = RecordEditAddCommand().execute(params, **args) - if uid and isinstance(uid, str): - self.uid = uid - - # after record creation add PAM scripts - if uid and self.scripts and self.scripts.scripts: - add_pam_scripts(params, uid, self.scripts.scripts) - - # DAG: after record creation - self.scripts, self.pam_settings.options - return uid - - def validate_record(self): - # Warning! unused - use users[] to link users, rotation scripts etc. 
- if self.login or self.password or self.privatePEMKey: - logging.warning(f"""PAM Machine "{self.title}" detected legacy format - """ - "please create separate pamUser record with login, password, privatePEMKey") - if not (self.host or self.port): - logging.warning(f"""PAM Machine "{self.title}" is missing required field `pamHostname` data (host/port)""") - errmsg = validate_pam_connection(self.pam_settings.connection, "pamMachine") if self.pam_settings else "" - if errmsg: - logging.warning(f"""PAM Machine "{self.title}" has incorrect connection setup: {errmsg}""") - -def validate_pam_connection(connection, record_type): - errmsg = "" - if connection: - # Apparently all machine types allow connections using ANY protocol - # ex. pamDatabase allowing SSH/RDP or pamMachine allowing proto: mysql - # known_mach_types = [ConnectionSettingsRDP, ConnectionSettingsVNC, ConnectionSettingsTelnet, ConnectionSettingsSSH, ConnectionSettingsKubernetes] - # known_db_types = [ConnectionSettingsSqlServer, ConnectionSettingsPostgreSQL, ConnectionSettingsMySQL] - - known_conn_types = PamSettingsFieldData.pam_connection_classes + [ConnectionSettingsHTTP] - known_mach_types = PamSettingsFieldData.pam_connection_classes - known_db_types = known_mach_types - known_rbi_types = [ConnectionSettingsHTTP] - - # known_conn_proto = [x.protocol.value.lower() for x in known_conn_types] # pylint: disable=E1101 - known_mach_proto = [x.protocol.value.lower() for x in known_mach_types] # pylint: disable=E1101 - known_db_proto = [x.protocol.value.lower() for x in known_db_types] # pylint: disable=E1101 - known_rbi_proto = [x.protocol.value.lower() for x in known_rbi_types] # pylint: disable=E1101 - - rt = str(record_type).lower().strip() - if type(connection) not in known_conn_types: - errmsg = f"""PAM Connection of unknown type "{type(connection).__name__}" """ - elif rt == "pamMachine".lower(): - if type(connection) not in known_mach_types: - errmsg = f"""PAM Connection of type 
"{type(connection).__name__}" is incompatible with "{record_type}" """ - if (isinstance(getattr(connection, "protocol", ""), ConnectionProtocol) and - connection.protocol.value.lower() not in known_mach_proto): - errmsg = errmsg + f""" Unexpected PAM Machine connection protocol "{connection.protocol.value}" """ - elif rt == "pamDatabase".lower(): - if type(connection) not in known_db_types: - errmsg = f"""PAM Connection of type "{type(connection).__name__}" is incompatible with "{record_type}" """ - if (isinstance(getattr(connection, "protocol", ""), ConnectionProtocol) and - connection.protocol.value.lower() not in known_db_proto): - errmsg = errmsg + f""" Unexpected PAM Database connection protocol "{connection.protocol.value}" """ - elif rt == "pamDirectory".lower(): - if type(connection) not in known_mach_types: - errmsg = f"""PAM Connection of type "{type(connection).__name__}" is incompatible with "{record_type}" """ - if (isinstance(getattr(connection, "protocol", ""), ConnectionProtocol) and - connection.protocol.value.lower() not in known_mach_proto): - errmsg = errmsg + f""" Unexpected PAM Directory connection protocol "{connection.protocol.value}" """ - elif rt == "pamRemoteBrowser".lower(): - if type(connection) not in known_rbi_types: - errmsg = f"""PAM Connection of type "{type(connection).__name__}" is incompatible with "{record_type}" """ - if (isinstance(getattr(connection, "protocol", ""), ConnectionProtocol) and - connection.protocol.value.lower() not in known_rbi_proto): - errmsg = errmsg + f""" Unexpected PAM Remote Browser connection protocol "{connection.protocol.value}" """ - if errmsg: - logging.debug(errmsg) - return errmsg - - -class PamDatabaseObject(): - def __init__(self): - self.uid = "" - self.type = "pamDatabase" - self.title = None - self.notes = None - self.host = None # pamHostname - self.port = None # pamHostname - self.useSSL = None - self.databaseId = None - self.databaseType = None - self.providerGroup = None - 
self.providerRegion = None - self.oneTimeCode = None - self.attachments = None # fileRef - self.scripts = None # script - - self.trafficEncryptionSeed = None - self.pam_settings : Optional[PamSettingsFieldData] = None - self.users = None # List[PamUserObject] - one is admin(istrative credential) - - @classmethod - def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParams] = None): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"""Record type "pamDatabase" failed to load from: {str(data)[:80]}""") - if not isinstance(data, dict): return obj - - bmp = PamBaseMachineParser.load("pamDatabase", data) - - if bmp and bmp.type.lower() != "pamDatabase".lower(): - logging.warning(f"""PAM Database data using wrong type "pamDatabase" != "{bmp.type}" """) - - obj.type = "pamDatabase" - obj.title = bmp.title - obj.notes = bmp.notes - obj.host = bmp.host - obj.port = bmp.port - obj.useSSL = bmp.useSSL - obj.databaseId = bmp.databaseId - obj.databaseType = bmp.databaseType - obj.providerGroup = bmp.providerGroup - obj.providerRegion = bmp.providerRegion - obj.oneTimeCode = bmp.oneTimeCode - obj.attachments = bmp.attachments - obj.scripts = bmp.scripts - obj.pam_settings = bmp.pam_settings - - if (obj.title is None or not obj.title.strip()) and obj.databaseId and obj.databaseId.strip(): - obj.title = f"PAM Database - {str(obj.databaseId).strip()}" - if rotation_params: - rotation_params.ownerTitle = obj.title or "" - - obj.users = [] - users = data.get("users", None) - if users: - for user in users: - rt = str(user.get("type", "")) if isinstance(user, dict) else "" - if not rt: rt = "pamUser" # pamDatabase user list is pamUser recs only - if rt.lower() != "pamUser".lower(): - logging.error(f"""{obj.title}:{obj.type}.users[] Expected record type pamUser, got "{rt}" - skipped.""") - continue - usr = PamUserObject.load(user, rotation_params) - if usr: - obj.users.append(usr) - else: - 
logging.warning(f"""Warning: PAM Database "{obj.title}" with empty users section.""") - - obj.validate_record() - - return obj - - def create_record(self, params, folder_uid): - args = { - "force": True, - "folder": folder_uid, - "record_type": self.type - } - if self.uid: args["record_uid"] = self.uid - if self.title: args["title"] = self.title - if self.notes: args["notes"] = self.notes - - fields = [] - hostname = self.host.strip() if isinstance(self.host, str) and self.host.strip() else "" - port = self.port.strip() if isinstance(self.port, str) and self.port.strip() else "" - if hostname or port: - val = json.dumps({"hostName": hostname, "port": port}) - fields.append(f"f.pamHostname=$JSON:{val}") - - ssl = utils.value_to_boolean(self.useSSL) - if ssl is not None: fields.append(f"f.checkbox.useSSL={str(ssl).lower()}") - if self.databaseId: fields.append(f"f.text.databaseId={self.databaseId}") - if self.databaseType: fields.append(f"f.databaseType={self.databaseType}") - if self.providerGroup: fields.append(f"f.text.providerGroup={self.providerGroup}") - if self.providerRegion: fields.append(f"f.text.providerRegion={self.providerRegion}") - - if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}") - - files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else [] - if files and isinstance(files, list): - for x in files: - if x and isinstance(x, PamAttachmentObject) and x.file: - fields.append(f"file=@{x.file}") - - # pam_settings port_forward/connection belong to the record - if self.pam_settings and isinstance(self.pam_settings, PamSettingsFieldData): - allowSupplyHost = True if self.pam_settings.allowSupplyHost is True else False - portForward = self.pam_settings.portForward.to_record_dict() if self.pam_settings.portForward else {} - connection = self.pam_settings.connection.to_record_dict() if self.pam_settings.connection else {} - if portForward or connection or allowSupplyHost: - val 
= json.dumps({"allowSupplyHost": allowSupplyHost, "portForward": portForward or {}, "connection": connection or {}}) - fields.append(f"c.pamSettings=$JSON:{val}") - # switch to f.* once RT definition(s) update w/ pamSettings field - - if fields: args["fields"] = fields - uid = RecordEditAddCommand().execute(params, **args) - if uid and isinstance(uid, str): - self.uid = uid - - # after record creation add PAM scripts - if uid and self.scripts and self.scripts.scripts: - add_pam_scripts(params, uid, self.scripts.scripts) - - # DAG: after record creation - self.scripts, self.pam_settings.options - return uid - - def validate_record(self): - if not (self.host or self.port): - logging.warning(f"""PAM Database "{self.title}" is missing required field "pamHostname" data (host/port)""") - errmsg = validate_pam_connection(self.pam_settings.connection, "pamDatabase") if self.pam_settings else "" - if errmsg: - logging.warning(f"""PAM Database "{self.title}" has incorrect connection setup: {errmsg}""") - -class PamDirectoryObject(): - def __init__(self): - self.uid = "" - self.type = "pamDirectory" - self.title = None - self.notes = None - self.host = None # pamHostname - self.port = None # pamHostname - self.useSSL = None - self.domainName = None - self.alternativeIPs = None - self.directoryId = None - self.directoryType = None # " - self.userMatch = None - self.providerGroup = None - self.providerRegion = None - self.oneTimeCode = None - self.attachments = None # fileRef - self.scripts = None # script - - self.pam_settings : Optional[PamSettingsFieldData] = None - self.users = None # List[PamUserObject] - one is admin(istrative credential) - - @classmethod - def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParams] = None): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"""Record type "pamDirectory" failed to load from: {str(data)[:80]}""") - if not isinstance(data, dict): return obj - - bmp 
= PamBaseMachineParser.load("pamDirectory", data) - - if bmp and bmp.type.lower() != "pamDirectory".lower(): - logging.warning(f"""PAM Directory data using wrong type "pamDirectory" != "{bmp.type}" """) - - obj.type = "pamDirectory" - obj.title = bmp.title - obj.notes = bmp.notes - obj.host = bmp.host - obj.port = bmp.port - obj.useSSL = bmp.useSSL - obj.domainName = bmp.domainName - obj.alternativeIPs = bmp.alternativeIPs - obj.directoryId = bmp.directoryId - obj.directoryType = bmp.directoryType - obj.userMatch = bmp.userMatch - obj.providerGroup = bmp.providerGroup - obj.providerRegion = bmp.providerRegion - obj.oneTimeCode = bmp.oneTimeCode - obj.attachments = bmp.attachments - obj.scripts = bmp.scripts - obj.pam_settings = bmp.pam_settings - - if (obj.title is None or not obj.title.strip()) and obj.domainName and obj.domainName.strip(): - obj.title = f"PAM Directory - {str(obj.domainName).strip()}" - if rotation_params: - rotation_params.ownerTitle = obj.title or "" - - obj.users = [] - users = data.get("users", None) - if users: - for user in users: - rt = str(user.get("type", "")) if isinstance(user, dict) else "" - if not rt: rt = "pamUser" # pamDirectory user list is pamUser recs only - if rt.lower() != "pamUser".lower(): - logging.error(f"""{obj.title}:{obj.type}.users[] Expected record type pamUser, got "{rt}" - skipped.""") - continue - usr = PamUserObject.load(user, rotation_params) - if usr: - obj.users.append(usr) - else: - logging.warning(f"""Warning: PAM Directory "{obj.title}" with empty users section.""") - - obj.validate_record() - - return obj - - def create_record(self, params, folder_uid): - args = { - "force": True, - "folder": folder_uid, - "record_type": self.type - } - if self.uid: args["record_uid"] = self.uid - if self.title: args["title"] = self.title - if self.notes: args["notes"] = self.notes - - fields = [] - hostname = self.host.strip() if isinstance(self.host, str) and self.host.strip() else "" - port = self.port.strip() if 
isinstance(self.port, str) and self.port.strip() else "" - if hostname or port: - val = json.dumps({"hostName": hostname, "port": port}) - fields.append(f"f.pamHostname=$JSON:{val}") - - ssl = utils.value_to_boolean(self.useSSL) - if ssl is not None: fields.append(f"f.checkbox.useSSL={str(ssl).lower()}") - if self.domainName: fields.append(f"f.text.domainName={self.domainName}") - if self.alternativeIPs: fields.append(f"f.multiline.alternativeIPs={self.alternativeIPs}") - if self.directoryId: fields.append(f"f.text.directoryId={self.directoryId}") - if self.directoryType: fields.append(f"f.directoryType={self.directoryType}") - if self.userMatch: fields.append(f"f.text.userMatch={self.userMatch}") - if self.providerGroup: fields.append(f"f.text.providerGroup={self.providerGroup}") - if self.providerRegion: fields.append(f"f.text.providerRegion={self.providerRegion}") - - if self.oneTimeCode: fields.append(f"f.oneTimeCode={self.oneTimeCode}") - - files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else [] - if files and isinstance(files, list): - for x in files: - if x and isinstance(x, PamAttachmentObject) and x.file: - fields.append(f"file=@{x.file}") - - # pam_settings port_forward/connection belong to the record - if self.pam_settings and isinstance(self.pam_settings, PamSettingsFieldData): - allowSupplyHost = True if self.pam_settings.allowSupplyHost is True else False - portForward = self.pam_settings.portForward.to_record_dict() if self.pam_settings.portForward else {} - connection = self.pam_settings.connection.to_record_dict() if self.pam_settings.connection else {} - if portForward or connection or allowSupplyHost: - val = json.dumps({"allowSupplyHost": allowSupplyHost, "portForward": portForward or {}, "connection": connection or {}}) - fields.append(f"c.pamSettings=$JSON:{val}") - # switch to f.* once RT definition(s) update w/ pamSettings field - - if fields: args["fields"] = fields - uid = 
RecordEditAddCommand().execute(params, **args) - if uid and isinstance(uid, str): - self.uid = uid - - # after record creation add PAM scripts - if uid and self.scripts and self.scripts.scripts: - add_pam_scripts(params, uid, self.scripts.scripts) - - # DAG: after record creation - self.scripts, self.pam_settings.options - return uid - - def validate_record(self): - if not (self.host or self.port): - logging.warning(f"""PAM Directory "{self.title}" is missing required field `pamHostname` data (host/port)""") - errmsg = validate_pam_connection(self.pam_settings.connection, "pamDirectory") if self.pam_settings else "" - if errmsg: - logging.warning(f"""PAM Directory "{self.title}" has incorrect connection setup: {errmsg}""") - -class PamRemoteBrowserObject(): - def __init__(self): - self.uid = "" - self.type = "pamRemoteBrowser" - self.title = None - self.notes = None - self.rbiUrl = None - self.oneTimeCode = None - self.attachments = None # fileRef - - self.rbi_settings : Optional[PamRemoteBrowserSettings] = None # ft: pamRemoteBrowserSettings - # Use httpCredentialsUid <- resolved from autofill_credentials (ref rt:Login in pam_data.users[]) - - @classmethod - def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParams] = None): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"""Record type "pamRemoteBrowser" failed to load from: {str(data)[:80]}""") - if not isinstance(data, dict): return obj - - dtype = data.get("type", None) - if dtype and str(dtype).lower() != "pamRemoteBrowser".lower(): - logging.warning(f"""PAM RBI data using wrong type "pamRemoteBrowser" != "{dtype}" """) - - obj.type = "pamRemoteBrowser" - obj.title = str(data["title"]) if "title" in data else None - obj.notes = str(data["notes"]) if "notes" in data else None - obj.rbiUrl = str(data["url"]) if "url" in data else None - obj.oneTimeCode = str(data["otp"]) if "otp" in data else None - obj.attachments = 
PamAttachmentsObject.load(data.get("attachments", None)) - - psd = data.get("pam_settings", None) - rbi_settings = PamRemoteBrowserSettings.load(psd) - obj.rbi_settings = None if is_empty_instance(rbi_settings) else rbi_settings - if psd and not obj.rbi_settings: - logging.error(f"""{obj.type}: failed to load RBI Settings from "{str(psd)[:80]}" """) - - if (obj.title is None or not obj.title.strip()) and obj.rbiUrl and str(obj.rbiUrl).strip(): - hostname = str(obj.rbiUrl).lower() - hostname = re.sub(r"^\s*https?://", "", hostname, flags=re.IGNORECASE) - hostname = hostname.split("/", 1)[0] - if hostname: - obj.title = f"PAM RBI - {hostname}" - - obj.validate_record() - - return obj - - def create_record(self, params, folder_uid): - args = { - "force": True, - "folder": folder_uid, - "record_type": self.type - } - if self.uid: args["record_uid"] = self.uid - if self.title: args["title"] = self.title - if self.notes: args["notes"] = self.notes - - fields = [] - if self.rbiUrl: fields.append(f"rbiUrl={self.rbiUrl}") - - if self.oneTimeCode: fields.append(f"oneTimeCode={self.oneTimeCode}") - - files = self.attachments.attachments if self.attachments and isinstance(self.attachments, PamAttachmentsObject) else [] - if files and isinstance(files, list): - for x in files: - if x and isinstance(x, PamAttachmentObject) and x.file: - fields.append(f"file=@{x.file}") - - # pam_settings connection belongs to the record - connection = {} - if self.rbi_settings and isinstance(self.rbi_settings, PamRemoteBrowserSettings): - if self.rbi_settings.connection: - connection = self.rbi_settings.connection.to_record_dict() - if connection: - val = json.dumps({"connection": connection or {}}) - fields.append(f"pamRemoteBrowserSettings=$JSON:{val}") - # switch to f.* once RT definition(s) update w/ pamRemoteBrowserSettings field - - if fields: args["fields"] = fields - uid = RecordEditAddCommand().execute(params, **args) - if uid and isinstance(uid, str): - self.uid = uid - - # DAG: after 
record creation - self.pam_settings.options - return uid - - def validate_record(self): - errmsg = validate_pam_connection(self.rbi_settings.connection, "pamRemoteBrowser") if self.rbi_settings else "" - if errmsg: - logging.warning(f"""PAM RBI "{self.title}" has incorrect connection setup: {errmsg}""") - -# PAM Settings field data -FONT_SIZES = (8, 9, 10, 11, 12, 14, 18, 24, 30, 36, 48, 60, 72, 96) -class ConnectionProtocol(Enum): - RDP = "rdp" - VNC = "vnc" - TELNET = "telnet" - SSH = "ssh" - KUBERNETES = "kubernetes" - SQLSERVER = "sql-server" - POSTGRESQL = "postgresql" - MYSQL = "mysql" - HTTP = "http" - -class RDPSecurity(Enum): - ANY = "any" - NLA = "nla" - TLS = "tls" - VMCONNECT = "vmconnect" - RDP = "rdp" - - @classmethod - def map(cls, rdp_security: str): - try: return cls(str(rdp_security).lower()) - except ValueError: return None - -class TerminalThemes(Enum): - BLACK_WHITE = "black-white" # Black on white - GRAY_BLACK = "gray-black" # Gray on black - GREEN_BLACK = "green-black" # Green on black - WHITE_BLACK = "white-black" # White on black - CUSTOM = "custom" # Not a valid value to send to guac - # example custom color scheme: - # "colorScheme": "background: rgb:00/3D/FC;\nforeground: rgb:74/1A/1A;\ncolor0: rgb:00/00/00;\ncolor1: rgb:99/3E/3E;\ncolor2: rgb:3E/99/3E;\ncolor3: rgb:99/99/3E;\ncolor4: rgb:3E/3E/99;\ncolor5: rgb:99/3E/99;\ncolor6: rgb:3E/99/99;\ncolor7: rgb:99/99/99;\ncolor8: rgb:3E/3E/3E;\ncolor9: rgb:FF/67/67;\ncolor10: rgb:67/FF/67;\ncolor11: rgb:FF/FF/67;\ncolor12: rgb:67/67/FF;\ncolor13: rgb:FF/67/FF;\ncolor14: rgb:67/FF/FF;\ncolor15: rgb:FF/FF/FF;" - - @classmethod - def map(cls, tty_theme: str): - try: return cls(str(tty_theme).lower()) - except ValueError: return None - -def parse_multiline(data: dict, key: str, message: str = "") -> Optional[List[str]]: - if data and isinstance(data, dict) and key and isinstance(key, str): - val = data.get(key, None) # "multiline": ["line1" "line2"] - if isinstance(val, str): val = [val] # allow 
for "multiline": "line1" - if val and isinstance(val, list): - if any(not isinstance(x, str) or x == "" for x in val): - logging.warning(f"{message} - value: {val[:24]}" if (isinstance(message, str) and message != "") - else "Error parsing multiline value (skipped): "\ - f"found empty or non string values - value: {val[:24]}") - else: - return val - return None - -def multiline_to_str(lines: Optional[List[str]]) -> Optional[str]: - if lines and isinstance(lines, list): - return "\n".join(lines) - return None - -def multiline_stringify(lines: Optional[List[str]]) -> Optional[str]: - if lines and isinstance(lines, list): - # nb! strip() may remove more quotes esp. at end of string - val = json.dumps("\n".join(lines)) - if val and val.startswith("\"") and val.endswith("\""): - val = val[1:-1] - return val - return None - -def parse_dag_option(option: Optional[str]) -> Optional[str]: - key = str(option).lower() - if key in ("on", "off", "default"): - return key - return None - -class ClipboardConnectionSettings: - def __init__(self, disableCopy: Optional[bool] = None, disablePaste: Optional[bool] = None): - self.disableCopy = disableCopy - self.disablePaste = disablePaste - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"Clipboard Connection Settings failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - obj.disableCopy = utils.value_to_boolean(data.get("disable_copy", None)) - obj.disablePaste = utils.value_to_boolean(data.get("disable_paste", None)) - return obj - -def clipboard_connection_settings(connection_settings: Union[PamConnectionSettings, ConnectionSettingsHTTP]) -> Optional[ClipboardConnectionSettings]: - if connection_settings and connection_settings.protocol and connection_settings.protocol in ( - ConnectionProtocol.RDP, - ConnectionProtocol.VNC, - ConnectionProtocol.TELNET, - ConnectionProtocol.SSH, - 
ConnectionProtocol.SQLSERVER, - ConnectionProtocol.MYSQL, - ConnectionProtocol.POSTGRESQL, - ConnectionProtocol.HTTP - ): - disableCopy = getattr(connection_settings, "disableCopy", None) - disablePaste = getattr(connection_settings, "disablePaste", None) - return ClipboardConnectionSettings(disableCopy, disablePaste) - -class SFTPRootDirectorySettings: - def __init__(self, enableSftp: Optional[bool] = None, sftpRootDirectory: Optional[str] = None): - self.enableSftp = enableSftp - self.sftpRootDirectory = sftpRootDirectory - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"SFTP Root Directory Settings failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - obj.enableSftp = utils.value_to_boolean(data.get("enable_sftp", None)) - val = data.get("sftp_root_directory", None) - if isinstance(val, str): obj.sftpRootDirectory = val - return obj - - def to_dict(self): - dict: Dict[str, Any] = {} - if self.enableSftp is not None and isinstance(self.enableSftp, bool): - dict["enableSftp"] = self.enableSftp - if self.sftpRootDirectory and isinstance(self.sftpRootDirectory, str) and self.sftpRootDirectory.strip(): - dict["sftpRootDirectory"] = self.sftpRootDirectory.strip() - - return dict - -class SFTPConnectionSettings(SFTPRootDirectorySettings): - def __init__( - self, - enableSftp: Optional[bool] = None, - sftpRootDirectory: Optional[str] = None, - sftpResource: Optional[List[str]] = None, - sftpUser: Optional[List[str]] = None, - sftpDirectory: Optional[str] = None, - sftpServerAliveInterval: Optional[int] = None - ): - super().__init__(enableSftp, sftpRootDirectory) - self.sftpResource = sftpResource - self.sftpUser = sftpUser - self.sftpDirectory = sftpDirectory - self.sftpServerAliveInterval = sftpServerAliveInterval - self.sftpResourceUid = None # resolve from sftpResource - self.sftpUserUid = None # resolve from sftpUser - 
- @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"SFTP Connection Settings failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - rds = SFTPRootDirectorySettings.load(data) - if rds: - obj.enableSftp = rds.enableSftp - obj.sftpRootDirectory = rds.sftpRootDirectory - - # which is the resource record (not yet in web UI) - obj.sftpResource = parse_multiline(data, "sftp_resource", "Error parsing sftp_resource") - obj.sftpUser = parse_multiline(data, "sftp_user_credentials", "Error parsing sftp_user_credentials") - val = data.get("sftp_upload_directory", None) - if isinstance(val, str): obj.sftpDirectory = val - val = data.get("sftp_keepalive_interval", None) - if type(val) is int: obj.sftpServerAliveInterval = abs(val) - elif val and str(val).isdecimal(): obj.sftpServerAliveInterval = int(val) - - return obj - - def to_dict(self): - dict: Dict[str, Any] = {} - if self.sftpRootDirectory and isinstance(self.sftpRootDirectory, str) and self.sftpRootDirectory.strip(): - dict["sftpRootDirectory"] = self.sftpRootDirectory.strip() - if self.enableSftp is not None and isinstance(self.enableSftp, bool): - dict["enableSftp"] = self.enableSftp - - # if resolved from sftpResource - if self.sftpResourceUid and isinstance(self.sftpResourceUid, str) and self.sftpResourceUid.strip(): - dict["sftpResourceUid"] = self.sftpResourceUid.strip() - # if resolved from sftpUser - if self.sftpUserUid and isinstance(self.sftpUserUid, str) and self.sftpUserUid.strip(): - dict["sftpUserUid"] = self.sftpUserUid.strip() - - if self.sftpDirectory and isinstance(self.sftpDirectory, str) and self.sftpDirectory.strip(): - dict["sftpDirectory"] = self.sftpDirectory.strip() - if self.sftpServerAliveInterval and type(self.sftpServerAliveInterval) is int and abs(self.sftpServerAliveInterval) > 0: - dict["sftpServerAliveInterval"] = abs(self.sftpServerAliveInterval) 
- - return dict - -def sftp_enabled(connection_settings: Union[PamConnectionSettings, ConnectionSettingsHTTP]) -> Optional[bool]: - if connection_settings and connection_settings.protocol and connection_settings.protocol in ( - ConnectionProtocol.RDP, - ConnectionProtocol.VNC, - ConnectionProtocol.SSH - ): - sftp = getattr(connection_settings, "sftp", None) - if sftp: - enabled = getattr(sftp, "enableSftp", None) - return enabled - -class TerminalDisplayConnectionSettings: - fontSizes: List[int] = [8,9,10,11,12,14,18,24,30,36,48,60,72,96] - def __init__(self, colorScheme: Optional[str] = None, fontSize: Optional[int] = None): - self.colorScheme = colorScheme - self.fontSize = fontSize - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"Terminal Display Connection Settings failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - val = data.get("color_scheme", None) - if isinstance(val, str): obj.colorScheme = val - val = data.get("font_size", None) - if type(val) is int: obj.fontSize = val - elif val and str(val).isdecimal(): obj.fontSize = int(val) - if obj.fontSize and type(obj.fontSize) is int: - font_size: int = obj.fontSize - closest_number = min(obj.fontSizes, key=lambda x: abs(x - font_size)) - if closest_number != font_size: - logging.error(f"Terminal Display Connection Settings - adjusted invalid font_size from: {obj.fontSize} to: {closest_number}") - obj.fontSize = closest_number - return obj - -class BaseConnectionSettings: - def __init__(self, port: Optional[str] = None, allowSupplyUser: Optional[bool] = None, userRecords: Optional[List[str]] = None, recordingIncludeKeys: Optional[bool] = None): - self.port = port # Override port from host - self.allowSupplyUser = allowSupplyUser - self.recordingIncludeKeys = recordingIncludeKeys - self.userRecords = userRecords - self.userRecordUid = None # resolved from 
class ConnectionSettingsRDP(BaseConnectionSettings, ClipboardConnectionSettings):
    """RDP connection settings for a PAM record.

    Built from the record's connection JSON via load() and serialized back
    with to_record_dict()/to_record_json(). Only explicitly-set values are
    emitted on serialization.
    """
    protocol = ConnectionProtocol.RDP

    def __init__(
            self,
            port: Optional[str] = None,  # Override port from host
            allowSupplyUser: Optional[bool] = None,
            userRecords: Optional[List[str]] = None,
            recordingIncludeKeys: Optional[bool] = None,
            disableCopy: Optional[bool] = None,
            disablePaste: Optional[bool] = None,
            security: Optional[RDPSecurity] = None,
            disableAuth: Optional[bool] = None,
            ignoreCert: Optional[bool] = None,
            loadBalanceInfo: Optional[str] = None,
            preconnectionId: Optional[str] = None,
            preconnectionBlob: Optional[str] = None,
            sftp: Optional[SFTPConnectionSettings] = None,
            disableAudio: Optional[bool] = None,
            resizeMethod: Optional[str] = None,
            enableWallpaper: Optional[bool] = None,
            enableFullWindowDrag: Optional[bool] = None):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste)
        self.security = security if isinstance(security, RDPSecurity) else None
        self.disableAuth = disableAuth
        self.ignoreCert = ignoreCert
        self.loadBalanceInfo = loadBalanceInfo
        self.preconnectionId = preconnectionId
        self.preconnectionBlob = preconnectionBlob
        self.sftp = sftp if isinstance(sftp, SFTPConnectionSettings) else None
        self.disableAudio = disableAudio
        self.resizeMethod = resizeMethod  # disable_dynamic_resizing ? "" : "display-update"
        # Performance Properties
        self.enableWallpaper = enableWallpaper
        self.enableFullWindowDrag = enableFullWindowDrag

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse RDP settings from a JSON string or dict.

        Returns a default (empty) instance when the input cannot be parsed
        or is not a dict; parse errors are logged, not raised.
        """
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"Connection Settings RDP failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            obj.port = bcs.port
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        ccs = ClipboardConnectionSettings.load(data)
        if ccs:
            obj.disableCopy = ccs.disableCopy
            obj.disablePaste = ccs.disablePaste

        val = data.get("security", None)
        if isinstance(val, str):
            obj.security = RDPSecurity.map(val)
        obj.disableAuth = utils.value_to_boolean(data.get("disable_authentication", None))
        obj.ignoreCert = utils.value_to_boolean(data.get("ignore_server_cert", None))

        val = data.get("load_balance_info", None)
        if isinstance(val, str):
            obj.loadBalanceInfo = val  # LoadBalance Info/Cookie
        val = data.get("preconnection_id", None)
        if isinstance(val, str):
            obj.preconnectionId = val
        val = data.get("preconnection_blob", None)
        if isinstance(val, str):
            obj.preconnectionBlob = val
        sftp = data.get("sftp", None)
        if isinstance(sftp, dict):
            obj.sftp = SFTPConnectionSettings.load(sftp)

        obj.disableAudio = utils.value_to_boolean(data.get("disable_audio", None))
        obj.enableWallpaper = utils.value_to_boolean(data.get("enable_wallpaper", None))
        obj.enableFullWindowDrag = utils.value_to_boolean(data.get("enable_full_window_drag", None))

        # disable_dynamic_resizing ? "" : "display-update"
        val = utils.value_to_boolean(data.get("disable_dynamic_resizing", None))
        if val is not True:
            obj.resizeMethod = "display-update"

        return obj

    def to_record_dict(self):
        """Return the dict for the record's connection field; only set values are included."""
        kvp: Dict[str, Any] = {"protocol": ConnectionProtocol.RDP.value}  # pylint: disable=E1101

        # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create)
        recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["userRecords"] = uids

        # isinstance() alone suffices below - it already rejects None and "" is falsy after strip()
        if isinstance(self.port, str) and self.port.strip():
            kvp["port"] = self.port.strip()
        if isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if isinstance(self.disableCopy, bool):
            kvp["disableCopy"] = self.disableCopy
        if isinstance(self.disablePaste, bool):
            kvp["disablePaste"] = self.disablePaste
        if isinstance(self.security, RDPSecurity):
            kvp["security"] = self.security.value.lower()

        if isinstance(self.disableAuth, bool):
            kvp["disableAuth"] = self.disableAuth
        if isinstance(self.ignoreCert, bool):
            kvp["ignoreCert"] = self.ignoreCert

        if isinstance(self.loadBalanceInfo, str) and self.loadBalanceInfo.strip():
            kvp["loadBalanceInfo"] = self.loadBalanceInfo.strip()
        if isinstance(self.preconnectionId, str) and self.preconnectionId.strip():
            kvp["preconnectionId"] = self.preconnectionId.strip()
        if isinstance(self.preconnectionBlob, str) and self.preconnectionBlob.strip():
            kvp["preconnectionBlob"] = self.preconnectionBlob.strip()

        if isinstance(self.disableAudio, bool):
            kvp["disableAudio"] = self.disableAudio
        if isinstance(self.enableFullWindowDrag, bool):
            kvp["enableFullWindowDrag"] = self.enableFullWindowDrag
        if isinstance(self.enableWallpaper, bool):
            kvp["enableWallpaper"] = self.enableWallpaper

        # populated on load - "resizeMethod": disable_dynamic_resizing ? "" : "display-update"
        if str(self.resizeMethod) == "display-update":
            kvp["resizeMethod"] = self.resizeMethod

        if isinstance(self.sftp, SFTPConnectionSettings):
            sftp = self.sftp.to_dict()
            if sftp:
                kvp["sftp"] = sftp

        return kvp

    def to_record_json(self):
        """Serialize to_record_dict() as JSON (local no longer shadows builtin 'dict')."""
        return json.dumps(self.to_record_dict() or {})
# field type: pamRemoteBrowserSettings
class ConnectionSettingsHTTP(BaseConnectionSettings, ClipboardConnectionSettings):
    """HTTP (remote browser isolation) connection settings for a PAM record."""
    protocol = ConnectionProtocol.HTTP

    def __init__(
            self,
            port: Optional[str] = None,  # Override port from host
            allowSupplyUser: Optional[bool] = None,
            userRecords: Optional[List[str]] = None,
            recordingIncludeKeys: Optional[bool] = None,
            disableCopy: Optional[bool] = None,
            disablePaste: Optional[bool] = None,
            allowUrlManipulation: Optional[bool] = None,
            allowedUrlPatterns: Optional[str] = None,
            allowedResourceUrlPatterns: Optional[str] = None,
            httpCredentials: Optional[List[str]] = None,  # autofill_credentials: login|pamUser
            autofillConfiguration: Optional[str] = None,
            ignoreInitialSslCert: Optional[bool] = None):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste)
        self.allowUrlManipulation = allowUrlManipulation
        self.allowedUrlPatterns = allowedUrlPatterns
        self.allowedResourceUrlPatterns = allowedResourceUrlPatterns
        self.httpCredentials = httpCredentials  # autofill_credentials: login|pamUser
        self.autofillConfiguration = autofillConfiguration
        self.ignoreInitialSslCert = ignoreInitialSslCert
        self.httpCredentialsUid = None  # resolved from httpCredentials

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse HTTP/RBI settings from a JSON string or dict; logs and returns a default on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"Connection Settings HTTP failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            # obj.port = bcs.port # not yet in web UI of RBI
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        ccs = ClipboardConnectionSettings.load(data)
        if ccs:
            obj.disableCopy = ccs.disableCopy
            obj.disablePaste = ccs.disablePaste

        obj.allowUrlManipulation = utils.value_to_boolean(data.get("allow_url_manipulation", None))
        obj.allowedUrlPatterns = multiline_to_str(parse_multiline(data, "allowed_url_patterns", "Error parsing allowed_url_patterns"))
        obj.allowedResourceUrlPatterns = multiline_to_str(parse_multiline(data, "allowed_resource_url_patterns", "Error parsing allowed_resource_url_patterns"))
        obj.httpCredentials = parse_multiline(data, "autofill_credentials", "Error parsing autofill_credentials")
        obj.autofillConfiguration = multiline_to_str(parse_multiline(data, "autofill_targets", "Error parsing autofill_targets"))
        obj.ignoreInitialSslCert = utils.value_to_boolean(data.get("ignore_server_cert", None))

        return obj

    def to_record_dict(self):
        """Return the dict for the record's connection field; only set values are included."""
        kvp: Dict[str, Any] = {"protocol": ConnectionProtocol.HTTP.value}  # pylint: disable=E1101

        # if resolved (autofill_credentials->httpCredentialsUid) login|pamUser
        recs: list = self.httpCredentialsUid if self.httpCredentialsUid and isinstance(self.httpCredentialsUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["httpCredentialsUid"] = uids[0]  # single credential

        # port - unused for RBI
        # if isinstance(self.port, str) and self.port.strip():
        #     kvp["port"] = self.port.strip()
        if isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if isinstance(self.disableCopy, bool):
            kvp["disableCopy"] = self.disableCopy
        if isinstance(self.disablePaste, bool):
            kvp["disablePaste"] = self.disablePaste

        if isinstance(self.allowUrlManipulation, bool):
            kvp["allowUrlManipulation"] = self.allowUrlManipulation
        if isinstance(self.allowedUrlPatterns, str) and self.allowedUrlPatterns.strip():
            kvp["allowedUrlPatterns"] = self.allowedUrlPatterns.strip()
        if isinstance(self.allowedResourceUrlPatterns, str) and self.allowedResourceUrlPatterns.strip():
            kvp["allowedResourceUrlPatterns"] = self.allowedResourceUrlPatterns.strip()
        if isinstance(self.autofillConfiguration, str) and self.autofillConfiguration.strip():
            kvp["autofillConfiguration"] = self.autofillConfiguration.strip()
        if isinstance(self.ignoreInitialSslCert, bool):
            kvp["ignoreInitialSslCert"] = self.ignoreInitialSslCert

        return kvp

    def to_record_json(self):
        """Serialize to_record_dict() as JSON (local no longer shadows builtin 'dict')."""
        return json.dumps(self.to_record_dict() or {})
class ConnectionSettingsVNC(BaseConnectionSettings, ClipboardConnectionSettings):
    """VNC connection settings for a PAM record."""
    protocol = ConnectionProtocol.VNC

    def __init__(  # pylint: disable=R0917
            self,
            port: Optional[str] = None,  # Override port from host
            allowSupplyUser: Optional[bool] = None,
            userRecords: Optional[List[str]] = None,
            recordingIncludeKeys: Optional[bool] = None,
            disableCopy: Optional[bool] = None,
            disablePaste: Optional[bool] = None,
            destHost: Optional[str] = None,
            destPort: Optional[str] = None,
            sftp: Optional[SFTPConnectionSettings] = None):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste)
        self.destHost = destHost
        self.destPort = destPort
        self.sftp = sftp if isinstance(sftp, SFTPConnectionSettings) else None

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse VNC settings from a JSON string or dict; logs and returns a default on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"Connection Settings VNC failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            obj.port = bcs.port
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        ccs = ClipboardConnectionSettings.load(data)
        if ccs:
            obj.disableCopy = ccs.disableCopy
            obj.disablePaste = ccs.disablePaste

        val = data.get("destination_host", None)
        if isinstance(val, str):
            obj.destHost = val
        val = data.get("destination_port", None)
        if isinstance(val, str):
            obj.destPort = val

        sftp = data.get("sftp", None)
        if isinstance(sftp, dict):
            obj.sftp = SFTPConnectionSettings.load(sftp)

        return obj

    def to_record_dict(self):
        """Return the dict for the record's connection field; only set values are included."""
        kvp: Dict[str, Any] = {"protocol": ConnectionProtocol.VNC.value}  # pylint: disable=E1101

        # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create)
        recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["userRecords"] = uids

        if isinstance(self.port, str) and self.port.strip():
            kvp["port"] = self.port.strip()
        if isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if isinstance(self.disableCopy, bool):
            kvp["disableCopy"] = self.disableCopy
        if isinstance(self.disablePaste, bool):
            kvp["disablePaste"] = self.disablePaste

        if isinstance(self.destHost, str) and self.destHost.strip():
            kvp["destHost"] = self.destHost.strip()
        if isinstance(self.destPort, str) and self.destPort.strip():
            kvp["destPort"] = self.destPort.strip()

        if isinstance(self.sftp, SFTPConnectionSettings):
            sftp = self.sftp.to_dict()
            if sftp:
                kvp["sftp"] = sftp

        return kvp

    def to_record_json(self):
        """Serialize to_record_dict() as JSON (local no longer shadows builtin 'dict')."""
        return json.dumps(self.to_record_dict() or {})
class ConnectionSettingsTelnet(BaseConnectionSettings, ClipboardConnectionSettings, TerminalDisplayConnectionSettings):
    """Telnet connection settings for a PAM record, including login-prompt regexes."""
    protocol = ConnectionProtocol.TELNET

    def __init__(  # pylint: disable=R0917
            self,
            port: Optional[str] = None,  # Override port from host
            allowSupplyUser: Optional[bool] = None,
            userRecords: Optional[List[str]] = None,
            recordingIncludeKeys: Optional[bool] = None,
            disableCopy: Optional[bool] = None,
            disablePaste: Optional[bool] = None,
            colorScheme: Optional[str] = None,
            fontSize: Optional[int] = None,
            usernameRegex: Optional[str] = None,
            passwordRegex: Optional[str] = None,
            loginSuccessRegex: Optional[str] = None,
            loginFailureRegex: Optional[str] = None):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste)
        TerminalDisplayConnectionSettings.__init__(self, colorScheme, fontSize)
        self.usernameRegex = usernameRegex
        self.passwordRegex = passwordRegex
        self.loginSuccessRegex = loginSuccessRegex
        self.loginFailureRegex = loginFailureRegex

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse Telnet settings from a JSON string or dict; logs and returns a default on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"Connection Settings Telnet failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            obj.port = bcs.port
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        ccs = ClipboardConnectionSettings.load(data)
        if ccs:
            obj.disableCopy = ccs.disableCopy
            obj.disablePaste = ccs.disablePaste

        tcs = TerminalDisplayConnectionSettings.load(data)
        if tcs:
            obj.colorScheme = tcs.colorScheme
            obj.fontSize = tcs.fontSize

        val = data.get("username_regex", None)
        if isinstance(val, str):
            obj.usernameRegex = val
        val = data.get("password_regex", None)
        if isinstance(val, str):
            obj.passwordRegex = val
        val = data.get("login_success_regex", None)
        if isinstance(val, str):
            obj.loginSuccessRegex = val
        val = data.get("login_failure_regex", None)
        if isinstance(val, str):
            obj.loginFailureRegex = val

        return obj

    def to_record_dict(self):
        """Return the dict for the record's connection field; only set values are included."""
        kvp: Dict[str, Any] = {"protocol": ConnectionProtocol.TELNET.value}  # pylint: disable=E1101

        # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create)
        recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["userRecords"] = uids

        if isinstance(self.port, str) and self.port.strip():
            kvp["port"] = self.port.strip()
        if isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if isinstance(self.disableCopy, bool):
            kvp["disableCopy"] = self.disableCopy
        if isinstance(self.disablePaste, bool):
            kvp["disablePaste"] = self.disablePaste

        if isinstance(self.colorScheme, str) and self.colorScheme.strip():
            kvp["colorScheme"] = self.colorScheme.strip()
        # type() not isinstance(): keeps the original behavior of rejecting bool (True is an int subclass)
        if type(self.fontSize) is int and self.fontSize > 4:
            kvp["fontSize"] = str(self.fontSize)
        if isinstance(self.usernameRegex, str) and self.usernameRegex.strip():
            kvp["usernameRegex"] = self.usernameRegex.strip()
        if isinstance(self.passwordRegex, str) and self.passwordRegex.strip():
            kvp["passwordRegex"] = self.passwordRegex.strip()
        if isinstance(self.loginSuccessRegex, str) and self.loginSuccessRegex.strip():
            kvp["loginSuccessRegex"] = self.loginSuccessRegex.strip()
        if isinstance(self.loginFailureRegex, str) and self.loginFailureRegex.strip():
            kvp["loginFailureRegex"] = self.loginFailureRegex.strip()

        return kvp

    def to_record_json(self):
        """Serialize to_record_dict() as JSON (local no longer shadows builtin 'dict')."""
        return json.dumps(self.to_record_dict() or {})
class ConnectionSettingsSSH(BaseConnectionSettings, ClipboardConnectionSettings, TerminalDisplayConnectionSettings):
    """SSH connection settings for a PAM record."""
    protocol = ConnectionProtocol.SSH

    def __init__(  # pylint: disable=R0917
            self,
            port: Optional[str] = None,  # Override port from host
            allowSupplyUser: Optional[bool] = None,
            userRecords: Optional[List[str]] = None,
            recordingIncludeKeys: Optional[bool] = None,
            disableCopy: Optional[bool] = None,
            disablePaste: Optional[bool] = None,
            colorScheme: Optional[str] = None,
            fontSize: Optional[int] = None,
            hostKey: Optional[str] = None,
            command: Optional[str] = None,
            sftp: Optional[SFTPRootDirectorySettings] = None):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste)
        TerminalDisplayConnectionSettings.__init__(self, colorScheme, fontSize)
        self.hostKey = hostKey
        self.command = command
        self.sftp = sftp if isinstance(sftp, SFTPRootDirectorySettings) else None

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse SSH settings from a JSON string or dict; logs and returns a default on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"Connection Settings SSH failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            obj.port = bcs.port
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        ccs = ClipboardConnectionSettings.load(data)
        if ccs:
            obj.disableCopy = ccs.disableCopy
            obj.disablePaste = ccs.disablePaste

        tcs = TerminalDisplayConnectionSettings.load(data)
        if tcs:
            obj.colorScheme = tcs.colorScheme
            obj.fontSize = tcs.fontSize

        val = data.get("public_host_key", None)
        if isinstance(val, str):
            obj.hostKey = val
        val = data.get("command", None)
        if isinstance(val, str):
            obj.command = val
        sftp = data.get("sftp", None)
        if isinstance(sftp, dict):
            obj.sftp = SFTPRootDirectorySettings.load(sftp)

        return obj

    def to_record_dict(self):
        """Return the dict for the record's connection field; only set values are included."""
        kvp: Dict[str, Any] = {"protocol": ConnectionProtocol.SSH.value}  # pylint: disable=E1101

        # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create)
        recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["userRecords"] = uids

        if isinstance(self.port, str) and self.port.strip():
            kvp["port"] = self.port.strip()
        if isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if isinstance(self.disableCopy, bool):
            kvp["disableCopy"] = self.disableCopy
        if isinstance(self.disablePaste, bool):
            kvp["disablePaste"] = self.disablePaste

        if isinstance(self.colorScheme, str) and self.colorScheme.strip():
            kvp["colorScheme"] = self.colorScheme.strip()
        # type() not isinstance(): keeps the original behavior of rejecting bool (True is an int subclass)
        if type(self.fontSize) is int and self.fontSize > 4:
            kvp["fontSize"] = str(self.fontSize)
        if isinstance(self.hostKey, str) and self.hostKey.strip():
            kvp["hostKey"] = self.hostKey.strip()
        if isinstance(self.command, str) and self.command.strip():
            kvp["command"] = self.command.strip()

        if isinstance(self.sftp, SFTPRootDirectorySettings):
            srds = self.sftp.to_dict()
            if srds:
                kvp["sftp"] = srds

        return kvp

    def to_record_json(self):
        """Serialize to_record_dict() as JSON (local no longer shadows builtin 'dict')."""
        return json.dumps(self.to_record_dict() or {})
class ConnectionSettingsKubernetes(BaseConnectionSettings, TerminalDisplayConnectionSettings):
    """Kubernetes exec connection settings for a PAM record (cluster certs, namespace/pod/container)."""
    protocol = ConnectionProtocol.KUBERNETES

    def __init__(  # pylint: disable=R0917
            self,
            port: Optional[str] = None,  # Override port from host
            allowSupplyUser: Optional[bool] = None,
            userRecords: Optional[List[str]] = None,
            recordingIncludeKeys: Optional[bool] = None,
            colorScheme: Optional[str] = None,
            fontSize: Optional[int] = None,
            ignoreCert: Optional[bool] = None,
            caCert: Optional[str] = None,
            namespace: Optional[str] = None,
            pod: Optional[str] = None,
            container: Optional[str] = None,
            clientCert: Optional[str] = None,
            clientKey: Optional[str] = None):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        TerminalDisplayConnectionSettings.__init__(self, colorScheme, fontSize)
        self.ignoreCert = ignoreCert
        self.caCert = caCert
        self.namespace = namespace
        self.pod = pod
        self.container = container
        self.clientCert = clientCert
        self.clientKey = clientKey

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse Kubernetes settings from a JSON string or dict; logs and returns a default on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"Connection Settings K8S failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            obj.port = bcs.port
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        tcs = TerminalDisplayConnectionSettings.load(data)
        if tcs:
            obj.colorScheme = tcs.colorScheme
            obj.fontSize = tcs.fontSize

        val = data.get("namespace", None)
        if isinstance(val, str):
            obj.namespace = val
        val = data.get("pod_name", None)
        if isinstance(val, str):
            obj.pod = val
        val = data.get("container", None)
        if isinstance(val, str):
            obj.container = val
        obj.ignoreCert = utils.value_to_boolean(data.get("ignore_server_cert", None))
        obj.caCert = multiline_to_str(parse_multiline(data, "ca_certificate", "Error parsing ca_certificate"))
        obj.clientCert = multiline_to_str(parse_multiline(data, "client_certificate", "Error parsing client_certificate"))
        obj.clientKey = multiline_to_str(parse_multiline(data, "client_key", "Error parsing client_key"))

        return obj

    def to_record_dict(self):
        """Return the dict for the record's connection field; only set values are included."""
        kvp: Dict[str, Any] = {"protocol": ConnectionProtocol.KUBERNETES.value}  # pylint: disable=E1101

        # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create)
        recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["userRecords"] = uids

        if isinstance(self.port, str) and self.port.strip():
            kvp["port"] = self.port.strip()
        if isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if isinstance(self.colorScheme, str) and self.colorScheme.strip():
            kvp["colorScheme"] = self.colorScheme.strip()
        # type() not isinstance(): keeps the original behavior of rejecting bool (True is an int subclass)
        if type(self.fontSize) is int and self.fontSize > 4:
            kvp["fontSize"] = str(self.fontSize)
        if isinstance(self.namespace, str) and self.namespace.strip():
            kvp["namespace"] = self.namespace.strip()
        if isinstance(self.pod, str) and self.pod.strip():
            kvp["pod"] = self.pod.strip()

        if isinstance(self.container, str) and self.container.strip():
            kvp["container"] = self.container.strip()
        if isinstance(self.ignoreCert, bool):
            kvp["ignoreCert"] = self.ignoreCert
        if isinstance(self.caCert, str) and self.caCert.strip():
            kvp["caCert"] = self.caCert.strip()
        if isinstance(self.clientCert, str) and self.clientCert.strip():
            kvp["clientCert"] = self.clientCert.strip()
        if isinstance(self.clientKey, str) and self.clientKey.strip():
            kvp["clientKey"] = self.clientKey.strip()

        return kvp

    def to_record_json(self):
        """Serialize to_record_dict() as JSON (local no longer shadows builtin 'dict')."""
        return json.dumps(self.to_record_dict() or {})
class BaseDatabaseConnectionSettings(BaseConnectionSettings, ClipboardConnectionSettings):
    """Shared settings for database protocols (SQL Server / PostgreSQL / MySQL)."""

    def __init__(  # pylint: disable=R0917
            self,
            port: Optional[str] = None,  # Override port from host
            allowSupplyUser: Optional[bool] = None,
            userRecords: Optional[List[str]] = None,
            recordingIncludeKeys: Optional[bool] = None,
            disableCopy: Optional[bool] = None,
            disablePaste: Optional[bool] = None,
            database: Optional[str] = None,
            disableCsvExport: Optional[bool] = None,
            disableCsvImport: Optional[bool] = None):
        BaseConnectionSettings.__init__(self, port, allowSupplyUser, userRecords, recordingIncludeKeys)
        ClipboardConnectionSettings.__init__(self, disableCopy, disablePaste)
        self.database = database
        self.disableCsvExport = disableCsvExport
        self.disableCsvImport = disableCsvImport

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse shared DB settings from a JSON string or dict; logs and returns a default on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"Database Connection Settings failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        bcs = BaseConnectionSettings.load(data)
        if bcs:
            obj.port = bcs.port
            obj.allowSupplyUser = bcs.allowSupplyUser
            obj.userRecords = bcs.userRecords
            obj.recordingIncludeKeys = bcs.recordingIncludeKeys

        ccs = ClipboardConnectionSettings.load(data)
        if ccs:
            obj.disableCopy = ccs.disableCopy
            obj.disablePaste = ccs.disablePaste

        val = data.get("default_database", None)
        if isinstance(val, str):
            obj.database = val
        obj.disableCsvExport = utils.value_to_boolean(data.get("disable_csv_export", None))
        obj.disableCsvImport = utils.value_to_boolean(data.get("disable_csv_import", None))

        return obj

    def to_record_dict(self):
        """Return the dict for the record's connection field; subclasses add the "protocol" key."""
        kvp: Dict[str, Any] = {}

        # if resolved (userRecords->userRecordUid) from administrative_credentials (usually after user create)
        recs: list = self.userRecordUid if self.userRecordUid and isinstance(self.userRecordUid, list) else []
        uids = [x.strip() for x in recs if isinstance(x, str) and x.strip() != ""]
        if uids:
            kvp["userRecords"] = uids

        if isinstance(self.port, str) and self.port.strip():
            kvp["port"] = self.port.strip()
        if isinstance(self.allowSupplyUser, bool):
            kvp["allowSupplyUser"] = self.allowSupplyUser
        if isinstance(self.recordingIncludeKeys, bool):
            kvp["recordingIncludeKeys"] = self.recordingIncludeKeys
        if isinstance(self.disableCopy, bool):
            kvp["disableCopy"] = self.disableCopy
        if isinstance(self.disablePaste, bool):
            kvp["disablePaste"] = self.disablePaste
        if isinstance(self.disableCsvExport, bool):
            kvp["disableCsvExport"] = self.disableCsvExport
        if isinstance(self.disableCsvImport, bool):
            kvp["disableCsvImport"] = self.disableCsvImport
        if isinstance(self.database, str) and self.database.strip():
            kvp["database"] = self.database.strip()

        return kvp

    def to_record_json(self):
        """Serialize to_record_dict() as JSON (local no longer shadows builtin 'dict')."""
        return json.dumps(self.to_record_dict() or {})
isinstance(self.allowSupplyUser, bool): - kvp["allowSupplyUser"] = self.allowSupplyUser - if self.recordingIncludeKeys is not None and isinstance(self.recordingIncludeKeys, bool): - kvp["recordingIncludeKeys"] = self.recordingIncludeKeys - if self.disableCopy is not None and isinstance(self.disableCopy, bool): - kvp["disableCopy"] = self.disableCopy - if self.disablePaste is not None and isinstance(self.disablePaste, bool): - kvp["disablePaste"] = self.disablePaste - if self.disableCsvExport is not None and isinstance(self.disableCsvExport, bool): - kvp["disableCsvExport"] = self.disableCsvExport - if self.disableCsvImport is not None and isinstance(self.disableCsvImport, bool): - kvp["disableCsvImport"] = self.disableCsvImport - if self.database and isinstance(self.database, str) and self.database.strip(): - kvp["database"] = self.database.strip() - - return kvp - - def to_record_json(self): - dict = self.to_record_dict() or {} - rec_json = json.dumps(dict) - return rec_json - -class ConnectionSettingsSqlServer(BaseDatabaseConnectionSettings): - protocol = ConnectionProtocol.SQLSERVER - def __init__( # pylint: disable=W0246 - self, - port: Optional[str] = None, # Override port from host - allowSupplyUser: Optional[bool] = None, - userRecords: Optional[List[str]] = None, - recordingIncludeKeys: Optional[bool] = None, - disableCopy: Optional[bool] = None, - disablePaste: Optional[bool] = None, - database: Optional[str] = None, - disableCsvExport: Optional[bool] = None, - disableCsvImport: Optional[bool] = None - ): - super().__init__(port, allowSupplyUser, userRecords, recordingIncludeKeys, - disableCopy, disablePaste, database, - disableCsvExport, disableCsvImport) - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"SQLServer Connection Settings failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - bdcs = 
BaseDatabaseConnectionSettings.load(data) - if bdcs: - obj.port = bdcs.port - obj.allowSupplyUser = bdcs.allowSupplyUser - obj.userRecords = bdcs.userRecords - obj.recordingIncludeKeys = bdcs.recordingIncludeKeys - obj.disableCopy = bdcs.disableCopy - obj.disablePaste = bdcs.disablePaste - obj.database = bdcs.database - obj.disableCsvExport = bdcs.disableCsvExport - obj.disableCsvImport = bdcs.disableCsvImport - - return obj - - def to_record_dict(self): - dict = super().to_record_dict() - dict["protocol"] = ConnectionProtocol.SQLSERVER.value # pylint: disable=E1101 - return dict - -class ConnectionSettingsPostgreSQL(BaseDatabaseConnectionSettings): - protocol = ConnectionProtocol.POSTGRESQL - def __init__( # pylint: disable=W0246,R0917 - self, - port: Optional[str] = None, # Override port from host - allowSupplyUser: Optional[bool] = None, - userRecords: Optional[List[str]] = None, - recordingIncludeKeys: Optional[bool] = None, - disableCopy: Optional[bool] = None, - disablePaste: Optional[bool] = None, - database: Optional[str] = None, - disableCsvExport: Optional[bool] = None, - disableCsvImport: Optional[bool] = None - ): - super().__init__(port, allowSupplyUser, userRecords, recordingIncludeKeys, - disableCopy, disablePaste, database, - disableCsvExport, disableCsvImport) - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"PostgreSQL Connection Settings failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - bdcs = BaseDatabaseConnectionSettings.load(data) - if bdcs: - obj.port = bdcs.port - obj.allowSupplyUser = bdcs.allowSupplyUser - obj.userRecords = bdcs.userRecords - obj.recordingIncludeKeys = bdcs.recordingIncludeKeys - obj.disableCopy = bdcs.disableCopy - obj.disablePaste = bdcs.disablePaste - obj.database = bdcs.database - obj.disableCsvExport = bdcs.disableCsvExport - obj.disableCsvImport = 
bdcs.disableCsvImport - - return obj - - def to_record_dict(self): - dict = super().to_record_dict() - dict["protocol"] = ConnectionProtocol.POSTGRESQL.value # pylint: disable=E1101 - return dict - -class ConnectionSettingsMySQL(BaseDatabaseConnectionSettings): - protocol = ConnectionProtocol.MYSQL - def __init__( # pylint: disable=W0246,R0917 - self, - port: Optional[str] = None, # Override port from host - allowSupplyUser: Optional[bool] = None, - userRecords: Optional[List[str]] = None, - recordingIncludeKeys: Optional[bool] = None, - disableCopy: Optional[bool] = None, - disablePaste: Optional[bool] = None, - database: Optional[str] = None, - disableCsvExport: Optional[bool] = None, - disableCsvImport: Optional[bool] = None - ): - super().__init__(port, allowSupplyUser, userRecords, recordingIncludeKeys, - disableCopy, disablePaste, database, - disableCsvExport, disableCsvImport) - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"MySQL Connection Settings failed to load from: {str(data)[:80]}") - if not isinstance(data, dict): return obj - - bdcs = BaseDatabaseConnectionSettings.load(data) - if bdcs: - obj.port = bdcs.port - obj.allowSupplyUser = bdcs.allowSupplyUser - obj.userRecords = bdcs.userRecords - obj.recordingIncludeKeys = bdcs.recordingIncludeKeys - obj.disableCopy = bdcs.disableCopy - obj.disablePaste = bdcs.disablePaste - obj.database = bdcs.database - obj.disableCsvExport = bdcs.disableCsvExport - obj.disableCsvImport = bdcs.disableCsvImport - - return obj - - def to_record_dict(self): - dict = super().to_record_dict() - dict["protocol"] = ConnectionProtocol.MYSQL.value # pylint: disable=E1101 - return dict - -PamConnectionSettings = Optional[ - Union[ - ConnectionSettingsRDP, - ConnectionSettingsVNC, - ConnectionSettingsTelnet, - ConnectionSettingsSSH, - ConnectionSettingsKubernetes, - ConnectionSettingsSqlServer, - 
class PamPortForwardSettings:
    """Port-forwarding settings for a PAM record."""

    def __init__(self, port: Optional[str] = None, reusePort: Optional[bool] = None):
        self.port = port  # Override Port from host
        self.reusePort = reusePort  # Attempt to use the last connected port if available

    @classmethod
    def load(cls, data: Union[str, dict]):
        """Parse port-forward settings from a JSON string or dict; logs and returns a default on bad input."""
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"Port Forward Settings failed to load from: {str(data)[:80]}")
        if not isinstance(data, dict):
            return obj

        obj.port = data.get("port", None)
        obj.reusePort = utils.value_to_boolean(data.get("reuse_port", None))
        return obj

    def to_record_dict(self):
        """Return the dict stored in the record field; only set values are included."""
        record_dict = {}
        if isinstance(self.port, str) and self.port.strip():
            record_dict["port"] = self.port.strip()
        if isinstance(self.reusePort, bool):
            record_dict["reusePort"] = self.reusePort
        return record_dict

    def to_record_json(self):
        """Serialize to_record_dict() as JSON (local no longer shadows builtin 'dict')."""
        return json.dumps(self.to_record_dict() or {})

class PamRemoteBrowserSettings:
    """RBI (remote browser isolation) settings: DAG options plus an HTTP connection."""

    def __init__(
            self,
            options: Optional[DagSettingsObject] = None,
            connection: Optional[ConnectionSettingsHTTP] = None):
        self.options = options
        self.connection = connection

    @classmethod
    def load(cls, data: Optional[Union[str, dict]]):
        """Parse RBI settings from a JSON string or dict.

        Only the "http" protocol is accepted for the connection; anything else
        is logged and skipped. Returns a default instance on bad input.
        """
        obj = cls()
        try:
            data = json.loads(data) if isinstance(data, str) else data
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.error(f"PAM RBI Settings field failed to load from: {str(data)[:80]}...")
        if not isinstance(data, dict):
            return obj

        options = DagSettingsObject.load(data.get("options", {}))
        if not is_empty_instance(options):
            obj.options = options

        cdata = data.get("connection", {})
        # TO DO: if isinstance(cdata, str): lookup_by_name(pam_data.connections)
        if not isinstance(cdata, dict):
            logging.warning(f"""PAM RBI Settings: Connection must be a JSON object - skipping... "{str(cdata)[:24]}" """)
        if cdata and isinstance(cdata, dict):
            proto = cdata.get("protocol", "")
            if proto and isinstance(proto, str):
                if proto.lower() == "http":
                    conn = ConnectionSettingsHTTP.load(cdata)
                    if not is_empty_instance(conn):
                        obj.connection = conn
                else:
                    logging.warning(f"""Connection skipped: unknown protocol "{str(proto)[:24]}" """)

        if not obj.connection and cdata and isinstance(cdata, dict):
            logging.error(f"PAM RBI Settings failed to load from: {str(cdata)[:80]}...")

        return obj
= empty and is_empty_instance(self.portForward) - empty = empty and is_empty_instance(self.connection, ["protocol"]) - return empty - - @classmethod - def load(cls, data: Union[str, dict]): - obj = cls() - try: data = json.loads(data) if isinstance(data, str) else data - except: logging.error(f"PAM Settings Field failed to load from: {str(data)[:80]}...") - if not isinstance(data, dict): return obj - - obj.allowSupplyHost = utils.value_to_boolean(data.get("allow_supply_host", None)) - options = DagSettingsObject.load(data.get("options", {})) - if not is_empty_instance(options): - obj.options = options - - # Parse jit_settings from options dict (nested inside options) - options_dict = data.get("options", {}) - if isinstance(options_dict, dict): - jit_value = options_dict.get("jit_settings", None) - if jit_value is not None: - jit_settings = DagJitSettingsObject.load(jit_value) - if jit_settings: - obj.jit_settings = jit_settings - - # Parse ai_settings from options dict (nested inside options) - options_dict = data.get("options", {}) - if isinstance(options_dict, dict): - ai_value = options_dict.get("ai_settings", None) - if ai_value is not None: - ai_settings = DagAiSettingsObject.load(ai_value) - if ai_settings: - obj.ai_settings = ai_settings - - portForward = PamPortForwardSettings.load(data.get("port_forward", {})) - if not is_empty_instance(portForward): - obj.portForward = portForward - - cdata = data.get("connection", {}) - # TO DO: if isinstance(cdata, str): lookup_by_name(pam_data.connections) - if not isinstance(cdata, dict): - logging.warning(f"""PAM Settings: Connection must be a JSON object - skipping... 
"{str(cdata)[:24]}" """) - obj.connection = cls.get_connection_class(cdata) - if not obj.connection and cdata and isinstance(cdata, dict): - logging.error(f"PAM Settings failed to load from: {str(cdata)[:80]}...") - - return obj - -def is_empty_instance(obj, skiplist: Optional[List[str]] = None): - """ Checks if all attributes (not on skiplist) are None """ - if not obj: return True - if not isinstance(skiplist, list): skiplist= [] - for attr, value in vars(obj).items(): - if not (attr in skiplist or value is None): - return False - return True - -def is_blank_instance(obj, skiplist: Optional[List[str]] = None): - """ Checks if all attributes (not on skiplist) are None or empty """ - if not obj: return True - if not isinstance(skiplist, list): skiplist= [] - for attr, value in vars(obj).items(): - if not (attr in skiplist or not value): - return False - return True - -def get_sftp_attribute(obj, name: str) -> str: - # Get one of pam_settings.connection.sftp.{sftpResource,sftpResourceUid,sftpUser,sftpUserUid} - value: str = "" - if (name and obj and - hasattr(obj, "pam_settings") and - hasattr(obj.pam_settings, "connection") and - hasattr(obj.pam_settings.connection, "sftp")): - if name == "sftpResource" and hasattr(obj.pam_settings.connection.sftp, "sftpResource"): - value = obj.pam_settings.connection.sftp.sftpResource or "" - elif name == "sftpResourceUid" and hasattr(obj.pam_settings.connection.sftp, "sftpResourceUid"): - value = obj.pam_settings.connection.sftp.sftpResourceUid or "" - elif name == "sftpUser" and hasattr(obj.pam_settings.connection.sftp, "sftpUser"): - value = obj.pam_settings.connection.sftp.sftpUser or "" - elif name == "sftpUserUid" and hasattr(obj.pam_settings.connection.sftp, "sftpUserUid"): - value = obj.pam_settings.connection.sftp.sftpUserUid or "" - else: - logging.debug(f"""Unknown sftp attribute "{name}" (skipped)""") - value = value[0] if isinstance(value, list) else value - value = value if isinstance(value, str) else "" - return 
value - -def set_sftp_uid(obj, name: str, uid: str) -> bool: - if not(obj and name): - return False - if not(uid and isinstance(uid, str) and RecordV3.is_valid_ref_uid(uid)): - logging.debug(f"""Invalid sftp UID "{uid}" (skipped)""") - return False - if (hasattr(obj, "pam_settings") and - hasattr(obj.pam_settings, "connection") and - hasattr(obj.pam_settings.connection, "sftp")): - if name == "sftpResourceUid" and hasattr(obj.pam_settings.connection.sftp, "sftpResourceUid"): - obj.pam_settings.connection.sftp.sftpResourceUid = uid - return True - elif name == "sftpUserUid" and hasattr(obj.pam_settings.connection.sftp, "sftpUserUid"): - obj.pam_settings.connection.sftp.sftpUserUid = uid - return True - else: - logging.debug(f"""Unknown sftp UID attribute "{name}" (skipped)""") - return False - -def is_admin_external(mach) -> bool: - res = False - if (mach and hasattr(mach, "is_admin_external") and mach.is_admin_external is True): - res = True - return res - -def get_admin_credential(obj, uid:bool=False) -> str: - # Get one of pam_settings.connection.{userRecords,userRecordUid} - value: str = "" - if (obj and hasattr(obj, "pam_settings") and - hasattr(obj.pam_settings, "connection") and - ((uid and hasattr(obj.pam_settings.connection, "userRecordUid")) or - (not uid and hasattr(obj.pam_settings.connection, "userRecords")))): - if uid and obj.pam_settings.connection.userRecordUid: - value = obj.pam_settings.connection.userRecordUid - elif not uid and obj.pam_settings.connection.userRecords: - value = obj.pam_settings.connection.userRecords - value = value[0] if isinstance(value, list) else value - value = value if isinstance(value, str) else "" - return value - -def set_user_record_uid(obj, uid: str, is_external: bool = False) -> bool: - if not(uid and isinstance(uid, str) and RecordV3.is_valid_ref_uid(uid)): - logging.debug(f"""Invalid userRecordUid "{uid}" (skipped)""") - return False - - if (uid and obj and hasattr(obj, "pam_settings") and - 
hasattr(obj.pam_settings, "connection") and - hasattr(obj.pam_settings.connection, "userRecordUid")): - obj.pam_settings.connection.userRecordUid = uid - if is_external is True: - if hasattr(obj, "is_admin_external"): - obj.is_admin_external = True - if hasattr(obj, "administrative_credentials_uid"): - obj.administrative_credentials_uid = uid - return True - else: - logging.debug("""Object has no attribute "userRecordUid" (skipped)""") - return False - -def find_external_user(mach, machines, title: str) -> list: - # Local pamMachine could reference pamDirectory AD user as its admin - res = [] - if title and machines and mach.type == "pamMachine": - mu = title.split(".", 1) # machine/user titles - mname = mu[0] if len(mu) > 1 else "" - uname = mu[1] if len(mu) > 1 else mu[0] - for m in machines: - if m.type == "pamDirectory" and (not mname or mname == m.title): - res.extend(search_machine(m, uname) or []) - return res - -def find_user(mach, users, title: str) -> list: - if not isinstance(mach, list): - res = search_machine(mach, title) or search_users(users, title) - else: - res = search_users(users, title) - for m in mach: - res = res or search_machine(m, title) - if res: break - return res or [] - -def search_users(users, user: str) -> list: - res = [] - if isinstance(users, list): - res = [x for x in users if getattr(x, "title", None) == user] - res = res or [x for x in users if getattr(x, "login", None) == user] - return res - -def search_machine(mach, user: str) -> list: - if mach and hasattr(mach, "users") and isinstance(mach.users, list): - return search_users(mach.users, user) - return [] - -def parse_command_options(obj, enable:bool) -> dict: - # Parse command options from DagSettingsObject (pam_resource - skiped/external) - args = {} - if not obj: return args - choices = {"on": True, "off": False} - record_key = "record" if enable else "resource_uid" - args[record_key] = obj.uid - opts = None - if isinstance(obj, PamRemoteBrowserObject): - opts = 
obj.rbi_settings.options if obj.rbi_settings and obj.rbi_settings.options else None - elif isinstance(obj, PamUserObject): - logging.warning("Trying to parse DAG settings from PAM User (skipped)") # PamUserObject.rotation_settings are different - elif not isinstance(obj, LoginUserObject): - opts = obj.pam_settings.options if obj.pam_settings and obj.pam_settings.options else None - if opts: - if enable: # PAMTunnelEditCommand.execute format enable_rotation=True/disable_rotation=True - val = opts.rotation.value if opts.rotation else "" - key = "enable_rotation" if val == "on" else "disable_rotation" if val == "off" else None - if key is not None: args[key] = True - val = opts.connections.value if opts.connections else "" - key = "enable_connections" if val == "on" else "disable_connections" if val == "off" else None - if key is not None: args[key] = True - val = opts.tunneling.value if opts.tunneling else "" - key = "enable_tunneling" if val == "on" else "disable_tunneling" if val == "off" else None - if key is not None: args[key] = True - val = opts.text_session_recording.value if opts.text_session_recording else "" - key = "enable_typescript_recording" if val == "on" else "disable_typescript_recording" if val == "off" else None - if key is not None: - args[key] = True - args[key.replace("_typescript_", "_typescripts_")] = True # legacy compat. 
- val = opts.graphical_session_recording.value if opts.graphical_session_recording else "" - key = "enable_connections_recording" if val == "on" else "disable_connections_recording" if val == "off" else None - if key is not None: args[key] = True - val = opts.remote_browser_isolation.value if opts.remote_browser_isolation else "" - key = "enable_remote_browser_isolation" if val == "on" else "disable_remote_browser_isolation" if val == "off" else None - if key is not None: args[key] = True - # AI and JIT settings don't apply to RBI records - if not isinstance(obj, PamRemoteBrowserObject): - val = opts.ai_threat_detection.value if opts.ai_threat_detection else "" - key = "enable_ai_threat_detection" if val == "on" else "disable_ai_threat_detection" if val == "off" else None - if key is not None: args[key] = True - val = opts.ai_terminate_session_on_detection.value if opts.ai_terminate_session_on_detection else "" - key = "enable_ai_terminate_session_on_detection" if val == "on" else "disable_ai_terminate_session_on_detection" if val == "off" else None - if key is not None: args[key] = True - else: # TunnelDAG.set_resource_allowed format rotation=True/False - if opts.rotation and opts.rotation.value in ("on", "off"): - args["rotation"] = choices[opts.rotation.value] - if opts.connections and opts.connections.value in ("on", "off"): - args["connections"] = choices[opts.connections.value] - if opts.tunneling and opts.tunneling.value in ("on", "off"): - args["tunneling"] = choices[opts.tunneling.value] - if opts.text_session_recording and opts.text_session_recording.value in ("on", "off"): - # args["typescriptrecording"] = choices[opts.text_session_recording.value] - args["typescript_recording"] = choices[opts.text_session_recording.value] - if opts.graphical_session_recording and opts.graphical_session_recording.value in ("on", "off"): - # args["recording"] = choices[opts.graphical_session_recording.value] - args["session_recording"] = 
choices[opts.graphical_session_recording.value] - if opts.remote_browser_isolation and opts.remote_browser_isolation.value in ("on", "off"): - args["remote_browser_isolation"] = choices[opts.remote_browser_isolation.value] - # AI and JIT settings don't apply to RBI records - if not isinstance(obj, PamRemoteBrowserObject): - if opts.ai_threat_detection and opts.ai_threat_detection.value in ("on", "off"): - args["ai_enabled"] = choices[opts.ai_threat_detection.value] - if opts.ai_terminate_session_on_detection and opts.ai_terminate_session_on_detection.value in ("on", "off"): - args["ai_session_terminate"] = choices[opts.ai_terminate_session_on_detection.value] - - return args - -def resolve_domain_admin(pce, users): - if not(users and isinstance(users, list)): - return - if (pce and hasattr(pce, "dom_administrative_credential") and pce.dom_administrative_credential and - hasattr(pce, "admin_credential_ref")): - dac = pce.dom_administrative_credential - res = {"titles": set(), "logins": set()} - for obj in users: - uid = getattr(obj, "uid", "") or "" - title = getattr(obj, "title", "") or "" - login = getattr(obj, "login", "") or "" - if not uid: # cannot resolve script credential to an empty UID - logging.debug(f"""Unable to resolve domain admin creds from rec without UID - "{title}:{login}" (skipped)""") - continue - if title and title == dac: - res["titles"].add(uid) - elif login and login == dac: - res["logins"].add(uid) - num_unique_uids = len(res["titles"] | res["logins"]) - if num_unique_uids != 1: - logging.debug(f"{num_unique_uids} matches while resolving domain admin creds for '{dac}' ") - if res["titles"]: - pce.admin_credential_ref = next(iter(res["titles"])) - elif res["logins"]: - pce.admin_credential_ref = next(iter(res["logins"])) - if pce.admin_credential_ref: - logging.debug(f"Domain admin credential '{dac}' resolved to '{pce.admin_credential_ref}' ") - -def resolve_script_creds(rec, users, resources): - creds = set() - if (rec and hasattr(rec, 
"scripts") and rec.scripts and - hasattr(rec.scripts, "scripts") and rec.scripts.scripts): - creds = set(chain.from_iterable( - (x.additional_credentials for x in rec.scripts.scripts if x.additional_credentials)) - ) - if not creds: # nothing to resolve - return - res = {x: {"titles":[], "logins":[]} for x in creds} - for obj in chain(users, resources): - uid = getattr(obj, "uid", "") or "" - title = getattr(obj, "title", "") or "" - login = getattr(obj, "login", "") or "" - if not uid: # cannot resolve script credential to an empty UID - logging.debug(f"""Unable to resolve script creds from rec without UID - "{title}:{login}" (skipped)""") - continue - if title and title in creds: - res[title]["titles"].append(uid) - elif login and login in creds: - res[login]["logins"].append(login) - - # recursive search in machine users - if hasattr(obj, "users") and obj.users and isinstance(obj.users, list): - for usr in obj.users: - uid = getattr(usr, "uid", "") or "" - title = getattr(usr, "title", "") or "" - login = getattr(usr, "login", "") or "" - if not uid: # cannot resolve script credential to an empty UID - logging.debug(f"""Unable to resolve script creds from rec without UID - "{title}:{login}" (skipped)""") - continue - if title and title in creds: - res[title]["titles"].append(uid) - elif login and login in creds: - res[login]["logins"].append(login) - - if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: - for k, v in res.items(): - tlen = len(v.get("titles", [])) - llen = len(v.get("login", [])) - if tlen+llen != 1: - logging.debug(f"{tlen+llen} matches while resolving script creds for {k}") - - for script in (x for x in rec.scripts.scripts if x.additional_credentials): - for cred in script.additional_credentials: - matches = res.get(cred) or {} - match = next(chain(matches.get("titles") or [], matches.get("logins") or []), None) - if match: - script.record_refs.append(match) - else: - title = getattr(rec, "title", "") or "" - login = getattr(rec, 
"login", "") or "" - logging.warning(f"""Unable to resolve script creds "{cred}" from "{title}:{login}" """) - if script.record_refs: - script.record_refs = list(set(script.record_refs)) - -def add_pam_scripts(params, record, scripts): - """Add post-rotation script(s) to a rotation record""" - if not (isinstance(record, str) and record != "" - and isinstance(scripts, list) and len(scripts) > 0): - return # nothing to do - no record or no script(s) - - ruid = record if record in params.record_cache else "" - if not ruid: - records = list(vault_extensions.find_records( - params, search_str=record, record_version=(3, 6), - record_type=PAM_ROTATION_TYPES + PAM_CONFIG_TYPES)) - if len(records) == 0: - logging.warning(f"""{bcolors.WARNING}Warning: {bcolors.ENDC} Add rotation script - Record "{record}" not found!""") - elif len(records) > 1: - logging.warning(f"""{bcolors.WARNING}Warning: {bcolors.ENDC} Add rotation script - Record "{record}" is not unique. Use record UID!""") - else: - ruid = records[0].record_uid - rec = vault.KeeperRecord.load(params, ruid) if ruid else None - if rec and isinstance(rec, vault.TypedRecord): - if rec.version not in (3, 6): - logging.warning(f"""{bcolors.WARNING}Warning: {bcolors.ENDC} Add rotation script - Record "{rec.record_uid}" is not a rotation record (skipped).""") - return - - script_field = next((x for x in rec.fields if x.type == "script"), None) - if not script_field: - script_field = vault.TypedField.new_field("script", [], "rotationScripts") - rec.fields.append(script_field) - for script in scripts: - file_name = script.file - full_name = os.path.abspath(os.path.expanduser(file_name)) - if not os.path.isfile(full_name): - logging.warning(f"""{bcolors.WARNING}Warning: {bcolors.ENDC} Add rotation script - File "{file_name}" not found (skipped).""") - continue - facade = record_facades.FileRefRecordFacade() - facade.record = rec - pre = set(facade.file_ref) - upload_task = attachment.FileUploadTask(full_name) - 
attachment.upload_attachments(params, rec, [upload_task]) - post = set(facade.file_ref) - df = post.difference(pre) - if len(df) == 1: - file_uid = df.pop() - facade.file_ref.remove(file_uid) - script_value = { - "fileRef": file_uid, - "recordRef": [], - "command": "", - } - # command and recordRef are optional - if script.script_command: - script_value["command"] = script.script_command - if script.record_refs: - for ref in script.record_refs: - script_value["recordRef"].append(ref) - if ref not in params.record_cache: - logging.debug(f"{bcolors.WARNING}Warning: {bcolors.ENDC} " - "Add rotation script - Additional Credentials Record " - f""" "{ref}" not found (recordRef added)!""") - script_field.value.append(script_value) # type: ignore - - record_management.update_record(params, rec) - api.sync_down(params) - params.sync_data = True diff --git a/keepercommander/commands/pam_import/extend.py b/keepercommander/commands/pam_import/extend.py new file mode 100644 index 000000000..87304553d --- /dev/null +++ b/keepercommander/commands/pam_import/extend.py @@ -0,0 +1,1351 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' list[str]: + """Split folder path using path deilmiter / (escape: / -> //)""" + + # Escape char / confusion: a///b -> [a/]/[b] or [a]/[/b] + # Escape char ` or ^ (since \ is hard to put in strings and JSON) + # ...yet again a``/b -> [a`/b] or [a`]/[b] + + # Note: using / as escape char and path delimiter: / <-> // + placeholder = "\x00" # unlikely to appear in folder names + tmp = path.replace("//", placeholder).rstrip("/") + parts = tmp.split("/") # split on remaining single slashes + res = [part.replace(placeholder, "/") for part in parts] + + # check for bad path (odd number of slashes): a///b or a/////b etc. + if re.search(r"(? 
0 and existing_parts[-1] == root: + possible_parents.append(existing_path) + + if len(possible_parents) > 1: + # Ambiguous: this partial path could belong to multiple locations + bad_paths.append((path, f"Ambiguous: '{root}' appears in multiple locations {possible_parents}")) + processed_paths.add(path) + elif len(possible_parents) == 1: + # This is a dependent path that needs parent to exist first + # Wait for parent to be processed (don't mark as processed yet) + pass + else: + # No possible parents found - truly a bad path + bad_paths.append((path, f"Root folder '{root}' not found in shared folders")) + processed_paths.add(path) + else: + # Check for ambiguous paths (multiple possible locations) + matching_roots = [] + for sf_name in sf_name_map: + if path.startswith(sf_name + "/") or path == sf_name: + matching_roots.append(sf_name) + + if len(matching_roots) > 1: + bad_paths.append((path, f"Ambiguous: maps to multiple roots {matching_roots}")) + else: + good_paths.append((path, parts)) + processed_paths.add(path) + + # Add paths to the corresponding folder trees + for path, parts in good_paths: + if parts and parts[0] in sf_name_map: + shf = sf_name_map[parts[0]] + current_level = shf['folder_tree'] + + # Navigate/create the folder structure + for _, folder_name in enumerate(parts[1:], 1): + if folder_name not in current_level: + current_level[folder_name] = { + 'uid': '', # Empty UID for new folders + 'name': folder_name, + 'subfolders': {} + } + current_level = current_level[folder_name]['subfolders'] + + return good_paths, bad_paths + +def build_tree_recursive(params, folder_uid: str): + """Recursively build tree for a folder and its subfolders""" + tree = {} + folder = params.folder_cache.get(folder_uid) + if not folder: + return tree + + for subfolder_uid in folder.subfolders: + subfolder = params.folder_cache.get(subfolder_uid) + if subfolder: + folder_name = subfolder.name or '' + tree[folder_name] = { + 'uid': subfolder.uid, + 'name': folder_name, + 
'subfolders': build_tree_recursive(params, subfolder.uid) + } + + return tree + + +def _collect_path_to_uid_from_tree(path_prefix: str, tree: dict, path_to_uid: dict, only_existing: bool) -> None: + """Walk folder tree and fill path_to_uid. path_prefix e.g. 'gwapp', tree is shf['folder_tree']. + If only_existing, only add when node['uid'] is non-empty.""" + for name, node in (tree or {}).items(): + path = f"{path_prefix}/{name}" if path_prefix else name + uid = (node or {}).get("uid") or "" + if only_existing and not uid: + continue + if uid: + path_to_uid[path] = uid + subfolders = (node or {}).get("subfolders") or {} + if subfolders: + _collect_path_to_uid_from_tree(path, subfolders, path_to_uid, only_existing) + + +def _count_existing_and_new_paths(ksm_shared_folders: list, good_paths: list) -> tuple: + """Return (x_count, y_count, existing_paths_set, new_nodes_list). + existing_paths_set = set of full paths that exist (all segments have uid). + new_nodes_list = list of (full_path, parent_path, segment_name, node_ref) for each node with uid '', sorted by path (parent before child).""" + sf_name_map = {shf["name"]: shf for shf in ksm_shared_folders} + existing_paths = set() + new_nodes_list = [] # (full_path, parent_path, segment_name, node_dict) + + for path, parts in good_paths: + if not parts or parts[0] not in sf_name_map: + continue + root_name = parts[0] + if len(parts) == 1: + existing_paths.add(path) + continue + tree = sf_name_map[root_name].get("folder_tree") or {} + current = tree + prefix = root_name + parent_path = root_name + for i in range(1, len(parts)): + name = parts[i] + path_so_far = f"{prefix}/{name}" if prefix else name + node = current.get(name) if isinstance(current, dict) else None + if not node: + break + uid = node.get("uid") or "" + if uid: + existing_paths.add(path_so_far) + parent_path = path_so_far + else: + new_nodes_list.append((path_so_far, parent_path, name, node)) + parent_path = path_so_far + current = node.get("subfolders") 
or {} + prefix = path_so_far + + # Dedupe new nodes by path and sort so parent before child + seen = set() + deduped = [] + for item in new_nodes_list: + if item[0] not in seen: + seen.add(item[0]) + deduped.append(item) + deduped.sort(key=lambda x: (x[0].count("/"), x[0])) + x_count = len(existing_paths) + y_count = len(deduped) + return (x_count, y_count, existing_paths, deduped) + + +def _collect_all_folder_uids_under_ksm(ksm_shared_folders: list) -> set: + """Return set of all folder UIDs (shared folder roots + all descendants) under KSM app.""" + out = set() + for shf in ksm_shared_folders: + out.add(shf["uid"]) + tree = shf.get("folder_tree") or {} + + def walk(t): + for name, node in (t or {}).items(): + uid = (node or {}).get("uid") + if uid: + out.add(uid) + walk((node or {}).get("subfolders") or {}) + + walk(tree) + return out + + +def _get_ksm_app_record_uids(params, ksm_shared_folders: list) -> set: + """Return set of all record UIDs in any folder shared to the KSM app.""" + folder_uids = _collect_all_folder_uids_under_ksm(ksm_shared_folders) + record_uids = set() + subfolder_record_cache = getattr(params, "subfolder_record_cache", None) or {} + for fuid in folder_uids: + if fuid in subfolder_record_cache: + record_uids.update(subfolder_record_cache[fuid]) + return record_uids + + +def _get_records_in_folder(params, folder_uid: str): + """Return list of (record_uid, title, record_type) for records in folder_uid. + record_type from record for autodetect (e.g. 
pamUser, pamMachine, login).""" + subfolder_record_cache = getattr(params, "subfolder_record_cache", None) or {} + result = [] + for ruid in subfolder_record_cache.get(folder_uid, []): + try: + rec = vault.KeeperRecord.load(params, ruid) + title = getattr(rec, "title", "") or "" + rtype = "" + if hasattr(rec, "record_type"): + rtype = getattr(rec, "record_type", "") or "" + result.append((ruid, title, rtype)) + except Exception: + pass + return result + + +def _get_all_ksm_app_records(params, ksm_shared_folders: list) -> list: + """Return list of (record_uid, title, record_type) for every record in any folder under KSM app.""" + folder_uids = _collect_all_folder_uids_under_ksm(ksm_shared_folders) + out = [] + for fuid in folder_uids: + out.extend(_get_records_in_folder(params, fuid)) + return out + + +def _folder_uids_under_shf(shf: dict) -> set: + """Return set of folder UIDs under this shared folder (root + all descendants from folder_tree).""" + out = {shf.get("uid")} + tree = shf.get("folder_tree") or {} + + def walk(t): + for name, node in (t or {}).items(): + uid = (node or {}).get("uid") + if uid: + out.add(uid) + walk((node or {}).get("subfolders") or {}) + + walk(tree) + return out + + +def _is_resource_type(obj) -> bool: + """True if object is a PAM resource (machine, database, directory, remote browser).""" + t = (getattr(obj, "type", None) or "").lower() + return t in ("pammachine", "pamdatabase", "pamdirectory", "pamremotebrowser") + + +def _record_identifier(obj, fallback_login: str = "") -> str: + """Return identifier for error messages: uid if present, else title, else login (for users).""" + uid = getattr(obj, "uid_imported", None) or getattr(obj, "uid", None) + if uid and isinstance(uid, str) and RecordV3.is_valid_ref_uid(uid): + return f'uid "{uid}"' + title = getattr(obj, "title", None) or "" + if title: + return f'"{title}"' + login = getattr(obj, "login", None) or fallback_login + return f'login "{login}"' if login else "record" + + +def 
_has_autogenerated_title(obj) -> bool: + """True if obj has title set by base.py when missing in JSON. base.py uses: + pamUser -> PAM User - {login}; pamMachine -> PAM Machine - {login}; + pamDatabase -> PAM Database - {databaseId}; pamDirectory -> PAM Directory - {domainName}; + pamRemoteBrowser (RBI) -> PAM RBI - {hostname from rbiUrl}.""" + rtype = (getattr(obj, "type", None) or "").lower() + title = (getattr(obj, "title", None) or "").strip() + login = (getattr(obj, "login", None) or "").strip() + if rtype == "pamuser" and login and title == f"PAM User - {login}": + return True + if rtype == "pammachine" and login and title == f"PAM Machine - {login}": + return True + database_id = (getattr(obj, "databaseId", None) or "").strip() + if rtype == "pamdatabase" and database_id and title == f"PAM Database - {database_id}": + return True + domain_name = (getattr(obj, "domainName", None) or "").strip() + if rtype == "pamdirectory" and domain_name and title == f"PAM Directory - {domain_name}": + return True + if rtype == "pamremotebrowser" and getattr(obj, "rbiUrl", None) and title.startswith("PAM RBI - "): + return True + return False + + +def _vault_title_matches_import(vault_title: str, import_title: str) -> bool: + """True if vault record title matches import title verbatim (both already in same form, e.g. 
from base.py).""" + return (vault_title or "").strip() == (import_title or "").strip() + + +class PAMProjectExtendCommand(Command): + parser = argparse.ArgumentParser(prog="pam project extend") + parser.add_argument("--config", "-c", required=True, dest="config", action="store", help="PAM Configuration UID or Title") + parser.add_argument("--filename", "-f", required=True, dest="file_name", action="store", help="File to load import data from.") + parser.add_argument("--dry-run", "-d", required=False, dest="dry_run", action="store_true", default=False, help="Test import without modifying vault.") + + def get_parser(self): + return PAMProjectExtendCommand.parser + + def execute(self, params, **kwargs): + dry_run = kwargs.get("dry_run", False) is True + file_name = str(kwargs.get("file_name") or "") + config_name = str(kwargs.get("config") or "") + + api.sync_down(params) + + configuration = None + if config_name in params.record_cache: + configuration = vault.KeeperRecord.load(params, config_name) + else: + l_name = config_name.casefold() + for c in vault_extensions.find_records(params, record_version=6): + if c.title.casefold() == l_name: + configuration = c + break + + if not (configuration and isinstance(configuration, vault.TypedRecord) and configuration.version == 6): + raise CommandError("pam project extend", f"""PAM configuration not found: "{config_name}" """) + + if not (file_name != "" and os.path.isfile(file_name)): + raise CommandError("pam project extend", f"""PAM Import JSON file not found: "{file_name}" """) + + data = {} + try: + with open(file_name, encoding="utf-8") as f: + data = json.load(f) + except Exception: + data = {} + + pam_data = data.get("pam_data") if isinstance(data, dict) else {} + pam_data = pam_data if isinstance(pam_data, dict) else {} + users = pam_data["users"] if isinstance(pam_data.get("users"), list) else [] + resources = pam_data["resources"] if isinstance(pam_data.get("resources"), list) else [] + if not (resources or users): 
+ raise CommandError("pam project extend", f"""PAM data missing - file "{file_name}" """ + """must be a valid JSON ex. {"pam_data": {"resources": [], "users":[]}} """) + + has_extra_keys = any(key != "pam_data" for key in data) if isinstance(data, dict) else False + if has_extra_keys: + logging.warning(f"{bcolors.WARNING}WARNING: Import JSON contains extra data - " + f"""`extend` command uses only "pam_data": {{ }} {bcolors.ENDC}""") + + if dry_run: + print("[DRY RUN] No changes will be made. This is a simulation only.") + + # Find Controller/Gateway/App from PAM Configuration + controller = configuration_controller_get(params, url_safe_str_to_bytes(configuration.record_uid)) + if not (controller and isinstance(controller, pam_pb2.PAMController) and controller.controllerUid): # pylint: disable=no-member + raise CommandError("pam project extend", f"{bcolors.FAIL}" + f"Gateway UID not found for configuration {configuration.record_uid}.") + + ksmapp_uid = None + gateway_uid = utils.base64_url_encode(controller.controllerUid) + all_gateways = gateway_helper.get_all_gateways(params) + found_gateways = list(filter(lambda g: g.controllerUid == controller.controllerUid, all_gateways)) + if found_gateways and found_gateways[0]: + ksmapp_uid = utils.base64_url_encode(found_gateways[0].applicationUid) + if ksmapp_uid is None: + raise CommandError("pam project extend", f"{bcolors.FAIL}" + f"KSM APP UID not found for Gateway {gateway_uid}.") + ksm_app_record = vault.KeeperRecord.load(params, ksmapp_uid) + if not (ksm_app_record and isinstance(ksm_app_record, vault.ApplicationRecord) and ksm_app_record.version == 5): + raise CommandError("pam project extend", f"""PAM KSM Application record not found: "{ksmapp_uid}" """) + + # Find KSM Application shared folders + ksm_shared_folders = self.get_app_shared_folders(params, ksmapp_uid) + if not ksm_shared_folders: + raise CommandError("pam project extend", f""" No shared folders found for KSM Application: "{ksmapp_uid}" """) + + if 
dry_run: + print(f"[DRY RUN] Will use PAM Configuration: {configuration.record_uid} {configuration.title}") + print(f"[DRY RUN] Will use PAM Gateway: {gateway_uid} {controller.controllerName}") + print(f"[DRY RUN] Will use KSM Application: {ksmapp_uid} {ksm_app_record.title}") + print(f"[DRY RUN] Total shared folders found for the KSM App: {len(ksm_shared_folders)}") + for shf in ksm_shared_folders: + uid, name, permissions = shf.get("uid"), shf.get("name"), shf.get("permissions") + print(f"""[DRY RUN] Found shared folder: {uid} "{name}" ({permissions})""") + + for shf in ksm_shared_folders: + shf["folder_tree"] = build_tree_recursive(params, shf["uid"]) + + project = { + "data": {"pam_data": pam_data}, + "options": {"dry_run": dry_run}, + "ksm_shared_folders": ksm_shared_folders, + "folders": {}, + "pam_config": {"pam_config_uid": configuration.record_uid, "pam_config_object": None}, + "error_count": 0, + } + + self.process_folders(params, project) + self.map_records(params, project) + if project.get("error_count", 0) == 0: + has_new_no_path = False + for o in chain(project.get("mapped_resources", []), project.get("mapped_users", [])): + if getattr(o, "_extend_tag", None) == "new" and not (getattr(o, "folder_path", None) or "").strip(): + has_new_no_path = True + break + if not has_new_no_path: + for mach in project.get("mapped_resources", []): + if hasattr(mach, "users") and isinstance(mach.users, list): + for u in mach.users: + if getattr(u, "_extend_tag", None) == "new" and not (getattr(u, "folder_path", None) or "").strip(): + has_new_no_path = True + break + if has_new_no_path: + break + if has_new_no_path: + self.autodetect_folders(params, project) + + err_count = project.get("error_count", 0) + new_count = project.get("new_record_count", 0) + if err_count > 0: + print(f"{err_count} errors; aborting. 
No changes made to vault.") + print("Use --dry-run option to see detailed error messages.") + return + if new_count == 0: + print("Nothing to update") + return + + path_to_folder_uid = (project.get("folders") or {}).get("path_to_folder_uid") or {} + res_folder_uid = (project.get("folders") or {}).get("resources_folder_uid", "") + usr_folder_uid = (project.get("folders") or {}).get("users_folder_uid", "") + + for o in chain(project.get("mapped_resources", []), project.get("mapped_users", [])): + if getattr(o, "_extend_tag", None) != "new": + continue + fp = (getattr(o, "folder_path", None) or "").strip() + o.resolved_folder_uid = path_to_folder_uid.get(fp) or (res_folder_uid if _is_resource_type(o) else usr_folder_uid) + for mach in project.get("mapped_resources", []): + if hasattr(mach, "users") and isinstance(mach.users, list): + for u in mach.users: + if getattr(u, "_extend_tag", None) != "new": + continue + fp = (getattr(u, "folder_path", None) or "").strip() + u.resolved_folder_uid = path_to_folder_uid.get(fp) or usr_folder_uid + + if dry_run: + print("[DRY RUN COMPLETE] No changes were made. 
All actions were validated but not executed.") + return + self.process_data(params, project) + + def get_app_shared_folders(self, params, ksm_app_uid: str) -> list[dict]: + ksm_shared_folders = [] + + try: + app_info_list = KSMCommand.get_app_info(params, ksm_app_uid) + if app_info_list and len(app_info_list) > 0: + app_info = app_info_list[0] + shares = [x for x in app_info.shares if x.shareType == APIRequest_pb2.SHARE_TYPE_FOLDER] # pylint: disable=no-member + for share in shares: + folder_uid = utils.base64_url_encode(share.secretUid) + if folder_uid in params.shared_folder_cache: + cached_sf = params.shared_folder_cache[folder_uid] + folder_name = cached_sf.get('name_unencrypted', 'Unknown') + is_editable = share.editable if hasattr(share, 'editable') else False + + ksm_shared_folders.append({ + 'uid': folder_uid, + 'name': folder_name, + 'editable': is_editable, + 'permissions': "Editable" if is_editable else "Read-Only" + }) + except Exception as e: + logging.error(f"Could not retrieve KSM application shares: {e}") + + return ksm_shared_folders + + def process_folders(self, params, project: dict) -> dict: + """Step 1: Parse folder_paths from pam_data, build tree, process paths, optionally create new folders. 
+ Fills project['folders'] with path_to_folder_uid, good_paths, bad_paths; updates project['error_count'].""" + data = project.get("data") or {} + pam_data = data.get("pam_data") or {} + resources = pam_data.get("resources") or [] + users = pam_data.get("users") or [] + options = project.get("options") or {} + dry_run = options.get("dry_run", False) is True + ksm_shared_folders = project.get("ksm_shared_folders") or [] + folders_out = project.get("folders") or {} + project["folders"] = folders_out + + # Collect unique folder_paths from resources, nested machine.users[], and top-level users (raw dicts) + folder_paths_set = set() + for r in resources: + if isinstance(r, dict): + if r.get("folder_path"): + folder_paths_set.add((r["folder_path"],)) + for nested in r.get("users") or []: + if isinstance(nested, dict) and nested.get("folder_path"): + folder_paths_set.add((nested["folder_path"],)) + for u in users: + if isinstance(u, dict) and u.get("folder_path"): + folder_paths_set.add((u["folder_path"],)) + folder_paths = list(set(fp[0] for fp in folder_paths_set)) + + good_paths, bad_paths = process_folder_paths(folder_paths, ksm_shared_folders) + + path_to_folder_uid = {} + has_errors = bool(bad_paths) + for shf in ksm_shared_folders: + name = shf.get("name") or "" + if name: + path_to_folder_uid[name] = shf["uid"] + _collect_path_to_uid_from_tree( + name, + shf.get("folder_tree") or {}, + path_to_folder_uid, + only_existing=has_errors, + ) + + x_count, y_count, existing_paths_set, new_nodes_list = _count_existing_and_new_paths( + ksm_shared_folders, good_paths + ) + + # Pre-generate UIDs for new folders (same as records: known before create). Fills path_to_folder_uid + # so dry run and map_records can resolve folder_path for all good paths. 
+ for full_path, _parent_path, _name, node in new_nodes_list: + if not (node or {}).get("uid"): + uid = api.generate_record_uid() + node["uid"] = uid + path_to_folder_uid[full_path] = uid + + step1_errors = [(path, reason) for path, reason in bad_paths] + if step1_errors: + project["error_count"] = project.get("error_count", 0) + len(step1_errors) + + # Folder path printing: dry run always; normal run only if errors or Y > 0 + print_paths = dry_run or step1_errors or y_count > 0 + if print_paths: + prefix = "[DRY RUN] " if dry_run else "" + print(f"{prefix}Processed {len(folder_paths)} folder paths:") + print(f"{prefix} - Good paths: {len(good_paths)}") + for path, _ in good_paths: + tag = "existing" if path in existing_paths_set else "new" + if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: + print(f"{prefix} [{tag}] {path}") + else: + print(f"{prefix} ✓ {path}") + print(f"{prefix} - Bad paths: {len(bad_paths)}") + for path, reason in bad_paths: + print(f"{prefix} ✗ {path}: {reason}") + if step1_errors: + print(f"Total: {len(step1_errors)} errors") + + if not dry_run and not step1_errors and new_nodes_list: + sf_name_map = {shf["name"]: shf for shf in ksm_shared_folders} + for full_path, parent_path, name, node in new_nodes_list: + parent_uid = path_to_folder_uid.get(parent_path, "") + if not parent_uid and parent_path in sf_name_map: + parent_uid = sf_name_map[parent_path]["uid"] + new_uid = self.create_subfolder(params, name, parent_uid, folder_uid=node.get("uid")) + node["uid"] = new_uid + path_to_folder_uid[full_path] = new_uid + api.sync_down(params) + + existing_msg = f"{x_count} existing folders (skipped)" if x_count else "0 existing folders" + if dry_run: + print(f"[DRY RUN] {existing_msg}, {y_count} new folders to be created") + else: + print(f"{existing_msg}, {y_count} new folders created") + + if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: + for path, _ in good_paths: + tag = "existing" if path in existing_paths_set else "new" + 
print(f" [DEBUG] [{tag}] {path}") + + folders_out["path_to_folder_uid"] = path_to_folder_uid + folders_out["good_paths"] = good_paths + folders_out["bad_paths"] = bad_paths + folders_out["folder_stats_x"] = x_count + folders_out["folder_stats_y"] = y_count + return folders_out + + def map_records(self, params, project: dict) -> tuple: + """Step 2: Parse resources/users, tag existing vs new, set obj.uid; collect errors. + Returns (resources, users, step2_errors, new_record_count). Updates project['error_count'].""" + data = project.get("data") or {} + pam_data = data.get("pam_data") or {} + path_to_folder_uid = (project.get("folders") or {}).get("path_to_folder_uid") or {} + ksm_shared_folders = project.get("ksm_shared_folders") or [] + options = project.get("options") or {} + dry_run = options.get("dry_run", False) is True + + rotation_profiles = pam_data.get("rotation_profiles") or {} + if not isinstance(rotation_profiles, dict): + rotation_profiles = {} + pam_cfg_uid = (project.get("pam_config") or {}).get("pam_config_uid", "") + rotation_params = PamRotationParams(configUid=pam_cfg_uid, profiles=rotation_profiles) + + usrs = pam_data.get("users") or [] + rsrs = pam_data.get("resources") or [] + users = [] + resources = [] + + for user in usrs: + rt = str(user.get("type", "")) if isinstance(user, dict) else "" + rt = next((x for x in ("login", "pamUser") if x.lower() == rt.lower()), rt) + if rt not in ("login", "pamUser") and isinstance(user, dict): + pam_keys = ("private_pem_key", "distinguished_name", "connect_database", "managed", "scripts", "rotation_settings") + if user.get("url"): rt = "login" + elif any(k in user for k in pam_keys): rt = "pamUser" + rt = next((x for x in ("login", "pamUser") if x.lower() == rt.lower()), "login") + if rt == "login": + usr = LoginUserObject.load(user) + else: + usr = PamUserObject.load(user) + if usr: + users.append(usr) + + for machine in rsrs: + rt = str(machine.get("type", "")).strip() if isinstance(machine, dict) else "" 
+ if rt.lower() not in (x.lower() for x in PAM_RESOURCES_RECORD_TYPES): + title = str(machine.get("title", "")).strip() if isinstance(machine, dict) else "" + logging.error(f"Incorrect record type \"{rt}\" - should be one of {PAM_RESOURCES_RECORD_TYPES}, \"{title}\" record skipped.") + continue + obj = None + rtl = rt.lower() + if rtl == "pamdatabase": + obj = PamDatabaseObject.load(machine, rotation_params) + elif rtl == "pamdirectory": + obj = PamDirectoryObject.load(machine, rotation_params) + elif rtl == "pammachine": + obj = PamMachineObject.load(machine, rotation_params) + elif rtl == "pamremotebrowser": + obj = PamRemoteBrowserObject.load(machine, rotation_params) + if obj: + resources.append(obj) + + for obj in chain(resources, users): + if not (isinstance(getattr(obj, "uid", None), str) and RecordV3.is_valid_ref_uid(obj.uid)): + obj.uid = utils.generate_uid() + if hasattr(obj, "users") and isinstance(obj.users, list): + for usr in obj.users: + if not (isinstance(getattr(usr, "uid", None), str) and RecordV3.is_valid_ref_uid(usr.uid)): + usr.uid = utils.generate_uid() + + ksm_app_uids = _get_ksm_app_record_uids(params, ksm_shared_folders) + all_ksm_records = _get_all_ksm_app_records(params, ksm_shared_folders) + good_paths = (project.get("folders") or {}).get("good_paths") or [] + good_paths_set = {p for p, _ in good_paths} + step2_errors = [] + + def _scope_key(obj, good_paths_set): + # Scope by folder only if path is good (exists or to be created); else "global". + # "Global" means: SHF shared to KSM App; for users → autodetected users folder (or single + # folder for both); for resources → autodetected resources folder (or same single folder). + # 0 or 3+ autodetected folders is an error anyway. Users are never scoped by machine. 
+ fp = (getattr(obj, "folder_path", None) or "").strip() + if fp and fp in good_paths_set: + return fp + return "global" + + seen_scope_title = {} # (scope_key, title) -> list of (ident, machine_suffix) for error message + for o in chain(resources, users): + scope = _scope_key(o, good_paths_set) + title = (getattr(o, "title", None) or "").strip() + if title: + key = (scope, title) + ident = _record_identifier(o) + seen_scope_title.setdefault(key, []).append((ident, "")) + for mach in resources: + if hasattr(mach, "users") and isinstance(mach.users, list): + for u in mach.users: + scope = _scope_key(u, good_paths_set) + title = (getattr(u, "title", None) or "").strip() + if title: + key = (scope, title) + ident = _record_identifier(u) + suffix = f' (nested on machine "{getattr(mach, "title", "")}")' + seen_scope_title.setdefault(key, []).append((ident, suffix)) + + for (scope, title), idents in seen_scope_title.items(): + if len(idents) > 1: + scope_msg = f"folder {scope}" if scope != "global" else "global" + step2_errors.append( + f'ERROR: Duplicate import records with same title "{title}" in same scope ({scope_msg}). ' + f'Add explicit "title" in JSON to disambiguate.' 
+ ) + + def resolve_one(obj, parent_machine=None): + ident = _record_identifier(obj) + machine_suffix = "" + if parent_machine: + mt = getattr(parent_machine, "title", None) or "" + mu = getattr(parent_machine, "uid", None) or "" + machine_suffix = f' user on machine "{mt}"' if mt else f" user on machine <{mu}>" + + uid_imp = getattr(obj, "uid_imported", None) + if uid_imp and isinstance(uid_imp, str) and RecordV3.is_valid_ref_uid(uid_imp): + if uid_imp not in ksm_app_uids: + step2_errors.append(f'uid "{uid_imp}" not found in KSM app for record {ident}{machine_suffix}') + return + obj.uid = uid_imp + obj._extend_tag = "existing" + return + + folder_path = getattr(obj, "folder_path", None) or "" + title = (getattr(obj, "title", None) or "").strip() + login = (getattr(obj, "login", None) or "").strip() + + if folder_path: + folder_uid = path_to_folder_uid.get(folder_path) + if not folder_uid: + if folder_path in good_paths_set: + obj._extend_tag = "new" + return + step2_errors.append(f'folder_path "{folder_path}" could not be resolved for record {ident}{machine_suffix}') + return + if not title and not login: + obj._extend_tag = "new" + return + recs = _get_records_in_folder(params, folder_uid) + matches = [r for r in recs if _vault_title_matches_import(r[1], title)] + if len(matches) == 0: + obj._extend_tag = "new" + return + if len(matches) == 1: + obj.uid = matches[0][0] + obj._extend_tag = "existing" + return + step2_errors.append(f'Multiple matches for record {ident} in folder "{folder_path}"; add folder_path to disambiguate{machine_suffix}') + return + + if not title and not login: + obj._extend_tag = "new" + return + matches = [r for r in all_ksm_records if _vault_title_matches_import(r[1], title)] + if len(matches) == 0: + obj._extend_tag = "new" + return + if len(matches) == 1: + obj.uid = matches[0][0] + obj._extend_tag = "existing" + return + step2_errors.append(f'Multiple matches for record {ident}; add folder_path to disambiguate{machine_suffix}') + + 
for obj in resources: + resolve_one(obj, None) + for obj in users: + resolve_one(obj, None) + for mach in resources: + if hasattr(mach, "users") and isinstance(mach.users, list): + for usr in mach.users: + resolve_one(usr, mach) + + autogenerated_titles = [] + for o in chain(resources, users): + if _has_autogenerated_title(o): + autogenerated_titles.append(getattr(o, "title", None) or "") + for mach in resources: + if hasattr(mach, "users") and isinstance(mach.users, list): + for u in mach.users: + if _has_autogenerated_title(u): + autogenerated_titles.append(getattr(u, "title", None) or "") + if autogenerated_titles: + print( + f"{bcolors.WARNING}Warning: {len(autogenerated_titles)} record(s) have autogenerated titles " + f"(e.g. PAM User/Machine/Database/Directory/RBI - ). Add \"title\" in import JSON to set an explicit record title.{bcolors.ENDC}" + ) + if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: + for t in autogenerated_titles: + print(f" [DEBUG] autogenerated title: {t}") + + machines = [x for x in resources if not isinstance(x, PamRemoteBrowserObject)] + pam_directories = [x for x in machines if (getattr(x, "type", "") or "").lower() == "pamdirectory"] + for mach in resources: + if not mach: + continue + admin_cred = get_admin_credential(mach) + sftp_user = get_sftp_attribute(mach, "sftpUser") + sftp_res = get_sftp_attribute(mach, "sftpResource") + if sftp_res: + ruids = [x for x in machines if getattr(x, "title", None) == sftp_res] + ruids = ruids or [x for x in machines if getattr(x, "login", None) == sftp_res] + if len(ruids) == 1 and getattr(ruids[0], "uid", ""): + set_sftp_uid(mach, "sftpResourceUid", ruids[0].uid) + if sftp_user: + ruids = find_user(mach, users, sftp_user) or find_user(machines, users, sftp_user) + if len(ruids) == 1 and getattr(ruids[0], "uid", ""): + set_sftp_uid(mach, "sftpUserUid", ruids[0].uid) + if admin_cred: + ruids = find_user(mach, users, admin_cred) + is_external = False + if not ruids: + ruids = 
find_external_user(mach, machines, admin_cred) + is_external = True + if len(ruids) == 1 and getattr(ruids[0], "uid", ""): + set_user_record_uid(mach, ruids[0].uid, is_external) + if mach.pam_settings and getattr(mach.pam_settings, "jit_settings", None): + jit = mach.pam_settings.jit_settings + ref = getattr(jit, "pam_directory_record", None) or "" + if ref and isinstance(ref, str) and ref.strip(): + matches = [x for x in pam_directories if getattr(x, "title", None) == ref.strip()] + if len(matches) == 1: + jit.pam_directory_uid = matches[0].uid + resolve_script_creds(mach, users, resources) + if hasattr(mach, "users") and isinstance(mach.users, list): + for usr in mach.users: + if usr and hasattr(usr, "rotation_settings") and usr.rotation_settings: + rot = getattr(usr.rotation_settings, "rotation", None) + if rot == "general": + usr.rotation_settings.resourceUid = mach.uid + elif rot in ("iam_user", "scripts_only"): + usr.rotation_settings.resourceUid = pam_cfg_uid + resolve_script_creds(usr, users, resources) + if hasattr(mach, "rbi_settings") and getattr(mach.rbi_settings, "connection", None): + conn = mach.rbi_settings.connection + if getattr(conn, "protocol", None) and str(getattr(conn.protocol, "value", "") or "").lower() == "http": + creds = getattr(conn, "httpCredentials", None) + if creds: + cred = str(creds[0]) if isinstance(creds, list) else str(creds) + matches = [x for x in users if getattr(x, "title", None) == cred] + matches = matches or [x for x in users if getattr(x, "login", None) == cred] + if len(matches) == 1 and getattr(matches[0], "uid", ""): + mach.rbi_settings.connection.httpCredentialsUid = [matches[0].uid] + for usr in users: + if usr and hasattr(usr, "rotation_settings") and usr.rotation_settings: + rot = getattr(usr.rotation_settings, "rotation", None) + if rot in ("iam_user", "scripts_only"): + usr.rotation_settings.resourceUid = pam_cfg_uid + elif rot == "general": + res = getattr(usr.rotation_settings, "resource", "") or "" + if res: 
+ ruids = [x for x in machines if getattr(x, "title", None) == res] + ruids = ruids or [x for x in machines if getattr(x, "login", None) == res] + if ruids: + usr.rotation_settings.resourceUid = ruids[0].uid + resolve_script_creds(usr, users, resources) + + if step2_errors: + project["error_count"] = project.get("error_count", 0) + len(step2_errors) + + x_count = sum(1 for o in chain(resources, users) if getattr(o, "_extend_tag", None) == "existing") + for mach in resources: + if hasattr(mach, "users") and isinstance(mach.users, list): + x_count += sum(1 for u in mach.users if getattr(u, "_extend_tag", None) == "existing") + y_count = 0 + for o in chain(resources, users): + if getattr(o, "_extend_tag", None) == "new": + y_count += 1 + for mach in resources: + if hasattr(mach, "users") and isinstance(mach.users, list): + y_count += sum(1 for u in mach.users if getattr(u, "_extend_tag", None) == "new") + + existing_rec_msg = f"{x_count} existing records (skipped)" if x_count else "0 existing records" + total_line = f"{existing_rec_msg}, {y_count} new records to be created" + for err in step2_errors: + print(f" {err}") + if step2_errors: + print(f"Total: {len(step2_errors)} errors") + + if dry_run: + for o in chain(resources, users): + tag = getattr(o, "_extend_tag", "?") + path = getattr(o, "folder_path", "") or "autodetect" + otype = getattr(o, "type", "") or "" + label = getattr(o, "title", None) or getattr(o, "login", None) or "" + uid_suffix = f"\tuid={getattr(o, 'uid', '')}" if tag == "existing" else "" + print(f" [DRY RUN] [{tag}] folder={path}\trecord={otype}: {label}{uid_suffix}") + for mach in resources: + if hasattr(mach, "users") and isinstance(mach.users, list): + for u in mach.users: + tag = getattr(u, "_extend_tag", "?") + path = getattr(u, "folder_path", "") or "autodetect" + utype = getattr(u, "type", "") or "" + label = getattr(u, "title", None) or getattr(u, "login", None) or "" + uid_suffix = f"\tuid={getattr(u, 'uid', '')}" if tag == "existing" 
else "" + print(f" [DRY RUN] [{tag}] folder={path}\trecord={utype}: {label} (nested on {getattr(mach, 'title', '')}){uid_suffix}") + print(f"[DRY RUN] {total_line}") + else: + if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: + for o in chain(resources, users): + tag = getattr(o, "_extend_tag", "?") + path = getattr(o, "folder_path", "") or "autodetect" + otype = getattr(o, "type", "") or "" + label = getattr(o, "title", None) or getattr(o, "login", None) or "" + uid_suffix = f"\tuid={getattr(o, 'uid', '')}" if tag == "existing" else "" + print(f" [DEBUG] [{tag}] folder={path}\trecord={otype}: {label}{uid_suffix}") + for mach in resources: + if hasattr(mach, "users") and isinstance(mach.users, list): + for u in mach.users: + tag = getattr(u, "_extend_tag", "?") + path = getattr(u, "folder_path", "") or "autodetect" + utype = getattr(u, "type", "") or "" + label = getattr(u, "title", None) or getattr(u, "login", None) or "" + uid_suffix = f"\tuid={getattr(u, 'uid', '')}" if tag == "existing" else "" + print(f" [DEBUG] [{tag}] folder={path}\trecord={utype}: {label} (nested on {getattr(mach, 'title', '')}){uid_suffix}") + print(total_line) + + project["mapped_resources"] = resources + project["mapped_users"] = users + project["new_record_count"] = y_count + return (resources, users, step2_errors, y_count) + + def autodetect_folders(self, params, project: dict) -> list: + """Step 3: Autodetect resources_folder_uid and users_folder_uid when new records have no folder_path. + Call only when error_count==0 and there are records with no uid and no folder_path (tagged new). 
+ Returns list of step3 errors; updates project['folders'] with resources_folder_uid/users_folder_uid on success.""" + step3_errors = [] + folders_out = project.get("folders") or {} + ksm_shared_folders = project.get("ksm_shared_folders") or [] + subfolder_record_cache = getattr(params, "subfolder_record_cache", None) or {} + + new_no_path = [] + for o in chain(project.get("mapped_resources", []), project.get("mapped_users", [])): + if getattr(o, "_extend_tag", None) == "new": + if not (getattr(o, "folder_path", None) or "").strip(): + new_no_path.append(o) + for mach in project.get("mapped_resources", []): + if hasattr(mach, "users") and isinstance(mach.users, list): + for u in mach.users: + if getattr(u, "_extend_tag", None) == "new" and not (getattr(u, "folder_path", None) or "").strip(): + new_no_path.append(u) + if not new_no_path: + return step3_errors + + shf_list = [(shf["uid"], shf.get("name") or "") for shf in ksm_shared_folders] + if len(shf_list) == 1: + folders_out["resources_folder_uid"] = shf_list[0][0] + folders_out["users_folder_uid"] = shf_list[0][0] + print("Warning: Using single shared folder for both resources and users (best practice: separate).") + return step3_errors + + if len(shf_list) == 2: + names = [n for _, n in shf_list] + r_idx = next((i for i, n in enumerate(names) if n.endswith(" - Resources") or n.endswith("- Resources")), -1) + u_idx = next((i for i, n in enumerate(names) if n.endswith(" - Users") or n.endswith("- Users")), -1) + if r_idx >= 0 and u_idx >= 0 and r_idx != u_idx: + folders_out["resources_folder_uid"] = shf_list[r_idx][0] + folders_out["users_folder_uid"] = shf_list[u_idx][0] + return step3_errors + + non_empty = [] + for shf in ksm_shared_folders: + uids = _folder_uids_under_shf(shf) + if any(subfolder_record_cache.get(fuid) for fuid in uids): + non_empty.append(shf) + if len(non_empty) == 0: + step3_errors.append("Autodetect: no folders contain records; cannot assign resources/users folders.") + 
project["error_count"] = project.get("error_count", 0) + len(step3_errors) + for e in step3_errors: + print(f" {e}") + print(f"Total: {len(step3_errors)} errors") + return step3_errors + if len(non_empty) == 1: + folders_out["resources_folder_uid"] = non_empty[0]["uid"] + folders_out["users_folder_uid"] = non_empty[0]["uid"] + print("Warning: Using single non-empty folder for both resources and users.") + return step3_errors + if len(non_empty) == 2: + res_uid = users_uid = None + for shf in non_empty: + uids = _folder_uids_under_shf(shf) + for fuid in uids: + recs = _get_records_in_folder(params, fuid) + if not recs: + continue + for ruid, _title, rtype in recs: + rtype = (rtype or "").lower() + if rtype in ("pamuser", "login"): + users_uid = shf["uid"] + break + if rtype in ("pammachine", "pamdatabase", "pamdirectory", "pamremotebrowser"): + res_uid = shf["uid"] + break + if users_uid is not None or res_uid is not None: + break + if users_uid is not None and res_uid is not None: + break + if res_uid is not None and users_uid is not None: + folders_out["resources_folder_uid"] = res_uid + folders_out["users_folder_uid"] = users_uid + return step3_errors + step3_errors.append("Autodetect: could not determine which folder is resources vs users.") + else: + step3_errors.append("Autodetect: three or more non-empty folders; add folder_path to disambiguate.") + project["error_count"] = project.get("error_count", 0) + len(step3_errors) + for e in step3_errors: + print(f" {e}") + if step3_errors: + print(f"Total: {len(step3_errors)} errors") + return step3_errors + + def create_subfolder(self, params, folder_name:str, parent_uid:str="", permissions:Optional[Dict]=None, folder_uid:Optional[str]=None): + # folder_uid: if provided, create folder with this UID (same as records with pre-generated uid). 
+ + name = str(folder_name or "").strip() + base_folder = params.folder_cache.get(parent_uid, None) or params.root_folder + + shared_folder = True if permissions else False + user_folder = True if not permissions else False # uf or sff (split later) + if not folder_uid: + folder_uid = api.generate_record_uid() + request: Dict[str, Any] = { + "command": "folder_add", + "folder_type": "user_folder", + "folder_uid": folder_uid + } + + if shared_folder: + if base_folder.type in {BaseFolderNode.RootFolderType, BaseFolderNode.UserFolderType}: + request["folder_type"] = "shared_folder" + for perm in ["manage_users", "manage_records", "can_share", "can_edit"]: + if permissions and permissions.get(perm, False) == True: + request[perm] = True + else: + raise CommandError("pam", "Shared folders cannot be nested") + elif user_folder: + if base_folder.type in {BaseFolderNode.SharedFolderType, BaseFolderNode.SharedFolderFolderType}: + request["folder_type"] = "shared_folder_folder" + else: + request["folder_type"] = "user_folder" + + if request.get("folder_type") is None: + if base_folder.type in {BaseFolderNode.SharedFolderType, BaseFolderNode.SharedFolderFolderType}: + request["folder_type"] = "shared_folder_folder" + + folder_key = os.urandom(32) + encryption_key = params.data_key + if request["folder_type"] == "shared_folder_folder": + sf_uid = base_folder.shared_folder_uid if base_folder.type == BaseFolderNode.SharedFolderFolderType else base_folder.uid + sf = params.shared_folder_cache[sf_uid] + encryption_key = sf["shared_folder_key_unencrypted"] + request["shared_folder_uid"] = sf_uid + + request["key"] = utils.base64_url_encode(crypto.encrypt_aes_v1(folder_key, encryption_key)) + if base_folder.type not in {BaseFolderNode.RootFolderType, BaseFolderNode.SharedFolderType}: + request["parent_uid"] = base_folder.uid + + if request["folder_type"] == "shared_folder": + request["name"] = utils.base64_url_encode(crypto.encrypt_aes_v1(name.encode("utf-8"), folder_key)) + 
data_dict = {"name": name} + data = json.dumps(data_dict) + request["data"] = utils.base64_url_encode(crypto.encrypt_aes_v1(data.encode("utf-8"), folder_key)) + + api.communicate(params, request) + api.sync_down(params) + params.environment_variables[LAST_FOLDER_UID] = folder_uid + if request["folder_type"] == "shared_folder": + params.environment_variables[LAST_SHARED_FOLDER_UID] = folder_uid + return folder_uid + + def find_folders(self, params, parent_uid:str, folder:str, is_shared_folder:bool) -> List[BaseFolderNode]: + result: List[BaseFolderNode] = [] + folders = params.folder_cache if params and params.folder_cache else {} + if not isinstance(folders, dict): + return result + + puid = parent_uid if parent_uid else None # root folder parent uid is set to None + matches = {k: v for k, v in folders.items() if v.parent_uid == puid and v.name == folder} + result = [v for k, v in matches.items() if + (is_shared_folder and v.type == BaseFolderNode.SharedFolderType) or + (not is_shared_folder and v.type == BaseFolderNode.UserFolderType)] + return result + + def create_ksm_app(self, params, app_name) -> str: + app_record_data = { + "title": app_name, + "type": "app" + } + + data_json = json.dumps(app_record_data) + record_key_unencrypted = utils.generate_aes_key() + record_key_encrypted = crypto.encrypt_aes_v2(record_key_unencrypted, params.data_key) + + app_record_uid_str = api.generate_record_uid() + app_record_uid = utils.base64_url_decode(app_record_uid_str) + + data = data_json.decode("utf-8") if isinstance(data_json, bytes) else data_json + data = api.pad_aes_gcm(data) + + rdata = bytes(data, "utf-8") # type: ignore + rdata = crypto.encrypt_aes_v2(rdata, record_key_unencrypted) + + ra = record_pb2.ApplicationAddRequest() # pylint: disable=E1101 + ra.app_uid = app_record_uid # type: ignore + ra.record_key = record_key_encrypted # type: ignore + ra.client_modified_time = api.current_milli_time() # type: ignore + ra.data = rdata # type: ignore + + 
api.communicate_rest(params, ra, "vault/application_add") + api.sync_down(params) + return app_record_uid_str + + def create_gateway( + self, params, gateway_name, ksm_app, config_init, ott_expire_in_min=5 + ): + token = KSMCommand.add_client( + params, + app_name_or_uid=ksm_app, + count=1, + unlock_ip=True, + first_access_expire_on=ott_expire_in_min, + access_expire_in_min=None, # None=Never, int = num of min + client_name=gateway_name, + config_init=config_init, + silent=True, + client_type=enterprise_pb2.DISCOVERY_AND_ROTATION_CONTROLLER) # pylint: disable=E1101 + api.sync_down(params) + + return token + + def verify_users_and_teams(self, params, users_and_teams): + api.load_available_teams(params) + for item in users_and_teams: + name = item.get("name", "") + teams = [] + # do not use params.team_cache: + for team in params.available_team_cache or []: + team = api.Team(team_uid=team.get("team_uid", ""), name=team.get("team_name", "")) + if name == team.team_uid or name.casefold() == team.name.casefold(): + teams.append(team) + users = [] + for user in params.enterprise.get("users", []): + # if user["node_id"] not in node_scope: continue + # skip: node_id, status, lock, tfa_enabled, account_share_expiration + usr = { + "id": user.get("enterprise_user_id", "") or "", + "username": user.get("username", "") or "", + "name": user.get("data", {}).get("displayname", "") or "" + } + if name in usr.values(): users.append(usr) + + teams_users = teams + users + num_found = len(teams_users) + if num_found == 0: + logging.warning(f"""Team/User: {bcolors.WARNING}"{name}"{bcolors.ENDC} - not found (skipped).""") + elif num_found > 1: + logging.warning(f"""Multiple matches ({num_found}) for team/user: {bcolors.WARNING}"{name}"{bcolors.ENDC} found (skipped).""") + if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: + msg = "" + for x in teams_users: + msg += "\n" + (f"team_uid: {x.team_uid}, name: {x.name}" if isinstance(x, api.Team) else str(x)) + 
logging.debug(f"Matches from team/user lookup: {msg}") + + + def process_data(self, params, project): + """Extend: only create records tagged new; use resolved_folder_uid; for existing machines only add new users.""" + if project.get("options", {}).get("dry_run", False) is True: + return + from ..tunnel_and_connections import PAMTunnelEditCommand + from ..discoveryrotation import PAMCreateRecordRotationCommand + + resources = project.get("mapped_resources") or [] + users = project.get("mapped_users") or [] + pam_cfg_uid = (project.get("pam_config") or {}).get("pam_config_uid", "") + shfres = (project.get("folders") or {}).get("resources_folder_uid", "") + shfusr = (project.get("folders") or {}).get("users_folder_uid", "") + pce = (project.get("pam_config") or {}).get("pam_config_object") + + print("Started importing data...") + encrypted_session_token, encrypted_transmission_key, transmission_key = get_keeper_tokens(params) + tdag = TunnelDAG(params, encrypted_session_token, encrypted_transmission_key, pam_cfg_uid, True, + transmission_key=transmission_key) + pte = PAMTunnelEditCommand() + prc = PAMCreateRecordRotationCommand() + pdelta = 10 + + new_users = [u for u in users if getattr(u, "_extend_tag", None) == "new"] + if new_users: + logging.warning(f"Processing external users: {len(new_users)}") + for n, user in enumerate(new_users): + folder_uid = getattr(user, "resolved_folder_uid", None) or shfusr + user.create_record(params, folder_uid) + if n % pdelta == 0: + print(f"{n}/{len(new_users)}") + print(f"{len(new_users)}/{len(new_users)}\n") + + resources_sorted = sorted(resources, key=lambda r: (getattr(r, "type", "") or "").lower() != "pamdirectory") + new_resources = [r for r in resources_sorted if getattr(r, "_extend_tag", None) == "new"] + existing_resources = [r for r in resources_sorted if getattr(r, "_extend_tag", None) == "existing"] + if new_resources: + logging.warning(f"Processing resources: {len(new_resources)}") + for n, mach in 
enumerate(new_resources): + if n % pdelta == 0: + print(f"{n}/{len(new_resources)}") + folder_uid = getattr(mach, "resolved_folder_uid", None) or shfres + admin_uid = get_admin_credential(mach, True) + mach.create_record(params, folder_uid) + tdag.link_resource_to_config(mach.uid) + if isinstance(mach, PamRemoteBrowserObject): + args = parse_command_options(mach, True) + pte.execute(params, config=pam_cfg_uid, silent=True, **args) + args = parse_command_options(mach, False) + if args.get("remote_browser_isolation", False) is True: + args["connections"] = True + tdag.set_resource_allowed(**args) + else: + args = parse_command_options(mach, True) + if admin_uid: + args["admin"] = admin_uid + pte.execute(params, config=pam_cfg_uid, silent=True, **args) + if admin_uid and is_admin_external(mach): + tdag.link_user_to_resource(admin_uid, mach.uid, is_admin=True, belongs_to=False) + args = parse_command_options(mach, False) + tdag.set_resource_allowed(**args) + mach_users = getattr(mach, "users", []) or [] + for user in mach_users: + if getattr(user, "_extend_tag", None) != "new": + continue + rs = getattr(user, "rotation_settings", None) + if isinstance(user, PamUserObject) and rs and (getattr(rs, "rotation", "") or "").lower() == "general": + rs.resourceUid = mach.uid + ufolder = getattr(user, "resolved_folder_uid", None) or shfusr + user.create_record(params, ufolder) + if isinstance(user, PamUserObject): + tdag.link_user_to_resource(user.uid, mach.uid, admin_uid == user.uid, True) + if rs: + args = {"force": True, "config": pam_cfg_uid, "record_name": user.uid, "admin": admin_uid, "resource": mach.uid} + enabled = getattr(rs, "enabled", "") + key = {"on": "enable", "off": "disable"}.get(enabled, "") + if key: + args[key] = True + schedule = getattr(rs, "schedule", None) + schedule_type = getattr(schedule, "type", "") if schedule else "" + if schedule_type == "on-demand": + args["on_demand"] = True + elif schedule_type == "cron" and schedule and getattr(schedule, 
"cron", None): + args["schedule_cron_data"] = rs.schedule.cron + if getattr(rs, "password_complexity", None): + args["pwd_complexity"] = rs.password_complexity + prc.execute(params, silent=True, **args) + if new_resources: + print(f"{len(new_resources)}/{len(new_resources)}\n") + + for mach in existing_resources: + mach_users = getattr(mach, "users", []) or [] + admin_uid = get_admin_credential(mach, True) + for user in mach_users: + if getattr(user, "_extend_tag", None) != "new": + continue + rs = getattr(user, "rotation_settings", None) + if isinstance(user, PamUserObject) and rs and (getattr(rs, "rotation", "") or "").lower() == "general": + rs.resourceUid = mach.uid + ufolder = getattr(user, "resolved_folder_uid", None) or shfusr + user.create_record(params, ufolder) + if isinstance(user, PamUserObject): + tdag.link_user_to_resource(user.uid, mach.uid, admin_uid == user.uid, True) + if rs: + args = {"force": True, "config": pam_cfg_uid, "record_name": user.uid, "admin": admin_uid, "resource": mach.uid} + enabled = getattr(rs, "enabled", "") + key = {"on": "enable", "off": "disable"}.get(enabled, "") + if key: + args[key] = True + schedule = getattr(rs, "schedule", None) + schedule_type = getattr(schedule, "type", "") if schedule else "" + if schedule_type == "on-demand": + args["on_demand"] = True + elif schedule_type == "cron" and schedule and getattr(schedule, "cron", None): + args["schedule_cron_data"] = rs.schedule.cron + if getattr(rs, "password_complexity", None): + args["pwd_complexity"] = rs.password_complexity + prc.execute(params, silent=True, **args) + + if pce and getattr(pce, "scripts", None) and getattr(pce.scripts, "scripts", None): + refs = [x for x in pce.scripts.scripts if getattr(x, "record_refs", None)] + if refs: + api.sync_down(params) + add_pam_scripts(params, pam_cfg_uid, refs) + logging.debug("Done processing project data.") + return + From 57e7d233e658584b366a6a457ff1baa6a24fd154 Mon Sep 17 00:00:00 2001 From: John Walstra Date: Wed, 4 
Feb 2026 23:02:42 -0600 Subject: [PATCH 08/16] Update SaaS commands to use new saasConfiguration record type. --- keepercommander/commands/pam_saas/config.py | 4 ++-- keepercommander/commands/pam_saas/set.py | 7 ++++-- keepercommander/commands/pam_saas/update.py | 24 ++++++++++++++++----- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/keepercommander/commands/pam_saas/config.py b/keepercommander/commands/pam_saas/config.py index 21dbd2e9e..36d8e82be 100644 --- a/keepercommander/commands/pam_saas/config.py +++ b/keepercommander/commands/pam_saas/config.py @@ -166,7 +166,7 @@ def _create_config(params: KeeperParams, value = get_field_input(item) if value is not None: field_type = item.type - if field_type in ["url", "int", "number", "bool", "enum"]: + if field_type in ["int", "number", "bool", "enum"]: field_type = "text" field_args = { @@ -189,7 +189,7 @@ def _create_config(params: KeeperParams, print(f"{bcolors.FAIL}Require a record title.") record = vault.TypedRecord() - record.type_name = "login" + record.type_name = "saasConfiguration" record.record_uid = utils.generate_uid() record.record_key = utils.generate_aes_key() record.title = title diff --git a/keepercommander/commands/pam_saas/set.py b/keepercommander/commands/pam_saas/set.py index 2c8f0d18d..5c428cd3b 100644 --- a/keepercommander/commands/pam_saas/set.py +++ b/keepercommander/commands/pam_saas/set.py @@ -1,5 +1,7 @@ from __future__ import annotations import argparse +import logging + from ..discover import PAMGatewayActionDiscoverCommandBase, GatewayContext from ... import vault from . import get_plugins_map @@ -74,8 +76,9 @@ def execute(self, params: KeeperParams, **kwargs): # Make sure this config is a Login record. 
- if config_record.record_type != "login": - print(self._f("The SaaS configuration record is not a Login record.")) + if config_record.record_type not in ["login", "saasConfiguration"]: + print(self._f("The SaaS configuration record is not a SaaS configuration record: " + f"{config_record.record_type}")) return plugin_name_field = next((x for x in config_record.custom if x.label == "SaaS Type"), None) diff --git a/keepercommander/commands/pam_saas/update.py b/keepercommander/commands/pam_saas/update.py index 3adfe5675..08b136141 100644 --- a/keepercommander/commands/pam_saas/update.py +++ b/keepercommander/commands/pam_saas/update.py @@ -242,11 +242,17 @@ def _update_config(cls, else: print(f" {bcolors.FAIL}* the configuration record's required field(s) are missing or blank: " f"{', '.join(missing_fields)}{bcolors.ENDC}") + + # If the record type is login, migrate to saasConfiguration + if config_record.record_type == "login": + print(f" {bcolors.OKGREEN}* migrate record type to SaaS Configuration.{bcolors.ENDC}") + config_record.type_name = "saasConfiguration" + record_management.update_record(params, config_record) + print("") - return plugin logging.debug("plugin doesn't used attached scripts, or bad SaaS type in config record.") - return None + return plugin def execute(self, params: KeeperParams, **kwargs): @@ -281,9 +287,10 @@ def execute(self, params: KeeperParams, **kwargs): if do_all: logging.debug("search vault for login record types") - for record in list(vault_extensions.find_records(params, record_type="login")): + for record in list(vault_extensions.find_records(params, record_type=["login", "saasConfiguration"])): logging.debug("--------------------------------------------------------------------------------------") config_record = vault.TypedRecord.load(params, record.record_uid) # type: vault.TypedRecord + logging.debug(f"checking record {record.record_uid}, {record.title}") try: self._update_config( @@ -299,6 +306,8 @@ def execute(self, params: 
KeeperParams, **kwargs): logging.debug(traceback.format_exc()) logging.debug(f"ERROR (no fatal): {err}") + params.sync_data = True + elif config_record_uid is not None: config_record = vault.TypedRecord.load(params, config_record_uid) # type: vault.TypedRecord if config_record is None: @@ -322,6 +331,11 @@ def execute(self, params: KeeperParams, **kwargs): api.sync_down(params) config_record = vault.TypedRecord.load(params, config_record_uid) # type: vault.TypedRecord + # If the record type is login, migrate to saasConfiguration + if config_record.record_type == "login": + logging.debug("migrating from login to saasConfiguration record type") + config_record.type_name = "saasConfiguration" + for required in [True, False]: for field in plugin.fields: if field.required is required: @@ -341,11 +355,11 @@ def execute(self, params: KeeperParams, **kwargs): if not do_dry_run: record_management.update_record(params, config_record) print("") - print(f" {bcolors.OKGREEN}* the configuration record has been updated.{bcolors.ENDC}") + print(f"{bcolors.OKGREEN}The SaaS configuration record has been updated.{bcolors.ENDC}") print("") else: print("") - print(f" {bcolors.OKBLUE}* the configuration record was not saved due " + print(f"{bcolors.OKBLUE}The SaaS configuration record was not saved due " f"to dry run.{bcolors.ENDC}") print("") From eb032489dec13dd04f214101965905a07fe6340b Mon Sep 17 00:00:00 2001 From: pvagare-ks Date: Wed, 18 Feb 2026 00:45:24 +0530 Subject: [PATCH 09/16] Enterprise and MSP command improvements (#1816) (#1817) --- keepercommander/commands/enterprise.py | 18 ++++++++++++++---- keepercommander/commands/msp.py | 4 ++-- .../service/util/parse_keeper_response.py | 3 ++- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/keepercommander/commands/enterprise.py b/keepercommander/commands/enterprise.py index 41a073a55..f4d58ddbf 100644 --- a/keepercommander/commands/enterprise.py +++ b/keepercommander/commands/enterprise.py @@ -100,9 +100,9 @@ def 
register_command_info(aliases, command_info): SUPPORTED_NODE_COLUMNS = ['parent_node', 'parent_id', 'user_count', 'users', 'team_count', 'teams', 'role_count', 'roles', - 'provisioning'] + 'provisioning', 'isolated'] SUPPORTED_USER_COLUMNS = ['name', 'status', 'transfer_status', 'node', 'team_count', 'teams', 'role_count', - 'roles', 'alias', '2fa_enabled'] + 'roles', 'alias', '2fa_enabled', 'job_title'] SUPPORTED_TEAM_COLUMNS = ['restricts', 'node', 'user_count', 'users', 'queued_user_count', 'queued_users', 'role_count', 'roles'] SUPPORTED_ROLE_COLUMNS = ['visible_below', 'default_role', 'admin', 'node', 'user_count', 'users', 'team_count', 'teams', 'enforcement_count', 'enforcements', 'managed_node_count', 'managed_nodes', 'managed_nodes_permissions'] @@ -460,6 +460,8 @@ def execute(self, params, **kwargs): } if 'account_share_expiration' in user: u['account_share_expiration'] = user['account_share_expiration'] + if 'job_title' in user: + u['job_title'] = user['job_title'] users[user_id] = u if node_id in nodes: nodes[node_id]['users'].append(user_id) @@ -760,6 +762,8 @@ def tree_node(node): elif column == 'sso_provisioning': status = sso_provisioning.get(node_id) if sso_provisioning else None row.append(status) + elif column == 'isolated': + row.append(n.get('isolated', False)) else: row.append(None) @@ -846,6 +850,8 @@ def tree_node(node): if x['enterprise_user_id'] == user_id and x['username'] != email]) elif column == '2fa_enabled': row.append(u.get('tfa_enabled') or '') + elif column == 'job_title': + row.append(u.get('job_title') or '') if pattern: if not any(1 for x in row if x and str(x).lower().find(pattern) >= 0): continue @@ -1964,9 +1970,13 @@ def execute(self, params, **kwargs): command = rq.get('command') if command == 'enterprise_user_add': if rs['result'] == 'success': - logging.info('%s user invited', rq['enterprise_user_username']) + logging.info('%s user invited with Enterprise User ID : %s', + rq['enterprise_user_username'], 
rq['enterprise_user_id']) else: - logging.warning('%s failed to invite user: %s', rq['enterprise_user_username'], rs['message']) + error_msg = rs['message'] + if error_msg and ';' in error_msg: + error_msg = error_msg.split(';')[0].strip() + logging.warning('%s failed to invite user: %s', rq['enterprise_user_username'], error_msg) else: user = None if not user and 'username' in rq: diff --git a/keepercommander/commands/msp.py b/keepercommander/commands/msp.py index 447f10f74..950d6bf0a 100644 --- a/keepercommander/commands/msp.py +++ b/keepercommander/commands/msp.py @@ -434,8 +434,8 @@ def execute(self, params, **kwargs): plan = plan_map.get(plan, plan) seats = mc['number_of_seats'] - if seats > 2000000: - seats = None + if seats > 2147483646: + seats = -1 if verbose: table.append([mc['mc_enterprise_id'], mc['mc_enterprise_name'], node_path, diff --git a/keepercommander/service/util/parse_keeper_response.py b/keepercommander/service/util/parse_keeper_response.py index a75c9067a..7f8f004f1 100644 --- a/keepercommander/service/util/parse_keeper_response.py +++ b/keepercommander/service/util/parse_keeper_response.py @@ -994,7 +994,8 @@ def _parse_logging_based_command(command: str, response_str: str) -> Dict[str, A "cannot assign", "cannot move", "cannot get", - "not integer" + "not integer", + "expects" ] error_patterns = [ From c058b35369b190e90d3e33d608b80ba8f6015bda Mon Sep 17 00:00:00 2001 From: Ayrris Aunario <105313137+aaunario-keeper@users.noreply.github.com> Date: Tue, 17 Feb 2026 13:39:49 -0600 Subject: [PATCH 10/16] KC-1142 Per-user cache invalidation for filtered compliance reports (#1814) * KC-1142 Per-user cache invalidation for filtered compliance reports Add granular per-user cache invalidation so filtered compliance reports (--username, --team) only fetch stale users from the API instead of the entire enterprise. Adds last_refreshed timestamp per StorageUser and a selective upsert method that preserves cached data for other users. 
Co-Authored-By: Claude Opus 4.6 * KC-1142 Address PR review feedback - Early error for invalid --username in aging-report - Fix team resolution fall-through in compliance filter - Warn when username/team filters match no enterprise users - Always update shared_records_only flag in per-user cache path - Update global prelim timestamp on selective user refresh - Guard against NULL last_refreshed from schema migration * KC-1142 Fix type annotations for user_filter parameter * KC-1142 Fix misleading warning message for unmatched user filters * KC-1142 Fix --username and --team filters to use OR instead of AND Previously, --username filtered first, then --team filtered the result, meaning an invalid username with a valid team returned no records. Now both are unioned before applying other filters. * KC-1142 Replace loops with dict lookups for user/team resolution * KC-1142 Always refresh compliance data when user_filter is set The global last_compliance_data_update timestamp caused filtered reports for different users to skip the compliance sync after the first user's run had already set it. * KC-1142 Revert global timestamp update in per-user prelim sync Setting set_prelim_data_updated() after a partial user sync caused subsequent unfiltered runs to skip the full sync, showing only previously-cached users. Per-user timestamps on StorageUser are sufficient for the per-user path. * KC-1142 Only refresh compliance data when prelim had stale users Unconditionally forcing compliance sync for filtered runs caused re-fetches on warm cache. Now tracks whether get_prelim_data found stale users and only triggers compliance sync when it did. * KC-1142 Add per-user compliance cache with last_compliance_refreshed Replace _had_stale_users hack with proper per-user compliance timestamp on StorageUser. Prelim and compliance caches are now independently tracked per user, so aging-report warming prelim cache won't incorrectly gate compliance freshness. 
--------- Co-authored-by: Claude Opus 4.6 --- .gitignore | 3 +- keepercommander/commands/aram.py | 13 +++- keepercommander/commands/compliance.py | 43 +++++++++++-- keepercommander/sox/__init__.py | 84 ++++++++++++++++++++------ keepercommander/sox/sqlite_storage.py | 9 +++ keepercommander/sox/storage_types.py | 2 + 6 files changed, 131 insertions(+), 23 deletions(-) diff --git a/.gitignore b/.gitignore index 02c4882b3..585b35a76 100644 --- a/.gitignore +++ b/.gitignore @@ -20,4 +20,5 @@ dr-logs CLAUDE.md AGENTS.md keeper_db.sqlite -.keeper-memory-mcp/ \ No newline at end of file +.keeper-memory-mcp/ +.mcp/ \ No newline at end of file diff --git a/keepercommander/commands/aram.py b/keepercommander/commands/aram.py index a48dc3214..6651c7be5 100644 --- a/keepercommander/commands/aram.py +++ b/keepercommander/commands/aram.py @@ -1941,10 +1941,21 @@ def parse_date(date_str): in_shared_folder = kwargs.get('in_shared_folder') node_id = get_node_id(params, enterprise_id) + # Pre-filter to specific user if --username is set + user_filter = None + username_arg = kwargs.get('username') + if username_arg: + user_lookup = {eu.get('username'): eu.get('enterprise_user_id') + for eu in params.enterprise.get('users', [])} + user_id = user_lookup.get(username_arg) + if user_id is None: + raise CommandError('aram', f'User "{username_arg}" not found in enterprise') + user_filter = {user_id} + get_sox_data_fn = get_compliance_data if exclude_deleted or in_shared_folder else get_prelim_data sd_args = [params, node_id, enterprise_id, rebuild] if exclude_deleted or in_shared_folder \ else [params, enterprise_id, rebuild] - sd_kwargs = {'min_updated': period_min_ts} + sd_kwargs = {'min_updated': period_min_ts, 'user_filter': user_filter} sd = get_sox_data_fn(*sd_args, **sd_kwargs) AgingReportCommand.update_aging_data(params, sd, period_start_ts=period_min_ts, rebuild=rebuild) diff --git a/keepercommander/commands/compliance.py b/keepercommander/commands/compliance.py index 
c813850bb..054e55a9b 100644 --- a/keepercommander/commands/compliance.py +++ b/keepercommander/commands/compliance.py @@ -147,9 +147,40 @@ def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any rebuild = kwargs.get('rebuild') no_cache = kwargs.get('no_cache') shared_only = kwargs.get('shared') + + # Pre-filter users for --username and --team to avoid fetching data for all enterprise users + user_filter = None + usernames = kwargs.get('username') + team_refs = kwargs.get('team') + if usernames or team_refs: + filtered_user_ids = set() + enterprise_users = params.enterprise.get('users', []) + if usernames: + username_set = set(usernames) + user_lookup = {eu.get('username'): eu.get('enterprise_user_id') for eu in enterprise_users} + filtered_user_ids.update(uid for u, uid in user_lookup.items() if u in username_set) + if team_refs: + enterprise_teams = params.enterprise.get('teams', []) + team_uids = {t.get('team_uid') for t in enterprise_teams} + team_name_lookup = {t.get('name'): t.get('team_uid') for t in enterprise_teams} + resolved_team_uids = set() + for t_ref in team_refs: + if t_ref in team_uids: + resolved_team_uids.add(t_ref) + elif t_ref in team_name_lookup: + resolved_team_uids.add(team_name_lookup[t_ref]) + enterprise_team_users = params.enterprise.get('team_users', []) + filtered_user_ids.update(tu.get('enterprise_user_id') for tu in enterprise_team_users + if tu.get('team_uid') in resolved_team_uids) + if not filtered_user_ids: + logging.warning('No enterprise users matched the provided filters (usernames=%s, teams=%s).', + usernames, team_refs) + user_filter = filtered_user_ids if filtered_user_ids else None + get_sox_data_fn = sox.get_prelim_data if self.prelim_only else sox.get_compliance_data fn_args = [params, enterprise_id] if self.prelim_only else [params, node_id, enterprise_id] - fn_kwargs = {'rebuild': rebuild, 'min_updated': min_data_ts, 'no_cache': no_cache, 'shared_only': shared_only} + fn_kwargs = {'rebuild': rebuild, 
'min_updated': min_data_ts, 'no_cache': no_cache, 'shared_only': shared_only, + 'user_filter': user_filter} sd = get_sox_data_fn(*fn_args, **fn_kwargs) report_fmt = kwargs.get('format', 'table') report_data = self.generate_report_data(params, kwargs, sd, report_fmt, node_id, root_node_id) @@ -226,12 +257,16 @@ def get_team_users(team_ref): return [u for u in users if u.user_uid in team_users] usernames = kwargs.get('username') - filtered = [o for o in rec_owners if o.email in usernames] if usernames else rec_owners + team_refs = kwargs.get('team') + if usernames or team_refs: + username_matched = {o for o in rec_owners if o.email in usernames} if usernames else set() + team_matched = set(filter_by_teams(rec_owners, team_refs)) if team_refs else set() + filtered = list(username_matched | team_matched) + else: + filtered = rec_owners job_titles = kwargs.get('job_title') filtered = [o for o in filtered if o.job_title in job_titles] if job_titles else filtered filtered = [o for o in filtered if o.node_id == node] if node != root_node else filtered - team_refs = kwargs.get('team') - filtered = filter_by_teams(filtered, team_refs) if team_refs else filtered return filtered def filter_records(records): diff --git a/keepercommander/sox/__init__.py b/keepercommander/sox/__init__.py index 6d731d376..b69b92227 100644 --- a/keepercommander/sox/__init__.py +++ b/keepercommander/sox/__init__.py @@ -3,7 +3,7 @@ import os import sqlite3 import sys -from typing import Dict, Tuple +from typing import Dict, Optional, Set, Tuple from .. 
import api, crypto, utils from ..display import Spinner @@ -74,8 +74,8 @@ def get_sox_database_name(params, enterprise_id): # type: (KeeperParams, int) - return os.path.join(path, f'sox_{enterprise_id}.db') -def get_prelim_data(params, enterprise_id=0, rebuild=False, min_updated=0, cache_only=False, no_cache=False, shared_only=False): - # type: (KeeperParams, int, bool, int, bool, bool, bool) -> sox_data.SoxData +def get_prelim_data(params, enterprise_id=0, rebuild=False, min_updated=0, cache_only=False, no_cache=False, shared_only=False, user_filter=None): + # type: (KeeperParams, int, bool, int, bool, bool, bool, Optional[Set[int]]) -> sox_data.SoxData def sync_down(name_by_id, store): # type: (Dict[int, str], sqlite_storage.SqliteSoxStorage) -> None spinner = None use_spinner = not params.batch_mode @@ -108,6 +108,7 @@ def to_user_entity(user, email_lookup): entity.user_uid = user_id email = email_lookup.get(user_id) entity.email = encrypt_data(params, email) if email else b'' + entity.last_refreshed = int(datetime.datetime.now().timestamp()) return entity def to_user_record_link(uuid, ruid): @@ -191,7 +192,10 @@ def sync_all(): problem_emails = '\n'.join([name_by_id.get(id) for id in problem_ids]) logging.error(f'Data could not fetched for the following users: \n{problem_emails}') - store.rebuild_prelim_data(users, records, links) + if user_filter is not None: + store.update_user_prelim_data(users, records, links, set(name_by_id.keys())) + else: + store.rebuild_prelim_data(users, records, links) success = False try: sync_all() @@ -216,18 +220,39 @@ def sync_all(): database_name=database_name, close_connection=lambda: close_cached_connection(database_name) ) - last_updated = storage.last_prelim_data_update - only_shared_cached = storage.shared_records_only - refresh_data = rebuild or not last_updated or min_updated > last_updated or only_shared_cached and not shared_only - if refresh_data and not cache_only: - user_lookup = {x['enterprise_user_id']: 
x['username'] for x in params.enterprise.get('users', [])} - storage.clear_non_aging_data() - sync_down(user_lookup, storage) + if user_filter is not None and not cache_only: + # Per-user cache invalidation: only fetch users with stale or missing cache entries + all_users = params.enterprise.get('users', []) + full_filter_lookup = {x['enterprise_user_id']: x['username'] for x in all_users + if x['enterprise_user_id'] in user_filter} + only_shared_cached = storage.shared_records_only + if rebuild or (only_shared_cached and not shared_only): + stale_ids = set(full_filter_lookup.keys()) + else: + stale_ids = set() + for uid in full_filter_lookup: + user_entity = storage.get_users().get_entity(uid) + if not user_entity or (user_entity.last_refreshed or 0) < min_updated: + stale_ids.add(uid) + if stale_ids: + user_lookup = {uid: full_filter_lookup[uid] for uid in stale_ids} + sync_down(user_lookup, storage) storage.set_shared_records_only(shared_only) + else: + # Full cache invalidation: existing behavior + last_updated = storage.last_prelim_data_update + only_shared_cached = storage.shared_records_only + refresh_data = rebuild or not last_updated or min_updated > last_updated or only_shared_cached and not shared_only + if refresh_data and not cache_only: + user_lookup = {x['enterprise_user_id']: x['username'] for x in params.enterprise.get('users', [])} + storage.clear_non_aging_data() + sync_down(user_lookup, storage) + storage.set_shared_records_only(shared_only) return sox_data.SoxData(params, storage=storage) -def get_compliance_data(params, node_id, enterprise_id=0, rebuild=False, min_updated=0, no_cache=False, shared_only=False): +def get_compliance_data(params, node_id, enterprise_id=0, rebuild=False, min_updated=0, no_cache=False, shared_only=False, user_filter=None): + # type: (KeeperParams, int, int, bool, int, bool, bool, Optional[Set[int]]) -> sox_data.SoxData def sync_down(sdata, node_uid, user_node_id_lookup): recs_processed = 0 spinner = None @@ -440,13 
+465,38 @@ def save_records(records): run_sync_tasks() - sd = get_prelim_data(params, enterprise_id, rebuild=rebuild, min_updated=min_updated, cache_only=not min_updated, shared_only=shared_only) + sd = get_prelim_data(params, enterprise_id, rebuild=rebuild, min_updated=min_updated, cache_only=not min_updated, shared_only=shared_only, user_filter=user_filter) last_compliance_data_update = sd.storage.last_compliance_data_update - refresh_data = rebuild or min_updated > last_compliance_data_update - if refresh_data: + if user_filter is not None: + # Per-user compliance cache invalidation + stale_compliance_ids = set() enterprise_users = params.enterprise.get('users', []) - user_node_ids = {e_user.get('enterprise_user_id'): e_user.get('node_id') for e_user in enterprise_users} - sync_down(sd, node_id, user_node_id_lookup=user_node_ids) + for e_user in enterprise_users: + uid = e_user.get('enterprise_user_id') + if uid in user_filter: + user_entity = sd.storage.get_users().get_entity(uid) + if not user_entity or (user_entity.last_compliance_refreshed or 0) < min_updated: + stale_compliance_ids.add(uid) + if rebuild or stale_compliance_ids: + user_node_ids = {e_user.get('enterprise_user_id'): e_user.get('node_id') + for e_user in enterprise_users if e_user.get('enterprise_user_id') in user_filter} + sync_down(sd, node_id, user_node_id_lookup=user_node_ids) + # Update per-user compliance timestamps + now_ts = int(datetime.datetime.now().timestamp()) + updated_users = [] + for uid in user_filter: + user_entity = sd.storage.get_users().get_entity(uid) + if user_entity: + user_entity.last_compliance_refreshed = now_ts + updated_users.append(user_entity) + if updated_users: + sd.storage.get_users().put_entities(updated_users) + else: + refresh_data = rebuild or min_updated > last_compliance_data_update + if refresh_data: + enterprise_users = params.enterprise.get('users', []) + user_node_ids = {e_user.get('enterprise_user_id'): e_user.get('node_id') for e_user in 
enterprise_users} + sync_down(sd, node_id, user_node_id_lookup=user_node_ids) rebuild_task = sox_data.RebuildTask(is_full_sync=False, load_compliance_data=True) sd.rebuild_data(rebuild_task) return sd diff --git a/keepercommander/sox/sqlite_storage.py b/keepercommander/sox/sqlite_storage.py index de7066adc..ced4414fa 100644 --- a/keepercommander/sox/sqlite_storage.py +++ b/keepercommander/sox/sqlite_storage.py @@ -201,6 +201,15 @@ def rebuild_prelim_data(self, users, records, links): self._user_record_links.put_links(links) self.set_prelim_data_updated() + def update_user_prelim_data(self, users, records, links, user_ids): + """Selectively update prelim data for specific users, preserving cache for others.""" + # Remove old record links for refreshed users (their record set may have changed) + self._user_record_links.delete_links_for_objects(list(user_ids)) + # Upsert user entities and records (INSERT OR REPLACE) + self._users.put_entities(users) + self._records.put_entities(records) + self._user_record_links.put_links(links) + def clear_all(self): self.clear_non_aging_data() self._record_aging.delete_all() diff --git a/keepercommander/sox/storage_types.py b/keepercommander/sox/storage_types.py index 4ea451751..884adcb21 100644 --- a/keepercommander/sox/storage_types.py +++ b/keepercommander/sox/storage_types.py @@ -9,6 +9,8 @@ def __init__(self): self.job_title = b'' self.full_name = b'' self.node_id = 0 + self.last_refreshed = 0 + self.last_compliance_refreshed = 0 def uid(self): # -> int From c8707284ae13c46ae5d8ebff7e42eba55e15c957 Mon Sep 17 00:00:00 2001 From: Ayrris Aunario <105313137+aaunario-keeper@users.noreply.github.com> Date: Wed, 18 Feb 2026 23:22:36 -0600 Subject: [PATCH 11/16] KC-1143 Add --aging flag to main compliance report (#1819) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * KC-1143 Add --aging flag to main compliance report Extract aging fetch/cache functions to module level and wire aging data 
support into ComplianceReportCommand with per-user cache invalidation. Remove last_pw_change fallback to created date. * KC-1143 Compliance report enhancements: aging, filters, incremental sync - Add --aging flag to compliance report with record aging data (created, last_modified, last_rotation, last_pw_change) - KC-1146: filter false-positive event 80 (record_password_change) by occurrence count — first-set events are discarded, only 2+ occurrences count as real password changes - Union last_pw_change with PAM rotation events (250/252) - Add --username and --team filters to all compliance subcommands - Add --resolve-teams flag to sfr and team-report for expanding team filter to include individual team members in shared folder matching - Implement incremental compliance data sync using per-user/per-record staleness checks (last_refreshed, last_compliance_refreshed) - Dynamic chunking with probe timeouts for preliminary data fetching - Client-side request timeouts to prevent indefinite hangs - Suppress throttling log messages from clobbering spinner output - Add portable compliance test suite with auto-discovery from vault --- .gitignore | 6 +- keepercommander/api.py | 6 +- keepercommander/commands/compliance.py | 487 +++++++++++++++++-------- keepercommander/rest_api.py | 12 +- keepercommander/sox/__init__.py | 227 ++++++++---- keepercommander/sox/storage_types.py | 3 + tests/compliance_test.batch | 90 +++++ tests/compliance_test.env.example | 30 ++ tests/compliance_test.sh | 286 +++++++++++++++ 9 files changed, 911 insertions(+), 236 deletions(-) create mode 100644 tests/compliance_test.batch create mode 100644 tests/compliance_test.env.example create mode 100755 tests/compliance_test.sh diff --git a/.gitignore b/.gitignore index 585b35a76..0f3836ed6 100644 --- a/.gitignore +++ b/.gitignore @@ -20,5 +20,9 @@ dr-logs CLAUDE.md AGENTS.md keeper_db.sqlite +__pycache__/ .keeper-memory-mcp/ -.mcp/ \ No newline at end of file +.mcp/ +tests/*_results/ +tests/*.log 
+tests/compliance_test.env \ No newline at end of file diff --git a/keepercommander/api.py b/keepercommander/api.py index 392c57730..71a5b1875 100644 --- a/keepercommander/api.py +++ b/keepercommander/api.py @@ -812,7 +812,7 @@ def execute_router_json(params, endpoint, request): return None -def communicate_rest(params, request, endpoint, *, rs_type=None, payload_version=None): +def communicate_rest(params, request, endpoint, *, rs_type=None, payload_version=None, timeout=None): api_request_payload = APIRequest_pb2.ApiRequestPayload() if params.session_token: api_request_payload.encryptedSessionToken = utils.base64_url_decode(params.session_token) @@ -824,7 +824,7 @@ def communicate_rest(params, request, endpoint, *, rs_type=None, payload_version if isinstance(payload_version, int): api_request_payload.apiVersion = payload_version - rs = rest_api.execute_rest(params.rest_context, endpoint, api_request_payload) + rs = rest_api.execute_rest(params.rest_context, endpoint, api_request_payload, timeout=timeout) if isinstance(rs, bytes): TTK.update_time_of_last_activity() if rs_type: @@ -856,7 +856,7 @@ def communicate(params, request, retry_on_throttle=True): response_json = run_command(params, request) if response_json['result'] != 'success': if retry_on_throttle and response_json.get('result_code') == 'throttled': - logging.info('Throttled. 
sleeping for 10 seconds') + logging.debug('Throttled, retrying in 10 seconds') time.sleep(10) # Allow maximum 1 retry per call return communicate(params, request, retry_on_throttle=False) diff --git a/keepercommander/commands/compliance.py b/keepercommander/commands/compliance.py index 054e55a9b..81657ee2a 100644 --- a/keepercommander/commands/compliance.py +++ b/keepercommander/commands/compliance.py @@ -17,6 +17,8 @@ from ..params import KeeperParams from ..sox import sox_types, get_node_id from ..sox.sox_data import SoxData +from ..sox.storage_types import StorageRecordAging +from .aram import API_EVENT_SUMMARY_ROW_LIMIT compliance_parser = argparse.ArgumentParser(add_help=False, parents=[report_output_parser]) rebuild_group = compliance_parser.add_mutually_exclusive_group() @@ -26,17 +28,19 @@ compliance_parser.add_argument('--no-cache', '-nc', action='store_true', help='remove any local non-memory storage of data after report is generated') compliance_parser.add_argument('--node', action='store', help='ID or name of node (defaults to root node)') +username_opt_help = 'user(s) whose records are to be included in report (set option once per user)' +compliance_parser.add_argument('--username', '-u', action='append', help=username_opt_help) +team_opt_help = 'name or UID of team(s) whose members\' records are to be included in report (set once per team)' +compliance_parser.add_argument('--team', action='append', help=team_opt_help) compliance_parser.add_argument('--regex', action='store_true', help='Allow use of regular expressions in search criteria') compliance_parser.add_argument('pattern', type=str, nargs='*', help='Search string / pattern to filter results by. 
Multiple values allowed.') +aging_help = 'include record-aging data (last modified, created, and last password rotation dates)' + default_report_parser = argparse.ArgumentParser(prog='compliance report', description='Run a compliance report.', parents=[compliance_parser]) -username_opt_help = 'user(s) whose records are to be included in report (set option once per user)' -default_report_parser.add_argument('--username', '-u', action='append', help=username_opt_help) job_title_opt_help = 'job title(s) of users whose records are to be included in report (set option once per job title)' default_report_parser.add_argument('--job-title', '-jt', action='append', help=job_title_opt_help) -team_opt_help = 'name or UID of team(s) whose members\' records are to be included in report (set once per team)' -default_report_parser.add_argument('--team', action='append', help=team_opt_help) record_search_help = 'UID or title of record(s) to include in report (set once per record). To allow non-exact ' \ 'matching on record titles, include "*" where appropriate (e.g., to include records with titles' \ ' ending in "Login", set option value to "*Login")' @@ -50,11 +54,14 @@ help='show deleted records only (not valid with --active-items flag)') deleted_status_group.add_argument('--active-items', action='store_true', help='show active records only (not valid with --deleted-items flag)') +default_report_parser.add_argument('--aging', action='store_true', help=aging_help) team_report_desc = 'Run a report showing which shared folders enterprise teams have access to' team_report_parser = argparse.ArgumentParser(prog='compliance team-report', description=team_report_desc, parents=[compliance_parser]) team_report_parser.add_argument('-tu', '--show-team-users', action='store_true', help='show all members of each team') +team_report_parser.add_argument('--resolve-teams', action='store_true', + help='expand --team filter to also match shared folders where team members are individually present') 
access_report_desc = 'Run a report showing all records a user has accessed or can access' access_report_parser = argparse.ArgumentParser(prog='compliance record-access-report', description=access_report_desc, @@ -66,8 +73,7 @@ ACCESS_REPORT_TYPES = ('history', 'vault') access_report_parser.add_argument('--report-type', action='store', choices=ACCESS_REPORT_TYPES, default='history', help=report_type_help) -aging_help = 'include record-aging data (last modified, created, and last password rotation dates)' -access_report_parser.add_argument('--aging', action='store_true', help=aging_help) +access_report_parser.add_argument('--aging', action='store_true', help=aging_help) summary_report_desc = 'Run a summary compliance report' summary_report_parser = argparse.ArgumentParser(prog='compliance summary-report', description=summary_report_desc, @@ -76,6 +82,8 @@ sf_report_parser = argparse.ArgumentParser(prog='compliance shared-folder-report', description=sf_report_desc, parents=[compliance_parser]) sf_report_parser.add_argument('-tu', '--show-team-users', action='store_true', help='show all members of each team') +sf_report_parser.add_argument('--resolve-teams', action='store_true', + help='expand --team filter to also match shared folders where team members are individually present') def register_commands(commands): @@ -96,6 +104,202 @@ def get_team_usernames(sdata, team): # type: (SoxData, sox_types.Team) -> List[ return [get_email(sdata, userid) for userid in team.users] +def get_all_folder_user_uids(folder, team_lookup): + """All user UIDs with access to a folder: direct members + team members.""" + all_uids = set(folder.users) + for team_uid in folder.teams: + team = team_lookup.get(team_uid) + if team: + all_uids.update(team.users) + return all_uids + + +def _from_ts(ts): + return datetime.datetime.fromtimestamp(ts) if ts else None + + +def get_aging_data(params, sox_data, rec_ids, use_spinner=True, stale_rec_ids=None): + """Fetch/cache record-aging data for given 
record UIDs. + + stale_rec_ids: If provided, only fetch fresh data for these UIDs (per-user mode). + If None, standard 1-day TTL for all records. + """ + if not rec_ids: + return {} + aging_data = {r: {'created': None, 'last_modified': None, 'last_rotation': None, 'last_pw_change': None} + for r in rec_ids if r} + max_cache_age_ts = int((datetime.datetime.now() - datetime.timedelta(days=1)).timestamp()) + stored_aging_data = {} + stored_entities = sox_data.storage.get_record_aging().get_all() + stored_aging_data = { + e.record_uid: { + 'created': _from_ts(e.created), + 'last_modified': _from_ts(e.last_modified), + 'last_rotation': _from_ts(e.last_rotation), + 'last_pw_change': _from_ts(e.last_pw_change), + '_last_cached': e.last_cached or 0, + } + for e in stored_entities if e.record_uid + } + aging_data.update(stored_aging_data) + + types_by_aging_event = dict( + created = [], + last_modified = ['record_update'], + last_rotation = ['record_rotation_scheduled_ok', 'record_rotation_on_demand_ok'], + last_pw_change = ['record_password_change'] + ) + + def get_known_aging_data(event_type): + """Return cached records fetched within the 1-day TTL. + + Checks _last_cached timestamp (when we stored the data), not the event value itself. + A record with last_pw_change=None but recent _last_cached means "checked, no event found" + and should not be re-fetched until the cache expires. 
+ """ + return {r: events.get(event_type) for r, events in stored_aging_data.items() + if events.get('_last_cached', 0) >= max_cache_age_ts} + + def get_request_params(record_aging_event): + # type: (str) -> Tuple[List[str], Union[List[str], None], Optional[str], Optional[str]] + known_events_map = get_known_aging_data(record_aging_event) + if stale_rec_ids is not None: + filter_recs = [uid for uid in rec_ids if uid in stale_rec_ids and uid not in known_events_map] + else: + filter_recs = [uid for uid in rec_ids if uid not in known_events_map] + filter_types = types_by_aging_event.get(record_aging_event) + order, aggregate = ('ascending', 'first_created') if record_aging_event == 'created' \ + else ('descending', 'last_created') + return filter_recs, filter_types, order, aggregate + + def get_requests(filter_recs, filter_type, order='descending', aggregate='last_created'): + columns = ['record_uid'] + requests = [] + while filter_recs: + chunk = filter_recs[:API_EVENT_SUMMARY_ROW_LIMIT] + filter_recs = filter_recs[API_EVENT_SUMMARY_ROW_LIMIT:] + rq_filter = {'record_uid': chunk} + if filter_type: rq_filter.update({'audit_event_type': filter_type}) + request = dict( + command = 'get_audit_event_reports', + report_type = 'span', + scope = 'enterprise', + aggregate = [aggregate], + limit = API_EVENT_SUMMARY_ROW_LIMIT, + filter = rq_filter, + columns = columns, + order = order + ) + requests.append(request) + return requests + + # Pre-compute request params once per stat (used for spinner check and fetch) + aging_stats = ['created', 'last_modified', 'last_rotation', 'last_pw_change'] + params_by_stat = {stat: get_request_params(stat) for stat in aging_stats} + + # Build all requests across all stats for a single execute_batch call + all_requests = [] + request_slices = {} # stat -> (start_idx, count) into the combined response list + for stat in aging_stats: + stat_requests = get_requests(*params_by_stat[stat]) + request_slices[stat] = (len(all_requests), 
len(stat_requests)) + all_requests.extend(stat_requests) + + # Additional request: count event 80 occurrences per record to detect first-sets + pw_filter_recs = params_by_stat['last_pw_change'][0] + pw_count_requests = get_requests(pw_filter_recs, ['record_password_change'], aggregate='occurrences') + request_slices['_pw_count'] = (len(all_requests), len(pw_count_requests)) + all_requests.extend(pw_count_requests) + + spinner = None + try: + if use_spinner and all_requests: + spinner = Spinner('Loading record aging events...') + spinner.start() + if all_requests: + responses = api.execute_batch(params, all_requests) + else: + responses = [] + finally: + if spinner: + spinner.stop() + + record_events_by_stat = {} + for stat in aging_stats: + start, count = request_slices[stat] + stat_responses = responses[start:start + count] + events = list(itertools.chain.from_iterable( + rs.get('audit_event_overview_report_rows', []) for rs in stat_responses + )) + aggregate = 'first_created' if stat == 'created' else 'last_created' + record_timestamps = {e.get('record_uid'): e.get(aggregate) for e in events if e.get('record_uid')} + record_events_by_stat[stat] = {rec: _from_ts(ts) for rec, ts in record_timestamps.items()} + + # Parse event 80 occurrence counts + pw_count_start, pw_count_n = request_slices['_pw_count'] + pw_count_responses = responses[pw_count_start:pw_count_start + pw_count_n] + pw_count_events = list(itertools.chain.from_iterable( + rs.get('audit_event_overview_report_rows', []) for rs in pw_count_responses + )) + pw_occurrences = {e.get('record_uid'): int(e.get('occurrences', 0)) + for e in pw_count_events if e.get('record_uid')} + + fetched_rec_ids = set() + for stat, record_event_dts in record_events_by_stat.items(): + for record, dt in record_event_dts.items(): + aging_data.get(record, {}).update({stat: dt}) + stat == 'created' and aging_data.get(record, {}).setdefault('last_modified', dt) + fetched_rec_ids.add(record) + + # KC-1146: discard first-set false 
positives for record_password_change. + # Event 80 is client-side and fires on initial password set (at creation or later edit), + # not just on actual password rotation. If a record has only 1 occurrence of event 80, + # it's a first-set — not a real password change. + for record in pw_occurrences: + if pw_occurrences[record] <= 1 and aging_data.get(record, {}).get('last_pw_change'): + aging_data[record]['last_pw_change'] = None + + if fetched_rec_ids: + save_aging_data(sox_data, aging_data, fetched_rec_ids) + + # Union last_pw_change (event 80) with last_rotation (events 250/252) for comprehensive signal. + # These are disjoint event sets: event 80 = client-side manual changes, + # events 250/252 = router-side PAM rotations. Both represent real password changes. + for record, events in aging_data.items(): + pw_change_dt = events.get('last_pw_change') + rotation_dt = events.get('last_rotation') + if rotation_dt and (not pw_change_dt or rotation_dt > pw_change_dt): + events['last_pw_change'] = rotation_dt + + return aging_data + + +def save_aging_data(sox_data, aging_data, dirty_rec_ids=None): + """Persist aging data. 
If dirty_rec_ids given, only write those records.""" + existing_entities = sox_data.storage.get_record_aging() + now_ts = int(datetime.datetime.now().timestamp()) + updated_entities = [] + records_to_save = dirty_rec_ids if dirty_rec_ids is not None else aging_data.keys() + for r in records_to_save: + if not r: + continue + events = aging_data.get(r) + if not events: + continue + entity = existing_entities.get_entity(r) or StorageRecordAging(r) + created_dt = events.get('created') + entity.created = int(created_dt.timestamp()) if created_dt else 0 + pw_change_dt = events.get('last_pw_change') + entity.last_pw_change = int(pw_change_dt.timestamp()) if pw_change_dt else 0 + modified_dt = events.get('last_modified') + entity.last_modified = int(modified_dt.timestamp()) if modified_dt else 0 + rotation_dt = events.get('last_rotation') + entity.last_rotation = int(rotation_dt.timestamp()) if rotation_dt else 0 + entity.last_cached = now_ts + updated_entities.append(entity) + sox_data.storage.record_aging.put_entities(updated_entities) + + class ComplianceCommand(GroupCommand): def __init__(self): super(ComplianceCommand, self).__init__() @@ -150,6 +354,7 @@ def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any # Pre-filter users for --username and --team to avoid fetching data for all enterprise users user_filter = None + explicit_user_ids = set() usernames = kwargs.get('username') team_refs = kwargs.get('team') if usernames or team_refs: @@ -158,7 +363,8 @@ def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any if usernames: username_set = set(usernames) user_lookup = {eu.get('username'): eu.get('enterprise_user_id') for eu in enterprise_users} - filtered_user_ids.update(uid for u, uid in user_lookup.items() if u in username_set) + explicit_user_ids.update(uid for u, uid in user_lookup.items() if u in username_set) + filtered_user_ids.update(explicit_user_ids) if team_refs: enterprise_teams = params.enterprise.get('teams', []) 
team_uids = {t.get('team_uid') for t in enterprise_teams} @@ -176,12 +382,17 @@ def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any logging.warning('No enterprise users matched the provided filters (usernames=%s, teams=%s).', usernames, team_refs) user_filter = filtered_user_ids if filtered_user_ids else None + kwargs['_team_filter'] = resolved_team_uids if team_refs else None + if kwargs.get('resolve_teams') and team_refs: + explicit_user_ids.update(filtered_user_ids) + kwargs['_explicit_user_filter'] = explicit_user_ids if explicit_user_ids else None get_sox_data_fn = sox.get_prelim_data if self.prelim_only else sox.get_compliance_data fn_args = [params, enterprise_id] if self.prelim_only else [params, node_id, enterprise_id] fn_kwargs = {'rebuild': rebuild, 'min_updated': min_data_ts, 'no_cache': no_cache, 'shared_only': shared_only, 'user_filter': user_filter} sd = get_sox_data_fn(*fn_args, **fn_kwargs) + kwargs['_user_filter'] = user_filter report_fmt = kwargs.get('format', 'table') report_data = self.generate_report_data(params, kwargs, sd, report_fmt, node_id, root_node_id) patterns = kwargs.get('pattern', []) @@ -219,7 +430,7 @@ def show_help_text(self, local_data): # type: (sox.sox_data.SoxData) -> None logging.info(msg) help_txt = "\nGet record and sharing information from all vaults in the enterprise\n" \ "Format:\ncompliance-report [-h] [--rebuild] [--no-cache] [--node NODE] [--username USERNAME] " \ - "[--job-title JOB_TITLE] [--team TEAM] [--record RECORD] [--url DOMAIN] [--shared] " \ + "[--job-title JOB_TITLE] [--team TEAM] [--record RECORD] [--url DOMAIN] [--shared] [--aging] " \ "[--format {table,csv,json,pdf}] [--output OUTPUT] " \ "\n\nExamples:" \ "\nSee all records for a user" \ @@ -239,6 +450,11 @@ def show_help_text(self, local_data): # type: (sox.sox_data.SoxData) -> None def generate_report_data(self, params, kwargs, sox_data, report_fmt, node, root_node): # type: (KeeperParams, Dict[str, Any], SoxData, str, int, int) 
-> List[List[Union[str, Any]]] + aging = kwargs.get('aging') + aging_columns = ['created', 'last_pw_change', 'last_modified', 'last_rotation'] if aging else [] + self.report_headers = ['record_uid', 'title', 'type', 'username', 'permissions', + 'url', 'in_trash', 'shared_folder_uid'] + aging_columns + def filter_owners(rec_owners): def filter_by_teams(users, teams): enterprise_teams = params.enterprise.get('teams', []) @@ -349,6 +565,52 @@ def format_table(rows): return formatted_rows report_data = format_table(table) + + if aging: + all_rec_uids = {key[0] for key in permissions_lookup.keys()} + stale_rec_ids = None + user_filter = kwargs.get('_user_filter') + if user_filter is not None: + min_aging_ts = int((datetime.datetime.now() - datetime.timedelta(days=1)).timestamp()) + stale_rec_ids = set() + for uid in user_filter: + user_entity = sox_data.storage.get_users().get_entity(uid) + if not user_entity or (user_entity.last_aging_refreshed or 0) < min_aging_ts: + user = sox_data.get_user(uid) + if user: + stale_rec_ids.update(user.records & all_rec_uids) + + aging_data = get_aging_data(params, sox_data, all_rec_uids, + use_spinner=not params.batch_mode, + stale_rec_ids=stale_rec_ids) + + # Update last_aging_refreshed for stale users + if user_filter is not None: + now_ts = int(datetime.datetime.now().timestamp()) + updated_users = [] + for uid in user_filter: + user_entity = sox_data.storage.get_users().get_entity(uid) + if user_entity and (user_entity.last_aging_refreshed or 0) < min_aging_ts: + user_entity.last_aging_refreshed = now_ts + updated_users.append(user_entity) + if updated_users: + sox_data.storage.get_users().put_entities(updated_users) + + # Append aging columns to each row + record_lookup = sox_data.get_records() + for fmt_row in report_data: + rec_uid = fmt_row[0] + # Handle collapsed UIDs in table format + if not rec_uid and report_data: + idx = report_data.index(fmt_row) + for i in range(idx - 1, -1, -1): + if report_data[i][0]: + rec_uid = 
report_data[i][0] + break + rec_aging = aging_data.get(rec_uid, {}) + fmt_row.extend([rec_aging.get('created'), rec_aging.get('last_pw_change'), + rec_aging.get('last_modified'), rec_aging.get('last_rotation')]) + return report_data @@ -367,12 +629,22 @@ def get_sf_name(uid): return sf.get('name_unencrypted', '') show_team_users = kwargs.get('show_team_users') + explicit_user_filter = kwargs.get('_explicit_user_filter') + team_filter = kwargs.get('_team_filter') shared_folders = sox_data.get_shared_folders().items() team_lookup = sox_data.get_teams() report_data = [] for sf_uid, folder in shared_folders: + if explicit_user_filter or team_filter: + all_folder_uids = get_all_folder_user_uids(folder, team_lookup) + matches_user = explicit_user_filter and all_folder_uids & explicit_user_filter + matches_team = team_filter and folder.teams & team_filter + if not matches_user and not matches_team: + continue num_recs = len(folder.record_permissions) if folder.record_permissions else 0 for team_uid in folder.teams: + if team_filter and team_uid not in team_filter and not matches_user: + continue team = team_lookup.get(team_uid) perms = [not team.restrict_share and 'Can Share', not team.restrict_edit and 'Can Edit'] perms = [p for p in perms if p] @@ -407,6 +679,20 @@ def get_parser(self): # type: () -> Optional[argparse.ArgumentParser] def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any kwargs['shared'] = True + emails = kwargs.get('email') or ['@all'] + if '@all' not in emails: + enterprise_users = params.enterprise.get('users', []) + id_to_email = {eu.get('enterprise_user_id'): eu.get('username') for eu in enterprise_users} + resolved_emails = [] + for ref in emails: + if ref.isdigit(): + email = id_to_email.get(int(ref)) + if email: + resolved_emails.append(email) + else: + resolved_emails.append(ref) + if resolved_emails: + kwargs['username'] = resolved_emails return super().execute(params, **kwargs) def generate_report_data(self, params, kwargs, 
sox_data, report_fmt, node, root_node): @@ -464,9 +750,6 @@ def format_datetime(dt_str): ts = datetime.datetime.fromisoformat(dt_str).timestamp() return datetime.datetime.fromtimestamp(int(ts)) - def from_ts(ts): - return datetime.datetime.fromtimestamp(ts) if ts else None - def compile_user_report(user, access_events): accessed_records = dict() rec_uids = access_events.keys() if report_type == report_type_default \ @@ -486,144 +769,43 @@ def compile_user_report(user, access_events): 'in_trash': sox_rec.in_trash if sox_rec else None, 'ip_address': access_event.get('ip_address'), 'device': access_event.get('keeper_version'), - 'last_access': from_ts(int(event_ts)) if event_ts else None, + 'last_access': _from_ts(int(event_ts)) if event_ts else None, 'vault_owner': user}} accessed_records.update(accessed_record) return accessed_records - def get_aging_data(rec_ids): - if not rec_ids: - return {} - aging_data = {r: {'created': None, 'last_modified': None, 'last_rotation': None, 'last_pw_change': None} - for r in rec_ids if r} - now = datetime.datetime.now() - max_stored_age_dt = now - datetime.timedelta(days=1) - max_stored_age_ts = int(max_stored_age_dt.timestamp()) - stored_aging_data = {} - if not kwargs.get('no_cache'): - stored_entities = sox_data.storage.get_record_aging().get_all() - stored_aging_data = { - e.record_uid: { - 'created': from_ts(e.created), - 'last_modified': from_ts(e.last_modified), - 'last_rotation': from_ts(e.last_rotation), - 'last_pw_change': from_ts(e.last_pw_change), - } - for e in stored_entities if e.record_uid - } - aging_data.update(stored_aging_data) - - def get_requests(filter_recs, filter_type, order='descending', aggregate='last_created'): - columns = ['record_uid'] - requests = [] - while filter_recs: - chunk = filter_recs[:API_EVENT_SUMMARY_ROW_LIMIT] - filter_recs = filter_recs[API_EVENT_SUMMARY_ROW_LIMIT:] - rq_filter = {'record_uid': chunk} - if filter_type: rq_filter.update({'audit_event_type': filter_type}) - request = 
dict( - command = 'get_audit_event_reports', - report_type = 'span', - scope = 'enterprise', - aggregate = [aggregate], - limit = API_EVENT_SUMMARY_ROW_LIMIT, - filter = rq_filter, - columns = columns, - order = order - ) - requests.append(request) - return requests - - def get_request_params(record_aging_event): - # type: (str) -> Tuple[List[str], Union[List[str], None], Optional[str], Optional[str]] - known_events_map = get_known_aging_data(record_aging_event) - filter_recs = [uid for uid in rec_ids if uid not in known_events_map] - types_by_aging_event = dict( - created = [], - last_modified = ['record_update'], - last_rotation = ['record_rotation_scheduled_ok', 'record_rotation_on_demand_ok'], - last_pw_change = ['record_password_change'] - ) - filter_types = types_by_aging_event.get(record_aging_event) - order, aggregate = ('ascending', 'first_created') if record_aging_event == 'created' \ - else ('descending', 'last_created') - return filter_recs, filter_types, order, aggregate - - def fetch_events(requests): - return list( - itertools.chain.from_iterable( - [rs.get('audit_event_overview_report_rows', []) for rs in api.execute_batch(params, requests)] - ) - ) - - def get_aging_events(aging_prop): - req_params = get_request_params(aging_prop) - requests = get_requests(*req_params) - return fetch_events(requests) - - def get_known_aging_data(event_type): - return {r: events.get(event_type) for r, events in stored_aging_data.items() if events.get(event_type) or 0 >= max_stored_age_ts} - - def get_aging_event_dts(event_type): - events = get_aging_events(event_type) - aggregate = 'first_created' if event_type == 'created' else 'last_created' - record_timestamps = {event.get('record_uid'): event.get(aggregate) for event in events if event.get('record_uid')} - return {rec: from_ts(ts) for rec, ts in record_timestamps.items()} - - aging_stats = ['created', 'last_modified', 'last_rotation', 'last_pw_change'] - spinner = None - try: - if use_spinner: - 
should_fetch_events = any(get_request_params(stat)[0] for stat in aging_stats) - if should_fetch_events: - spinner = Spinner('Loading record aging events...') - spinner.start() - record_events_by_stat = {} - for stat in aging_stats: - if spinner: - spinner.message = f'Loading record aging events - {stat}' - record_events_by_stat[stat] = get_aging_event_dts(stat) - finally: - if spinner: - spinner.stop() - for stat, record_event_dts in record_events_by_stat.items(): - for record, dt in record_event_dts.items(): - aging_data.get(record, {}).update({stat: dt}) - stat == 'created' and aging_data.get(record, {}).setdefault('last_modified', dt) - - for record, events in aging_data.items(): - if events.get('last_pw_change') is None: - events['last_pw_change'] = events.get('created') - - if not kwargs.get('no_cache'): - save_aging_data(aging_data) - return aging_data - - def save_aging_data(aging_data): - existing_entities = sox_data.storage.get_record_aging() - updated_entities = [] - for r, events in aging_data.items(): - if not r: - continue - entity = existing_entities.get_entity(r) or StorageRecordAging(r) - created_dt = events.get('created') - created_ts = int(created_dt.timestamp()) if created_dt else 0 - pw_change_dt = events.get('last_pw_change') - pw_change_ts = int(pw_change_dt.timestamp()) if pw_change_dt else 0 - modified_dt = events.get('last_modified') - modified_ts = int(modified_dt.timestamp()) if modified_dt else 0 - rotation_dt = events.get('last_rotation') - rotation_ts = int(rotation_dt.timestamp()) if rotation_dt else 0 - - entity.created = created_ts - entity.last_pw_change = pw_change_ts - entity.last_modified = modified_ts - entity.last_rotation = rotation_ts - updated_entities.append(entity) - sox_data.storage.record_aging.put_entities(updated_entities) - def compile_report_data(rec_ids): - aging_data = get_aging_data(rec_ids) + # Per-user aging cache invalidation when specific users selected + stale_rec_ids = None + is_filtered = '@all' not in 
(kwargs.get('email') or ['@all']) + email_to_uid = {v: k for k, v in user_lookup.items()} + user_filter_uids = {email_to_uid[e] for e in usernames if e in email_to_uid} if is_filtered else None + min_aging_ts = int((datetime.datetime.now() - datetime.timedelta(days=1)).timestamp()) + if rec_ids and user_filter_uids is not None: + stale_rec_ids = set() + for uid in user_filter_uids: + user_entity = sox_data.storage.get_users().get_entity(uid) + if not user_entity or (user_entity.last_aging_refreshed or 0) < min_aging_ts: + user_email = user_lookup.get(uid) + user_recs = user_access_lookup.get(user_email, {}) + stale_rec_ids.update(user_recs.keys() & rec_ids) + + aging_data = get_aging_data(params, sox_data, rec_ids, + use_spinner=use_spinner, + stale_rec_ids=stale_rec_ids) + + # Update last_aging_refreshed for stale users + if user_filter_uids is not None: + now_ts = int(datetime.datetime.now().timestamp()) + updated_users = [] + for uid in user_filter_uids: + user_entity = sox_data.storage.get_users().get_entity(uid) + if user_entity and (user_entity.last_aging_refreshed or 0) < min_aging_ts: + user_entity.last_aging_refreshed = now_ts + updated_users.append(user_entity) + if updated_users: + sox_data.storage.get_users().put_entities(updated_users) + for email, records in user_access_lookup.items(): for uid, access_data in records.items(): row = [email, uid] @@ -669,8 +851,6 @@ def get_records_accessed(emails, limit_to_vault=False): spinner.stop() return records_accessed_by_user - from ..sox.storage_types import StorageRecordAging - from .aram import API_EVENT_SUMMARY_ROW_LIMIT from .enterprise import EnterpriseInfoCommand report_data = [] @@ -722,6 +902,7 @@ def get_row(u): return email, num_total, num_owned, num_active, num_deleted filter_by_node = node != root_node + user_filter = kwargs.get('_user_filter') from .enterprise import EnterpriseInfoCommand cmd = EnterpriseInfoCommand() cmd_kwargs = { @@ -735,10 +916,12 @@ def get_row(u): managed_users = [mu for mu 
in managed_users if mu.get('status', '').lower() != 'invited'] managed_user_email_lookup = {mu.get('user_id'): mu.get('email') for mu in managed_users} managed_user_ids = set(managed_user_email_lookup.keys()) + if user_filter is not None: + managed_user_ids &= user_filter empty_vault_user_ids = {user_id for user_id in managed_user_ids if user_id not in sox_data.get_users()} empty_vault_users = [mu for mu in managed_users if mu.get('user_id') in empty_vault_user_ids] sox_users = sox_data.get_users().values() - report_data = [get_row(u) for u in sox_users if not filter_by_node or u.user_uid in managed_user_ids] + report_data = [get_row(u) for u in sox_users if u.user_uid in managed_user_ids] report_data.extend([(u.get('email'), 0, 0, 0, 0) for u in empty_vault_users]) total_active = sum([num_active for _, _, _, num_active, _ in report_data]) total_deleted = sum([num_deleted for _, _, _, _, num_deleted in report_data]) @@ -759,12 +942,20 @@ def generate_report_data(self, params, kwargs, sox_data, report_fmt, node, root_ show_team_users = kwargs.get('show_team_users') team_users_title = '(TU) denotes a user whose membership in a team grants them access to the shared folder' self.title = team_users_title if show_team_users else None + explicit_user_filter = kwargs.get('_explicit_user_filter') + team_filter = kwargs.get('_team_filter') sfs = sox_data.get_shared_folders() teams = sox_data.get_teams() report_data = [] for sfuid, sf in sfs.items(): + if explicit_user_filter or team_filter: + all_folder_uids = get_all_folder_user_uids(sf, teams) + matches_user = explicit_user_filter and all_folder_uids & explicit_user_filter + matches_team = team_filter and sf.teams & team_filter + if not matches_user and not matches_team: + continue sf_team_uids = list(sf.teams) - sf_team_names = [teams.get(t).team_name for t in sf.teams] + sf_team_names = [teams.get(t).team_name for t in sf_team_uids] records = [rp.record_uid for rp in sf.record_permissions] users = [get_email(sox_data, u) 
for u in sf.users] team_users = [tu for tuid in sf_team_uids for tu in get_team_usernames(sox_data, teams.get(tuid))] if show_team_users else [] diff --git a/keepercommander/rest_api.py b/keepercommander/rest_api.py index 4e1a4d0a3..43af276ba 100644 --- a/keepercommander/rest_api.py +++ b/keepercommander/rest_api.py @@ -130,8 +130,11 @@ def encrypt_with_keeper_key(context, data: bytes) -> bytes: raise KeeperApiError('invalid_key_id', f'Key ID \"{key_id}\" is not valid.') -def execute_rest(context, endpoint, payload): - # type: (RestApiContext, str, proto.ApiRequestPayload) -> Optional[Union[bytes, dict]] +DEFAULT_TIMEOUT = (15, 120) + + +def execute_rest(context, endpoint, payload, timeout=None): + # type: (RestApiContext, str, proto.ApiRequestPayload, ...) -> Optional[Union[bytes, dict]] if not context.transmission_key: context.transmission_key = os.urandom(32) @@ -193,7 +196,8 @@ def execute_rest(context, endpoint, payload): try: rs = requests.post(url, data=request_data, headers={'Content-Type': 'application/octet-stream'}, - proxies=context.proxies, verify=context.certificate_check) + proxies=context.proxies, verify=context.certificate_check, + timeout=timeout or DEFAULT_TIMEOUT) except requests.exceptions.SSLError as e: doc_url = 'https://docs.keeper.io/secrets-manager/commander-cli/using-commander/troubleshooting-commander-cli#ssl-certificate-errors' if len(e.args) > 0: @@ -247,7 +251,7 @@ def execute_rest(context, endpoint, payload): continue elif rs.status_code == 403: if failure.get('error') == 'throttled' and not context.fail_on_throttle: - logging.info('Throttled. 
sleeping for 10 seconds') + logging.debug('Throttled, retrying in 10 seconds') time.sleep(10) run_request = True continue diff --git a/keepercommander/sox/__init__.py b/keepercommander/sox/__init__.py index b69b92227..315b2442d 100644 --- a/keepercommander/sox/__init__.py +++ b/keepercommander/sox/__init__.py @@ -5,6 +5,8 @@ import sys from typing import Dict, Optional, Set, Tuple +import requests + from .. import api, crypto, utils from ..display import Spinner @@ -38,6 +40,7 @@ def close_cached_connection(database_name): # type: (str) -> None API_SOX_REQUEST_USER_LIMIT = 1000 API_SOX_MAX_USERS_PER_REQUEST = 5000 # Server limit: MAX_CHOSEN_ENTERPRISE_USERS +API_SOX_MAX_RECORDS_PER_REQUEST = 1000 # Server limit: MAX_REPORTED_RECORD_LIMIT def validate_data_access(params, cmd=''): @@ -123,9 +126,21 @@ def to_user_record_link(uuid, ruid): record_ents} return user_ent, record_ents, user_rec_links - def print_status(users_loaded, users_total, records_loaded, records_total): + def print_status(users_loaded, users_total, records_loaded, records_total, + chunk_size=None, batch_loaded=0, batch_total=0): + if records_total > 0 and users_loaded < users_total and users_loaded > 0: + estimated_total = int(records_total * users_total / users_loaded) + records_str = f'{records_loaded}/~{estimated_total}' + elif records_total > 0: + records_str = f'{records_loaded}/{records_total}' + else: + records_str = str(records_loaded) message = (f'Loading record information - Users: {users_loaded}/{users_total}, ' - f'Current Batch: {records_loaded}/{records_total}') + f'Records: {records_str}') + if batch_total > 0: + message += f', Batch: {batch_loaded}/{batch_total}' + if chunk_size is not None: + message += f' (querying {chunk_size} user{"s" if chunk_size != 1 else ""})' if spinner: spinner.message = message return @@ -133,14 +148,17 @@ def print_status(users_loaded, users_total, records_loaded, records_total): print(f'\r{message}', file=sys.stderr, end='', flush=True) def sync_all(): + 
PRELIM_PAGE_LIMIT = 10000 + PROBE_TIMEOUT = (15, 30) start_spinner() user_ids = list(user_lookup.keys()) users_total = len(user_ids) records_total = 0 print_status(0, users_total, 0, records_total) users, records, links = [], [], [] - # Start with reasonable chunk size, back off on timeout - chunk_size = min(100, API_SOX_REQUEST_USER_LIMIT) + chunk_size = min(5, len(user_ids)) + avg_records_per_user = 0 + total_records_loaded = 0 problem_ids = set() while user_ids: token = b'' @@ -151,51 +169,89 @@ def sync_all(): rq.includeNonShared = not shared_only has_more = True current_batch_loaded = 0 + chunk_total = 0 + is_first_page = True + seen_user_ids = [] while has_more: rq.continuationToken = token or rq.continuationToken rq.includeTotalMatchingRecordsInFirstResponse = True endpoint = 'enterprise/get_preliminary_compliance_data' rs_type = enterprise_pb2.PreliminaryComplianceDataResponse try: - rs = api.communicate_rest(params, rq, endpoint, rs_type=rs_type) + call_timeout = PROBE_TIMEOUT if is_first_page and chunk_size > 1 else None + rs = api.communicate_rest(params, rq, endpoint, rs_type=rs_type, timeout=call_timeout) + is_first_page = False has_more = rs.hasMore if rs.totalMatchingRecords: current_batch_loaded = 0 - records_total = rs.totalMatchingRecords - # Ramp up on success (regardless of record count) - if chunk_size < API_SOX_REQUEST_USER_LIMIT: - chunk_size = min(chunk_size * 2, API_SOX_REQUEST_USER_LIMIT) + chunk_total = rs.totalMatchingRecords + records_total += chunk_total token = rs.continuationToken for user_data in rs.auditUserData: t_user, t_recs, t_links = to_storage_types(user_data, name_by_id) users += [t_user] records += t_recs current_batch_loaded += len(t_recs) - print_status(users_total - len(user_ids), users_total, current_batch_loaded, records_total) + total_records_loaded += len(t_recs) + if t_user.user_uid not in seen_user_ids: + seen_user_ids.append(t_user.user_uid) + print_status(users_total - len(user_ids), users_total, 
total_records_loaded, records_total, + len(chunk), current_batch_loaded, chunk_total) links += t_links + if chunk_total > PRELIM_PAGE_LIMIT and len(chunk) > 1 and has_more: + avg_records_per_user = chunk_total / len(chunk) + chunk_size = max(1, int(PRELIM_PAGE_LIMIT / avg_records_per_user)) + complete_ids = set(seen_user_ids[:-1]) if seen_user_ids else set() + requeue_ids = [uid for uid in chunk if uid not in complete_ids] + records_total -= (chunk_total - current_batch_loaded) + user_ids = [*requeue_ids, *user_ids] + break if not has_more: - print_status(users_total - len(user_ids), users_total, records_total, records_total) + print_status(users_total - len(user_ids), users_total, total_records_loaded, records_total, + len(chunk), current_batch_loaded, chunk_total) + except requests.exceptions.Timeout: + records_total -= chunk_total + complete_ids = set(seen_user_ids[:-1]) if seen_user_ids else set() + requeue_ids = [uid for uid in chunk if uid not in complete_ids] + if chunk_size > 1: + if avg_records_per_user > 0: + chunk_size = max(1, int(PRELIM_PAGE_LIMIT / avg_records_per_user)) + else: + chunk_size = max(1, chunk_size // 2) + user_ids = [*requeue_ids, *user_ids] + break except KeeperApiError as kae: if kae.message.lower() == 'gateway_timeout': - # Break up the request if the number of corresponding records exceeds the backend's limit - if chunk_size > 1: - chunk_size = max(1, chunk_size // 4) # Back off gradually - user_ids = [*chunk, *user_ids] + records_total -= chunk_total + complete_ids = set(seen_user_ids[:-1]) if seen_user_ids else set() + requeue_ids = [uid for uid in chunk if uid not in complete_ids] + if len(requeue_ids) > 1 or chunk_size > 1: + if avg_records_per_user > 0: + chunk_size = max(1, int(PRELIM_PAGE_LIMIT / avg_records_per_user)) + else: + chunk_size = max(1, chunk_size // 2) + user_ids = [*requeue_ids, *user_ids] else: - problem_ids.update(chunk) + problem_ids.update(requeue_ids) break else: - raise kae + raise except Exception as ex: 
raise ex + else: + if chunk_total > 0 and len(chunk) > 0: + avg_records_per_user = chunk_total / len(chunk) + if avg_records_per_user > 0: + chunk_size = max(1, min(int(PRELIM_PAGE_LIMIT / avg_records_per_user), + API_SOX_REQUEST_USER_LIMIT)) + elif chunk_size < API_SOX_REQUEST_USER_LIMIT: + chunk_size = min(chunk_size * 2, API_SOX_REQUEST_USER_LIMIT) if problem_ids: problem_emails = '\n'.join([name_by_id.get(id) for id in problem_ids]) - logging.error(f'Data could not fetched for the following users: \n{problem_emails}') + logging.error(f'Data could not be fetched for the following users: \n{problem_emails}') - if user_filter is not None: - store.update_user_prelim_data(users, records, links, set(name_by_id.keys())) - else: - store.rebuild_prelim_data(users, records, links) + store.update_user_prelim_data(users, records, links, set(name_by_id.keys())) + store.set_prelim_data_updated() success = False try: sync_all() @@ -220,34 +276,22 @@ def sync_all(): database_name=database_name, close_connection=lambda: close_cached_connection(database_name) ) - if user_filter is not None and not cache_only: - # Per-user cache invalidation: only fetch users with stale or missing cache entries - all_users = params.enterprise.get('users', []) - full_filter_lookup = {x['enterprise_user_id']: x['username'] for x in all_users - if x['enterprise_user_id'] in user_filter} - only_shared_cached = storage.shared_records_only - if rebuild or (only_shared_cached and not shared_only): - stale_ids = set(full_filter_lookup.keys()) - else: - stale_ids = set() - for uid in full_filter_lookup: - user_entity = storage.get_users().get_entity(uid) - if not user_entity or (user_entity.last_refreshed or 0) < min_updated: - stale_ids.add(uid) - if stale_ids: - user_lookup = {uid: full_filter_lookup[uid] for uid in stale_ids} - sync_down(user_lookup, storage) - storage.set_shared_records_only(shared_only) + all_users = params.enterprise.get('users', []) + candidate_lookup = {x['enterprise_user_id']: 
x['username'] for x in all_users + if user_filter is None or x['enterprise_user_id'] in user_filter} + only_shared_cached = storage.shared_records_only + if rebuild or (only_shared_cached and not shared_only): + stale_ids = set(candidate_lookup.keys()) else: - # Full cache invalidation: existing behavior - last_updated = storage.last_prelim_data_update - only_shared_cached = storage.shared_records_only - refresh_data = rebuild or not last_updated or min_updated > last_updated or only_shared_cached and not shared_only - if refresh_data and not cache_only: - user_lookup = {x['enterprise_user_id']: x['username'] for x in params.enterprise.get('users', [])} - storage.clear_non_aging_data() - sync_down(user_lookup, storage) - storage.set_shared_records_only(shared_only) + stale_ids = set() + for uid in candidate_lookup: + user_entity = storage.get_users().get_entity(uid) + if not user_entity or (user_entity.last_refreshed or 0) < min_updated: + stale_ids.add(uid) + if stale_ids and not cache_only: + user_lookup = {uid: candidate_lookup[uid] for uid in stale_ids} + sync_down(user_lookup, storage) + storage.set_shared_records_only(shared_only) return sox_data.SoxData(params, storage=storage) @@ -281,22 +325,53 @@ def do_tasks(): start_spinner() print_status(0) users_uids = [int(uid) for uid in sdata.get_users()] - records_by_uid = {rec.record_uid: rec.record_uid_bytes for rec in sdata.get_records().values()} - max_records = API_SOX_REQUEST_USER_LIMIT + storage_records = {e.record_uid: e for e in sdata.storage.records.get_all()} + if rebuild: + stale_record_uids = set(storage_records.keys()) + else: + stale_record_uids = { + uid for uid, rec in storage_records.items() + if (rec.last_compliance_refreshed or 0) < min_updated + } + if not stale_record_uids: + sdata.storage.set_compliance_data_updated() + return + records_by_uid = {rec.record_uid: rec.record_uid_bytes for rec in storage_records.values() + if rec.record_uid in stale_record_uids} + max_records = 
API_SOX_MAX_RECORDS_PER_REQUEST max_users = API_SOX_MAX_USERS_PER_REQUEST - user_chunks = [users_uids[x:x + max_users] for x in range(0, len(users_uids), max_users)] or [users_uids] + if not users_uids: + return + total_records = len(records_by_uid) + user_chunks = [users_uids[x:x + max_users] for x in range(0, len(users_uids), max_users)] for user_chunk in user_chunks: - # Get records owned by users in this chunk chunk_record_uids = set() for uid in user_chunk: user = sdata.get_user(uid) if user: - chunk_record_uids.update(user.records) - chunk_records_raw = [records_by_uid[r] for r in chunk_record_uids if r in records_by_uid] - # Chunk records by API limit + chunk_record_uids.update(user.records & stale_record_uids) + chunk_records_raw = [records_by_uid[r] for r in chunk_record_uids if r in records_by_uid and records_by_uid[r]] + if not chunk_records_raw: + continue + chunk_user_uids = [uid for uid in user_chunk + if any(r in stale_record_uids for r in (sdata.get_user(uid).records if sdata.get_user(uid) else set()))] + if not chunk_user_uids: + continue ruid_chunks = [chunk_records_raw[x:x + max_records] for x in range(0, len(chunk_records_raw), max_records)] - for ruid_chunk in (ruid_chunks or [[]]): - sync_chunk(ruid_chunk, user_chunk) + for ruid_chunk in ruid_chunks: + try: + sync_chunk(ruid_chunk, chunk_user_uids) + except KeeperApiError as kae: + if kae.message.lower() == 'gateway_timeout': + logging.warning('Compliance sync chunk timed out (%d records, %d users), skipping.', + len(ruid_chunk), len(chunk_user_uids)) + else: + logging.error('Compliance sync chunk error: %s (%d records, %d users)', + kae.message, len(ruid_chunk), len(chunk_user_uids)) + except Exception as ex: + logging.error('Compliance sync chunk unexpected error: %s', ex) + if total_records > 0: + print_status(recs_processed / total_records) sdata.storage.set_compliance_data_updated() if not spinner: print('', file=sys.stderr, flush=True) @@ -315,6 +390,9 @@ def sync_chunk(chunk, uuids): 
save_response(rs) def fetch_response(raw_ruids, user_uids): + if not user_uids or not raw_ruids: + logging.debug('Skipping compliance report request: users=%d, records=%d', len(user_uids), len(raw_ruids)) + return enterprise_pb2.ComplianceReportResponse() rq = enterprise_pb2.ComplianceReportRequest() rq.saveReport = False rq.reportName = f'Compliance Report on {datetime.datetime.now()}' @@ -450,6 +528,7 @@ def save_shared_folder_teams(sf_teams): sdata.storage.get_sf_team_links().put_links(links) def save_records(records): + now_ts = int(datetime.datetime.now().timestamp()) entities = [] for record in records: nonlocal recs_processed @@ -460,28 +539,22 @@ def save_records(records): if entity: entity.in_trash = record.inTrash entity.has_attachments = record.hasAttachments + entity.last_compliance_refreshed = now_ts entities.append(entity) sdata.storage.records.put_entities(entities) run_sync_tasks() sd = get_prelim_data(params, enterprise_id, rebuild=rebuild, min_updated=min_updated, cache_only=not min_updated, shared_only=shared_only, user_filter=user_filter) - last_compliance_data_update = sd.storage.last_compliance_data_update - if user_filter is not None: - # Per-user compliance cache invalidation - stale_compliance_ids = set() - enterprise_users = params.enterprise.get('users', []) - for e_user in enterprise_users: - uid = e_user.get('enterprise_user_id') - if uid in user_filter: - user_entity = sd.storage.get_users().get_entity(uid) - if not user_entity or (user_entity.last_compliance_refreshed or 0) < min_updated: - stale_compliance_ids.add(uid) - if rebuild or stale_compliance_ids: - user_node_ids = {e_user.get('enterprise_user_id'): e_user.get('node_id') - for e_user in enterprise_users if e_user.get('enterprise_user_id') in user_filter} - sync_down(sd, node_id, user_node_id_lookup=user_node_ids) - # Update per-user compliance timestamps + enterprise_users = params.enterprise.get('users', []) + all_user_node_ids = {e_user.get('enterprise_user_id'): 
e_user.get('node_id') for e_user in enterprise_users} + has_stale_records = rebuild or any( + (rec.last_compliance_refreshed or 0) < min_updated + for rec in sd.storage.records.get_all() + ) + if has_stale_records: + sync_down(sd, node_id, user_node_id_lookup=all_user_node_ids) + if user_filter is not None: now_ts = int(datetime.datetime.now().timestamp()) updated_users = [] for uid in user_filter: @@ -491,12 +564,6 @@ def save_records(records): updated_users.append(user_entity) if updated_users: sd.storage.get_users().put_entities(updated_users) - else: - refresh_data = rebuild or min_updated > last_compliance_data_update - if refresh_data: - enterprise_users = params.enterprise.get('users', []) - user_node_ids = {e_user.get('enterprise_user_id'): e_user.get('node_id') for e_user in enterprise_users} - sync_down(sd, node_id, user_node_id_lookup=user_node_ids) rebuild_task = sox_data.RebuildTask(is_full_sync=False, load_compliance_data=True) sd.rebuild_data(rebuild_task) return sd diff --git a/keepercommander/sox/storage_types.py b/keepercommander/sox/storage_types.py index 884adcb21..1a93d033a 100644 --- a/keepercommander/sox/storage_types.py +++ b/keepercommander/sox/storage_types.py @@ -11,6 +11,7 @@ def __init__(self): self.node_id = 0 self.last_refreshed = 0 self.last_compliance_refreshed = 0 + self.last_aging_refreshed = 0 def uid(self): # -> int @@ -25,6 +26,7 @@ def __init__(self): self.shared = True self.in_trash = False self.has_attachments = False + self.last_compliance_refreshed = 0 def uid(self): # -> str @@ -38,6 +40,7 @@ def __init__(self, record_uid=''): self.last_pw_change = 0 self.last_modified = 0 self.last_rotation = 0 + self.last_cached = 0 def uid(self): return self.record_uid diff --git a/tests/compliance_test.batch b/tests/compliance_test.batch new file mode 100644 index 000000000..c877cb87b --- /dev/null +++ b/tests/compliance_test.batch @@ -0,0 +1,90 @@ +# Compliance Test Suite +# +# Batch file template for Keeper Commander's compliance 
commands. +# Placeholders are substituted by compliance_test.sh at runtime: +# {OUTDIR} results directory +# {USER1} primary admin email +# {USER2} secondary user email +# {TEAM_ONLY_USER} user with team-only shared-folder access +# {TEAM1} team name or UID +# +# Do not run this file directly — use the runner: +# bash tests/compliance_test.sh [after|before|diff|parallel|all] + +# ─── compliance report ──────────────────────────────────────────────────────── +# t01: full report (rebuild to ensure fresh data) +compliance report -r --format json --output {OUTDIR}/t01_report_full.json +# t02: filter by single user +compliance report -u {USER1} --format json --output {OUTDIR}/t02_report_user.json +# t03: filter by team +compliance report --team {TEAM1} --format json --output {OUTDIR}/t03_report_team.json +# t04: filter by user + team (OR union) +compliance report -u {USER1} --team {TEAM1} --format json --output {OUTDIR}/t04_report_user_team.json +# t05: aging columns present +compliance report --aging --format json --output {OUTDIR}/t05_report_aging.json +# t06: aging + user filter +compliance report --aging -u {USER1} --format json --output {OUTDIR}/t06_report_aging_user.json + +# ─── compliance record-access-report ────────────────────────────────────────── +# t10: single user +compliance record-access-report --email {USER1} --format json --output {OUTDIR}/t10_rar_user.json +# t11: single user + aging +compliance record-access-report --email {USER1} --aging --format json --output {OUTDIR}/t11_rar_user_aging.json +# t12: all users +compliance record-access-report --email @all --format json --output {OUTDIR}/t12_rar_all.json +# t13: all users + aging +compliance record-access-report --email @all --aging --format json --output {OUTDIR}/t13_rar_all_aging.json +# t14: second user (cache isolation — different user than t10) +compliance record-access-report --email {USER2} --aging --format json --output {OUTDIR}/t14_rar_user2_aging.json +# t15: first user again (should hit 
warm cache) +compliance record-access-report --email {USER1} --aging --format json --output {OUTDIR}/t15_rar_user1_cached.json +# t16: team filter +compliance record-access-report --email @all --team {TEAM1} --format json --output {OUTDIR}/t16_rar_team.json + +# ─── compliance team-report ─────────────────────────────────────────────────── +# t20: full team report +compliance team-report --format json --output {OUTDIR}/t20_tr_full.json +# t21: filter by team +compliance team-report --team {TEAM1} --format json --output {OUTDIR}/t21_tr_team.json +# t22: filter by user who is a direct shared-folder member +compliance team-report -u {USER1} --format json --output {OUTDIR}/t22_tr_user_direct.json +# t23: filter by user whose only SF access is via a team +compliance team-report -u {TEAM_ONLY_USER} --format json --output {OUTDIR}/t23_tr_user_team_member.json +# t24: show team users display flag +compliance team-report -tu --format json --output {OUTDIR}/t24_tr_tu.json +# t25: team + user combined filter (OR union) +compliance team-report --team {TEAM1} -u {USER1} --format json --output {OUTDIR}/t25_tr_team_user.json + +# ─── compliance summary-report ──────────────────────────────────────────────── +# t30: full summary +compliance summary-report --format json --output {OUTDIR}/t30_summary_full.json +# t31: filter by user +compliance summary-report -u {USER1} --format json --output {OUTDIR}/t31_summary_user.json +# t32: filter by team +compliance summary-report --team {TEAM1} --format json --output {OUTDIR}/t32_summary_team.json + +# ─── compliance shared-folder-report ────────────────────────────────────────── +# t40: full sfr +compliance shared-folder-report --format json --output {OUTDIR}/t40_sfr_full.json +# t41: filter by team +compliance shared-folder-report --team {TEAM1} --format json --output {OUTDIR}/t41_sfr_team.json +# t42: filter by user who is a direct SF member +compliance shared-folder-report -u {USER1} --format json --output 
{OUTDIR}/t42_sfr_user_direct.json +# t43: filter by user whose only SF access is via team +compliance shared-folder-report -u {TEAM_ONLY_USER} --format json --output {OUTDIR}/t43_sfr_user_team_member.json +# t44: team + user combined filter (OR union) +compliance shared-folder-report --team {TEAM1} -u {USER1} --format json --output {OUTDIR}/t44_sfr_team_user.json +# t45: show team users display flag +compliance shared-folder-report -tu --format json --output {OUTDIR}/t45_sfr_tu.json +# t46: --resolve-teams expands team members into SF user matching +compliance shared-folder-report --team {TEAM1} --resolve-teams --format json --output {OUTDIR}/t46_sfr_team_resolved.json + +# ─── incremental cache tests ───────────────────────────────────────────────── +# t50: rebuild to prime cache +compliance report -r --format json --output {OUTDIR}/t50_cache_prime.json +# t51: immediate re-run (warm cache, should be fast) +compliance report --format json --output {OUTDIR}/t51_cache_warm.json +# t52: --no-cache run (deletes local db after report) +compliance report -nc --format json --output {OUTDIR}/t52_cache_nocache.json +# t53: post-nocache run (db was deleted, must rebuild) +compliance report --format json --output {OUTDIR}/t53_cache_post_nocache.json diff --git a/tests/compliance_test.env.example b/tests/compliance_test.env.example new file mode 100644 index 000000000..252495415 --- /dev/null +++ b/tests/compliance_test.env.example @@ -0,0 +1,30 @@ +# compliance_test.env — test configuration +# Copy this file to compliance_test.env and fill in values for your environment. +# +# All variables are required unless marked (optional). +# Emails must be valid enterprise users visible to the admin running the tests. 
+ +# ── Enterprise users ───────────────────────────────────────────────────────── +# Primary admin / power user (owns many records, is a direct shared-folder member) +USER1="admin@example.com" + +# Secondary user with few records (used for cache-isolation checks) +USER2="user2@example.com" + +# User whose only shared-folder access is through a team (not a direct SF member) +TEAM_ONLY_USER="team-member@example.com" + +# ── Teams ──────────────────────────────────────────────────────────────────── +# A team name (or UID) that has at least one shared folder linked to it +TEAM1="MyTeam" + +# ── Paths ──────────────────────────────────────────────────────────────────── +# Directory containing the Commander branch under test (.venv must exist here) +AFTER_DIR="$HOME/dev/Commander" + +# (optional) Directory containing a baseline Commander install for A/B comparison +BEFORE_DIR="" + +# (optional) Keeper config file relative to each Commander directory above +# Defaults to ./config.json +KEEPER_CONFIG="./config.json" diff --git a/tests/compliance_test.sh b/tests/compliance_test.sh new file mode 100755 index 000000000..c8b54f89c --- /dev/null +++ b/tests/compliance_test.sh @@ -0,0 +1,286 @@ +#!/usr/bin/env bash +# ═══════════════════════════════════════════════════════════════════════════════ +# Compliance Test Runner +# ═══════════════════════════════════════════════════════════════════════════════ +# +# A/B test harness for Keeper Commander's compliance commands. +# Runs a comprehensive batch of compliance subcommands and compares JSON output +# between two Commander installs (e.g. feature branch vs. baseline release). 
+# +# Prerequisites: +# - Each Commander directory must have a .venv with keeper installed +# - A logged-in session (run `keeper shell` once to cache credentials) +# - python3 on PATH +# +# Quick start: +# bash tests/compliance_test.sh after # run tests on current branch +# bash tests/compliance_test.sh before # run tests on baseline +# bash tests/compliance_test.sh diff # compare existing results +# bash tests/compliance_test.sh parallel # run both simultaneously +# bash tests/compliance_test.sh all # run both sequentially, then diff +# +# Configuration: +# The script auto-discovers users and teams from the vault. Override any +# value by exporting env vars or creating tests/compliance_test.env: +# +# AFTER_DIR Commander under test (default: repo root) +# BEFORE_DIR Baseline Commander (default: empty, skips 'before') +# KEEPER_CONFIG Config file path (default: ./config.json) +# USER1 Primary admin email (auto-discovered) +# USER2 Secondary user email (auto-discovered) +# TEAM_ONLY_USER User with team-only SF (auto-discovered from TEAM1) +# TEAM1 Team name or UID (auto-discovered) +# +# ═══════════════════════════════════════════════════════════════════════════════ +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd)" +TEMPLATE="$SCRIPT_DIR/compliance_test.batch" +RESULTS_DIR="$SCRIPT_DIR/compliance_test_results" + +# Load env file if present (won't override already-exported vars) +ENV_FILE="$SCRIPT_DIR/compliance_test.env" +if [ -f "$ENV_FILE" ]; then + echo "Loading config from $ENV_FILE" + set -a; source "$ENV_FILE"; set +a +fi + +AFTER_DIR="${AFTER_DIR:-$REPO_DIR}" +BEFORE_DIR="${BEFORE_DIR:-}" +KEEPER_CONFIG="${KEEPER_CONFIG:-./config.json}" + +# ── Helper: run a keeper command and capture output ────────────────────────── +keeper_cmd() { + local dir="$1"; shift + (cd "$dir" && .venv/bin/keeper --config "$KEEPER_CONFIG" "$@" 2>/dev/null) +} + +# ── Auto-discover test parameters from the vault ──────────────────────────── +discover() { + local dir="$1" + echo "Discovering test parameters from vault ($dir) ..." + + if [ -z "${USER1:-}" ] || [ -z "${USER2:-}" ] || [ -z "${TEAM1:-}" ] || [ -z "${TEAM_ONLY_USER:-}" ]; then + local users_json="" teams_json="" + + if [ -z "${USER1:-}" ] || [ -z "${USER2:-}" ]; then + users_json=$(keeper_cmd "$dir" enterprise-info -u --format json || echo "[]") + if [ -z "${USER1:-}" ]; then + USER1=$(python3 -c " +import json, sys +users = json.loads(sys.stdin.read()) +active = [u for u in users if u.get('status','') == 'Active'] +print(active[0]['email'] if active else users[0]['email'] if users else '') +" <<< "$users_json") + echo " USER1=$USER1" + fi + if [ -z "${USER2:-}" ]; then + USER2=$(python3 -c " +import json, sys +users = json.loads(sys.stdin.read()) +active = [u for u in users if u.get('status','') == 'Active' and u.get('email','') != '$USER1'] +print(active[-1]['email'] if active else '') +" <<< "$users_json") + echo " USER2=$USER2" + fi + fi + + if [ -z "${TEAM1:-}" ] || [ -z "${TEAM_ONLY_USER:-}" ]; then + teams_json=$(keeper_cmd "$dir" enterprise-info -t --columns users --format json || echo "[]") + if [ -z "${TEAM1:-}" ]; then + TEAM1=$(python3 -c " +import json, sys +teams = json.loads(sys.stdin.read()) +skip = 
{'everyone', 'admins'} +candidates = [t for t in teams if t.get('team_name','').lower() not in skip] +print(candidates[0]['team_name'] if candidates else (teams[0]['team_name'] if teams else '')) +" <<< "$teams_json") + echo " TEAM1=$TEAM1" + fi + if [ -z "${TEAM_ONLY_USER:-}" ]; then + TEAM_ONLY_USER=$(python3 -c " +import json, sys +teams = json.loads(sys.stdin.read()) +target, u1 = '$TEAM1', '$USER1' +for t in teams: + if t.get('team_name','') == target: + members = t.get('users', []) + others = [m for m in members if m != u1] + if others: + print(others[0]) + sys.exit(0) +print('$USER2') +" <<< "$teams_json") + echo " TEAM_ONLY_USER=$TEAM_ONLY_USER" + fi + fi + fi + + # Validate + local missing=() + [ -z "${USER1:-}" ] && missing+=("USER1") + [ -z "${USER2:-}" ] && missing+=("USER2") + [ -z "${TEAM1:-}" ] && missing+=("TEAM1") + [ -z "${TEAM_ONLY_USER:-}" ] && missing+=("TEAM_ONLY_USER") + if [ ${#missing[@]} -gt 0 ]; then + echo "ERROR: Could not determine: ${missing[*]}" + echo "Set them in $ENV_FILE or export as env vars." 
+ exit 1 + fi + echo "" +} + +# ── Generate a concrete batch file from the template ───────────────────────── +generate_batch() { + local outdir="$1" dest="$2" + sed -e "s|{OUTDIR}|$outdir|g" \ + -e "s|{USER1}|$USER1|g" \ + -e "s|{USER2}|$USER2|g" \ + -e "s|{TEAM_ONLY_USER}|$TEAM_ONLY_USER|g" \ + -e "s|{TEAM1}|$TEAM1|g" \ + "$TEMPLATE" > "$dest" +} + +# ── Run suites ─────────────────────────────────────────────────────────────── +run_after() { + discover "$AFTER_DIR" + local out="$RESULTS_DIR/after" + local batch="$RESULTS_DIR/after.batch" + mkdir -p "$out" + generate_batch "$out" "$batch" + + echo "=== Running AFTER (current branch) ===" + echo " Dir: $AFTER_DIR" + echo " Output: $out" + echo " Config:" + echo " USER1=$USER1 USER2=$USER2" + echo " TEAM1=$TEAM1 TEAM_ONLY_USER=$TEAM_ONLY_USER" + echo "" + cd "$AFTER_DIR" + .venv/bin/keeper --config "$KEEPER_CONFIG" run-batch "$batch" 2>&1 | tee "$out/_run.log" + echo "" + echo "=== AFTER complete ===" +} + +run_before() { + if [ -z "$BEFORE_DIR" ]; then + echo "ERROR: BEFORE_DIR is not set. Set it in $ENV_FILE or export it." + exit 1 + fi + discover "$BEFORE_DIR" + local out="$RESULTS_DIR/before" + local batch="$RESULTS_DIR/before.batch" + mkdir -p "$out" + generate_batch "$out" "$batch" + + echo "=== Running BEFORE (baseline) ===" + echo " Dir: $BEFORE_DIR" + echo " Output: $out" + echo " Config:" + echo " USER1=$USER1 USER2=$USER2" + echo " TEAM1=$TEAM1 TEAM_ONLY_USER=$TEAM_ONLY_USER" + echo "" + cd "$BEFORE_DIR" + .venv/bin/keeper --config "$KEEPER_CONFIG" run-batch "$batch" 2>&1 | tee "$out/_run.log" + echo "" + echo "=== BEFORE complete ===" +} + +# ── Compare results ────────────────────────────────────────────────────────── +diff_results() { + local after_out="$RESULTS_DIR/after" + local before_out="$RESULTS_DIR/before" + echo "" + echo "=== Comparing results ===" + echo "" + + if [ ! -d "$after_out" ]; then + echo "ERROR: No 'after' results found at $after_out"; exit 1 + fi + if [ ! 
-d "$before_out" ]; then + echo "ERROR: No 'before' results found at $before_out"; exit 1 + fi + + local any_diff=0 + for f in "$after_out"/t*.json; do + local fname + fname=$(basename "$f") + local before_f="$before_out/$fname" + if [ ! -f "$before_f" ]; then + echo " [SKIP] $fname — no baseline (new test or baseline error)" + continue + fi + local after_rows before_rows + after_rows=$(python3 -c "import json; d=json.load(open('$f')); print(len(d) if isinstance(d,list) else 'obj')" 2>/dev/null || echo "ERR") + before_rows=$(python3 -c "import json; d=json.load(open('$before_f')); print(len(d) if isinstance(d,list) else 'obj')" 2>/dev/null || echo "ERR") + if [ "$after_rows" = "$before_rows" ]; then + echo " [OK] $fname — rows: $after_rows" + else + echo " [DIFF] $fname — before=$before_rows, after=$after_rows" + any_diff=1 + fi + done + + for f in "$after_out"/t*.json; do + local fname + fname=$(basename "$f") + if [ ! -f "$before_out/$fname" ]; then + local after_rows + after_rows=$(python3 -c "import json; d=json.load(open('$f')); print(len(d) if isinstance(d,list) else 'obj')" 2>/dev/null || echo "ERR") + echo " [NEW] $fname — rows: $after_rows (no baseline to compare)" + fi + done + + echo "" + if [ "$any_diff" -eq 0 ]; then + echo "All comparable tests match." + else + echo "Some tests differ — review above." + fi +} + +# ── Main ───────────────────────────────────────────────────────────────────── +case "${1:-help}" in + after) run_after ;; + before) run_before ;; + diff) diff_results ;; + parallel) + run_after & + local_after_pid=$! + run_before & + local_before_pid=$! + echo "=== Running in parallel: after=$local_after_pid, before=$local_before_pid ===" + wait $local_after_pid + wait $local_before_pid + diff_results + ;; + all) + run_after + echo "" + run_before + diff_results + ;; + *) + cat <<'USAGE' +Compliance Test Runner — A/B test harness for Commander compliance commands. 
+ +Usage: bash tests/compliance_test.sh + +Modes: + after Run the test suite against the current branch (AFTER_DIR) + before Run the test suite against the baseline install (BEFORE_DIR) + diff Compare existing after/before results + parallel Run both after and before simultaneously, then diff + all Run after, then before, then diff + +Configuration: + Set values in tests/compliance_test.env or export as env vars. + If not set, USER1/USER2/TEAM1/TEAM_ONLY_USER are auto-discovered + from the vault via enterprise-info. + + See tests/compliance_test.env.example for all options. +USAGE + exit 1 + ;; +esac From da9cec982d7a0159d32222818e48c8642af83cb3 Mon Sep 17 00:00:00 2001 From: Sergey Kolupaev Date: Thu, 19 Feb 2026 10:49:25 -0800 Subject: [PATCH 12/16] Release 17.2.8 --- keepercommander/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keepercommander/__init__.py b/keepercommander/__init__.py index ba03096e4..e3e86789d 100644 --- a/keepercommander/__init__.py +++ b/keepercommander/__init__.py @@ -10,4 +10,4 @@ # Contact: commander@keepersecurity.com # -__version__ = '17.2.7' +__version__ = '17.2.8' From 7100ab0a31cc6927981888eaf86d952250e8f639 Mon Sep 17 00:00:00 2001 From: Ivan Dimov <78815270+idimov-keeper@users.noreply.github.com> Date: Tue, 17 Feb 2026 20:14:04 -0600 Subject: [PATCH 13/16] Added launch_credentials to pam project import/extend commands --- keepercommander/commands/pam_import/README.md | 12 +++- keepercommander/commands/pam_import/base.py | 54 +++++++++++++++- keepercommander/commands/pam_import/edit.py | 19 ++++++ keepercommander/commands/pam_import/extend.py | 14 +++++ .../tunnel/port_forward/TunnelGraph.py | 61 ++++++++++++++++++- 5 files changed, 154 insertions(+), 6 deletions(-) diff --git a/keepercommander/commands/pam_import/README.md b/keepercommander/commands/pam_import/README.md index ed922152a..165ca8977 100644 --- a/keepercommander/commands/pam_import/README.md +++ 
b/keepercommander/commands/pam_import/README.md @@ -211,11 +211,12 @@ _You can have only one `pam_configuration` section and the only required paramet #### Resources (users, machines etc.): -Each Machine (pamMachine, pamDatabase, pamDirectory) can specify admin user which will be identified by its unique title or login/username (ex. `"admin_credentials": "admin1"`, or `pamRemoteBrowser` → `pam_settings.connection.autofill_credentials: "BingLogin"`) +Each Machine (pamMachine, pamDatabase, pamDirectory) can specify **Administrative Credentials** (admin user) and **Launch Credentials** (the credentials used to establish the protocol connection). Both are identified by title or login/username of a pamUser (e.g. `"administrative_credentials": "admin1"`, `"launch_credentials": "user1"`). pamUser and pamRemoteBrowser do not have launch credentials; pamRemoteBrowser uses `pam_settings.connection.autofill_credentials` for RBI login. - **Machines** are defined in `pam_data.resources` where each machine can have its own list of `"users": []` one of which is the admin user for that machine. Users that don't belong to a single machine are into global `pam_data.users` section (record type: `login`, `pamUser` for NOOP rotation or shared across multiple machines /ex. same user for ssh, vnc, rdp etc./) > **Note 1:** `pam_settings` _(options, connection)_ are explained only in pamMachine section below (per protocol) but they are present in all machine types. > **Note 2:** `attachments` and `scripts` examples are in `pam_configuration: local` section. > **Note 3:** Post rotation scripts (a.k.a. `scripts`) are executed in following order: `pamUser` scripts after any **successful** rotation for that user, `pamMachine` scripts after any **successful** rotation on the machine and `pamConfiguration` scripts after any rotation using that configuration. 
+ > **Note 4:** When `allow_supply_user` is false and JIT ephemeral is not used, vault may require a launch credential; import can provide it via `launch_credentials` in the resource's `connection` block. JIT and KeeperAI settings below are shared across all resource types (pamMachine, pamDatabase, pamDirectory) except User and RBI (pamRemoteBrowser) records. @@ -365,6 +366,7 @@ JIT and KeeperAI settings below are shared across all resource types (pamMachine "port": "2222", "allow_supply_user": true, "administrative_credentials": "admin1", + "launch_credentials": "user1", "recording_include_keys": true, "disable_copy": true, "disable_paste": true, @@ -433,6 +435,7 @@ JIT and KeeperAI settings below are shared across all resource types (pamMachine "port": "2222", "allow_supply_user": true, "administrative_credentials": "admin1", + "launch_credentials": "user1", "recording_include_keys": true, "disable_copy": true, "disable_paste": true, @@ -488,6 +491,7 @@ JIT and KeeperAI settings below are shared across all resource types (pamMachine "port": "2222", "allow_supply_user": true, "administrative_credentials": "admin1", + "launch_credentials": "user1", "recording_include_keys": true, "disable_copy": true, "disable_paste": true, @@ -546,6 +550,7 @@ JIT and KeeperAI settings below are shared across all resource types (pamMachine "port": "2222", "allow_supply_user": true, "administrative_credentials": "admin1", + "launch_credentials": "user1", "recording_include_keys": true, "disable_copy": true, "disable_paste": true, @@ -600,6 +605,7 @@ JIT and KeeperAI settings below are shared across all resource types (pamMachine "port": "2222", "allow_supply_user": true, "administrative_credentials": "admin1", + "launch_credentials": "user1", "recording_include_keys": true, "color_scheme": "gray-black", "font_size": "18", @@ -659,6 +665,7 @@ JIT and KeeperAI settings below are shared across all resource types (pamMachine "port": "2222", "allow_supply_user": true, 
"administrative_credentials": "admin1", + "launch_credentials": "user1", "recording_include_keys": true, "disable_copy": true, "disable_paste": true, @@ -727,6 +734,7 @@ JIT and KeeperAI settings below are shared across all resource types (pamMachine "port": "2222", "allow_supply_user": true, "administrative_credentials": "admin1", + "launch_credentials": "user1", "recording_include_keys": true, "disable_copy": true, "disable_paste": true, @@ -739,7 +747,7 @@ JIT and KeeperAI settings below are shared across all resource types (pamMachine "sftp_root_directory": "/tmp" } } - } + }, "users": [] }, { diff --git a/keepercommander/commands/pam_import/base.py b/keepercommander/commands/pam_import/base.py index 54a903825..16bcfb633 100644 --- a/keepercommander/commands/pam_import/base.py +++ b/keepercommander/commands/pam_import/base.py @@ -1967,12 +1967,14 @@ def load(cls, data: Union[str, dict]): return obj class BaseConnectionSettings: - def __init__(self, port: Optional[str] = None, allowSupplyUser: Optional[bool] = None, userRecords: Optional[List[str]] = None, recordingIncludeKeys: Optional[bool] = None): + def __init__(self, port: Optional[str] = None, allowSupplyUser: Optional[bool] = None, userRecords: Optional[List[str]] = None, recordingIncludeKeys: Optional[bool] = None, launch_credentials: Optional[str] = None): self.port = port # Override port from host self.allowSupplyUser = allowSupplyUser self.recordingIncludeKeys = recordingIncludeKeys self.userRecords = userRecords - self.userRecordUid = None # resolved from userRecords + self.userRecordUid = None # resolved from userRecords + self.launch_credentials = launch_credentials # title or login of pamUser for launch + self.launchRecordUid = None # resolved from launch_credentials @classmethod def load(cls, data: Union[str, dict]): @@ -1987,6 +1989,10 @@ def load(cls, data: Union[str, dict]): obj.allowSupplyUser = utils.value_to_boolean(data.get("allow_supply_user", None)) obj.userRecords = 
parse_multiline(data, "administrative_credentials", "Error parsing administrative_credentials") obj.recordingIncludeKeys = utils.value_to_boolean(data.get("recording_include_keys", None)) + launch_creds = parse_multiline(data, "launch_credentials", "Error parsing launch_credentials") + creds = next((s for s in launch_creds if s.strip()), "") if launch_creds else "" + if creds: obj.launch_credentials = creds + return obj class ConnectionSettingsRDP(BaseConnectionSettings, ClipboardConnectionSettings): @@ -2039,6 +2045,8 @@ def load(cls, data: Union[str, dict]): obj.allowSupplyUser = bcs.allowSupplyUser obj.userRecords = bcs.userRecords obj.recordingIncludeKeys = bcs.recordingIncludeKeys + obj.launch_credentials = getattr(bcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bcs, "launchRecordUid", None) ccs = ClipboardConnectionSettings.load(data) if ccs: @@ -2167,6 +2175,8 @@ def load(cls, data: Union[str, dict]): obj.allowSupplyUser = bcs.allowSupplyUser obj.userRecords = bcs.userRecords obj.recordingIncludeKeys = bcs.recordingIncludeKeys + obj.launch_credentials = getattr(bcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bcs, "launchRecordUid", None) ccs = ClipboardConnectionSettings.load(data) if ccs: @@ -2254,6 +2264,8 @@ def load(cls, data: Union[str, dict]): obj.allowSupplyUser = bcs.allowSupplyUser obj.userRecords = bcs.userRecords obj.recordingIncludeKeys = bcs.recordingIncludeKeys + obj.launch_credentials = getattr(bcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bcs, "launchRecordUid", None) ccs = ClipboardConnectionSettings.load(data) if ccs: @@ -2345,6 +2357,8 @@ def load(cls, data: Union[str, dict]): obj.allowSupplyUser = bcs.allowSupplyUser obj.userRecords = bcs.userRecords obj.recordingIncludeKeys = bcs.recordingIncludeKeys + obj.launch_credentials = getattr(bcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bcs, "launchRecordUid", None) ccs = ClipboardConnectionSettings.load(data) if ccs: @@ 
-2443,6 +2457,8 @@ def load(cls, data: Union[str, dict]): obj.allowSupplyUser = bcs.allowSupplyUser obj.userRecords = bcs.userRecords obj.recordingIncludeKeys = bcs.recordingIncludeKeys + obj.launch_credentials = getattr(bcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bcs, "launchRecordUid", None) ccs = ClipboardConnectionSettings.load(data) if ccs: @@ -2545,6 +2561,8 @@ def load(cls, data: Union[str, dict]): obj.allowSupplyUser = bcs.allowSupplyUser obj.userRecords = bcs.userRecords obj.recordingIncludeKeys = bcs.recordingIncludeKeys + obj.launch_credentials = getattr(bcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bcs, "launchRecordUid", None) tcs = TerminalDisplayConnectionSettings.load(data) if tcs: @@ -2638,6 +2656,8 @@ def load(cls, data: Union[str, dict]): obj.allowSupplyUser = bcs.allowSupplyUser obj.userRecords = bcs.userRecords obj.recordingIncludeKeys = bcs.recordingIncludeKeys + obj.launch_credentials = getattr(bcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bcs, "launchRecordUid", None) ccs = ClipboardConnectionSettings.load(data) if ccs: @@ -2720,6 +2740,8 @@ def load(cls, data: Union[str, dict]): obj.database = bdcs.database obj.disableCsvExport = bdcs.disableCsvExport obj.disableCsvImport = bdcs.disableCsvImport + obj.launch_credentials = getattr(bdcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bdcs, "launchRecordUid", None) return obj @@ -2764,6 +2786,8 @@ def load(cls, data: Union[str, dict]): obj.database = bdcs.database obj.disableCsvExport = bdcs.disableCsvExport obj.disableCsvImport = bdcs.disableCsvImport + obj.launch_credentials = getattr(bdcs, "launch_credentials", None) + obj.launchRecordUid = getattr(bdcs, "launchRecordUid", None) return obj @@ -2808,6 +2832,8 @@ def load(cls, data: Union[str, dict]): obj.database = bdcs.database obj.disableCsvExport = bdcs.disableCsvExport obj.disableCsvImport = bdcs.disableCsvImport + obj.launch_credentials = getattr(bdcs, 
"launch_credentials", None) + obj.launchRecordUid = getattr(bdcs, "launchRecordUid", None) return obj @@ -3083,6 +3109,30 @@ def set_user_record_uid(obj, uid: str, is_external: bool = False) -> bool: logging.debug("""Object has no attribute "userRecordUid" (skipped)""") return False +def get_launch_credential(obj, uid: bool = False) -> str: + """Get launch credential: resolved UID if uid=True, else string reference (title/login).""" + value: str = "" + if not (obj and hasattr(obj, "pam_settings") and hasattr(obj.pam_settings, "connection")): + return value + conn = obj.pam_settings.connection + if uid and getattr(conn, "launchRecordUid", None): + value = conn.launchRecordUid + elif not uid and getattr(conn, "launch_credentials", None): + value = conn.launch_credentials + value = value[0] if isinstance(value, list) else value + return value if isinstance(value, str) else "" + +def set_launch_record_uid(obj, uid: str) -> bool: + if not (uid and isinstance(uid, str) and RecordV3.is_valid_ref_uid(uid)): + logging.debug(f"""Invalid launchRecordUid "{uid}" (skipped)""") + return False + if (obj and hasattr(obj, "pam_settings") and hasattr(obj.pam_settings, "connection") + and hasattr(obj.pam_settings.connection, "launchRecordUid")): + obj.pam_settings.connection.launchRecordUid = uid + return True + logging.debug("""Object has no attribute "launchRecordUid" (skipped)""") + return False + def find_external_user(mach, machines, title: str) -> list: # Local pamMachine could reference pamDirectory AD user as its admin res = [] diff --git a/keepercommander/commands/pam_import/edit.py b/keepercommander/commands/pam_import/edit.py index 805c8d6cc..d4a261d43 100644 --- a/keepercommander/commands/pam_import/edit.py +++ b/keepercommander/commands/pam_import/edit.py @@ -34,11 +34,13 @@ find_external_user, find_user, get_admin_credential, + get_launch_credential, get_sftp_attribute, is_admin_external, parse_command_options, resolve_domain_admin, resolve_script_creds, + 
set_launch_record_uid, set_sftp_uid, set_user_record_uid ) @@ -1480,6 +1482,18 @@ def process_data(self, params, project): if ruid: set_user_record_uid(mach, ruid, is_external) + # launch_credentials: resolve to pamUser UID for pamMachine, pamDatabase, pamDirectory (not RBI) + launch_cred = get_launch_credential(mach) + if launch_cred and not isinstance(mach, PamRemoteBrowserObject): + ruids = find_user(mach, users, launch_cred) + if not ruids: + ruids = find_external_user(mach, machines, launch_cred) + if len(ruids) != 1: + logging.warning(f"{bcolors.WARNING}{len(ruids)} matches found for launch_credentials in {mach.title}.{bcolors.ENDC} ") + ruid = getattr(ruids[0], "uid", "") if ruids else "" + if ruid: + set_launch_record_uid(mach, ruid) + # jit_settings.pam_directory_record -> pam_directory_uid (pamDirectory in pam_data.resources by title) # RBI has rbi_settings only (no pam_settings.jit_settings) ps = getattr(mach, "pam_settings", None) @@ -1626,6 +1640,7 @@ def process_data(self, params, project): if admin_uid and is_admin_external(mach): tdag.link_user_to_resource(admin_uid, mach.uid, is_admin=True, belongs_to=False) args = parse_command_options(mach, False) + args["meta_version"] = 1 tdag.set_resource_allowed(**args) # After setting allowedSettings, save JIT settings if present @@ -1686,6 +1701,10 @@ def process_data(self, params, project): if user.rotation_settings.password_complexity: args["pwd_complexity"]=user.rotation_settings.password_complexity prc.execute(params, silent=True, **args) + # Launch credentials: link for pamMachine, pamDatabase, pamDirectory (not RBI) + launch_uid = get_launch_credential(mach, True) + if launch_uid and not isinstance(mach, PamRemoteBrowserObject): + tdag.link_user_to_resource(launch_uid, mach.uid, is_launch_credential=True, belongs_to=True) if resources: print(f"{len(resources)}/{len(resources)}\n") # link machine -> pamDirectory (LINK, path=domain) for jit_settings.pam_directory_uid diff --git 
a/keepercommander/commands/pam_import/extend.py b/keepercommander/commands/pam_import/extend.py index 87304553d..495e93523 100644 --- a/keepercommander/commands/pam_import/extend.py +++ b/keepercommander/commands/pam_import/extend.py @@ -37,10 +37,12 @@ find_external_user, find_user, get_admin_credential, + get_launch_credential, get_sftp_attribute, is_admin_external, parse_command_options, resolve_script_creds, + set_launch_record_uid, set_sftp_uid, set_user_record_uid ) @@ -875,6 +877,11 @@ def resolve_one(obj, parent_machine=None): is_external = True if len(ruids) == 1 and getattr(ruids[0], "uid", ""): set_user_record_uid(mach, ruids[0].uid, is_external) + launch_cred = get_launch_credential(mach) + if launch_cred and not isinstance(mach, PamRemoteBrowserObject): + ruids = find_user(mach, users, launch_cred) or find_external_user(mach, machines, launch_cred) + if len(ruids) == 1 and getattr(ruids[0], "uid", ""): + set_launch_record_uid(mach, ruids[0].uid) if mach.pam_settings and getattr(mach.pam_settings, "jit_settings", None): jit = mach.pam_settings.jit_settings ref = getattr(jit, "pam_directory_record", None) or "" @@ -1282,6 +1289,7 @@ def process_data(self, params, project): if admin_uid and is_admin_external(mach): tdag.link_user_to_resource(admin_uid, mach.uid, is_admin=True, belongs_to=False) args = parse_command_options(mach, False) + args["meta_version"] = 1 tdag.set_resource_allowed(**args) mach_users = getattr(mach, "users", []) or [] for user in mach_users: @@ -1309,6 +1317,9 @@ def process_data(self, params, project): if getattr(rs, "password_complexity", None): args["pwd_complexity"] = rs.password_complexity prc.execute(params, silent=True, **args) + launch_uid = get_launch_credential(mach, True) + if launch_uid and not isinstance(mach, PamRemoteBrowserObject): + tdag.link_user_to_resource(launch_uid, mach.uid, is_launch_credential=True, belongs_to=True) if new_resources: print(f"{len(new_resources)}/{len(new_resources)}\n") @@ -1340,6 +1351,9 @@ 
def process_data(self, params, project): if getattr(rs, "password_complexity", None): args["pwd_complexity"] = rs.password_complexity prc.execute(params, silent=True, **args) + launch_uid = get_launch_credential(mach, True) + if launch_uid and not isinstance(mach, PamRemoteBrowserObject): + tdag.link_user_to_resource(launch_uid, mach.uid, is_launch_credential=True, belongs_to=True) if pce and getattr(pce, "scripts", None) and getattr(pce.scripts, "scripts", None): refs = [x for x in pce.scripts.scripts if getattr(x, "record_refs", None)] diff --git a/keepercommander/commands/tunnel/port_forward/TunnelGraph.py b/keepercommander/commands/tunnel/port_forward/TunnelGraph.py index ca95b9f0e..8f86d83b4 100644 --- a/keepercommander/commands/tunnel/port_forward/TunnelGraph.py +++ b/keepercommander/commands/tunnel/port_forward/TunnelGraph.py @@ -20,6 +20,54 @@ def get_vertex_content(vertex): return return_content +# Resource meta version (int). Vault uses version >= 1 to read launch credentials from ACL. +# In set_resource_allowed: meta_version=None or 0 -> legacy (no version in meta); 1 -> v1. +# Future: add RESOURCE_META_VERSION_V2, etc. and handle them in build_resource_meta(). +RESOURCE_META_VERSION_V1 = 1 + + +def build_resource_meta_v1(allowed_settings, rotate_on_termination=False): + """ + Build DAG resource meta payload in v1 format so vault uses ACL is_launch_credential for launch. + Returns dict: {"version": , "allowedSettings": allowed_settings, "rotateOnTermination": bool}. + """ + if not isinstance(allowed_settings, dict): + allowed_settings = {} + return { + "version": int(RESOURCE_META_VERSION_V1), + "allowedSettings": dict(allowed_settings), + "rotateOnTermination": bool(rotate_on_termination), + } + + +def build_resource_meta(version, allowed_settings, rotate_on_termination=False): + """ + Build DAG resource meta payload for the given version (int). + version=1 -> v1 format; other values can be added for v2, v3, etc. 
+ """ + if version == RESOURCE_META_VERSION_V1: + return build_resource_meta_v1(allowed_settings, rotate_on_termination) + # Future: elif version == RESOURCE_META_VERSION_V2: return build_resource_meta_v2(...) + raise ValueError(f"Unsupported resource meta version: {version}") + + +def ensure_resource_meta_v1(content): + """ + Ensure existing meta content has version 1 and rotateOnTermination (for re-writes). + Returns a copy with version= and rotateOnTermination default False if missing. + """ + if content is None: + return build_resource_meta_v1({}, False) + out = dict(content) + out["version"] = int(RESOURCE_META_VERSION_V1) + if "rotateOnTermination" not in out: + out["rotateOnTermination"] = False + # Normalize allowedSettings key if content used a different key (e.g. allowedSettings) + if "allowedSettings" not in out and "allowed_settings" in out: + out["allowedSettings"] = out.pop("allowed_settings", {}) + return out + + class TunnelDAG: def __init__(self, params, encrypted_session_token, encrypted_transmission_key, record_uid: str, is_config=False, transmission_key=None): @@ -441,7 +489,7 @@ def set_resource_allowed(self, resource_uid, tunneling=None, connections=None, r session_recording=None, typescript_recording=None, remote_browser_isolation=None, ai_enabled=None, ai_session_terminate=None, allowed_settings_name='allowedSettings', is_config=False, - v_type: RefType=str(RefType.PAM_MACHINE)): + v_type: RefType=str(RefType.PAM_MACHINE), meta_version=None): v_type = RefType(v_type) allowed_ref_types = [RefType.PAM_MACHINE, RefType.PAM_DATABASE, RefType.PAM_DIRECTORY, RefType.PAM_BROWSER] if v_type not in allowed_ref_types: @@ -545,7 +593,16 @@ def set_resource_allowed(self, resource_uid, tunneling=None, connections=None, r settings["aiSessionTerminate"] = ai_session_terminate if dirty: - resource_vertex.add_data(content=content, path='meta', needs_encryption=False) + # Legacy: missing or meta_version=0 -> write content as-is (no version in meta) + if 
meta_version is not None and meta_version != 0: + meta_payload = build_resource_meta( + meta_version, + content.get(allowed_settings_name, {}), + rotate_on_termination=False, + ) + resource_vertex.add_data(content=meta_payload, path='meta', needs_encryption=False) + else: + resource_vertex.add_data(content=content, path='meta', needs_encryption=False) self.linking_dag.save() def is_tunneling_config_set_up(self, resource_uid): From 10c837c4a1aa1537e399bec594ab66b1b327b98b Mon Sep 17 00:00:00 2001 From: Ayrris Aunario <105313137+aaunario-keeper@users.noreply.github.com> Date: Thu, 19 Feb 2026 09:43:23 -0600 Subject: [PATCH 14/16] KC-1143 Fix aging row mapping, filtered staleness scope, test discovery MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix report_data.index(fmt_row) bug that could map aging columns to wrong records when rows have identical content — use enumerate instead - Scope staleness check to filtered users' records when user_filter is set, preventing full enterprise sync_down on filtered queries - Fix team discovery in test script: use 'name' key (not 'team_name') matching actual enterprise-info JSON output --- keepercommander/commands/compliance.py | 5 ++--- keepercommander/sox/__init__.py | 19 +++++++++++++++---- tests/compliance_test.sh | 6 +++--- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/keepercommander/commands/compliance.py b/keepercommander/commands/compliance.py index 81657ee2a..9ed77f17c 100644 --- a/keepercommander/commands/compliance.py +++ b/keepercommander/commands/compliance.py @@ -598,11 +598,10 @@ def format_table(rows): # Append aging columns to each row record_lookup = sox_data.get_records() - for fmt_row in report_data: + for idx, fmt_row in enumerate(report_data): rec_uid = fmt_row[0] # Handle collapsed UIDs in table format - if not rec_uid and report_data: - idx = report_data.index(fmt_row) + if not rec_uid: for i in range(idx - 1, -1, -1): if report_data[i][0]: 
rec_uid = report_data[i][0] diff --git a/keepercommander/sox/__init__.py b/keepercommander/sox/__init__.py index 315b2442d..28166a1c1 100644 --- a/keepercommander/sox/__init__.py +++ b/keepercommander/sox/__init__.py @@ -548,10 +548,21 @@ def save_records(records): sd = get_prelim_data(params, enterprise_id, rebuild=rebuild, min_updated=min_updated, cache_only=not min_updated, shared_only=shared_only, user_filter=user_filter) enterprise_users = params.enterprise.get('users', []) all_user_node_ids = {e_user.get('enterprise_user_id'): e_user.get('node_id') for e_user in enterprise_users} - has_stale_records = rebuild or any( - (rec.last_compliance_refreshed or 0) < min_updated - for rec in sd.storage.records.get_all() - ) + if user_filter is not None: + filtered_user_recs = set() + for uid in user_filter: + user = sd.get_user(uid) + if user: + filtered_user_recs.update(user.records) + has_stale_records = rebuild or any( + (rec.last_compliance_refreshed or 0) < min_updated + for rec in sd.storage.records.get_all() if rec.record_uid in filtered_user_recs + ) + else: + has_stale_records = rebuild or any( + (rec.last_compliance_refreshed or 0) < min_updated + for rec in sd.storage.records.get_all() + ) if has_stale_records: sync_down(sd, node_id, user_node_id_lookup=all_user_node_ids) if user_filter is not None: diff --git a/tests/compliance_test.sh b/tests/compliance_test.sh index c8b54f89c..20a5b996b 100755 --- a/tests/compliance_test.sh +++ b/tests/compliance_test.sh @@ -93,8 +93,8 @@ print(active[-1]['email'] if active else '') import json, sys teams = json.loads(sys.stdin.read()) skip = {'everyone', 'admins'} -candidates = [t for t in teams if t.get('team_name','').lower() not in skip] -print(candidates[0]['team_name'] if candidates else (teams[0]['team_name'] if teams else '')) +candidates = [t for t in teams if t.get('name', t.get('team_name','')).lower() not in skip] +print(candidates[0].get('name', candidates[0].get('team_name','')) if candidates else 
(teams[0].get('name', teams[0].get('team_name','')) if teams else '')) " <<< "$teams_json") echo " TEAM1=$TEAM1" fi @@ -104,7 +104,7 @@ import json, sys teams = json.loads(sys.stdin.read()) target, u1 = '$TEAM1', '$USER1' for t in teams: - if t.get('team_name','') == target: + if t.get('name', t.get('team_name','')) == target: members = t.get('users', []) others = [m for m in members if m != u1] if others: From 0b21d80991ad05b7e5fda28d37a7639dde5c52d5 Mon Sep 17 00:00:00 2001 From: lthievenaz-keeper Date: Fri, 20 Feb 2026 15:03:59 +0000 Subject: [PATCH 15/16] Rename pam_import_generator.py to pam_import_generator_v1.py --- examples/{pam_import_generator.py => pam_import_generator_v1.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{pam_import_generator.py => pam_import_generator_v1.py} (100%) diff --git a/examples/pam_import_generator.py b/examples/pam_import_generator_v1.py similarity index 100% rename from examples/pam_import_generator.py rename to examples/pam_import_generator_v1.py From 48ec8e715ce49e03490ffd480df8a6b62e7d704a Mon Sep 17 00:00:00 2001 From: lthievenaz-keeper Date: Fri, 20 Feb 2026 15:04:33 +0000 Subject: [PATCH 16/16] Create pam_import_generator_v2.py --- examples/pam_import_generator_v2.py | 307 ++++++++++++++++++++++++++++ 1 file changed, 307 insertions(+) create mode 100644 examples/pam_import_generator_v2.py diff --git a/examples/pam_import_generator_v2.py b/examples/pam_import_generator_v2.py new file mode 100644 index 000000000..664dafddb --- /dev/null +++ b/examples/pam_import_generator_v2.py @@ -0,0 +1,307 @@ +#!/usr/bin/env python3 +""" +Generates JSON file ready to be imported by pam project import command. +This example generates JSON that creates one AD machine (pamDirectory) +with AD Admin user (pamUser) and multiple local machines (pamMachine) +configured with connections and rotation enabled and AD Admin as their admin. 
+ +This script uses external CSV file (format: server_name,user_name,password) +and optionally an external JSON template with static pamDirectory and its pamUser and +a dynamic placeholder used for each pamMachine and its pamUser (from CSV) + +You can use any of the full set of options per user/machine type from our docs +https://github.com/Keeper-Security/Commander/blob/master/keepercommander/commands/pam_import/README.md +You can also run the script with --show-template option and use it as startnig point. + +Command line options: + -i, --input-file default = servers_to_import.csv + Specify the input file CSV: hostname,user,password + -o, --output-file default = pam_import.json + Specify the JSON output file + -t, --template-file Specify the JSON template file + -s, --show-template Show sample JSON template (overrides all options) + -p, --prefix-names Enable username prefixes (server1-admin vs admin) +""" +from __future__ import annotations + +import argparse +import copy +import json +import os +import sys +from csv import DictReader +from pathlib import Path +from typing import Any, Dict, List + +from time import time + +DEFAULT_IMPORT_TEMPLATE = { + "project": "PAM Project", + "shared_folder_users": { + "manage_users": True, + "manage_records": True, + "can_edit": True, + "can_share": True + }, + "shared_folder_resources": { + "manage_users": True, + "manage_records": True, + "can_edit": True, + "can_share": True + }, + "pam_configuration": { + "environment": "local", + "connections": "on", + "rotation": "on", + "graphical_session_recording": "on" + }, + "pam_data": { + "resources": [] + } +} + + +def _build_cli() -> argparse.ArgumentParser: + p = argparse.ArgumentParser( + description="Generate Keeper PAM import JSON file", + formatter_class=argparse.RawTextHelpFormatter, + ) + + p.add_argument("-i", "--input-file", default="servers_to_import.csv", + help="Specify the input file - " + "CSV with hostname,user,password (default: %(default)s)") + 
p.add_argument("-o", "--output-file", default="pam_import.json", + help="Specify the JSON output file (default: %(default)s)") + p.add_argument("-t", "--template-file", + help="Specify the JSON template file (default: %(default)s)") + p.add_argument("-s", "--show-template", action="store_true", + help="Show sample JSON template (overrides all options)") + p.add_argument("-p", "--prefix-names", action="store_true", + help="Enable username prefixes (server1-admin vs admin)") + + return p + + +def _load_template(path: str) -> Dict[str, Any]: + full_path = os.path.abspath(os.path.expanduser(path)) + if not os.path.isfile(full_path): + print(f"JSON template file not found: {path}") + print("Use --show-template option to get a sample template") + sys.exit(1) + + res = {} + with open(full_path, encoding="utf-8") as fp: + res = json.load(fp) + return res + + +def _read_csv(path: str) -> List[Dict[str, str]]: + full_path = os.path.abspath(os.path.expanduser(path)) + if not os.path.isfile(full_path): + print(f"CSV file not found: {path}", ) + sys.exit(2) + + with open(full_path, encoding="utf-8") as fp: + csv_data = list(DictReader(fp)) + # skip incomplete + valid_rows = [] + for i,obj in enumerate(csv_data): + host = obj.get('hostname',None) + username = obj.get('username',None) + user_path = obj.get('user_path',None) + if not host and not all([username,user_path]): + print(f"Row {i+1} incomplete - skipped") + else: + valid_rows.append(obj) + + return valid_rows + + +def _parse_fields(obj: Dict, type: str, tmpl=None): + templates = { + 'rs':{ + "pam_settings": { + "options": { + "rotation": "off", + "connections": "on", + "tunneling": "off", + "graphical_session_recording": "on" + }, + "connection":{} + }, + "users": [] + }, + 'usr':{ + "rotation_settings": {} + } + } + res = templates.get(type,{}) + if tmpl: + res = tmpl + + + for key in obj: + if obj[key] == '': continue + if key.startswith(type): + split_arg = key.split('.') + if len(split_arg)==2: + res[split_arg[1]] = 
obj[key] + elif len(split_arg)==3: + res[split_arg[1]][split_arg[2]] = obj[key] + elif len(split_arg)==4: + res[split_arg[1]][split_arg[2]][split_arg[3]] = obj[key] + return res + + + +def _gen_data(csv_data: List[Dict[str, str]], + template: Dict[str, Any], + prefix_names: bool) -> Dict[str, Any]: + + data = copy.deepcopy(template) if template else DEFAULT_IMPORT_TEMPLATE + + # pop out pamMachine template + rsrs = data.get("pam_data", {}).get("resources") or [] + idx = next((i for i, item in enumerate(rsrs) if str(item.get("type")) == "pamMachine"), None) + tmpl = rsrs.pop(idx) if idx is not None else {} + rs_tmpl, usr_tmpl = None,None + if tmpl: + rs_tmpl = tmpl + usr_tmpl = tmpl.get('users',[None])[0] + rs_tmpl['users'] = [] + + seen: set[str] = set() + for i,obj in enumerate(csv_data): + host = obj.get("hostname",None) + + # filter machines + if not host: continue + if host in seen: + print(f"Duplicate hostname {host} on row {i+1} - skipped") + continue + seen.add(host) + + # create machine dict + mach = _parse_fields(obj,'rs',rs_tmpl) + mach['hostname'] = host + mach['title'] = obj.get('title',host) + mach['type'] = obj.get("type","pamMachine") + if obj.get('folder_path',None): + mach['folder_path'] = obj['folder_path'] + + rsrs.append(mach) + + # Once all resources added, add pamUsers + seen: set[str] = set() + for i,obj in enumerate(csv_data): + username = obj["username"] + password = obj.get("password","") + user_path = obj["user_path"] + + if not username: continue + if username in seen: + print(f"Duplicate username {username} on row {i+1} - skipped") + continue + seen.add(username) + + user = (_parse_fields(obj,'usr',usr_tmpl)) + if obj.get('folder_path',None): + user['folder_path'] = obj['folder_path'] + user["title"] = obj.get('title',f"{obj['user_path']} - {obj['username']}") + user['type'] = "pamUser" + user['login'] = obj['username'] + user["password"] = obj.get('password',"") + + # Map user to resource + for rs in rsrs: + if rs['title'] == 
user_path: + rs['users'].append(user) + + + data["pam_data"]["resources"] = rsrs + return data + + +def _write(fpath: Path, content: str): + with fpath.open("w", encoding="utf-8") as fp: + fp.write(content) + print(f"Wrote {fpath}") + + +def write_import_json(data: Dict[str, Any], path: str): + """ Generate JSON and save to file""" + content = json.dumps(data, indent=2) + _write(Path(path), content) + + +def prepare_template(template: Dict[str, Any]) -> None: + """ Prepare JSON template - populate missing defaults """ + tdic = DEFAULT_IMPORT_TEMPLATE + if "project" not in template: + template["project"] = tdic["project"] + if "shared_folder_users" not in template: + template["shared_folder_users"] = tdic["shared_folder_users"] + if "shared_folder_resources" not in template: + template["shared_folder_resources"] = tdic["shared_folder_resources"] + if "pam_configuration" not in template: + template["pam_configuration"] = tdic["pam_configuration"] + env = str(template["pam_configuration"].get("environment")) + if env != "local": + print(f"This script works only with pam_configuration.environment = local, currently it is set to '{env}'") + sys.exit(4) + if (str(template["pam_configuration"].get("connections")).lower() != "on" or + str(template["pam_configuration"].get("rotation")).lower() != "on"): + print("connections and rotation must be set to 'on' in pam_configuration section - adjusted") + template["pam_configuration"]["connections"] = "on" + template["pam_configuration"]["rotation"] = "on" + if "pam_data" not in template or not template["pam_data"].get("resources"): + print('"pam_data": { "resources": [] } - must be present and non-empty') + sys.exit(4) + res = template["pam_data"].get("resources") or [] + if len(res) != 2: + print('pam_data.resources[] - must define exactly two "machines": pamDirectory and pamUser') + sys.exit(4) + for i in (0, 1): + mach_type = res[i].get("type") or "" + mach_usrs = res[i].get("users") or [] + if ((i == 0 and mach_type != 
"pamDirectory") or (i == 1 and mach_type != "pamMachine") or not mach_usrs): + print('Expected first machine type=pamDirectory and second type=pamUser, and each to have at least one pamUser') + sys.exit(4) + if "pam_settings" not in res[i]: + print("Missing pam_settings section in pamDirectory or pamMachine") + sys.exit(4) + if ("connection" not in res[i]["pam_settings"] or + "administrative_credentials" not in res[i]["pam_settings"]["connection"]): + print("Missing pam_settings.connection.administrative_credentials in pamDirectory or pamMachine") + sys.exit(4) + # ToDo: verify admin users setup and cross references + contents = json.dumps(template, indent=2) + pos = contents.find('"XXX:') + if pos != -1: + print(f"Template still missing required values: {contents[pos:pos+80]}") + sys.exit(4) + + +def main(): + """ Main function """ + args = _build_cli().parse_args() + + # --show-template overides any other options + if args.show_template: + print(DEFAULT_IMPORT_TEMPLATE) + sys.exit(0) + + rows = _read_csv(args.input_file) + tmpl = DEFAULT_IMPORT_TEMPLATE + if args.template_file: + tmpl = _load_template(args.template_file) + prepare_template(tmpl) + print(f"Processing {len(rows)} servers") + + data = _gen_data(rows, tmpl, args.prefix_names) + write_import_json(data, args.output_file) + print(f"Import with `pam project import -f={args.output_file}") + + +if __name__ == "__main__": + main()