diff --git a/examples/pam_import_generator_v2.py b/examples/pam_import_generator_v2.py new file mode 100644 index 000000000..6f257af2e --- /dev/null +++ b/examples/pam_import_generator_v2.py @@ -0,0 +1,311 @@ +#!/usr/bin/env python3 +""" +Generates JSON file ready to be imported by pam project import command. +This example generates JSON that creates one AD machine (pamDirectory) +with AD Admin user (pamUser) and multiple local machines (pamMachine) +configured with connections and rotation enabled and AD Admin as their admin. + +This script uses external CSV file (format: server_name,user_name,password) +and optionally an external JSON template with static pamDirectory and its pamUser and +a dynamic placeholder used for each pamMachine and its pamUser (from CSV) + +You can use any of the full set of options per user/machine type from our docs +https://github.com/Keeper-Security/Commander/blob/master/keepercommander/commands/pam_import/README.md +You can also run the script with --show-template option and use it as starting point. 
+ +Command line options: + -i, --input-file default = servers_to_import.csv + Specify the input file CSV: hostname,user,password + -o, --output-file default = pam_import.json + Specify the JSON output file + -t, --template-file Specify the JSON template file + -s, --show-template Show sample JSON template (overrides all options) + -p, --prefix-names Enable username prefixes (server1-admin vs admin) +""" + +from __future__ import annotations +import argparse +import copy +import json +import os +import sys +from csv import DictReader +from pathlib import Path +from typing import Any, Dict, List + +DEFAULT_IMPORT_TEMPLATE = { + "project": "PAM Project", + "shared_folder_users": { + "manage_users": True, + "manage_records": True, + "can_edit": True, + "can_share": True + }, + "shared_folder_resources": { + "manage_users": True, + "manage_records": True, + "can_edit": True, + "can_share": True + }, + "pam_configuration": { + "environment": "local", + "connections": "on", + "rotation": "on", + "graphical_session_recording": "on" + }, + "pam_data": { + "resources": [] + } +} + + +def _build_cli() -> argparse.ArgumentParser: + p = argparse.ArgumentParser( + description="Generate Keeper PAM import JSON file", + formatter_class=argparse.RawTextHelpFormatter, + ) + + p.add_argument("-i", "--input-file", default="servers_to_import.csv", + help="Specify the input file - " + "CSV with hostname,user,password (default: %(default)s)") + p.add_argument("-o", "--output-file", default="pam_import.json", + help="Specify the JSON output file (default: %(default)s)") + p.add_argument("-t", "--template-file", + help="Specify the JSON template file (default: %(default)s)") + p.add_argument("-s", "--show-template", action="store_true", + help="Show sample JSON template (overrides all options)") + p.add_argument("-p", "--prefix-names", action="store_true", + help="Enable username prefixes (server1-admin vs admin)") + + return p + + +def _load_template(path: str) -> Dict[str, Any]: + 
full_path = os.path.abspath(os.path.expanduser(path)) + if not os.path.isfile(full_path): + print(f"JSON template file not found: {path}") + print("Use --show-template option to get a sample template") + sys.exit(1) + + res = {} + with open(full_path, encoding="utf-8") as fp: + res = json.load(fp) + return res + + +def _read_csv(path: str) -> List[Dict[str, str]]: + full_path = os.path.abspath(os.path.expanduser(path)) + if not os.path.isfile(full_path): + print(f"CSV file not found: {path}", ) + sys.exit(2) + + with open(full_path, encoding="utf-8") as fp: + csv_data = list(DictReader(fp)) + # skip incomplete + valid_rows = [] + for i,obj in enumerate(csv_data): + host = obj.get("hostname",None) + username = obj.get("username",None) + user_path = obj.get("user_path",None) + if not host and not all([username,user_path]): + print(f"Row {i+1} incomplete - skipped") + else: + valid_rows.append(obj) + + return valid_rows + + +def _parse_fields(obj: Dict, type: str, tmpl=None): + templates = { + "rs":{ + "title":"", + "host":"", + "type":"pamMachine", + "pam_settings": { + "options": { + "rotation": "off", + "connections": "on", + "tunneling": "off", + "graphical_session_recording": "on" + }, + "connection":{} + }, + "users": [] + }, + "usr":{ + "rotation_settings": {} + } + } + res = templates.get(type,{}) + if tmpl: + res = tmpl + + for key in obj: + if obj[key] == "": continue + if key.startswith(type): + split_arg = key.split(".") + if len(split_arg)==2: + res[split_arg[1]] = obj[key] + elif len(split_arg)==3: + res[split_arg[1]][split_arg[2]] = obj[key] + elif len(split_arg)==4: + res[split_arg[1]][split_arg[2]][split_arg[3]] = obj[key] + return res + + +def _gen_data(csv_data: List[Dict[str, str]], + template: Dict[str, Any], + prefix_names: bool) -> Dict[str, Any]: + + _ = prefix_names # reserved for backward compatibility + data = copy.deepcopy(template) if template else DEFAULT_IMPORT_TEMPLATE + + # pop out pamMachine template + rsrs = data.get("pam_data", 
{}).get("resources") or [] + idx = next((i for i, item in enumerate(rsrs) if str(item.get("type")) == "pamMachine"), None) + tmpl = rsrs.pop(idx) if idx is not None else {} + rs_tmpl, usr_tmpl = None,None + if tmpl: + rs_tmpl = tmpl + usr_tmpl = tmpl.get("users",[None])[0] + rs_tmpl["users"] = [] + + seen: set[str] = set() + for i,obj in enumerate(csv_data): + host = obj.get("hostname",None) + + # filter machines + if not host: continue + if host in seen: + print(f"Duplicate hostname {host} on row {i+1} - skipped") + continue + seen.add(host) + + # create machine dict + mach = _parse_fields(obj,"rs",rs_tmpl) + mach["title"] = obj.get("title",host) + mach["host"] = host + if obj.get("type",None): + mach["type"] = obj["type"] + + if obj.get("folder_path",None): + mach["folder_path"] = obj["folder_path"] + + rsrs.append(mach) + + # Once all resources added, add pamUsers + seen = set() + for i,obj in enumerate(csv_data): + username = obj["username"] + password = obj.get("password","") + user_path = obj["user_path"] + + if not username: continue + if username in seen: + print(f"Duplicate username {username} on row {i+1} - skipped") + continue + seen.add(username) + + user = (_parse_fields(obj,"usr",usr_tmpl)) + if obj.get("folder_path",None): + user["folder_path"] = obj["folder_path"] + user_path_value = obj["user_path"] + username_value = obj["username"] + default_user_title = f"{user_path_value} - {username_value}" + user["title"] = obj.get("title", default_user_title) + user["type"] = "pamUser" + user["login"] = obj["username"] + user["password"] = password + + # Map user to resource + for rs in rsrs: + if rs["title"] == user_path: + rs["users"].append(user) + + data["pam_data"]["resources"] = rsrs + return data + + +def _write(fpath: Path, content: str): + with fpath.open("w", encoding="utf-8") as fp: + fp.write(content) + print(f"Wrote {fpath}") + + +def write_import_json(data: Dict[str, Any], path: str): + """ Generate JSON and save to file""" + content = 
json.dumps(data, indent=2) + _write(Path(path), content) + + +def prepare_template(template: Dict[str, Any]) -> None: + """ Prepare JSON template - populate missing defaults """ + tdic = DEFAULT_IMPORT_TEMPLATE + if "project" not in template: + template["project"] = tdic["project"] + if "shared_folder_users" not in template: + template["shared_folder_users"] = tdic["shared_folder_users"] + if "shared_folder_resources" not in template: + template["shared_folder_resources"] = tdic["shared_folder_resources"] + if "pam_configuration" not in template: + template["pam_configuration"] = tdic["pam_configuration"] + env = str(template["pam_configuration"].get("environment")) + if env != "local": + print(f"This script works only with pam_configuration.environment = local, currently it is set to \"{env}\"") + sys.exit(4) + if (str(template["pam_configuration"].get("connections")).lower() != "on" or + str(template["pam_configuration"].get("rotation")).lower() != "on"): + print("connections and rotation must be set to 'on' in pam_configuration section - adjusted") + template["pam_configuration"]["connections"] = "on" + template["pam_configuration"]["rotation"] = "on" + if "pam_data" not in template or not template["pam_data"].get("resources"): + print('"pam_data": { "resources": [] } - must be present and non-empty') + sys.exit(4) + res = template["pam_data"].get("resources") or [] + if len(res) != 2: + print("pam_data.resources[] - must define exactly two \"machines\": pamDirectory and pamUser") + sys.exit(4) + for i in (0, 1): + mach_type = res[i].get("type") or "" + mach_usrs = res[i].get("users") or [] + if ((i == 0 and mach_type != "pamDirectory") or (i == 1 and mach_type != "pamMachine") or not mach_usrs): + print("Expected first machine type=pamDirectory and second type=pamUser, and each to have at least one pamUser") + sys.exit(4) + if "pam_settings" not in res[i]: + print("Missing pam_settings section in pamDirectory or pamMachine") + sys.exit(4) + if ("connection" not 
in res[i]["pam_settings"] or + "administrative_credentials" not in res[i]["pam_settings"]["connection"]): + print("Missing pam_settings.connection.administrative_credentials in pamDirectory or pamMachine") + sys.exit(4) + # ToDo: verify admin users setup and cross references + contents = json.dumps(template, indent=2) + pos = contents.find("\"XXX:") + if pos != -1: + print(f"Template still missing required values: {contents[pos:pos+80]}") + sys.exit(4) + + +def main(): + """ Main function """ + args = _build_cli().parse_args() + + # --show-template overides any other options + if args.show_template: + print(DEFAULT_IMPORT_TEMPLATE) + sys.exit(0) + + rows = _read_csv(args.input_file) + tmpl = DEFAULT_IMPORT_TEMPLATE + if args.template_file: + tmpl = _load_template(args.template_file) + prepare_template(tmpl) + print(f"Processing {len(rows)} servers") + + data = _gen_data(rows, tmpl, args.prefix_names) + write_import_json(data, args.output_file) + print(f"Import with `pam project import -f={args.output_file}") + + +if __name__ == "__main__": + main() diff --git a/keepercommander/__init__.py b/keepercommander/__init__.py index e3e86789d..73c61cc2a 100644 --- a/keepercommander/__init__.py +++ b/keepercommander/__init__.py @@ -10,4 +10,4 @@ # Contact: commander@keepersecurity.com # -__version__ = '17.2.8' +__version__ = '17.2.9' diff --git a/keepercommander/__main__.py b/keepercommander/__main__.py index 54691109d..d2f454b2b 100644 --- a/keepercommander/__main__.py +++ b/keepercommander/__main__.py @@ -335,13 +335,13 @@ def main(from_package=False): params.server = resolved_server else: # Show error and valid options - print(f"\nError: '{opts.server}' is not a valid Keeper server.") - print('\nValid server codes:') - print(' Production: US, EU, AU, CA, JP, GOV') - print(' Dev: US_DEV, EU_DEV, AU_DEV, CA_DEV, JP_DEV, GOV_DEV') - print(' QA: US_QA, EU_QA, AU_QA, CA_QA, JP_QA, GOV_QA') - print('\nYou can also use the full hostname (e.g., keepersecurity.com, 
keepersecurity.eu)') - print('') + logging.error(f"\nError: '{opts.server}' is not a valid Keeper server.") + logging.error('\nValid server codes:') + logging.error(' Production: US, EU, AU, CA, JP, GOV') + logging.error(' Dev: US_DEV, EU_DEV, AU_DEV, CA_DEV, JP_DEV, GOV_DEV') + logging.error(' QA: US_QA, EU_QA, AU_QA, CA_QA, JP_QA, GOV_QA') + logging.error('\nYou can also use the full hostname (e.g., keepersecurity.com, keepersecurity.eu)') + logging.error('') sys.exit(1) if opts.user is not None: @@ -395,17 +395,17 @@ def main(from_package=False): # If no command provided, show helpful welcome message if not opts.command and not params.commands: - print('') - print('Keeper Commander - CLI-based vault and admin interface to the Keeper platform') - print('') - print('To get started:') - print(' keeper login Authenticate to Keeper') - print(' keeper shell Open interactive command shell') - print(' keeper supershell Open full-screen vault browser (TUI)') - print(' keeper -h Show help and available options') - print('') - print('Learn more at https://docs.keeper.io/en/keeperpam/commander-cli/overview') - print('') + logging.warning('') + logging.warning('Keeper Commander - CLI-based vault and admin interface to the Keeper platform') + logging.warning('') + logging.warning('To get started:') + logging.warning(' keeper login Authenticate to Keeper') + logging.warning(' keeper shell Open interactive command shell') + logging.warning(' keeper supershell Open full-screen vault browser (TUI)') + logging.warning(' keeper -h Show help and available options') + logging.warning('') + logging.warning('Learn more at https://docs.keeper.io/en/keeperpam/commander-cli/overview') + logging.warning('') return if isinstance(params.timedelay, int) and params.timedelay >= 1 and params.commands: diff --git a/keepercommander/auth/console_ui.py b/keepercommander/auth/console_ui.py index 0145d10b1..0a60b077b 100644 --- a/keepercommander/auth/console_ui.py +++ 
b/keepercommander/auth/console_ui.py @@ -3,6 +3,7 @@ import logging import pyperclip import re +import sys import webbrowser from typing import Optional, List @@ -12,6 +13,11 @@ from ..error import KeeperApiError +def _stderr(msg=''): + """Print interactive prompt text to stderr so it's always visible and never pollutes stdout.""" + print(msg, file=sys.stderr) + + class ConsoleLoginUi(login_steps.LoginUi): def __init__(self): self._show_device_approval_help = True @@ -23,38 +29,38 @@ def __init__(self): def on_device_approval(self, step): if self._show_device_approval_help: - logging.info(f"\n{Fore.YELLOW}Device Approval Required{Fore.RESET}\n") - logging.info(f"{Fore.CYAN}Select an approval method:{Fore.RESET}") - logging.info(f" {Fore.GREEN}1{Fore.RESET}. Email - Send approval link to your email") - logging.info(f" {Fore.GREEN}2{Fore.RESET}. Keeper Push - Send notification to an approved device") - logging.info(f" {Fore.GREEN}3{Fore.RESET}. 2FA Push - Send code via your 2FA method") - logging.info("") - logging.info(f" {Fore.GREEN}c{Fore.RESET}. Enter code - Enter a verification code") - logging.info(f" {Fore.GREEN}q{Fore.RESET}. Cancel login") - logging.info("") + _stderr(f"\n{Fore.YELLOW}Device Approval Required{Fore.RESET}\n") + _stderr(f"{Fore.CYAN}Select an approval method:{Fore.RESET}") + _stderr(f" {Fore.GREEN}1{Fore.RESET}. Email - Send approval link to your email") + _stderr(f" {Fore.GREEN}2{Fore.RESET}. Keeper Push - Send notification to an approved device") + _stderr(f" {Fore.GREEN}3{Fore.RESET}. 2FA Push - Send code via your 2FA method") + _stderr("") + _stderr(f" {Fore.GREEN}c{Fore.RESET}. Enter code - Enter a verification code") + _stderr(f" {Fore.GREEN}q{Fore.RESET}. 
Cancel login") + _stderr("") self._show_device_approval_help = False else: - logging.info(f"\n{Fore.YELLOW}Waiting for device approval.{Fore.RESET}") - logging.info(f"{Fore.CYAN}Check email, SMS, or push notification on the approved device.{Fore.RESET}") - logging.info(f"Enter {Fore.GREEN}c {Fore.RESET} to submit a verification code.\n") + _stderr(f"\n{Fore.YELLOW}Waiting for device approval.{Fore.RESET}") + _stderr(f"{Fore.CYAN}Check email, SMS, or push notification on the approved device.{Fore.RESET}") + _stderr(f"Enter {Fore.GREEN}c {Fore.RESET} to submit a verification code.\n") try: selection = input(f'{Fore.GREEN}Selection{Fore.RESET} (or Enter to check status): ').strip().lower() if selection == '1' or selection == 'email_send' or selection == 'es': step.send_push(login_steps.DeviceApprovalChannel.Email) - logging.info(f"\n{Fore.GREEN}Email sent to {step.username}{Fore.RESET}") - logging.info("Click the approval link in the email, then press Enter.\n") + _stderr(f"\n{Fore.GREEN}Email sent to {step.username}{Fore.RESET}") + _stderr("Click the approval link in the email, then press Enter.\n") elif selection == '2' or selection == 'keeper_push' or selection == 'kp': step.send_push(login_steps.DeviceApprovalChannel.KeeperPush) - logging.info(f"\n{Fore.GREEN}Push notification sent.{Fore.RESET}") - logging.info("Approve on your device, then press Enter.\n") + _stderr(f"\n{Fore.GREEN}Push notification sent.{Fore.RESET}") + _stderr("Approve on your device, then press Enter.\n") elif selection == '3' or selection == '2fa_send' or selection == '2fs': step.send_push(login_steps.DeviceApprovalChannel.TwoFactor) - logging.info(f"\n{Fore.GREEN}2FA code sent.{Fore.RESET}") - logging.info("Enter the code using option 'c'.\n") + _stderr(f"\n{Fore.GREEN}2FA code sent.{Fore.RESET}") + _stderr("Enter the code using option 'c'.\n") elif selection == 'c' or selection.startswith('c '): # Support both "c" (prompts for code) and "c " (code inline) @@ -67,23 +73,23 @@ def 
on_device_approval(self, step): # Try email code first, then 2FA try: step.send_code(login_steps.DeviceApprovalChannel.Email, code_input) - logging.info(f"{Fore.GREEN}Successfully verified email code.{Fore.RESET}") + _stderr(f"{Fore.GREEN}Successfully verified email code.{Fore.RESET}") except KeeperApiError: try: step.send_code(login_steps.DeviceApprovalChannel.TwoFactor, code_input) - logging.info(f"{Fore.GREEN}Successfully verified 2FA code.{Fore.RESET}") + _stderr(f"{Fore.GREEN}Successfully verified 2FA code.{Fore.RESET}") except KeeperApiError as e: logging.warning(f"{Fore.YELLOW}Invalid code. Please try again.{Fore.RESET}") elif selection.startswith("email_code="): code = selection.replace("email_code=", "") step.send_code(login_steps.DeviceApprovalChannel.Email, code) - logging.info(f"{Fore.GREEN}Successfully verified email code.{Fore.RESET}") + _stderr(f"{Fore.GREEN}Successfully verified email code.{Fore.RESET}") elif selection.startswith("2fa_code="): code = selection.replace("2fa_code=", "") step.send_code(login_steps.DeviceApprovalChannel.TwoFactor, code) - logging.info(f"{Fore.GREEN}Successfully verified 2FA code.{Fore.RESET}") + _stderr(f"{Fore.GREEN}Successfully verified 2FA code.{Fore.RESET}") elif selection == 'q': step.cancel() @@ -120,13 +126,13 @@ def on_two_factor(self, step): channels = step.get_channels() if self._show_two_factor_help: - logging.info(f"\n{Fore.YELLOW}Two-Factor Authentication Required{Fore.RESET}\n") - logging.info(f"{Fore.CYAN}Select your 2FA method:{Fore.RESET}") + _stderr(f"\n{Fore.YELLOW}Two-Factor Authentication Required{Fore.RESET}\n") + _stderr(f"{Fore.CYAN}Select your 2FA method:{Fore.RESET}") for i in range(len(channels)): channel = channels[i] - logging.info(f" {Fore.GREEN}{i+1}{Fore.RESET}. {ConsoleLoginUi.two_factor_channel_to_desc(channel.channel_type)} {channel.channel_name} {channel.phone}") - logging.info(f" {Fore.GREEN}q{Fore.RESET}. Cancel login") - logging.info("") + _stderr(f" {Fore.GREEN}{i+1}{Fore.RESET}. 
{ConsoleLoginUi.two_factor_channel_to_desc(channel.channel_type)} {channel.channel_name} {channel.phone}") + _stderr(f" {Fore.GREEN}q{Fore.RESET}. Cancel login") + _stderr("") self._show_device_approval_help = False channel = None # type: Optional[login_steps.TwoFactorChannelInfo] @@ -153,7 +159,7 @@ def on_two_factor(self, step): mfa_prompt = True try: step.send_push(channel.channel_uid, login_steps.TwoFactorPushAction.TextMessage) - logging.info(f'\n{Fore.GREEN}SMS sent successfully.{Fore.RESET}\n') + _stderr(f'\n{Fore.GREEN}SMS sent successfully.{Fore.RESET}\n') except KeeperApiError: logging.warning("Was unable to send SMS.") elif channel.channel_type == login_steps.TwoFactorChannel.SecurityKey: @@ -176,7 +182,7 @@ def on_two_factor(self, step): } step.duration = login_steps.TwoFactorDuration.EveryLogin step.send_code(channel.channel_uid, json.dumps(signature)) - logging.info(f'{Fore.GREEN}Security key verified.{Fore.RESET}') + _stderr(f'{Fore.GREEN}Security key verified.{Fore.RESET}') except ImportError as e: from ..yubikey import display_fido2_warning @@ -226,7 +232,7 @@ def on_two_factor(self, step): 'Ask Every 24 hours' if mfa_expiration == login_steps.TwoFactorDuration.Every24Hours else 'Ask Every 30 days', "|".join(allowed_expirations)) - logging.info(prompt_exp) + _stderr(prompt_exp) try: answer = input(f'\n{Fore.GREEN}Enter 2FA Code: {Fore.RESET}') @@ -262,16 +268,16 @@ def on_two_factor(self, step): step.duration = mfa_expiration try: step.send_code(channel.channel_uid, otp_code) - logging.info(f'{Fore.GREEN}2FA code verified.{Fore.RESET}') + _stderr(f'{Fore.GREEN}2FA code verified.{Fore.RESET}') except KeeperApiError: logging.warning(f'{Fore.YELLOW}Invalid 2FA code. 
Please try again.{Fore.RESET}') def on_password(self, step): if self._show_password_help: - logging.info(f'{Fore.CYAN}Enter master password for {Fore.WHITE}{step.username}{Fore.RESET}') + _stderr(f'{Fore.CYAN}Enter master password for {Fore.WHITE}{step.username}{Fore.RESET}') if self._failed_password_attempt > 0: - logging.info(f'{Fore.YELLOW}Forgot password? Type "recover"{Fore.RESET}') + _stderr(f'{Fore.YELLOW}Forgot password? Type "recover"{Fore.RESET}') password = getpass.getpass(prompt=f'{Fore.GREEN}Password: {Fore.RESET}', stream=None) if not password: @@ -298,19 +304,19 @@ def on_sso_redirect(self, step): wb = None sp_url = step.sso_login_url - logging.info(f'\n{Fore.CYAN}SSO Login URL:{Fore.RESET}\n{sp_url}\n') + _stderr(f'\n{Fore.CYAN}SSO Login URL:{Fore.RESET}\n{sp_url}\n') if self._show_sso_redirect_help: - logging.info(f'{Fore.CYAN}Navigate to SSO Login URL with your browser and complete login.{Fore.RESET}') - logging.info(f'{Fore.CYAN}Copy the returned SSO Token and paste it here.{Fore.RESET}') - logging.info(f'{Fore.YELLOW}TIP: Click "Copy login token" button on the SSO Connect page.{Fore.RESET}') - logging.info('') - logging.info(f' {Fore.GREEN}a{Fore.RESET}. SSO User with a Master Password') - logging.info(f' {Fore.GREEN}c{Fore.RESET}. Copy SSO Login URL to clipboard') + _stderr(f'{Fore.CYAN}Navigate to SSO Login URL with your browser and complete login.{Fore.RESET}') + _stderr(f'{Fore.CYAN}Copy the returned SSO Token and paste it here.{Fore.RESET}') + _stderr(f'{Fore.YELLOW}TIP: Click "Copy login token" button on the SSO Connect page.{Fore.RESET}') + _stderr('') + _stderr(f' {Fore.GREEN}a{Fore.RESET}. SSO User with a Master Password') + _stderr(f' {Fore.GREEN}c{Fore.RESET}. Copy SSO Login URL to clipboard') if wb: - logging.info(f' {Fore.GREEN}o{Fore.RESET}. Open SSO Login URL in web browser') - logging.info(f' {Fore.GREEN}p{Fore.RESET}. Paste SSO Token from clipboard') - logging.info(f' {Fore.GREEN}q{Fore.RESET}. 
Cancel SSO login') - logging.info('') + _stderr(f' {Fore.GREEN}o{Fore.RESET}. Open SSO Login URL in web browser') + _stderr(f' {Fore.GREEN}p{Fore.RESET}. Paste SSO Token from clipboard') + _stderr(f' {Fore.GREEN}q{Fore.RESET}. Cancel SSO login') + _stderr('') self._show_sso_redirect_help = False while True: @@ -329,7 +335,7 @@ def on_sso_redirect(self, step): token = None try: pyperclip.copy(sp_url) - logging.info('SSO Login URL is copied to clipboard.') + _stderr('SSO Login URL is copied to clipboard.') except: logging.warning('Failed to copy SSO Login URL to clipboard.') elif token == 'o': @@ -355,14 +361,14 @@ def on_sso_redirect(self, step): def on_sso_data_key(self, step): if self._show_sso_data_key_help: - logging.info(f'\n{Fore.YELLOW}Device Approval Required for SSO{Fore.RESET}\n') - logging.info(f'{Fore.CYAN}Select an approval method:{Fore.RESET}') - logging.info(f' {Fore.GREEN}1{Fore.RESET}. Keeper Push - Send a push notification to your device') - logging.info(f' {Fore.GREEN}2{Fore.RESET}. Admin Approval - Request your admin to approve this device') - logging.info('') - logging.info(f' {Fore.GREEN}r{Fore.RESET}. Resume SSO login after device is approved') - logging.info(f' {Fore.GREEN}q{Fore.RESET}. Cancel SSO login') - logging.info('') + _stderr(f'\n{Fore.YELLOW}Device Approval Required for SSO{Fore.RESET}\n') + _stderr(f'{Fore.CYAN}Select an approval method:{Fore.RESET}') + _stderr(f' {Fore.GREEN}1{Fore.RESET}. Keeper Push - Send a push notification to your device') + _stderr(f' {Fore.GREEN}2{Fore.RESET}. Admin Approval - Request your admin to approve this device') + _stderr('') + _stderr(f' {Fore.GREEN}r{Fore.RESET}. Resume SSO login after device is approved') + _stderr(f' {Fore.GREEN}q{Fore.RESET}. 
Cancel SSO login') + _stderr('') self._show_sso_data_key_help = False while True: diff --git a/keepercommander/cli.py b/keepercommander/cli.py index 05ba08aa9..64593fd26 100644 --- a/keepercommander/cli.py +++ b/keepercommander/cli.py @@ -476,7 +476,7 @@ def force_quit(): subprocess.run('reset') elif os.name == 'nt': subprocess.run('cls') - print('Auto-logout timer activated.') + logging.warning('Auto-logout timer activated.') except: pass os._exit(0) @@ -734,7 +734,7 @@ def _(event): try: LoginCommand().execute(params, email=params.user, password=params.password, new_login=new_login) except KeyboardInterrupt: - print('') + logging.info('') except EOFError: return 0 except Exception as e: @@ -742,10 +742,10 @@ def _(event): else: if params.device_token: logging.info('Region: %s', params.server) - print() + logging.info('') logging.info("You are not logged in.") - print(f'Type {Fore.GREEN}login {Fore.RESET} to authenticate or {Fore.GREEN}server {Fore.RESET} to change data centers.') - print(f'Type {Fore.GREEN}?{Fore.RESET} for a list of all available commands.') + logging.info(f'Type {Fore.GREEN}login {Fore.RESET} to authenticate or {Fore.GREEN}server {Fore.RESET} to change data centers.') + logging.info(f'Type {Fore.GREEN}?{Fore.RESET} for a list of all available commands.') # Mark that we're in the shell loop (used by supershell to know if it should start a shell on exit) params._in_shell_loop = True diff --git a/keepercommander/command_categories.py b/keepercommander/command_categories.py index c28d7b1ed..a5046214a 100644 --- a/keepercommander/command_categories.py +++ b/keepercommander/command_categories.py @@ -83,7 +83,7 @@ # Service Mode REST API 'Service Mode REST API': { 'service-create', 'service-add-config', 'service-start', 'service-stop', 'service-status', - 'service-config-add', 'service-docker-setup', 'slack-app-setup' + 'service-config-add', 'service-docker-setup', 'slack-app-setup', 'teams-app-setup' }, # Email Configuration Commands diff --git 
a/keepercommander/commands/base.py b/keepercommander/commands/base.py index 953bf9d86..d075197a6 100644 --- a/keepercommander/commands/base.py +++ b/keepercommander/commands/base.py @@ -755,6 +755,10 @@ def execute_args(self, params, args, **kwargs): envvars = params.environment_variables args = '' if args is None else args if parser: + # Update prog to match alias name (e.g., 'find-password' instead of 'clipboard-copy') + alias_cmd = d.get('command') + if alias_cmd and alias_cmd != parser.prog: + parser.prog = alias_cmd args = expand_cmd_args(args, envvars) args = normalize_output_param(args) if self.support_extra_parameters(): diff --git a/keepercommander/commands/compliance.py b/keepercommander/commands/compliance.py index 9ed77f17c..ffbff8930 100644 --- a/keepercommander/commands/compliance.py +++ b/keepercommander/commands/compliance.py @@ -315,12 +315,13 @@ def validate(self, params): # type: (KeeperParams) -> None class BaseComplianceReportCommand(EnterpriseCommand): - def __init__(self, report_headers, allow_no_opts=True, prelim_only=False): + def __init__(self, report_headers, allow_no_opts=True, prelim_only=False, needs_full_sharing_data=False): super(BaseComplianceReportCommand, self).__init__() self.title = None self.report_headers = report_headers self.allow_no_opts = allow_no_opts self.prelim_only = prelim_only + self.needs_full_sharing_data = needs_full_sharing_data self.group_by_column = None def get_parser(self): # type: () -> Optional[argparse.ArgumentParser] @@ -390,7 +391,7 @@ def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any get_sox_data_fn = sox.get_prelim_data if self.prelim_only else sox.get_compliance_data fn_args = [params, enterprise_id] if self.prelim_only else [params, node_id, enterprise_id] fn_kwargs = {'rebuild': rebuild, 'min_updated': min_data_ts, 'no_cache': no_cache, 'shared_only': shared_only, - 'user_filter': user_filter} + 'user_filter': None if self.needs_full_sharing_data else user_filter} sd = 
get_sox_data_fn(*fn_args, **fn_kwargs) kwargs['_user_filter'] = user_filter report_fmt = kwargs.get('format', 'table') @@ -670,7 +671,8 @@ def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any class ComplianceRecordAccessReportCommand(BaseComplianceReportCommand): def __init__(self): - super(ComplianceRecordAccessReportCommand, self).__init__([], allow_no_opts=True, prelim_only=False) + super(ComplianceRecordAccessReportCommand, self).__init__([], allow_no_opts=True, prelim_only=False, + needs_full_sharing_data=True) self.group_by_column = 0 def get_parser(self): # type: () -> Optional[argparse.ArgumentParser] @@ -678,20 +680,6 @@ def get_parser(self): # type: () -> Optional[argparse.ArgumentParser] def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any kwargs['shared'] = True - emails = kwargs.get('email') or ['@all'] - if '@all' not in emails: - enterprise_users = params.enterprise.get('users', []) - id_to_email = {eu.get('enterprise_user_id'): eu.get('username') for eu in enterprise_users} - resolved_emails = [] - for ref in emails: - if ref.isdigit(): - email = id_to_email.get(int(ref)) - if email: - resolved_emails.append(email) - else: - resolved_emails.append(ref) - if resolved_emails: - kwargs['username'] = resolved_emails return super().execute(params, **kwargs) def generate_report_data(self, params, kwargs, sox_data, report_fmt, node, root_node): @@ -793,8 +781,8 @@ def compile_report_data(rec_ids): use_spinner=use_spinner, stale_rec_ids=stale_rec_ids) - # Update last_aging_refreshed for stale users - if user_filter_uids is not None: + # Update last_aging_refreshed for stale users (only when aging was actually fetched) + if rec_ids and user_filter_uids is not None: now_ts = int(datetime.datetime.now().timestamp()) updated_users = [] for uid in user_filter_uids: @@ -884,7 +872,8 @@ def get_records_accessed(emails, limit_to_vault=False): class ComplianceSummaryReportCommand(BaseComplianceReportCommand): def __init__(self): 
headers = ['email', 'total_items', 'total_owned', 'active_owned', 'deleted_owned'] - super(ComplianceSummaryReportCommand, self).__init__(headers, allow_no_opts=True, prelim_only=False) + super(ComplianceSummaryReportCommand, self).__init__(headers, allow_no_opts=True, prelim_only=False, + needs_full_sharing_data=True) def get_parser(self): # type: () -> Optional[argparse.ArgumentParser] return summary_report_parser diff --git a/keepercommander/commands/enterprise.py b/keepercommander/commands/enterprise.py index f4d58ddbf..960f89dd6 100644 --- a/keepercommander/commands/enterprise.py +++ b/keepercommander/commands/enterprise.py @@ -1014,7 +1014,10 @@ def tree_node(node): row.append(len(enforcements)) elif column == 'enforcements': enforcements = role_enforcements.get(role_id, {}) - row.append(list(enforcements.keys())) + if kwargs.get('format') == 'json': + row.append(dict(enforcements)) + else: + row.append(list(enforcements.keys())) elif column == 'managed_node_count': row.append(len(managed_nodes_list)) elif column == 'managed_nodes': @@ -1147,6 +1150,10 @@ def execute(self, params, **kwargs): n.append(node) else: node_lookup[node_name] = [n, node] + if not node.get('parent_id'): + ent_name = params.enterprise['enterprise_name'].lower() + if ent_name not in node_lookup: + node_lookup[ent_name] = node parent_id = None if kwargs.get('parent'): diff --git a/keepercommander/commands/pam_import/base.py b/keepercommander/commands/pam_import/base.py index 16bcfb633..9bd9ab8d8 100644 --- a/keepercommander/commands/pam_import/base.py +++ b/keepercommander/commands/pam_import/base.py @@ -850,6 +850,7 @@ def __init__(self): self.attachments = None # fileRef self.scripts = None # script self.rotation_settings = None # DAG: rotation settings + self._is_pam_directory_user: bool = False # True when loaded as a user of pamDirectory (AD); empty password is valid @classmethod def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParams] = None): @@ -931,8 
+932,13 @@ def create_record(self, params, folder_uid): return uid def validate_record(self): - if not self.password: - logging.warning("PAM User is missing required field `login`") + # For pamDirectory (AD) users, empty password is valid; do not warn (debug only). Otherwise require password. + password_empty = not (self.password and isinstance(self.password, str) and self.password.strip()) + if getattr(self, "_is_pam_directory_user", False): + if password_empty: + logging.debug("PAM User (pamDirectory/AD) has empty password (valid for AD).") + elif password_empty: + logging.warning("PAM User is missing required field `password`") if not self.rotation_settings: logging.debug("PAM User is missing rotation settings") if isinstance(self.rotation_settings, PamRotationSettingsObject): @@ -1565,6 +1571,7 @@ def load(cls, data: Union[str, dict], rotation_params: Optional[PamRotationParam continue usr = PamUserObject.load(user, rotation_params) if usr: + usr._is_pam_directory_user = True # AD users may have empty password obj.users.append(usr) else: logging.warning(f"""Warning: PAM Directory "{obj.title}" with empty users section.""") @@ -3075,6 +3082,18 @@ def is_admin_external(mach) -> bool: res = True return res +def mark_local_users_allowing_empty_password_for_external_admin(resources) -> None: + """When a pamMachine/pamDatabase/pamDirectory has is_admin_external + (admin_credential is AD/pamDirectory user, e.g. dot-separated 'AD1.Admin'), + The machine's local users may have empty passwords (AD admin rotates them). 
+ Mark those users so validation does not warn.""" + for mach in resources or []: + if not getattr(mach, "is_admin_external", False): + continue + if hasattr(mach, "users") and isinstance(mach.users, list): + for u in mach.users: + u._is_pam_directory_user = True + def get_admin_credential(obj, uid:bool=False) -> str: # Get one of pam_settings.connection.{userRecords,userRecordUid} value: str = "" @@ -3134,14 +3153,15 @@ def set_launch_record_uid(obj, uid: str) -> bool: return False def find_external_user(mach, machines, title: str) -> list: - # Local pamMachine could reference pamDirectory AD user as its admin + # pamMachine/pamDatabase/pamDirectory can reference pamDirectory AD user as admin (dot-separated e.g. "AD1.Admin") res = [] - if title and machines and mach.type == "pamMachine": - mu = title.split(".", 1) # machine/user titles + mach_type = getattr(mach, "type", "") or "" + if title and machines and mach_type in ("pamMachine", "pamDatabase", "pamDirectory"): + mu = title.split(".", 1) # resource/user titles (e.g. 
"AD1"."Admin") mname = mu[0] if len(mu) > 1 else "" uname = mu[1] if len(mu) > 1 else mu[0] for m in machines: - if m.type == "pamDirectory" and (not mname or mname == m.title): + if getattr(m, "type", "") == "pamDirectory" and (not mname or mname == getattr(m, "title", None)): res.extend(search_machine(m, uname) or []) return res diff --git a/keepercommander/commands/pam_import/edit.py b/keepercommander/commands/pam_import/edit.py index d4a261d43..881887c56 100644 --- a/keepercommander/commands/pam_import/edit.py +++ b/keepercommander/commands/pam_import/edit.py @@ -37,6 +37,7 @@ get_launch_credential, get_sftp_attribute, is_admin_external, + mark_local_users_allowing_empty_password_for_external_admin, parse_command_options, resolve_domain_admin, resolve_script_creds, @@ -52,12 +53,14 @@ from ..record_edit import RecordUploadAttachmentCommand from ..tunnel.port_forward.TunnelGraph import TunnelDAG from ..tunnel.port_forward.tunnel_helpers import get_keeper_tokens +from ..tunnel_and_connections import PAMTunnelEditCommand from ... 
import api, crypto, utils, vault, record_management from ...display import bcolors from ...error import CommandError from ...importer import imp_exp from ...importer.importer import SharedFolder, Permission from ...keeper_dag import EdgeType +from ...keeper_dag.types import RefType from ...params import LAST_FOLDER_UID, LAST_SHARED_FOLDER_UID from ...proto import record_pb2, APIRequest_pb2, enterprise_pb2 from ...recordv3 import RecordV3 @@ -72,7 +75,7 @@ class PAMProjectImportCommand(Command): parser.add_argument("--sample-data", "-s", required=False, dest="sample_data", action="store_true", default=False, help="Generate sample data.") parser.add_argument("--show-template", "-t", required=False, dest="show_template", action="store_true", default=False, help="Print JSON template required for manual import.") # parser.add_argument("--force", "-e", required=False, dest="force", action="store_true", default=False, help="Force data import (re/configure later)") - parser.add_argument("--output", "-o", required=False, dest="output", action="store", choices=["token", "base64", "json"], default="base64", help="Output format (token: one-time token, config: base64/json)") + parser.add_argument("--output", "-o", required=False, dest="output", action="store", choices=["token", "base64", "json", "k8s"], default="base64", help="Output format (token: one-time token, config: base64/json/k8s)") def get_parser(self): return PAMProjectImportCommand.parser @@ -351,7 +354,8 @@ def process_gateway(self, params, project: dict) -> dict: return res # Create new Gateway - PAMCreateGatewayCommand() - token_format = None if project["options"]["output"] == "token" else "b64" + output_fmt = project["options"]["output"] + token_format = None if output_fmt == "token" else ("k8s" if output_fmt == "k8s" else "b64") ksm_app_uid = project["ksm_app"]["app_uid"] gw = self.create_gateway( params, @@ -363,8 +367,9 @@ def process_gateway(self, params, project: dict) -> dict: res["gateway_token"] = 
gw[0].get("oneTimeToken", "") if gw and gw_names else "" # OTT else: res["gateway_token"] = gw[0].get("config", "") if gw and gw_names else "" # Config - if project["options"]["output"] == "json": + if output_fmt == "json": res["gateway_token"] = json.loads(utils.base64_url_decode(res["gateway_token"])) + # k8s: config is already Kubernetes Secret YAML string; base64: keep as-is res["gateway_device_token"] = gw[0].get("deviceToken", "") if gw and gw_names else "" # controller_uid is not returned by vault/app_client_add @@ -377,6 +382,7 @@ def process_gateway(self, params, project: dict) -> dict: return res def process_pam_config(self, params, project: dict) -> dict: + # Local import to avoid circular import with discoveryrotation from ..discoveryrotation import PAMConfigurationNewCommand res:Dict[str, Any] = { "pam_config_name_target": "", @@ -517,9 +523,8 @@ def generate_sample_data(self, params, project: dict): def generate_discovery_playground_data(self, params, project: dict): """ Generate data that works with discovery-playground docker setup """ - from ..tunnel_and_connections import PAMTunnelEditCommand + # Local import to avoid circular import with discoveryrotation from ..discoveryrotation import PAMCreateRecordRotationCommand - # PUBLIC_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0bH13XfBiKcej3/W"\ # "mnc7GYbx+B+hmfYTaDFqfJ/vEGy3HTSz2t5nDb3+S1clBcCmse5FzEA7aXC3cZXurGBH"\ # "irz2Ud8wCL2t95cJnrkzfft7lsILnchm0J0Y0TyDW42gLj1JWh/E5qQyUxF0F6xEBKcy"\ @@ -1358,9 +1363,8 @@ def verify_users_and_teams(self, params, users_and_teams): def process_data(self, params, project): """Process project data (JSON)""" - from ..tunnel_and_connections import PAMTunnelEditCommand + # Local import to avoid circular import with discoveryrotation from ..discoveryrotation import PAMCreateRecordRotationCommand - # users section is mostly RBI login users, but occasional "disconnected" # PAM User (ex. 
NOOP rotation) requires explicit record type to be set # also for shared users b/n ssh, vnc, rdp on same host (one pamMachine each) @@ -1547,6 +1551,9 @@ def process_data(self, params, project): if uid: mach.rbi_settings.connection.httpCredentialsUid = [uid] + # Local users on machines/databases/directories with external AD admin may have empty passwords (AD rotates them). + mark_local_users_allowing_empty_password_for_external_admin(resources) + for usr in users: # resolve user.rotation_settings.resource - "iam_user", "scripts_only" if (usr and hasattr(usr, "rotation_settings") and usr.rotation_settings @@ -1632,6 +1639,7 @@ def process_data(self, params, project): # bugfix: RBI=True also needs connections=True to enable RBI (in web vault) if args.get("remote_browser_isolation", False) is True: args["connections"] = True + args["v_type"] = RefType.PAM_BROWSER tdag.set_resource_allowed(**args) else: # machine/db/directory args = parse_command_options(mach, True) @@ -1641,6 +1649,8 @@ def process_data(self, params, project): tdag.link_user_to_resource(admin_uid, mach.uid, is_admin=True, belongs_to=False) args = parse_command_options(mach, False) args["meta_version"] = 1 + _rtype = (getattr(mach, "type", "") or "").lower() + args["v_type"] = RefType.PAM_DIRECTORY if _rtype == "pamdirectory" else RefType.PAM_DATABASE if _rtype == "pamdatabase" else RefType.PAM_MACHINE tdag.set_resource_allowed(**args) # After setting allowedSettings, save JIT settings if present diff --git a/keepercommander/commands/pam_import/extend.py b/keepercommander/commands/pam_import/extend.py index 495e93523..c8a0127c0 100644 --- a/keepercommander/commands/pam_import/extend.py +++ b/keepercommander/commands/pam_import/extend.py @@ -15,6 +15,7 @@ import logging import os.path import re +from types import SimpleNamespace from itertools import chain from typing import Any, Dict, Optional, List @@ -26,13 +27,11 @@ PAM_RESOURCES_RECORD_TYPES, PamUserObject, LoginUserObject, - PamBaseMachineParser, 
PamMachineObject, PamDatabaseObject, PamDirectoryObject, PamRemoteBrowserObject, PamRotationParams, - PamRotationSettingsObject, add_pam_scripts, find_external_user, find_user, @@ -40,19 +39,30 @@ get_launch_credential, get_sftp_attribute, is_admin_external, + mark_local_users_allowing_empty_password_for_external_admin, parse_command_options, + resolve_domain_admin, resolve_script_creds, set_launch_record_uid, set_sftp_uid, set_user_record_uid ) +from .keeper_ai_settings import ( + set_resource_jit_settings, + set_resource_keeper_ai_settings, + refresh_meta_to_latest, + refresh_link_to_config_to_latest, +) +from ...keeper_dag import EdgeType +from ...keeper_dag.types import RefType from ..base import Command from ..ksm import KSMCommand from ..pam import gateway_helper from ..pam.config_helper import configuration_controller_get from ..tunnel.port_forward.TunnelGraph import TunnelDAG from ..tunnel.port_forward.tunnel_helpers import get_keeper_tokens -from ... import api, crypto, utils, vault, vault_extensions +from ..tunnel_and_connections import PAMTunnelEditCommand +from ... import api, crypto, utils, vault, vault_extensions, record_management from ...display import bcolors from ...error import CommandError from ...params import LAST_FOLDER_UID, LAST_SHARED_FOLDER_UID @@ -291,7 +301,7 @@ def _get_ksm_app_record_uids(params, ksm_shared_folders: list) -> set: def _get_records_in_folder(params, folder_uid: str): - """Return list of (record_uid, title, record_type) for records in folder_uid. + """Return list of (record_uid, title, record_type, login) for records in folder_uid. record_type from record for autodetect (e.g. 
pamUser, pamMachine, login).""" subfolder_record_cache = getattr(params, "subfolder_record_cache", None) or {} result = [] @@ -302,14 +312,20 @@ def _get_records_in_folder(params, folder_uid: str): rtype = "" if hasattr(rec, "record_type"): rtype = getattr(rec, "record_type", "") or "" - result.append((ruid, title, rtype)) + login = "" + fields = getattr(rec, "fields", None) + if isinstance(fields, list): + field = next((x for x in fields if getattr(x, "type", "") == "login"), None) + if field and hasattr(field, "get_default_value"): + login = (field.get_default_value() or "") or "" + result.append((ruid, title, rtype, login)) except Exception: pass return result def _get_all_ksm_app_records(params, ksm_shared_folders: list) -> list: - """Return list of (record_uid, title, record_type) for every record in any folder under KSM app.""" + """Return list of (record_uid, title, record_type, login) for every record in any folder under KSM app.""" folder_uids = _collect_all_folder_uids_under_ksm(ksm_shared_folders) out = [] for fuid in folder_uids: @@ -653,7 +669,7 @@ def process_folders(self, params, project: dict) -> dict: if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: for path, _ in good_paths: tag = "existing" if path in existing_paths_set else "new" - print(f" [DEBUG] [{tag}] {path}") + logging.debug(f" [DEBUG] [{tag}] {path}") folders_out["path_to_folder_uid"] = path_to_folder_uid folders_out["good_paths"] = good_paths @@ -850,10 +866,13 @@ def resolve_one(obj, parent_machine=None): ) if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: for t in autogenerated_titles: - print(f" [DEBUG] autogenerated title: {t}") + logging.debug(f" [DEBUG] autogenerated title: {t}") machines = [x for x in resources if not isinstance(x, PamRemoteBrowserObject)] pam_directories = [x for x in machines if (getattr(x, "type", "") or "").lower() == "pamdirectory"] + # Augment with vault records so resolve_script_creds can resolve additional_credentials from vault + 
users_for_scripts = list(users) + [SimpleNamespace(uid=r[0], title=r[1] or "", login=(r[3] if len(r) > 3 else "") or "") for r in all_ksm_records if ((r[2] or "").lower() in ("pamuser", "login"))] + resources_for_scripts = list(resources) + [SimpleNamespace(uid=r[0], title=r[1] or "", login=(r[3] if len(r) > 3 else "") or "") for r in all_ksm_records if (r[2] or "").lower() in ("pammachine", "pamdatabase", "pamdirectory")] for mach in resources: if not mach: continue @@ -865,10 +884,28 @@ def resolve_one(obj, parent_machine=None): ruids = ruids or [x for x in machines if getattr(x, "login", None) == sftp_res] if len(ruids) == 1 and getattr(ruids[0], "uid", ""): set_sftp_uid(mach, "sftpResourceUid", ruids[0].uid) + elif len(ruids) > 1: + logging.warning(f"{bcolors.WARNING}Multiple records match sftpResource '{sftp_res}' for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") + else: + ref = (sftp_res or "").strip() + vault_matches = [r for r in all_ksm_records if (r[1] or "").strip() == ref and (r[2] or "").lower() in ("pammachine", "pamdatabase", "pamdirectory")] + if len(vault_matches) == 1: + set_sftp_uid(mach, "sftpResourceUid", vault_matches[0][0]) + elif len(vault_matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple vault records match sftpResource '{ref}' for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") if sftp_user: ruids = find_user(mach, users, sftp_user) or find_user(machines, users, sftp_user) if len(ruids) == 1 and getattr(ruids[0], "uid", ""): set_sftp_uid(mach, "sftpUserUid", ruids[0].uid) + elif len(ruids) > 1: + logging.warning(f"{bcolors.WARNING}Multiple records match sftpUser '{sftp_user}' for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") + else: + ref = (sftp_user or "").strip() + vault_matches = [r for r in all_ksm_records if ((r[1] or "").strip() == ref or (len(r) > 3 and (r[3] or "").strip() == ref)) and (r[2] or "").lower() in ("pamuser", "login")] + if len(vault_matches) == 1: + 
set_sftp_uid(mach, "sftpUserUid", vault_matches[0][0]) + elif len(vault_matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple vault records match sftpUser '{ref}' for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") if admin_cred: ruids = find_user(mach, users, admin_cred) is_external = False @@ -877,19 +914,52 @@ def resolve_one(obj, parent_machine=None): is_external = True if len(ruids) == 1 and getattr(ruids[0], "uid", ""): set_user_record_uid(mach, ruids[0].uid, is_external) + elif len(ruids) > 1: + logging.warning(f"{bcolors.WARNING}Multiple import records match administrative_credentials for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") + # Fallback only when no match in import (len(ruids) == 0) + elif len(ruids) == 0: + ref = (admin_cred or "").strip() + vault_matches = [r for r in all_ksm_records if ((r[1] or "").strip() == ref or (len(r) > 3 and (r[3] or "").strip() == ref)) and (r[2] or "").lower() in ("pamuser", "login")] + if len(vault_matches) == 1: + set_user_record_uid(mach, vault_matches[0][0], False) + elif len(vault_matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple vault records match administrative_credentials '{ref}' for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") launch_cred = get_launch_credential(mach) if launch_cred and not isinstance(mach, PamRemoteBrowserObject): ruids = find_user(mach, users, launch_cred) or find_external_user(mach, machines, launch_cred) if len(ruids) == 1 and getattr(ruids[0], "uid", ""): set_launch_record_uid(mach, ruids[0].uid) - if mach.pam_settings and getattr(mach.pam_settings, "jit_settings", None): - jit = mach.pam_settings.jit_settings - ref = getattr(jit, "pam_directory_record", None) or "" - if ref and isinstance(ref, str) and ref.strip(): - matches = [x for x in pam_directories if getattr(x, "title", None) == ref.strip()] - if len(matches) == 1: + elif len(ruids) > 1: + logging.warning(f"{bcolors.WARNING}Multiple import records match 
launch_credentials for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") + # Fallback only when no match in import (len(ruids) == 0) + elif len(ruids) == 0: + ref = (launch_cred or "").strip() + vault_matches = [r for r in all_ksm_records if ((r[1] or "").strip() == ref or (len(r) > 3 and (r[3] or "").strip() == ref)) and (r[2] or "").lower() in ("pamuser", "login")] + if len(vault_matches) == 1: + set_launch_record_uid(mach, vault_matches[0][0]) + elif len(vault_matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple vault records match launch_credentials '{ref}' for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") + # jit_settings.pam_directory_record -> pam_directory_uid (pamDirectory in pam_data.resources by title) + # RBI has rbi_settings only (no pam_settings.jit_settings) + ps = getattr(mach, "pam_settings", None) + jit = getattr(ps, "jit_settings", None) if ps else None + if jit and getattr(jit, "pam_directory_record", None): + ref = (jit.pam_directory_record or "").strip() + if ref: + matches = [x for x in pam_directories if getattr(x, "title", None) == ref] + if len(matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple pamDirectory matches for jit_settings.pam_directory_record '{ref}' in {getattr(mach, 'title', mach)}; using first.{bcolors.ENDC}") + if len(matches) == 0: + vault_matches = [r for r in all_ksm_records if (r[1] or "").strip() == ref and (r[2] or "").lower() == "pamdirectory"] + if len(vault_matches) == 1: + jit.pam_directory_uid = vault_matches[0][0] + elif len(vault_matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple vault pamDirectory matches for jit_settings.pam_directory_record '{ref}' in {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") + else: + logging.error(f"jit_settings.pam_directory_record '{ref}' for '{getattr(mach, 'title', mach)}': no pamDirectory record found in JSON or vault. 
Match by title.") + else: jit.pam_directory_uid = matches[0].uid - resolve_script_creds(mach, users, resources) + resolve_script_creds(mach, users_for_scripts, resources_for_scripts) if hasattr(mach, "users") and isinstance(mach.users, list): for usr in mach.users: if usr and hasattr(usr, "rotation_settings") and usr.rotation_settings: @@ -898,7 +968,7 @@ def resolve_one(obj, parent_machine=None): usr.rotation_settings.resourceUid = mach.uid elif rot in ("iam_user", "scripts_only"): usr.rotation_settings.resourceUid = pam_cfg_uid - resolve_script_creds(usr, users, resources) + resolve_script_creds(usr, users_for_scripts, resources_for_scripts) if hasattr(mach, "rbi_settings") and getattr(mach.rbi_settings, "connection", None): conn = mach.rbi_settings.connection if getattr(conn, "protocol", None) and str(getattr(conn.protocol, "value", "") or "").lower() == "http": @@ -909,19 +979,38 @@ def resolve_one(obj, parent_machine=None): matches = matches or [x for x in users if getattr(x, "login", None) == cred] if len(matches) == 1 and getattr(matches[0], "uid", ""): mach.rbi_settings.connection.httpCredentialsUid = [matches[0].uid] + elif len(matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple records match RBI httpCredentials '{cred}' for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") + else: + ref = (cred or "").strip() + vault_matches = [r for r in all_ksm_records if ((r[1] or "").strip() == ref or (len(r) > 3 and (r[3] or "").strip() == ref)) and (r[2] or "").lower() in ("pamuser", "login")] + if len(vault_matches) == 1: + mach.rbi_settings.connection.httpCredentialsUid = [vault_matches[0][0]] + elif len(vault_matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple vault records match RBI httpCredentials '{ref}' for {getattr(mach, 'title', mach)}; not resolved.{bcolors.ENDC}") + # Local users on machines/databases/directories with external AD admin may have empty passwords (AD rotates them). 
+ mark_local_users_allowing_empty_password_for_external_admin(resources) for usr in users: if usr and hasattr(usr, "rotation_settings") and usr.rotation_settings: rot = getattr(usr.rotation_settings, "rotation", None) if rot in ("iam_user", "scripts_only"): usr.rotation_settings.resourceUid = pam_cfg_uid elif rot == "general": - res = getattr(usr.rotation_settings, "resource", "") or "" + res = (getattr(usr.rotation_settings, "resource", "") or "").strip() if res: ruids = [x for x in machines if getattr(x, "title", None) == res] ruids = ruids or [x for x in machines if getattr(x, "login", None) == res] - if ruids: + if len(ruids) == 1: usr.rotation_settings.resourceUid = ruids[0].uid - resolve_script_creds(usr, users, resources) + elif len(ruids) > 1: + logging.warning(f"{bcolors.WARNING}Multiple records match rotation_settings.resource '{res}' for user {getattr(usr, 'title', usr)}; not resolved.{bcolors.ENDC}") + else: + vault_matches = [r for r in all_ksm_records if (r[1] or "").strip() == res and (r[2] or "").lower() in ("pammachine", "pamdatabase", "pamdirectory")] + if len(vault_matches) == 1: + usr.rotation_settings.resourceUid = vault_matches[0][0] + elif len(vault_matches) > 1: + logging.warning(f"{bcolors.WARNING}Multiple vault records match rotation_settings.resource '{res}' for user {getattr(usr, 'title', usr)}; not resolved.{bcolors.ENDC}") + resolve_script_creds(usr, users_for_scripts, resources_for_scripts) if step2_errors: project["error_count"] = project.get("error_count", 0) + len(step2_errors) @@ -952,7 +1041,7 @@ def resolve_one(obj, parent_machine=None): otype = getattr(o, "type", "") or "" label = getattr(o, "title", None) or getattr(o, "login", None) or "" uid_suffix = f"\tuid={getattr(o, 'uid', '')}" if tag == "existing" else "" - print(f" [DRY RUN] [{tag}] folder={path}\trecord={otype}: {label}{uid_suffix}") + logging.info(f" [DRY RUN] [{tag}] folder={path}\trecord={otype}: {label}{uid_suffix}") for mach in resources: if hasattr(mach, 
"users") and isinstance(mach.users, list): for u in mach.users: @@ -961,8 +1050,11 @@ def resolve_one(obj, parent_machine=None): utype = getattr(u, "type", "") or "" label = getattr(u, "title", None) or getattr(u, "login", None) or "" uid_suffix = f"\tuid={getattr(u, 'uid', '')}" if tag == "existing" else "" - print(f" [DRY RUN] [{tag}] folder={path}\trecord={utype}: {label} (nested on {getattr(mach, 'title', '')}){uid_suffix}") - print(f"[DRY RUN] {total_line}") + logging.info(f" [DRY RUN] [{tag}] folder={path}\trecord={utype}: {label} (nested on {getattr(mach, 'title', '')}){uid_suffix}") + # Always show in dry-run (logging.info) when user allows empty password and has none + if getattr(u, "_is_pam_directory_user", False) and not (getattr(u, "password", None) and str(getattr(u, "password", "") or "").strip()): + logging.info("PAM User (pamDirectory/AD or local user with external AD admin) has empty password (valid).") + logging.info(f"[DRY RUN] {total_line}") else: if logging.getLogger().getEffectiveLevel() <= logging.DEBUG: for o in chain(resources, users): @@ -971,7 +1063,7 @@ def resolve_one(obj, parent_machine=None): otype = getattr(o, "type", "") or "" label = getattr(o, "title", None) or getattr(o, "login", None) or "" uid_suffix = f"\tuid={getattr(o, 'uid', '')}" if tag == "existing" else "" - print(f" [DEBUG] [{tag}] folder={path}\trecord={otype}: {label}{uid_suffix}") + logging.debug(f" [DEBUG] [{tag}] folder={path}\trecord={otype}: {label}{uid_suffix}") for mach in resources: if hasattr(mach, "users") and isinstance(mach.users, list): for u in mach.users: @@ -980,7 +1072,11 @@ def resolve_one(obj, parent_machine=None): utype = getattr(u, "type", "") or "" label = getattr(u, "title", None) or getattr(u, "login", None) or "" uid_suffix = f"\tuid={getattr(u, 'uid', '')}" if tag == "existing" else "" - print(f" [DEBUG] [{tag}] folder={path}\trecord={utype}: {label} (nested on {getattr(mach, 'title', '')}){uid_suffix}") + # Users allowing empty password vs 
other users + if getattr(u, "_is_pam_directory_user", False) and not (getattr(u, "password", None) and str(getattr(u, "password", "") or "").strip()): + logging.debug(f" [DEBUG] [{tag}] folder={path}\trecord={utype}: {label} (nested on {getattr(mach, 'title', '')}){uid_suffix} - empty password (valid).") + else: + logging.debug(f" [DEBUG] [{tag}] folder={path}\trecord={utype}: {label} (nested on {getattr(mach, 'title', '')}){uid_suffix}") print(total_line) project["mapped_resources"] = resources @@ -1234,7 +1330,7 @@ def process_data(self, params, project): """Extend: only create records tagged new; use resolved_folder_uid; for existing machines only add new users.""" if project.get("options", {}).get("dry_run", False) is True: return - from ..tunnel_and_connections import PAMTunnelEditCommand + # Local import to avoid circular import with discoveryrotation from ..discoveryrotation import PAMCreateRecordRotationCommand resources = project.get("mapped_resources") or [] @@ -1244,6 +1340,23 @@ def process_data(self, params, project): shfusr = (project.get("folders") or {}).get("users_folder_uid", "") pce = (project.get("pam_config") or {}).get("pam_config_object") + # Resolve domain admin if Domain PAM Config: match against extend JSON users and existing vault users + if pce and getattr(pce, "environment", "") == "domain" and getattr(pce, "dom_administrative_credential", None): + users_for_domain_admin = list(users) + ksm_shared_folders = project.get("ksm_shared_folders") or [] + if ksm_shared_folders: + all_ksm_records = _get_all_ksm_app_records(params, ksm_shared_folders) + for r in all_ksm_records: + rtype = ((r[2] if len(r) > 2 else "") or "").lower() + if rtype not in ("pamuser", "login"): + continue + uid = r[0] if r else "" + title = (r[1] if len(r) > 1 else "") or "" + login = (r[3] if len(r) > 3 else "") or "" + if uid: + users_for_domain_admin.append(SimpleNamespace(uid=uid, title=title, login=login)) + resolve_domain_admin(pce, users_for_domain_admin) + 
print("Started importing data...") encrypted_session_token, encrypted_transmission_key, transmission_key = get_keeper_tokens(params) tdag = TunnelDAG(params, encrypted_session_token, encrypted_transmission_key, pam_cfg_uid, True, @@ -1280,6 +1393,7 @@ def process_data(self, params, project): args = parse_command_options(mach, False) if args.get("remote_browser_isolation", False) is True: args["connections"] = True + args["v_type"] = RefType.PAM_BROWSER tdag.set_resource_allowed(**args) else: args = parse_command_options(mach, True) @@ -1290,7 +1404,39 @@ def process_data(self, params, project): tdag.link_user_to_resource(admin_uid, mach.uid, is_admin=True, belongs_to=False) args = parse_command_options(mach, False) args["meta_version"] = 1 + _rtype = (getattr(mach, "type", "") or "").lower() + args["v_type"] = RefType.PAM_DIRECTORY if _rtype == "pamdirectory" else RefType.PAM_DATABASE if _rtype == "pamdatabase" else RefType.PAM_MACHINE tdag.set_resource_allowed(**args) + + # After setting allowedSettings, save JIT settings if present + # JIT settings don't apply to RBI records (only machine/db/directory); RBI has rbi_settings, no pam_settings.jit_settings + ps = getattr(mach, "pam_settings", None) + jit = getattr(ps, "jit_settings", None) if ps else None + ai = getattr(ps, "ai_settings", None) if ps else None + if jit: + jit_dag_dict = jit.to_dag_dict() + if jit_dag_dict: # Only save if not empty + set_resource_jit_settings(params, mach.uid, jit_dag_dict, pam_cfg_uid) + + # After setting allowedSettings, save AI settings if present + # AI settings don't apply to RBI records (only machine/db/directory) + if ai: + user_id = "" + if getattr(params, "account_uid_bytes", None): + user_id = utils.base64_url_encode(params.account_uid_bytes) + elif getattr(params, "user", ""): + user_id = params.user + ai_dag_dict = ai.to_dag_dict(user_id=user_id) + if ai_dag_dict: # Only save if not empty + set_resource_keeper_ai_settings(params, mach.uid, ai_dag_dict, pam_cfg_uid) + + # 
Web vault UI visualizer shows only latest and meta is most wanted path. + if jit or ai: + refresh_meta_to_latest(params, mach.uid, pam_cfg_uid) + # Bump LINK to config only when AI is present (AI adds the encryption KEY). + if ai: + refresh_link_to_config_to_latest(params, mach.uid, pam_cfg_uid) + mach_users = getattr(mach, "users", []) or [] for user in mach_users: if getattr(user, "_extend_tag", None) != "new": @@ -1320,6 +1466,7 @@ def process_data(self, params, project): launch_uid = get_launch_credential(mach, True) if launch_uid and not isinstance(mach, PamRemoteBrowserObject): tdag.link_user_to_resource(launch_uid, mach.uid, is_launch_credential=True, belongs_to=True) + if new_resources: print(f"{len(new_resources)}/{len(new_resources)}\n") @@ -1355,6 +1502,47 @@ def process_data(self, params, project): if launch_uid and not isinstance(mach, PamRemoteBrowserObject): tdag.link_user_to_resource(launch_uid, mach.uid, is_launch_credential=True, belongs_to=True) + # link machine -> pamDirectory (LINK, path=domain) for jit_settings.pam_directory_uid + # RBI has rbi_settings only (no pam_settings.jit_settings) + jit_domain_links_added = False + for mach in resources: + ps = getattr(mach, "pam_settings", None) + jit = getattr(ps, "jit_settings", None) if ps else None + if not (mach and jit): + continue + dir_uid = getattr(jit, "pam_directory_uid", None) + if not dir_uid: + continue + dag = tdag.linking_dag + machine_vertex = dag.get_vertex(mach.uid) + dir_vertex = dag.get_vertex(dir_uid) + if machine_vertex and dir_vertex: + machine_vertex.belongs_to(dir_vertex, EdgeType.LINK, path="domain", content={}) + jit_domain_links_added = True + if jit_domain_links_added: + tdag.linking_dag.save() + + # PAM Domain Config - update domain admin creds + if pce and getattr(pce, "environment", "") == "domain": + if getattr(pce, "admin_credential_ref", None): + pcuid = (project.get("pam_config") or {}).get("pam_config_uid") + pcrec = vault.KeeperRecord.load(params, pcuid) if pcuid 
else None + if pcrec and isinstance(pcrec, vault.TypedRecord) and pcrec.version == 6: + if pcrec.record_type == "pamDomainConfiguration": + prf = pcrec.get_typed_field("pamResources") + if not prf: + prf = vault.TypedField.new_field("pamResources", {}) + pcrec.fields.append(prf) + prf.value = prf.value or [{}] + if isinstance(prf.value[0], dict): + prf.value[0]["adminCredentialRef"] = pce.admin_credential_ref + record_management.update_record(params, pcrec) + tdag.link_user_to_config_with_options(pce.admin_credential_ref, is_admin="on") + else: + logging.error(f"Unable to add adminCredentialRef - bad pamResources field in PAM Config {pcuid}") + else: + logging.debug(f"Unable to resolve domain admin '{getattr(pce, 'dom_administrative_credential', '')}' for PAM Domain configuration.") + if pce and getattr(pce, "scripts", None) and getattr(pce.scripts, "scripts", None): refs = [x for x in pce.scripts.scripts if getattr(x, "record_refs", None)] if refs: diff --git a/keepercommander/commands/start_service.py b/keepercommander/commands/start_service.py index 62024e5e7..6524a596f 100644 --- a/keepercommander/commands/start_service.py +++ b/keepercommander/commands/start_service.py @@ -13,7 +13,7 @@ from ..service.commands.config_operation import AddConfigService from ..service.commands.handle_service import StartService, StopService, ServiceStatus from ..service.commands.service_docker_setup import ServiceDockerSetupCommand -from ..service.commands.slack_app_setup import SlackAppSetupCommand +from ..service.commands.integrations import SlackAppSetupCommand, TeamsAppSetupCommand def register_commands(commands): commands['service-create'] = CreateService() @@ -23,6 +23,7 @@ def register_commands(commands): commands['service-status'] = ServiceStatus() commands['service-docker-setup'] = ServiceDockerSetupCommand() commands['slack-app-setup'] = SlackAppSetupCommand() + commands['teams-app-setup'] = TeamsAppSetupCommand() def register_command_info(aliases, command_info): 
service_classes = [ @@ -32,7 +33,8 @@ def register_command_info(aliases, command_info): StopService, ServiceStatus, ServiceDockerSetupCommand, - SlackAppSetupCommand + SlackAppSetupCommand, + TeamsAppSetupCommand ] for service_class in service_classes: diff --git a/keepercommander/commands/utils.py b/keepercommander/commands/utils.py index d01819895..c35f5dc07 100644 --- a/keepercommander/commands/utils.py +++ b/keepercommander/commands/utils.py @@ -469,7 +469,7 @@ def execute(self, params, **kwargs): if len(params.pending_share_requests) > 0: for user in params.pending_share_requests: accepted = False - print('Note: You have pending share request from ' + user) + logging.info('Note: You have pending share request from ' + user) answer = user_choice('Do you want to accept these request?', 'yn', 'n') rq = { 'command': 'accept_share' if answer == 'y' else 'cancel_share', @@ -506,38 +506,38 @@ def execute(self, params, **kwargs): # Check for valid sub-commands that need a value if len(ops) == 1 and action != 'register': if action in ('timeout', 'to'): - print(f"Usage: this-device timeout (e.g., 10m, 1h, 7d, 30d)") + logging.error(f"Usage: this-device timeout (e.g., 10m, 1h, 7d, 30d)") elif action == '2fa_expiration': - print(f"Usage: this-device 2fa_expiration (e.g., login, 12h, 24h, 30d, forever)") + logging.error(f"Usage: this-device 2fa_expiration (e.g., login, 12h, 24h, 30d, forever)") elif action in ('persistent_login', 'persistent-login', 'pl'): - print(f"Usage: this-device persistent-login ") + logging.error(f"Usage: this-device persistent-login ") elif action in ('ip_auto_approve', 'ip-auto-approve', 'iaa'): - print(f"Usage: this-device ip-auto-approve ") + logging.error(f"Usage: this-device ip-auto-approve ") elif action == 'no-yubikey-pin': - print(f"Usage: this-device no-yubikey-pin ") + logging.error(f"Usage: this-device no-yubikey-pin ") elif action in ('rename', 'ren'): - print(f"Usage: this-device rename ") + logging.error(f"Usage: this-device rename ") 
else: - print(f"Unknown sub-command: {action}") - print(f"Run {Fore.GREEN}this-device -h{Fore.RESET} for detailed help.") + logging.error(f"Unknown sub-command: {action}") + logging.error(f"Run {Fore.GREEN}this-device -h{Fore.RESET} for detailed help.") return if len(ops) >= 1 and action != 'register' and len(ops) != 2: - print(f"Invalid arguments. Run {Fore.GREEN}this-device -h{Fore.RESET} for help.") + logging.error(f"Invalid arguments. Run {Fore.GREEN}this-device -h{Fore.RESET} for help.") return def register_device(): is_device_registered = loginv3.LoginV3API.register_encrypted_data_key_for_device(params) if is_device_registered: - print(bcolors.OKGREEN + "Successfully registered device" + bcolors.ENDC) + logging.info(bcolors.OKGREEN + "Successfully registered device" + bcolors.ENDC) else: - print(bcolors.OKGREEN + "Device already registered" + bcolors.ENDC) + logging.info(bcolors.OKGREEN + "Device already registered" + bcolors.ENDC) if action == 'rename' or action == 'ren': value = ops[1] loginv3.LoginV3API.rename_device(params, value) - print(bcolors.OKGREEN + "Successfully renamed device to '" + value + "'" + bcolors.ENDC) + logging.info(bcolors.OKGREEN + "Successfully renamed device to '" + value + "'" + bcolors.ENDC) elif action == 'register': register_device() @@ -552,7 +552,7 @@ def register_device(): value_extracted = ThisDeviceCommand.get_setting_str_to_value('persistent_login', value) loginv3.LoginV3API.set_user_setting(params, 'persistent_login', value_extracted) msg = (bcolors.OKGREEN + "ENABLED" + bcolors.ENDC) if value_extracted == '1' else (bcolors.FAIL + "DISABLED" + bcolors.ENDC) - print("Successfully " + msg + " Persistent Login on this account") + logging.info("Successfully " + msg + " Persistent Login on this account") register_device() @@ -562,7 +562,7 @@ def register_device(): if this_device: if 'encryptedDataKeyPresent' not in this_device: - print(bcolors.WARNING + "\tThis device is not registered. 
To register, run command `this-device register`" + bcolors.ENDC) + logging.warning(bcolors.WARNING + "\tThis device is not registered. To register, run command `this-device register`" + bcolors.ENDC) elif action == 'ip_auto_approve' or action == 'ip-auto-approve' or action == 'iaa': value = ops[1] @@ -572,14 +572,14 @@ def register_device(): # invert ip_auto_approve value before passing it to ip_disable_auto_approve value_extracted = '0' if value_extracted == '1' else '1' if value_extracted == '0' else value_extracted loginv3.LoginV3API.set_user_setting(params, 'ip_disable_auto_approve', value_extracted) - print("Successfully " + msg + " 'ip_auto_approve'") + logging.info("Successfully " + msg + " 'ip_auto_approve'") elif action == 'no-yubikey-pin': value = ops[1] value_extracted = ThisDeviceCommand.get_setting_str_to_value('no-yubikey-pin', value) msg = (bcolors.OKGREEN + "ENABLED" + bcolors.ENDC) if value_extracted == '0' else (bcolors.FAIL + "DISABLED" + bcolors.ENDC) loginv3.LoginV3API.set_user_setting(params, 'security_keys_no_user_verify', value_extracted) - print("Successfully " + msg + " Security Key PIN verification") + logging.info("Successfully " + msg + " Security Key PIN verification") elif action == 'timeout' or action == 'to': @@ -587,7 +587,7 @@ def register_device(): timeout_delta = enforce_timeout_range(ThisDeviceCommand.get_setting_str_to_value('logout_timer', value)) loginv3.LoginV3API.set_user_setting(params, 'logout_timer', get_timeout_setting_from_delta(timeout_delta)) dispay_value = 'default value' if timeout_delta == timedelta(0) else format_timeout(timeout_delta) - print(f'Successfully set "logout_timer" to {dispay_value}.') + logging.info(f'Successfully set "logout_timer" to {dispay_value}.') elif action == '2fa_expiration': value = ops[1] @@ -599,7 +599,7 @@ def register_device(): rq = APIRequest_pb2.TwoFactorUpdateExpirationRequest() rq.expireIn = mfa_expiration api.communicate_rest(params, rq, 'authentication/2fa_update_expiration') - 
print(f'Successfully set "2fa_expiration" to {value}.') + logging.info(f'Successfully set "2fa_expiration" to {value}.') else: raise Exception("Unknown sub-command " + action + ". Available sub-commands: ", ", ".join(this_device_available_command_verbs)) @@ -803,7 +803,7 @@ def execute(self, params, **kwargs): api.sync_down(params) if self._is_vault_empty(params): - print("No records or folders to delete. Vault is already empty.") + logging.info("No records or folders to delete. Vault is already empty.") return if not self._confirm_user_wants_deletion(kwargs): @@ -840,7 +840,7 @@ def _confirm_user_wants_deletion(self, kwargs): if force_flag_passed: # Force flag was passed via command line, skip confirmation logging.info("Force flag detected, proceeding without confirmation...") - print(f"{bcolors.WARNING}Force mode: Deleting all records and folders without confirmation{bcolors.ENDC}") + logging.warning(f"{bcolors.WARNING}Force mode: Deleting all records and folders without confirmation{bcolors.ENDC}") return True # Show confirmation prompt @@ -865,12 +865,12 @@ def _process_record_deletion(self, params): return DeletionStats() if skipped_stats['shared_folders'] > 0 or skipped_stats['shared_records'] > 0: - print(f"\nSHARED FOLDER CONTENT SKIPPED:") - print(f" • {skipped_stats['shared_folders']} shared folders avoided") - print(f" • {skipped_stats['shared_records']} records in shared folders avoided") - print(f"\nFor shared folders with many records, use this workflow:") - print(f" 1. Run 'transform-folder ' to convert shared folder to user folder (fast)") - print(f" 2. Then run delete-all to clean remaining user vault content\n") + logging.warning(f"\nSHARED FOLDER CONTENT SKIPPED:") + logging.warning(f" • {skipped_stats['shared_folders']} shared folders avoided") + logging.warning(f" • {skipped_stats['shared_records']} records in shared folders avoided") + logging.warning(f"\nFor shared folders with many records, use this workflow:") + logging.warning(f" 1. 
Run 'transform-folder ' to convert shared folder to user folder (fast)") + logging.warning(f" 2. Then run delete-all to clean remaining user vault content\n") logging.info('Preparing to delete %s records from user folders', len(records_with_folders)) return self._delete_objects_in_batches(params, records_with_folders, 'records') @@ -1696,9 +1696,9 @@ def execute(self, params, **kwargs): if not region: # Check extended server list region = next((k for k, v in KEEPER_SERVERS.items() if v == params.server), params.server) - print(f'{Fore.CYAN}Data center: {Fore.WHITE}{region}{Fore.RESET}') - print(f'{Fore.CYAN}Use {Fore.GREEN}login --server {Fore.CYAN} to change (US, EU, AU, CA, JP, GOV){Fore.RESET}') - print() + print(f'{Fore.CYAN}Data center: {Fore.WHITE}{region}{Fore.RESET}', file=sys.stderr) + print(f'{Fore.CYAN}Use {Fore.GREEN}login --server {Fore.CYAN} to change (US, EU, AU, CA, JP, GOV){Fore.RESET}', file=sys.stderr) + print('', file=sys.stderr) user = input(f'{Fore.GREEN}Email: {Fore.RESET}').strip() if not user: return @@ -1745,12 +1745,12 @@ def execute(self, params, **kwargs): show_help = kwargs.get('show_help', True) if show_help: if params.batch_mode: - # One-shot login from terminal - show simple success message - print() - print(f'{Fore.GREEN}Keeper login successful.{Fore.RESET}') - print(f'Type "{Fore.GREEN}keeper shell{Fore.RESET}" for the interactive shell, "{Fore.GREEN}keeper supershell{Fore.RESET}" for the vault UI,') - print(f'or "{Fore.GREEN}keeper help{Fore.RESET}" to see all available commands.') - print() + # One-shot login from terminal - show simple success message (stderr, not stdout) + logging.warning('') + logging.warning(f'{Fore.GREEN}Keeper login successful.{Fore.RESET}') + logging.warning(f'Type "{Fore.GREEN}keeper shell{Fore.RESET}" for the interactive shell, "{Fore.GREEN}keeper supershell{Fore.RESET}" for the vault UI,') + logging.warning(f'or "{Fore.GREEN}keeper help{Fore.RESET}" to see all available commands.') + 
logging.warning('') else: # Interactive shell - show full summary with tips record_count = getattr(params, '_sync_record_count', 0) @@ -1768,7 +1768,7 @@ def is_authorised(self): def execute(self, params, **kwargs): if params.enforcements: if 'enterprise_invited' in params.enforcements: - print('You\'ve been invited to join {0}.'.format(params.enforcements['enterprise_invited'])) + logging.info('You\'ve been invited to join {0}.'.format(params.enforcements['enterprise_invited'])) action = user_choice('A(ccept)/D(ecline)/I(gnore)?: ', 'adi') action = action.lower() if action == 'a': diff --git a/keepercommander/display.py b/keepercommander/display.py index 7ebc9e46d..8e23f0e15 100644 --- a/keepercommander/display.py +++ b/keepercommander/display.py @@ -8,6 +8,7 @@ # Contact: ops@keepersecurity.com # import json +import logging import shutil from typing import Tuple, List, Union, Optional @@ -69,19 +70,19 @@ def keeper_colorize(text, color): def show_government_warning(): """Display U.S. Government Information System warning for GOV environments.""" - print('') - print(f'{bcolors.WARNING}' + '=' * 80 + f'{bcolors.ENDC}') - print(f'{bcolors.WARNING}U.S. GOVERNMENT INFORMATION SYSTEM{bcolors.ENDC}') - print(f'{bcolors.WARNING}' + '=' * 80 + f'{bcolors.ENDC}') - print('') - print('You are about to access a U.S. Government Information System. Although the') - print('encrypted vault adheres to a zero-knowledge security architecture, system') - print('access logs are subject to monitoring, recording and audit. Unauthorized') - print('use of this system is prohibited and may result in civil and criminal') - print('penalties. Your use of this system indicates your acknowledgement and consent.') - print('') - print(f'{bcolors.WARNING}' + '=' * 80 + f'{bcolors.ENDC}') - print('') + logging.warning('') + logging.warning(f'{bcolors.WARNING}' + '=' * 80 + f'{bcolors.ENDC}') + logging.warning(f'{bcolors.WARNING}U.S. 
GOVERNMENT INFORMATION SYSTEM{bcolors.ENDC}') + logging.warning(f'{bcolors.WARNING}' + '=' * 80 + f'{bcolors.ENDC}') + logging.warning('') + logging.warning('You are about to access a U.S. Government Information System. Although the') + logging.warning('encrypted vault adheres to a zero-knowledge security architecture, system') + logging.warning('access logs are subject to monitoring, recording and audit. Unauthorized') + logging.warning('use of this system is prohibited and may result in civil and criminal') + logging.warning('penalties. Your use of this system indicates your acknowledgement and consent.') + logging.warning('') + logging.warning(f'{bcolors.WARNING}' + '=' * 80 + f'{bcolors.ENDC}') + logging.warning('') def welcome(): @@ -300,27 +301,27 @@ def post_login_summary(record_count=0, breachwatch_count=0, show_tips=True): DIM = Fore.WHITE WARN = Fore.YELLOW - print() + logging.info('') # Vault summary if record_count > 0: - print(f" {ACCENT}✓{Fore.RESET} Decrypted {record_count} records") + logging.info(f" {ACCENT}✓{Fore.RESET} Decrypted {record_count} records") # BreachWatch warning if breachwatch_count > 0: - print(f" {WARN}⚠ {breachwatch_count} high-risk passwords{Fore.RESET} - run {ACCENT}breachwatch list{Fore.RESET}") + logging.info(f" {WARN}⚠ {breachwatch_count} high-risk passwords{Fore.RESET} - run {ACCENT}breachwatch list{Fore.RESET}") if show_tips: - print() - print(f" {DIM}Quick Start:{Fore.RESET}") - print(f" {ACCENT}ls{Fore.RESET} List records") - print(f" {ACCENT}ls -l -f{Fore.RESET} List folders") - print(f" {ACCENT}cd {Fore.RESET} Change folder") - print(f" {ACCENT}get {Fore.RESET} Get record or folder info") - print(f" {ACCENT}supershell{Fore.RESET} Launch vault TUI") - print(f" {ACCENT}search{Fore.RESET} Search your vault") - print(f" {ACCENT}this-device{Fore.RESET} Configure device settings") - print(f" {ACCENT}whoami{Fore.RESET} Display account info") - print(f" {ACCENT}?{Fore.RESET} List all commands") - - print() + logging.info('') + 
logging.info(f" {DIM}Quick Start:{Fore.RESET}") + logging.info(f" {ACCENT}ls{Fore.RESET} List records") + logging.info(f" {ACCENT}ls -l -f{Fore.RESET} List folders") + logging.info(f" {ACCENT}cd {Fore.RESET} Change folder") + logging.info(f" {ACCENT}get {Fore.RESET} Get record or folder info") + logging.info(f" {ACCENT}supershell{Fore.RESET} Launch vault TUI") + logging.info(f" {ACCENT}search{Fore.RESET} Search your vault") + logging.info(f" {ACCENT}this-device{Fore.RESET} Configure device settings") + logging.info(f" {ACCENT}whoami{Fore.RESET} Display account info") + logging.info(f" {ACCENT}?{Fore.RESET} List all commands") + + logging.info('') diff --git a/keepercommander/loginv3.py b/keepercommander/loginv3.py index a7b27f6b0..b8ef29914 100644 --- a/keepercommander/loginv3.py +++ b/keepercommander/loginv3.py @@ -5,6 +5,7 @@ import logging import os import re +import sys from collections import namedtuple from sys import platform as _platform from typing import Optional, List, Any @@ -45,7 +46,7 @@ def __init__(self, login_ui=None): # type: (login_steps.LoginUi) -> None def _fallback_to_password_auth(self, params, encryptedDeviceToken, clone_code_bytes, login_type): """Helper method to handle fallback from biometric to default authentication""" - logging.info("Falling back to default authentication...") + print("Falling back to default authentication...", file=sys.stderr) return LoginV3API.startLoginMessage(params, encryptedDeviceToken, cloneCode=clone_code_bytes, loginType=login_type) def login(self, params, new_device=False, new_login=False, new_password_if_reset_required=None): # type: (KeeperParams, bool, bool, string) -> None @@ -119,8 +120,8 @@ def fallback_to_password(self): try: from .biometric.commands.verify import BiometricVerifyCommand auth_helper = BiometricVerifyCommand() - logging.info("Attempting biometric authentication...") - logging.info("Press Ctrl+C to skip biometric and use default login method") + print("Attempting biometric 
authentication...", file=sys.stderr) + print("Press Ctrl+C to skip biometric and use default login method", file=sys.stderr) biometric_result = auth_helper.biometric_authenticate(params, username=params.user) @@ -128,11 +129,11 @@ def fallback_to_password(self): logging.debug("Biometric authentication successful!") step.verify_biometric_key(biometric_result.get('login_token')) else: - logging.info("Biometric authentication failed") + print("Biometric authentication failed", file=sys.stderr) step.fallback_to_password() except KeyboardInterrupt: - logging.info("Biometric authentication cancelled by user") + print("Biometric authentication cancelled by user", file=sys.stderr) step.fallback_to_password() except Exception as e: @@ -150,7 +151,7 @@ def fallback_to_password(self): else: raise Exception("Device needs approval for biometric authentication. Please register your device first.") else: - logging.info(f"Biometric authentication error: {e}") + print(f"Biometric authentication error: {e}", file=sys.stderr) step.fallback_to_password() if should_cancel: @@ -256,8 +257,8 @@ def cancel(self): if encryptedLoginToken: resp = LoginV3API.resume_login(params, encryptedLoginToken, encryptedDeviceToken, loginMethod='AFTER_SSO') else: - logging.info(bcolors.BOLD + bcolors.OKGREEN + "\nAttempting to authenticate with a master password." + bcolors.ENDC + bcolors.ENDC) - logging.info(bcolors.OKBLUE + "(Note: SSO users can create a Master Password in Web Vault > Settings)\n" + bcolors.ENDC) + print(bcolors.BOLD + bcolors.OKGREEN + "\nAttempting to authenticate with a master password." 
+ bcolors.ENDC + bcolors.ENDC, file=sys.stderr) + print(bcolors.OKBLUE + "(Note: SSO users can create a Master Password in Web Vault > Settings)\n" + bcolors.ENDC, file=sys.stderr) is_alternate_login = True resp = LoginV3API.startLoginMessage(params, encryptedDeviceToken, loginType='ALTERNATE') @@ -272,7 +273,7 @@ def cancel(self): elif resp.loginState == APIRequest_pb2.REGION_REDIRECT: params.server = resp.stateSpecificValue - logging.info('Redirecting to region: %s', params.server) + print('Redirecting to region: %s' % params.server, file=sys.stderr) LoginV3API.register_device_in_region(params, encryptedDeviceToken) resp = LoginV3API.startLoginMessage(params, encryptedDeviceToken) @@ -385,18 +386,18 @@ def post_login_processing(params: KeeperParams, resp: APIRequest_pb2.LoginRespon raise Exception(msg) elif resp.sessionTokenType == APIRequest_pb2.ACCOUNT_RECOVERY: if new_password_if_reset_required: - print('Resetting expired Master Password.\n') + print('Resetting expired Master Password.\n', file=sys.stderr) LoginV3API.change_master_password(params, new_password_if_reset_required) # always returns False return False elif new_password_if_reset_required is None: - print('Your Master Password has expired, you are required to change it before you can login.\n') + print('Your Master Password has expired, you are required to change it before you can login.\n', file=sys.stderr) if LoginV3Flow.change_master_password(params): return False # Return exception if password change fails params.clear_session() raise Exception('Change password failed') elif resp.sessionTokenType == APIRequest_pb2.SHARE_ACCOUNT: - logging.info('Account transfer required') + print('Account transfer required', file=sys.stderr) accepted = api.accept_account_transfer_consent(params) if accepted: return False @@ -425,7 +426,8 @@ def post_login_processing(params: KeeperParams, resp: APIRequest_pb2.LoginRespon if bw_audit: params.breach_watch.send_audit_events = True - logging.info(bcolors.OKGREEN + 
"Successfully authenticated with " + login_type_message + "" + bcolors.ENDC) + if not params.batch_mode: + print(bcolors.OKGREEN + "Successfully authenticated with " + login_type_message + "" + bcolors.ENDC, file=sys.stderr) return True @staticmethod @@ -491,7 +493,7 @@ def change_master_password(params, password_rules=None, min_iterations=None): try: while True: - print('Please choose a new Master Password.') + print('Please choose a new Master Password.', file=sys.stderr) password = getpass.getpass(prompt='... {0:>24}: '.format('Master Password'), stream=None).strip() if not password: raise KeyboardInterrupt() @@ -505,7 +507,7 @@ def change_master_password(params, password_rules=None, min_iterations=None): failed_rules.append(rule.description) if len(failed_rules) == 0: LoginV3API.change_master_password(params, password, min_iterations) - logging.info('Password changed') + print('Password changed', file=sys.stderr) params.password = password return True else: @@ -514,7 +516,7 @@ def change_master_password(params, password_rules=None, min_iterations=None): else: logging.warning('Passwords do not match.') except KeyboardInterrupt: - logging.info('Canceled') + print('Canceled', file=sys.stderr) params.session_token = None params.data_key = None return False @@ -762,8 +764,8 @@ def cancel(self): raise KeyboardInterrupt() def handle_account_recovery(self, params, encrypted_login_token_bytes): - logging.info('') - logging.info('Password Recovery') + print('', file=sys.stderr) + print('Password Recovery', file=sys.stderr) rq = APIRequest_pb2.MasterPasswordRecoveryVerificationRequest() rq.encryptedLoginToken = encrypted_login_token_bytes try: @@ -772,7 +774,7 @@ def handle_account_recovery(self, params, encrypted_login_token_bytes): if kae.result_code != 'bad_request' and not kae.message.startswith('Email has been sent.'): raise kae - logging.info('Please check your email and enter the verification code below:') + print('Please check your email and enter the verification 
code below:', file=sys.stderr) verification_code = input('Verification Code: ') if not verification_code: return @@ -786,7 +788,7 @@ def handle_account_recovery(self, params, encrypted_login_token_bytes): backup_type = rs.backupKeyType if backup_type == APIRequest_pb2.BKT_SEC_ANSWER: - print(f'Security Question: {rs.securityQuestion}') + print(f'Security Question: {rs.securityQuestion}', file=sys.stderr) answer = getpass.getpass(prompt='Answer: ', stream=None) if not answer: return @@ -794,7 +796,7 @@ def handle_account_recovery(self, params, encrypted_login_token_bytes): auth_hash = crypto.derive_keyhash_v1(recovery_phrase, rs.salt, rs.iterations) elif backup_type == APIRequest_pb2.BKT_PASSPHRASE_HASH: p = PassphrasePrompt() - print('Please enter your Recovery Phrase ') + print('Please enter your Recovery Phrase ', file=sys.stderr) if os.isatty(0): phrase = prompt('Recovery Phrase: ', lexer=p, completer=p, key_bindings=p.kb, validator=p, validate_while_typing=False, editing_mode=EditingMode.VI, wrap_lines=True, @@ -810,7 +812,7 @@ def handle_account_recovery(self, params, encrypted_login_token_bytes): recovery_phrase = ' '.join(words) auth_hash = crypto.generate_hkdf_key('recovery_auth_token', recovery_phrase) else: - logging.info('Unsupported account recovery type') + print('Unsupported account recovery type', file=sys.stderr) return rq = APIRequest_pb2.GetDataKeyBackupV3Request() @@ -844,7 +846,7 @@ def rest_request(params: KeeperParams, api_endpoint: str, rq): @staticmethod def get_device_id(params, new_device=False): # type: (KeeperParams, bool) -> bytes if new_device: - logging.info('Resetting device token') + print('Resetting device token', file=sys.stderr) params.device_token = None if 'device_token' in params.config: del params.config['device_token'] @@ -935,7 +937,7 @@ def resume_login(params: KeeperParams, encryptedLoginToken, encryptedDeviceToken if 'error' in rs and 'message' in rs: if rs['error'] == 'region_redirect': params.server = rs['region_host'] 
- logging.info('Redirecting to region: %s', params.server) + print('Redirecting to region: %s' % params.server, file=sys.stderr) LoginV3API.register_device_in_region(params, encryptedDeviceToken) return LoginV3API.startLoginMessage(params, encryptedDeviceToken, loginType=loginType) @@ -977,7 +979,7 @@ def startLoginMessage(params, encryptedDeviceToken, cloneCode = None, loginType if 'error' in rs and 'message' in rs: if rs['error'] == 'region_redirect': params.server = rs['region_host'] - logging.info('Redirecting to region: %s', params.server) + print('Redirecting to region: %s' % params.server, file=sys.stderr) LoginV3API.register_device_in_region(params, encryptedDeviceToken) return LoginV3API.startLoginMessage(params, encryptedDeviceToken, loginType=loginType) diff --git a/keepercommander/service/commands/integrations/__init__.py b/keepercommander/service/commands/integrations/__init__.py new file mode 100644 index 000000000..e4e4570ae --- /dev/null +++ b/keepercommander/service/commands/integrations/__init__.py @@ -0,0 +1,22 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' str: + """e.g. 
'Slack', 'Teams' -- drives all naming conventions.""" + + @abstractmethod + def collect_integration_config(self) -> Any: + """Prompt user for config values, return a config dataclass.""" + + @abstractmethod + def build_record_custom_fields(self, config) -> List: + """Return list of vault.TypedField for the config record.""" + + @abstractmethod + def print_integration_specific_resources(self, config) -> None: + """Print integration-specific resource lines.""" + + @abstractmethod + def print_integration_commands(self) -> None: + """Print available bot commands for this integration.""" + + # -- Convention defaults (derived from name, override if needed) - + + def get_command_name(self) -> str: + return f'{self.get_integration_name().lower()}-app-setup' + + def get_default_folder_name(self) -> str: + return f'Commander Service Mode - {self.get_integration_name()} App' + + def get_default_record_name(self) -> str: + return f'Commander Service Mode {self.get_integration_name()} App Config' + + def get_docker_service_name(self) -> str: + return f'{self.get_integration_name().lower()}-app' + + def get_docker_container_name(self) -> str: + return f'keeper-{self.get_integration_name().lower()}-app' + + def get_docker_image(self) -> str: + return f'keeper/{self.get_integration_name().lower()}-app:latest' + + def get_record_env_key(self) -> str: + return f'{self.get_integration_name().upper()}_RECORD' + + def get_commander_service_name(self) -> str: + return f'commander-{self.get_integration_name().lower()}' + + def get_commander_container_name(self) -> str: + return f'keeper-service-{self.get_integration_name().lower()}' + + def get_service_commands(self) -> str: + return 'search,share-record,share-folder,record-add,one-time-share,epm,pedm,device-approve,get,server' + + # -- Parser (auto-built from name, cached per subclass) ---------- + + def get_parser(self): + cls = type(self) + if cls not in IntegrationSetupCommand._parser_cache: + 
IntegrationSetupCommand._parser_cache[cls] = self._build_parser() + return IntegrationSetupCommand._parser_cache[cls] + + def _build_parser(self) -> argparse.ArgumentParser: + name = self.get_integration_name() + name_lower = name.lower() + default_folder = self.get_default_folder_name() + default_record = self.get_default_record_name() + + parser = argparse.ArgumentParser( + prog=f'{name_lower}-app-setup', + description=f'Automate {name} App integration setup with Commander Service Mode', + formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument( + '--folder-name', dest='folder_name', type=str, default=default_folder, + help=f'Name for the shared folder (default: "{default_folder}")' + ) + parser.add_argument( + '--app-name', dest='app_name', type=str, default=DockerSetupConstants.DEFAULT_APP_NAME, + help=f'Name for the secrets manager app (default: "{DockerSetupConstants.DEFAULT_APP_NAME}")' + ) + parser.add_argument( + '--config-record-name', dest='config_record_name', type=str, + default=DockerSetupConstants.DEFAULT_RECORD_NAME, + help=f'Name for the config record (default: "{DockerSetupConstants.DEFAULT_RECORD_NAME}")' + ) + parser.add_argument( + f'--{name_lower}-record-name', dest='integration_record_name', type=str, + default=default_record, + help=f'Name for the {name} config record (default: "{default_record}")' + ) + parser.add_argument( + '--config-path', dest='config_path', type=str, + help='Path to config.json file (default: ~/.keeper/config.json)' + ) + parser.add_argument( + '--timeout', dest='timeout', type=str, default=DockerSetupConstants.DEFAULT_TIMEOUT, + help=f'Device timeout setting (default: {DockerSetupConstants.DEFAULT_TIMEOUT})' + ) + parser.add_argument( + '--skip-device-setup', dest='skip_device_setup', action='store_true', + help='Skip device registration and setup if already configured' + ) + parser.error = raise_parse_exception + parser.exit = suppress_exit + return parser + + # -- Main flow 
--------------------------------------------------- + + def execute(self, params, **kwargs): + name = self.get_integration_name() + + # Phase 1 -- Docker service mode setup + print(f"\n{bcolors.BOLD}Phase 1: Running Docker Service Mode Setup{bcolors.ENDC}") + setup_result, service_config, config_path = self._run_base_docker_setup(params, kwargs) + DockerSetupPrinter.print_completion("Service Mode Configuration Complete!") + + # Phase 2 -- Integration-specific setup + print(f"\n{bcolors.BOLD}Phase 2: {name} App Integration Setup{bcolors.ENDC}") + record_name = kwargs.get('integration_record_name', self.get_default_record_name()) + record_uid, config = self._run_integration_setup( + params, setup_result, service_config, record_name + ) + + # Consolidated success output + self._print_success_message(setup_result, service_config, record_uid, config, config_path) + + # -- Phase 1 (docker service mode) -------------------------------- + + def _run_base_docker_setup(self, params, kwargs: Dict[str, Any]) -> Tuple[SetupResult, ServiceConfig, str]: + docker_cmd = ServiceDockerSetupCommand() + + config_path = kwargs.get('config_path') or os.path.expanduser('~/.keeper/config.json') + if not os.path.isfile(config_path): + raise CommandError(self.get_command_name(), f'Config file not found: {config_path}') + + DockerSetupPrinter.print_header("Docker Setup") + + setup_result = docker_cmd.run_setup_steps( + params=params, + folder_name=kwargs.get('folder_name', self.get_default_folder_name()), + app_name=kwargs.get('app_name', DockerSetupConstants.DEFAULT_APP_NAME), + record_name=kwargs.get('config_record_name', DockerSetupConstants.DEFAULT_RECORD_NAME), + config_path=config_path, + timeout=kwargs.get('timeout', DockerSetupConstants.DEFAULT_TIMEOUT), + skip_device_setup=kwargs.get('skip_device_setup', False) + ) + + DockerSetupPrinter.print_completion("Docker Setup Complete!") + + service_config = self._get_integration_service_configuration() + + return setup_result, 
service_config, config_path + + def _get_integration_service_configuration(self) -> ServiceConfig: + DockerSetupPrinter.print_header("Service Mode Configuration") + + print(f"{bcolors.BOLD}Port:{bcolors.ENDC}") + print(f" The port on which Commander Service will listen") + while True: + port_input = ( + input(f"{bcolors.OKBLUE}Port [Press Enter for {DockerSetupConstants.DEFAULT_PORT}]:{bcolors.ENDC} ").strip() + or str(DockerSetupConstants.DEFAULT_PORT) + ) + try: + port = ConfigValidator.validate_port(port_input) + break + except ValidationError as e: + print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") + + ngrok_config = self._get_ngrok_config() + + if not ngrok_config['ngrok_enabled']: + cloudflare_config = self._get_cloudflare_config() + else: + cloudflare_config = { + 'cloudflare_enabled': False, 'cloudflare_tunnel_token': '', + 'cloudflare_custom_domain': '', 'cloudflare_public_url': '' + } + + return ServiceConfig( + port=port, + commands=self.get_service_commands(), + queue_enabled=True, + ngrok_enabled=ngrok_config['ngrok_enabled'], + ngrok_auth_token=ngrok_config['ngrok_auth_token'], + ngrok_custom_domain=ngrok_config['ngrok_custom_domain'], + ngrok_public_url=ngrok_config.get('ngrok_public_url', ''), + cloudflare_enabled=cloudflare_config['cloudflare_enabled'], + cloudflare_tunnel_token=cloudflare_config['cloudflare_tunnel_token'], + cloudflare_custom_domain=cloudflare_config['cloudflare_custom_domain'], + cloudflare_public_url=cloudflare_config.get('cloudflare_public_url', '') + ) + + # -- Phase 2 (integration-specific) -------------------------------- + + def _run_integration_setup(self, params, setup_result: SetupResult, + service_config: ServiceConfig, + record_name: str) -> Tuple[str, Any]: + name = self.get_integration_name() + + DockerSetupPrinter.print_header(f"{name} App Configuration") + config = self.collect_integration_config() + + DockerSetupPrinter.print_step(1, 2, f"Creating {name} config record '{record_name}'...") + custom_fields = 
self.build_record_custom_fields(config) + record_uid = self._create_integration_record(params, record_name, setup_result.folder_uid, custom_fields) + + DockerSetupPrinter.print_step(2, 2, f"Updating docker-compose.yml with {name} App service...") + self._update_docker_compose(setup_result, service_config, record_uid) + + return record_uid, config + + # -- Record management --------------------------------------------- + + def _create_integration_record(self, params, record_name: str, + folder_uid: str, custom_fields: List) -> str: + record_uid = self._find_record_in_folder(params, folder_uid, record_name) + + if record_uid: + DockerSetupPrinter.print_success("Using existing record (will update with custom fields)") + else: + record_uid = self._create_login_record(params, folder_uid, record_name) + + self._update_record_custom_fields(params, record_uid, custom_fields) + + name = self.get_integration_name() + DockerSetupPrinter.print_success(f"{name} config record ready (UID: {record_uid})") + return record_uid + + def _find_record_in_folder(self, params, folder_uid: str, record_name: str): + if folder_uid in params.subfolder_record_cache: + for rec_uid in params.subfolder_record_cache[folder_uid]: + rec = api.get_record(params, rec_uid) + if rec.title == record_name: + return rec_uid + return None + + def _create_login_record(self, params, folder_uid: str, record_name: str) -> str: + try: + record = vault.KeeperRecord.create(params, 'login') + record.record_uid = utils.generate_uid() + record.record_key = utils.generate_aes_key() + record.title = record_name + record.type_name = 'login' + record_management.add_record_to_folder(params, record, folder_uid) + api.sync_down(params) + return record.record_uid + except Exception as e: + raise CommandError(self.get_command_name(), f'Failed to create record: {str(e)}') + + def _update_record_custom_fields(self, params, record_uid: str, custom_fields: List) -> None: + try: + record = vault.KeeperRecord.load(params, 
record_uid) + record.custom = custom_fields + record_management.update_record(params, record) + params.sync_data = True + api.sync_down(params) + except Exception as e: + raise CommandError(self.get_command_name(), f'Failed to update record fields: {str(e)}') + + # -- Docker Compose update ----------------------------------------- + + def _update_docker_compose(self, setup_result: SetupResult, + service_config: ServiceConfig, + record_uid: str) -> None: + compose_file = os.path.join(os.getcwd(), 'docker-compose.yml') + service_name = self.get_docker_service_name() + + if os.path.exists(compose_file): + with open(compose_file, 'r') as f: + content = f.read() + + if f'{service_name}:' in content: + DockerSetupPrinter.print_warning(f"{service_name} service already exists in docker-compose.yml") + return + + try: + builder = DockerComposeBuilder( + setup_result, asdict(service_config), + commander_service_name=self.get_commander_service_name(), + commander_container_name=self.get_commander_container_name() + ) + yaml_content = builder.add_integration_service( + service_name=service_name, + container_name=self.get_docker_container_name(), + image=self.get_docker_image(), + record_uid=record_uid, + record_env_key=self.get_record_env_key() + ).build() + + with open(compose_file, 'w') as f: + f.write(yaml_content) + + DockerSetupPrinter.print_success("docker-compose.yml updated successfully") + except Exception as e: + raise CommandError(self.get_command_name(), f'Failed to update docker-compose.yml: {str(e)}') + + # -- Success output ------------------------------------------------ + + def _print_success_message(self, setup_result: SetupResult, + service_config: ServiceConfig, + record_uid: str, config, config_path: str) -> None: + name = self.get_integration_name() + + print(f"\n{bcolors.OKGREEN}{bcolors.BOLD}✓ {name} App Integration Setup Complete!{bcolors.ENDC}\n") + + print(f"{bcolors.BOLD}Resources Created:{bcolors.ENDC}") + print(f" {bcolors.BOLD}Phase 1 - Commander 
Service:{bcolors.ENDC}") + DockerSetupPrinter.print_phase1_resources(setup_result, indent=" ") + print(f" {bcolors.BOLD}Phase 2 - {name} App:{bcolors.ENDC}") + self._print_integration_resources(record_uid, config) + + DockerSetupPrinter.print_common_deployment_steps(str(service_config.port), config_path) + + container = self.get_docker_container_name() + print(f" {bcolors.OKGREEN}docker logs {container}{bcolors.ENDC} - View {name} App logs") + + self.print_integration_commands() + + def _print_integration_resources(self, record_uid: str, config) -> None: + name = self.get_integration_name() + print(f" • {name} Config Record: {bcolors.OKBLUE}{record_uid}{bcolors.ENDC}") + self.print_integration_specific_resources(config) + print(f" • PEDM Integration: {bcolors.OKBLUE}{'true' if config.pedm_enabled else 'false'}{bcolors.ENDC}") + print(f" • Device Approval: {bcolors.OKBLUE}{'true' if config.device_approval_enabled else 'false'}{bcolors.ENDC}") + + # -- Optional feature collectors ----------------------------------- + + def _collect_pedm_config(self) -> Tuple[bool, int]: + print(f"\n{bcolors.BOLD}PEDM (Endpoint Privilege Manager) Integration (optional):{bcolors.ENDC}") + print(f" Integrate with Keeper PEDM for privilege elevation") + enabled = input(f"{bcolors.OKBLUE}Enable PEDM? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + interval = 120 + if enabled: + interval_input = input(f"{bcolors.OKBLUE}PEDM polling interval in seconds [Press Enter for 120]:{bcolors.ENDC} ").strip() + interval = int(interval_input) if interval_input else 120 + return enabled, interval + + def _collect_device_approval_config(self) -> Tuple[bool, int]: + name = self.get_integration_name() + print(f"\n{bcolors.BOLD}SSO Cloud Device Approval Integration (optional):{bcolors.ENDC}") + print(f" Approve SSO Cloud device registrations via {name}") + enabled = input(f"{bcolors.OKBLUE}Enable Device Approval? 
[Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' + interval = 120 + if enabled: + interval_input = input(f"{bcolors.OKBLUE}Device approval polling interval in seconds [Press Enter for 120]:{bcolors.ENDC} ").strip() + interval = int(interval_input) if interval_input else 120 + return enabled, interval + + # -- Input / validation -------------------------------------------- + + def _prompt_with_validation(self, prompt: str, validator, error_msg: str) -> str: + while True: + value = input(f"{bcolors.OKBLUE}{prompt}{bcolors.ENDC} ").strip() + if validator(value): + return value + print(f"{bcolors.FAIL}Error: {error_msg}{bcolors.ENDC}") + + @staticmethod + def is_valid_uuid(value: str) -> bool: + return bool(UUID_PATTERN.match(value or '')) diff --git a/keepercommander/service/commands/integrations/slack_app_setup.py b/keepercommander/service/commands/integrations/slack_app_setup.py new file mode 100644 index 000000000..cebfb4a9a --- /dev/null +++ b/keepercommander/service/commands/integrations/slack_app_setup.py @@ -0,0 +1,97 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' = 90, + "Invalid Slack App Token (must start with 'xapp-' and be at least 90 chars)" + ) + + print(f"\n{bcolors.BOLD}SLACK_BOT_TOKEN:{bcolors.ENDC}") + print(f" Bot token for Slack workspace") + slack_bot_token = self._prompt_with_validation( + "Token (starts with xoxb-):", + lambda t: t and t.startswith('xoxb-') and len(t) >= 50, + "Invalid Slack Bot Token (must start with 'xoxb-' and be at least 50 chars)" + ) + + print(f"\n{bcolors.BOLD}SLACK_SIGNING_SECRET:{bcolors.ENDC}") + print(f" Signing secret for verifying Slack requests") + slack_signing_secret = self._prompt_with_validation( + "Secret:", + lambda s: s and len(s) == 32, + "Invalid Slack Signing Secret (must be exactly 32 characters)" + ) + + print(f"\n{bcolors.BOLD}APPROVALS_CHANNEL_ID:{bcolors.ENDC}") + print(f" Slack channel ID for approval notifications") + approvals_channel_id = self._prompt_with_validation( + 
"Channel ID (starts with C):", + lambda c: c and c.startswith('C'), + "Invalid Approvals Channel ID (must start with 'C')" + ) + + pedm_enabled, pedm_interval = self._collect_pedm_config() + da_enabled, da_interval = self._collect_device_approval_config() + + print(f"\n{bcolors.OKGREEN}{bcolors.BOLD}✓ Slack Configuration Complete!{bcolors.ENDC}") + + return SlackConfig( + slack_app_token=slack_app_token, + slack_bot_token=slack_bot_token, + slack_signing_secret=slack_signing_secret, + approvals_channel_id=approvals_channel_id, + pedm_enabled=pedm_enabled, + pedm_polling_interval=pedm_interval, + device_approval_enabled=da_enabled, + device_approval_polling_interval=da_interval + ) + + def build_record_custom_fields(self, config): + return [ + vault.TypedField.new_field('secret', config.slack_app_token, 'slack_app_token'), + vault.TypedField.new_field('secret', config.slack_bot_token, 'slack_bot_token'), + vault.TypedField.new_field('secret', config.slack_signing_secret, 'slack_signing_secret'), + vault.TypedField.new_field('text', config.approvals_channel_id, 'approvals_channel_id'), + vault.TypedField.new_field('text', 'true' if config.pedm_enabled else 'false', 'pedm_enabled'), + vault.TypedField.new_field('text', str(config.pedm_polling_interval), 'pedm_polling_interval'), + vault.TypedField.new_field('text', 'true' if config.device_approval_enabled else 'false', 'device_approval_enabled'), + vault.TypedField.new_field('text', str(config.device_approval_polling_interval), 'device_approval_polling_interval'), + ] + + # ── Display ─────────────────────────────────────────────────── + + def print_integration_specific_resources(self, config): + print(f" • Approvals Channel: {bcolors.OKBLUE}{config.approvals_channel_id}{bcolors.ENDC}") + + def print_integration_commands(self): + print(f"\n{bcolors.BOLD}Slack Commands Available:{bcolors.ENDC}") + print(f" {bcolors.OKGREEN}• /keeper-request-record{bcolors.ENDC} - Request access to a record") + print(f" 
{bcolors.OKGREEN}• /keeper-request-folder{bcolors.ENDC} - Request access to a folder") + print(f" {bcolors.OKGREEN}• /keeper-one-time-share{bcolors.ENDC} - Request a one-time share link\n") diff --git a/keepercommander/service/commands/integrations/teams_app_setup.py b/keepercommander/service/commands/integrations/teams_app_setup.py new file mode 100644 index 000000000..76a46d6f3 --- /dev/null +++ b/keepercommander/service/commands/integrations/teams_app_setup.py @@ -0,0 +1,109 @@ +# _ __ +# | |/ /___ ___ _ __ ___ _ _ ® +# | ' = 30, + "Invalid Client Secret (must be at least 30 characters)" + ) + + print(f"\n{bcolors.BOLD}TENANT_ID:{bcolors.ENDC}") + print(f" Azure AD Tenant ID") + tenant_id = self._prompt_with_validation( + "Tenant ID:", + lambda v: self.is_valid_uuid(v), + "Invalid Tenant ID (must be 32 hex characters in pattern xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)" + ) + + print(f"\n{bcolors.BOLD}APPROVALS_CHANNEL_ID:{bcolors.ENDC}") + print(f" Teams channel ID for approval notifications") + approvals_channel_id = self._prompt_with_validation( + "Channel ID (starts with 19:):", + lambda v: v and v.startswith('19:') and '@thread.tacv2' in v, + "Invalid Channel ID (must start with '19:' and end with '@thread.tacv2')" + ) + + print(f"\n{bcolors.BOLD}APPROVALS_TEAM_ID:{bcolors.ENDC}") + print(f" Teams team ID containing the approvals channel") + approvals_team_id = self._prompt_with_validation( + "Team ID:", + lambda v: self.is_valid_uuid(v), + "Invalid Team ID (must be 32 hex characters in pattern xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)" + ) + + pedm_enabled, pedm_interval = self._collect_pedm_config() + da_enabled, da_interval = self._collect_device_approval_config() + + print(f"\n{bcolors.OKGREEN}{bcolors.BOLD}✓ Teams Configuration Complete!{bcolors.ENDC}") + + return TeamsConfig( + client_id=client_id, + client_secret=client_secret, + tenant_id=tenant_id, + approvals_channel_id=approvals_channel_id, + approvals_team_id=approvals_team_id, + 
pedm_enabled=pedm_enabled, + pedm_polling_interval=pedm_interval, + device_approval_enabled=da_enabled, + device_approval_polling_interval=da_interval + ) + + def build_record_custom_fields(self, config): + return [ + vault.TypedField.new_field('text', config.client_id, 'client_id'), + vault.TypedField.new_field('secret', config.client_secret, 'client_secret'), + vault.TypedField.new_field('text', config.tenant_id, 'tenant_id'), + vault.TypedField.new_field('text', config.approvals_channel_id, 'approvals_channel_id'), + vault.TypedField.new_field('text', config.approvals_team_id, 'approvals_team_id'), + vault.TypedField.new_field('text', 'true' if config.pedm_enabled else 'false', 'pedm_enabled'), + vault.TypedField.new_field('text', str(config.pedm_polling_interval), 'pedm_polling_interval'), + vault.TypedField.new_field('text', 'true' if config.device_approval_enabled else 'false', 'device_approval_enabled'), + vault.TypedField.new_field('text', str(config.device_approval_polling_interval), 'device_approval_polling_interval'), + ] + + # ── Display ─────────────────────────────────────────────────── + + def print_integration_specific_resources(self, config): + print(f" • Approvals Channel: {bcolors.OKBLUE}{config.approvals_channel_id}{bcolors.ENDC}") + print(f" • Approvals Team: {bcolors.OKBLUE}{config.approvals_team_id}{bcolors.ENDC}") + + def print_integration_commands(self): + print(f"\n{bcolors.BOLD}Teams Bot Commands Available:{bcolors.ENDC}") + print(f" {bcolors.OKGREEN}• @keeper request-record{bcolors.ENDC} - Request access to a record") + print(f" {bcolors.OKGREEN}• @keeper request-folder{bcolors.ENDC} - Request access to a folder") + print(f" {bcolors.OKGREEN}• @keeper one-time-share{bcolors.ENDC} - Request a one-time share link\n") diff --git a/keepercommander/service/commands/slack_app_setup.py b/keepercommander/service/commands/slack_app_setup.py deleted file mode 100644 index d08cb2959..000000000 --- 
a/keepercommander/service/commands/slack_app_setup.py +++ /dev/null @@ -1,401 +0,0 @@ -# _ __ -# | |/ /___ ___ _ __ ___ _ _ ® -# | ' Tuple[SetupResult, Dict[str, Any], str]: - """ - Run the base Docker setup using ServiceDockerSetupCommand. - Returns (SetupResult, service_config, config_path) - """ - docker_cmd = ServiceDockerSetupCommand() - - # Determine config path - config_path = kwargs.get('config_path') or os.path.expanduser('~/.keeper/config.json') - if not os.path.isfile(config_path): - raise CommandError('slack-app-setup', f'Config file not found: {config_path}') - - # Print header - DockerSetupPrinter.print_header("Docker Setup") - - # Run core setup steps (Steps 1-7) - setup_result = docker_cmd.run_setup_steps( - params=params, - folder_name=kwargs.get('folder_name', DockerSetupConstants.DEFAULT_SLACK_FOLDER_NAME), - app_name=kwargs.get('app_name', DockerSetupConstants.DEFAULT_APP_NAME), - record_name=kwargs.get('config_record_name', DockerSetupConstants.DEFAULT_RECORD_NAME), - config_path=config_path, - timeout=kwargs.get('timeout', DockerSetupConstants.DEFAULT_TIMEOUT), - skip_device_setup=kwargs.get('skip_device_setup', False) - ) - - DockerSetupPrinter.print_completion("Docker Setup Complete!") - - # Get simplified service configuration for Slack App - service_config = self._get_slack_service_configuration() - - # Generate initial docker-compose.yml - docker_cmd.generate_and_save_docker_compose(setup_result, service_config) - - return setup_result, service_config, config_path - - def _get_slack_service_configuration(self) -> ServiceConfig: - """Get service configuration for Slack App (port + tunneling options)""" - DockerSetupPrinter.print_header("Service Mode Configuration") - - # Port configuration - print(f"{bcolors.BOLD}Port:{bcolors.ENDC}") - print(f" The port on which Commander Service will listen") - while True: - port_input = input(f"{bcolors.OKBLUE}Port [Press Enter for {DockerSetupConstants.DEFAULT_PORT}]:{bcolors.ENDC} ").strip() or 
str(DockerSetupConstants.DEFAULT_PORT) - try: - port = ConfigValidator.validate_port(port_input) - break - except ValidationError as e: - print(f"{bcolors.FAIL}Error: {str(e)}{bcolors.ENDC}") - - # Get tunneling configuration - ngrok_config = self._get_ngrok_config() - - # Only ask for Cloudflare if ngrok is not enabled - if not ngrok_config['ngrok_enabled']: - cloudflare_config = self._get_cloudflare_config() - else: - cloudflare_config = {'cloudflare_enabled': False, 'cloudflare_tunnel_token': '', - 'cloudflare_custom_domain': '', 'cloudflare_public_url': ''} - - return ServiceConfig( - port=port, - commands='search,share-record,share-folder,record-add,one-time-share,epm,pedm,device-approve,get,server', - queue_enabled=True, # Always enable queue mode (v2 API) - ngrok_enabled=ngrok_config['ngrok_enabled'], - ngrok_auth_token=ngrok_config['ngrok_auth_token'], - ngrok_custom_domain=ngrok_config['ngrok_custom_domain'], - ngrok_public_url=ngrok_config.get('ngrok_public_url', ''), - cloudflare_enabled=cloudflare_config['cloudflare_enabled'], - cloudflare_tunnel_token=cloudflare_config['cloudflare_tunnel_token'], - cloudflare_custom_domain=cloudflare_config['cloudflare_custom_domain'], - cloudflare_public_url=cloudflare_config.get('cloudflare_public_url', '') - ) - - def _run_slack_setup(self, params, setup_result: SetupResult, service_config: ServiceConfig, - slack_record_name: str) -> Tuple[str, SlackConfig]: - """ - Run Slack-specific setup steps. 
- Returns (slack_record_uid, slack_config) - """ - # Get Slack configuration - DockerSetupPrinter.print_header("Slack App Configuration") - slack_config = self._get_slack_configuration() - - # Create Slack record - DockerSetupPrinter.print_step(1, 2, f"Creating Slack config record '{slack_record_name}'...") - slack_record_uid = self._create_slack_record( - params, - slack_record_name, - setup_result.folder_uid, - slack_config - ) - - # Update docker-compose.yml - DockerSetupPrinter.print_step(2, 2, "Updating docker-compose.yml with Slack App service...") - self._update_docker_compose_yaml(setup_result, service_config, slack_record_uid) - - return slack_record_uid, slack_config - - def _get_slack_configuration(self) -> SlackConfig: - """Interactively get Slack configuration from user""" - # Slack App Token - print(f"\n{bcolors.BOLD}SLACK_APP_TOKEN:{bcolors.ENDC}") - print(f" App-level token for Slack App") - slack_app_token = self._prompt_with_validation( - "Token (starts with xapp-):", - lambda t: t and t.startswith('xapp-') and len(t) >= 90, - "Invalid Slack App Token (must start with 'xapp-' and be at least 90 chars)" - ) - - # Slack Bot Token - print(f"\n{bcolors.BOLD}SLACK_BOT_TOKEN:{bcolors.ENDC}") - print(f" Bot token for Slack workspace") - slack_bot_token = self._prompt_with_validation( - "Token (starts with xoxb-):", - lambda t: t and t.startswith('xoxb-') and len(t) >= 50, - "Invalid Slack Bot Token (must start with 'xoxb-' and be at least 50 chars)" - ) - - # Slack Signing Secret - print(f"\n{bcolors.BOLD}SLACK_SIGNING_SECRET:{bcolors.ENDC}") - print(f" Signing secret for verifying Slack requests") - slack_signing_secret = self._prompt_with_validation( - "Secret:", - lambda s: s and len(s) == 32, - "Invalid Slack Signing Secret (must be exactly 32 characters)" - ) - - # Approvals Channel ID - print(f"\n{bcolors.BOLD}APPROVALS_CHANNEL_ID:{bcolors.ENDC}") - print(f" Slack channel ID for approval notifications") - approvals_channel_id = 
self._prompt_with_validation( - "Channel ID (starts with C):", - lambda c: c and c.startswith('C'), - "Invalid Approvals Channel ID (must start with 'C')" - ) - - # PEDM Integration (optional) - print(f"\n{bcolors.BOLD}PEDM (Endpoint Privilege Manager) Integration (optional):{bcolors.ENDC}") - print(f" Integrate with Keeper PEDM for privilege elevation") - pedm_enabled = input(f"{bcolors.OKBLUE}Enable PEDM? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' - pedm_polling_interval = 120 - if pedm_enabled: - interval_input = input(f"{bcolors.OKBLUE}PEDM polling interval in seconds [Press Enter for 120]:{bcolors.ENDC} ").strip() - pedm_polling_interval = int(interval_input) if interval_input else 120 - - # Device Approval Integration (optional) - print(f"\n{bcolors.BOLD}SSO Cloud Device Approval Integration (optional):{bcolors.ENDC}") - print(f" Approve SSO Cloud device registrations via Slack") - device_approval_enabled = input(f"{bcolors.OKBLUE}Enable Device Approval? [Press Enter for No] (y/n):{bcolors.ENDC} ").strip().lower() == 'y' - device_approval_polling_interval = 120 - if device_approval_enabled: - interval_input = input(f"{bcolors.OKBLUE}Device approval polling interval in seconds [Press Enter for 120]:{bcolors.ENDC} ").strip() - device_approval_polling_interval = int(interval_input) if interval_input else 120 - - print(f"\n{bcolors.OKGREEN}{bcolors.BOLD}✓ Slack Configuration Complete!{bcolors.ENDC}") - - return SlackConfig( - slack_app_token=slack_app_token, - slack_bot_token=slack_bot_token, - slack_signing_secret=slack_signing_secret, - approvals_channel_id=approvals_channel_id, - pedm_enabled=pedm_enabled, - pedm_polling_interval=pedm_polling_interval, - device_approval_enabled=device_approval_enabled, - device_approval_polling_interval=device_approval_polling_interval - ) - - def _prompt_with_validation(self, prompt: str, validator, error_msg: str) -> str: - """Helper method to prompt user input with validation""" - while True: - 
value = input(f"{bcolors.OKBLUE}{prompt}{bcolors.ENDC} ").strip() - if validator(value): - return value - print(f"{bcolors.FAIL}Error: {error_msg}{bcolors.ENDC}") - - def _create_slack_record(self, params, record_name: str, folder_uid: str, - slack_config: SlackConfig) -> str: - """Create or update Slack configuration record""" - # Check if record exists - record_uid = self._find_existing_record(params, folder_uid, record_name) - - if record_uid: - DockerSetupPrinter.print_success("Using existing record (will update with custom fields)") - else: - # Create new record - record_uid = self._create_basic_slack_record(params, folder_uid, record_name) - - # Update record with custom fields - self._update_slack_record_fields(params, record_uid, slack_config) - - DockerSetupPrinter.print_success(f"Slack config record ready (UID: {record_uid})") - return record_uid - - def _find_existing_record(self, params, folder_uid: str, record_name: str) -> Optional[str]: - """Find existing record by name in folder""" - if folder_uid in params.subfolder_record_cache: - for rec_uid in params.subfolder_record_cache[folder_uid]: - rec = api.get_record(params, rec_uid) - if rec.title == record_name: - return rec_uid - return None - - def _create_basic_slack_record(self, params, folder_uid: str, record_name: str) -> str: - """Create a basic login record for Slack configuration""" - try: - from ..config.cli_handler import CommandHandler - - cli_handler = CommandHandler() - cmd_add = f"record-add --folder='{folder_uid}' --title='{record_name}' --record-type=login" - cli_handler.execute_cli_command(params, cmd_add) - - api.sync_down(params) - - # Find the created record - record_uid = self._find_existing_record(params, folder_uid, record_name) - if not record_uid: - raise CommandError('slack-app-setup', 'Failed to find created Slack record') - - return record_uid - except Exception as e: - raise CommandError('slack-app-setup', f'Failed to create Slack record: {str(e)}') - - def 
_update_slack_record_fields(self, params, record_uid: str, slack_config: SlackConfig) -> None: - """Update record with Slack configuration custom fields""" - try: - record = vault.KeeperRecord.load(params, record_uid) - - # Add custom fields (secret fields are masked, text fields are visible) - record.custom = [ - vault.TypedField.new_field('secret', slack_config.slack_app_token, 'slack_app_token'), - vault.TypedField.new_field('secret', slack_config.slack_bot_token, 'slack_bot_token'), - vault.TypedField.new_field('secret', slack_config.slack_signing_secret, 'slack_signing_secret'), - vault.TypedField.new_field('text', slack_config.approvals_channel_id, 'approvals_channel_id'), - vault.TypedField.new_field('text', 'true' if slack_config.pedm_enabled else 'false', 'pedm_enabled'), - vault.TypedField.new_field('text', str(slack_config.pedm_polling_interval), 'pedm_polling_interval'), - vault.TypedField.new_field('text', 'true' if slack_config.device_approval_enabled else 'false', 'device_approval_enabled'), - vault.TypedField.new_field('text', str(slack_config.device_approval_polling_interval), 'device_approval_polling_interval'), - ] - - record_management.update_record(params, record) - params.sync_data = True - api.sync_down(params) - - except Exception as e: - raise CommandError('slack-app-setup', f'Failed to update Slack record fields: {str(e)}') - - def _update_docker_compose_yaml(self, setup_result: SetupResult, service_config: ServiceConfig, - slack_record_uid: str) -> None: - """Regenerate docker-compose.yml with Slack app service""" - compose_file = os.path.join(os.getcwd(), 'docker-compose.yml') - - if not os.path.exists(compose_file): - raise CommandError('slack-app-setup', f'docker-compose.yml not found at {compose_file}') - - try: - # Check if slack-app already exists - with open(compose_file, 'r') as f: - content = f.read() - - if 'slack-app:' in content: - DockerSetupPrinter.print_warning("slack-app service already exists in docker-compose.yml") - 
return - - # Regenerate docker-compose.yml with both Commander and Slack App - builder = DockerComposeBuilder(setup_result, asdict(service_config)) - yaml_content = builder.add_slack_service(slack_record_uid).build() - - with open(compose_file, 'w') as f: - f.write(yaml_content) - - DockerSetupPrinter.print_success("docker-compose.yml updated successfully") - - except Exception as e: - raise CommandError('slack-app-setup', f'Failed to update docker-compose.yml: {str(e)}') - - def _print_success_message(self, setup_result: SetupResult, service_config: ServiceConfig, - slack_record_uid: str, slack_config: SlackConfig, config_path: str) -> None: - """Print consolidated success message for both phases""" - print(f"\n{bcolors.OKGREEN}{bcolors.BOLD}✓ Slack App Integration Setup Complete!{bcolors.ENDC}\n") - - # Resources created - print(f"{bcolors.BOLD}Resources Created:{bcolors.ENDC}") - print(f" {bcolors.BOLD}Phase 1 - Commander Service:{bcolors.ENDC}") - DockerSetupPrinter.print_phase1_resources(setup_result, indent=" ") - print(f" {bcolors.BOLD}Phase 2 - Slack App:{bcolors.ENDC}") - print(f" • Slack Config Record: {bcolors.OKBLUE}{slack_record_uid}{bcolors.ENDC}") - print(f" • Approvals Channel: {bcolors.OKBLUE}{slack_config.approvals_channel_id}{bcolors.ENDC}") - print(f" • PEDM Integration: {bcolors.OKBLUE}{'true' if slack_config.pedm_enabled else 'false'}{bcolors.ENDC}") - print(f" • Device Approval: {bcolors.OKBLUE}{'true' if slack_config.device_approval_enabled else 'false'}{bcolors.ENDC}") - - # Next steps - self._print_next_steps(service_config, config_path) - - def _print_next_steps(self, service_config: ServiceConfig, config_path: str) -> None: - """Print deployment next steps for Slack integration""" - DockerSetupPrinter.print_common_deployment_steps(str(service_config.port), config_path) - - # Slack-specific logs - print(f" {bcolors.OKGREEN}docker logs keeper-slack-app{bcolors.ENDC} - View Slack App logs") - - # Slack-specific commands - 
print(f"\n{bcolors.BOLD}Slack Commands Available:{bcolors.ENDC}") - print(f" {bcolors.OKGREEN}• /keeper-request-record{bcolors.ENDC} - Request access to a record") - print(f" {bcolors.OKGREEN}• /keeper-request-folder{bcolors.ENDC} - Request access to a folder") - print(f" {bcolors.OKGREEN}• /keeper-one-time-share{bcolors.ENDC} - Request a one-time share link\n") diff --git a/keepercommander/service/docker/__init__.py b/keepercommander/service/docker/__init__.py index d10db96ce..97f4e998b 100644 --- a/keepercommander/service/docker/__init__.py +++ b/keepercommander/service/docker/__init__.py @@ -19,7 +19,7 @@ - Docker Compose generation """ -from .models import DockerSetupConstants, SetupResult, ServiceConfig, SlackConfig, SetupStep +from .models import DockerSetupConstants, SetupResult, ServiceConfig, SlackConfig, TeamsConfig, SetupStep from .printer import DockerSetupPrinter from .setup_base import DockerSetupBase from .compose_builder import DockerComposeBuilder @@ -29,6 +29,7 @@ 'SetupResult', 'ServiceConfig', 'SlackConfig', + 'TeamsConfig', 'SetupStep', 'DockerSetupPrinter', 'DockerSetupBase', diff --git a/keepercommander/service/docker/compose_builder.py b/keepercommander/service/docker/compose_builder.py index f7498608f..f4836f143 100644 --- a/keepercommander/service/docker/compose_builder.py +++ b/keepercommander/service/docker/compose_builder.py @@ -9,75 +9,49 @@ # Contact: commander@keepersecurity.com # -""" -Builder class for generating docker-compose.yml configuration -""" -import os +"""docker-compose.yml generation.""" from typing import Dict, Any, List class DockerComposeBuilder: - """Builder for docker-compose.yml content with support for Commander and Slack App services""" + """Builds docker-compose.yml for Commander + integration services.""" - def __init__(self, setup_result, config: Dict[str, Any]): - """ - Initialize the builder - - Args: - setup_result: Results from Docker setup containing UIDs and KSM config (SetupResult object) - config: 
Service configuration dictionary - """ + def __init__(self, setup_result, config: Dict[str, Any], commander_service_name: str = 'commander', commander_container_name: str = 'keeper-service'): self.setup_result = setup_result self.config = config + self.commander_service_name = commander_service_name + self.commander_container_name = commander_container_name self._service_cmd_parts: List[str] = [] self._volumes: List[str] = [] self._services: Dict[str, Dict[str, Any]] = {} - + def build(self) -> str: - """ - Build the complete docker-compose.yml content - - Returns: - YAML content as a string - """ - if 'commander' not in self._services: - self._services['commander'] = self._build_commander_service() + if self.commander_service_name not in self._services: + self._services[self.commander_service_name] = self._build_commander_service() return self.to_yaml() - + def build_dict(self) -> Dict[str, Any]: - """ - Build the docker-compose structure as a dictionary - - Returns: - Dictionary structure ready for YAML serialization - """ - if 'commander' not in self._services: - self._services['commander'] = self._build_commander_service() + if self.commander_service_name not in self._services: + self._services[self.commander_service_name] = self._build_commander_service() return {'services': self._services} - def add_slack_service(self, slack_record_uid: str) -> 'DockerComposeBuilder': - """ - Add Slack App service to the compose configuration - - Args: - slack_record_uid: UID of the Slack config record - - Returns: - Self for method chaining - """ - # Ensure commander service exists first - if 'commander' not in self._services: - self._services['commander'] = self._build_commander_service() - # Add slack service - self._services['slack-app'] = self._build_slack_service(slack_record_uid) + def add_integration_service(self, service_name: str, container_name: str, + image: str, record_uid: str, + record_env_key: str) -> 'DockerComposeBuilder': + """Add an integration service to 
the compose file. Returns self.""" + if self.commander_service_name not in self._services: + self._services[self.commander_service_name] = self._build_commander_service() + self._services[service_name] = self._build_integration_service( + container_name, image, record_uid, record_env_key + ) return self + def _build_commander_service(self) -> Dict[str, Any]: - """Build the Commander service configuration""" self._build_service_command() service = { - 'container_name': 'keeper-service', + 'container_name': self.commander_container_name, 'ports': [f"127.0.0.1:{self.config['port']}:{self.config['port']}"], 'image': 'keeper/commander:latest', 'command': ' '.join(self._service_cmd_parts), @@ -90,18 +64,19 @@ def _build_commander_service(self) -> Dict[str, Any]: return service - def _build_slack_service(self, slack_record_uid: str) -> Dict[str, Any]: - """Build the Slack App service configuration""" + def _build_integration_service(self, container_name: str, image: str, + record_uid: str, + record_env_key: str) -> Dict[str, Any]: return { - 'container_name': 'keeper-slack-app', - 'image': 'keeper/slack-app:latest', + 'container_name': container_name, + 'image': image, 'environment': { 'KSM_CONFIG': self.setup_result.b64_config, 'COMMANDER_RECORD': self.setup_result.record_uid, - 'SLACK_RECORD': slack_record_uid + record_env_key: record_uid }, 'depends_on': { - 'commander': { + self.commander_service_name: { 'condition': 'service_healthy' } }, @@ -109,7 +84,6 @@ def _build_slack_service(self, slack_record_uid: str) -> Dict[str, Any]: } def _build_service_command(self) -> None: - """Build the service-create command parts""" port = self.config['port'] commands = self.config['commands'] queue_enabled = self.config.get('queue_enabled', True) @@ -126,7 +100,6 @@ def _build_service_command(self) -> None: self._add_docker_options() def _add_security_options(self) -> None: - """Add advanced security options (IP filtering, rate limiting, encryption)""" # IP allowed list (only add 
if not default) allowed_ip = self.config.get('allowed_ip', '0.0.0.0/0,::/0') if allowed_ip and allowed_ip != '0.0.0.0/0,::/0': @@ -153,7 +126,6 @@ def _add_security_options(self) -> None: self._service_cmd_parts.append(f"-te '{token_expiration}'") def _add_tunneling_options(self) -> None: - """Add ngrok and Cloudflare tunneling options""" # Ngrok configuration if self.config.get('ngrok_enabled') and self.config.get('ngrok_auth_token'): self._service_cmd_parts.append(f"-ng {self.config['ngrok_auth_token']}") @@ -167,7 +139,6 @@ def _add_tunneling_options(self) -> None: self._service_cmd_parts.append(f"-cfd {self.config['cloudflare_custom_domain']}") def _add_docker_options(self) -> None: - """Add Docker-specific parameters (KSM config, record UIDs)""" self._service_cmd_parts.extend([ f"-ur {self.setup_result.record_uid}", f"--ksm-config {self.setup_result.b64_config}", @@ -175,7 +146,6 @@ def _add_docker_options(self) -> None: ]) def _build_healthcheck(self) -> Dict[str, Any]: - """Build the healthcheck configuration""" port = self.config['port'] # Build the Python script as a single-line command @@ -193,12 +163,6 @@ def _build_healthcheck(self) -> Dict[str, Any]: } def to_yaml(self) -> str: - """ - Convert the docker-compose structure to YAML string - - Returns: - YAML formatted string - """ try: import yaml except ImportError: diff --git a/keepercommander/service/docker/models.py b/keepercommander/service/docker/models.py index 04a5d74b3..78ad0a6c6 100644 --- a/keepercommander/service/docker/models.py +++ b/keepercommander/service/docker/models.py @@ -9,9 +9,7 @@ # Contact: commander@keepersecurity.com # -""" -Data models and constants for Docker setup. 
-""" +"""Docker setup data models and constants.""" from dataclasses import dataclass from enum import Enum @@ -22,32 +20,21 @@ # ======================== class DockerSetupConstants: - """Constants for Docker setup command""" - # Default resource names for service-docker-setup + """Defaults for docker setup.""" DEFAULT_FOLDER_NAME = 'Commander Service Mode - Docker' DEFAULT_APP_NAME = 'Commander Service Mode - KSM App' DEFAULT_RECORD_NAME = 'Commander Service Mode Docker Config' DEFAULT_CLIENT_NAME = 'Commander Service Mode - KSM App Client' - # Default resource names for slack-app-setup - DEFAULT_SLACK_FOLDER_NAME = 'Commander Service Mode - Slack App' - DEFAULT_SLACK_RECORD_NAME = 'Commander Service Mode Slack App Config' - # Default service configuration DEFAULT_PORT = 8900 DEFAULT_COMMANDS = 'tree,ls' DEFAULT_TIMEOUT = '30d' - # Essential config keys RECORD_UID_KEY = 'record_uid' KSM_CONFIG_KEY = 'ksm_config' -# ======================== -# Enums -# ======================== - - class SetupStep(Enum): """Enumeration for setup steps""" DEVICE_SETUP = 1 @@ -59,13 +46,8 @@ class SetupStep(Enum): CREATE_CLIENT = 7 -# ======================== -# Data Classes -# ======================== - @dataclass class SetupResult: - """Container for setup results that can be reused by integration commands""" folder_uid: str folder_name: str app_uid: str @@ -76,7 +58,6 @@ class SetupResult: @dataclass class ServiceConfig: - """Service configuration for Docker deployment""" port: int commands: str queue_enabled: bool @@ -98,7 +79,6 @@ class ServiceConfig: @dataclass class SlackConfig: - """Slack App configuration for Docker deployment""" slack_app_token: str slack_bot_token: str slack_signing_secret: str @@ -108,3 +88,16 @@ class SlackConfig: device_approval_enabled: bool = False device_approval_polling_interval: int = 120 + +@dataclass +class TeamsConfig: + client_id: str + client_secret: str + tenant_id: str + approvals_channel_id: str + approvals_team_id: str + pedm_enabled: bool 
= False + pedm_polling_interval: int = 120 + device_approval_enabled: bool = False + device_approval_polling_interval: int = 120 + diff --git a/keepercommander/service/util/request_validation.py b/keepercommander/service/util/request_validation.py index f2750f98b..5b3019a0d 100644 --- a/keepercommander/service/util/request_validation.py +++ b/keepercommander/service/util/request_validation.py @@ -67,8 +67,8 @@ def process_file_data(request_data: Dict[str, Any], command: str) -> Tuple[str, # Check if command contains FILEDATA placeholder and request has filedata if "FILEDATA" in command and "filedata" in request_data: filedata = request_data.get("filedata") - if not isinstance(filedata, dict): - logger.warning("filedata must be a JSON object") + if not isinstance(filedata, (dict, list)): + logger.warning("filedata must be a JSON object or array") return processed_command, temp_files try: diff --git a/keepercommander/versioning.py b/keepercommander/versioning.py index 05f5bb46c..3f5cd760b 100644 --- a/keepercommander/versioning.py +++ b/keepercommander/versioning.py @@ -112,7 +112,7 @@ def welcome_print_version(params): elif not ver_info.get('is_up_to_date'): from colorama import Fore current = ver_info.get('current_github_version') - print(f"{Fore.YELLOW}Update available: v{current} (you have v{this_app_version}). Type 'version' for details.{Fore.RESET}\n") + logging.warning(f"{Fore.YELLOW}Update available: v{current} (you have v{this_app_version}). Type 'version' for details.{Fore.RESET}\n") else: pass # print("Your version of the Commander CLI is %s." 
% this_app_version) diff --git a/requirements.txt b/requirements.txt index a58194f81..5897db9ff 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ requests>=2.31.0 cryptography>=39.0.1 protobuf>=4.23.0 keeper-secrets-manager-core>=16.6.0 -keeper_pam_webrtc_rs>=1.2.1; python_version>='3.8' +keeper_pam_webrtc_rs>=2.0.1; python_version>='3.8' pydantic>=2.6.4; python_version>='3.8' flask; python_version>='3.8' pyngrok>=7.5.0 diff --git a/setup.cfg b/setup.cfg index 3e145e711..b05719c26 100644 --- a/setup.cfg +++ b/setup.cfg @@ -43,7 +43,7 @@ install_requires = requests>=2.31.0 tabulate websockets - keeper_pam_webrtc_rs>=1.2.1; python_version>='3.8' + keeper_pam_webrtc_rs>=2.0.1; python_version>='3.8' pydantic>=2.6.4; python_version>='3.8' fpdf2>=2.8.3 cbor2; sys_platform == "darwin" and python_version>='3.10' diff --git a/tests/compliance/diff.py b/tests/compliance/diff.py new file mode 100644 index 000000000..0cbf41a15 --- /dev/null +++ b/tests/compliance/diff.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +"""Field-level JSON diff for compliance A/B test results. + +Compares after/ vs before/ JSON files and reports: + - Row count differences + - Field value differences within matching rows + - Missing/extra rows + +Usage: python3 tests/compliance/diff.py <results_dir> + results_dir should contain after/ and before/ subdirectories with t*.json files. 
+""" +import json +import os +import sys +from pathlib import Path + + +def load_json(path): + try: + with open(path) as f: + return json.load(f) + except Exception: + return None + + +def row_key(row, headers): + """Build a hashable key from scalar identity fields.""" + def _hashable(v): + if isinstance(v, list): + return tuple(sorted(str(x) for x in v)) + return v + + if isinstance(row, dict): + # Use only scalar identity fields for keying (skip list fields like team_uid) + id_fields = ['record_uid', 'shared_folder_uid', 'team_uid', 'email', 'vault_owner'] + parts = tuple(_hashable(row.get(k)) for k in id_fields if k in row and not isinstance(row.get(k), list)) + if parts: + return parts + return tuple((k, _hashable(v)) for k, v in sorted(row.items())) + if isinstance(row, (list, tuple)): + return tuple(_hashable(x) for x in row[:2]) + return (row,) + + +def _normalize(v): + """Normalize a value for comparison (sort lists so ordering doesn't matter).""" + if isinstance(v, list): + return sorted(v, key=str) + return v + + +def diff_rows(before_row, after_row, headers=None): + """Compare two rows field by field, return list of (field, before_val, after_val).""" + diffs = [] + if isinstance(before_row, dict) and isinstance(after_row, dict): + all_keys = sorted(set(list(before_row.keys()) + list(after_row.keys()))) + for k in all_keys: + bv = before_row.get(k) + av = after_row.get(k) + if _normalize(bv) != _normalize(av): + diffs.append((k, bv, av)) + elif isinstance(before_row, (list, tuple)) and isinstance(after_row, (list, tuple)): + max_len = max(len(before_row), len(after_row)) + for i in range(max_len): + bv = before_row[i] if i < len(before_row) else None + av = after_row[i] if i < len(after_row) else None + if bv != av: + field = headers[i] if headers and i < len(headers) else f'[{i}]' + diffs.append((field, bv, av)) + return diffs + + +def compare_file(before_path, after_path): + """Compare two JSON result files. 
Returns (status, summary, details).""" + before = load_json(before_path) + after = load_json(after_path) + + if before is None and after is None: + return 'ERR', 'both files failed to parse', [] + if before is None: + return 'ERR', 'before file failed to parse', [] + if after is None: + return 'ERR', 'after file failed to parse', [] + + if not isinstance(before, list) or not isinstance(after, list): + if before == after: + return 'OK', 'identical objects', [] + return 'DIFF', 'object-level diff', [] + + if len(before) == 0 and len(after) == 0: + return 'OK', 'rows: 0', [] + + # Index rows by key for field-level comparison + before_keyed = {} + after_keyed = {} + for r in before: + before_keyed.setdefault(row_key(r, None), []).append(r) + for r in after: + after_keyed.setdefault(row_key(r, None), []).append(r) + + details = [] + field_diffs = 0 + rows_only_before = 0 + rows_only_after = 0 + + all_keys = sorted(set(list(before_keyed.keys()) + list(after_keyed.keys())), + key=lambda k: str(k)) + + for key in all_keys: + b_rows = before_keyed.get(key, []) + a_rows = after_keyed.get(key, []) + pairs = max(len(b_rows), len(a_rows)) + for i in range(pairs): + br = b_rows[i] if i < len(b_rows) else None + ar = a_rows[i] if i < len(a_rows) else None + if br is None: + rows_only_after += 1 + if len(details) < 10: + details.append(f' + after only: {_abbrev(ar)}') + elif ar is None: + rows_only_before += 1 + if len(details) < 10: + details.append(f' - before only: {_abbrev(br)}') + else: + rd = diff_rows(br, ar) + if rd: + field_diffs += 1 + if len(details) < 10: + key_str = _abbrev_key(key) + for field, bv, av in rd[:3]: + details.append(f' ~ {key_str} [{field}]: {bv} -> {av}') + + row_count_match = len(before) == len(after) + has_diffs = field_diffs > 0 or rows_only_before > 0 or rows_only_after > 0 + + if not has_diffs: + return 'OK', f'rows: {len(after)}', [] + + parts = [f'rows: {len(before)}->{len(after)}'] + if field_diffs: + parts.append(f'{field_diffs} field diff(s)') 
+ if rows_only_before: + parts.append(f'{rows_only_before} removed') + if rows_only_after: + parts.append(f'{rows_only_after} added') + + status = 'DIFF' if not row_count_match or field_diffs > 0 or rows_only_before > 0 else 'DIFF'  # NOTE(review): both branches yield 'DIFF'; condition is redundant — confirm intent + return status, ', '.join(parts), details + + +def _abbrev(obj, maxlen=80): + s = json.dumps(obj, default=str) + return s if len(s) <= maxlen else s[:maxlen - 3] + '...' + + +def _abbrev_key(key): + parts = [str(v) for v in key if v is not None] + s = '/'.join(parts) + return s[:40] if len(s) > 40 else s + + +def main(): + if len(sys.argv) < 2: + print(f'Usage: {sys.argv[0]} <results_dir>') + sys.exit(1) + + results_dir = Path(sys.argv[1]) + after_dir = results_dir / 'after' + before_dir = results_dir / 'before' + + if not after_dir.is_dir(): + print(f'ERROR: No after/ directory at {after_dir}') + sys.exit(1) + if not before_dir.is_dir(): + print(f'ERROR: No before/ directory at {before_dir}') + sys.exit(1) + + any_diff = False + after_files = sorted(after_dir.glob('t*.json')) + + for af in after_files: + fname = af.name + bf = before_dir / fname + if not bf.exists(): + print(f' [SKIP] {fname} -- no baseline') + continue + + status, summary, details = compare_file(str(bf), str(af)) + tag = f'[{status}]' + print(f' {tag:8s} {fname} -- {summary}') + for d in details: + print(d) + if status == 'DIFF': + any_diff = True + + # Check for new files + for af in after_files: + bf = before_dir / af.name + if not bf.exists(): + data = load_json(str(af)) + rows = len(data) if isinstance(data, list) else '?' 
+ print(f' [NEW] {af.name} -- rows: {rows} (no baseline)') + + print() + if any_diff: + print('Some tests differ -- review above.') + sys.exit(1) + else: + print('All comparable tests match.') + + +if __name__ == '__main__': + main() diff --git a/tests/compliance_test.sh b/tests/compliance/run.sh similarity index 72% rename from tests/compliance_test.sh rename to tests/compliance/run.sh index 20a5b996b..fac8789a0 100755 --- a/tests/compliance_test.sh +++ b/tests/compliance/run.sh @@ -13,15 +13,15 @@ # - python3 on PATH # # Quick start: -# bash tests/compliance_test.sh after # run tests on current branch -# bash tests/compliance_test.sh before # run tests on baseline -# bash tests/compliance_test.sh diff # compare existing results -# bash tests/compliance_test.sh parallel # run both simultaneously -# bash tests/compliance_test.sh all # run both sequentially, then diff +# bash tests/compliance/run.sh after # run tests on current branch +# bash tests/compliance/run.sh before # run tests on baseline +# bash tests/compliance/run.sh diff # compare existing results +# bash tests/compliance/run.sh parallel # run both simultaneously +# bash tests/compliance/run.sh all # run both sequentially, then diff # # Configuration: # The script auto-discovers users and teams from the vault. Override any -# value by exporting env vars or creating tests/compliance_test.env: +# value by exporting env vars or creating tests/compliance/test.env: # # AFTER_DIR Commander under test (default: repo root) # BEFORE_DIR Baseline Commander (default: empty, skips 'before') @@ -35,12 +35,12 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" -TEMPLATE="$SCRIPT_DIR/compliance_test.batch" -RESULTS_DIR="$SCRIPT_DIR/compliance_test_results" +REPO_DIR="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" +TEMPLATE="$SCRIPT_DIR/test.batch" +RESULTS_DIR="$SCRIPT_DIR/results" # Load env file if present (won't override already-exported vars) -ENV_FILE="$SCRIPT_DIR/compliance_test.env" +ENV_FILE="$SCRIPT_DIR/test.env" if [ -f "$ENV_FILE" ]; then echo "Loading config from $ENV_FILE" set -a; source "$ENV_FILE"; set +a @@ -49,11 +49,36 @@ fi AFTER_DIR="${AFTER_DIR:-$REPO_DIR}" BEFORE_DIR="${BEFORE_DIR:-}" KEEPER_CONFIG="${KEEPER_CONFIG:-./config.json}" +AFTER_KEEPER_CONFIG="${AFTER_KEEPER_CONFIG:-$KEEPER_CONFIG}" +BEFORE_KEEPER_CONFIG="${BEFORE_KEEPER_CONFIG:-$KEEPER_CONFIG}" + +# Convert path to native OS format for Python (no-op on Unix, forward-slash Windows path on MSYS) +native_path() { + if command -v cygpath &>/dev/null; then + cygpath -m "$1" + else + echo "$1" + fi +} + +# Resolve venv bin directory (Scripts on Windows, bin on Unix) +venv_keeper() { + local dir="$1" + if [ -x "$dir/.venv/Scripts/keeper" ] || [ -x "$dir/.venv/Scripts/keeper.exe" ]; then + echo ".venv/Scripts/keeper" + else + echo ".venv/bin/keeper" + fi +} # ── Helper: run a keeper command and capture output ────────────────────────── keeper_cmd() { local dir="$1"; shift - (cd "$dir" && .venv/bin/keeper --config "$KEEPER_CONFIG" "$@" 2>/dev/null) + local keeper; keeper=$(venv_keeper "$dir") + local cfg="$KEEPER_CONFIG" + [[ "$dir" == "$AFTER_DIR" ]] && cfg="$AFTER_KEEPER_CONFIG" + [[ "$dir" == "$BEFORE_DIR" ]] && cfg="$BEFORE_KEEPER_CONFIG" + (cd "$dir" && "$keeper" --config "$cfg" "$@" 2>/dev/null) } # ── Auto-discover test parameters from the vault ──────────────────────────── @@ -68,19 +93,19 @@ discover() { users_json=$(keeper_cmd "$dir" enterprise-info -u --format json || echo "[]") if [ -z "${USER1:-}" ]; then USER1=$(python3 -c " -import json, sys +import json, sys, random users = json.loads(sys.stdin.read()) active = [u for u in users if u.get('status','') == 'Active'] -print(active[0]['email'] if active else users[0]['email'] if users else '') +print(random.choice(active)['email'] 
if active else users[0]['email'] if users else '') " <<< "$users_json") echo " USER1=$USER1" fi if [ -z "${USER2:-}" ]; then USER2=$(python3 -c " -import json, sys +import json, sys, random users = json.loads(sys.stdin.read()) active = [u for u in users if u.get('status','') == 'Active' and u.get('email','') != '$USER1'] -print(active[-1]['email'] if active else '') +print(random.choice(active)['email'] if active else '') " <<< "$users_json") echo " USER2=$USER2" fi @@ -90,11 +115,17 @@ print(active[-1]['email'] if active else '') teams_json=$(keeper_cmd "$dir" enterprise-info -t --columns users --format json || echo "[]") if [ -z "${TEAM1:-}" ]; then TEAM1=$(python3 -c " -import json, sys +import json, sys, random teams = json.loads(sys.stdin.read()) skip = {'everyone', 'admins'} -candidates = [t for t in teams if t.get('name', t.get('team_name','')).lower() not in skip] -print(candidates[0].get('name', candidates[0].get('team_name','')) if candidates else (teams[0].get('name', teams[0].get('team_name','')) if teams else '')) +candidates = [t for t in teams if t.get('name', t.get('team_name','')).lower() not in skip and len(t.get('users', [])) >= 2] +if not candidates: + candidates = [t for t in teams if t.get('name', t.get('team_name','')).lower() not in skip and len(t.get('users', [])) >= 1] +if candidates: + pick = random.choice(candidates) + print(pick.get('name', pick.get('team_name',''))) +elif teams: + print(teams[0].get('name', teams[0].get('team_name',''))) " <<< "$teams_json") echo " TEAM1=$TEAM1" fi @@ -134,6 +165,7 @@ print('$USER2') # ── Generate a concrete batch file from the template ───────────────────────── generate_batch() { local outdir="$1" dest="$2" + outdir=$(native_path "$outdir") sed -e "s|{OUTDIR}|$outdir|g" \ -e "s|{USER1}|$USER1|g" \ -e "s|{USER2}|$USER2|g" \ @@ -150,6 +182,7 @@ run_after() { mkdir -p "$out" generate_batch "$out" "$batch" + local keeper; keeper=$(venv_keeper "$AFTER_DIR") echo "=== Running AFTER (current branch) ===" 
echo " Dir: $AFTER_DIR" echo " Output: $out" @@ -158,7 +191,7 @@ run_after() { echo " TEAM1=$TEAM1 TEAM_ONLY_USER=$TEAM_ONLY_USER" echo "" cd "$AFTER_DIR" - .venv/bin/keeper --config "$KEEPER_CONFIG" run-batch "$batch" 2>&1 | tee "$out/_run.log" + "$keeper" --config "$AFTER_KEEPER_CONFIG" run-batch "$batch" 2>&1 | tee "$out/_run.log" echo "" echo "=== AFTER complete ===" } @@ -174,6 +207,7 @@ run_before() { mkdir -p "$out" generate_batch "$out" "$batch" + local keeper; keeper=$(venv_keeper "$BEFORE_DIR") echo "=== Running BEFORE (baseline) ===" echo " Dir: $BEFORE_DIR" echo " Output: $out" @@ -182,62 +216,19 @@ run_before() { echo " TEAM1=$TEAM1 TEAM_ONLY_USER=$TEAM_ONLY_USER" echo "" cd "$BEFORE_DIR" - .venv/bin/keeper --config "$KEEPER_CONFIG" run-batch "$batch" 2>&1 | tee "$out/_run.log" + "$keeper" --config "$BEFORE_KEEPER_CONFIG" run-batch "$batch" 2>&1 | tee "$out/_run.log" echo "" echo "=== BEFORE complete ===" } # ── Compare results ────────────────────────────────────────────────────────── diff_results() { - local after_out="$RESULTS_DIR/after" - local before_out="$RESULTS_DIR/before" + local results_native + results_native=$(native_path "$RESULTS_DIR") echo "" echo "=== Comparing results ===" echo "" - - if [ ! -d "$after_out" ]; then - echo "ERROR: No 'after' results found at $after_out"; exit 1 - fi - if [ ! -d "$before_out" ]; then - echo "ERROR: No 'before' results found at $before_out"; exit 1 - fi - - local any_diff=0 - for f in "$after_out"/t*.json; do - local fname - fname=$(basename "$f") - local before_f="$before_out/$fname" - if [ ! 
-f "$before_f" ]; then - echo " [SKIP] $fname — no baseline (new test or baseline error)" - continue - fi - local after_rows before_rows - after_rows=$(python3 -c "import json; d=json.load(open('$f')); print(len(d) if isinstance(d,list) else 'obj')" 2>/dev/null || echo "ERR") - before_rows=$(python3 -c "import json; d=json.load(open('$before_f')); print(len(d) if isinstance(d,list) else 'obj')" 2>/dev/null || echo "ERR") - if [ "$after_rows" = "$before_rows" ]; then - echo " [OK] $fname — rows: $after_rows" - else - echo " [DIFF] $fname — before=$before_rows, after=$after_rows" - any_diff=1 - fi - done - - for f in "$after_out"/t*.json; do - local fname - fname=$(basename "$f") - if [ ! -f "$before_out/$fname" ]; then - local after_rows - after_rows=$(python3 -c "import json; d=json.load(open('$f')); print(len(d) if isinstance(d,list) else 'obj')" 2>/dev/null || echo "ERR") - echo " [NEW] $fname — rows: $after_rows (no baseline to compare)" - fi - done - - echo "" - if [ "$any_diff" -eq 0 ]; then - echo "All comparable tests match." - else - echo "Some tests differ — review above." - fi + python3 "$SCRIPT_DIR/diff.py" "$results_native" } # ── Main ───────────────────────────────────────────────────────────────────── @@ -246,6 +237,7 @@ case "${1:-help}" in before) run_before ;; diff) diff_results ;; parallel) + discover "$AFTER_DIR" run_after & local_after_pid=$! run_before & @@ -265,7 +257,7 @@ case "${1:-help}" in cat <<'USAGE' Compliance Test Runner — A/B test harness for Commander compliance commands. -Usage: bash tests/compliance_test.sh +Usage: bash tests/compliance/run.sh Modes: after Run the test suite against the current branch (AFTER_DIR) @@ -275,11 +267,11 @@ Modes: all Run after, then before, then diff Configuration: - Set values in tests/compliance_test.env or export as env vars. + Set values in tests/compliance/test.env or export as env vars. If not set, USER1/USER2/TEAM1/TEAM_ONLY_USER are auto-discovered from the vault via enterprise-info. 
- See tests/compliance_test.env.example for all options. + See tests/compliance/test.env.example for all options. USAGE exit 1 ;; diff --git a/tests/compliance_test.batch b/tests/compliance/test.batch similarity index 70% rename from tests/compliance_test.batch rename to tests/compliance/test.batch index c877cb87b..33ba58caf 100644 --- a/tests/compliance_test.batch +++ b/tests/compliance/test.batch @@ -1,7 +1,7 @@ # Compliance Test Suite # # Batch file template for Keeper Commander's compliance commands. -# Placeholders are substituted by compliance_test.sh at runtime: +# Placeholders are substituted by run.sh at runtime: # {OUTDIR} results directory # {USER1} primary admin email # {USER2} secondary user email @@ -9,7 +9,7 @@ # {TEAM1} team name or UID # # Do not run this file directly — use the runner: -# bash tests/compliance_test.sh [after|before|diff|parallel|all] +# bash tests/compliance/run.sh [after|before|diff|parallel|all] # ─── compliance report ──────────────────────────────────────────────────────── # t01: full report (rebuild to ensure fresh data) @@ -17,9 +17,9 @@ compliance report -r --format json --output {OUTDIR}/t01_report_full.json # t02: filter by single user compliance report -u {USER1} --format json --output {OUTDIR}/t02_report_user.json # t03: filter by team -compliance report --team {TEAM1} --format json --output {OUTDIR}/t03_report_team.json +compliance report --team "{TEAM1}" --format json --output {OUTDIR}/t03_report_team.json # t04: filter by user + team (OR union) -compliance report -u {USER1} --team {TEAM1} --format json --output {OUTDIR}/t04_report_user_team.json +compliance report -u {USER1} --team "{TEAM1}" --format json --output {OUTDIR}/t04_report_user_team.json # t05: aging columns present compliance report --aging --format json --output {OUTDIR}/t05_report_aging.json # t06: aging + user filter @@ -38,14 +38,21 @@ compliance record-access-report --email @all --aging --format json --output {OUT compliance record-access-report 
--email {USER2} --aging --format json --output {OUTDIR}/t14_rar_user2_aging.json # t15: first user again (should hit warm cache) compliance record-access-report --email {USER1} --aging --format json --output {OUTDIR}/t15_rar_user1_cached.json -# t16: team filter -compliance record-access-report --email @all --team {TEAM1} --format json --output {OUTDIR}/t16_rar_team.json +# t16: team filter (wipe cache after for t17-t19) +compliance record-access-report --email @all --team "{TEAM1}" -nc --format json --output {OUTDIR}/t16_rar_team.json +# ── KC-1142 regression tests: vault report-type with cold cache ────────────── +# t17: vault report-type — single user, rebuild from cold cache +compliance record-access-report --email {USER1} --report-type vault -r --format json --output {OUTDIR}/t17_rar_user_vault.json +# t18: vault report-type — all users (baseline for comparison with t17) +compliance record-access-report --email @all --report-type vault --format json --output {OUTDIR}/t18_rar_all_vault.json +# t19: vault report-type — single user + aging, rebuild from cold cache +compliance record-access-report --email {USER1} --report-type vault --aging -r --format json --output {OUTDIR}/t19_rar_user_vault_aging.json # ─── compliance team-report ─────────────────────────────────────────────────── # t20: full team report compliance team-report --format json --output {OUTDIR}/t20_tr_full.json # t21: filter by team -compliance team-report --team {TEAM1} --format json --output {OUTDIR}/t21_tr_team.json +compliance team-report --team "{TEAM1}" --format json --output {OUTDIR}/t21_tr_team.json # t22: filter by user who is a direct shared-folder member compliance team-report -u {USER1} --format json --output {OUTDIR}/t22_tr_user_direct.json # t23: filter by user whose only SF access is via a team @@ -53,31 +60,33 @@ compliance team-report -u {TEAM_ONLY_USER} --format json --output {OUTDIR}/t23_t # t24: show team users display flag compliance team-report -tu --format json --output 
{OUTDIR}/t24_tr_tu.json # t25: team + user combined filter (OR union) -compliance team-report --team {TEAM1} -u {USER1} --format json --output {OUTDIR}/t25_tr_team_user.json +compliance team-report --team "{TEAM1}" -u {USER1} --format json --output {OUTDIR}/t25_tr_team_user.json # ─── compliance summary-report ──────────────────────────────────────────────── # t30: full summary compliance summary-report --format json --output {OUTDIR}/t30_summary_full.json # t31: filter by user compliance summary-report -u {USER1} --format json --output {OUTDIR}/t31_summary_user.json -# t32: filter by team -compliance summary-report --team {TEAM1} --format json --output {OUTDIR}/t32_summary_team.json +# t32: filter by team (wipe cache after for t33) +compliance summary-report --team "{TEAM1}" -nc --format json --output {OUTDIR}/t32_summary_team.json +# t33: filter by user, cold cache (exposes user_filter pre-filtering on total_items) +compliance summary-report -u {USER1} -r --format json --output {OUTDIR}/t33_summary_user_cold.json # ─── compliance shared-folder-report ────────────────────────────────────────── # t40: full sfr compliance shared-folder-report --format json --output {OUTDIR}/t40_sfr_full.json # t41: filter by team -compliance shared-folder-report --team {TEAM1} --format json --output {OUTDIR}/t41_sfr_team.json +compliance shared-folder-report --team "{TEAM1}" --format json --output {OUTDIR}/t41_sfr_team.json # t42: filter by user who is a direct SF member compliance shared-folder-report -u {USER1} --format json --output {OUTDIR}/t42_sfr_user_direct.json # t43: filter by user whose only SF access is via team compliance shared-folder-report -u {TEAM_ONLY_USER} --format json --output {OUTDIR}/t43_sfr_user_team_member.json # t44: team + user combined filter (OR union) -compliance shared-folder-report --team {TEAM1} -u {USER1} --format json --output {OUTDIR}/t44_sfr_team_user.json +compliance shared-folder-report --team "{TEAM1}" -u {USER1} --format json --output 
{OUTDIR}/t44_sfr_team_user.json # t45: show team users display flag compliance shared-folder-report -tu --format json --output {OUTDIR}/t45_sfr_tu.json # t46: --resolve-teams expands team members into SF user matching -compliance shared-folder-report --team {TEAM1} --resolve-teams --format json --output {OUTDIR}/t46_sfr_team_resolved.json +compliance shared-folder-report --team "{TEAM1}" --resolve-teams --format json --output {OUTDIR}/t46_sfr_team_resolved.json # ─── incremental cache tests ───────────────────────────────────────────────── # t50: rebuild to prime cache diff --git a/tests/compliance_test.env.example b/tests/compliance/test.env.example similarity index 100% rename from tests/compliance_test.env.example rename to tests/compliance/test.env.example