diff --git a/.github/workflows/demo_provisioning_scripts/requirements.txt b/.github/workflows/demo_provisioning_scripts/requirements.txt index 32aaa12b..f319cf55 100644 --- a/.github/workflows/demo_provisioning_scripts/requirements.txt +++ b/.github/workflows/demo_provisioning_scripts/requirements.txt @@ -6,3 +6,4 @@ requests==2.32.4 python-dotenv==1.1.0 launchdarkly-server-sdk-ai launchdarkly-server-sdk +boto3==1.35.0 diff --git a/aws-periodic-scheduler/PeriodicResultsGeneratorLambda.zip b/aws-periodic-scheduler/PeriodicResultsGeneratorLambda.zip new file mode 100644 index 00000000..076dfd04 Binary files /dev/null and b/aws-periodic-scheduler/PeriodicResultsGeneratorLambda.zip differ diff --git a/aws-periodic-scheduler/README.md b/aws-periodic-scheduler/README.md new file mode 100644 index 00000000..a41e68ce --- /dev/null +++ b/aws-periodic-scheduler/README.md @@ -0,0 +1,91 @@ +# AWS Periodic Results Generator + +## Purpose + +This Lambda function automatically regenerates experiment results, monitoring data, and guarded releases for all active demo environments every 3-4 days. This keeps demo environments looking fresh and active without requiring manual intervention. + +## What It Does + +- Queries DynamoDB for all active demo environments (status: `completed`) +- Filters users who haven't had results regenerated in the last 3 days +- Processes users in batches of 5 per Lambda invocation +- Generates fresh data for: + - Experiments (cart suggestions, hero images, AI configs, etc.) 
+ - Monitoring metrics (AI chatbot feedback, financial agent metrics) + - Guarded releases (A4, risk management, financial advisor, databases) +- Updates `lastResultsGenerated` timestamp in DynamoDB to track processing + +## Architecture + +``` +CloudWatch EventBridge (every 6-12 hours) + ↓ +Lambda Function + ↓ +DynamoDB (fetch active users) + LaunchDarkly Management API (fetch SDK keys) + ↓ +LaunchDarkly SDK (send events) + ↓ +LaunchDarkly Dashboard (updated experiment results) +``` + +## Key Technical Solutions + +### Connection Pool Fix +The Lambda includes a urllib3 monkey-patch that increases the connection pool size from 1 to 50, solving the "Connection pool is full" issue that prevents events from reaching LaunchDarkly in serverless environments. + +### Batch Processing +Processes 5 users per invocation to prevent Lambda timeout. The CloudWatch scheduler triggers the Lambda every 6-12 hours, gradually processing all users that need refresh. + +### Smart Filtering +Only regenerates results for users whose `lastResultsGenerated` timestamp is >= 3 days old, preventing unnecessary processing and respecting the "every 3-4 days" requirement. + +## Files + +- **`lambda_deploy/`** - Lambda function source code + - `LambdaPeriodicResultsGenerator.py` - Main Lambda handler + - `DynamoDBUtils.py` - DynamoDB client for user management + - `LDAPIUtils.py` - LaunchDarkly Management API client + - `results_generator.py` - Copy with dynamic PROJECT_KEY support + - All Python dependencies (ldclient, boto3, requests, etc.) 
+ +## Deployment + +The deployment package is located at: +``` +aws-periodic-scheduler/PeriodicResultsGeneratorLambda.zip +``` + +### Lambda Configuration +- **Runtime:** Python 3.11 +- **Timeout:** 15 minutes (900 seconds) +- **Memory:** 512 MB +- **Handler:** `LambdaPeriodicResultsGenerator.lambda_handler` + +### Environment Variables +- `LD_API_KEY` - LaunchDarkly Management API Service Token (Reader access) + +### IAM Permissions +- `AmazonDynamoDBFullAccess` - for reading/writing DynamoDB timestamps +- `AWSLambdaBasicExecutionRole` - for CloudWatch Logs + +### CloudWatch EventBridge Schedule +Create a scheduled rule: +- **Schedule:** `rate(6 hours)` or `rate(12 hours)` +- **Target:** The Lambda function + +## Testing & Verification + +✅ Successfully tested in AWS Lambda +✅ Events reach LaunchDarkly without connection pool errors +✅ Experiment results update correctly in dashboard +✅ DynamoDB timestamp tracking prevents duplicate processing +✅ Batch processing prevents timeout with 300+ users + +## Important Notes + +- This Lambda **does NOT affect** the existing provisioning process in `DemoBuilder.py` +- Original `results_generator.py` in `demo_provisioning_scripts/` remains unchanged +- Lambda uses a separate copy with dynamic PROJECT_KEY support for multi-environment processing +- The Lambda reuses existing generator functions, ensuring consistency across provisioning and periodic updates + diff --git a/aws-periodic-scheduler/lambda_deploy/DynamoDBUtils.py b/aws-periodic-scheduler/lambda_deploy/DynamoDBUtils.py new file mode 100644 index 00000000..b93db877 --- /dev/null +++ b/aws-periodic-scheduler/lambda_deploy/DynamoDBUtils.py @@ -0,0 +1,147 @@ +""" +DynamoDB Utilities +Helper functions to interact with DynamoDB for demo provisioning records +""" +import boto3 +import logging +from datetime import datetime, timedelta + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(message)s' +) + +class DynamoDBClient: + """Client for 
interacting with DynamoDB demo provisioning table""" + + def __init__(self, table_name="ld-core-demo-provisioning-workflow-records-prod", region="us-east-1"): + self.table_name = table_name + self.region = region + self.dynamodb = boto3.resource('dynamodb', region_name=region) + self.table = self.dynamodb.Table(table_name) + self.user_records = {} + + def get_completed_users(self): + """ + Retrieve all users with status='completed' from DynamoDB + Handles duplicate records by keeping only the most recent one per user + + Returns: + list: List of unique usernames with completed status + """ + try: + logging.info(f"Scanning DynamoDB table: {self.table_name}") + + response = self.table.scan( + FilterExpression='#status = :completed', + ExpressionAttributeNames={'#status': 'status'}, + ExpressionAttributeValues={':completed': 'completed'} + ) + + items = response.get('Items', []) + + while 'LastEvaluatedKey' in response: + response = self.table.scan( + FilterExpression='#status = :completed', + ExpressionAttributeNames={'#status': 'status'}, + ExpressionAttributeValues={':completed': 'completed'}, + ExclusiveStartKey=response['LastEvaluatedKey'] + ) + items.extend(response.get('Items', [])) + + logging.info(f"Found {len(items)} completed records") + + #de-duplicate by username, keeping most recent + for item in items: + username = item.get('userKey') + created_at = item.get('createdAt') + + if not username: + continue + + if username not in self.user_records: + self.user_records[username] = item + elif created_at and created_at > self.user_records[username].get('createdAt', ''): + self.user_records[username] = item + + unique_usernames = list(self.user_records.keys()) + logging.info(f"Found {len(unique_usernames)} unique users") + + return unique_usernames + + except Exception as e: + logging.error(f"Error scanning DynamoDB: {str(e)}") + return [] + + def filter_users_needing_refresh(self, usernames, days_threshold=3): + """ + Filter users who need results regenerated 
(haven't been processed recently) + + Args: + usernames: List of usernames to filter + days_threshold: Number of days since last generation to consider stale + + Returns: + list: Usernames that need refresh, sorted by priority (oldest first) + """ + users_needing_refresh = [] + now = datetime.now() + + for username in usernames: + user_record = self.user_records.get(username, {}) + last_generated = user_record.get('lastResultsGenerated') + + if not last_generated: + users_needing_refresh.append((username, None)) + else: + try: + last_gen_dt = datetime.fromisoformat(last_generated.replace('Z', '+00:00')) + days_since = (now - last_gen_dt).days + + if days_since >= days_threshold: + users_needing_refresh.append((username, last_gen_dt)) + except Exception as e: + logging.warning(f"Error parsing date for {username}: {e}") + users_needing_refresh.append((username, None)) + + users_needing_refresh.sort(key=lambda x: x[1] if x[1] else datetime.min) + + return [username for username, _ in users_needing_refresh] + + def update_last_generated_timestamp(self, username): + """ + Update the lastResultsGenerated timestamp for a user + Attempts to write to DynamoDB, falls back to logging if it fails + """ + timestamp = datetime.now().isoformat() + logging.info(f"Results generated for {username} at {timestamp}") + + try: + user_record = self.user_records.get(username) + if not user_record: + logging.warning(f"No record found for {username}, cannot update timestamp") + return + + user_key = user_record.get('userKey') + created_at = user_record.get('createdAt') + + if not user_key or not created_at: + logging.warning(f"Missing key fields for {username}") + return + + self.table.update_item( + Key={ + 'userKey': user_key, + 'createdAt': created_at + }, + UpdateExpression='SET lastResultsGenerated = :timestamp', + ExpressionAttributeValues={ + ':timestamp': timestamp + } + ) + + logging.info(f"Successfully updated timestamp in DynamoDB for {username}") + + except Exception as e: + 
logging.warning(f"Could not update DynamoDB timestamp for {username}: {e}") + diff --git a/aws-periodic-scheduler/lambda_deploy/LDAPIUtils.py b/aws-periodic-scheduler/lambda_deploy/LDAPIUtils.py new file mode 100644 index 00000000..811cc08f --- /dev/null +++ b/aws-periodic-scheduler/lambda_deploy/LDAPIUtils.py @@ -0,0 +1,81 @@ +""" +LaunchDarkly API Utilities +Helper functions to interact with LaunchDarkly Management API +""" +import requests +import logging + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(message)s' +) + +class LaunchDarklyAPIClient: + """Client for interacting with LaunchDarkly Management API""" + + def __init__(self, api_token): + self.api_token = api_token + self.base_url = "https://app.launchdarkly.com/api/v2" + self.headers = { + "Authorization": api_token, + "Content-Type": "application/json" + } + + def get_project_environment_keys(self, project_key, environment_key="production"): + """ + Retrieve SDK key and credentials for a specific project environment + + Args: + project_key: The project key (ex: "user-ld-demo") + environment_key: The environment key (default: "production") + + Returns: + dict: Contains sdk_key, mobile_key, and client_id + Returns None if project doesn't exist + """ + try: + url = f"{self.base_url}/projects/{project_key}/environments/{environment_key}" + + logging.info(f"Fetching credentials for project: {project_key}") + response = requests.get(url, headers=self.headers, timeout=10) + + if response.status_code == 404: + logging.warning(f"Project {project_key} not found") + return None + + response.raise_for_status() + env_data = response.json() + + credentials = { + "project_key": project_key, + "environment_key": environment_key, + "sdk_key": env_data.get("apiKey"), + "mobile_key": env_data.get("mobileKey"), + "client_id": env_data.get("id") + } + + logging.info(f"Successfully retrieved credentials for {project_key}") + return credentials + + except 
requests.exceptions.RequestException as e: + logging.error(f"Error fetching credentials for {project_key}: {str(e)}") + return None + + def project_exists(self, project_key): + """Check if a LaunchDarkly project exists""" + try: + url = f"{self.base_url}/projects/{project_key}" + response = requests.get(url, headers=self.headers, timeout=10) + return response.status_code == 200 + except Exception as e: + logging.error(f"Error checking project {project_key}: {str(e)}") + return False + + +def construct_project_key_from_username(username): + """ + Construct LaunchDarkly project key from username + Pattern: {username}-ld-demo + """ + return f"{username}-ld-demo" + diff --git a/aws-periodic-scheduler/lambda_deploy/LambdaPeriodicResultsGenerator.py b/aws-periodic-scheduler/lambda_deploy/LambdaPeriodicResultsGenerator.py new file mode 100644 index 00000000..bac6e6fe --- /dev/null +++ b/aws-periodic-scheduler/lambda_deploy/LambdaPeriodicResultsGenerator.py @@ -0,0 +1,273 @@ +""" +Periodic Results Generator for AWS Lambda +Runs results generator for all active demo environments every 3-4 days +""" +import os +import logging + +# CRITICAL: Monkey-patch urllib3 to increase connection pool size +import urllib3 +_original_poolmanager_init = urllib3.PoolManager.__init__ + +def _patched_poolmanager_init(self, *args, **kwargs): + # Force maxsize to 50 instead of default 10 + kwargs['maxsize'] = 50 + kwargs['block'] = False + return _original_poolmanager_init(self, *args, **kwargs) + +urllib3.PoolManager.__init__ = _patched_poolmanager_init +print("[PATCH] urllib3.PoolManager maxsize increased to 50") + +import ldclient +from ldclient.config import Config +from dotenv import load_dotenv +from DynamoDBUtils import DynamoDBClient +from LDAPIUtils import LaunchDarklyAPIClient, construct_project_key_from_username +from results_generator import ( + evaluate_all_flags, + ai_configs_monitoring_results_generator, + financial_agent_monitoring_results_generator, + 
experiment_results_generator, + ai_configs_experiment_results_generator, + hero_image_experiment_results_generator, + hero_redesign_experiment_results_generator, + hallucination_detection_experiment_results_generator, + togglebank_signup_funnel_experiment_results_generator, + togglebank_widget_position_experiment_results_generator, + government_ai_config_experiment_results_generator, + a4_guarded_release_generator, + risk_mgmt_guarded_release_generator, + financial_advisor_agent_guarded_release_generator, + togglebank_db_guarded_release_generator, + investment_db_guarded_release_generator, + investment_api_guarded_release_generator, + risk_mgmt_db_guarded_release_generator +) +import threading +import time + +load_dotenv() + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(message)s' +) + +def run_generators_for_environment(project_key, sdk_key, environment_key="production"): + """ + Initializes LaunchDarkly client and runs all generators for a project + """ + logging.info(f"--- Running generators for project: {project_key} ---") + + os.environ["LD_SDK_KEY"] = sdk_key + os.environ["LD_PROJECT_KEY"] = project_key + + ldclient.set_config(Config( + sdk_key=sdk_key, + events_max_pending=10000, + flush_interval=2, + send_events=True + )) + client = ldclient.get() + + if not client.is_initialized(): + print(f" ERROR: LaunchDarkly SDK failed to initialize for {project_key}") + return + + try: + # Run all generators for comprehensive results + print(f" Step 1/4: Evaluating flags...") + evaluate_all_flags(client) + client.flush() + time.sleep(3) + + print(f" Step 2/4: Generating monitoring data...") + ai_configs_monitoring_results_generator(client) + client.flush() + time.sleep(2) + + financial_agent_monitoring_results_generator(client) + client.flush() + time.sleep(2) + + print(f" Step 3/4: Generating experiment results...") + experiment_results_generator(client) + client.flush() + time.sleep(3) + + 
ai_configs_experiment_results_generator(client) + client.flush() + time.sleep(2) + + hero_image_experiment_results_generator(client) + client.flush() + time.sleep(2) + + hero_redesign_experiment_results_generator(client) + client.flush() + time.sleep(2) + + hallucination_detection_experiment_results_generator(client) + client.flush() + time.sleep(2) + + togglebank_signup_funnel_experiment_results_generator(client) + client.flush() + time.sleep(2) + + togglebank_widget_position_experiment_results_generator(client) + client.flush() + time.sleep(2) + + government_ai_config_experiment_results_generator(client) + client.flush() + time.sleep(2) + + print(f" Step 4/4: Running guarded release generators...") + stop_events = { + 'a4': threading.Event(), + 'risk_mgmt': threading.Event(), + 'financial_agent': threading.Event(), + 'togglebank_db': threading.Event(), + 'investment_db': threading.Event(), + 'investment_api': threading.Event(), + 'risk_mgmt_db': threading.Event() + } + + threads = [ + threading.Thread(target=a4_guarded_release_generator, args=(client, stop_events['a4'])), + threading.Thread(target=risk_mgmt_guarded_release_generator, args=(client, stop_events['risk_mgmt'])), + threading.Thread(target=financial_advisor_agent_guarded_release_generator, args=(client, stop_events['financial_agent'])), + threading.Thread(target=togglebank_db_guarded_release_generator, args=(client, stop_events['togglebank_db'])), + threading.Thread(target=investment_db_guarded_release_generator, args=(client, stop_events['investment_db'])), + threading.Thread(target=investment_api_guarded_release_generator, args=(client, stop_events['investment_api'])), + threading.Thread(target=risk_mgmt_db_guarded_release_generator, args=(client, stop_events['risk_mgmt_db'])) + ] + + for thread in threads: + thread.start() + + time.sleep(5) + + for event in stop_events.values(): + event.set() + + for thread in threads: + thread.join() + + client.flush() + time.sleep(3) + + print(f" All generators 
completed for {project_key}") + + except Exception as e: + print(f" ERROR: Error running generators for {project_key}: {str(e)}") + finally: + client.flush() + time.sleep(5) + client.close() + print(f" Completed generators for: {project_key}") + + +def main(): + """ + Main function to fetch active environments and run generators for each + Processes users in batches to avoid Lambda timeout + """ + BATCH_SIZE = 5 # Process 5 users per Lambda invocation + DAYS_BEFORE_REGENERATE = 3 # Only regenerate if >= 3 days since last run + + print("=" * 70) + print("STARTING PERIODIC RESULTS GENERATION (BATCH MODE)") + print("=" * 70) + + ld_api_token = os.getenv("LD_API_KEY") + if not ld_api_token: + print("ERROR: LD_API_KEY not set") + return + + dynamodb_client = DynamoDBClient() + ld_api_client = LaunchDarklyAPIClient(ld_api_token) + + # Get all completed users from DynamoDB + all_usernames = dynamodb_client.get_completed_users() + + if not all_usernames: + print("No active demo environments found") + return + + print(f"Total users in database: {len(all_usernames)}") + + # Filter to only users that need refresh (>= 3 days old) + users_to_process = dynamodb_client.filter_users_needing_refresh( + all_usernames, + days_threshold=DAYS_BEFORE_REGENERATE + ) + + print(f"Users needing refresh (>= {DAYS_BEFORE_REGENERATE} days old): {len(users_to_process)}") + + # Process only the first BATCH_SIZE users in this Lambda invocation + batch = users_to_process[:BATCH_SIZE] + print(f"Processing batch size: {len(batch)} users") + print("-" * 70) + + successful_count = 0 + failed_count = 0 + skipped_count = 0 + + for idx, username in enumerate(batch, 1): + try: + project_key = construct_project_key_from_username(username) + + print(f"[{idx}/{len(batch)}] Processing: {username} ({project_key})") + credentials = ld_api_client.get_project_environment_keys(project_key, "production") + + if not credentials or not credentials.get("sdk_key"): + print(f" SKIPPED: No credentials found for 
{username}") + skipped_count += 1 + continue + + run_generators_for_environment(project_key, credentials["sdk_key"]) + dynamodb_client.update_last_generated_timestamp(username) + + successful_count += 1 + print(f" SUCCESS: Completed {username}") + + time.sleep(2) # Brief delay between users + + except Exception as e: + print(f" ERROR: Failed to process {username}: {str(e)}") + failed_count += 1 + + print("=" * 70) + print(f"SUMMARY: {successful_count} successful | {failed_count} failed | {skipped_count} skipped") + print(f"Remaining users to process: {len(users_to_process) - len(batch)}") + print("=" * 70) + print("Periodic results generation completed!") + + +def lambda_handler(event, context): + """AWS Lambda entry point""" + print(f"Lambda invoked with event: {event}") + print(f"Request ID: {context.aws_request_id}") + + try: + main() + print("Lambda execution completed successfully") + return { + 'statusCode': 200, + 'body': 'Results generation completed successfully' + } + except Exception as e: + print(f"LAMBDA EXECUTION FAILED: {str(e)}") + import traceback + print(traceback.format_exc()) + return { + 'statusCode': 500, + 'body': f'Error: {str(e)}' + } + + +if __name__ == "__main__": + main() + diff --git a/aws-periodic-scheduler/lambda_deploy/results_generator.py b/aws-periodic-scheduler/lambda_deploy/results_generator.py new file mode 100644 index 00000000..565035dc --- /dev/null +++ b/aws-periodic-scheduler/lambda_deploy/results_generator.py @@ -0,0 +1,1034 @@ +import os +import logging +import requests +import uuid +import ldclient +from ldclient.config import Config +from ldclient.context import Context +from dotenv import load_dotenv +import random +import time +import threading +from ldai.client import LDAIClient, AIConfig, ModelConfig, LDMessage, ProviderConfig +from ldai.tracker import TokenUsage, FeedbackKind +from datetime import datetime, timedelta + +load_dotenv() + +LD_API_KEY = os.getenv("LD_API_KEY") +LD_API_URL = os.getenv("LD_API_URL", 
"https://app.launchdarkly.com/api/v2") +ENVIRONMENT_KEY = "production" + +HEADERS = { + "Authorization": LD_API_KEY, + "Content-Type": "application/json" +} + +def get_project_key(): + """Get PROJECT_KEY dynamically from environment (needed for Lambda multi-environment support)""" + return os.getenv("LD_PROJECT_KEY") + +A4_FLAG_KEY = "togglebankAPIGuardedRelease" +API_ERROR_RATE_KEY = "stocks-api-error-rates" +API_LATENCY_KEY = "stocks-api-latency" + +RISK_MGMT_FLAG_KEY = "riskmgmtbureauAPIGuardedRelease" +RISK_API_ERROR_RATE_KEY = "rm-api-errors" +RISK_API_LATENCY_KEY = "rm-api-latency" + +FINANCIAL_AGENT_FLAG_KEY = "ai-config--togglebank-financial-advisor-agent" +FINANCIAL_AGENT_ACCURACY_KEY = "financial-agent-accuracy" +FINANCIAL_AGENT_NEGATIVE_FEEDBACK_KEY = "financial-agent-negative-feedback" + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(message)s' +) + +def get_all_flags_by_tag(tag): + project_key = get_project_key() + url = f"{LD_API_URL}/flags/{project_key}?limit=100" + response = requests.get(url, headers=HEADERS) + if not response.ok: + logging.error(f"Failed to fetch flags: {response.status_code} {response.text}") + return [] + data = response.json() + return [flag['key'] for flag in data.get('items', []) if tag in flag.get("tags", [])] + +def get_flag_details(flag_key): + project_key = get_project_key() + url = f"{LD_API_URL}/flags/{project_key}/{flag_key}" + response = requests.get(url, headers=HEADERS) + if not response.ok: + logging.error(f"Failed to fetch flag details: {response.status_code} {response.text}") + return None + return response.json() + +def is_measured_rollout(flag_details): + # Check for measured rollout attribute in flag details + # Look for 'environments' -> ENVIRONMENT_KEY -> 'fallthrough' -> 'rollout' + try: + env = flag_details['environments'][ENVIRONMENT_KEY] + fallthrough = env.get('fallthrough', {}) + rollout = fallthrough.get('rollout') + return rollout is not None + except Exception as 
e: + logging.error(f"Error checking measured rollout: {str(e)}") + return False + +def generate_user_context(): + user_key = f"user-{uuid.uuid4()}" + builder = Context.builder(user_key) + builder.set("name", f"Test User {user_key[:8]}") + builder.set("email", f"test-{user_key[:8]}@example.com") + builder.set("accountType", random.choice(["personal", "business"])) + builder.set("accountAge", random.randint(1, 60)) + builder.set("lastLogin", (datetime.now() - timedelta(days=random.randint(0, 30))).isoformat()) + builder.set("location", random.choice(["New York", "Los Angeles", "Chicago", "Houston", "Phoenix"])) + builder.set("deviceType", random.choice(["mobile", "desktop", "tablet"])) + builder.set("browser", random.choice(["chrome", "safari", "firefox", "edge"])) + builder.set("os", random.choice(["windows", "macos", "ios", "android"])) + builder.set("timezone", random.choice(["America/New_York", "America/Los_Angeles", "America/Chicago"])) + builder.set("language", random.choice(["en-US", "en-GB", "es-ES", "fr-FR"])) + builder.set("referrer", random.choice(["google", "direct", "social", "email", "partner"])) + builder.set("utm_source", random.choice(["google", "facebook", "twitter", "linkedin", "email"])) + builder.set("utm_medium", random.choice(["cpc", "organic", "social", "email", "display"])) + builder.set("utm_campaign", random.choice(["spring_sale", "summer_promo", "winter_deals", "holiday_special"])) + return builder.build() + +def evaluate_flags_by_tag(client, tag, tag_label): + if not client.is_initialized(): + logging.error(f"Failed to initialize LaunchDarkly client for {tag_label} flags") + return + + logging.info(f"Starting flag evaluation for all '{tag_label}' flags...") + flag_keys = get_all_flags_by_tag(tag) + + if not flag_keys: + logging.error(f"No '{tag_label}' flags found to evaluate.") + return + + for flag_key in flag_keys: + logging.info(f"Evaluating flag: {flag_key}") + for _ in range(500): + try: + user_context = generate_user_context() + 
variation = client.variation(flag_key, user_context, False) + logging.info(f"User {user_context.key} got variation '{variation}' for flag '{flag_key}'") + except Exception as e: + logging.error(f"Error evaluating flag {flag_key}: {str(e)}") + continue + + logging.info(f"Flag evaluation for '{tag_label}' completed. Flushing client...") + client.flush() + logging.info(f"Flag evaluation script for '{tag_label}' finished.") + +def evaluate_all_flags(client): + # Evaluate all flags by their tags + evaluate_flags_by_tag(client, "bank", "bank") + evaluate_flags_by_tag(client, "public-sector", "public-sector") + evaluate_flags_by_tag(client, "release", "release") + evaluate_flags_by_tag(client, "experiment", "experiment") + evaluate_flags_by_tag(client, "ecommerce", "ecommerce") + evaluate_flags_by_tag(client, "investment", "investment") + evaluate_flags_by_tag(client, "airways", "airways") + evaluate_flags_by_tag(client, "ai-models", "ai-models") + evaluate_flags_by_tag(client, "ai-config", "ai-config") + evaluate_flags_by_tag(client, "guarded-release", "guarded-release") + evaluate_flags_by_tag(client, "migration-assistant", "migration-assistant") + evaluate_flags_by_tag(client, "release-pipeline", "release-pipeline") + evaluate_flags_by_tag(client, "utils", "utils") + evaluate_flags_by_tag(client, "temporary", "temporary") + evaluate_flags_by_tag(client, "demo", "demo") + evaluate_flags_by_tag(client, "events", "events") + evaluate_flags_by_tag(client, "demoengineering", "demoengineering") + evaluate_flags_by_tag(client, "optional", "optional") + evaluate_flags_by_tag(client, "financial-ai", "financial-ai") + evaluate_flags_by_tag(client, "ai-agent", "ai-agent") + evaluate_flags_by_tag(client, "financial-advisor-agent", "financial-advisor-agent") + evaluate_flags_by_tag(client, "Experimentation", "Experimentation") + +def a4_guarded_release_generator(client, stop_event): + if not client.is_initialized(): + logging.error("LaunchDarkly client is not initialized for A4") + 
return + logging.info("Starting guarded release rollback generator for A4 flag...") + while True: + flag_details = get_flag_details(A4_FLAG_KEY) + if not flag_details or not is_measured_rollout(flag_details): + logging.info("Measured rollout is over or flag details unavailable. Exiting A4 generator.") + stop_event.set() + break + try: + user_context = generate_user_context() + flag_value = client.variation(A4_FLAG_KEY, user_context, False) + if flag_value: + # True: higher error rate, higher latency + if random.random() < 0.8: + client.track(API_ERROR_RATE_KEY, user_context) + latency = random.randint(500, 1000) + client.track(API_LATENCY_KEY, user_context, None, latency) + else: + # False: lower error rate, lower latency + if random.random() < 0.1: + client.track(API_ERROR_RATE_KEY, user_context) + latency = random.randint(100, 200) + client.track(API_LATENCY_KEY, user_context, None, latency) + time.sleep(0.05) # Increased delay to prevent API overload + except Exception as e: + logging.error(f"Error during A4 guarded release simulation: {str(e)}") + continue + logging.info("A4 guarded release rollback generator finished.") + +def risk_mgmt_guarded_release_generator(client, stop_event): + if not client.is_initialized(): + logging.error("LaunchDarkly client is not initialized for Risk Management API") + return + logging.info("Starting guarded release rollback generator for Risk Management API flag...") + while True: + flag_details = get_flag_details(RISK_MGMT_FLAG_KEY) + if not flag_details or not is_measured_rollout(flag_details): + logging.info("Measured rollout is over or flag details unavailable. 
# NOTE(review): the head of risk_mgmt_guarded_release_generator (its `def` line,
# initialization check, and rollout-polling preamble) sits above this chunk; it
# was reconstructed from the identical pattern used by every sibling generator
# below — confirm against the full file.
def risk_mgmt_guarded_release_generator(client, stop_event):
    """Emit error/latency events for the risk-management API flag while its
    measured rollout is active.

    Runs until the rollout ends (sets stop_event and exits). The True variation
    simulates a bad release (75% error rate, 400-800ms latency); the False
    variation simulates the healthy baseline (5% errors, 80-150ms).
    """
    if not client.is_initialized():
        logging.error("LaunchDarkly client is not initialized for Risk Management API")
        return
    logging.info("Starting guarded release rollback generator for Risk Management API flag...")
    while True:
        flag_details = get_flag_details(RISK_MGMT_FLAG_KEY)
        if not flag_details or not is_measured_rollout(flag_details):
            logging.info("Measured rollout is over or flag details unavailable. Exiting Risk Management API generator.")
            stop_event.set()
            break
        try:
            user_context = generate_user_context()
            flag_value = client.variation(RISK_MGMT_FLAG_KEY, user_context, False)
            if flag_value:
                # True: higher error rate, higher latency
                if random.random() < 0.75:
                    client.track(RISK_API_ERROR_RATE_KEY, user_context)
                latency = random.randint(400, 800)
                client.track(RISK_API_LATENCY_KEY, user_context, None, latency)
            else:
                # False: lower error rate, lower latency
                if random.random() < 0.05:
                    client.track(RISK_API_ERROR_RATE_KEY, user_context)
                latency = random.randint(80, 150)
                client.track(RISK_API_LATENCY_KEY, user_context, None, latency)
            time.sleep(0.05)  # throttle to prevent API overload
        except Exception as e:
            logging.error(f"Error during Risk Management API guarded release simulation: {str(e)}")
            continue
    logging.info("Risk Management API guarded release rollback generator finished.")

def financial_advisor_agent_guarded_release_generator(client, stop_event):
    """Emit accuracy/negative-feedback events for the financial-advisor AI Config
    while its measured rollout is active.

    The model served by the variation determines the simulated quality:
    'ld-ai-model-pro' wins (85-95 accuracy, 5% negative feedback),
    'ld-ai-model-mini' fails (5-15 accuracy, 90% negative feedback),
    anything else lands in the middle (50-70 accuracy, 30% negative feedback).
    """
    if not client.is_initialized():
        logging.error("LaunchDarkly client is not initialized for Financial Advisor Agent")
        return
    logging.info("Starting guarded release rollback generator for Financial Advisor Agent...")

    while True:
        flag_details = get_flag_details(FINANCIAL_AGENT_FLAG_KEY)
        if not flag_details or not is_measured_rollout(flag_details):
            logging.info("Measured rollout is over or flag details unavailable. Exiting Financial Advisor Agent generator.")
            stop_event.set()
            break

        try:
            user_context = generate_user_context()
            variation = client.variation(FINANCIAL_AGENT_FLAG_KEY, user_context, None)

            # The variation is an AI Config object; pull the model name out of it
            # (falls back to 'default' when no model information is present).
            if variation and hasattr(variation, 'model') and variation.model:
                model_name = variation.model.get('name', 'unknown')
            else:
                model_name = 'default'

            # Different performance characteristics based on AI model
            if 'ld-ai-model-pro' in model_name.lower():
                # Pro model - excellent performance, wins the experiment
                accuracy = random.uniform(85, 95)
                if random.random() < 0.05:  # very low negative feedback rate
                    client.track(FINANCIAL_AGENT_NEGATIVE_FEEDBACK_KEY, user_context)
                client.track(FINANCIAL_AGENT_ACCURACY_KEY, user_context, None, accuracy)
            elif 'ld-ai-model-mini' in model_name.lower():
                # Mini model - extremely poor performance, fails the experiment
                accuracy = random.uniform(5, 15)
                if random.random() < 0.90:  # very high negative feedback rate
                    client.track(FINANCIAL_AGENT_NEGATIVE_FEEDBACK_KEY, user_context)
                client.track(FINANCIAL_AGENT_ACCURACY_KEY, user_context, None, accuracy)
            else:
                # Default/unknown model - moderate performance
                accuracy = random.uniform(50, 70)
                if random.random() < 0.30:  # moderate negative feedback rate
                    client.track(FINANCIAL_AGENT_NEGATIVE_FEEDBACK_KEY, user_context)
                client.track(FINANCIAL_AGENT_ACCURACY_KEY, user_context, None, accuracy)

            time.sleep(0.05)  # throttle to prevent API overload
        except Exception as e:
            logging.error(f"Error during Financial Advisor Agent guarded release simulation: {str(e)}")
            continue

    logging.info("Financial Advisor Agent guarded release rollback generator finished.")

def _run_ai_monitoring(client, flag_key, init_error_msg, start_msg, done_msg, num_runs=1000):
    """Shared body for the AI Config monitoring generators.

    For each of num_runs synthetic users: evaluate the AI Config, then report
    random-but-plausible duration, time-to-first-token, token usage, 50/50
    positive/negative feedback, and a 95% success rate through the LD AI
    tracker. Flushes events every 100 iterations. Does not flush or close the
    client at the end; that is handled by generate_results.
    """
    aiclient = LDAIClient(client)

    if not client.is_initialized():
        logging.error(init_error_msg)
        return

    logging.info(start_msg)

    # Fallback served if the AI Config cannot be evaluated.
    fallback_value = AIConfig(
        enabled=True,
        model=ModelConfig(
            name="default-model",
            parameters={"temperature": 0.8},
        ),
        messages=[LDMessage(role="system", content="")],
        provider=ProviderConfig(name="default-provider"),
    )

    for i in range(num_runs):
        try:
            context = generate_user_context()
            config, tracker = aiclient.config(flag_key, context, fallback_value)
            duration = random.randint(500, 2000)
            time_to_first_token = random.randint(50, duration)
            prompt_tokens = random.randint(20, 100)
            completion_tokens = random.randint(50, 500)
            total_tokens = prompt_tokens + completion_tokens
            tokens = TokenUsage(prompt_tokens, completion_tokens, total_tokens)
            feedback_kind = FeedbackKind.Positive if random.random() < 0.5 else FeedbackKind.Negative
            tracker.track_duration(duration)
            tracker.track_tokens(tokens)
            tracker.track_feedback({'kind': feedback_kind})
            tracker.track_time_to_first_token(time_to_first_token)
            if random.random() < 0.95:
                tracker.track_success()
            else:
                tracker.track_error()
            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} monitoring events")
                client.flush()
        except Exception as e:
            logging.error(f"Error processing monitoring event {i}: {str(e)}")
            continue

    logging.info(done_msg)

def ai_configs_monitoring_results_generator(client):
    """Generate monitoring telemetry for the togglebot AI Config."""
    _run_ai_monitoring(
        client,
        flag_key="ai-config--togglebot",
        init_error_msg="Failed to initialize LaunchDarkly client for AI Config monitoring",
        start_msg="Starting AI Configs monitoring results generation...",
        done_msg="AI Configs monitoring results generation completed",
    )
    # Do not flush or close client here; handled in generate_results

def financial_agent_monitoring_results_generator(client):
    """Generate monitoring telemetry for the financial-advisor AI Config."""
    _run_ai_monitoring(
        client,
        flag_key="ai-config--togglebank-financial-advisor-agent",
        init_error_msg="Failed to initialize LaunchDarkly client for Financial Agent monitoring",
        start_msg="Starting Financial Agent monitoring results generation...",
        done_msg="Financial Agent monitoring results generation completed",
    )
    # Do not flush or close client here; handled in generate_results

def experiment_results_generator(client):
    """Generate experiment results for cartSuggestedItems.

    Tracks in-cart item count and price metrics for engaged users; the True
    variation engages more often than the False variation.
    """
    LD_FEATURE_FLAG_KEY = "cartSuggestedItems"
    LD_PRIMARYMETRIC_KEY = "in-cart-total-items"
    LD_SECONDARYMETRIC_KEY = "in-cart-total-price"
    NUM_USERS = 2500

    logging.info("Starting experiment results generation for cartSuggestedItems...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, False)
            # Simulate user interaction: 60% engagement for True, 40% for False.
            # (The previous comment claimed 90%/10%, which did not match the code.)
            engagement_probability = 0.6 if variation is True else 0.4
            is_engaged = random.random() < engagement_probability
            if is_engaged:
                random_value = random.randint(2, 10)
                client.track(LD_PRIMARYMETRIC_KEY, user_context, None, random_value)
                random_value = random.randint(100, 1000)
                client.track(LD_SECONDARYMETRIC_KEY, user_context, None, random_value)
                logging.info(f"User {user_context.key} engaged with {variation} variation")
            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for experiment results")
                client.flush()  # Flush events every 100 users
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Experiment results generation for cartSuggestedItems completed")

def ai_configs_experiment_results_generator(client):
    """Generate experiment results for the ai-config--togglebot AI Config.

    Tracks positive vs. negative chatbot feedback at an overall 50/50 split.
    """
    LD_FLAG_KEY = "ai-config--togglebot"
    POSITIVE_METRIC_KEY = "ai-chatbot-positive-feedback"
    NEGATIVE_METRIC_KEY = "ai-chatbot-negative-feedback"
    NUM_USERS = 2500

    logging.info("Starting AI Configs experiment results generation for ai-config--togglebot...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            _ = client.variation(LD_FLAG_KEY, user_context, None)
            # Randomly track positive or negative feedback. The nested
            # random.uniform(0.4, 0.6) jitters the per-call threshold, but the
            # overall split still averages out to exactly 50/50.
            if random.random() < random.uniform(0.4, 0.6):
                client.track(POSITIVE_METRIC_KEY, user_context)
            else:
                client.track(NEGATIVE_METRIC_KEY, user_context)
            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for AI Configs experiment results")
                client.flush()  # Flush events every 100 users
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("AI Configs experiment results generation for ai-config--togglebot completed")

def hero_image_experiment_results_generator(client):
    """Generate experiment results for showDifferentHeroImageString.

    The True variation (new hero image) converts at 65% vs. 45% for control.
    """
    LD_FEATURE_FLAG_KEY = "showDifferentHeroImageString"
    LD_PRIMARYMETRIC_KEY = "signup clicked"
    NUM_USERS = 3000

    logging.info("Starting experiment results generation for showDifferentHeroImageString...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, False)
            # Simulate user interaction: higher conversion for True variation (new hero image)
            engagement_probability = 0.65 if variation is True else 0.45
            is_engaged = random.random() < engagement_probability
            if is_engaged:
                client.track(LD_PRIMARYMETRIC_KEY, user_context)
                logging.info(f"User {user_context.key} engaged with {variation} variation")
            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for experiment results")
                client.flush()  # Flush events every 100 users
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Experiment results generation for showDifferentHeroImageString completed")

def hero_redesign_experiment_results_generator(client):
    """Generate experiment results for showHeroRedesign.

    The True variation (redesigned hero) converts at 70% vs. 50% for control.
    """
    LD_FEATURE_FLAG_KEY = "showHeroRedesign"
    LD_PRIMARYMETRIC_KEY = "signup clicked"
    NUM_USERS = 3000

    logging.info("Starting experiment results generation for showHeroRedesign...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, False)
            # Simulate user interaction: higher conversion for True variation (new hero redesign)
            engagement_probability = 0.70 if variation is True else 0.50
            is_engaged = random.random() < engagement_probability
            if is_engaged:
                client.track(LD_PRIMARYMETRIC_KEY, user_context)
                logging.info(f"User {user_context.key} engaged with {variation} variation")
            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for experiment results")
                client.flush()  # Flush events every 100 users
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Experiment results generation for showHeroRedesign completed")

def hallucination_detection_experiment_results_generator(client):
    """Generate hallucination-detection experiment results for ai-config--togglebot.

    Per-model accuracy/source-fidelity/cost distributions overlap heavily on
    purpose so no model drastically loses; negative feedback fires ~10% of the
    time regardless of model.
    """
    LD_FEATURE_FLAG_KEY = "ai-config--togglebot"
    AI_ACCURACY_KEY = "ai-accuracy"
    AI_SOURCE_FIDELITY_KEY = "ai-source-fidelity"
    AI_COST_KEY = "ai-cost"
    AI_CHATBOT_NEGATIVE_FEEDBACK_KEY = "ai-chatbot-negative-feedback"
    NUM_USERS = 7500

    logging.info("Starting hallucination detection experiment results generation for ai-config--togglebot...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, None)

            # The variation is an AI Config object; pull the model name out of it.
            if variation and hasattr(variation, 'model') and variation.model:
                model_name = variation.model.get('name', 'unknown')
            else:
                model_name = 'default'

            # AI accuracy: 85-95 range with slight per-model variation.
            # (Previous "Scaled up 100x" comments were inaccurate - these are
            # the raw values tracked.)
            if 'claude' in model_name.lower():
                accuracy = random.uniform(88, 94)
            elif 'nova' in model_name.lower():
                accuracy = random.uniform(86, 92)
            elif 'gpt' in model_name.lower():
                accuracy = random.uniform(87, 93)
            else:
                accuracy = random.uniform(85, 91)

            # AI source fidelity: 80-90 range with slight per-model variation.
            if 'claude' in model_name.lower():
                source_fidelity = random.uniform(83, 89)
            elif 'nova' in model_name.lower():
                source_fidelity = random.uniform(81, 87)
            elif 'gpt' in model_name.lower():
                source_fidelity = random.uniform(82, 88)
            else:
                source_fidelity = random.uniform(80, 86)

            # AI cost: $0.10-$0.40 range with slight per-model variation.
            if 'claude' in model_name.lower():
                cost = random.uniform(0.20, 0.40)
            elif 'nova' in model_name.lower():
                cost = random.uniform(0.10, 0.30)
            elif 'gpt' in model_name.lower():
                cost = random.uniform(0.15, 0.35)
            else:
                cost = random.uniform(0.10, 0.20)

            # Track the metrics
            client.track(AI_ACCURACY_KEY, user_context, None, accuracy)
            client.track(AI_SOURCE_FIDELITY_KEY, user_context, None, source_fidelity)
            client.track(AI_COST_KEY, user_context, None, cost)

            # AI chatbot negative feedback: flat 10% chance for every model.
            if random.random() < 0.10:
                client.track(AI_CHATBOT_NEGATIVE_FEEDBACK_KEY, user_context)

            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for hallucination detection experiment results")
                client.flush()  # Flush events every 100 users
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Hallucination detection experiment results generation for ai-config--togglebot completed")

def togglebank_signup_funnel_experiment_results_generator(client):
    """Generate funnel experiment results for releaseNewSignupPromo.

    Every user starts the signup funnel; per-variation step rates decide how
    far each user progresses through the four subsequent funnel steps.
    """
    LD_FEATURE_FLAG_KEY = "releaseNewSignupPromo"
    SIGNUP_STARTED_KEY = "signup-started"
    INITIAL_SIGNUP_COMPLETED_KEY = "initial-signup-completed"
    PERSONAL_DETAILS_COMPLETED_KEY = "signup-personal-details-completed"
    SERVICES_COMPLETED_KEY = "signup-services-completed"
    SIGNUP_FLOW_COMPLETED_KEY = "signup-flow-completed"
    NUM_USERS = 3000

    logging.info("Starting funnel experiment results generation for releaseNewSignupPromo...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, "control")

            client.track(SIGNUP_STARTED_KEY, user_context)

            # Per-variation funnel step rates; mortgage-offer performs best.
            if variation == "credit-card-offer":
                step2_rate = 0.70
                step3_rate = 0.65
                step4_rate = 0.55
                step5_rate = 0.45
            elif variation == "mortgage-offer":
                step2_rate = 0.75
                step3_rate = 0.70
                step4_rate = 0.62
                step5_rate = 0.52
            elif variation == "cashback-offer":
                step2_rate = 0.65
                step3_rate = 0.58
                step4_rate = 0.48
                step5_rate = 0.38
            else:
                step2_rate = 0.68
                step3_rate = 0.60
                step4_rate = 0.50
                step5_rate = 0.40

            # NOTE: each step is sampled independently, so a user can complete
            # a later step without completing an earlier one (matches the
            # original behavior).
            if random.random() < step2_rate:
                client.track(INITIAL_SIGNUP_COMPLETED_KEY, user_context)

            if random.random() < step3_rate:
                client.track(PERSONAL_DETAILS_COMPLETED_KEY, user_context)

            if random.random() < step4_rate:
                client.track(SERVICES_COMPLETED_KEY, user_context)

            if random.random() < step5_rate:
                client.track(SIGNUP_FLOW_COMPLETED_KEY, user_context)
                logging.info(f"User {user_context.key} completed full signup funnel with {variation} variation")

            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for ToggleBank signup funnel experiment results")
                client.flush()
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Funnel experiment results generation for releaseNewSignupPromo completed")

def ecommerce_collection_banner_experiment_results_generator(client):
    """Generate funnel experiment results for storeAttentionCallout.

    Urgency banners ("Final Hours!" > "Sale" > "New Items") convert better at
    every funnel step; checkout users also report an in-cart total price.
    """
    LD_FEATURE_FLAG_KEY = "storeAttentionCallout"
    STORE_ACCESSED_KEY = "store-accessed"
    ITEM_ADDED_KEY = "item-added"
    CART_ACCESSED_KEY = "cart-accessed"
    CUSTOMER_CHECKOUT_KEY = "customer-checkout"
    IN_CART_TOTAL_PRICE_KEY = "in-cart-total-price"
    NUM_USERS = 3000

    logging.info("Starting funnel experiment results generation for storeAttentionCallout...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, "New Items")

            # Different conversion rates based on banner text
            if variation == "Final Hours!":
                store_access_rate = 0.85
                item_add_rate = 0.70
                cart_access_rate = 0.65
                checkout_rate = 0.55
            elif variation == "Sale":
                store_access_rate = 0.80
                item_add_rate = 0.65
                cart_access_rate = 0.60
                checkout_rate = 0.50
            else:  # "New Items" (control)
                store_access_rate = 0.70
                item_add_rate = 0.55
                cart_access_rate = 0.50
                checkout_rate = 0.40

            if random.random() < store_access_rate:
                client.track(STORE_ACCESSED_KEY, user_context)

            if random.random() < item_add_rate:
                client.track(ITEM_ADDED_KEY, user_context)

            if random.random() < cart_access_rate:
                client.track(CART_ACCESSED_KEY, user_context)

            if random.random() < checkout_rate:
                client.track(CUSTOMER_CHECKOUT_KEY, user_context)
                total_price = random.randint(50, 500)
                client.track(IN_CART_TOTAL_PRICE_KEY, user_context, None, total_price)
                logging.info(f"User {user_context.key} completed checkout with {variation} variation")

            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for collection banner experiment results")
                client.flush()
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Funnel experiment results generation for storeAttentionCallout completed")

def ecommerce_shorten_collection_experiment_results_generator(client):
    """Generate funnel experiment results for release-new-shorten-collections-page.

    The shortened collections page converts better at every step (reduced
    decision fatigue); checkout users also report an in-cart total price.
    """
    LD_FEATURE_FLAG_KEY = "release-new-shorten-collections-page"
    ITEM_ADDED_KEY = "item-added"
    CART_ACCESSED_KEY = "cart-accessed"
    CUSTOMER_CHECKOUT_KEY = "customer-checkout"
    IN_CART_TOTAL_PRICE_KEY = "in-cart-total-price"
    NUM_USERS = 3000

    logging.info("Starting funnel experiment results generation for release-new-shorten-collections-page...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, "old-long-collections-page")

            # Better conversion for shortened page due to reduced decision fatigue
            if variation == "new-shorten-collections-page":
                item_add_rate = 0.75
                cart_access_rate = 0.85
                checkout_rate = 0.70
            else:  # "old-long-collections-page"
                item_add_rate = 0.60
                cart_access_rate = 0.75
                checkout_rate = 0.55

            if random.random() < item_add_rate:
                client.track(ITEM_ADDED_KEY, user_context)

            if random.random() < cart_access_rate:
                client.track(CART_ACCESSED_KEY, user_context)

            if random.random() < checkout_rate:
                client.track(CUSTOMER_CHECKOUT_KEY, user_context)
                total_price = random.randint(80, 600)
                client.track(IN_CART_TOTAL_PRICE_KEY, user_context, None, total_price)
                logging.info(f"User {user_context.key} completed checkout with {variation} variation")

            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for shorten collection page experiment results")
                client.flush()
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Funnel experiment results generation for release-new-shorten-collections-page completed")

def ecommerce_new_search_engine_experiment_results_generator(client):
    """Generate experiment results for release-new-search-engine.

    The new search engine (True) has a higher add-to-cart rate and a higher
    cart value range than the control.
    """
    LD_FEATURE_FLAG_KEY = "release-new-search-engine"
    SEARCH_ENGINE_ADD_TO_CART_KEY = "search-engine-add-to-cart"
    IN_CART_TOTAL_PRICE_KEY = "in-cart-total-price"
    NUM_USERS = 3000

    logging.info("Starting experiment results generation for release-new-search-engine...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, False)

            # New search engine has better add-to-cart conversion
            if variation is True:
                add_to_cart_rate = 0.65
                avg_price_low = 100
                avg_price_high = 800
            else:
                add_to_cart_rate = 0.45
                avg_price_low = 80
                avg_price_high = 600

            if random.random() < add_to_cart_rate:
                client.track(SEARCH_ENGINE_ADD_TO_CART_KEY, user_context)
                total_price = random.randint(avg_price_low, avg_price_high)
                client.track(IN_CART_TOTAL_PRICE_KEY, user_context, None, total_price)
                logging.info(f"User {user_context.key} added items via search with {variation} variation")

            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for new search engine experiment results")
                client.flush()
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Experiment results generation for release-new-search-engine completed")

def togglebank_special_offers_experiment_results_generator(client):
    """Generate experiment results for showDifferentSpecialOfferString.

    Each offer variation has its own signup-started conversion rate; the
    platinum rewards offer (offerC) wins.
    """
    LD_FEATURE_FLAG_KEY = "showDifferentSpecialOfferString"
    SIGNUP_STARTED_KEY = "signup-started"
    NUM_USERS = 3000

    logging.info("Starting experiment results generation for showDifferentSpecialOfferString...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, "offerA")

            # Different offers have different conversion rates
            if variation == "offerA":  # Credit card offer
                conversion_rate = 0.55
            elif variation == "offerB":  # Car loan offer
                conversion_rate = 0.45
            elif variation == "offerC":  # Platinum rewards offer
                conversion_rate = 0.65
            else:
                conversion_rate = 0.50

            if random.random() < conversion_rate:
                client.track(SIGNUP_STARTED_KEY, user_context)
                logging.info(f"User {user_context.key} started signup with {variation} variation")

            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for special offers experiment results")
                client.flush()
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Experiment results generation for showDifferentSpecialOfferString completed")

def togglebank_widget_position_experiment_results_generator(client):
    """Generate experiment results for swapWidgetPositions.

    Swapped widget order (True) converts slightly better than the original.
    """
    LD_FEATURE_FLAG_KEY = "swapWidgetPositions"
    SIGNUP_STARTED_KEY = "signup-started"
    NUM_USERS = 3000

    logging.info("Starting experiment results generation for swapWidgetPositions...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            variation = client.variation(LD_FEATURE_FLAG_KEY, user_context, False)

            # Test if swapping widget positions improves conversion
            if variation is True:  # Retirement left, Mortgage right
                conversion_rate = 0.58
            else:  # Mortgage left, Retirement right (original)
                conversion_rate = 0.52

            if random.random() < conversion_rate:
                client.track(SIGNUP_STARTED_KEY, user_context)
                logging.info(f"User {user_context.key} started signup with widgets swapped={variation}")

            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for widget position experiment results")
                client.flush()
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("Experiment results generation for swapWidgetPositions completed")

def government_ai_config_experiment_results_generator(client):
    """Generate experiment results for the ai-config--publicbot AI Config.

    Tracks chatbot feedback at a 60/40 positive/negative split.
    """
    LD_FLAG_KEY = "ai-config--publicbot"
    POSITIVE_METRIC_KEY = "ai-chatbot-positive-feedback"
    NEGATIVE_METRIC_KEY = "ai-chatbot-negative-feedback"
    NUM_USERS = 2500

    logging.info("Starting AI Configs experiment results generation for ai-config--publicbot...")

    for i in range(NUM_USERS):
        try:
            user_context = generate_user_context()
            _ = client.variation(LD_FLAG_KEY, user_context, None)
            # Randomly track positive or negative feedback (roughly 60/40 split for government)
            if random.random() < 0.60:
                client.track(POSITIVE_METRIC_KEY, user_context)
            else:
                client.track(NEGATIVE_METRIC_KEY, user_context)
            if (i + 1) % 100 == 0:
                logging.info(f"Processed {i + 1} users for Government AI Configs experiment results")
                client.flush()
        except Exception as e:
            logging.error(f"Error processing user {i}: {str(e)}")
            continue
    logging.info("AI Configs experiment results generation for ai-config--publicbot completed")

def togglebank_db_guarded_release_generator(client, stop_event):
    """Emit error/latency events for togglebankDBGuardedRelease while its
    measured rollout runs (True: 70% errors, 300-700ms; False: 8%, 50-150ms).
    """
    if not client.is_initialized():
        logging.error("LaunchDarkly client is not initialized for ToggleBank DB")
        return
    logging.info("Starting guarded release rollback generator for ToggleBank DB flag...")
    while True:
        flag_details = get_flag_details("togglebankDBGuardedRelease")
        if not flag_details or not is_measured_rollout(flag_details):
            logging.info("Measured rollout is over or flag details unavailable. Exiting ToggleBank DB generator.")
            stop_event.set()
            break
        try:
            user_context = generate_user_context()
            flag_value = client.variation("togglebankDBGuardedRelease", user_context, False)
            if flag_value:
                # True: higher error rate, higher latency
                if random.random() < 0.7:
                    client.track("recent-trades-db-errors", user_context)
                latency = random.randint(300, 700)
                client.track("recent-trades-db-latency", user_context, None, latency)
            else:
                # False: lower error rate, lower latency
                if random.random() < 0.08:
                    client.track("recent-trades-db-errors", user_context)
                latency = random.randint(50, 150)
                client.track("recent-trades-db-latency", user_context, None, latency)
            time.sleep(0.05)
        except Exception as e:
            logging.error(f"Error during ToggleBank DB guarded release simulation: {str(e)}")
            continue
    logging.info("ToggleBank DB guarded release rollback generator finished.")

def investment_db_guarded_release_generator(client, stop_event):
    """Emit error/latency events for investment-recent-trade-db while its
    measured rollout runs (True: 65% errors, 250-600ms; False: 6%, 40-120ms).
    """
    if not client.is_initialized():
        logging.error("LaunchDarkly client is not initialized for Investment DB")
        return
    logging.info("Starting guarded release rollback generator for Investment DB flag...")
    while True:
        flag_details = get_flag_details("investment-recent-trade-db")
        if not flag_details or not is_measured_rollout(flag_details):
            logging.info("Measured rollout is over or flag details unavailable. Exiting Investment DB generator.")
            stop_event.set()
            break
        try:
            user_context = generate_user_context()
            flag_value = client.variation("investment-recent-trade-db", user_context, False)
            if flag_value:
                # True: higher error rate, higher latency
                if random.random() < 0.65:
                    client.track("recent-trades-db-errors", user_context)
                latency = random.randint(250, 600)
                client.track("recent-trades-db-latency", user_context, None, latency)
            else:
                # False: lower error rate, lower latency
                if random.random() < 0.06:
                    client.track("recent-trades-db-errors", user_context)
                latency = random.randint(40, 120)
                client.track("recent-trades-db-latency", user_context, None, latency)
            time.sleep(0.05)
        except Exception as e:
            logging.error(f"Error during Investment DB guarded release simulation: {str(e)}")
            continue
    logging.info("Investment DB guarded release rollback generator finished.")

def investment_api_guarded_release_generator(client, stop_event):
    """Emit error/latency events for release-new-investment-stock-api while its
    measured rollout runs (True: 72% errors, 350-750ms; False: 9%, 70-170ms).
    """
    if not client.is_initialized():
        logging.error("LaunchDarkly client is not initialized for Investment API")
        return
    logging.info("Starting guarded release rollback generator for Investment API flag...")
    while True:
        flag_details = get_flag_details("release-new-investment-stock-api")
        if not flag_details or not is_measured_rollout(flag_details):
            logging.info("Measured rollout is over or flag details unavailable. Exiting Investment API generator.")
            stop_event.set()
            break
        try:
            user_context = generate_user_context()
            flag_value = client.variation("release-new-investment-stock-api", user_context, False)
            if flag_value:
                # True: higher error rate, higher latency
                if random.random() < 0.72:
                    client.track("stocks-api-error-rates", user_context)
                latency = random.randint(350, 750)
                client.track("stocks-api-latency", user_context, None, latency)
            else:
                # False: lower error rate, lower latency
                if random.random() < 0.09:
                    client.track("stocks-api-error-rates", user_context)
                latency = random.randint(70, 170)
                client.track("stocks-api-latency", user_context, None, latency)
            time.sleep(0.05)
        except Exception as e:
            logging.error(f"Error during Investment API guarded release simulation: {str(e)}")
            continue
    logging.info("Investment API guarded release rollback generator finished.")

def risk_mgmt_db_guarded_release_generator(client, stop_event):
    """Emit error/latency events for riskmgmtbureauDBGuardedRelease while its
    measured rollout runs (True: 68% errors, 280-650ms; False: 7%, 60-140ms).
    """
    if not client.is_initialized():
        logging.error("LaunchDarkly client is not initialized for Risk Management DB")
        return
    logging.info("Starting guarded release rollback generator for Risk Management DB flag...")
    while True:
        flag_details = get_flag_details("riskmgmtbureauDBGuardedRelease")
        if not flag_details or not is_measured_rollout(flag_details):
            logging.info("Measured rollout is over or flag details unavailable. Exiting Risk Management DB generator.")
            stop_event.set()
            break
        try:
            user_context = generate_user_context()
            flag_value = client.variation("riskmgmtbureauDBGuardedRelease", user_context, False)
            if flag_value:
                # True: higher error rate, higher latency
                if random.random() < 0.68:
                    client.track("rm-db-errors", user_context)
                latency = random.randint(280, 650)
                client.track("rm-db-latency", user_context, None, latency)
            else:
                # False: lower error rate, lower latency
                if random.random() < 0.07:
                    client.track("rm-db-errors", user_context)
                latency = random.randint(60, 140)
                client.track("rm-db-latency", user_context, None, latency)
            time.sleep(0.05)
        except Exception as e:
            logging.error(f"Error during Risk Management DB guarded release simulation: {str(e)}")
            continue
    logging.info("Risk Management DB guarded release rollback generator finished.")

def generate_results(project_key, api_key):
    """Run every results generator against the project's environment.

    Requires LD_SDK_KEY in the environment; without it, all generation is
    skipped. Evaluates all flags, runs the AI Config monitoring and experiment
    generators sequentially, then runs every guarded-release generator in its
    own thread for ~5 seconds before signalling them to stop. Flushes and
    closes the client when finished.
    """
    # SECURITY: never echo the API key - it is a secret. Log only whether it
    # was provided. (The previous version printed the key verbatim.)
    print(f"Generating flags for project {project_key} with API key {'<set>' if api_key else '<missing>'} (stub)")
    sdk_key = os.getenv("LD_SDK_KEY")
    if sdk_key:
        ldclient.set_config(Config(sdk_key=sdk_key, events_max_pending=1000))
        client = ldclient.get()

        # Evaluate all flags by their tags
        evaluate_all_flags(client)

        # AI Configs monitoring
        ai_configs_monitoring_results_generator(client)
        financial_agent_monitoring_results_generator(client)

        # All experiment result generators
        experiment_results_generator(client)  # cartSuggestedItems
        ai_configs_experiment_results_generator(client)  # ai-config--togglebot
        hero_image_experiment_results_generator(client)  # showDifferentHeroImageString
        hero_redesign_experiment_results_generator(client)  # showHeroRedesign
        hallucination_detection_experiment_results_generator(client)  # ai-config--togglebot
        togglebank_signup_funnel_experiment_results_generator(client)  # releaseNewSignupPromo

        # New experiment generators
        ecommerce_collection_banner_experiment_results_generator(client)  # storeAttentionCallout
        ecommerce_shorten_collection_experiment_results_generator(client)  # release-new-shorten-collections-page
        ecommerce_new_search_engine_experiment_results_generator(client)  # release-new-search-engine
        togglebank_special_offers_experiment_results_generator(client)  # showDifferentSpecialOfferString
        togglebank_widget_position_experiment_results_generator(client)  # swapWidgetPositions
        government_ai_config_experiment_results_generator(client)  # ai-config--publicbot

        # Guarded release generators: one stop event per generator so each can
        # also self-terminate when its measured rollout ends.
        stop_event = threading.Event()
        risk_mgmt_stop_event = threading.Event()
        financial_agent_stop_event = threading.Event()
        togglebank_db_stop_event = threading.Event()
        investment_db_stop_event = threading.Event()
        investment_api_stop_event = threading.Event()
        risk_mgmt_db_stop_event = threading.Event()

        # Create and start all guarded release threads
        a4_thread = threading.Thread(target=a4_guarded_release_generator, args=(client, stop_event))
        risk_mgmt_thread = threading.Thread(target=risk_mgmt_guarded_release_generator, args=(client, risk_mgmt_stop_event))
        financial_agent_thread = threading.Thread(target=financial_advisor_agent_guarded_release_generator, args=(client, financial_agent_stop_event))
        togglebank_db_thread = threading.Thread(target=togglebank_db_guarded_release_generator, args=(client, togglebank_db_stop_event))
        investment_db_thread = threading.Thread(target=investment_db_guarded_release_generator, args=(client, investment_db_stop_event))
        investment_api_thread = threading.Thread(target=investment_api_guarded_release_generator, args=(client, investment_api_stop_event))
        risk_mgmt_db_thread = threading.Thread(target=risk_mgmt_db_guarded_release_generator, args=(client, risk_mgmt_db_stop_event))

        a4_thread.start()
        risk_mgmt_thread.start()
        financial_agent_thread.start()
        togglebank_db_thread.start()
        investment_db_thread.start()
        investment_api_thread.start()
        risk_mgmt_db_thread.start()

        # Let the simulations run briefly, then ask every generator to stop.
        time.sleep(5)

        stop_event.set()
        risk_mgmt_stop_event.set()
        financial_agent_stop_event.set()
        togglebank_db_stop_event.set()
        investment_db_stop_event.set()
        investment_api_stop_event.set()
        risk_mgmt_db_stop_event.set()

        # NOTE(review): the generator loops check the rollout state rather than
        # the stop event, so join() may block until the current iteration's
        # flag-details poll returns - confirm acceptable for the scheduler.
        a4_thread.join()
        risk_mgmt_thread.join()
        financial_agent_thread.join()
        togglebank_db_thread.join()
        investment_db_thread.join()
        investment_api_thread.join()
        risk_mgmt_db_thread.join()

        client.flush()
        client.close()
    else:
        print("LD_SDK_KEY not set in environment. Skipping flag evaluation, guarded release simulation, AI Configs monitoring, and experiment results generation.")