From a3f09231e6192a428e2dab2776a6b498b4909263 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 19:43:10 +0000 Subject: [PATCH 01/19] feat: Add workflow cancellation helpers to WorkflowLifecycle - Add check_cancel_requested(run_id) to poll for cancel status - Returns True when workflow status is 'canceling' - Workflows should call this periodically in their main loop - Add on_workflow_cancel(run_id, message) to complete cancellation - Validates workflow is in 'canceling' status - Updates status to 'canceled' with timestamp - Emits cancellation event This completes the cancel flow: 1. Frontend calls POST /api/workflows/{id}/cancel 2. API sets status to 'canceling' 3. Workflow calls check_cancel_requested() periodically 4. When True, workflow cleans up and calls on_workflow_cancel() 5. Status becomes 'canceled' Co-Authored-By: Claude Opus 4.5 --- src/kurt/observability/lifecycle.py | 80 +++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/src/kurt/observability/lifecycle.py b/src/kurt/observability/lifecycle.py index 5f40eedc..d109bfc1 100644 --- a/src/kurt/observability/lifecycle.py +++ b/src/kurt/observability/lifecycle.py @@ -679,6 +679,86 @@ def on_workflow_fail(self, run_id: str, error: str) -> None: """ self.update_status(run_id, "failed", error=error) + # ========================================================================= + # Cancellation Support + # ========================================================================= + + def check_cancel_requested(self, run_id: str) -> bool: + """Check if workflow cancellation has been requested. + + Workflows should call this periodically to detect cancel requests. + If cancellation is detected, the workflow should cleanup and call + on_workflow_cancel() to complete the cancellation. + + Args: + run_id: The workflow run ID. + + Returns: + True if the workflow should cancel, False otherwise. 
+ + Example: + # In workflow loop: + if lifecycle.check_cancel_requested(run_id): + lifecycle.on_workflow_cancel(run_id) + return + """ + try: + row = self._db.query_one( + "SELECT status FROM workflow_runs WHERE id = ?", + [run_id], + ) + if row is None: + return False + return row.get("status") == "canceling" + except Exception: + return False + + def on_workflow_cancel(self, run_id: str, message: str | None = None) -> None: + """Callback for workflow cancellation (status=canceling -> canceled). + + Call this when the workflow has detected cancellation and finished cleanup. + + Args: + run_id: The workflow run ID. + message: Optional message describing cancellation. + """ + # Get current status first + try: + row = self._db.query_one( + "SELECT status FROM workflow_runs WHERE id = ?", + [run_id], + ) + if row is None: + logger.warning(f"Cannot cancel workflow {run_id}: not found") + return + if row.get("status") != "canceling": + logger.warning(f"Cannot cancel workflow {run_id}: status is {row.get('status')}") + return + except Exception as e: + logger.warning(f"Cannot check workflow status {run_id}: {e}") + return + + # Update to canceled + now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") + error_msg = message or "Workflow cancelled by user" + try: + self._db.execute( + "UPDATE workflow_runs SET status = 'canceled', completed_at = ?, error = ? 
WHERE id = ?", + [now, error_msg, run_id], + ) + logger.info(f"Workflow {run_id} cancelled") + + if self._emit_events: + self._emit_event( + run_id=run_id, + step_id="workflow", + status="failed", + message=error_msg, + metadata={"status": "canceled"}, + ) + except Exception as e: + logger.error(f"Failed to cancel workflow {run_id}: {e}") + # ========================================================================= # Internal Helpers # ========================================================================= From 901257e9e24de509aaacb2ced4d6226c80bea1c8 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 19:45:51 +0000 Subject: [PATCH 02/19] feat: Enhance workflow list search and pagination - Search now matches workflow name in addition to ID - Add has_more, offset, limit fields to pagination response - Track raw_count before Python filtering for accurate has_more Co-Authored-By: Claude Opus 4.5 --- src/kurt/web/api/server.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/kurt/web/api/server.py b/src/kurt/web/api/server.py index 680b183d..1e3a07e1 100644 --- a/src/kurt/web/api/server.py +++ b/src/kurt/web/api/server.py @@ -1354,7 +1354,9 @@ def api_list_workflows( params.append(status) if search: - conditions.append("id LIKE ?") + # Search by ID or workflow name + conditions.append("(id LIKE ? 
OR workflow LIKE ?)") + params.append(f"%{search}%") params.append(f"%{search}%") if conditions: @@ -1366,6 +1368,7 @@ def api_list_workflows( result = db.query(sql, params) workflows = [] + raw_count = len(result.rows) # Count before Python filtering for row in result.rows: workflow_id = row.get("id", "") @@ -1427,12 +1430,22 @@ def api_list_workflows( workflows.append(workflow) - return {"workflows": workflows, "total": len(workflows)} + # has_more is True if the raw SQL query returned 'limit' rows + # (there might be more in the database) + has_more = raw_count >= limit + + return { + "workflows": workflows, + "total": len(workflows), + "has_more": has_more, + "offset": offset, + "limit": limit, + } except Exception as e: # Handle missing table (no workflows run yet) if "no such table" in str(e).lower() or "doesn't exist" in str(e).lower(): - return {"workflows": [], "total": 0} - return {"workflows": [], "total": 0, "error": f"Database error: {e}"} + return {"workflows": [], "total": 0, "has_more": False} + return {"workflows": [], "total": 0, "has_more": False, "error": f"Database error: {e}"} @app.get("/api/workflows/{workflow_id}") From f9e83aadf9d87679f2d9191f60643d6ad72fd63f Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 19:48:12 +0000 Subject: [PATCH 03/19] feat: Add workflow_type classification for filtering - Tool workflows (map, fetch, signals, etc.) 
now set workflow_type="tool" - TOML workflows set workflow_type="toml" - Agent workflows already set workflow_type="agent" - Also propagate parent_workflow_id for nested workflow tracking Co-Authored-By: Claude Opus 4.5 --- src/kurt/tools/core/runner.py | 15 +++++++++++++++ src/kurt/workflows/toml/cli.py | 13 +++++++++++++ 2 files changed, 28 insertions(+) diff --git a/src/kurt/tools/core/runner.py b/src/kurt/tools/core/runner.py index 5279b9aa..a2b7275e 100644 --- a/src/kurt/tools/core/runner.py +++ b/src/kurt/tools/core/runner.py @@ -10,6 +10,7 @@ import argparse import asyncio import json +import os import subprocess import sys import tempfile @@ -122,6 +123,13 @@ def run_tool_with_tracking( run_metadata["priority"] = priority if cli_command: run_metadata["cli_command"] = cli_command + # Set workflow_type for tool filtering in UI + if "workflow_type" not in run_metadata: + run_metadata["workflow_type"] = "tool" + # Store parent workflow ID for nested workflow display + parent_workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") + if parent_workflow_id and "parent_workflow_id" not in run_metadata: + run_metadata["parent_workflow_id"] = parent_workflow_id if run_id is None: run_id = lifecycle.create_run( @@ -194,6 +202,13 @@ def create_pending_run( run_metadata["priority"] = priority if cli_command: run_metadata["cli_command"] = cli_command + # Set workflow_type for tool filtering in UI + if "workflow_type" not in run_metadata: + run_metadata["workflow_type"] = "tool" + # Store parent workflow ID for nested workflow display + parent_workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") + if parent_workflow_id and "parent_workflow_id" not in run_metadata: + run_metadata["parent_workflow_id"] = parent_workflow_id return lifecycle.create_run( workflow=tool_name, diff --git a/src/kurt/workflows/toml/cli.py b/src/kurt/workflows/toml/cli.py index 8945b290..b6948ee8 100644 --- a/src/kurt/workflows/toml/cli.py +++ b/src/kurt/workflows/toml/cli.py @@ -21,6 +21,7 @@ 
import asyncio import json +import os import sys import time from datetime import datetime @@ -261,9 +262,21 @@ def run_cmd(workflow_path: Path, inputs: tuple[str, ...], background: bool, fore from kurt.observability import WorkflowLifecycle lifecycle = WorkflowLifecycle(db) + + # Set workflow_type in metadata for type filtering + metadata = { + "workflow_type": "toml", + "definition_file": str(workflow_path.name), + } + # Store parent workflow ID for nested workflow display + parent_workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") + if parent_workflow_id: + metadata["parent_workflow_id"] = parent_workflow_id + lifecycle.create_run( workflow=workflow_def.workflow.name, inputs=merged_inputs, + metadata=metadata, run_id=run_id, status="pending", ) From 8b1f743512b02f17cf47d4db444e7302706a6987 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 19:50:04 +0000 Subject: [PATCH 04/19] feat: Enhance log streaming to use step_events table - /api/workflows/{id}/logs/stream now streams from step_events table - /api/workflows/{id}/logs returns structured events from step_events - Both endpoints also support file-based logs as fallback - Add step_id filtering parameter to both endpoints - Return structured event data with metadata parsing Co-Authored-By: Claude Opus 4.5 --- src/kurt/web/api/server.py | 246 +++++++++++++++++++++++++++++-------- 1 file changed, 195 insertions(+), 51 deletions(-) diff --git a/src/kurt/web/api/server.py b/src/kurt/web/api/server.py index 1e3a07e1..e5ea0ca3 100644 --- a/src/kurt/web/api/server.py +++ b/src/kurt/web/api/server.py @@ -1691,8 +1691,20 @@ async def event_generator(): @app.get("/api/workflows/{workflow_id}/logs/stream") -async def api_stream_workflow_logs(workflow_id: str): - """Stream workflow logs via Server-Sent Events.""" +async def api_stream_workflow_logs( + workflow_id: str, + step_id: Optional[str] = Query(None, description="Filter by step ID"), +): + """Stream workflow logs via Server-Sent Events. 
+ + Streams step_events with messages as structured log entries. + Also checks for and streams file-based logs as fallback. + + Events are JSON with format: + {"type": "event", "event": {...}} - Structured step event + {"type": "log", "content": "..."} - File-based log content + {"done": true} - Stream complete + """ import asyncio from fastapi.responses import StreamingResponse @@ -1721,44 +1733,116 @@ async def api_stream_workflow_logs(workflow_id: str): log_file = Path(".kurt") / "logs" / f"workflow-{full_id}.log" async def event_generator(): - last_size = 0 + cursor_id = 0 + last_file_size = 0 + terminal_statuses = ("completed", "failed", "canceled") + while True: try: + db_inner = _get_dolt_db() + if not db_inner: + await asyncio.sleep(0.5) + continue + + # Fetch new step_events since cursor + conditions = ["run_id = ?", "id > ?"] + params: list[Any] = [full_id, cursor_id] + + if step_id: + conditions.append("step_id = ?") + params.append(step_id) + + params.append(50) # Batch limit + + sql = f""" + SELECT id, step_id, substep, status, current, total, message, + metadata_json, created_at + FROM step_events + WHERE {' AND '.join(conditions)} + ORDER BY id ASC + LIMIT ? 
+ """ + events_result = db_inner.query(sql, params) + + # Yield new events + for row in events_result.rows: + event_data = { + "id": row.get("id"), + "step_id": row.get("step_id"), + "substep": row.get("substep"), + "status": row.get("status"), + "current": row.get("current"), + "total": row.get("total"), + "message": row.get("message"), + "created_at": str(row.get("created_at")) if row.get("created_at") else None, + } + # Parse metadata if present + metadata_raw = row.get("metadata_json") + if metadata_raw: + try: + if isinstance(metadata_raw, str): + event_data["metadata"] = json.loads(metadata_raw) + else: + event_data["metadata"] = metadata_raw + except (json.JSONDecodeError, TypeError): + pass + + yield f"data: {json.dumps({'type': 'event', 'event': event_data})}\n\n" + cursor_id = max(cursor_id, row.get("id", 0)) + + # Also check file-based logs if log_file.exists(): current_size = log_file.stat().st_size - if current_size > last_size: + if current_size > last_file_size: with open(log_file, "r") as f: - f.seek(last_size) + f.seek(last_file_size) new_content = f.read() if new_content: - yield f"data: {json.dumps({'content': new_content})}\n\n" - last_size = current_size + yield f"data: {json.dumps({'type': 'log', 'content': new_content})}\n\n" + last_file_size = current_size # Check if workflow is done - db_inner = _get_dolt_db() - if db_inner: - status_result = db_inner.query( - "SELECT status FROM workflow_runs WHERE id = ?", - [full_id], - ) - if status_result.rows: - status = status_result.rows[0].get("status") - if status in ("completed", "failed", "canceled"): - # Send final content and close - await asyncio.sleep(0.5) - if log_file.exists(): - current_size = log_file.stat().st_size - if current_size > last_size: - with open(log_file, "r") as f: - f.seek(last_size) - new_content = f.read() - if new_content: - yield f"data: {json.dumps({'content': new_content})}\n\n" - yield f"data: {json.dumps({'done': True})}\n\n" - break + status_result = 
db_inner.query( + "SELECT status FROM workflow_runs WHERE id = ?", + [full_id], + ) + if status_result.rows: + status = status_result.rows[0].get("status") + if status in terminal_statuses: + # Final poll for any remaining events + await asyncio.sleep(0.3) + + # Fetch final events + final_result = db_inner.query(sql, [full_id, cursor_id, step_id, 50] if step_id else [full_id, cursor_id, 50]) + for row in final_result.rows: + event_data = { + "id": row.get("id"), + "step_id": row.get("step_id"), + "substep": row.get("substep"), + "status": row.get("status"), + "current": row.get("current"), + "total": row.get("total"), + "message": row.get("message"), + "created_at": str(row.get("created_at")) if row.get("created_at") else None, + } + yield f"data: {json.dumps({'type': 'event', 'event': event_data})}\n\n" + + # Final file content + if log_file.exists(): + current_size = log_file.stat().st_size + if current_size > last_file_size: + with open(log_file, "r") as f: + f.seek(last_file_size) + new_content = f.read() + if new_content: + yield f"data: {json.dumps({'type': 'log', 'content': new_content})}\n\n" + + yield f"data: {json.dumps({'done': True, 'status': status})}\n\n" + break await asyncio.sleep(0.5) - except Exception: + except Exception as e: + yield f"data: {json.dumps({'type': 'error', 'message': str(e)})}\n\n" break return StreamingResponse( @@ -1774,10 +1858,16 @@ async def event_generator(): @app.get("/api/workflows/{workflow_id}/logs") def api_get_workflow_logs( workflow_id: str, - offset: int = Query(0, ge=0), - limit: int = Query(500, le=5000), + step_id: Optional[str] = Query(None, description="Filter by step ID"), + since_id: int = Query(0, ge=0, description="Return events after this ID"), + limit: int = Query(100, le=1000), + include_file_logs: bool = Query(True, description="Include file-based logs"), ): - """Read workflow log file in chunks.""" + """Get workflow logs from step_events and optional file logs. 
+ + Returns structured events from step_events table plus optional + file-based log content. + """ db = _get_dolt_db() full_id = workflow_id @@ -1792,28 +1882,82 @@ def api_get_workflow_logs( except Exception: pass - log_file = Path(".kurt") / "logs" / f"workflow-{full_id}.log" - - if not log_file.exists(): - return {"content": "", "total_lines": 0, "has_more": False, "offset": offset} + events: list[dict[str, Any]] = [] + file_content = "" + has_more = False - try: - with open(log_file, "r") as f: - lines = f.readlines() + # Query step_events from database + if db is not None: + try: + conditions = ["run_id = ?"] + params: list[Any] = [full_id] + + if step_id: + conditions.append("step_id = ?") + params.append(step_id) + + if since_id > 0: + conditions.append("id > ?") + params.append(since_id) + + params.append(limit + 1) # Fetch one extra to check has_more + + sql = f""" + SELECT id, step_id, substep, status, current, total, message, + metadata_json, created_at + FROM step_events + WHERE {' AND '.join(conditions)} + ORDER BY id ASC + LIMIT ? 
+ """ + events_result = db.query(sql, params) + + for row in events_result.rows[:limit]: + event_data = { + "id": row.get("id"), + "step_id": row.get("step_id"), + "substep": row.get("substep"), + "status": row.get("status"), + "current": row.get("current"), + "total": row.get("total"), + "message": row.get("message"), + "created_at": str(row.get("created_at")) if row.get("created_at") else None, + } + # Parse metadata if present + metadata_raw = row.get("metadata_json") + if metadata_raw: + try: + if isinstance(metadata_raw, str): + event_data["metadata"] = json.loads(metadata_raw) + else: + event_data["metadata"] = metadata_raw + except (json.JSONDecodeError, TypeError): + pass + events.append(event_data) + + has_more = len(events_result.rows) > limit + except Exception: + pass - total_lines = len(lines) - selected_lines = lines[offset : offset + limit] - has_more = offset + limit < total_lines + # Also include file-based logs if requested + if include_file_logs: + log_file = Path(".kurt") / "logs" / f"workflow-{full_id}.log" + if log_file.exists(): + try: + with open(log_file, "r") as f: + file_content = f.read() + except Exception: + pass - return { - "content": "".join(selected_lines), - "total_lines": total_lines, - "offset": offset, - "limit": limit, - "has_more": has_more, - } - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) + return { + "workflow_id": full_id, + "events": events, + "total_events": len(events), + "has_more": has_more, + "since_id": since_id, + "limit": limit, + "file_content": file_content if include_file_logs else None, + } # PTY WebSocket endpoint for terminal sessions From 84b36e9226f838db437b79cf5c34f66a5e2940fe Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 19:52:42 +0000 Subject: [PATCH 05/19] feat: Add parent_step_name tracking for child workflows - Store parent_step_name in child workflow metadata - Read from KURT_PARENT_STEP_NAME environment variable - Set 
KURT_PARENT_STEP_NAME="agent_execution" for agent subprocesses - Return parent_step_name in workflow list API response - Enables frontend to group child workflows by parent step Co-Authored-By: Claude Opus 4.5 --- src/kurt/tools/core/runner.py | 10 ++++++++-- src/kurt/web/api/server.py | 1 + src/kurt/workflows/agents/executor.py | 15 ++++++++++++--- src/kurt/workflows/toml/cli.py | 5 ++++- 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/kurt/tools/core/runner.py b/src/kurt/tools/core/runner.py index a2b7275e..d8836e4b 100644 --- a/src/kurt/tools/core/runner.py +++ b/src/kurt/tools/core/runner.py @@ -126,10 +126,13 @@ def run_tool_with_tracking( # Set workflow_type for tool filtering in UI if "workflow_type" not in run_metadata: run_metadata["workflow_type"] = "tool" - # Store parent workflow ID for nested workflow display + # Store parent workflow ID and step name for nested workflow display parent_workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") if parent_workflow_id and "parent_workflow_id" not in run_metadata: run_metadata["parent_workflow_id"] = parent_workflow_id + parent_step_name = os.environ.get("KURT_PARENT_STEP_NAME") + if parent_step_name and "parent_step_name" not in run_metadata: + run_metadata["parent_step_name"] = parent_step_name if run_id is None: run_id = lifecycle.create_run( @@ -205,10 +208,13 @@ def create_pending_run( # Set workflow_type for tool filtering in UI if "workflow_type" not in run_metadata: run_metadata["workflow_type"] = "tool" - # Store parent workflow ID for nested workflow display + # Store parent workflow ID and step name for nested workflow display parent_workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") if parent_workflow_id and "parent_workflow_id" not in run_metadata: run_metadata["parent_workflow_id"] = parent_workflow_id + parent_step_name = os.environ.get("KURT_PARENT_STEP_NAME") + if parent_step_name and "parent_step_name" not in run_metadata: + run_metadata["parent_step_name"] = 
parent_step_name return lifecycle.create_run( workflow=tool_name, diff --git a/src/kurt/web/api/server.py b/src/kurt/web/api/server.py index e5ea0ca3..388e05ef 100644 --- a/src/kurt/web/api/server.py +++ b/src/kurt/web/api/server.py @@ -1414,6 +1414,7 @@ def api_list_workflows( "updated_at": str(row.get("completed_at")) if row.get("completed_at") else None, "error": row.get("error"), "parent_workflow_id": parent_workflow_id, + "parent_step_name": metadata.get("parent_step_name"), "workflow_type": wf_type, } diff --git a/src/kurt/workflows/agents/executor.py b/src/kurt/workflows/agents/executor.py index 6e3db455..adf67958 100644 --- a/src/kurt/workflows/agents/executor.py +++ b/src/kurt/workflows/agents/executor.py @@ -253,10 +253,13 @@ def execute_agent_workflow( "definition_name": definition.name, "trigger": trigger, } - # Store parent workflow ID for nested workflow display + # Store parent workflow ID and step name for nested workflow display parent_workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") if parent_workflow_id: metadata["parent_workflow_id"] = parent_workflow_id + parent_step_name = os.environ.get("KURT_PARENT_STEP_NAME") + if parent_step_name: + metadata["parent_step_name"] = parent_step_name lifecycle.create_run( workflow=f"agent:{definition.name}", @@ -466,9 +469,12 @@ def agent_execution_step( # Set up environment for subprocess env = os.environ.copy() env["KURT_TOOL_LOG_FILE"] = tool_log_path - # Pass parent workflow ID so child workflows can be nested + # Pass parent workflow ID and step name so child workflows can be nested if run_id: env["KURT_PARENT_WORKFLOW_ID"] = run_id + # For agent workflows, use "agent_execution" as the step name + # This allows frontend to group child workflows under the agent step + env["KURT_PARENT_STEP_NAME"] = "agent_execution" # Add workflow directory to PYTHONPATH for custom tool imports # This allows: from workflows.my_workflow.tools import my_tool @@ -673,10 +679,13 @@ def execute_steps_workflow( "trigger": 
trigger, "total_steps": len(definition.steps), } - # Store parent workflow ID for nested workflow display + # Store parent workflow ID and step name for nested workflow display parent_workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") if parent_workflow_id: metadata["parent_workflow_id"] = parent_workflow_id + parent_step_name = os.environ.get("KURT_PARENT_STEP_NAME") + if parent_step_name: + metadata["parent_step_name"] = parent_step_name lifecycle.create_run( workflow=f"steps:{definition.name}", diff --git a/src/kurt/workflows/toml/cli.py b/src/kurt/workflows/toml/cli.py index b6948ee8..5652dd38 100644 --- a/src/kurt/workflows/toml/cli.py +++ b/src/kurt/workflows/toml/cli.py @@ -268,10 +268,13 @@ def run_cmd(workflow_path: Path, inputs: tuple[str, ...], background: bool, fore "workflow_type": "toml", "definition_file": str(workflow_path.name), } - # Store parent workflow ID for nested workflow display + # Store parent workflow ID and step name for nested workflow display parent_workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") if parent_workflow_id: metadata["parent_workflow_id"] = parent_workflow_id + parent_step_name = os.environ.get("KURT_PARENT_STEP_NAME") + if parent_step_name: + metadata["parent_step_name"] = parent_step_name lifecycle.create_run( workflow=workflow_def.workflow.name, From 86d4090e07b7e0f2c576e3d618d4512b40c21340 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 20:10:39 +0000 Subject: [PATCH 06/19] feat: Add token/cost metadata and queue step type for agent workflows - Store token counts and cost in workflow metadata on completion (#13) - Add step_type="queue" to agent step logs for frontend display (#21) - Enables frontend to show costs and identify queue steps Co-Authored-By: Claude Opus 4.5 --- src/kurt/workflows/agents/executor.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/kurt/workflows/agents/executor.py b/src/kurt/workflows/agents/executor.py index adf67958..7f68a95b 
100644 --- a/src/kurt/workflows/agents/executor.py +++ b/src/kurt/workflows/agents/executor.py @@ -315,11 +315,14 @@ def execute_agent_workflow( workflow_dir = get_workflow_dir(definition.name) # Create step log for agent execution + # step_type="queue" indicates this step can spawn child workflows + # (via KURT_PARENT_STEP_NAME="agent_execution" env var) lifecycle.create_step_log( run_id=run_id, step_id="agent_execution", tool="ClaudeCLI", metadata={ + "step_type": "queue", "model": definition.agent.model, "max_turns": definition.agent.max_turns, }, @@ -367,8 +370,15 @@ def on_progress(event: dict[str, Any]) -> None: }, ) - # Complete workflow - lifecycle.update_status(run_id, "completed") + # Complete workflow with token/cost metadata for API display + lifecycle.update_status(run_id, "completed", metadata={ + "agent_turns": result.get("turns"), + "tokens_in": result.get("tokens_in"), + "tokens_out": result.get("tokens_out"), + "cost_usd": result.get("cost_usd"), + "tool_calls": result.get("tool_calls"), + "duration_seconds": result.get("duration_seconds"), + }) track_event( run_id=run_id, From ffc0e2ab4e80783b7ea38998bcbac0fedf6d2900 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 20:10:42 +0000 Subject: [PATCH 07/19] fix: Disable duplicate Tiptap extensions in StarterKit - Disable link and underline in StarterKit since we configure them separately - Fixes console warnings about duplicate extension names Co-Authored-By: Claude Opus 4.5 --- src/kurt/web/client/src/components/Editor.jsx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/kurt/web/client/src/components/Editor.jsx b/src/kurt/web/client/src/components/Editor.jsx index dc6c6b82..7cf6424e 100644 --- a/src/kurt/web/client/src/components/Editor.jsx +++ b/src/kurt/web/client/src/components/Editor.jsx @@ -591,6 +591,9 @@ export default function Editor({ StarterKit.configure({ // Disable default codeBlock, we use CodeBlockLowlight for syntax highlighting codeBlock: false, + // Disable 
extensions we configure separately to avoid duplicate extension warnings + link: false, + underline: false, }), Underline, Link.configure({ From d139866de6422e082e0e52d0254d130ea95acdae Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 20:44:01 +0000 Subject: [PATCH 08/19] fix: Status filter case mismatch and remove pytest-postgresql - Add _denormalize_status_filter() to convert frontend status filters (SUCCESS, ERROR, PENDING, CANCELLED) to database values - Use IN clause for PENDING filter which maps to multiple DB states (pending, running, canceling) - Remove pytest-postgresql from dev dependencies (not needed) - Update uv.lock to reflect dependency changes Fixes workflow list filtering which was broken because the UI sends uppercase statuses but the database stores lowercase values. Co-Authored-By: Claude Opus 4.5 --- pyproject.toml | 2 - src/kurt/web/api/server.py | 30 +++++++++++++- uv.lock | 82 -------------------------------------- 3 files changed, 28 insertions(+), 86 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 15f4a6be..3e432b13 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,7 +61,6 @@ dev = [ "pytest>=8.0.0", "pytest-asyncio>=0.24.0", "pytest-cov>=4.1.0", - "pytest-postgresql>=6.0.0", "ruff>=0.1.0", "pre-commit>=3.5.0", ] @@ -129,6 +128,5 @@ dev = [ "pytest>=8.4.2", "pytest-asyncio>=0.24.0", "pytest-cov>=4.1.0", - "pytest-postgresql>=7.0.2", "ruff>=0.1.0", ] diff --git a/src/kurt/web/api/server.py b/src/kurt/web/api/server.py index 388e05ef..b018d9c4 100644 --- a/src/kurt/web/api/server.py +++ b/src/kurt/web/api/server.py @@ -1329,6 +1329,28 @@ def _normalize_workflow_status(dolt_status: str) -> str: return status_map.get(normalized, normalized.upper()) +def _denormalize_status_filter(frontend_status: str) -> list[str]: + """Convert frontend status filter to database status values. + + Reverse mapping of _normalize_workflow_status for filtering queries. 
+ Returns a list since some frontend statuses map to multiple DB values. + """ + reverse_map = { + "PENDING": ["pending", "running", "canceling"], # All active states + "SUCCESS": ["completed"], + "WARNING": ["completed_with_errors"], + "ERROR": ["failed"], + "CANCELLED": ["canceled"], + "ENQUEUED": ["pending"], # Queued jobs are pending + } + # Try uppercase first, then original value + key = frontend_status.upper() if frontend_status else "" + if key in reverse_map: + return reverse_map[key] + # Fallback: pass through as-is (lowercase) + return [frontend_status.lower()] if frontend_status else [] + + @app.get("/api/workflows") def api_list_workflows( status: Optional[str] = Query(None), @@ -1350,8 +1372,12 @@ def api_list_workflows( conditions = [] if status: - conditions.append("status = ?") - params.append(status) + # Convert frontend status to DB status values + db_statuses = _denormalize_status_filter(status) + if db_statuses: + placeholders = ", ".join("?" * len(db_statuses)) + conditions.append(f"status IN ({placeholders})") + params.extend(db_statuses) if search: # Search by ID or workflow name diff --git a/uv.lock b/uv.lock index db9d4b72..58e503ab 100644 --- a/uv.lock +++ b/uv.lock @@ -1619,7 +1619,6 @@ dev = [ { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-cov" }, - { name = "pytest-postgresql" }, { name = "ruff" }, ] eval = [ @@ -1682,7 +1681,6 @@ dev = [ { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-cov" }, - { name = "pytest-postgresql" }, { name = "ruff" }, ] @@ -1720,7 +1718,6 @@ requires-dist = [ { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" }, { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=4.1.0" }, { name = "pytest-httpx", marker = "extra == 'eval'", specifier = ">=0.30.0" }, - { name = "pytest-postgresql", marker = "extra == 'dev'", specifier = ">=6.0.0" }, { name = "python-dotenv", specifier = ">=1.0.0" }, { name = "python-frontmatter", specifier = 
">=1.0.0" }, { name = "pyyaml", specifier = ">=6.0.0" }, @@ -1745,7 +1742,6 @@ dev = [ { name = "pytest", specifier = ">=8.4.2" }, { name = "pytest-asyncio", specifier = ">=0.24.0" }, { name = "pytest-cov", specifier = ">=4.1.0" }, - { name = "pytest-postgresql", specifier = ">=7.0.2" }, { name = "ruff", specifier = ">=0.1.0" }, ] @@ -2020,18 +2016,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, ] -[[package]] -name = "mirakuru" -version = "3.0.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "psutil", marker = "sys_platform != 'cygwin'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c2/e5/aea55c5ad991f5ba0cb22c4d643481312fb722a9fc1bfb9b9f38047b1b56/mirakuru-3.0.1.tar.gz", hash = "sha256:834686822da3ac06edd13fa1852143fd9ebcf0fea68d56b78b7d4be1e947f8c0", size = 28996, upload-time = "2025-11-01T21:11:30.533Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8c/46/1f0318ee8a199ae4a37c525ab92b5ecf77211b25e8e903c6a4ebc934f8a7/mirakuru-3.0.1-py3-none-any.whl", hash = "sha256:43d27dc0e59dfde27bf720516a5e96ead0b4cf9e12cb1adb57cdeea3c9239b93", size = 27412, upload-time = "2025-11-01T21:11:28.521Z" }, -] - [[package]] name = "multidict" version = "6.7.0" @@ -2552,15 +2536,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] -[[package]] -name = "port-for" -version = "1.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/88/a0/80a64e8cc096c7a9d0f546a28994af849b4775afc5e4ee44bf2739a55115/port_for-1.0.0.tar.gz", hash = "sha256:404d161b1b2c82e2f6b31d8646396b4847d02bf5ee10068c92b7263657a14582", size = 21681, upload-time = "2025-09-30T10:22:51.149Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/70/2c/b1faca65b9728b4ac43f0bee4bb9e7294bd0a62cc2ee59fd59403bf575f6/port_for-1.0.0-py3-none-any.whl", hash = "sha256:35a848b98cf4cc075fe80dc49ae5c3a78e3ca345a23bd39bf5252277b4eef5c2", size = 17544, upload-time = "2025-09-30T10:22:49.878Z" }, -] - [[package]] name = "posthog" version = "6.7.14" @@ -2735,47 +2710,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" }, ] -[[package]] -name = "psutil" -version = "7.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/73/cb/09e5184fb5fc0358d110fc3ca7f6b1d033800734d34cac10f4136cfac10e/psutil-7.2.1.tar.gz", hash = "sha256:f7583aec590485b43ca601dd9cea0dcd65bd7bb21d30ef4ddbf4ea6b5ed1bdd3", size = 490253, upload-time = "2025-12-29T08:26:00.169Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/77/8e/f0c242053a368c2aa89584ecd1b054a18683f13d6e5a318fc9ec36582c94/psutil-7.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ba9f33bb525b14c3ea563b2fd521a84d2fa214ec59e3e6a2858f78d0844dd60d", size = 129624, upload-time = "2025-12-29T08:26:04.255Z" }, - { url = "https://files.pythonhosted.org/packages/26/97/a58a4968f8990617decee234258a2b4fc7cd9e35668387646c1963e69f26/psutil-7.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:81442dac7abfc2f4f4385ea9e12ddf5a796721c0f6133260687fec5c3780fa49", size = 130132, upload-time = "2025-12-29T08:26:06.228Z" }, - { url = 
"https://files.pythonhosted.org/packages/db/6d/ed44901e830739af5f72a85fa7ec5ff1edea7f81bfbf4875e409007149bd/psutil-7.2.1-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ea46c0d060491051d39f0d2cff4f98d5c72b288289f57a21556cc7d504db37fc", size = 180612, upload-time = "2025-12-29T08:26:08.276Z" }, - { url = "https://files.pythonhosted.org/packages/c7/65/b628f8459bca4efbfae50d4bf3feaab803de9a160b9d5f3bd9295a33f0c2/psutil-7.2.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35630d5af80d5d0d49cfc4d64c1c13838baf6717a13effb35869a5919b854cdf", size = 183201, upload-time = "2025-12-29T08:26:10.622Z" }, - { url = "https://files.pythonhosted.org/packages/fb/23/851cadc9764edcc18f0effe7d0bf69f727d4cf2442deb4a9f78d4e4f30f2/psutil-7.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:923f8653416604e356073e6e0bccbe7c09990acef442def2f5640dd0faa9689f", size = 139081, upload-time = "2025-12-29T08:26:12.483Z" }, - { url = "https://files.pythonhosted.org/packages/59/82/d63e8494ec5758029f31c6cb06d7d161175d8281e91d011a4a441c8a43b5/psutil-7.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cfbe6b40ca48019a51827f20d830887b3107a74a79b01ceb8cc8de4ccb17b672", size = 134767, upload-time = "2025-12-29T08:26:14.528Z" }, - { url = "https://files.pythonhosted.org/packages/05/c2/5fb764bd61e40e1fe756a44bd4c21827228394c17414ade348e28f83cd79/psutil-7.2.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:494c513ccc53225ae23eec7fe6e1482f1b8a44674241b54561f755a898650679", size = 129716, upload-time = "2025-12-29T08:26:16.017Z" }, - { url = "https://files.pythonhosted.org/packages/c9/d2/935039c20e06f615d9ca6ca0ab756cf8408a19d298ffaa08666bc18dc805/psutil-7.2.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3fce5f92c22b00cdefd1645aa58ab4877a01679e901555067b1bd77039aa589f", size = 130133, upload-time = "2025-12-29T08:26:18.009Z" }, - { url = 
"https://files.pythonhosted.org/packages/77/69/19f1eb0e01d24c2b3eacbc2f78d3b5add8a89bf0bb69465bc8d563cc33de/psutil-7.2.1-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93f3f7b0bb07711b49626e7940d6fe52aa9940ad86e8f7e74842e73189712129", size = 181518, upload-time = "2025-12-29T08:26:20.241Z" }, - { url = "https://files.pythonhosted.org/packages/e1/6d/7e18b1b4fa13ad370787626c95887b027656ad4829c156bb6569d02f3262/psutil-7.2.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d34d2ca888208eea2b5c68186841336a7f5e0b990edec929be909353a202768a", size = 184348, upload-time = "2025-12-29T08:26:22.215Z" }, - { url = "https://files.pythonhosted.org/packages/98/60/1672114392dd879586d60dd97896325df47d9a130ac7401318005aab28ec/psutil-7.2.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2ceae842a78d1603753561132d5ad1b2f8a7979cb0c283f5b52fb4e6e14b1a79", size = 140400, upload-time = "2025-12-29T08:26:23.993Z" }, - { url = "https://files.pythonhosted.org/packages/fb/7b/d0e9d4513c46e46897b46bcfc410d51fc65735837ea57a25170f298326e6/psutil-7.2.1-cp314-cp314t-win_arm64.whl", hash = "sha256:08a2f175e48a898c8eb8eace45ce01777f4785bc744c90aa2cc7f2fa5462a266", size = 135430, upload-time = "2025-12-29T08:26:25.999Z" }, - { url = "https://files.pythonhosted.org/packages/c5/cf/5180eb8c8bdf6a503c6919f1da28328bd1e6b3b1b5b9d5b01ae64f019616/psutil-7.2.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2e953fcfaedcfbc952b44744f22d16575d3aa78eb4f51ae74165b4e96e55f42", size = 128137, upload-time = "2025-12-29T08:26:27.759Z" }, - { url = "https://files.pythonhosted.org/packages/c5/2c/78e4a789306a92ade5000da4f5de3255202c534acdadc3aac7b5458fadef/psutil-7.2.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:05cc68dbb8c174828624062e73078e7e35406f4ca2d0866c272c2410d8ef06d1", size = 128947, upload-time = "2025-12-29T08:26:29.548Z" }, - { url = 
"https://files.pythonhosted.org/packages/29/f8/40e01c350ad9a2b3cb4e6adbcc8a83b17ee50dd5792102b6142385937db5/psutil-7.2.1-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e38404ca2bb30ed7267a46c02f06ff842e92da3bb8c5bfdadbd35a5722314d8", size = 154694, upload-time = "2025-12-29T08:26:32.147Z" }, - { url = "https://files.pythonhosted.org/packages/06/e4/b751cdf839c011a9714a783f120e6a86b7494eb70044d7d81a25a5cd295f/psutil-7.2.1-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ab2b98c9fc19f13f59628d94df5cc4cc4844bc572467d113a8b517d634e362c6", size = 156136, upload-time = "2025-12-29T08:26:34.079Z" }, - { url = "https://files.pythonhosted.org/packages/44/ad/bbf6595a8134ee1e94a4487af3f132cef7fce43aef4a93b49912a48c3af7/psutil-7.2.1-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f78baafb38436d5a128f837fab2d92c276dfb48af01a240b861ae02b2413ada8", size = 148108, upload-time = "2025-12-29T08:26:36.225Z" }, - { url = "https://files.pythonhosted.org/packages/1c/15/dd6fd869753ce82ff64dcbc18356093471a5a5adf4f77ed1f805d473d859/psutil-7.2.1-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:99a4cd17a5fdd1f3d014396502daa70b5ec21bf4ffe38393e152f8e449757d67", size = 147402, upload-time = "2025-12-29T08:26:39.21Z" }, - { url = "https://files.pythonhosted.org/packages/34/68/d9317542e3f2b180c4306e3f45d3c922d7e86d8ce39f941bb9e2e9d8599e/psutil-7.2.1-cp37-abi3-win_amd64.whl", hash = "sha256:b1b0671619343aa71c20ff9767eced0483e4fc9e1f489d50923738caf6a03c17", size = 136938, upload-time = "2025-12-29T08:26:41.036Z" }, - { url = "https://files.pythonhosted.org/packages/3e/73/2ce007f4198c80fcf2cb24c169884f833fe93fbc03d55d302627b094ee91/psutil-7.2.1-cp37-abi3-win_arm64.whl", hash = "sha256:0d67c1822c355aa6f7314d92018fb4268a76668a536f133599b91edd48759442", size = 133836, upload-time = "2025-12-29T08:26:43.086Z" }, -] - -[[package]] -name = "psycopg" -version = "3.2.12" -source = { registry = 
"https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, - { name = "tzdata", marker = "sys_platform == 'win32'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a8/77/c72d10262b872617e509a0c60445afcc4ce2cd5cd6bc1c97700246d69c85/psycopg-3.2.12.tar.gz", hash = "sha256:85c08d6f6e2a897b16280e0ff6406bef29b1327c045db06d21f364d7cd5da90b", size = 160642, upload-time = "2025-10-26T00:46:03.045Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/28/8c4f90e415411dc9c78d6ba10b549baa324659907c13f64bfe3779d4066c/psycopg-3.2.12-py3-none-any.whl", hash = "sha256:8a1611a2d4c16ae37eada46438be9029a35bb959bb50b3d0e1e93c0f3d54c9ee", size = 206765, upload-time = "2025-10-26T00:10:42.173Z" }, -] - [[package]] name = "psycopg2-binary" version = "2.9.11" @@ -3121,22 +3055,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b0/ed/026d467c1853dd83102411a78126b4842618e86c895f93528b0528c7a620/pytest_httpx-0.35.0-py3-none-any.whl", hash = "sha256:ee11a00ffcea94a5cbff47af2114d34c5b231c326902458deed73f9c459fd744", size = 19442, upload-time = "2024-11-28T19:16:52.787Z" }, ] -[[package]] -name = "pytest-postgresql" -version = "7.0.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mirakuru" }, - { name = "packaging" }, - { name = "port-for" }, - { name = "psycopg" }, - { name = "pytest" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/18/15/b3c07d1537c7608c3f45d3ee6f778a56b1daa480221bb500abc9e44e01a0/pytest_postgresql-7.0.2.tar.gz", hash = "sha256:57c8d3f7d4e91d0ea8b2eac786d04f60080fa6ed6e66f1f94d747c71c9e5a4f4", size = 50691, upload-time = "2025-05-17T20:17:59.227Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/18/57/f2db5a80b10c3ac48ce41786cb9b14172f997509ee1b1055ab7db4238e5e/pytest_postgresql-7.0.2-py3-none-any.whl", hash = "sha256:0b0d31c51620a9c1d6be93286af354256bc58a47c379f56f4147b22da6e81fb5", size = 41447, 
upload-time = "2025-05-17T20:17:58.011Z" }, -] - [[package]] name = "python-dateutil" version = "2.9.0.post0" From 52f935cd327a46543f510e1c0facac0953450022 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 20:48:37 +0000 Subject: [PATCH 09/19] refactor: Normalize status consistently across all API endpoints - Add _normalize_step_status() to convert step/event status to frontend format (completed->success, failed->error, progress->progress) - Apply normalization to: - /api/workflows/{id}/step-logs endpoint - /api/workflows/{id}/logs endpoint - /api/workflows/{id}/logs/stream SSE events - Normalize workflow status in stream done message This ensures the frontend receives consistent status values from all endpoints instead of mixing raw DB values with normalized values. Co-Authored-By: Claude Opus 4.5 --- src/kurt/web/api/server.py | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/src/kurt/web/api/server.py b/src/kurt/web/api/server.py index b018d9c4..0072d391 100644 --- a/src/kurt/web/api/server.py +++ b/src/kurt/web/api/server.py @@ -1351,6 +1351,28 @@ def _denormalize_status_filter(frontend_status: str) -> list[str]: return [frontend_status.lower()] if frontend_status else [] +def _normalize_step_status(db_status: str | None) -> str: + """Normalize step/event status to frontend-expected format. 
+ + Step statuses use lowercase: pending, running, completed, failed, progress + Frontend expects: pending, running, success, error, progress + + Consistent with _map_step_status in observability/status.py + """ + if not db_status: + return "pending" + status_map = { + "pending": "pending", + "running": "running", + "progress": "progress", + "completed": "success", + "failed": "error", + "canceled": "error", + "skipped": "skipped", + } + return status_map.get(db_status.lower(), db_status.lower()) + + @app.get("/api/workflows") def api_list_workflows( status: Optional[str] = Query(None), @@ -1631,7 +1653,7 @@ def api_get_step_logs( logs.append({ "step_id": row.get("step_id"), "tool": row.get("tool"), - "status": row.get("status"), + "status": _normalize_step_status(row.get("status")), "started_at": str(row.get("started_at")) if row.get("started_at") else None, "completed_at": str(row.get("completed_at")) if row.get("completed_at") else None, "input_count": row.get("input_count"), @@ -1797,7 +1819,7 @@ async def event_generator(): "id": row.get("id"), "step_id": row.get("step_id"), "substep": row.get("substep"), - "status": row.get("status"), + "status": _normalize_step_status(row.get("status")), "current": row.get("current"), "total": row.get("total"), "message": row.get("message"), @@ -1846,7 +1868,7 @@ async def event_generator(): "id": row.get("id"), "step_id": row.get("step_id"), "substep": row.get("substep"), - "status": row.get("status"), + "status": _normalize_step_status(row.get("status")), "current": row.get("current"), "total": row.get("total"), "message": row.get("message"), @@ -1864,7 +1886,7 @@ async def event_generator(): if new_content: yield f"data: {json.dumps({'type': 'log', 'content': new_content})}\n\n" - yield f"data: {json.dumps({'done': True, 'status': status})}\n\n" + yield f"data: {json.dumps({'done': True, 'status': _normalize_workflow_status(status)})}\n\n" break await asyncio.sleep(0.5) @@ -1944,7 +1966,7 @@ def api_get_workflow_logs( 
"id": row.get("id"), "step_id": row.get("step_id"), "substep": row.get("substep"), - "status": row.get("status"), + "status": _normalize_step_status(row.get("status")), "current": row.get("current"), "total": row.get("total"), "message": row.get("message"), From 421469722e3cbac687733e1c4216a950cbfa55d3 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 20:53:50 +0000 Subject: [PATCH 10/19] feat: Auto-expand error workflows and optimize polling interval Auto-expand on error (#20): - Add hasErrorStatus() helper to detect workflows with errors - Track auto-expanded and user-collapsed workflows with refs - Automatically expand first error workflow on load - Respect user's manual collapse (don't re-expand) Polling optimization (#19): - Add adaptive polling intervals (2s fast, 10s slow) - Poll fast when workflows are actively running (PENDING/ENQUEUED) - Poll slow when all workflows are idle/completed - Split useEffect for cleaner separation of concerns Co-Authored-By: Claude Opus 4.5 --- .../client/src/components/WorkflowList.jsx | 97 ++++++++++++++++++- 1 file changed, 92 insertions(+), 5 deletions(-) diff --git a/src/kurt/web/client/src/components/WorkflowList.jsx b/src/kurt/web/client/src/components/WorkflowList.jsx index 264f303f..93db27be 100644 --- a/src/kurt/web/client/src/components/WorkflowList.jsx +++ b/src/kurt/web/client/src/components/WorkflowList.jsx @@ -1,6 +1,18 @@ -import { useState, useEffect, useCallback } from 'react' +import { useState, useEffect, useCallback, useRef, useMemo } from 'react' import WorkflowRow from './WorkflowRow' +// Polling intervals in milliseconds +const POLLING_FAST = 2000 // When workflows are running +const POLLING_SLOW = 10000 // When all workflows are idle +const POLLING_NONE = null // Stop polling when no workflows + +// Check if a workflow has an error status that should trigger auto-expand +const hasErrorStatus = (workflow) => { + return workflow.status === 'ERROR' || + workflow.status === 'RETRIES_EXCEEDED' 
|| + (workflow.error_count && workflow.error_count > 0) +} + const STATUS_OPTIONS = [ { value: '', label: 'All' }, { value: 'PENDING', label: 'Running' }, @@ -31,6 +43,10 @@ export default function WorkflowList({ onAttachWorkflow }) { const [error, setError] = useState(null) const [hasMore, setHasMore] = useState(false) const [offset, setOffset] = useState(0) + // Track workflows that have been auto-expanded (to avoid re-expanding after manual collapse) + const autoExpandedRef = useRef(new Set()) + // Track if user has manually collapsed a workflow + const userCollapsedRef = useRef(new Set()) const fetchWorkflows = useCallback(async (loadMore = false) => { setIsLoading(true) @@ -73,13 +89,75 @@ export default function WorkflowList({ onAttachWorkflow }) { } }, [statusFilter, typeFilter, searchQuery, offset]) - // Initial fetch and polling + // Determine if any workflow is actively running + const hasRunningWorkflows = useMemo(() => { + return workflows.some( + (w) => w.status === 'PENDING' || w.status === 'ENQUEUED' + ) + }, [workflows]) + + // Calculate optimal polling interval based on workflow states + const pollingInterval = useMemo(() => { + if (workflows.length === 0) { + // No workflows loaded yet - poll at slow interval to check for new ones + return POLLING_SLOW + } + if (hasRunningWorkflows) { + // Active workflows - poll fast for real-time updates + return POLLING_FAST + } + // All workflows completed/idle - poll slowly or stop + // Continue slow polling to detect new workflows + return POLLING_SLOW + }, [workflows.length, hasRunningWorkflows]) + + // Track polling interval changes for logging (debug) + const prevIntervalRef = useRef(pollingInterval) + + // Initial fetch and adaptive polling useEffect(() => { fetchWorkflows(false) - const interval = setInterval(() => fetchWorkflows(false), 5000) - return () => clearInterval(interval) }, [statusFilter, typeFilter, searchQuery]) + // Adaptive polling based on workflow state + useEffect(() => { + if 
(pollingInterval === POLLING_NONE) { + return // No polling needed + } + + const interval = setInterval(() => { + fetchWorkflows(false) + }, pollingInterval) + + // Update ref for tracking + prevIntervalRef.current = pollingInterval + + return () => clearInterval(interval) + }, [pollingInterval, statusFilter, typeFilter, searchQuery, fetchWorkflows]) + + // Auto-expand workflows with error status on first load + useEffect(() => { + if (workflows.length === 0) return + + // Find the first error workflow that hasn't been auto-expanded yet + // and hasn't been manually collapsed by the user + for (const workflow of workflows) { + const workflowId = workflow.workflow_uuid + if ( + hasErrorStatus(workflow) && + !autoExpandedRef.current.has(workflowId) && + !userCollapsedRef.current.has(workflowId) + ) { + // Mark as auto-expanded so we don't re-expand on refresh + autoExpandedRef.current.add(workflowId) + // Expand this workflow + setExpandedId(workflowId) + // Only auto-expand one workflow at a time + break + } + } + }, [workflows]) + const handleLoadMore = () => { fetchWorkflows(true) } @@ -104,7 +182,16 @@ export default function WorkflowList({ onAttachWorkflow }) { } const handleToggleExpand = (workflowId) => { - setExpandedId(expandedId === workflowId ? 
null : workflowId) + const isCurrentlyExpanded = expandedId === workflowId + if (isCurrentlyExpanded) { + // User is collapsing - track this to prevent re-auto-expand + userCollapsedRef.current.add(workflowId) + setExpandedId(null) + } else { + // User is expanding - remove from collapsed set if present + userCollapsedRef.current.delete(workflowId) + setExpandedId(workflowId) + } } const getStatusBadgeClass = (status) => { From 6079aa4baf215fb20a1e8eebc1828bc41362adce Mon Sep 17 00:00:00 2001 From: Kurt User Date: Tue, 27 Jan 2026 21:21:45 +0000 Subject: [PATCH 11/19] feat: Add workflow output display, retry, and config sections Output/Result display (#28): - Add _build_output_summary() to extract output metrics - Show agent metrics: turns, tokens, cost, tool_calls - Show tool metrics: output_count, success, errors - Display result preview and errors prominently - Auto-expand output section when errors present Retry functionality (#26): - Add POST /api/workflows/{id}/retry endpoint - Handle both agent and tool workflow retries - Preserve original inputs for retry - Add retry button in UI for completed/failed workflows Config/Definition display (#27): - Add WorkflowConfigSection component - Show workflow_type, definition_name, trigger - Display inputs in formatted key-value grid - Collapsible section with smart preview Co-Authored-By: Claude Opus 4.5 --- src/kurt/observability/status.py | 83 ++++- src/kurt/web/api/server.py | 144 ++++++++ .../client/src/components/WorkflowList.jsx | 18 + .../web/client/src/components/WorkflowRow.jsx | 327 ++++++++++++++++++ src/kurt/web/client/src/styles.css | 171 +++++++++ 5 files changed, 742 insertions(+), 1 deletion(-) diff --git a/src/kurt/observability/status.py b/src/kurt/observability/status.py index abf618b1..75f85ac7 100644 --- a/src/kurt/observability/status.py +++ b/src/kurt/observability/status.py @@ -91,13 +91,16 @@ def get_live_status(db: "DoltDB", workflow_id: str) -> dict[str, Any] | None: # Parse inputs inputs = 
_parse_json_field(workflow_row.get("inputs")) - # Parse metadata for cli_command + # Parse metadata for cli_command and output info metadata = _parse_json_field(workflow_row.get("metadata_json")) cli_command = metadata.get("cli_command") if metadata else None # Determine effective status status = _determine_effective_status(workflow_row["status"], steps) + # Build output/result section from metadata and steps + output = _build_output_summary(metadata, steps, latest_events) + return { "workflow_id": full_id, "name": workflow_row.get("workflow", "unknown"), @@ -111,6 +114,7 @@ def get_live_status(db: "DoltDB", workflow_id: str) -> dict[str, Any] | None: "error": workflow_row.get("error"), "started_at": _format_datetime(workflow_row.get("started_at")), "completed_at": _format_datetime(workflow_row.get("completed_at")), + "output": output, } @@ -346,6 +350,83 @@ def _determine_effective_status( return workflow_status +def _build_output_summary( + metadata: dict[str, Any] | None, + steps: list[dict[str, Any]], + events: list[dict[str, Any]], +) -> dict[str, Any]: + """Build output summary from workflow metadata, steps, and events. 
+ + For agent workflows, extracts: + - agent_turns: Number of conversation turns + - tokens_in/out: Token usage + - cost_usd: API cost + - tool_calls: Number of tool invocations + - stop_reason: Why the workflow stopped + + For tool workflows, extracts: + - total_output: Sum of output_count from all steps + - total_success: Sum of success counts + - total_errors: Sum of error counts + + Args: + metadata: Workflow metadata dict from workflow_runs + steps: List of step dicts + events: List of recent events + + Returns: + Dict with output summary information + """ + output: dict[str, Any] = {} + + if metadata: + # Agent workflow metrics (stored by executor.py) + if metadata.get("agent_turns") is not None: + output["agent_turns"] = metadata["agent_turns"] + if metadata.get("tokens_in") is not None: + output["tokens_in"] = metadata["tokens_in"] + if metadata.get("tokens_out") is not None: + output["tokens_out"] = metadata["tokens_out"] + if metadata.get("cost_usd") is not None: + output["cost_usd"] = metadata["cost_usd"] + if metadata.get("tool_calls") is not None: + output["tool_calls"] = metadata["tool_calls"] + if metadata.get("stop_reason"): + output["stop_reason"] = metadata["stop_reason"] + if metadata.get("duration_seconds") is not None: + output["duration_seconds"] = metadata["duration_seconds"] + + # Calculate totals from steps + total_output = 0 + total_success = 0 + total_errors = 0 + + for step in steps: + output_count = step.get("output_count") or 0 + success_count = step.get("success") or 0 + error_count = step.get("error") or 0 + + total_output += output_count + total_success += success_count + total_errors += error_count + + if total_output > 0: + output["total_output"] = total_output + if total_success > 0: + output["total_success"] = total_success + if total_errors > 0: + output["total_errors"] = total_errors + + # Check events for result_preview (agent workflow result text) + for event in events: + event_metadata = 
_parse_json_field(event.get("metadata_json")) + if event_metadata and event_metadata.get("result_preview"): + output["result_preview"] = event_metadata["result_preview"] + break + + return output + + def _map_step_status(status: str) -> str: """Map step status to frontend-expected values.""" status_map = { diff --git a/src/kurt/web/api/server.py b/src/kurt/web/api/server.py index 0072d391..02ef5344 100644 --- a/src/kurt/web/api/server.py +++ b/src/kurt/web/api/server.py @@ -1577,6 +1577,150 @@ def api_cancel_workflow(workflow_id: str): raise HTTPException(status_code=500, detail=str(e)) +@app.post("/api/workflows/{workflow_id}/retry") +def api_retry_workflow(workflow_id: str): + """Retry a workflow by starting a new run with the same inputs. + + Reads the original workflow's inputs from workflow_runs table, + then starts a new workflow run with the same configuration. + + Only works for completed (success, error, cancelled) workflows. + """ + db = _get_dolt_db() + if db is None: + raise HTTPException(status_code=503, detail="Database not available") + + try: + # Get original workflow details + result = db.query( + "SELECT id, workflow, status, inputs, metadata_json FROM workflow_runs WHERE id LIKE CONCAT(?, '%') LIMIT 1", + [workflow_id], + ) + if not result.rows: + raise HTTPException(status_code=404, detail="Workflow not found") + + row = result.rows[0] + current_status = row.get("status") + + # Only allow retry for terminal states + if current_status in ("pending", "running", "canceling"): + raise HTTPException( + status_code=400, + detail=f"Cannot retry workflow with status '{current_status}'. 
Wait for it to complete.", + ) + + workflow_name = row.get("workflow", "") + original_id = row.get("id") + + # Parse inputs + raw_inputs = row.get("inputs") + inputs = {} + if raw_inputs: + try: + inputs = json.loads(raw_inputs) if isinstance(raw_inputs, str) else raw_inputs + except Exception: + pass + + # Parse metadata to get workflow type and definition name + metadata = {} + raw_metadata = row.get("metadata_json") + if raw_metadata: + try: + metadata = json.loads(raw_metadata) if isinstance(raw_metadata, str) else raw_metadata + except Exception: + pass + + workflow_type = metadata.get("workflow_type") + definition_name = metadata.get("definition_name") + + # Handle agent workflows + if workflow_type == "agent" and definition_name: + from kurt.workflows.agents import run_definition + + # Extract original inputs from the inputs dict (if they were stored there) + agent_inputs = inputs.get("inputs") if isinstance(inputs.get("inputs"), dict) else {} + + result = run_definition( + definition_name=definition_name, + inputs=agent_inputs, + background=True, + trigger="retry", + ) + return { + "status": "started", + "workflow_id": result.get("workflow_id"), + "original_workflow_id": original_id, + } + + # Handle map/fetch workflows via CLI subprocess + if workflow_name in ("map_workflow", "fetch_workflow"): + # Build CLI command from inputs + cmd = ["kurt", "content"] + + if workflow_name == "map_workflow": + cmd.append("map") + if inputs.get("source_url"): + cmd.append(inputs["source_url"]) + elif inputs.get("url"): + cmd.append(inputs["url"]) + if inputs.get("max_depth") is not None: + cmd.extend(["--max-depth", str(inputs["max_depth"])]) + if inputs.get("max_pages") is not None: + cmd.extend(["--max-pages", str(inputs["max_pages"])]) + if inputs.get("include_pattern"): + cmd.extend(["--include", inputs["include_pattern"]]) + if inputs.get("exclude_pattern"): + cmd.extend(["--exclude", inputs["exclude_pattern"]]) + elif workflow_name == "fetch_workflow": + 
cmd.append("fetch") + if inputs.get("fetch_engine"): + cmd.extend(["--engine", inputs["fetch_engine"]]) + + # Always run in background + cmd.append("--background") + + # Run the CLI command + proc = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=str(Path.cwd()), + ) + + stdout, stderr = proc.communicate(timeout=30) + + if proc.returncode != 0: + error_msg = stderr.decode("utf-8", errors="replace").strip() if stderr else "Unknown error" + raise HTTPException(status_code=500, detail=f"Failed to start workflow: {error_msg}") + + # Try to extract workflow ID from output + output = stdout.decode("utf-8", errors="replace").strip() + new_workflow_id = None + + # Look for workflow ID in output (format: "Workflow started: ") + import re + match = re.search(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", output, re.IGNORECASE) + if match: + new_workflow_id = match.group(0) + + return { + "status": "started", + "workflow_id": new_workflow_id, + "original_workflow_id": original_id, + } + + # Unsupported workflow type + raise HTTPException( + status_code=400, + detail=f"Retry not supported for workflow type: {workflow_name}", + ) + + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + @app.get("/api/workflows/{workflow_id}/status") def api_get_workflow_status(workflow_id: str): """Get live workflow status with progress information from Dolt. 
diff --git a/src/kurt/web/client/src/components/WorkflowList.jsx b/src/kurt/web/client/src/components/WorkflowList.jsx index 93db27be..62c3dfc7 100644 --- a/src/kurt/web/client/src/components/WorkflowList.jsx +++ b/src/kurt/web/client/src/components/WorkflowList.jsx @@ -177,6 +177,23 @@ export default function WorkflowList({ onAttachWorkflow }) { } } + const handleRetry = async (workflowId) => { + try { + const response = await fetch(apiUrl(`/api/workflows/${workflowId}/retry`), { + method: 'POST', + }) + if (!response.ok) { + const data = await response.json().catch(() => ({})) + throw new Error(data.detail || `Retry failed: ${response.status}`) + } + // Refresh list after retry to show the new workflow + fetchWorkflows(false) + } catch (err) { + console.error('Failed to retry workflow:', err) + // Could show error to user here + } + } + const handleAttach = (workflowId) => { onAttachWorkflow?.(workflowId) } @@ -280,6 +297,7 @@ export default function WorkflowList({ onAttachWorkflow }) { onToggleExpand={() => handleToggleExpand(workflow.workflow_uuid)} onAttach={() => handleAttach(workflow.workflow_uuid)} onCancel={() => handleCancel(workflow.workflow_uuid)} + onRetry={() => handleRetry(workflow.workflow_uuid)} getStatusBadgeClass={getStatusBadgeClass} /> ))} diff --git a/src/kurt/web/client/src/components/WorkflowRow.jsx b/src/kurt/web/client/src/components/WorkflowRow.jsx index bb8143b2..928aaaaa 100644 --- a/src/kurt/web/client/src/components/WorkflowRow.jsx +++ b/src/kurt/web/client/src/components/WorkflowRow.jsx @@ -642,6 +642,306 @@ function WorkflowChildBox({ ) } +function WorkflowConfigSection({ workflow, liveStatus }) { + const [isExpanded, setIsExpanded] = useState(false) + + // Get inputs from liveStatus or workflow + const inputs = liveStatus?.inputs || workflow?.inputs + const metadata = liveStatus?.metadata || {} + + // Get workflow-specific metadata + const workflowType = workflow?.workflow_type || metadata?.workflow_type + const definitionName = 
workflow?.definition_name || metadata?.definition_name + const trigger = workflow?.trigger || metadata?.trigger + + // Parse inputs if it's a string + let parsedInputs = inputs + if (typeof inputs === 'string') { + try { + parsedInputs = JSON.parse(inputs) + } catch { + parsedInputs = null + } + } + + // Filter out internal/empty fields from inputs + const getDisplayInputs = () => { + if (!parsedInputs || typeof parsedInputs !== 'object') return null + if (Array.isArray(parsedInputs)) return parsedInputs + + const filtered = {} + for (const [key, value] of Object.entries(parsedInputs)) { + // Skip internal fields and empty values + if (key.startsWith('_')) continue + if (value === null || value === undefined || value === '') continue + if (key === 'dry_run' && value === false) continue + filtered[key] = value + } + return Object.keys(filtered).length > 0 ? filtered : null + } + + const displayInputs = getDisplayInputs() + + // Don't show section if there's nothing to display + const hasContent = definitionName || trigger || displayInputs || workflowType + if (!hasContent) return null + + // Build preview text + const previewParts = [] + if (definitionName) previewParts.push(definitionName) + if (trigger) previewParts.push(`trigger: ${trigger}`) + if (displayInputs && !definitionName) { + const inputCount = Object.keys(displayInputs).length + previewParts.push(`${inputCount} input${inputCount !== 1 ? 's' : ''}`) + } + const previewText = previewParts.join(' | ') || 'Configuration' + + const formatValue = (value) => { + if (typeof value === 'boolean') return value ? 'true' : 'false' + if (typeof value === 'number') return String(value) + if (Array.isArray(value)) return value.join(', ') + if (typeof value === 'object') return JSON.stringify(value) + return String(value) + } + + return ( +
+
setIsExpanded(!isExpanded)} + role="button" + tabIndex={0} + onKeyDown={(e) => { + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault() + setIsExpanded(!isExpanded) + } + }} + > + + {isExpanded ? : } + + Config + {previewText} +
+ {isExpanded && ( +
+
+ {workflowType && ( +
+ Type + {workflowType} +
+ )} + {definitionName && ( +
+ Definition + + {definitionName} + +
+ )} + {trigger && ( +
+ Trigger + {trigger} +
+ )} + {displayInputs && ( + <> +
+
+ Inputs +
+ {Object.entries(displayInputs).map(([key, value]) => ( +
+ {key} + + {formatValue(value)} + +
+ ))} + + )} +
+
+ )} +
+ ) +} + +function WorkflowOutputSection({ workflow, liveStatus }) { + const [isExpanded, setIsExpanded] = useState(false) + + // Get output info from liveStatus or workflow + const output = liveStatus?.output || {} + const workflowError = liveStatus?.error || workflow?.error + const workflowStatus = liveStatus?.status || workflow?.status + + // Determine if workflow is completed (SUCCESS or ERROR) + const isCompleted = ['SUCCESS', 'ERROR', 'CANCELLED', 'completed', 'failed', 'canceled', 'completed_with_errors'].includes(workflowStatus) + + // Check if there's anything to show + const hasAgentOutput = output.agent_turns != null || output.tokens_in != null || output.tool_calls != null || output.stop_reason + const hasToolOutput = output.total_output != null || output.total_success != null + const hasErrors = output.total_errors > 0 || workflowError + const hasResultPreview = !!output.result_preview + + // Don't show for running workflows or if there's nothing to display + if (!isCompleted) return null + if (!hasAgentOutput && !hasToolOutput && !hasErrors && !hasResultPreview) return null + + // Auto-expand if there are errors + const shouldAutoExpand = hasErrors + + // Build preview text + const previewParts = [] + if (workflowStatus === 'ERROR' || workflowStatus === 'failed' || workflowStatus === 'canceled') { + previewParts.push('Error') + } else if (output.total_errors > 0) { + previewParts.push(`${output.total_errors} error${output.total_errors !== 1 ? 's' : ''}`) + } else { + previewParts.push('Completed') + } + + if (output.agent_turns != null) { + previewParts.push(`${output.agent_turns} turn${output.agent_turns !== 1 ? 's' : ''}`) + } + if (output.tool_calls != null) { + previewParts.push(`${output.tool_calls} tool call${output.tool_calls !== 1 ? 
's' : ''}`) + } + if (output.total_success != null && output.total_success > 0) { + previewParts.push(`${output.total_success} processed`) + } + + const previewText = previewParts.join(' | ') + + const formatCostValue = (cost) => { + if (cost == null) return '-' + if (cost < 0.01) return `$${cost.toFixed(4)}` + return `$${cost.toFixed(2)}` + } + + const formatTokens = (tokens) => { + if (tokens == null) return '-' + return tokens.toLocaleString() + } + + return ( +
+
setIsExpanded(!isExpanded)} + role="button" + tabIndex={0} + onKeyDown={(e) => { + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault() + setIsExpanded(!isExpanded) + } + }} + > + + {isExpanded || shouldAutoExpand ? : } + + Output + + {previewText} + +
+ {(isExpanded || shouldAutoExpand) && ( +
+ {/* Error message display */} + {workflowError && ( +
+
Error
+
{workflowError}
+
+ )} + + {/* Agent workflow output */} + {hasAgentOutput && ( +
+ {output.agent_turns != null && ( +
+ Turns + {output.agent_turns} +
+ )} + {output.tool_calls != null && ( +
+ Tool Calls + {output.tool_calls} +
+ )} + {output.tokens_in != null && ( +
+ Tokens In + {formatTokens(output.tokens_in)} +
+ )} + {output.tokens_out != null && ( +
+ Tokens Out + {formatTokens(output.tokens_out)} +
+ )} + {output.cost_usd != null && ( +
+ Cost + {formatCostValue(output.cost_usd)} +
+ )} + {output.stop_reason && ( +
+ Stop Reason + + {output.stop_reason} + +
+ )} +
+ )} + + {/* Tool workflow output */} + {hasToolOutput && !hasAgentOutput && ( +
+ {output.total_output != null && ( +
+ Total Output + {output.total_output} +
+ )} + {output.total_success != null && ( +
+ Successful + {output.total_success} +
+ )} + {output.total_errors != null && output.total_errors > 0 && ( +
+ Errors + {output.total_errors} +
+ )} +
+ )} + + {/* Result preview for agent workflows */} + {hasResultPreview && ( +
+
Result Preview
+
+ {output.result_preview} +
+
+ )} +
+ )} +
+ ) +} + const formatTokenCount = (tokens) => { if (tokens == null) return null if (tokens >= 1000000) return `${(tokens / 1000000).toFixed(1)}M` @@ -661,12 +961,15 @@ export default function WorkflowRow({ onToggleExpand, onAttach, onCancel, + onRetry, getStatusBadgeClass, depth = 0, }) { const [liveStatus, setLiveStatus] = useState(null) + const [isRetrying, setIsRetrying] = useState(false) const isRunning = workflow.status === 'PENDING' || workflow.status === 'ENQUEUED' + const canRetry = ['SUCCESS', 'ERROR', 'CANCELLED', 'WARNING', 'RETRIES_EXCEEDED'].includes(workflow.status) // Compute effective status - show WARNING if there are errors even if workflow "succeeded" const getEffectiveStatus = () => { @@ -797,6 +1100,17 @@ export default function WorkflowRow({ } } + const handleRetry = async (e) => { + e.stopPropagation() + if (isRetrying || !onRetry) return + setIsRetrying(true) + try { + await onRetry() + } finally { + setIsRetrying(false) + } + } + return (
)} + {canRetry && onRetry && ( + + )}
@@ -931,6 +1256,7 @@ export default function WorkflowRow({
+ {liveStatus && ( )} + )} diff --git a/src/kurt/web/client/src/styles.css b/src/kurt/web/client/src/styles.css index 756bb145..86067aeb 100644 --- a/src/kurt/web/client/src/styles.css +++ b/src/kurt/web/client/src/styles.css @@ -3191,6 +3191,20 @@ html { background: var(--color-error-hover); } +.workflow-retry { + background: var(--color-accent); + color: var(--color-text-inverse); +} + +.workflow-retry:hover { + background: var(--color-accent-hover); +} + +.workflow-retry:disabled { + opacity: 0.6; + cursor: not-allowed; +} + /* Workflow Row Details */ .workflow-row-details { padding: var(--space-2); @@ -3597,6 +3611,163 @@ html { font-size: var(--text-xs); } +/* Workflow Config Section */ +.workflow-config-grid { + display: flex; + flex-direction: column; + gap: var(--space-1); +} + +.workflow-config-row { + display: flex; + align-items: baseline; + gap: var(--space-2); + font-size: var(--text-xs); +} + +.workflow-config-label { + color: var(--color-text-tertiary); + flex-shrink: 0; + min-width: 70px; +} + +.workflow-config-value { + color: var(--color-text-primary); + word-break: break-word; +} + +.workflow-config-definition { + font-family: var(--font-mono); + color: var(--color-violet); + font-weight: var(--font-medium); +} + +.workflow-config-divider { + border-top: 1px solid var(--color-border); + margin: var(--space-1) 0; +} + +.workflow-config-inputs-header { + margin-top: var(--space-1); +} + +.workflow-config-inputs-header .workflow-config-label { + font-weight: var(--font-semibold); + color: var(--color-text-secondary); +} + +.workflow-config-input-row { + padding-left: var(--space-2); +} + +.workflow-config-input-key { + font-family: var(--font-mono); + color: var(--color-text-secondary); + flex-shrink: 0; + min-width: 100px; +} + +.workflow-config-input-value { + font-family: var(--font-mono); + color: var(--color-text-primary); + word-break: break-word; + overflow: hidden; + text-overflow: ellipsis; + max-width: 300px; +} + +/* Workflow Output 
Section */ +.workflow-output-error { + border-color: var(--color-error); +} + +.workflow-output-error .workflow-section-header { + background: rgba(var(--color-error-rgb), 0.1); +} + +.workflow-output-error-text { + color: var(--color-error) !important; +} + +.workflow-output-error-block { + background: rgba(var(--color-error-rgb), 0.1); + border: 1px solid var(--color-error); + border-radius: var(--radius-sm); + padding: var(--space-2); + margin-bottom: var(--space-2); +} + +.workflow-output-error-label { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-error); + margin-bottom: var(--space-1); +} + +.workflow-output-error-message { + font-family: var(--font-mono); + font-size: var(--text-xs); + color: var(--color-text-primary); + white-space: pre-wrap; + word-break: break-word; +} + +.workflow-output-grid { + display: grid; + grid-template-columns: auto 1fr; + gap: var(--space-1) var(--space-3); + align-items: baseline; +} + +.workflow-output-row { + display: contents; +} + +.workflow-output-label { + font-size: var(--text-xs); + color: var(--color-text-tertiary); + white-space: nowrap; +} + +.workflow-output-value { + font-size: var(--text-xs); + font-family: var(--font-mono); + color: var(--color-text-primary); +} + +.workflow-output-success { + color: var(--color-success); +} + +.workflow-output-cost { + color: var(--color-violet); +} + +.workflow-output-preview { + margin-top: var(--space-2); + padding-top: var(--space-2); + border-top: 1px solid var(--color-border); +} + +.workflow-output-preview-label { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-text-secondary); + margin-bottom: var(--space-1); +} + +.workflow-output-preview-content { + font-size: var(--text-xs); + color: var(--color-text-primary); + background: var(--color-bg-secondary); + padding: var(--space-2); + border-radius: var(--radius-sm); + white-space: pre-wrap; + word-break: break-word; + max-height: 200px; + 
overflow: auto; +} + .workflow-step-box { border: 1px solid var(--color-border); border-radius: var(--radius-sm); From a0c5af399463a52bd13d02652e46d6db61234c8b Mon Sep 17 00:00:00 2001 From: Kurt User Date: Wed, 28 Jan 2026 07:57:24 +0000 Subject: [PATCH 12/19] feat: Complete workflow observability UI with timeline, metrics, and detail panel - Add WorkflowTimeline.jsx: Horizontal bar chart visualization of step execution - Color-coded status (green=success, red=error, blue=running, gray=pending) - Duration bars proportional to execution time - Hover tooltips with step details - Add WorkflowMetrics.jsx: Aggregate statistics dashboard - Total workflows by status (success/error/running counts) - Total cost and tokens across all workflows - Average duration - Add WorkflowDetailPanel.jsx: Dedicated workflow detail view - Full workflow metadata and inputs display - Integrated timeline visualization - Expandable step cards with error details - Action buttons (cancel, retry, attach terminal) - Auto-refresh toggle with adaptive polling - Update CLAUDE.md: Document Workflow Observability API - Table schemas (workflow_runs, step_logs, step_events) - All API endpoints with request/response examples - Status values and transitions - Programmatic usage examples Co-Authored-By: Claude Opus 4.5 --- CLAUDE.md | 434 +++++++ src/kurt/web/client/src/App.jsx | 71 +- .../client/src/components/WorkflowList.jsx | 6 +- .../client/src/components/WorkflowMetrics.jsx | 168 +++ .../web/client/src/components/WorkflowRow.jsx | 15 + .../src/components/WorkflowTimeline.jsx | 206 +++ .../client/src/panels/WorkflowDetailPanel.jsx | 690 ++++++++++ .../web/client/src/panels/WorkflowsPanel.jsx | 6 +- src/kurt/web/client/src/styles.css | 1137 +++++++++++++++-- 9 files changed, 2637 insertions(+), 96 deletions(-) create mode 100644 src/kurt/web/client/src/components/WorkflowMetrics.jsx create mode 100644 src/kurt/web/client/src/components/WorkflowTimeline.jsx create mode 100644 
src/kurt/web/client/src/panels/WorkflowDetailPanel.jsx diff --git a/CLAUDE.md b/CLAUDE.md index be5b586f..6c3b68a8 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1316,3 +1316,437 @@ Context about the task. | Quick task | 5-10 | 50,000 | 120 | | Standard | 15-25 | 150,000 | 600 | | Complex | 30-50 | 300,000 | 1800 | + +--- + +## Workflow Observability API + +Kurt uses Dolt (a Git-versioned database) for workflow tracking with three main tables. The observability system provides comprehensive tracking of workflow execution, step-level progress, and real-time event streaming. + +### Table Schemas + +#### workflow_runs + +Main workflow execution records. One row per workflow run. + +| Column | Type | Description | +|--------|------|-------------| +| `id` | VARCHAR(36) | Primary key (UUID) | +| `workflow` | VARCHAR(255) | Workflow name (e.g., "map_workflow", "fetch_workflow") | +| `status` | VARCHAR(20) | Current status (see Status Values below) | +| `started_at` | DATETIME | When the workflow started | +| `completed_at` | DATETIME | When the workflow completed (NULL if running) | +| `error` | TEXT | Error message (for failed/cancelled workflows) | +| `inputs` | JSON | Input parameters passed to the workflow | +| `metadata_json` | JSON | Additional metadata (workflow_type, cli_command, etc.) | +| `user_id` | VARCHAR | User ID (multi-tenancy) | +| `workspace_id` | VARCHAR | Workspace ID (multi-tenancy) | + +**Indexes:** `status`, `started_at`, `workflow` + +#### step_logs + +Summary records per step per workflow run. Updated in place as steps progress. 
+ +| Column | Type | Description | +|--------|------|-------------| +| `id` | VARCHAR(36) | Primary key (UUID) | +| `run_id` | VARCHAR(36) | Foreign key to workflow_runs.id | +| `step_id` | VARCHAR(255) | Step identifier (e.g., "fetch", "extract", "map") | +| `tool` | VARCHAR(50) | Tool name (e.g., "FetchTool", "MapTool") | +| `status` | VARCHAR(20) | Step status (pending, running, completed, failed, skipped) | +| `started_at` | DATETIME | When the step started | +| `completed_at` | DATETIME | When the step completed | +| `input_count` | INT | Number of input items processed | +| `output_count` | INT | Number of output items produced | +| `error_count` | INT | Number of errors encountered | +| `errors` | JSON | List of error details: `[{row_idx, error_type, message}]` | +| `metadata_json` | JSON | Additional step metadata | + +**Indexes:** `(run_id, step_id)`, `step_id` + +#### step_events + +Append-only event stream for real-time progress tracking. + +| Column | Type | Description | +|--------|------|-------------| +| `id` | BIGINT | Auto-increment primary key (for cursor-based pagination) | +| `run_id` | VARCHAR(36) | Foreign key to workflow_runs.id | +| `step_id` | VARCHAR(255) | Step identifier | +| `substep` | VARCHAR(255) | Optional substep name (e.g., "fetch", "parse") | +| `status` | VARCHAR(20) | Event status: "running", "progress", "completed", "failed" | +| `created_at` | DATETIME | Event timestamp | +| `current` | INT | Current progress count | +| `total` | INT | Total items to process | +| `message` | TEXT | Human-readable status message | +| `metadata_json` | JSON | Additional event data | + +**Indexes:** `(run_id, id)` + +### Status Values + +**Workflow Status** (workflow_runs.status): +- `pending` - Workflow queued but not started +- `running` - Workflow actively executing +- `completed` - Workflow finished successfully +- `failed` - Workflow failed with error +- `canceling` - Cancellation requested, waiting for cleanup +- `canceled` - Workflow 
cancelled by user + +**Step Status** (step_logs.status): +- `pending` - Step not yet started +- `running` - Step actively executing +- `completed` - Step finished successfully +- `failed` - Step failed with error +- `skipped` - Step skipped (e.g., dry-run mode) + +**Event Status** (step_events.status): +- `running` - Step started or continuing +- `progress` - Progress update (includes current/total) +- `completed` - Step finished +- `failed` - Step failed + +**Valid Status Transitions:** +``` +Workflow: pending -> running -> completed + -> running -> failed + -> running -> canceling -> canceled + +Step: pending -> running -> completed + -> running -> failed + -> running -> canceled +``` + +### API Endpoints + +#### List Workflows + +``` +GET /api/workflows +``` + +Query parameters: +- `status` (optional): Filter by status +- `limit` (default: 50, max: 200): Number of results +- `offset` (default: 0): Pagination offset +- `search` (optional): Search by ID or workflow name +- `workflow_type` (optional): Filter by type ("agent", "tool") +- `parent_id` (optional): Filter children of a parent workflow + +Response: +```json +{ + "workflows": [ + { + "workflow_uuid": "abc-123-def", + "name": "map_workflow", + "status": "completed", + "created_at": "2024-01-15T10:30:00", + "updated_at": "2024-01-15T10:35:00", + "error": null, + "parent_workflow_id": null, + "workflow_type": "tool" + } + ], + "total": 42, + "has_more": true, + "offset": 0, + "limit": 50 +} +``` + +#### Get Workflow Details + +``` +GET /api/workflows/{workflow_id} +``` + +Response: +```json +{ + "workflow_uuid": "abc-123-def", + "name": "map_workflow", + "status": "completed", + "created_at": "2024-01-15T10:30:00", + "updated_at": "2024-01-15T10:35:00", + "error": null, + "inputs": {"url": "https://example.com", "max_pages": 100}, + "metadata": {"workflow_type": "tool", "cli_command": "kurt content map ..."} +} +``` + +#### Get Live Status with Progress + +``` +GET /api/workflows/{workflow_id}/status +``` + 
+Returns comprehensive status including step details and progress. + +Response: +```json +{ + "workflow_id": "abc-123-def", + "name": "map_workflow", + "status": "running", + "stage": "fetch", + "progress": {"current": 45, "total": 100}, + "steps": [ + { + "name": "fetch", + "status": "running", + "success": 45, + "error": 2, + "duration_ms": 12500, + "errors": ["Timeout fetching page 12", "404 on page 37"], + "step_type": "step", + "input_count": 100, + "output_count": 47 + } + ], + "duration_ms": 15000, + "inputs": {"url": "https://example.com"}, + "cli_command": "kurt content map https://example.com", + "error": null, + "started_at": "2024-01-15T10:30:00", + "completed_at": null, + "output": { + "total_output": 47, + "total_success": 45, + "total_errors": 2 + } +} +``` + +#### Get Step Logs + +``` +GET /api/workflows/{workflow_id}/step-logs +``` + +Query parameters: +- `step` (optional): Filter by step name +- `limit` (default: 100, max: 500): Number of results + +Response: +```json +{ + "logs": [ + { + "step_id": "fetch", + "tool": "FetchTool", + "status": "completed", + "started_at": "2024-01-15T10:30:05", + "completed_at": "2024-01-15T10:32:15", + "input_count": 100, + "output_count": 95, + "error_count": 5, + "errors": [ + {"row_idx": 12, "error_type": "timeout", "message": "Request timed out"} + ], + "metadata": {} + } + ], + "step": null +} +``` + +#### Get Workflow Logs (Events) + +``` +GET /api/workflows/{workflow_id}/logs +``` + +Query parameters: +- `step_id` (optional): Filter by step ID +- `since_id` (default: 0): Return events after this ID (cursor pagination) +- `limit` (default: 100, max: 1000): Number of results +- `include_file_logs` (default: true): Include file-based logs + +Response: +```json +{ + "workflow_id": "abc-123-def", + "events": [ + { + "id": 1234, + "step_id": "fetch", + "substep": "download", + "status": "progress", + "current": 45, + "total": 100, + "message": "Fetched page 45 of 100", + "created_at": "2024-01-15T10:31:00" + } + 
],
+  "total_events": 50,
+  "has_more": false,
+  "since_id": 0,
+  "limit": 100,
+  "file_content": null
+}
+```
+
+#### Stream Status (SSE)
+
+```
+GET /api/workflows/{workflow_id}/status/stream
+```
+
+Server-Sent Events stream. Sends JSON status updates every 0.5s until workflow completes.
+
+```
+data: {"workflow_id": "abc-123", "status": "running", "progress": {"current": 45, "total": 100}, ...}
+
+data: {"workflow_id": "abc-123", "status": "running", "progress": {"current": 67, "total": 100}, ...}
+
+data: {"workflow_id": "abc-123", "status": "completed", ...}
+```
+
+#### Stream Logs (SSE)
+
+```
+GET /api/workflows/{workflow_id}/logs/stream
+```
+
+Query parameters:
+- `step_id` (optional): Filter by step ID
+
+Server-Sent Events stream for real-time log updates.
+
+```
+data: {"type": "event", "event": {"id": 1234, "step_id": "fetch", "status": "progress", ...}}
+
+data: {"type": "log", "content": "File-based log content..."}
+
+data: {"done": true, "status": "completed"}
+```
+
+#### Cancel Workflow
+
+```
+POST /api/workflows/{workflow_id}/cancel
+```
+
+Sets workflow status to "canceling". The workflow runner must detect this and call `on_workflow_cancel()` to complete.
+
+Response:
+```json
+{"status": "canceling", "workflow_id": "abc-123-def"}
+```
+
+#### Retry Workflow
+
+```
+POST /api/workflows/{workflow_id}/retry
+```
+
+Starts a new workflow run with the same inputs. Only works for terminal states (completed, failed, canceled).
+ +Response: +```json +{ + "status": "started", + "workflow_id": "new-workflow-id", + "original_workflow_id": "abc-123-def" +} +``` + +### Querying Workflow Status Programmatically + +Using the Python API: + +```python +from kurt.observability.status import get_live_status +from kurt.db.dolt import DoltDB + +db = DoltDB(".dolt") +status = get_live_status(db, "abc-123") + +if status: + print(f"Status: {status['status']}") + print(f"Progress: {status['progress']['current']}/{status['progress']['total']}") + for step in status['steps']: + print(f" {step['name']}: {step['status']} ({step['success']}/{step['output_count']})") +``` + +Using the Lifecycle API: + +```python +from kurt.observability.lifecycle import WorkflowLifecycle +from kurt.db.dolt import DoltDB + +db = DoltDB(".dolt") +lifecycle = WorkflowLifecycle(db) + +# Create and track a workflow +run_id = lifecycle.create_run("my_workflow", inputs={"url": "https://example.com"}) + +# Track step progress +lifecycle.create_step_log(run_id, "fetch", "FetchTool", input_count=100) +lifecycle.update_step_log(run_id, "fetch", status="completed", output_count=95, error_count=5) + +# Complete workflow +lifecycle.update_status(run_id, "completed") +``` + +Using the Event Tracker for high-throughput progress: + +```python +from kurt.observability.tracking import track_event, EventTracker +from kurt.db.dolt import DoltDB + +db = DoltDB(".dolt") + +# Single event +track_event( + run_id="abc-123", + step_id="fetch", + substep="download", + status="progress", + current=45, + total=100, + message="Fetched page 45 of 100", + db=db, +) + +# Batched events (high throughput) +with EventTracker(db) as tracker: + for i in range(1000): + tracker.track( + run_id="abc-123", + step_id="process", + status="progress", + current=i, + total=1000, + ) +``` + +### Metadata Conventions + +The `metadata_json` field in `workflow_runs` stores workflow-specific data: + +| Key | Description | Example | +|-----|-------------|---------| +| 
`workflow_type` | Type classification | "agent", "tool", "map", "fetch" | +| `cli_command` | Original CLI command | "kurt content map https://..." | +| `parent_workflow_id` | Parent workflow UUID | "abc-123-def" | +| `parent_step_name` | Step that spawned this workflow | "claude_execution" | +| `definition_name` | Agent workflow definition | "daily-report" | +| `trigger` | How workflow was started | "cli", "api", "schedule", "retry" | +| `agent_turns` | Agent conversation turns | 15 | +| `tokens_in` | Input tokens used | 50000 | +| `tokens_out` | Output tokens used | 12000 | +| `cost_usd` | API cost | 0.45 | +| `tool_calls` | Number of tool invocations | 42 | +| `stop_reason` | Why agent stopped | "end_turn", "max_turns" | + +### Files + +- `src/kurt/observability/models.py` - SQLModel table definitions +- `src/kurt/observability/lifecycle.py` - WorkflowLifecycle class for tracking +- `src/kurt/observability/tracking.py` - Event tracking (track_event, EventTracker) +- `src/kurt/observability/status.py` - Status query functions (get_live_status) +- `src/kurt/observability/streaming.py` - SSE streaming utilities +- `src/kurt/web/api/server.py` - API endpoints diff --git a/src/kurt/web/client/src/App.jsx b/src/kurt/web/client/src/App.jsx index 8d05ad1c..48527715 100644 --- a/src/kurt/web/client/src/App.jsx +++ b/src/kurt/web/client/src/App.jsx @@ -14,6 +14,7 @@ import EmptyPanel from './panels/EmptyPanel' import ReviewPanel from './panels/ReviewPanel' import WorkflowsPanel from './panels/WorkflowsPanel' import WorkflowTerminalPanel from './panels/WorkflowTerminalPanel' +import WorkflowDetailPanel from './panels/WorkflowDetailPanel' import ClaudeStreamChat from './components/chat/ClaudeStreamChat' // POC mode - add ?poc=chat, ?poc=diff, or ?poc=tiptap-diff to URL to test @@ -31,6 +32,7 @@ const components = { review: ReviewPanel, workflows: WorkflowsPanel, workflowTerminal: WorkflowTerminalPanel, + workflowDetail: WorkflowDetailPanel, } const KNOWN_COMPONENTS = new 
Set(Object.keys(components)) @@ -1626,6 +1628,72 @@ export default function App() { [dockApi] ) + // Open a workflow detail panel + const openWorkflowDetail = useCallback( + (workflowId) => { + if (!dockApi) return + + const panelId = `workflow-detail-${workflowId}` + const existingPanel = dockApi.getPanel(panelId) + + if (existingPanel) { + existingPanel.api.setActive() + return + } + + // Add in center area + const position = centerGroupRef.current + ? { referenceGroup: centerGroupRef.current } + : { direction: 'right', referencePanel: 'filetree' } + + // Close empty panel if present + const emptyPanel = dockApi.getPanel('empty-center') + + const panel = dockApi.addPanel({ + id: panelId, + component: 'workflowDetail', + title: `Workflow: ${workflowId.slice(0, 8)}`, + position, + params: { + workflowId, + onClose: () => { + const p = dockApi.getPanel(panelId) + if (p) p.api.close() + }, + onAttach: openWorkflowTerminal, + onCancel: async (wfId) => { + try { + await fetch(apiUrl(`/api/workflows/${wfId}/cancel`), { method: 'POST' }) + } catch (err) { + console.error('Failed to cancel workflow:', err) + } + }, + onRetry: async (wfId) => { + try { + await fetch(apiUrl(`/api/workflows/${wfId}/retry`), { method: 'POST' }) + } catch (err) { + console.error('Failed to retry workflow:', err) + } + }, + }, + }) + + if (emptyPanel) { + emptyPanel.api.close() + } + + if (panel?.group) { + panel.group.header.hidden = false + centerGroupRef.current = panel.group + panel.group.api.setConstraints({ + minimumHeight: 200, + maximumHeight: Infinity, + }) + } + }, + [dockApi, openWorkflowTerminal] + ) + // Update workflows panel params // projectRoot dependency ensures this runs after layout restoration useEffect(() => { @@ -1636,6 +1704,7 @@ export default function App() { collapsed: collapsed.workflows, onToggleCollapse: toggleWorkflows, onAttachWorkflow: openWorkflowTerminal, + onOpenWorkflowDetail: openWorkflowDetail, }) } // Also update shell panel with collapse state for the 
tab button @@ -1646,7 +1715,7 @@ export default function App() { onToggleCollapse: toggleWorkflows, }) } - }, [dockApi, collapsed.workflows, toggleWorkflows, openWorkflowTerminal, projectRoot]) + }, [dockApi, collapsed.workflows, toggleWorkflows, openWorkflowTerminal, openWorkflowDetail, projectRoot]) // Restore saved tabs when dockApi and projectRoot become available const hasRestoredTabs = useRef(false) diff --git a/src/kurt/web/client/src/components/WorkflowList.jsx b/src/kurt/web/client/src/components/WorkflowList.jsx index 62c3dfc7..e40402c0 100644 --- a/src/kurt/web/client/src/components/WorkflowList.jsx +++ b/src/kurt/web/client/src/components/WorkflowList.jsx @@ -1,5 +1,6 @@ import { useState, useEffect, useCallback, useRef, useMemo } from 'react' import WorkflowRow from './WorkflowRow' +import WorkflowMetrics from './WorkflowMetrics' // Polling intervals in milliseconds const POLLING_FAST = 2000 // When workflows are running @@ -33,7 +34,7 @@ const PAGE_SIZE = 50 const apiBase = import.meta.env.VITE_API_URL || '' const apiUrl = (path) => `${apiBase}${path}` -export default function WorkflowList({ onAttachWorkflow }) { +export default function WorkflowList({ onAttachWorkflow, onOpenWorkflowDetail }) { const [workflows, setWorkflows] = useState([]) const [isLoading, setIsLoading] = useState(false) const [statusFilter, setStatusFilter] = useState('') @@ -277,6 +278,8 @@ export default function WorkflowList({ onAttachWorkflow }) { + +
{error && (
@@ -298,6 +301,7 @@ export default function WorkflowList({ onAttachWorkflow }) { onAttach={() => handleAttach(workflow.workflow_uuid)} onCancel={() => handleCancel(workflow.workflow_uuid)} onRetry={() => handleRetry(workflow.workflow_uuid)} + onOpenDetail={() => onOpenWorkflowDetail?.(workflow.workflow_uuid)} getStatusBadgeClass={getStatusBadgeClass} /> ))} diff --git a/src/kurt/web/client/src/components/WorkflowMetrics.jsx b/src/kurt/web/client/src/components/WorkflowMetrics.jsx new file mode 100644 index 00000000..ab31e151 --- /dev/null +++ b/src/kurt/web/client/src/components/WorkflowMetrics.jsx @@ -0,0 +1,168 @@ +import { useMemo } from 'react' + +/** + * WorkflowMetrics - Displays aggregate statistics for a list of workflows. + * + * Shows summary cards with: + * - Total workflow count by status (success/error/running) + * - Total cost (sum of cost_usd) + * - Total tokens (sum of tokens_in + tokens_out) + * - Average duration + */ + +const formatTokens = (tokens) => { + if (tokens == null || tokens === 0) return '0' + if (tokens >= 1000000) return `${(tokens / 1000000).toFixed(1)}M` + if (tokens >= 1000) return `${(tokens / 1000).toFixed(1)}k` + return tokens.toLocaleString() +} + +const formatCost = (cost) => { + if (cost == null || cost === 0) return '$0.00' + if (cost < 0.01) return `$${cost.toFixed(4)}` + if (cost < 1) return `$${cost.toFixed(3)}` + return `$${cost.toFixed(2)}` +} + +const formatDuration = (ms) => { + if (ms == null || ms === 0) return '-' + if (ms < 1000) return `${Math.round(ms)}ms` + const seconds = ms / 1000 + if (seconds < 60) return `${seconds.toFixed(1)}s` + const minutes = Math.floor(seconds / 60) + const remainingSeconds = Math.round(seconds % 60) + return `${minutes}m ${remainingSeconds}s` +} + +export default function WorkflowMetrics({ workflows }) { + const metrics = useMemo(() => { + if (!workflows || workflows.length === 0) { + return null + } + + let successCount = 0 + let errorCount = 0 + let runningCount = 0 + let 
totalCost = 0 + let totalTokensIn = 0 + let totalTokensOut = 0 + let totalDuration = 0 + let durationCount = 0 + + for (const workflow of workflows) { + // Count by status + switch (workflow.status) { + case 'SUCCESS': + successCount++ + break + case 'ERROR': + case 'RETRIES_EXCEEDED': + errorCount++ + break + case 'PENDING': + case 'ENQUEUED': + runningCount++ + break + default: + // CANCELLED and others + break + } + + // Aggregate cost + if (workflow.cost_usd != null && workflow.cost_usd > 0) { + totalCost += workflow.cost_usd + } + + // Aggregate tokens + if (workflow.tokens_in != null) { + totalTokensIn += workflow.tokens_in + } + if (workflow.tokens_out != null) { + totalTokensOut += workflow.tokens_out + } + + // Calculate average duration from completed workflows + if (workflow.duration_ms != null && workflow.duration_ms > 0) { + totalDuration += workflow.duration_ms + durationCount++ + } + } + + const totalTokens = totalTokensIn + totalTokensOut + const avgDuration = durationCount > 0 ? totalDuration / durationCount : 0 + + return { + total: workflows.length, + successCount, + errorCount, + runningCount, + totalCost, + totalTokens, + totalTokensIn, + totalTokensOut, + avgDuration, + } + }, [workflows]) + + // Don't render if no workflows + if (!metrics) { + return null + } + + return ( +
+
+
Workflows
+
+ {metrics.total} + + {metrics.successCount > 0 && ( + + {metrics.successCount} ok + + )} + {metrics.errorCount > 0 && ( + + {metrics.errorCount} err + + )} + {metrics.runningCount > 0 && ( + + {metrics.runningCount} run + + )} + +
+
+ + {metrics.totalCost > 0 && ( +
+
Total Cost
+
+ {formatCost(metrics.totalCost)} +
+
+ )} + + {metrics.totalTokens > 0 && ( +
+
Tokens
+
+ + {formatTokens(metrics.totalTokensIn)} in / {formatTokens(metrics.totalTokensOut)} out + +
+
+ )} + + {metrics.avgDuration > 0 && ( +
+
Avg Duration
+
+ {formatDuration(metrics.avgDuration)} +
+
+ )} +
+ ) +} diff --git a/src/kurt/web/client/src/components/WorkflowRow.jsx b/src/kurt/web/client/src/components/WorkflowRow.jsx index 928aaaaa..35862256 100644 --- a/src/kurt/web/client/src/components/WorkflowRow.jsx +++ b/src/kurt/web/client/src/components/WorkflowRow.jsx @@ -1,5 +1,6 @@ import { useState, useEffect, useCallback } from 'react' import { Copy, ChevronDown, ChevronRight } from 'lucide-react' +import WorkflowTimeline from './WorkflowTimeline' const apiBase = import.meta.env.VITE_API_URL || '' const apiUrl = (path) => `${apiBase}${path}` @@ -962,6 +963,7 @@ export default function WorkflowRow({ onAttach, onCancel, onRetry, + onOpenDetail, getStatusBadgeClass, depth = 0, }) { @@ -1152,6 +1154,16 @@ export default function WorkflowRow({ )}
e.stopPropagation()}> + {onOpenDetail && ( + + )} {isRunning && ( <>
)} diff --git a/src/kurt/web/client/src/components/WorkflowTimeline.jsx b/src/kurt/web/client/src/components/WorkflowTimeline.jsx new file mode 100644 index 00000000..bbc22aa0 --- /dev/null +++ b/src/kurt/web/client/src/components/WorkflowTimeline.jsx @@ -0,0 +1,206 @@ +import { useState } from 'react' + +/** + * WorkflowTimeline - Horizontal timeline visualization for workflow steps + * + * Shows steps as horizontal bars with: + * - Step name on the left + * - Duration bar proportional to execution time + * - Status color coding (green=success, red=error, blue=running, gray=pending) + * - Hover tooltip with details (duration_ms, output_count, errors) + */ + +const formatDuration = (ms) => { + if (ms == null || ms === 0) return '-' + if (ms < 1000) return `${ms}ms` + const seconds = ms / 1000 + if (seconds < 60) return `${seconds.toFixed(1)}s` + const minutes = Math.floor(seconds / 60) + const remainingSeconds = Math.round(seconds % 60) + return `${minutes}m ${remainingSeconds}s` +} + +const formatStepName = (stepName) => { + if (!stepName) return 'Unknown' + // Convert snake_case to Title Case + return stepName + .split('_') + .map((word) => word.charAt(0).toUpperCase() + word.slice(1)) + .join(' ') +} + +const getStatusColor = (step) => { + const hasErrors = (step.errors && step.errors.length > 0) || step.error > 0 + if (hasErrors || step.status === 'error') return 'error' + if (step.status === 'success' || step.status === 'completed') return 'success' + if (step.status === 'running' || step.status === 'in_progress') return 'running' + return 'pending' +} + +const getStatusLabel = (status) => { + switch (status) { + case 'success': + return 'Success' + case 'error': + return 'Error' + case 'running': + return 'Running' + case 'pending': + default: + return 'Pending' + } +} + +function StepTooltip({ step, position }) { + const statusColor = getStatusColor(step) + const outputCount = step.output_count ?? step.success ?? 0 + const errorCount = step.errors?.length ?? 
step.error ?? 0 + const duration = formatDuration(step.duration_ms) + + return ( +
+
+ + {formatStepName(step.name)} +
+
+
+ Status + {getStatusLabel(statusColor)} +
+
+ Duration + {duration} +
+ {outputCount > 0 && ( +
+ Output + {outputCount} items +
+ )} + {errorCount > 0 && ( +
+ Errors + {errorCount} +
+ )} + {step.errors && step.errors.length > 0 && ( +
+ {step.errors.slice(0, 3).map((err, idx) => ( +
+ {err.length > 80 ? `${err.slice(0, 80)}...` : err} +
+ ))} + {step.errors.length > 3 && ( +
+ +{step.errors.length - 3} more errors +
+ )} +
+ )} +
+
+ ) +} + +function TimelineBar({ step, maxDuration, onHover, onLeave }) { + const statusColor = getStatusColor(step) + const duration = step.duration_ms || 0 + const widthPercent = maxDuration > 0 ? Math.max((duration / maxDuration) * 100, 2) : 2 + + const handleMouseEnter = (e) => { + const rect = e.currentTarget.getBoundingClientRect() + onHover(step, { + x: rect.left + rect.width / 2, + y: rect.top - 8, + }) + } + + return ( +
+
+ {formatStepName(step.name)} +
+
+
+ {formatDuration(duration)} +
+
+ ) +} + +export default function WorkflowTimeline({ steps = [] }) { + const [hoveredStep, setHoveredStep] = useState(null) + const [tooltipPosition, setTooltipPosition] = useState({ x: 0, y: 0 }) + + if (!steps || steps.length === 0) { + return null + } + + // Calculate max duration for scaling bars + const maxDuration = Math.max(...steps.map((s) => s.duration_ms || 0), 1) + + const handleHover = (step, position) => { + setHoveredStep(step) + setTooltipPosition(position) + } + + const handleLeave = () => { + setHoveredStep(null) + } + + return ( +
+
+ Step Timeline + + + + Success + + + + Error + + + + Running + + + + Pending + + +
+
+ {steps.map((step, idx) => ( + + ))} +
+ {hoveredStep && ( + + )} +
+ ) +} diff --git a/src/kurt/web/client/src/panels/WorkflowDetailPanel.jsx b/src/kurt/web/client/src/panels/WorkflowDetailPanel.jsx new file mode 100644 index 00000000..c56cfd37 --- /dev/null +++ b/src/kurt/web/client/src/panels/WorkflowDetailPanel.jsx @@ -0,0 +1,690 @@ +import { useState, useEffect, useCallback, useRef } from 'react' +import { ArrowLeft, RefreshCw, Copy, ExternalLink } from 'lucide-react' +import WorkflowTimeline from '../components/WorkflowTimeline' + +const apiBase = import.meta.env.VITE_API_URL || '' +const apiUrl = (path) => `${apiBase}${path}` + +// Auto-refresh intervals +const REFRESH_INTERVAL_RUNNING = 2000 +const REFRESH_INTERVAL_COMPLETED = 30000 + +const formatDuration = (ms) => { + if (ms == null) return '-' + if (ms < 1000) return `${ms}ms` + const seconds = ms / 1000 + if (seconds < 60) return `${seconds.toFixed(1)}s` + const minutes = Math.floor(seconds / 60) + const remainingSeconds = Math.round(seconds % 60) + return `${minutes}m ${remainingSeconds}s` +} + +const formatDateTime = (dateStr) => { + if (!dateStr) return '-' + try { + // Handle numeric timestamps + if (/^\d+$/.test(dateStr)) { + const date = new Date(parseInt(dateStr, 10)) + if (!isNaN(date.getTime())) return date.toLocaleString() + } + // Try parsing as-is + let date = new Date(dateStr) + if (!isNaN(date.getTime())) return date.toLocaleString() + // Handle space-separated format + if (typeof dateStr === 'string' && dateStr.includes(' ')) { + date = new Date(dateStr.replace(' ', 'T') + 'Z') + if (!isNaN(date.getTime())) return date.toLocaleString() + } + return '-' + } catch { + return '-' + } +} + +const formatTokens = (tokens) => { + if (tokens == null) return '-' + return tokens.toLocaleString() +} + +const formatCost = (cost) => { + if (cost == null) return '-' + if (cost < 0.01) return `$${cost.toFixed(4)}` + return `$${cost.toFixed(2)}` +} + +const getStatusBadgeClass = (status) => { + switch (status) { + case 'SUCCESS': + return 'workflow-status-success' + case 
'WARNING': + return 'workflow-status-warning' + case 'ERROR': + case 'RETRIES_EXCEEDED': + return 'workflow-status-error' + case 'PENDING': + return 'workflow-status-pending' + case 'ENQUEUED': + return 'workflow-status-enqueued' + case 'CANCELLED': + return 'workflow-status-cancelled' + default: + return '' + } +} + +const formatStepName = (stepName) => { + if (!stepName) return 'Unknown' + return stepName + .split('_') + .map((word) => word.charAt(0).toUpperCase() + word.slice(1)) + .join(' ') +} + +function StatusBadge({ status }) { + return ( + + {status} + + ) +} + +function MetadataSection({ workflow, liveStatus }) { + const inputs = liveStatus?.inputs || workflow?.inputs + const metadata = liveStatus?.metadata || {} + + const workflowType = workflow?.workflow_type || metadata?.workflow_type + const definitionName = workflow?.definition_name || metadata?.definition_name + const trigger = workflow?.trigger || metadata?.trigger + + return ( +
+
+
+ Type + + + {workflowType || 'unknown'} + + +
+ {definitionName && ( +
+ Definition + {definitionName} +
+ )} + {trigger && ( +
+ Trigger + {trigger} +
+ )} +
+ Duration + + {formatDuration(liveStatus?.duration_ms)} + +
+
+ Created + {formatDateTime(workflow?.created_at)} +
+
+ Updated + {formatDateTime(workflow?.updated_at)} +
+
+
+ ) +} + +function InputsSection({ liveStatus }) { + const inputs = liveStatus?.inputs + if (!inputs) return null + + let displayInputs = inputs + if (typeof inputs === 'string') { + try { + displayInputs = JSON.parse(inputs) + } catch { + // Show as raw string + return ( +
+

Inputs

+
+ {inputs} +
+
+ ) + } + } + + if (typeof displayInputs !== 'object' || displayInputs === null) { + return null + } + + // Filter out internal fields + const filteredInputs = {} + for (const [key, value] of Object.entries(displayInputs)) { + if (key.startsWith('_')) continue + if (value === null || value === undefined || value === '') continue + if (key === 'dry_run' && value === false) continue + filteredInputs[key] = value + } + + if (Object.keys(filteredInputs).length === 0) return null + + const formatValue = (value) => { + if (typeof value === 'boolean') return value ? 'true' : 'false' + if (typeof value === 'number') return String(value) + if (Array.isArray(value)) return value.join(', ') + if (typeof value === 'object') return JSON.stringify(value) + return String(value) + } + + return ( +
+

Inputs

+
+ {Object.entries(filteredInputs).map(([key, value]) => ( +
+ {key} + + {formatValue(value)} + +
+ ))} +
+
+ ) +} + +function StepsSection({ liveStatus }) { + const steps = liveStatus?.steps || [] + + if (steps.length === 0) return null + + return ( +
+

Steps ({steps.length})

+
+ {steps.map((step, idx) => { + const hasErrors = (step.errors?.length > 0) || (step.error > 0) + const statusColor = hasErrors || step.status === 'error' + ? 'error' + : step.status === 'success' || step.status === 'completed' + ? 'success' + : step.status === 'running' || step.status === 'in_progress' + ? 'running' + : 'pending' + + return ( +
+
+ + {formatStepName(step.name)} + + {formatDuration(step.duration_ms)} + +
+
+ + {step.success ?? step.output_count ?? 0} ok + + + {step.error ?? step.errors?.length ?? 0} errors + +
+ {step.errors && step.errors.length > 0 && ( +
+ {step.errors.slice(0, 5).map((err, errIdx) => ( +
+ {err.length > 200 ? `${err.slice(0, 200)}...` : err} +
+ ))} + {step.errors.length > 5 && ( +
+ +{step.errors.length - 5} more errors +
+ )} +
+ )} +
+ ) + })} +
+
+ ) +} + +function OutputSection({ workflow, liveStatus }) { + const output = liveStatus?.output || {} + const workflowError = liveStatus?.error || workflow?.error + const workflowStatus = liveStatus?.status || workflow?.status + + const isCompleted = ['SUCCESS', 'ERROR', 'CANCELLED', 'completed', 'failed', 'canceled', 'completed_with_errors'].includes(workflowStatus) + + const hasAgentOutput = output.agent_turns != null || output.tokens_in != null || output.tool_calls != null + const hasToolOutput = output.total_output != null || output.total_success != null + const hasErrors = output.total_errors > 0 || workflowError + + if (!isCompleted && !hasErrors) return null + if (!hasAgentOutput && !hasToolOutput && !hasErrors && !output.result_preview) return null + + return ( +
+

Output

+
+ {workflowError && ( +
+
Error
+
{workflowError}
+
+ )} + + {hasAgentOutput && ( +
+ {output.agent_turns != null && ( +
+ Turns + {output.agent_turns} +
+ )} + {output.tool_calls != null && ( +
+ Tool Calls + {output.tool_calls} +
+ )} + {output.tokens_in != null && ( +
+ Tokens In + + {formatTokens(output.tokens_in)} + +
+ )} + {output.tokens_out != null && ( +
+ Tokens Out + + {formatTokens(output.tokens_out)} + +
+ )} + {output.cost_usd != null && ( +
+ Cost + + {formatCost(output.cost_usd)} + +
+ )} + {output.stop_reason && ( +
+ Stop Reason + + {output.stop_reason} + +
+ )} +
+ )} + + {hasToolOutput && !hasAgentOutput && ( +
+ {output.total_output != null && ( +
+ Total Output + {output.total_output} +
+ )} + {output.total_success != null && ( +
+ Successful + + {output.total_success} + +
+ )} + {output.total_errors != null && output.total_errors > 0 && ( +
+ Errors + + {output.total_errors} + +
+ )} +
+ )} + + {output.result_preview && ( +
+
Result Preview
+
{output.result_preview}
+
+ )} +
+
+ ) +} + +function ErrorSection({ liveStatus }) { + const errors = [] + + // Collect errors from status + if (liveStatus?.error) { + errors.push({ source: 'Workflow', message: liveStatus.error }) + } + + // Collect errors from steps + const steps = liveStatus?.steps || [] + steps.forEach((step) => { + if (step.errors && step.errors.length > 0) { + step.errors.forEach((err) => { + errors.push({ source: formatStepName(step.name), message: err }) + }) + } + }) + + if (errors.length === 0) return null + + return ( +
+

Errors ({errors.length})

+
+ {errors.slice(0, 20).map((err, idx) => ( +
+ {err.source} + {err.message} +
+ ))} + {errors.length > 20 && ( +
+ +{errors.length - 20} more errors +
+ )} +
+
+ ) +} + +export default function WorkflowDetailPanel({ params }) { + const { workflowId, onClose, onAttach, onCancel, onRetry } = params || {} + + const [workflow, setWorkflow] = useState(null) + const [liveStatus, setLiveStatus] = useState(null) + const [isLoading, setIsLoading] = useState(true) + const [error, setError] = useState(null) + const [autoRefresh, setAutoRefresh] = useState(true) + const refreshIntervalRef = useRef(null) + + const isRunning = workflow?.status === 'PENDING' || workflow?.status === 'ENQUEUED' + + const fetchWorkflow = useCallback(async () => { + if (!workflowId) return + + try { + const response = await fetch(apiUrl(`/api/workflows/${workflowId}/status`)) + if (!response.ok) { + throw new Error(`Failed to fetch workflow: ${response.status}`) + } + const data = await response.json() + setLiveStatus(data) + + // Also fetch basic workflow info if not present + if (!workflow) { + const wfResponse = await fetch(apiUrl(`/api/workflows?search=${workflowId}&limit=1`)) + if (wfResponse.ok) { + const wfData = await wfResponse.json() + if (wfData.workflows && wfData.workflows.length > 0) { + setWorkflow(wfData.workflows[0]) + } + } + } + + setError(null) + } catch (err) { + setError(err.message) + } finally { + setIsLoading(false) + } + }, [workflowId, workflow]) + + // Initial fetch + useEffect(() => { + fetchWorkflow() + }, [workflowId]) + + // Auto-refresh + useEffect(() => { + if (!autoRefresh) { + if (refreshIntervalRef.current) { + clearInterval(refreshIntervalRef.current) + refreshIntervalRef.current = null + } + return + } + + const interval = isRunning ? 
REFRESH_INTERVAL_RUNNING : REFRESH_INTERVAL_COMPLETED + refreshIntervalRef.current = setInterval(fetchWorkflow, interval) + + return () => { + if (refreshIntervalRef.current) { + clearInterval(refreshIntervalRef.current) + } + } + }, [autoRefresh, isRunning, fetchWorkflow]) + + // SSE for real-time updates when running + useEffect(() => { + if (!isRunning || !workflowId) return + + const eventSource = new EventSource( + apiUrl(`/api/workflows/${workflowId}/status/stream`) + ) + + eventSource.onmessage = (event) => { + try { + const data = JSON.parse(event.data) + setLiveStatus(data) + } catch (err) { + console.error('Failed to parse SSE data:', err) + } + } + + eventSource.onerror = () => { + eventSource.close() + } + + return () => { + eventSource.close() + } + }, [isRunning, workflowId]) + + const handleCopyId = () => { + if (workflowId) { + navigator.clipboard.writeText(workflowId).catch(() => {}) + } + } + + const handleRefresh = () => { + setIsLoading(true) + fetchWorkflow() + } + + // Compute effective status + const getEffectiveStatus = () => { + if (!workflow) return 'UNKNOWN' + if (workflow.status !== 'SUCCESS') return workflow.status + if (liveStatus?.status === 'completed_with_errors') return 'WARNING' + const steps = liveStatus?.steps || [] + const hasErrors = steps.some((s) => s.error > 0) + if (hasErrors) return 'WARNING' + return workflow.status + } + + const effectiveStatus = getEffectiveStatus() + const workflowName = workflow?.definition_name || workflow?.name || 'Loading...' + const shortId = workflowId?.slice(0, 8) || '' + + if (!workflowId) { + return ( +
+
No workflow ID provided
+
+ ) + } + + return ( +
+ {/* Header */} +
+
+ {onClose && ( + + )} +
+

{workflowName}

+
+ + {shortId} + + +
+
+
+
+ + + {formatDuration(liveStatus?.duration_ms)} + +
+ + +
+
+
+ + {/* Content */} +
+ {error && ( +
+ {error} +
+ )} + + {isLoading && !liveStatus ? ( +
Loading workflow details...
+ ) : ( + <> + {/* Progress for running workflows */} + {isRunning && liveStatus?.stage && ( +
+
+ + {liveStatus.stage} + + + {liveStatus.progress?.current || 0}/{liveStatus.progress?.total || '?'} + +
+
+
0 + ? Math.round((liveStatus.progress.current / liveStatus.progress.total) * 100) + : 0}%` + }} + /> +
+
+ )} + + + + + {/* Timeline visualization */} + {liveStatus?.steps?.length > 0 && ( +
+

Timeline

+ +
+ )} + + + + + + {/* Action buttons for running workflows */} + {isRunning && ( +
+ {onAttach && ( + + )} + {onCancel && ( + + )} +
+ )} + + {/* Retry button for completed workflows */} + {!isRunning && onRetry && ['SUCCESS', 'ERROR', 'CANCELLED', 'WARNING', 'RETRIES_EXCEEDED'].includes(workflow?.status) && ( +
+ +
+ )} + + )} +
+
+ ) +} diff --git a/src/kurt/web/client/src/panels/WorkflowsPanel.jsx b/src/kurt/web/client/src/panels/WorkflowsPanel.jsx index ef250a1a..f0e570f2 100644 --- a/src/kurt/web/client/src/panels/WorkflowsPanel.jsx +++ b/src/kurt/web/client/src/panels/WorkflowsPanel.jsx @@ -6,6 +6,7 @@ export default function WorkflowsPanel({ params }) { collapsed, onToggleCollapse, onAttachWorkflow, + onOpenWorkflowDetail, } = params || {} if (collapsed) { @@ -44,7 +45,10 @@ export default function WorkflowsPanel({ params }) {
- +
) diff --git a/src/kurt/web/client/src/styles.css b/src/kurt/web/client/src/styles.css index 86067aeb..8cd7fa2e 100644 --- a/src/kurt/web/client/src/styles.css +++ b/src/kurt/web/client/src/styles.css @@ -2898,6 +2898,92 @@ html { flex-shrink: 0; } +/* Workflow Metrics */ +.workflow-metrics { + display: flex; + gap: var(--space-2); + padding: var(--space-2); + background: var(--color-bg-secondary); + border-bottom: 1px solid var(--color-border); + flex-shrink: 0; + flex-wrap: wrap; +} + +.workflow-metrics-card { + display: flex; + flex-direction: column; + gap: var(--space-1); + padding: var(--space-2) var(--space-3); + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-sm); + min-width: 100px; +} + +.workflow-metrics-label { + font-size: var(--text-xs); + color: var(--color-text-tertiary); + text-transform: uppercase; + letter-spacing: 0.03em; + font-weight: var(--font-medium); +} + +.workflow-metrics-value { + display: flex; + align-items: baseline; + gap: var(--space-2); +} + +.workflow-metrics-count { + font-size: var(--text-lg); + font-weight: var(--font-semibold); + color: var(--color-text-primary); +} + +.workflow-metrics-breakdown { + display: flex; + gap: var(--space-1); + font-size: var(--text-xs); +} + +.workflow-metrics-status { + padding: 1px var(--space-1); + border-radius: var(--radius-sm); +} + +.workflow-metrics-success { + background: var(--color-success-bg); + color: var(--color-success); +} + +.workflow-metrics-error { + background: var(--color-error-bg); + color: var(--color-error); +} + +.workflow-metrics-running { + background: var(--color-info-bg); + color: var(--color-info); +} + +.workflow-metrics-cost { + font-size: var(--text-base); + font-weight: var(--font-semibold); + color: var(--color-text-primary); +} + +.workflow-metrics-tokens { + font-size: var(--text-sm); + color: var(--color-text-secondary); + font-family: var(--font-mono); +} + +.workflow-metrics-duration { + font-size: 
var(--text-sm); + color: var(--color-text-secondary); + font-family: var(--font-mono); +} + .workflow-select { border: 1px solid var(--color-border); background: var(--color-bg-primary); @@ -3173,6 +3259,16 @@ html { transition: background var(--transition-fast); } +.workflow-open-detail { + background: var(--color-bg-tertiary); + color: var(--color-text-secondary); +} + +.workflow-open-detail:hover { + background: var(--color-bg-secondary); + color: var(--color-text-primary); +} + .workflow-attach { background: var(--color-info); color: var(--color-text-inverse); @@ -4099,158 +4195,766 @@ html { height: 100%; } -/* ========== Code Viewer ========== */ +/* ========== Workflow Detail Panel ========== */ -.code-viewer { - height: 100%; - overflow: auto; - font-family: 'IBM Plex Mono', Monaco, 'Courier New', monospace; - font-size: 13px; - line-height: 1.5; +.workflow-detail-panel { + background: var(--color-bg-primary); + display: flex; + flex-direction: column; } -.code-viewer pre { - margin: 0; - border-radius: 0; +.workflow-detail-empty, +.workflow-detail-loading { + color: var(--color-text-tertiary); + padding: var(--space-5); + text-align: center; + font-size: var(--text-sm); } -.code-viewer-empty { +/* Header */ +.workflow-detail-header { display: flex; align-items: center; - justify-content: center; - height: 100%; - color: var(--color-text-tertiary); + justify-content: space-between; + padding: var(--space-3) var(--space-4); + border-bottom: 1px solid var(--color-border); + background: var(--color-bg-secondary); + flex-shrink: 0; } -.code-empty-message { - font-size: 14px; +.workflow-detail-header-left { + display: flex; + align-items: center; + gap: var(--space-3); + min-width: 0; } -.editor-code-viewer { - flex: 1; - min-height: 0; +.workflow-detail-back-btn { + display: flex; + align-items: center; + justify-content: center; + width: 28px; + height: 28px; + border: none; + border-radius: var(--radius-md); + background: transparent; + color: 
var(--color-text-secondary); + cursor: pointer; + transition: all var(--transition-fast); } -/* Code viewer in editor panel - no padding since prism adds its own */ -.editor-panel-content .code-viewer { - margin: 0; +.workflow-detail-back-btn:hover { + background: var(--color-bg-tertiary); + color: var(--color-text-primary); } -/* ========== Code Editor ========== */ +.workflow-detail-title-group { + min-width: 0; +} -.code-editor { - display: flex; - flex-direction: column; - height: 100%; - min-height: 0; +.workflow-detail-title { + margin: 0; + font-size: var(--text-base); + font-weight: var(--font-semibold); + color: var(--color-text-primary); + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; } -.code-editor-toolbar { +.workflow-detail-id-row { display: flex; align-items: center; - justify-content: space-between; - padding: var(--space-1) var(--space-3); - background: var(--color-pre-bg); - border-bottom: 1px solid var(--color-border-strong); - flex-shrink: 0; + gap: var(--space-1); + margin-top: 2px; } -.code-editor-language { +.workflow-detail-id { + font-family: var(--font-mono); font-size: var(--text-xs); - font-weight: var(--font-medium); - text-transform: uppercase; - letter-spacing: 0.05em; color: var(--color-text-tertiary); - font-family: var(--font-mono); } -.code-editor-status { +.workflow-detail-copy-btn { display: flex; align-items: center; + justify-content: center; + padding: 2px; + border: none; + border-radius: var(--radius-sm); + background: transparent; + color: var(--color-text-tertiary); + cursor: pointer; + transition: all var(--transition-fast); } -.code-editor-container { - flex: 1; - min-height: 0; - overflow: auto; - background: var(--color-pre-bg); -} - -.code-editor-input { - min-height: 100%; +.workflow-detail-copy-btn:hover { + background: var(--color-bg-tertiary); + color: var(--color-text-primary); } -.code-editor-textarea { - outline: none !important; - caret-color: var(--color-text-inverse); 
+.workflow-detail-header-right { + display: flex; + align-items: center; + gap: var(--space-3); } -.code-editor-pre { - min-height: 100%; +.workflow-detail-status-badge { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + padding: 4px var(--space-2); + border-radius: var(--radius-sm); + text-transform: uppercase; } -.code-editor-line-number { - display: table-cell; - text-align: right; - padding-right: var(--space-4); - user-select: none; - opacity: 0.5; - width: 1%; - white-space: nowrap; - color: var(--color-text-tertiary); +.workflow-detail-duration { + font-family: var(--font-mono); + font-size: var(--text-sm); + color: var(--color-text-secondary); } -.editor-code-editor { - flex: 1; - min-height: 0; +.workflow-detail-controls { + display: flex; + align-items: center; + gap: var(--space-2); } -/* ========== Sidebar View Toggle ========== */ - -.sidebar-view-toggle { +.workflow-detail-auto-refresh { display: flex; align-items: center; - gap: 2px; - background: var(--color-bg-active); - border-radius: var(--radius-sm); - padding: 2px; + gap: var(--space-1); + font-size: var(--text-xs); + color: var(--color-text-secondary); + cursor: pointer; } -.view-toggle-btn { +.workflow-detail-auto-refresh input { + cursor: pointer; +} + +.workflow-detail-refresh-btn { display: flex; align-items: center; justify-content: center; - width: 24px; - height: 22px; - border: none; - background: transparent; - border-radius: var(--radius-sm); - cursor: pointer; + width: 28px; + height: 28px; + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + background: var(--color-bg-primary); color: var(--color-text-secondary); + cursor: pointer; transition: all var(--transition-fast); } -.view-toggle-btn:hover { +.workflow-detail-refresh-btn:hover:not(:disabled) { + background: var(--color-bg-tertiary); color: var(--color-text-primary); } -.view-toggle-btn.active { - background: var(--color-bg-primary); - color: var(--color-text-primary); - box-shadow: 
var(--shadow-sm); +.workflow-detail-refresh-btn:disabled { + opacity: 0.5; + cursor: not-allowed; } -/* ========== Git Changes View ========== */ +.workflow-detail-refresh-btn .spinning { + animation: spin 1s linear infinite; +} -.git-changes-view { - display: flex; - flex-direction: column; - flex: 1; - overflow: hidden; - font-size: var(--text-xs); +@keyframes spin { + from { transform: rotate(0deg); } + to { transform: rotate(360deg); } } -.git-changes-loading, +/* Content */ +.workflow-detail-content { + flex: 1; + overflow: auto; + padding: var(--space-4); +} + +.workflow-detail-error-banner { + background: var(--color-error-bg); + color: var(--color-error); + padding: var(--space-3); + border-radius: var(--radius-md); + margin-bottom: var(--space-4); + font-size: var(--text-sm); +} + +/* Progress */ +.workflow-detail-progress { + background: var(--color-bg-secondary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: var(--space-3); + margin-bottom: var(--space-4); +} + +.workflow-detail-progress-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: var(--space-2); +} + +.workflow-detail-stage-name { + font-size: var(--text-sm); + font-weight: var(--font-medium); + color: var(--color-text-primary); +} + +.workflow-detail-progress-count { + font-family: var(--font-mono); + font-size: var(--text-xs); + color: var(--color-text-secondary); +} + +.workflow-detail-progress-bar { + height: 6px; + background: var(--color-bg-tertiary); + border-radius: var(--radius-full); + overflow: hidden; +} + +.workflow-detail-progress-fill { + height: 100%; + background: var(--color-info); + border-radius: var(--radius-full); + transition: width 0.3s ease; +} + +/* Metadata */ +.workflow-detail-metadata { + background: var(--color-bg-secondary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: var(--space-3); + margin-bottom: var(--space-4); +} + 
+.workflow-detail-meta-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(180px, 1fr)); + gap: var(--space-2) var(--space-4); +} + +.workflow-detail-meta-row { + display: flex; + flex-direction: column; + gap: 2px; +} + +.workflow-detail-meta-label { + font-size: var(--text-xs); + color: var(--color-text-tertiary); + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.workflow-detail-meta-value { + font-size: var(--text-sm); + color: var(--color-text-primary); +} + +.workflow-detail-mono { + font-family: var(--font-mono); +} + +/* Sections */ +.workflow-detail-section { + background: var(--color-bg-secondary); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + padding: var(--space-3); + margin-bottom: var(--space-4); +} + +.workflow-detail-section-error { + border-color: var(--color-error); +} + +.workflow-detail-section-title { + font-size: var(--text-sm); + font-weight: var(--font-semibold); + color: var(--color-text-primary); + margin: 0 0 var(--space-3) 0; + padding-bottom: var(--space-2); + border-bottom: 1px solid var(--color-border); +} + +/* Inputs */ +.workflow-detail-inputs-raw { + background: var(--color-pre-bg); + padding: var(--space-3); + border-radius: var(--radius-sm); + overflow: auto; +} + +.workflow-detail-inputs-raw code { + font-family: var(--font-mono); + font-size: var(--text-xs); + color: var(--color-text-primary); + white-space: pre-wrap; + word-break: break-all; +} + +.workflow-detail-inputs-grid { + display: grid; + gap: var(--space-2); +} + +.workflow-detail-input-row { + display: flex; + gap: var(--space-3); + padding: var(--space-2); + background: var(--color-bg-primary); + border-radius: var(--radius-sm); +} + +.workflow-detail-input-key { + font-size: var(--text-xs); + font-weight: var(--font-medium); + color: var(--color-text-secondary); + min-width: 100px; + flex-shrink: 0; +} + +.workflow-detail-input-value { + font-size: var(--text-xs); + color: var(--color-text-primary); + 
overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +/* Steps */ +.workflow-detail-steps { + display: grid; + gap: var(--space-2); +} + +.workflow-detail-step-card { + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-sm); + padding: var(--space-3); +} + +.workflow-detail-step-header { + display: flex; + align-items: center; + gap: var(--space-2); +} + +.workflow-detail-step-status { + width: 8px; + height: 8px; + border-radius: 50%; + flex-shrink: 0; +} + +.workflow-detail-step-status-success { + background: var(--color-success); +} + +.workflow-detail-step-status-error { + background: var(--color-error); +} + +.workflow-detail-step-status-running { + background: var(--color-info); + animation: pulse 1.5s ease-in-out infinite; +} + +.workflow-detail-step-status-pending { + background: var(--color-text-tertiary); +} + +@keyframes pulse { + 0%, 100% { opacity: 1; } + 50% { opacity: 0.5; } +} + +.workflow-detail-step-name { + font-size: var(--text-sm); + font-weight: var(--font-medium); + color: var(--color-text-primary); + flex: 1; +} + +.workflow-detail-step-duration { + font-family: var(--font-mono); + font-size: var(--text-xs); + color: var(--color-text-tertiary); +} + +.workflow-detail-step-stats { + display: flex; + gap: var(--space-3); + margin-top: var(--space-2); + padding-top: var(--space-2); + border-top: 1px solid var(--color-border); +} + +.workflow-detail-step-stat { + font-size: var(--text-xs); +} + +.workflow-detail-step-success { + color: var(--color-success); +} + +.workflow-detail-step-error { + color: var(--color-text-tertiary); +} + +.workflow-detail-step-errors { + margin-top: var(--space-2); + padding: var(--space-2); + background: var(--color-error-bg); + border-radius: var(--radius-sm); +} + +.workflow-detail-step-error-msg { + font-size: var(--text-xs); + color: var(--color-error); + margin-bottom: var(--space-1); + word-break: break-word; +} + 
+.workflow-detail-step-error-msg:last-child { + margin-bottom: 0; +} + +.workflow-detail-step-error-more { + font-size: var(--text-xs); + color: var(--color-text-tertiary); + font-style: italic; +} + +/* Output */ +.workflow-detail-output { + display: grid; + gap: var(--space-3); +} + +.workflow-detail-output-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(120px, 1fr)); + gap: var(--space-3); +} + +.workflow-detail-output-item { + display: flex; + flex-direction: column; + gap: 2px; +} + +.workflow-detail-output-label { + font-size: var(--text-xs); + color: var(--color-text-tertiary); +} + +.workflow-detail-output-value { + font-size: var(--text-sm); + font-weight: var(--font-medium); + color: var(--color-text-primary); +} + +.workflow-detail-cost { + color: var(--color-warning); +} + +.workflow-detail-success-text { + color: var(--color-success); +} + +.workflow-detail-error-text { + color: var(--color-error); +} + +/* Error blocks */ +.workflow-detail-error-block { + background: var(--color-error-bg); + border: 1px solid var(--color-error); + border-radius: var(--radius-sm); + padding: var(--space-3); +} + +.workflow-detail-error-label { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-error); + margin-bottom: var(--space-1); +} + +.workflow-detail-error-message { + font-size: var(--text-sm); + color: var(--color-error); + word-break: break-word; +} + +/* Result preview */ +.workflow-detail-result-preview { + background: var(--color-bg-primary); + border: 1px solid var(--color-border); + border-radius: var(--radius-sm); + padding: var(--space-3); +} + +.workflow-detail-result-label { + font-size: var(--text-xs); + font-weight: var(--font-medium); + color: var(--color-text-secondary); + margin-bottom: var(--space-2); +} + +.workflow-detail-result-content { + font-size: var(--text-sm); + color: var(--color-text-primary); + white-space: pre-wrap; + word-break: break-word; +} + +/* Errors list */ 
+.workflow-detail-errors-list { + display: grid; + gap: var(--space-2); +} + +.workflow-detail-error-item { + display: flex; + flex-direction: column; + gap: 2px; + padding: var(--space-2); + background: var(--color-error-bg); + border-radius: var(--radius-sm); +} + +.workflow-detail-error-source { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-error); +} + +.workflow-detail-error-msg { + font-size: var(--text-xs); + color: var(--color-text-primary); + word-break: break-word; +} + +.workflow-detail-errors-more { + font-size: var(--text-xs); + color: var(--color-text-tertiary); + font-style: italic; + text-align: center; + padding: var(--space-2); +} + +/* Actions */ +.workflow-detail-actions { + display: flex; + gap: var(--space-3); + padding-top: var(--space-4); + border-top: 1px solid var(--color-border); + margin-top: var(--space-4); +} + +.workflow-detail-action-btn { + flex: 1; + padding: var(--space-2) var(--space-4); + border: 1px solid var(--color-border); + border-radius: var(--radius-md); + font-size: var(--text-sm); + font-weight: var(--font-medium); + cursor: pointer; + transition: all var(--transition-fast); +} + +.workflow-detail-attach-btn { + background: var(--color-info-bg); + border-color: var(--color-info); + color: var(--color-info); +} + +.workflow-detail-attach-btn:hover { + background: var(--color-info); + color: white; +} + +.workflow-detail-cancel-btn { + background: var(--color-error-bg); + border-color: var(--color-error); + color: var(--color-error); +} + +.workflow-detail-cancel-btn:hover { + background: var(--color-error); + color: white; +} + +.workflow-detail-retry-btn { + background: var(--color-bg-primary); + border-color: var(--color-border); + color: var(--color-text-primary); +} + +.workflow-detail-retry-btn:hover { + background: var(--color-bg-tertiary); +} + +/* ========== Code Viewer ========== */ + +.code-viewer { + height: 100%; + overflow: auto; + font-family: 'IBM Plex Mono', 
Monaco, 'Courier New', monospace; + font-size: 13px; + line-height: 1.5; +} + +.code-viewer pre { + margin: 0; + border-radius: 0; +} + +.code-viewer-empty { + display: flex; + align-items: center; + justify-content: center; + height: 100%; + color: var(--color-text-tertiary); +} + +.code-empty-message { + font-size: 14px; +} + +.editor-code-viewer { + flex: 1; + min-height: 0; +} + +/* Code viewer in editor panel - no padding since prism adds its own */ +.editor-panel-content .code-viewer { + margin: 0; +} + +/* ========== Code Editor ========== */ + +.code-editor { + display: flex; + flex-direction: column; + height: 100%; + min-height: 0; +} + +.code-editor-toolbar { + display: flex; + align-items: center; + justify-content: space-between; + padding: var(--space-1) var(--space-3); + background: var(--color-pre-bg); + border-bottom: 1px solid var(--color-border-strong); + flex-shrink: 0; +} + +.code-editor-language { + font-size: var(--text-xs); + font-weight: var(--font-medium); + text-transform: uppercase; + letter-spacing: 0.05em; + color: var(--color-text-tertiary); + font-family: var(--font-mono); +} + +.code-editor-status { + display: flex; + align-items: center; +} + +.code-editor-container { + flex: 1; + min-height: 0; + overflow: auto; + background: var(--color-pre-bg); +} + +.code-editor-input { + min-height: 100%; +} + +.code-editor-textarea { + outline: none !important; + caret-color: var(--color-text-inverse); +} + +.code-editor-pre { + min-height: 100%; +} + +.code-editor-line-number { + display: table-cell; + text-align: right; + padding-right: var(--space-4); + user-select: none; + opacity: 0.5; + width: 1%; + white-space: nowrap; + color: var(--color-text-tertiary); +} + +.editor-code-editor { + flex: 1; + min-height: 0; +} + +/* ========== Sidebar View Toggle ========== */ + +.sidebar-view-toggle { + display: flex; + align-items: center; + gap: 2px; + background: var(--color-bg-active); + border-radius: var(--radius-sm); + padding: 2px; +} + 
+.view-toggle-btn { + display: flex; + align-items: center; + justify-content: center; + width: 24px; + height: 22px; + border: none; + background: transparent; + border-radius: var(--radius-sm); + cursor: pointer; + color: var(--color-text-secondary); + transition: all var(--transition-fast); +} + +.view-toggle-btn:hover { + color: var(--color-text-primary); +} + +.view-toggle-btn.active { + background: var(--color-bg-primary); + color: var(--color-text-primary); + box-shadow: var(--shadow-sm); +} + +/* ========== Git Changes View ========== */ + +.git-changes-view { + display: flex; + flex-direction: column; + flex: 1; + overflow: hidden; + font-size: var(--text-xs); +} + +.git-changes-loading, .git-changes-error { padding: var(--space-4); text-align: center; @@ -4766,3 +5470,250 @@ html { transform: scale(1) translateY(0); } } + +/* ========================================================================== + Workflow Timeline Component + ========================================================================== */ + +.workflow-timeline { + margin-top: var(--space-2); + border: 1px solid var(--color-border); + border-radius: var(--radius-sm); + background: var(--color-bg-primary); + overflow: hidden; +} + +.workflow-timeline-header { + display: flex; + align-items: center; + justify-content: space-between; + padding: var(--space-2); + background: var(--color-bg-secondary); + border-bottom: 1px solid var(--color-border); +} + +.workflow-timeline-title { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-text-primary); +} + +.workflow-timeline-legend { + display: flex; + align-items: center; + gap: var(--space-3); +} + +.workflow-timeline-legend-item { + display: flex; + align-items: center; + gap: var(--space-1); + font-size: 10px; + color: var(--color-text-secondary); +} + +.workflow-timeline-legend-dot { + width: 8px; + height: 8px; + border-radius: var(--radius-full); +} + +.workflow-timeline-content { + padding: 
var(--space-2); + display: flex; + flex-direction: column; + gap: var(--space-1); +} + +.workflow-timeline-row { + display: flex; + align-items: center; + gap: var(--space-2); + min-height: 24px; +} + +.workflow-timeline-label { + flex: 0 0 120px; + font-size: var(--text-xs); + font-weight: var(--font-medium); + color: var(--color-text-primary); + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.workflow-timeline-bar-container { + flex: 1; + display: flex; + align-items: center; + gap: var(--space-2); + height: 18px; + background: var(--color-bg-tertiary); + border-radius: var(--radius-sm); + padding: 2px; + overflow: hidden; +} + +.workflow-timeline-bar { + height: 100%; + min-width: 4px; + border-radius: var(--radius-sm); + transition: width var(--transition-normal), opacity var(--transition-fast); + cursor: pointer; +} + +.workflow-timeline-bar:hover { + opacity: 0.85; + filter: brightness(1.1); +} + +/* Timeline status colors */ +.workflow-timeline-status-success, +.workflow-timeline-bar-success { + background: var(--color-success); +} + +.workflow-timeline-status-error, +.workflow-timeline-bar-error { + background: var(--color-error); +} + +.workflow-timeline-status-running, +.workflow-timeline-bar-running { + background: var(--color-info); + animation: timeline-pulse 1.5s ease-in-out infinite; +} + +.workflow-timeline-status-pending, +.workflow-timeline-bar-pending { + background: var(--color-text-tertiary); +} + +@keyframes timeline-pulse { + 0%, 100% { + opacity: 1; + } + 50% { + opacity: 0.6; + } +} + +.workflow-timeline-duration { + flex-shrink: 0; + font-family: var(--font-mono); + font-size: 10px; + color: var(--color-text-secondary); + min-width: 48px; + text-align: right; +} + +/* Timeline Tooltip */ +.workflow-timeline-tooltip { + position: fixed; + z-index: 1000; + transform: translate(-50%, -100%); + background: var(--color-bg-primary); + border: 1px solid var(--color-border-strong); + border-radius: var(--radius-md); + 
box-shadow: var(--shadow-lg); + padding: var(--space-2); + min-width: 180px; + max-width: 320px; + pointer-events: none; + animation: tooltip-fade-in 150ms ease-out; +} + +@keyframes tooltip-fade-in { + from { + opacity: 0; + transform: translate(-50%, calc(-100% + 4px)); + } + to { + opacity: 1; + transform: translate(-50%, -100%); + } +} + +.workflow-timeline-tooltip-header { + display: flex; + align-items: center; + gap: var(--space-1); + margin-bottom: var(--space-2); + padding-bottom: var(--space-1); + border-bottom: 1px solid var(--color-border); +} + +.workflow-timeline-tooltip-status { + width: 8px; + height: 8px; + border-radius: var(--radius-full); + flex-shrink: 0; +} + +.workflow-timeline-tooltip-name { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-text-primary); +} + +.workflow-timeline-tooltip-content { + display: flex; + flex-direction: column; + gap: var(--space-1); +} + +.workflow-timeline-tooltip-row { + display: flex; + justify-content: space-between; + align-items: center; + gap: var(--space-2); +} + +.workflow-timeline-tooltip-label { + font-size: 10px; + color: var(--color-text-secondary); +} + +.workflow-timeline-tooltip-value { + font-size: 10px; + font-weight: var(--font-medium); + color: var(--color-text-primary); +} + +.workflow-timeline-tooltip-mono { + font-family: var(--font-mono); +} + +.workflow-timeline-tooltip-error { + color: var(--color-error); +} + +.workflow-timeline-tooltip-error .workflow-timeline-tooltip-value { + color: var(--color-error); +} + +.workflow-timeline-tooltip-errors { + margin-top: var(--space-1); + padding-top: var(--space-1); + border-top: 1px solid var(--color-border); +} + +.workflow-timeline-tooltip-error-msg { + font-size: 10px; + color: var(--color-error); + padding: var(--space-1); + background: var(--color-error-bg); + border-radius: var(--radius-sm); + margin-bottom: var(--space-1); + word-break: break-word; +} + +.workflow-timeline-tooltip-error-msg:last-child 
{ + margin-bottom: 0; +} + +.workflow-timeline-tooltip-more { + font-size: 10px; + color: var(--color-text-tertiary); + font-style: italic; +} From 045758fef1933b3dc57419b2aad330adb2856298 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Wed, 28 Jan 2026 08:32:47 +0000 Subject: [PATCH 13/19] feat: Add step logs and distinct config/output visual styles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add expandable step logs in timeline: click any step bar to see its events - Shows event timestamp, status icon, message, and progress - Fetches from /api/workflows/{id}/logs?step_id= - Lazy loading - only fetches when step is expanded - Distinct visual styles for Config and Output sections: - Config: Blue left border, settings icon (⚙), blue-tinted header - Output: Green left border, chart icon (📊), green-tinted header - Error Output: Red left border and error styling - Timeline improvements: - Step labels are now clickable with expand arrow icon - Step bars are clickable to toggle expansion - Smooth rotation animation on expand/collapse Co-Authored-By: Claude Opus 4.5 --- .../web/client/src/components/WorkflowRow.jsx | 12 +- .../src/components/WorkflowTimeline.jsx | 141 +++++++++++++- src/kurt/web/client/src/styles.css | 180 ++++++++++++++++++ 3 files changed, 322 insertions(+), 11 deletions(-) diff --git a/src/kurt/web/client/src/components/WorkflowRow.jsx b/src/kurt/web/client/src/components/WorkflowRow.jsx index 35862256..43ae2317 100644 --- a/src/kurt/web/client/src/components/WorkflowRow.jsx +++ b/src/kurt/web/client/src/components/WorkflowRow.jsx @@ -706,9 +706,9 @@ function WorkflowConfigSection({ workflow, liveStatus }) { } return ( -
+
setIsExpanded(!isExpanded)} role="button" tabIndex={0} @@ -722,6 +722,7 @@ function WorkflowConfigSection({ workflow, liveStatus }) { {isExpanded ? : } + Config {previewText}
@@ -829,9 +830,9 @@ function WorkflowOutputSection({ workflow, liveStatus }) { } return ( -
+
setIsExpanded(!isExpanded)} role="button" tabIndex={0} @@ -845,6 +846,7 @@ function WorkflowOutputSection({ workflow, liveStatus }) { {isExpanded || shouldAutoExpand ? : } + 📊 Output {previewText} @@ -1280,7 +1282,7 @@ export default function WorkflowRow({ /> )} {liveStatus?.steps?.length > 0 && ( - + )}
diff --git a/src/kurt/web/client/src/components/WorkflowTimeline.jsx b/src/kurt/web/client/src/components/WorkflowTimeline.jsx index bbc22aa0..bc79787e 100644 --- a/src/kurt/web/client/src/components/WorkflowTimeline.jsx +++ b/src/kurt/web/client/src/components/WorkflowTimeline.jsx @@ -1,4 +1,7 @@ -import { useState } from 'react' +import { useState, useEffect, useCallback } from 'react' + +const apiBase = import.meta.env.VITE_API_URL || '' +const apiUrl = (path) => `${apiBase}${path}` /** * WorkflowTimeline - Horizontal timeline visualization for workflow steps @@ -7,6 +10,7 @@ import { useState } from 'react' * - Step name on the left * - Duration bar proportional to execution time * - Status color coding (green=success, red=error, blue=running, gray=pending) + * - Click to expand and see step log events * - Hover tooltip with details (duration_ms, output_count, errors) */ @@ -109,7 +113,68 @@ function StepTooltip({ step, position }) { ) } -function TimelineBar({ step, maxDuration, onHover, onLeave }) { +function StepLogs({ events, isLoading }) { + if (isLoading) { + return ( +
+
Loading events...
+
+ ) + } + + if (!events || events.length === 0) { + return ( +
+
No events recorded
+
+ ) + } + + const formatTime = (timestamp) => { + if (!timestamp) return '' + const date = new Date(timestamp) + return date.toLocaleTimeString('en-US', { hour12: false, hour: '2-digit', minute: '2-digit', second: '2-digit' }) + } + + const getEventIcon = (status) => { + switch (status) { + case 'completed': + case 'success': + return '✓' + case 'failed': + case 'error': + return '✗' + case 'running': + case 'progress': + return '▸' + default: + return '○' + } + } + + return ( +
+
+ Step Events + {events.length} events +
+
+ {events.map((event, idx) => ( +
+ {getEventIcon(event.status)} + {formatTime(event.created_at)} + {event.message || event.status} + {event.current != null && event.total != null && ( + {event.current}/{event.total} + )} +
+ ))} +
+
+ ) +} + +function TimelineBar({ step, maxDuration, onHover, onLeave, isExpanded, onToggle, events, isLoadingEvents }) { const statusColor = getStatusColor(step) const duration = step.duration_ms || 0 const widthPercent = maxDuration > 0 ? Math.max((duration / maxDuration) * 100, 2) : 2 @@ -123,8 +188,21 @@ function TimelineBar({ step, maxDuration, onHover, onLeave }) { } return ( -
-
+
+
{ + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault() + onToggle() + } + }} + > + {formatStepName(step.name)}
@@ -133,20 +211,67 @@ function TimelineBar({ step, maxDuration, onHover, onLeave }) { style={{ width: `${widthPercent}%` }} onMouseEnter={handleMouseEnter} onMouseLeave={onLeave} - role="progressbar" + onClick={onToggle} + role="button" + tabIndex={0} aria-valuenow={duration} aria-valuemax={maxDuration} aria-label={`${formatStepName(step.name)}: ${formatDuration(duration)}`} + aria-expanded={isExpanded} /> {formatDuration(duration)}
+ {isExpanded && ( + + )}
) } -export default function WorkflowTimeline({ steps = [] }) { +export default function WorkflowTimeline({ steps = [], workflowId }) { const [hoveredStep, setHoveredStep] = useState(null) const [tooltipPosition, setTooltipPosition] = useState({ x: 0, y: 0 }) + const [expandedSteps, setExpandedSteps] = useState(new Set()) + const [stepEvents, setStepEvents] = useState({}) + const [loadingSteps, setLoadingSteps] = useState(new Set()) + + // Fetch events for a specific step + const fetchStepEvents = useCallback(async (stepName) => { + if (!workflowId || stepEvents[stepName] || loadingSteps.has(stepName)) return + + setLoadingSteps(prev => new Set([...prev, stepName])) + try { + const response = await fetch(apiUrl(`/api/workflows/${workflowId}/logs?step_id=${encodeURIComponent(stepName)}&limit=50`)) + if (response.ok) { + const data = await response.json() + setStepEvents(prev => ({ ...prev, [stepName]: data.events || [] })) + } + } catch (err) { + console.error('Failed to fetch step events:', err) + setStepEvents(prev => ({ ...prev, [stepName]: [] })) + } finally { + setLoadingSteps(prev => { + const next = new Set(prev) + next.delete(stepName) + return next + }) + } + }, [workflowId, stepEvents, loadingSteps]) + + // Handle step expansion toggle + const handleToggleStep = useCallback((stepName) => { + setExpandedSteps(prev => { + const next = new Set(prev) + if (next.has(stepName)) { + next.delete(stepName) + } else { + next.add(stepName) + // Fetch events when expanding + fetchStepEvents(stepName) + } + return next + }) + }, [fetchStepEvents]) if (!steps || steps.length === 0) { return null @@ -195,6 +320,10 @@ export default function WorkflowTimeline({ steps = [] }) { maxDuration={maxDuration} onHover={handleHover} onLeave={handleLeave} + isExpanded={expandedSteps.has(step.name)} + onToggle={() => handleToggleStep(step.name)} + events={stepEvents[step.name]} + isLoadingEvents={loadingSteps.has(step.name)} /> ))}
diff --git a/src/kurt/web/client/src/styles.css b/src/kurt/web/client/src/styles.css index 8cd7fa2e..ab3a85c5 100644 --- a/src/kurt/web/client/src/styles.css +++ b/src/kurt/web/client/src/styles.css @@ -3864,6 +3864,186 @@ html { overflow: auto; } +/* Distinct Config Section Styles */ +.workflow-config-section { + border-left: 3px solid var(--color-info); +} + +.workflow-config-header { + background: rgba(var(--color-info-rgb, 59, 130, 246), 0.05); +} + +.workflow-config-header:hover { + background: rgba(var(--color-info-rgb, 59, 130, 246), 0.1); +} + +/* Distinct Output Section Styles */ +.workflow-output-section { + border-left: 3px solid var(--color-success); +} + +.workflow-output-header { + background: rgba(var(--color-success-rgb, 34, 197, 94), 0.05); +} + +.workflow-output-header:hover { + background: rgba(var(--color-success-rgb, 34, 197, 94), 0.1); +} + +.workflow-output-section.workflow-output-error { + border-left-color: var(--color-error); +} + +/* Section Icon */ +.workflow-section-icon { + font-size: 11px; + flex-shrink: 0; + margin-right: 2px; +} + +/* Step Logs in Timeline */ +.workflow-step-logs { + grid-column: 1 / -1; + margin-top: var(--space-2); + padding: var(--space-2); + background: var(--color-bg-secondary); + border-radius: var(--radius-sm); + border: 1px solid var(--color-border); +} + +.workflow-step-logs-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: var(--space-2); + padding-bottom: var(--space-1); + border-bottom: 1px solid var(--color-border); +} + +.workflow-step-logs-title { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-text-primary); +} + +.workflow-step-logs-count { + font-size: 10px; + color: var(--color-text-tertiary); +} + +.workflow-step-logs-loading, +.workflow-step-logs-empty { + font-size: var(--text-xs); + color: var(--color-text-tertiary); + text-align: center; + padding: var(--space-2); +} + +.workflow-step-logs-list { + 
max-height: 200px; + overflow-y: auto; +} + +.workflow-step-log-entry { + display: flex; + align-items: flex-start; + gap: var(--space-2); + padding: var(--space-1) 0; + border-bottom: 1px solid var(--color-border); + font-size: 11px; +} + +.workflow-step-log-entry:last-child { + border-bottom: none; +} + +.workflow-step-log-icon { + width: 14px; + flex-shrink: 0; + text-align: center; +} + +.workflow-step-log-running .workflow-step-log-icon, +.workflow-step-log-progress .workflow-step-log-icon { + color: var(--color-warning); +} + +.workflow-step-log-completed .workflow-step-log-icon, +.workflow-step-log-success .workflow-step-log-icon { + color: var(--color-success); +} + +.workflow-step-log-failed .workflow-step-log-icon, +.workflow-step-log-error .workflow-step-log-icon { + color: var(--color-error); +} + +.workflow-step-log-time { + font-family: var(--font-mono); + font-size: 10px; + color: var(--color-text-tertiary); + flex-shrink: 0; + width: 60px; +} + +.workflow-step-log-message { + flex: 1; + color: var(--color-text-primary); + word-break: break-word; +} + +.workflow-step-log-progress { + font-family: var(--font-mono); + font-size: 10px; + color: var(--color-text-secondary); + flex-shrink: 0; + background: var(--color-bg-tertiary); + padding: 1px 4px; + border-radius: var(--radius-sm); +} + +/* Timeline row expansion styles */ +.workflow-timeline-row { + display: grid; + grid-template-columns: 120px 1fr; + gap: var(--space-2); + align-items: center; + padding: var(--space-1) 0; +} + +.workflow-timeline-row-expanded { + grid-template-rows: auto auto; +} + +.workflow-timeline-label { + cursor: pointer; + display: flex; + align-items: center; + gap: var(--space-1); +} + +.workflow-timeline-label:hover { + color: var(--color-text-primary); +} + +.workflow-timeline-expand-icon { + font-size: 8px; + color: var(--color-text-tertiary); + transition: transform 0.15s ease; +} + +.workflow-timeline-expand-icon.expanded { + transform: rotate(90deg); +} + 
+.workflow-timeline-bar { + cursor: pointer; +} + +.workflow-timeline-bar:hover { + opacity: 0.85; +} + .workflow-step-box { border: 1px solid var(--color-border); border-radius: var(--radius-sm); From 0bd41e0e935f3cc1cd0d82f0162ebac6ce95eff2 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Wed, 28 Jan 2026 09:02:51 +0000 Subject: [PATCH 14/19] feat: Add Apify adapter for social media monitoring Migrate Apify integration from PR #81 to follow the new patterns in kurt-simplification branch. The ApifyAdapter provides access to Twitter/X, LinkedIn, and Threads via Apify actors. Features: - Actor registry with built-in presets for common platforms - Flexible field mapping for custom actors - Three API levels: high-level (search_twitter), mid-level (fetch_signals), low-level (run_actor + parse_results) - Config via kurt.config (RESEARCH_APIFY_API_TOKEN) - CLI support: kurt integrations research onboard --source apify Co-Authored-By: Claude Opus 4.5 --- src/kurt/integrations/research/cli.py | 25 +- src/kurt/integrations/research/config.py | 5 + .../research/monitoring/__init__.py | 4 +- .../integrations/research/monitoring/apify.py | 635 ++++++++++++++++++ .../research/tests/test_adapters.py | 292 ++++++++ 5 files changed, 958 insertions(+), 3 deletions(-) create mode 100644 src/kurt/integrations/research/monitoring/apify.py diff --git a/src/kurt/integrations/research/cli.py b/src/kurt/integrations/research/cli.py index 463de394..ba1c25d5 100644 --- a/src/kurt/integrations/research/cli.py +++ b/src/kurt/integrations/research/cli.py @@ -27,7 +27,7 @@ def research_group(): @click.option( "--source", default="perplexity", - type=click.Choice(["perplexity"]), + type=click.Choice(["perplexity", "apify"]), help="Research source to configure", ) @track_command @@ -75,6 +75,14 @@ def onboard_cmd(source: str): console.print("\n[bold]Perplexity Setup:[/bold]") console.print(" API_KEY: Get from https://www.perplexity.ai/settings/api") console.print(" DEFAULT_MODEL: sonar-reasoning 
(recommended)") + elif source == "apify": + console.print("\n[bold]Apify Setup:[/bold]") + console.print(" API_TOKEN: Get from https://console.apify.com/account/integrations") + console.print(" DEFAULT_ACTOR: apidojo/tweet-scraper (for Twitter)") + console.print("\n[bold]Available Actors:[/bold]") + console.print(" Twitter: apidojo/tweet-scraper, quacker/twitter-scraper") + console.print(" LinkedIn: curious_coder/linkedin-post-search-scraper") + console.print(" Threads: apidojo/threads-scraper") return # Test connection @@ -91,6 +99,19 @@ def onboard_cmd(source: str): else: console.print("[red]\u2717 Connection failed[/red]") raise click.Abort() + elif source == "apify": + from .monitoring.apify import ApifyAdapter + + adapter = ApifyAdapter(source_config) + if adapter.test_connection(): + console.print(f"[green]\u2713 Connected to {source.capitalize()}[/green]") + user_info = adapter.get_user_info() + if user_info: + username = user_info.get("username", "unknown") + console.print(f"[dim] Logged in as: {username}[/dim]") + else: + console.print("[red]\u2717 Connection failed[/red]") + raise click.Abort() else: console.print(f"[yellow]No connection test available for {source}[/yellow]") @@ -128,7 +149,7 @@ def status_cmd(): return # Check each known source - known_sources = ["perplexity"] + known_sources = ["perplexity", "apify"] for source in known_sources: if source_configured(source): diff --git a/src/kurt/integrations/research/config.py b/src/kurt/integrations/research/config.py index 343bea03..76b06209 100644 --- a/src/kurt/integrations/research/config.py +++ b/src/kurt/integrations/research/config.py @@ -127,6 +127,11 @@ def create_template_config(source: str) -> dict[str, Any]: "max_tokens": "4000", "temperature": "0.2", } + elif source == "apify": + return { + "api_token": "YOUR_APIFY_API_TOKEN", + "default_actor": "apidojo/tweet-scraper", + } else: return { "api_key": "YOUR_API_KEY", diff --git a/src/kurt/integrations/research/monitoring/__init__.py 
b/src/kurt/integrations/research/monitoring/__init__.py index 8e0a76fe..9def2f73 100644 --- a/src/kurt/integrations/research/monitoring/__init__.py +++ b/src/kurt/integrations/research/monitoring/__init__.py @@ -1,9 +1,10 @@ """ Monitoring adapters for research signals. -Provides adapters for Reddit, HackerNews, and RSS/Atom feeds. +Provides adapters for Reddit, HackerNews, RSS/Atom feeds, and Apify social media. """ +from kurt.integrations.research.monitoring.apify import ApifyAdapter from kurt.integrations.research.monitoring.feeds import FeedAdapter from kurt.integrations.research.monitoring.hackernews import HackerNewsAdapter from kurt.integrations.research.monitoring.models import Signal @@ -14,4 +15,5 @@ "RedditAdapter", "HackerNewsAdapter", "FeedAdapter", + "ApifyAdapter", ] diff --git a/src/kurt/integrations/research/monitoring/apify.py b/src/kurt/integrations/research/monitoring/apify.py new file mode 100644 index 00000000..cebdc5ad --- /dev/null +++ b/src/kurt/integrations/research/monitoring/apify.py @@ -0,0 +1,635 @@ +""" +Apify adapter for social media monitoring. + +Uses Apify's API to run actors for Twitter/X, LinkedIn, and other platforms. +Requires an API token configured in kurt.config as RESEARCH_APIFY_API_TOKEN. + +Supports flexible actor configuration: +- Use built-in actor presets for common platforms +- Pass raw actor input for full control +- Configure field mappings for custom actors +""" + +from __future__ import annotations + +import hashlib +from dataclasses import dataclass, field +from datetime import datetime +from typing import Any, Callable, Optional + +import httpx + +from kurt.integrations.research.monitoring.models import Signal + + +@dataclass +class FieldMapping: + """ + Maps actor output fields to Signal fields. 
+ + Each field can be: + - A string: direct field name lookup + - A list of strings: try each in order, use first non-null + - A callable: function(item) -> value for complex extraction + """ + + text: str | list[str] | Callable[[dict], str] = field( + default_factory=lambda: ["text", "content", "title", "postContent", "description"] + ) + url: str | list[str] | Callable[[dict], str] = field( + default_factory=lambda: ["url", "postUrl", "link", "profileUrl"] + ) + id: str | list[str] | Callable[[dict], str] = field( + default_factory=lambda: ["id", "postId", "objectID", "tweetId"] + ) + score: str | list[str] | Callable[[dict], int] = field( + default_factory=lambda: ["likeCount", "likes", "numLikes", "reactions", "favoriteCount"] + ) + comments: str | list[str] | Callable[[dict], int] = field( + default_factory=lambda: ["replyCount", "replies", "commentCount", "numComments"] + ) + author: str | list[str] | Callable[[dict], str] = field( + default_factory=lambda: ["author", "username", "authorName", "user"] + ) + timestamp: str | list[str] | Callable[[dict], str] = field( + default_factory=lambda: ["createdAt", "created_at", "publishedAt", "postedAt", "date"] + ) + + +@dataclass +class ActorConfig: + """ + Configuration for a specific Apify actor. + + Defines how to build input and parse output for an actor. 
+ """ + + actor_id: str + source_name: str # e.g., "twitter", "linkedin" + + # Input configuration + # Function to build actor input: (query, max_items, **kwargs) -> dict + build_input: Callable[[str, int, dict], dict] | None = None + + # Output configuration + field_mapping: FieldMapping = field(default_factory=FieldMapping) + + # Description for CLI help + description: str = "" + + +def _build_twitter_search_input(query: str, max_items: int, kwargs: dict) -> dict: + """Build input for Twitter search actors.""" + return { + "searchTerms": [query], + "maxItems": max_items, + "sort": kwargs.get("sort", "Latest"), + **{k: v for k, v in kwargs.items() if k != "sort"}, + } + + +def _build_twitter_profile_input(query: str, max_items: int, kwargs: dict) -> dict: + """Build input for Twitter profile scraper actors.""" + return { + "handles": [query] if not isinstance(query, list) else query, + "maxItems": max_items, + "tweetsDesired": kwargs.get("tweets_desired", max_items), + **{k: v for k, v in kwargs.items() if k not in ["tweets_desired"]}, + } + + +def _build_linkedin_search_input(query: str, max_items: int, kwargs: dict) -> dict: + """Build input for LinkedIn search actors.""" + # Check if query is already a URL + if query.startswith("http"): + search_url = query + else: + search_url = f"https://www.linkedin.com/search/results/content/?keywords={query}" + return { + "searchUrl": search_url, + "maxItems": max_items, + **kwargs, + } + + +def _build_linkedin_profile_input(query: str, max_items: int, kwargs: dict) -> dict: + """Build input for LinkedIn profile scraper actors.""" + # Query can be profile URL or list of URLs + urls = [query] if isinstance(query, str) else query + return { + "profileUrls": urls, + **kwargs, + } + + +def _build_generic_search_input(query: str, max_items: int, kwargs: dict) -> dict: + """Build generic search input - works for many actors.""" + return { + "searchTerms": [query], + "maxItems": max_items, + **kwargs, + } + + +# Registry of known 
actors with their configurations +ACTOR_REGISTRY: dict[str, ActorConfig] = { + # Twitter/X actors + "apidojo/tweet-scraper": ActorConfig( + actor_id="apidojo/tweet-scraper", + source_name="twitter", + build_input=_build_twitter_search_input, + description="Search Twitter/X for tweets matching a query", + ), + "quacker/twitter-scraper": ActorConfig( + actor_id="quacker/twitter-scraper", + source_name="twitter", + build_input=_build_twitter_search_input, + description="Alternative Twitter search scraper", + ), + "apidojo/twitter-user-scraper": ActorConfig( + actor_id="apidojo/twitter-user-scraper", + source_name="twitter", + build_input=_build_twitter_profile_input, + description="Scrape tweets from specific Twitter profiles", + ), + # LinkedIn actors + "curious_coder/linkedin-post-search-scraper": ActorConfig( + actor_id="curious_coder/linkedin-post-search-scraper", + source_name="linkedin", + build_input=_build_linkedin_search_input, + description="Search LinkedIn for posts matching a query", + ), + "anchor/linkedin-profile-scraper": ActorConfig( + actor_id="anchor/linkedin-profile-scraper", + source_name="linkedin", + build_input=_build_linkedin_profile_input, + description="Scrape LinkedIn profile data", + ), + # Threads actors + "apidojo/threads-scraper": ActorConfig( + actor_id="apidojo/threads-scraper", + source_name="threads", + build_input=_build_generic_search_input, + description="Search Threads for posts", + ), +} + +# Platform aliases map to default actors +PLATFORM_DEFAULTS = { + "twitter": "apidojo/tweet-scraper", + "linkedin": "curious_coder/linkedin-post-search-scraper", + "threads": "apidojo/threads-scraper", +} + + +class ApifyAdapter: + """ + Adapter for fetching social signals via Apify actors. + + Supports three levels of usage: + + 1. High-level convenience methods: + adapter.search_twitter("AI agents") + adapter.search_linkedin("B2B marketing") + + 2. 
Mid-level with actor selection: + adapter.fetch_signals("query", actor="apidojo/tweet-scraper") + adapter.fetch_signals("@username", actor="apidojo/twitter-user-scraper") + + 3. Low-level raw execution: + result = adapter.run_actor("any/actor", {"custom": "input"}) + signals = adapter.parse_results(result, field_mapping=custom_mapping) + """ + + BASE_URL = "https://api.apify.com/v2" + + def __init__(self, config: dict[str, Any]): + """ + Initialize Apify adapter. + + Args: + config: Configuration dict with api_token and optional settings + """ + self.api_token = config.get("api_token") or config.get("api_key") + if not self.api_token: + raise ValueError("api_token is required for ApifyAdapter") + + self.default_actor = config.get("default_actor", PLATFORM_DEFAULTS["twitter"]) + + # Actor registry + self.actor_registry = {**ACTOR_REGISTRY} + + # Custom field mappings from config + self._custom_mappings: dict[str, FieldMapping] = {} + + def register_actor(self, actor_config: ActorConfig) -> None: + """Register a custom actor configuration.""" + self.actor_registry[actor_config.actor_id] = actor_config + + def set_field_mapping(self, actor_id: str, mapping: FieldMapping) -> None: + """Set custom field mapping for an actor.""" + self._custom_mappings[actor_id] = mapping + + def test_connection(self) -> bool: + """Test API token validity.""" + try: + response = httpx.get( + f"{self.BASE_URL}/users/me", + headers={"Authorization": f"Bearer {self.api_token}"}, + timeout=10.0, + ) + return response.status_code == 200 + except httpx.RequestError: + return False + + def get_user_info(self) -> dict[str, Any] | None: + """Get user account information.""" + try: + response = httpx.get( + f"{self.BASE_URL}/users/me", + headers={"Authorization": f"Bearer {self.api_token}"}, + timeout=10.0, + ) + if response.status_code == 200: + return response.json() + return None + except httpx.RequestError: + return None + + def list_actors(self) -> list[dict[str, str]]: + """List all 
registered actors with descriptions.""" + return [ + { + "actor_id": cfg.actor_id, + "source_name": cfg.source_name, + "description": cfg.description, + } + for cfg in self.actor_registry.values() + ] + + # ========================================================================= + # Low-level API: Raw actor execution + # ========================================================================= + + def run_actor( + self, + actor: str, + actor_input: dict[str, Any], + timeout: float = 120.0, + ) -> list[dict[str, Any]]: + """ + Run an Apify actor with raw input and return raw output. + + Args: + actor: Actor ID (e.g., "apidojo/tweet-scraper") + actor_input: Raw input dict passed directly to the actor + timeout: Request timeout in seconds + + Returns: + Raw list of result items from the actor + + Raises: + Exception: If actor run fails + """ + try: + response = httpx.post( + f"{self.BASE_URL}/acts/{actor}/run-sync-get-dataset-items", + headers={ + "Authorization": f"Bearer {self.api_token}", + "Content-Type": "application/json", + }, + json=actor_input, + timeout=timeout, + ) + response.raise_for_status() + return response.json() + except httpx.TimeoutException: + raise Exception(f"Apify actor {actor} timed out after {timeout} seconds") + except httpx.HTTPStatusError as e: + raise Exception(f"Apify API error: {e.response.status_code} - {e.response.text}") + except httpx.RequestError as e: + raise Exception(f"Failed to connect to Apify: {e}") + + def parse_results( + self, + items: list[dict[str, Any]], + source: str = "apify", + query: str = "", + field_mapping: FieldMapping | None = None, + ) -> list[Signal]: + """ + Parse raw actor results into Signal objects. 
+ + Args: + items: Raw result items from run_actor() + source: Source name for signals (e.g., "twitter") + query: Original query (stored in keywords) + field_mapping: Custom field mapping (uses defaults if None) + + Returns: + List of Signal objects + """ + mapping = field_mapping or FieldMapping() + signals = [] + + for item in items: + try: + signal = self._parse_item_with_mapping(item, source, query, mapping) + if signal: + signals.append(signal) + except Exception: + continue + + return signals + + # ========================================================================= + # Mid-level API: Actor-aware execution + # ========================================================================= + + def fetch_signals( + self, + query: str, + actor: str | None = None, + max_items: int = 50, + keywords: Optional[list[str]] = None, + actor_input: dict[str, Any] | None = None, + field_mapping: FieldMapping | None = None, + **kwargs: Any, + ) -> list[Signal]: + """ + Fetch social signals via Apify actor. 
+ + Args: + query: Search term, hashtag, or profile username + actor: Apify actor ID (uses default if None) + max_items: Maximum items to return + keywords: Optional keyword filter (applied after fetch) + actor_input: Raw actor input (bypasses input building if provided) + field_mapping: Custom field mapping for output parsing + **kwargs: Additional actor-specific parameters + + Returns: + List of Signal objects sorted by relevance + """ + actor = actor or self.default_actor + + # Get actor config if registered + actor_config = self.actor_registry.get(actor) + + # Build or use provided input + if actor_input is not None: + final_input = actor_input + elif actor_config and actor_config.build_input: + final_input = actor_config.build_input(query, max_items, kwargs) + else: + final_input = _build_generic_search_input(query, max_items, kwargs) + + # Run actor + items = self.run_actor(actor, final_input) + + # Determine source name + source = actor_config.source_name if actor_config else self._guess_source(actor) + + # Get field mapping (priority: param > custom > actor config > default) + mapping = ( + field_mapping + or self._custom_mappings.get(actor) + or (actor_config.field_mapping if actor_config else None) + or FieldMapping() + ) + + # Parse results + signals = self.parse_results(items, source, query, mapping) + + # Filter by keywords if provided + if keywords: + signals = [s for s in signals if s.matches_keywords(keywords)] + + # Sort by relevance + signals.sort(key=lambda s: s.relevance_score, reverse=True) + + return signals[:max_items] + + # ========================================================================= + # High-level API: Platform convenience methods + # ========================================================================= + + def search_twitter( + self, + query: str, + max_items: int = 50, + keywords: Optional[list[str]] = None, + actor: str | None = None, + **kwargs: Any, + ) -> list[Signal]: + """ + Search Twitter/X for posts matching 
query. + + Args: + query: Search term or hashtag + max_items: Maximum items to return + keywords: Optional keyword filter + actor: Specific Twitter actor (uses default if None) + **kwargs: Additional actor parameters + """ + actor = actor or PLATFORM_DEFAULTS["twitter"] + return self.fetch_signals( + query=query, + actor=actor, + max_items=max_items, + keywords=keywords, + **kwargs, + ) + + def search_linkedin( + self, + query: str, + max_items: int = 50, + keywords: Optional[list[str]] = None, + actor: str | None = None, + **kwargs: Any, + ) -> list[Signal]: + """ + Search LinkedIn for posts matching query. + + Args: + query: Search term or LinkedIn search URL + max_items: Maximum items to return + keywords: Optional keyword filter + actor: Specific LinkedIn actor (uses default if None) + **kwargs: Additional actor parameters + """ + actor = actor or PLATFORM_DEFAULTS["linkedin"] + return self.fetch_signals( + query=query, + actor=actor, + max_items=max_items, + keywords=keywords, + **kwargs, + ) + + def search_threads( + self, + query: str, + max_items: int = 50, + keywords: Optional[list[str]] = None, + actor: str | None = None, + **kwargs: Any, + ) -> list[Signal]: + """ + Search Threads for posts matching query. + + Args: + query: Search term or hashtag + max_items: Maximum items to return + keywords: Optional keyword filter + actor: Specific Threads actor (uses default if None) + **kwargs: Additional actor parameters + """ + actor = actor or PLATFORM_DEFAULTS["threads"] + return self.fetch_signals( + query=query, + actor=actor, + max_items=max_items, + keywords=keywords, + **kwargs, + ) + + def scrape_profile( + self, + profile: str, + platform: str = "twitter", + max_items: int = 50, + **kwargs: Any, + ) -> list[Signal]: + """ + Scrape posts from a specific profile. 
+ + Args: + profile: Username or profile URL + platform: Platform name (twitter, linkedin) + max_items: Maximum items to return + **kwargs: Additional actor parameters + """ + profile_actors = { + "twitter": "apidojo/twitter-user-scraper", + "linkedin": "anchor/linkedin-profile-scraper", + } + + actor = profile_actors.get(platform) + if not actor: + raise ValueError(f"No profile scraper available for platform: {platform}") + + return self.fetch_signals( + query=profile, + actor=actor, + max_items=max_items, + **kwargs, + ) + + # ========================================================================= + # Internal helpers + # ========================================================================= + + def _guess_source(self, actor: str) -> str: + """Guess source name from actor ID.""" + actor_lower = actor.lower() + if "twitter" in actor_lower or "tweet" in actor_lower: + return "twitter" + elif "linkedin" in actor_lower: + return "linkedin" + elif "threads" in actor_lower: + return "threads" + return "apify" + + def _extract_field( + self, + item: dict[str, Any], + field_spec: str | list[str] | Callable[[dict], Any], + ) -> Any: + """Extract a field value using the field specification.""" + if callable(field_spec): + return field_spec(item) + elif isinstance(field_spec, list): + for field_name in field_spec: + value = self._get_nested(item, field_name) + if value is not None: + return value + return None + else: + return self._get_nested(item, field_spec) + + def _get_nested(self, item: dict[str, Any], field_name: str) -> Any: + """Get a possibly nested field value (supports dot notation).""" + if "." 
in field_name: + parts = field_name.split(".") + value = item + for part in parts: + if isinstance(value, dict): + value = value.get(part) + else: + return None + return value + return item.get(field_name) + + def _parse_item_with_mapping( + self, + item: dict[str, Any], + source: str, + query: str, + mapping: FieldMapping, + ) -> Signal | None: + """Parse a single item using field mapping.""" + text = self._extract_field(item, mapping.text) or "" + url = self._extract_field(item, mapping.url) or "" + + if not text and not url: + return None + + # Extract ID + item_id = self._extract_field(item, mapping.id) + if not item_id: + item_id = hashlib.md5(f"{url}{text[:100]}".encode()).hexdigest()[:12] + signal_id = f"{source}_{item_id}" + + # Extract metrics + score = self._extract_field(item, mapping.score) or 0 + comment_count = self._extract_field(item, mapping.comments) or 0 + + # Extract author + author_value = self._extract_field(item, mapping.author) + if isinstance(author_value, dict): + author = author_value.get("username") or author_value.get("name") + else: + author = author_value + + # Parse timestamp + timestamp = self._parse_date(self._extract_field(item, mapping.timestamp)) + + # Title is first line or truncated text + title = text.split("\n")[0][:200] if text else url[:200] + + return Signal( + signal_id=signal_id, + source=source, + title=title, + url=url, + snippet=text[:500] if text else None, + timestamp=timestamp, + author=author, + score=int(score) if score else 0, + comment_count=int(comment_count) if comment_count else 0, + keywords=[query] if query else [], + ) + + def _parse_date(self, date_str: str | None) -> datetime: + """Parse ISO date string with fallback to now.""" + if not date_str: + return datetime.now() + try: + if isinstance(date_str, str): + clean = date_str.replace("Z", "+00:00") + return datetime.fromisoformat(clean) + return datetime.now() + except (ValueError, TypeError): + return datetime.now() diff --git 
a/src/kurt/integrations/research/tests/test_adapters.py b/src/kurt/integrations/research/tests/test_adapters.py index 3130fd5a..ed85a77d 100644 --- a/src/kurt/integrations/research/tests/test_adapters.py +++ b/src/kurt/integrations/research/tests/test_adapters.py @@ -6,6 +6,7 @@ import pytest from kurt.integrations.research.base import Citation, ResearchResult +from kurt.integrations.research.monitoring.apify import ApifyAdapter, FieldMapping from kurt.integrations.research.monitoring.hackernews import HackerNewsAdapter from kurt.integrations.research.monitoring.models import Signal from kurt.integrations.research.monitoring.reddit import RedditAdapter @@ -268,3 +269,294 @@ def test_check_feed_valid(self, mock_parse): assert result["valid"] is True assert result["title"] == "Test Feed" assert result["entry_count"] == 1 + + +class TestApifyAdapter: + """Tests for Apify adapter.""" + + def test_init_with_api_token(self): + """Test adapter initialization with api_token.""" + adapter = ApifyAdapter({"api_token": "test_token"}) + assert adapter.api_token == "test_token" + + def test_init_with_api_key(self): + """Test adapter initialization with api_key (alias).""" + adapter = ApifyAdapter({"api_key": "test_key"}) + assert adapter.api_token == "test_key" + + def test_init_raises_without_token(self): + """Test adapter raises error without token.""" + with pytest.raises(ValueError, match="api_token is required"): + ApifyAdapter({}) + + def test_list_actors(self): + """Test listing registered actors.""" + adapter = ApifyAdapter({"api_token": "test"}) + actors = adapter.list_actors() + + assert len(actors) > 0 + # Check structure + for actor in actors: + assert "actor_id" in actor + assert "source_name" in actor + assert "description" in actor + + # Check known actors exist + actor_ids = [a["actor_id"] for a in actors] + assert "apidojo/tweet-scraper" in actor_ids + assert "curious_coder/linkedin-post-search-scraper" in actor_ids + + def test_guess_source_twitter(self): + 
"""Test source guessing for Twitter.""" + adapter = ApifyAdapter({"api_token": "test"}) + assert adapter._guess_source("apidojo/tweet-scraper") == "twitter" + assert adapter._guess_source("some/twitter-thing") == "twitter" + + def test_guess_source_linkedin(self): + """Test source guessing for LinkedIn.""" + adapter = ApifyAdapter({"api_token": "test"}) + assert adapter._guess_source("linkedin-scraper") == "linkedin" + + def test_guess_source_threads(self): + """Test source guessing for Threads.""" + adapter = ApifyAdapter({"api_token": "test"}) + assert adapter._guess_source("threads-posts") == "threads" + + def test_guess_source_unknown(self): + """Test source guessing for unknown actors.""" + adapter = ApifyAdapter({"api_token": "test"}) + assert adapter._guess_source("some/random-actor") == "apify" + + def test_extract_field_string(self): + """Test field extraction with string spec.""" + adapter = ApifyAdapter({"api_token": "test"}) + item = {"title": "Test Title"} + result = adapter._extract_field(item, "title") + assert result == "Test Title" + + def test_extract_field_list(self): + """Test field extraction with list spec (fallback).""" + adapter = ApifyAdapter({"api_token": "test"}) + + # First field exists + item = {"text": "Found text"} + result = adapter._extract_field(item, ["text", "content", "title"]) + assert result == "Found text" + + # Fallback to second field + item = {"content": "Fallback content"} + result = adapter._extract_field(item, ["text", "content", "title"]) + assert result == "Fallback content" + + def test_extract_field_callable(self): + """Test field extraction with callable spec.""" + adapter = ApifyAdapter({"api_token": "test"}) + item = {"first": "Hello", "last": "World"} + result = adapter._extract_field(item, lambda x: f"{x['first']} {x['last']}") + assert result == "Hello World" + + def test_extract_field_nested(self): + """Test nested field extraction.""" + adapter = ApifyAdapter({"api_token": "test"}) + item = {"author": 
{"username": "testuser", "name": "Test User"}} + result = adapter._extract_field(item, "author.username") + assert result == "testuser" + + def test_parse_results(self): + """Test parsing raw results into signals.""" + adapter = ApifyAdapter({"api_token": "test"}) + items = [ + { + "id": "post123", + "text": "This is a test post about AI", + "url": "https://twitter.com/post123", + "likeCount": 100, + "replyCount": 25, + "author": "testuser", + "createdAt": "2024-01-15T10:00:00Z", + } + ] + + signals = adapter.parse_results(items, source="twitter", query="AI") + + assert len(signals) == 1 + signal = signals[0] + assert signal.signal_id == "twitter_post123" + assert signal.source == "twitter" + assert "test post" in signal.title.lower() + assert signal.url == "https://twitter.com/post123" + assert signal.score == 100 + assert signal.comment_count == 25 + assert signal.author == "testuser" + assert "AI" in signal.keywords + + def test_parse_results_with_custom_mapping(self): + """Test parsing with custom field mapping.""" + adapter = ApifyAdapter({"api_token": "test"}) + items = [ + { + "postId": "custom123", + "postContent": "Custom content", + "postUrl": "https://custom.com/123", + "reactions": 50, + } + ] + + mapping = FieldMapping( + id="postId", + text="postContent", + url="postUrl", + score="reactions", + ) + + signals = adapter.parse_results(items, source="custom", field_mapping=mapping) + + assert len(signals) == 1 + signal = signals[0] + assert signal.signal_id == "custom_custom123" + assert "Custom content" in signal.title + assert signal.score == 50 + + @patch("httpx.get") + def test_test_connection_success(self, mock_get): + """Test successful connection test.""" + mock_response = MagicMock() + mock_response.status_code = 200 + mock_get.return_value = mock_response + + adapter = ApifyAdapter({"api_token": "valid_token"}) + result = adapter.test_connection() + + assert result is True + mock_get.assert_called_once() + + @patch("httpx.get") + def 
test_test_connection_failure(self, mock_get): + """Test failed connection test.""" + mock_response = MagicMock() + mock_response.status_code = 401 + mock_get.return_value = mock_response + + adapter = ApifyAdapter({"api_token": "invalid_token"}) + result = adapter.test_connection() + + assert result is False + + @patch("httpx.post") + def test_run_actor(self, mock_post): + """Test running an actor.""" + mock_response = MagicMock() + mock_response.json.return_value = [ + {"id": "1", "text": "Result 1"}, + {"id": "2", "text": "Result 2"}, + ] + mock_response.raise_for_status = MagicMock() + mock_post.return_value = mock_response + + adapter = ApifyAdapter({"api_token": "test"}) + results = adapter.run_actor( + "apidojo/tweet-scraper", {"searchTerms": ["test"], "maxItems": 10} + ) + + assert len(results) == 2 + mock_post.assert_called_once() + + @patch("httpx.post") + def test_fetch_signals(self, mock_post): + """Test fetching signals with actor.""" + mock_response = MagicMock() + mock_response.json.return_value = [ + { + "id": "tweet1", + "text": "AI is changing the world", + "url": "https://twitter.com/tweet1", + "likeCount": 50, + "replyCount": 10, + "createdAt": "2024-01-15T10:00:00Z", + } + ] + mock_response.raise_for_status = MagicMock() + mock_post.return_value = mock_response + + adapter = ApifyAdapter({"api_token": "test"}) + signals = adapter.fetch_signals( + query="AI", actor="apidojo/tweet-scraper", max_items=10 + ) + + assert len(signals) == 1 + assert signals[0].source == "twitter" + + @patch("httpx.post") + def test_search_twitter(self, mock_post): + """Test Twitter search convenience method.""" + mock_response = MagicMock() + mock_response.json.return_value = [ + { + "id": "1", + "text": "Test tweet", + "url": "https://twitter.com/1", + "createdAt": "2024-01-15T10:00:00Z", + } + ] + mock_response.raise_for_status = MagicMock() + mock_post.return_value = mock_response + + adapter = ApifyAdapter({"api_token": "test"}) + signals = 
adapter.search_twitter("test query") + + assert len(signals) == 1 + assert signals[0].source == "twitter" + + @patch("httpx.post") + def test_search_linkedin(self, mock_post): + """Test LinkedIn search convenience method.""" + mock_response = MagicMock() + mock_response.json.return_value = [ + { + "id": "post1", + "postContent": "LinkedIn post", + "postUrl": "https://linkedin.com/post1", + "createdAt": "2024-01-15T10:00:00Z", + } + ] + mock_response.raise_for_status = MagicMock() + mock_post.return_value = mock_response + + adapter = ApifyAdapter({"api_token": "test"}) + signals = adapter.search_linkedin("B2B marketing") + + assert len(signals) == 1 + assert signals[0].source == "linkedin" + + def test_keyword_filtering(self): + """Test keyword filtering in fetch_signals.""" + adapter = ApifyAdapter({"api_token": "test"}) + + # Test signal that matches keywords + signal_match = Signal( + signal_id="test_1", + source="twitter", + title="AI and machine learning", + url="https://example.com", + ) + assert signal_match.matches_keywords(["AI"]) is True + assert signal_match.matches_keywords(["python"]) is False + + def test_custom_actor_registration(self): + """Test registering custom actors.""" + from kurt.integrations.research.monitoring.apify import ActorConfig + + adapter = ApifyAdapter({"api_token": "test"}) + + # Register custom actor + custom_actor = ActorConfig( + actor_id="my/custom-actor", + source_name="custom_source", + description="My custom actor", + ) + adapter.register_actor(custom_actor) + + # Verify it's registered + actors = adapter.list_actors() + actor_ids = [a["actor_id"] for a in actors] + assert "my/custom-actor" in actor_ids From 8b4e0dd91bccd41b8af0aa11b80672a1650605c5 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Wed, 28 Jan 2026 09:15:45 +0000 Subject: [PATCH 15/19] refactor: Unify Steps and Timeline into single progressive disclosure block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Remove 
separate WorkflowTimeline component from main workflow view - Integrate timeline bar directly into StepBox - shows when step is expanded - Add step events display within StepBox (fetched from /logs endpoint) - Progressive disclosure: Steps header → click step → see bar + events Visual structure now: Steps (6 processed) └─ agent_execution (6 ok, 0 errors, 41.0s) └─ [expanded: timeline bar + events list] Co-Authored-By: Claude Opus 4.5 --- .../web/client/src/components/WorkflowRow.jsx | 110 ++++++++++++- src/kurt/web/client/src/styles.css | 149 +++++++++++++++++- 2 files changed, 253 insertions(+), 6 deletions(-) diff --git a/src/kurt/web/client/src/components/WorkflowRow.jsx b/src/kurt/web/client/src/components/WorkflowRow.jsx index 43ae2317..c47ad035 100644 --- a/src/kurt/web/client/src/components/WorkflowRow.jsx +++ b/src/kurt/web/client/src/components/WorkflowRow.jsx @@ -1,6 +1,5 @@ -import { useState, useEffect, useCallback } from 'react' +import { useState, useEffect, useCallback, useMemo } from 'react' import { Copy, ChevronDown, ChevronRight } from 'lucide-react' -import WorkflowTimeline from './WorkflowTimeline' const apiBase = import.meta.env.VITE_API_URL || '' const apiUrl = (path) => `${apiBase}${path}` @@ -156,11 +155,14 @@ function CommandBlock({ command }) { function StepBox({ step, logs, + events, isExpanded, onToggle, onOpen, children, showLogs = true, + maxDuration = 1, + isLoadingEvents = false, }) { const successCount = step.success ?? 0 const errorCount = step.error ?? 0 @@ -180,6 +182,10 @@ function StepBox({ ? 'step-status-success' : 'step-status-running' + // Timeline bar width calculation + const durationMs = step.duration_ms || 0 + const barWidthPercent = maxDuration > 0 ? 
Math.max((durationMs / maxDuration) * 100, 2) : 2 + const handleToggle = () => { if (!isExpanded && onOpen && showLogs) { onOpen() @@ -187,6 +193,28 @@ function StepBox({ onToggle() } + const formatEventTime = (timestamp) => { + if (!timestamp) return '' + const date = new Date(timestamp) + return date.toLocaleTimeString('en-US', { hour12: false, hour: '2-digit', minute: '2-digit', second: '2-digit' }) + } + + const getEventIcon = (status) => { + switch (status) { + case 'completed': + case 'success': + return '✓' + case 'failed': + case 'error': + return '✗' + case 'running': + case 'progress': + return '▸' + default: + return '○' + } + } + return (
{isExpanded && (
+ {/* Timeline bar */} +
+
+ {duration} +
+ + {/* Step errors */} {step.errors?.length > 0 && (
{step.errors.map((err, errIdx) => ( @@ -223,6 +262,36 @@ function StepBox({ ))}
)} + + {/* Step events (from logs endpoint) */} + {showLogs && ( +
+
+ Events + {events && {events.length}} +
+ {isLoadingEvents ? ( +
Loading...
+ ) : events && events.length > 0 ? ( +
+ {events.map((event, idx) => ( +
+ {getEventIcon(event.status)} + {formatEventTime(event.created_at)} + {event.message || event.status} + {event.current != null && event.total != null && ( + {event.current}/{event.total} + )} +
+ ))} +
+ ) : ( +
No events recorded
+ )} +
+ )} + + {/* Legacy logs display */} {showLogs && logs?.length > 0 && (
{logs.map((log, logIdx) => ( @@ -253,6 +322,8 @@ function WorkflowStepsSection({ const [stepsExpanded, setStepsExpanded] = useState(false) const [expandedSteps, setExpandedSteps] = useState({}) const [stepLogs, setStepLogs] = useState({}) + const [stepEvents, setStepEvents] = useState({}) + const [loadingEvents, setLoadingEvents] = useState({}) const [childWorkflows, setChildWorkflows] = useState([]) const [childFetchAttempted, setChildFetchAttempted] = useState(false) @@ -260,6 +331,8 @@ function WorkflowStepsSection({ setStepsExpanded(depth > 0) setExpandedSteps({}) setStepLogs({}) + setStepEvents({}) + setLoadingEvents({}) setChildWorkflows([]) setChildFetchAttempted(false) }, [workflow?.workflow_uuid]) @@ -269,12 +342,39 @@ function WorkflowStepsSection({ const totalError = steps.reduce((sum, s) => sum + (s.error || 0), 0) const total = totalSuccess + totalError + // Calculate max duration for timeline bar scaling + const maxDuration = useMemo(() => { + return Math.max(...steps.map((s) => s.duration_ms || 0), 1) + }, [steps]) + const hasSteps = steps.length > 0 || childWorkflows.length > 0 || workflow?.workflow_type === 'agent' + // Fetch step events from logs endpoint + const fetchStepEvents = useCallback(async (stepName) => { + if (!workflow?.workflow_uuid || stepEvents[stepName] || loadingEvents[stepName]) return + setLoadingEvents((prev) => ({ ...prev, [stepName]: true })) + try { + const response = await fetch( + apiUrl(`/api/workflows/${workflow.workflow_uuid}/logs?step_id=${encodeURIComponent(stepName)}&limit=50`) + ) + if (response.ok) { + const data = await response.json() + setStepEvents((prev) => ({ ...prev, [stepName]: data.events || [] })) + } + } catch (err) { + console.error('Failed to fetch step events:', err) + setStepEvents((prev) => ({ ...prev, [stepName]: [] })) + } finally { + setLoadingEvents((prev) => ({ ...prev, [stepName]: false })) + } + }, [workflow?.workflow_uuid, stepEvents, loadingEvents]) + const fetchStepLogs = useCallback(async 
(stepName) => { if (!workflow?.workflow_uuid || stepLogs[stepName]) return + // Also fetch events when fetching logs + fetchStepEvents(stepName) try { const response = await fetch( apiUrl(`/api/workflows/${workflow.workflow_uuid}/step-logs?step=${encodeURIComponent(stepName)}`) @@ -428,8 +528,11 @@ function WorkflowStepsSection({ key={step.name} step={step} logs={stepLogs[step.name]} + events={stepEvents[step.name]} isExpanded={stepIsExpanded} showLogs={showStepLogs} + maxDuration={maxDuration} + isLoadingEvents={loadingEvents[step.name]} onToggle={() => setExpandedSteps((prev) => ({ ...prev, @@ -1281,9 +1384,6 @@ export default function WorkflowRow({ depth={depth} /> )} - {liveStatus?.steps?.length > 0 && ( - - )}
)} diff --git a/src/kurt/web/client/src/styles.css b/src/kurt/web/client/src/styles.css index ab3a85c5..6eb669c7 100644 --- a/src/kurt/web/client/src/styles.css +++ b/src/kurt/web/client/src/styles.css @@ -4126,10 +4126,157 @@ html { .workflow-step-box-body { padding: var(--space-2); border-top: 1px solid var(--color-border); - max-height: 280px; + max-height: 400px; overflow: auto; } +/* Step Timeline Bar (integrated in StepBox) */ +.workflow-step-timeline-bar-container { + display: flex; + align-items: center; + gap: var(--space-2); + margin-bottom: var(--space-2); + padding: var(--space-1); + background: var(--color-bg-tertiary); + border-radius: var(--radius-sm); +} + +.workflow-step-timeline-bar { + height: 8px; + border-radius: 4px; + min-width: 8px; + transition: width 0.3s ease; +} + +.workflow-step-timeline-bar-success { + background: linear-gradient(90deg, var(--color-success) 0%, var(--color-success-hover) 100%); +} + +.workflow-step-timeline-bar-error { + background: linear-gradient(90deg, var(--color-error) 0%, #ef4444 100%); +} + +.workflow-step-timeline-bar-running { + background: linear-gradient(90deg, var(--color-info) 0%, #3b82f6 100%); + animation: pulse-bar 1.5s ease-in-out infinite; +} + +@keyframes pulse-bar { + 0%, 100% { opacity: 1; } + 50% { opacity: 0.6; } +} + +.workflow-step-timeline-duration { + font-family: var(--font-mono); + font-size: 11px; + color: var(--color-text-secondary); + white-space: nowrap; +} + +/* Step Events (integrated in StepBox) */ +.workflow-step-events { + margin-top: var(--space-2); + border: 1px solid var(--color-border); + border-radius: var(--radius-sm); + background: var(--color-bg-primary); +} + +.workflow-step-events-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: var(--space-1) var(--space-2); + background: var(--color-bg-secondary); + border-bottom: 1px solid var(--color-border); + border-radius: var(--radius-sm) var(--radius-sm) 0 0; +} + 
+.workflow-step-events-title { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-text-primary); +} + +.workflow-step-events-count { + font-size: 10px; + color: var(--color-text-tertiary); + background: var(--color-bg-tertiary); + padding: 1px 6px; + border-radius: var(--radius-sm); +} + +.workflow-step-events-loading, +.workflow-step-events-empty { + font-size: var(--text-xs); + color: var(--color-text-tertiary); + text-align: center; + padding: var(--space-2); +} + +.workflow-step-events-list { + max-height: 180px; + overflow-y: auto; +} + +.workflow-step-event { + display: flex; + align-items: flex-start; + gap: var(--space-2); + padding: var(--space-1) var(--space-2); + border-bottom: 1px solid var(--color-border); + font-size: 11px; +} + +.workflow-step-event:last-child { + border-bottom: none; +} + +.workflow-step-event-icon { + width: 14px; + flex-shrink: 0; + text-align: center; + font-size: 10px; +} + +.workflow-step-event-running .workflow-step-event-icon, +.workflow-step-event-progress .workflow-step-event-icon { + color: var(--color-info); +} + +.workflow-step-event-completed .workflow-step-event-icon, +.workflow-step-event-success .workflow-step-event-icon { + color: var(--color-success); +} + +.workflow-step-event-failed .workflow-step-event-icon, +.workflow-step-event-error .workflow-step-event-icon { + color: var(--color-error); +} + +.workflow-step-event-time { + font-family: var(--font-mono); + font-size: 10px; + color: var(--color-text-tertiary); + flex-shrink: 0; + width: 55px; +} + +.workflow-step-event-message { + flex: 1; + color: var(--color-text-primary); + word-break: break-word; +} + +.workflow-step-event-progress { + font-family: var(--font-mono); + font-size: 10px; + color: var(--color-text-secondary); + flex-shrink: 0; + background: var(--color-bg-tertiary); + padding: 1px 4px; + border-radius: var(--radius-sm); +} + .workflow-step-children, .workflow-unassigned-children { margin-top: var(--space-2); 
From cc9ee0fc0da2b35a7e37e2096436d3c18b6ba85e Mon Sep 17 00:00:00 2001 From: Kurt User Date: Wed, 28 Jan 2026 09:24:17 +0000 Subject: [PATCH 16/19] feat: Add detailed tool call tracking and step details display - Enhance track-tool hook to capture input/output summaries for each tool call - Store tool calls as events in database (not just count) - Replace empty legacy logs box with "Details" section showing: - Tool, Model, Tokens In/Out, Cost, Stop Reason - Tool calls will appear as events when workflows are run with new code Co-Authored-By: Claude Opus 4.5 --- .../web/client/src/components/WorkflowRow.jsx | 61 ++++++++++++++----- src/kurt/web/client/src/styles.css | 43 +++++++++++++ src/kurt/workflows/agents/cli.py | 47 ++++++++++++++ src/kurt/workflows/agents/executor.py | 38 +++++++++--- 4 files changed, 165 insertions(+), 24 deletions(-) diff --git a/src/kurt/web/client/src/components/WorkflowRow.jsx b/src/kurt/web/client/src/components/WorkflowRow.jsx index c47ad035..b3094e68 100644 --- a/src/kurt/web/client/src/components/WorkflowRow.jsx +++ b/src/kurt/web/client/src/components/WorkflowRow.jsx @@ -263,6 +263,53 @@ function StepBox({
)} + {/* Step details from logs metadata */} + {showLogs && logs?.length > 0 && logs[0]?.metadata && ( +
+
Details
+
+ {logs[0].tool && ( +
+ Tool + {logs[0].tool} +
+ )} + {logs[0].metadata.model && ( +
+ Model + {logs[0].metadata.model} +
+ )} + {logs[0].metadata.tokens_in != null && ( +
+ Tokens In + {logs[0].metadata.tokens_in?.toLocaleString()} +
+ )} + {logs[0].metadata.tokens_out != null && ( +
+ Tokens Out + {logs[0].metadata.tokens_out?.toLocaleString()} +
+ )} + {logs[0].metadata.cost_usd != null && ( +
+ Cost + + ${logs[0].metadata.cost_usd < 0.01 ? logs[0].metadata.cost_usd.toFixed(4) : logs[0].metadata.cost_usd.toFixed(2)} + +
+ )} + {logs[0].metadata.stop_reason && ( +
+ Stop Reason + {logs[0].metadata.stop_reason} +
+ )} +
+
+ )} + {/* Step events (from logs endpoint) */} {showLogs && (
@@ -290,20 +337,6 @@ function StepBox({ )}
)} - - {/* Legacy logs display */} - {showLogs && logs?.length > 0 && ( -
- {logs.map((log, logIdx) => ( -
- {log.message} -
- ))} -
- )} {children}
)} diff --git a/src/kurt/web/client/src/styles.css b/src/kurt/web/client/src/styles.css index 6eb669c7..1a274e4b 100644 --- a/src/kurt/web/client/src/styles.css +++ b/src/kurt/web/client/src/styles.css @@ -4173,6 +4173,49 @@ html { white-space: nowrap; } +/* Step Details (from logs metadata) */ +.workflow-step-details { + margin-top: var(--space-2); + padding: var(--space-2); + background: var(--color-bg-secondary); + border-radius: var(--radius-sm); + border: 1px solid var(--color-border); +} + +.workflow-step-details-header { + font-size: var(--text-xs); + font-weight: var(--font-semibold); + color: var(--color-text-primary); + margin-bottom: var(--space-2); + padding-bottom: var(--space-1); + border-bottom: 1px solid var(--color-border); +} + +.workflow-step-details-grid { + display: grid; + grid-template-columns: auto 1fr; + gap: var(--space-1) var(--space-3); +} + +.workflow-step-details-row { + display: contents; +} + +.workflow-step-details-label { + font-size: 11px; + color: var(--color-text-tertiary); +} + +.workflow-step-details-value { + font-size: 11px; + font-family: var(--font-mono); + color: var(--color-text-primary); +} + +.workflow-step-details-cost { + color: var(--color-violet); +} + /* Step Events (integrated in StepBox) */ .workflow-step-events { margin-top: var(--space-2); diff --git a/src/kurt/workflows/agents/cli.py b/src/kurt/workflows/agents/cli.py index 63c77f62..1b81d462 100644 --- a/src/kurt/workflows/agents/cli.py +++ b/src/kurt/workflows/agents/cli.py @@ -410,9 +410,16 @@ def track_tool_cmd(): Reads tool call JSON from stdin and appends to KURT_TOOL_LOG_FILE. This command is not meant to be called directly by users. + + Claude Code passes: + - tool_name: The tool that was used (Bash, Read, Write, etc.) 
+ - tool_use_id: Unique ID for this tool call + - tool_input: The input parameters passed to the tool + - tool_result: The result/output from the tool (may be truncated) """ import os import sys + from datetime import datetime log_file = os.environ.get("KURT_TOOL_LOG_FILE") if not log_file: @@ -420,9 +427,49 @@ def track_tool_cmd(): try: data = json.load(sys.stdin) + + # Extract tool input summary + tool_input = data.get("tool_input", {}) + input_summary = None + if isinstance(tool_input, dict): + # Extract key info based on tool type + tool_name = data.get("tool_name", "") + if tool_name == "Bash": + input_summary = tool_input.get("command", "")[:200] + elif tool_name == "Read": + input_summary = tool_input.get("file_path", "") + elif tool_name == "Write": + input_summary = tool_input.get("file_path", "") + elif tool_name == "Edit": + input_summary = tool_input.get("file_path", "") + elif tool_name in ("Glob", "Grep"): + input_summary = tool_input.get("pattern", "") + elif tool_name == "WebFetch": + input_summary = tool_input.get("url", "") + elif tool_name == "WebSearch": + input_summary = tool_input.get("query", "") + else: + # Generic: try to get first string value + for v in tool_input.values(): + if isinstance(v, str) and v: + input_summary = v[:100] + break + + # Extract result summary (truncate long results) + tool_result = data.get("tool_result", "") + result_summary = None + if tool_result: + if isinstance(tool_result, str): + result_summary = tool_result[:300] if len(tool_result) > 300 else tool_result + elif isinstance(tool_result, dict): + result_summary = str(tool_result)[:300] + record = { "tool_name": data.get("tool_name"), "tool_use_id": data.get("tool_use_id"), + "input_summary": input_summary, + "result_summary": result_summary, + "timestamp": datetime.utcnow().isoformat(), } with open(log_file, "a") as f: f.write(json.dumps(record) + "\n") diff --git a/src/kurt/workflows/agents/executor.py b/src/kurt/workflows/agents/executor.py index 
7f68a95b..5716a3a7 100644 --- a/src/kurt/workflows/agents/executor.py +++ b/src/kurt/workflows/agents/executor.py @@ -67,21 +67,25 @@ def _create_tool_tracking_settings() -> tuple[str, str]: return settings_path, tool_log_path -def _cleanup_tool_tracking(settings_path: str, tool_log_path: str) -> int: +def _cleanup_tool_tracking(settings_path: str, tool_log_path: str) -> tuple[int, list[dict]]: """ - Clean up temp files and return tool call count. + Clean up temp files and return tool call count and details. Args: settings_path: Path to temp settings file tool_log_path: Path to temp tool log file Returns: - Number of tool calls logged + Tuple of (tool_call_count, list of tool call details) """ - tool_calls = 0 + tool_calls = [] try: with open(tool_log_path) as f: - tool_calls = sum(1 for _ in f) + for line in f: + try: + tool_calls.append(json.loads(line.strip())) + except Exception: + pass except Exception: pass @@ -92,7 +96,7 @@ def _cleanup_tool_tracking(settings_path: str, tool_log_path: str) -> int: except Exception: pass - return tool_calls + return len(tool_calls), tool_calls def _get_project_root() -> str: @@ -539,8 +543,22 @@ def agent_execution_step( env=env, ) - # Get tool call count from hook logs (tracks ALL tools, not just web) - tool_calls = _cleanup_tool_tracking(settings_path, tool_log_path) + # Get tool call count and details from hook logs (tracks ALL tools, not just web) + tool_call_count, tool_call_details = _cleanup_tool_tracking(settings_path, tool_log_path) + + # Emit tool call events for each tool used + if on_progress and tool_call_details: + for tc in tool_call_details: + on_progress({ + "substep": "tool_call", + "status": "completed", + "message": f"{tc.get('tool_name', 'Unknown')}: {tc.get('input_summary', '')[:100]}", + "metadata": { + "tool_name": tc.get("tool_name"), + "input_summary": tc.get("input_summary"), + "result_summary": tc.get("result_summary"), + }, + }) # Parse JSON output if available output_data = {} @@ -601,7 +619,7 @@ 
def agent_execution_step( except subprocess.TimeoutExpired: # Ensure cleanup on timeout - tool_calls = _cleanup_tool_tracking(settings_path, tool_log_path) + tool_call_count, _ = _cleanup_tool_tracking(settings_path, tool_log_path) stop_reason = f"max_time ({max_time}s) exceeded" turns = 0 tokens_in = 0 @@ -632,7 +650,7 @@ def agent_execution_step( return { "turns": turns, - "tool_calls": tool_calls, + "tool_calls": tool_call_count, "tokens_in": tokens_in, "tokens_out": tokens_out, "cost_usd": cost, From cfb6459f1f777a66936c623b82ff151e89f07b4c Mon Sep 17 00:00:00 2001 From: Kurt User Date: Wed, 28 Jan 2026 10:47:21 +0000 Subject: [PATCH 17/19] feat: Real-time tool call tracking via direct Dolt writes - Update track-tool hook to write directly to step_events table instead of temp file (enables real-time monitoring) - Add _get_kurt_executable() to resolve absolute path for hooks - Change hook command to "kurt workflow track-tool" for proper routing - Update WorkflowRow.jsx to display tool call events with details - Add comprehensive unit tests for tool tracking functionality: - Test DB writes with workflow ID - Test input summary extraction for 7 tool types - Test metadata JSON format - Test graceful error handling Co-Authored-By: Claude Opus 4.5 --- .../web/client/src/components/WorkflowRow.jsx | 36 ++-- src/kurt/web/client/src/styles.css | 38 +++++ src/kurt/workflows/agents/cli.py | 42 +++-- src/kurt/workflows/agents/executor.py | 40 ++++- src/kurt/workflows/agents/tests/test_cli.py | 161 ++++++++++++++---- .../workflows/agents/tests/test_executor.py | 31 +++- src/kurt/workflows/toml/cli.py | 2 + 7 files changed, 279 insertions(+), 71 deletions(-) diff --git a/src/kurt/web/client/src/components/WorkflowRow.jsx b/src/kurt/web/client/src/components/WorkflowRow.jsx index b3094e68..68f85ff9 100644 --- a/src/kurt/web/client/src/components/WorkflowRow.jsx +++ b/src/kurt/web/client/src/components/WorkflowRow.jsx @@ -321,16 +321,32 @@ function StepBox({
Loading...
) : events && events.length > 0 ? (
- {events.map((event, idx) => ( -
- {getEventIcon(event.status)} - {formatEventTime(event.created_at)} - {event.message || event.status} - {event.current != null && event.total != null && ( - {event.current}/{event.total} - )} -
- ))} + {events.map((event, idx) => { + const isToolCall = event.substep === 'tool_call' + const toolMeta = event.metadata || {} + return ( +
+ {isToolCall ? '⚙' : getEventIcon(event.status)} + {formatEventTime(event.created_at)} + {event.message || event.status} + {event.current != null && event.total != null && ( + {event.current}/{event.total} + )} + {isToolCall && toolMeta.input_summary && ( +
+ Input: + {toolMeta.input_summary} +
+ )} + {isToolCall && toolMeta.result_summary && ( +
+ Output: + {toolMeta.result_summary.length > 200 ? `${toolMeta.result_summary.slice(0, 200)}...` : toolMeta.result_summary} +
+ )} +
+ ) + })}
) : (
No events recorded
diff --git a/src/kurt/web/client/src/styles.css b/src/kurt/web/client/src/styles.css index 1a274e4b..b4450502 100644 --- a/src/kurt/web/client/src/styles.css +++ b/src/kurt/web/client/src/styles.css @@ -4320,6 +4320,44 @@ html { border-radius: var(--radius-sm); } +/* Tool call event styles */ +.workflow-step-event-tool { + flex-wrap: wrap; + background: var(--color-bg-secondary); +} + +.workflow-step-event-tool .workflow-step-event-icon { + color: var(--color-info); +} + +.workflow-step-event-tool-detail { + width: 100%; + margin-top: var(--space-1); + margin-left: 22px; + padding: var(--space-1); + background: var(--color-bg-primary); + border-radius: var(--radius-sm); + border: 1px solid var(--color-border); + display: flex; + flex-direction: column; + gap: 2px; +} + +.workflow-step-event-tool-label { + font-size: 10px; + color: var(--color-text-tertiary); + font-weight: var(--font-medium); +} + +.workflow-step-event-tool-code { + font-family: var(--font-mono); + font-size: 11px; + color: var(--color-text-secondary); + word-break: break-all; + white-space: pre-wrap; + background: transparent; +} + .workflow-step-children, .workflow-unassigned-children { margin-top: var(--space-2); diff --git a/src/kurt/workflows/agents/cli.py b/src/kurt/workflows/agents/cli.py index 1b81d462..30192f3d 100644 --- a/src/kurt/workflows/agents/cli.py +++ b/src/kurt/workflows/agents/cli.py @@ -408,7 +408,7 @@ def track_tool_cmd(): """ Internal command called by PostToolUse hook. - Reads tool call JSON from stdin and appends to KURT_TOOL_LOG_FILE. + Writes tool call events directly to Dolt for real-time monitoring. This command is not meant to be called directly by users. 
Claude Code passes: @@ -421,19 +421,18 @@ def track_tool_cmd(): import sys from datetime import datetime - log_file = os.environ.get("KURT_TOOL_LOG_FILE") - if not log_file: - sys.exit(0) # No tracking configured, skip silently + workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") + if not workflow_id: + sys.exit(0) # No workflow context, skip silently try: data = json.load(sys.stdin) + tool_name = data.get("tool_name", "Unknown") # Extract tool input summary tool_input = data.get("tool_input", {}) input_summary = None if isinstance(tool_input, dict): - # Extract key info based on tool type - tool_name = data.get("tool_name", "") if tool_name == "Bash": input_summary = tool_input.get("command", "")[:200] elif tool_name == "Read": @@ -456,23 +455,36 @@ def track_tool_cmd(): break # Extract result summary (truncate long results) - tool_result = data.get("tool_result", "") + tool_result = data.get("tool_result") or data.get("result") or data.get("output") or "" result_summary = None if tool_result: if isinstance(tool_result, str): result_summary = tool_result[:300] if len(tool_result) > 300 else tool_result elif isinstance(tool_result, dict): - result_summary = str(tool_result)[:300] - - record = { - "tool_name": data.get("tool_name"), + if "content" in tool_result: + result_summary = str(tool_result["content"])[:300] + else: + result_summary = str(tool_result)[:300] + + # Write directly to Dolt step_events for real-time monitoring + from kurt.db.dolt import DoltDB + from pathlib import Path + + db = DoltDB(Path.cwd()) + metadata_json = json.dumps({ + "tool_name": tool_name, "tool_use_id": data.get("tool_use_id"), "input_summary": input_summary, "result_summary": result_summary, - "timestamp": datetime.utcnow().isoformat(), - } - with open(log_file, "a") as f: - f.write(json.dumps(record) + "\n") + }) + message = f"{tool_name}: {input_summary[:80] if input_summary else ''}" + + db.execute( + """INSERT INTO step_events (run_id, step_id, substep, status, message, 
metadata_json) + VALUES (?, ?, ?, ?, ?, ?)""", + [workflow_id, "agent_execution", "tool_call", "completed", message, metadata_json] + ) + except Exception: pass # Don't fail the hook, Claude should continue diff --git a/src/kurt/workflows/agents/executor.py b/src/kurt/workflows/agents/executor.py index 5716a3a7..c3db54e5 100644 --- a/src/kurt/workflows/agents/executor.py +++ b/src/kurt/workflows/agents/executor.py @@ -31,6 +31,31 @@ logger = logging.getLogger(__name__) +def _get_kurt_executable() -> str: + """Get the absolute path to the kurt executable.""" + import shutil + import sys + + # First try to find kurt in PATH + kurt_path = shutil.which("kurt") + if kurt_path: + return kurt_path + + # Try virtualenv bin directory + venv_kurt = Path(sys.prefix) / "bin" / "kurt" + if venv_kurt.exists(): + return str(venv_kurt) + + # Try project root .venv + project_root = get_config_file_path().parent + local_venv_kurt = project_root / ".venv" / "bin" / "kurt" + if local_venv_kurt.exists(): + return str(local_venv_kurt) + + # Fall back to "kurt" and hope it's in PATH when the hook runs + return "kurt" + + def _create_tool_tracking_settings() -> tuple[str, str]: """ Create temp settings file with PostToolUse hook for tool call tracking. 
@@ -42,6 +67,11 @@ def _create_tool_tracking_settings() -> tuple[str, str]: tool_log_fd, tool_log_path = tempfile.mkstemp(suffix=".jsonl", prefix="kurt_tools_") os.close(tool_log_fd) + # Get absolute path to kurt executable for the hook command + kurt_path = _get_kurt_executable() + # Hook command - workflow_id will be passed via KURT_PARENT_WORKFLOW_ID env var + hook_command = f"{kurt_path} workflow track-tool" + # Create temp settings file with PostToolUse hook settings = { "hooks": { @@ -51,7 +81,7 @@ def _create_tool_tracking_settings() -> tuple[str, str]: "hooks": [ { "type": "command", - "command": "kurt agents track-tool", + "command": hook_command, "timeout": 5, } ], @@ -482,12 +512,10 @@ def agent_execution_step( # Set up environment for subprocess env = os.environ.copy() - env["KURT_TOOL_LOG_FILE"] = tool_log_path - # Pass parent workflow ID and step name so child workflows can be nested + env["KURT_TOOL_LOG_FILE"] = tool_log_path # Legacy, kept for backward compat + # Pass workflow ID for tool tracking and nested workflow support if run_id: - env["KURT_PARENT_WORKFLOW_ID"] = run_id - # For agent workflows, use "agent_execution" as the step name - # This allows frontend to group child workflows under the agent step + env["KURT_PARENT_WORKFLOW_ID"] = run_id # Used by tool hook for real-time events env["KURT_PARENT_STEP_NAME"] = "agent_execution" # Add workflow directory to PYTHONPATH for custom tool imports diff --git a/src/kurt/workflows/agents/tests/test_cli.py b/src/kurt/workflows/agents/tests/test_cli.py index 36ce6ba4..89adb69d 100644 --- a/src/kurt/workflows/agents/tests/test_cli.py +++ b/src/kurt/workflows/agents/tests/test_cli.py @@ -2,6 +2,7 @@ from __future__ import annotations +import json from unittest.mock import patch from click.testing import CliRunner @@ -319,11 +320,11 @@ def test_run_from_nonexistent_path(self, cli_runner: CliRunner): class TestTrackToolCommand: """Tests for track-tool hidden command.""" - def 
test_track_tool_no_env_var(self, cli_runner: CliRunner): - """Test track-tool exits silently when KURT_TOOL_LOG_FILE not set.""" + def test_track_tool_no_workflow_id(self, cli_runner: CliRunner): + """Test track-tool exits silently when KURT_PARENT_WORKFLOW_ID not set.""" from kurt.workflows.agents.cli import agents_group - # Without KURT_TOOL_LOG_FILE, command should exit 0 + # Without KURT_PARENT_WORKFLOW_ID, command should exit 0 result = cli_runner.invoke( agents_group, ["track-tool"], @@ -331,46 +332,142 @@ def test_track_tool_no_env_var(self, cli_runner: CliRunner): ) assert result.exit_code == 0 - def test_track_tool_with_env_var(self, cli_runner: CliRunner, tmp_path): - """Test track-tool logs tool calls to file.""" + def test_track_tool_writes_to_db(self, cli_runner: CliRunner): + """Test track-tool writes tool call events to Dolt database.""" from kurt.workflows.agents.cli import agents_group - log_file = tmp_path / "tools.jsonl" + captured_calls = [] + + class MockDoltDB: + def __init__(self, path): + pass + + def execute(self, sql, params): + captured_calls.append({"sql": sql, "params": params}) + + with patch("kurt.db.dolt.DoltDB", MockDoltDB): + result = cli_runner.invoke( + agents_group, + ["track-tool"], + input='{"tool_name": "Bash", "tool_use_id": "tool-123", "tool_input": {"command": "echo hello"}}', + env={"KURT_PARENT_WORKFLOW_ID": "wf-abc-123"}, + ) - result = cli_runner.invoke( - agents_group, - ["track-tool"], - input='{"tool_name": "Bash", "tool_use_id": "tool-123"}', - env={"KURT_TOOL_LOG_FILE": str(log_file)}, - ) assert result.exit_code == 0 + assert len(captured_calls) == 1 + + # Verify SQL and params + call = captured_calls[0] + assert "INSERT INTO step_events" in call["sql"] + assert call["params"][0] == "wf-abc-123" # run_id (workflow_id) + assert call["params"][1] == "agent_execution" # step_id + assert call["params"][2] == "tool_call" # substep + assert call["params"][3] == "completed" # status + assert "Bash" in call["params"][4] 
# message + assert "echo hello" in call["params"][4] # input summary in message + + def test_track_tool_extracts_input_summary(self, cli_runner: CliRunner): + """Test track-tool extracts correct input summary for different tool types.""" + from kurt.workflows.agents.cli import agents_group + + test_cases = [ + ("Bash", {"command": "ls -la"}, "ls -la"), + ("Read", {"file_path": "/path/to/file.txt"}, "/path/to/file.txt"), + ("Write", {"file_path": "/output.txt"}, "/output.txt"), + ("Glob", {"pattern": "**/*.py"}, "**/*.py"), + ("Grep", {"pattern": "TODO"}, "TODO"), + ("WebFetch", {"url": "https://example.com"}, "https://example.com"), + ("WebSearch", {"query": "python docs"}, "python docs"), + ] + + for tool_name, tool_input, expected_summary in test_cases: + captured_calls = [] + + class MockDoltDB: + def __init__(self, path): + pass + + def execute(self, sql, params): + captured_calls.append({"sql": sql, "params": params}) + + input_json = json.dumps({"tool_name": tool_name, "tool_input": tool_input}) + + with patch("kurt.db.dolt.DoltDB", MockDoltDB): + result = cli_runner.invoke( + agents_group, + ["track-tool"], + input=input_json, + env={"KURT_PARENT_WORKFLOW_ID": "wf-test"}, + ) - # Verify file was written - assert log_file.exists() - content = log_file.read_text() - assert "Bash" in content - assert "tool-123" in content + assert result.exit_code == 0, f"Failed for {tool_name}" + assert len(captured_calls) == 1, f"No DB call for {tool_name}" - def test_track_tool_appends(self, cli_runner: CliRunner, tmp_path): - """Test track-tool appends to existing file.""" + # Verify input summary in message + message = captured_calls[0]["params"][4] + assert tool_name in message + assert expected_summary[:50] in message, f"Expected '{expected_summary}' in message for {tool_name}" + + def test_track_tool_metadata_json(self, cli_runner: CliRunner): + """Test track-tool stores correct metadata in JSON format.""" from kurt.workflows.agents.cli import agents_group - log_file = 
tmp_path / "tools.jsonl" - log_file.write_text('{"tool_name": "Read", "tool_use_id": "1"}\n') + captured_calls = [] + + class MockDoltDB: + def __init__(self, path): + pass + + def execute(self, sql, params): + captured_calls.append({"sql": sql, "params": params}) + + input_data = { + "tool_name": "Read", + "tool_use_id": "read-456", + "tool_input": {"file_path": "/etc/hosts"}, + "tool_result": "127.0.0.1 localhost", + } + + with patch("kurt.db.dolt.DoltDB", MockDoltDB): + result = cli_runner.invoke( + agents_group, + ["track-tool"], + input=json.dumps(input_data), + env={"KURT_PARENT_WORKFLOW_ID": "wf-test"}, + ) - result = cli_runner.invoke( - agents_group, - ["track-tool"], - input='{"tool_name": "Write", "tool_use_id": "2"}', - env={"KURT_TOOL_LOG_FILE": str(log_file)}, - ) assert result.exit_code == 0 + assert len(captured_calls) == 1 + + # Parse and verify metadata JSON + metadata_json = captured_calls[0]["params"][5] + metadata = json.loads(metadata_json) + assert metadata["tool_name"] == "Read" + assert metadata["tool_use_id"] == "read-456" + assert metadata["input_summary"] == "/etc/hosts" + assert "127.0.0.1" in metadata["result_summary"] + + def test_track_tool_handles_db_error(self, cli_runner: CliRunner): + """Test track-tool handles database errors gracefully.""" + from kurt.workflows.agents.cli import agents_group + + class MockDoltDB: + def __init__(self, path): + pass - # Verify both entries exist - content = log_file.read_text() - assert "Read" in content - assert "Write" in content - assert content.count("\n") == 2 + def execute(self, sql, params): + raise Exception("Database connection failed") + + with patch("kurt.db.dolt.DoltDB", MockDoltDB): + result = cli_runner.invoke( + agents_group, + ["track-tool"], + input='{"tool_name": "Bash"}', + env={"KURT_PARENT_WORKFLOW_ID": "wf-test"}, + ) + + # Should still exit 0 (don't fail the Claude hook) + assert result.exit_code == 0 def test_track_tool_hidden(self, cli_runner: CliRunner): """Test 
track-tool command is hidden from help.""" diff --git a/src/kurt/workflows/agents/tests/test_executor.py b/src/kurt/workflows/agents/tests/test_executor.py index f0c393f7..221eb2d8 100644 --- a/src/kurt/workflows/agents/tests/test_executor.py +++ b/src/kurt/workflows/agents/tests/test_executor.py @@ -26,10 +26,9 @@ def test_create_tool_tracking_settings(self, tmp_path): assert "hooks" in settings assert "PostToolUse" in settings["hooks"] assert settings["hooks"]["PostToolUse"][0]["matcher"] == "*" - assert ( - "kurt agents track-tool" - in settings["hooks"]["PostToolUse"][0]["hooks"][0]["command"] - ) + # Hook command should use "kurt workflow track-tool" + hook_cmd = settings["hooks"]["PostToolUse"][0]["hooks"][0]["command"] + assert "workflow track-tool" in hook_cmd # Verify tool log file exists assert os.path.exists(tool_log_path) @@ -44,6 +43,16 @@ def test_create_tool_tracking_settings(self, tmp_path): except Exception: pass + def test_get_kurt_executable_finds_path(self): + """Test _get_kurt_executable finds kurt in PATH or venv.""" + from kurt.workflows.agents.executor import _get_kurt_executable + + result = _get_kurt_executable() + + # Should return a path (either from PATH or venv) + assert result is not None + assert "kurt" in result + def test_cleanup_tool_tracking_counts_lines(self, tmp_path): """Test _cleanup_tool_tracking counts tool calls correctly.""" from kurt.workflows.agents.executor import _cleanup_tool_tracking @@ -59,9 +68,13 @@ def test_cleanup_tool_tracking_counts_lines(self, tmp_path): '{"tool_name": "Write", "tool_use_id": "3"}\n' ) - count = _cleanup_tool_tracking(str(settings_path), str(tool_log_path)) + count, details = _cleanup_tool_tracking(str(settings_path), str(tool_log_path)) assert count == 3 + assert len(details) == 3 + assert details[0]["tool_name"] == "Bash" + assert details[1]["tool_name"] == "Read" + assert details[2]["tool_name"] == "Write" # Verify files were deleted assert not settings_path.exists() assert not 
tool_log_path.exists() @@ -76,9 +89,10 @@ def test_cleanup_tool_tracking_empty_file(self, tmp_path): settings_path.write_text("{}") tool_log_path.write_text("") - count = _cleanup_tool_tracking(str(settings_path), str(tool_log_path)) + count, details = _cleanup_tool_tracking(str(settings_path), str(tool_log_path)) assert count == 0 + assert details == [] def test_cleanup_tool_tracking_missing_file(self, tmp_path): """Test _cleanup_tool_tracking handles missing files gracefully.""" @@ -87,9 +101,10 @@ def test_cleanup_tool_tracking_missing_file(self, tmp_path): settings_path = tmp_path / "nonexistent_settings.json" tool_log_path = tmp_path / "nonexistent_tools.jsonl" - # Should not raise, should return 0 - count = _cleanup_tool_tracking(str(settings_path), str(tool_log_path)) + # Should not raise, should return 0 and empty list + count, details = _cleanup_tool_tracking(str(settings_path), str(tool_log_path)) assert count == 0 + assert details == [] class TestResolveTemplate: diff --git a/src/kurt/workflows/toml/cli.py b/src/kurt/workflows/toml/cli.py index 5652dd38..973efb24 100644 --- a/src/kurt/workflows/toml/cli.py +++ b/src/kurt/workflows/toml/cli.py @@ -1324,6 +1324,7 @@ def workflow_group(): from kurt.workflows.agents.cli import init_cmd as agents_init_cmd # noqa: E402 from kurt.workflows.agents.cli import list_cmd as agents_list_cmd # noqa: E402 from kurt.workflows.agents.cli import show_cmd as agents_show_cmd # noqa: E402 +from kurt.workflows.agents.cli import track_tool_cmd as agents_track_tool_cmd # noqa: E402 from kurt.workflows.agents.cli import validate_cmd as agents_validate_cmd # noqa: E402 workflow_group.add_command(agents_list_cmd, name="list") @@ -1332,3 +1333,4 @@ def workflow_group(): workflow_group.add_command(agents_history_cmd, name="history") workflow_group.add_command(agents_init_cmd, name="init") workflow_group.add_command(agents_create_cmd, name="create") +workflow_group.add_command(agents_track_tool_cmd, name="track-tool") # Hidden 
internal command From 933216d9c3e6ae8554c39c3fad65f73349541f6e Mon Sep 17 00:00:00 2001 From: Kurt User Date: Wed, 28 Jan 2026 10:53:40 +0000 Subject: [PATCH 18/19] test: Update test for revised workflow run help text Co-Authored-By: Claude Opus 4.5 --- src/kurt/cli/tests/test_workflow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kurt/cli/tests/test_workflow.py b/src/kurt/cli/tests/test_workflow.py index 7d4f4346..1467d450 100644 --- a/src/kurt/cli/tests/test_workflow.py +++ b/src/kurt/cli/tests/test_workflow.py @@ -179,7 +179,7 @@ def test_run_help(self, cli_runner: CliRunner): """Test run command shows help.""" result = invoke_cli(cli_runner, run_cmd, ["--help"]) assert_cli_success(result) - assert_output_contains(result, "Run a workflow from a TOML file") + assert_output_contains(result, "Run a workflow from a TOML or Markdown file") def test_run_shows_options(self, cli_runner: CliRunner): """Test run command lists options in help.""" From 321d02b7135cff0f8993c8d1cb856eac164923d2 Mon Sep 17 00:00:00 2001 From: Kurt User Date: Wed, 28 Jan 2026 12:09:27 +0000 Subject: [PATCH 19/19] fix: Resolve linting errors (unused imports, unsorted imports) - Remove unused datetime import in agents/cli.py - Remove unused pytest import in test_status.py - Remove unused adapter variable in test_adapters.py - Fix import sorting in dolt.py Co-Authored-By: Claude Opus 4.5 --- src/kurt/db/dolt.py | 2 +- src/kurt/integrations/research/tests/test_adapters.py | 2 -- src/kurt/observability/tests/test_status.py | 2 -- src/kurt/workflows/agents/cli.py | 4 ++-- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/kurt/db/dolt.py b/src/kurt/db/dolt.py index 667a7343..132d5163 100644 --- a/src/kurt/db/dolt.py +++ b/src/kurt/db/dolt.py @@ -728,8 +728,8 @@ def _query_embedded(self, sql: str, params: list[Any] | None = None) -> QueryRes def _execute_embedded(self, sql: str, params: list[Any] | None = None, max_retries: int = 5) -> QueryResult: """Execute 
statement using dolt CLI with retry for concurrent access.""" - import time import random + import time interpolated = self._interpolate_params(sql, params) diff --git a/src/kurt/integrations/research/tests/test_adapters.py b/src/kurt/integrations/research/tests/test_adapters.py index ed85a77d..1b475510 100644 --- a/src/kurt/integrations/research/tests/test_adapters.py +++ b/src/kurt/integrations/research/tests/test_adapters.py @@ -530,8 +530,6 @@ def test_search_linkedin(self, mock_post): def test_keyword_filtering(self): """Test keyword filtering in fetch_signals.""" - adapter = ApifyAdapter({"api_token": "test"}) - # Test signal that matches keywords signal_match = Signal( signal_id="test_1", diff --git a/src/kurt/observability/tests/test_status.py b/src/kurt/observability/tests/test_status.py index a4ef5b11..cb14e284 100644 --- a/src/kurt/observability/tests/test_status.py +++ b/src/kurt/observability/tests/test_status.py @@ -6,8 +6,6 @@ from datetime import datetime, timedelta from unittest.mock import MagicMock -import pytest - from kurt.observability.status import ( _build_steps_array, _calculate_duration, diff --git a/src/kurt/workflows/agents/cli.py b/src/kurt/workflows/agents/cli.py index 30192f3d..662d8c4c 100644 --- a/src/kurt/workflows/agents/cli.py +++ b/src/kurt/workflows/agents/cli.py @@ -419,7 +419,6 @@ def track_tool_cmd(): """ import os import sys - from datetime import datetime workflow_id = os.environ.get("KURT_PARENT_WORKFLOW_ID") if not workflow_id: @@ -467,9 +466,10 @@ def track_tool_cmd(): result_summary = str(tool_result)[:300] # Write directly to Dolt step_events for real-time monitoring - from kurt.db.dolt import DoltDB from pathlib import Path + from kurt.db.dolt import DoltDB + db = DoltDB(Path.cwd()) metadata_json = json.dumps({ "tool_name": tool_name,