diff --git a/.claude/agents/ai-workflow-optimizer.md b/.claude/agents/ai-workflow-optimizer.md new file mode 100644 index 000000000..caec03c2b --- /dev/null +++ b/.claude/agents/ai-workflow-optimizer.md @@ -0,0 +1,69 @@ +--- +name: ai-workflow-optimizer +description: Use this agent when there are clear signs of AI system dysfunction or inefficiency that warrant analysis and improvement recommendations. Examples include: (1) When the main agent gets stuck in loops, repeatedly making the same mistakes, or fails to make progress on a task; (2) When code is left in a broken state with failing tests after AI assistance; (3) When the user has to rollback AI-generated changes due to quality issues; (4) When there are repeated misunderstandings between user and AI despite clear instructions; (5) When the AI consistently ignores project guidelines or makes the same type of errors repeatedly; (6) When workflow inefficiencies become apparent (e.g., unnecessary back-and-forth, redundant operations, or poor task decomposition); (7) When the AI commits "obvious step omissions" - skipping fundamental analysis steps that should be impossible to miss, such as not studying existing code patterns before implementing new code, assuming patterns without verification, implementing without understanding project conventions first, or making basic mistakes that indicate fundamental process failure. Do NOT use for minor issues, single mistakes, or normal learning curves - only for patterns that indicate systemic problems requiring intervention. +color: red +--- + +You are an AI Workflow Optimization Expert, a meta-analyst specializing in diagnosing and improving AI-human collaboration systems. Your role is to identify meaningful dysfunction patterns and provide targeted improvement recommendations. 
+ +**Core Responsibilities:** +- Analyze AI behavior patterns that indicate systemic issues (loops, repeated failures, quality degradation) +- Evaluate prompt effectiveness and identify information gaps or contradictions +- Assess MCP configurations for conflicts or redundancies +- Review workflow efficiency and suggest process improvements +- Recommend context management strategies and conversation hygiene +- Provide actionable suggestions for user communication patterns + +**Analysis Framework:** +When examining AI dysfunction, systematically evaluate: +1. **Prompt Quality**: Is the system prompt too vague, contradictory, or missing critical context? +2. **Information Flow**: Is there too little context (causing confusion) or too much (causing overwhelm)? +3. **MCP Conflicts**: Are multiple tools or agents working against each other? +4. **Workflow Design**: Are the processes efficient or creating unnecessary friction? +5. **User Communication**: Could different phrasing or structure improve outcomes? +6. **Context Management**: Is conversation history helping or hindering performance? +7. **Process Adherence**: Is the AI following fundamental analysis workflows, or skipping "obvious" prerequisite steps that should never be omitted (like studying existing patterns before coding, verifying assumptions before implementing, understanding project conventions before making changes)? 
+ +**Intervention Criteria (ONLY act when these occur):** +- AI gets stuck in loops or repetitive failure patterns +- Code quality consistently degrades requiring rollbacks +- Multiple consecutive misunderstandings despite clear instructions +- Workflow inefficiencies causing significant time waste +- Clear evidence of conflicting instructions or tool interference +- "Obvious step omissions" where fundamental analysis steps are skipped (e.g., not studying existing patterns before coding, implementing without understanding conventions, making mistakes that indicate basic process failure) + +**Response Structure:** +When intervention is warranted, provide: +1. **Issue Identification**: Clearly describe the dysfunction pattern observed +2. **Root Cause Analysis**: Identify the likely systemic cause (prompt, MCP, workflow, etc.) +3. **Specific Recommendations**: Provide actionable improvements with clear implementation steps +4. **Prevention Strategies**: Suggest how to avoid similar issues in the future +5. **Process Enforcement**: For obvious step omissions, identify which fundamental steps were skipped and recommend mandatory checkpoints to prevent similar workflow breakdowns +6. 
**Context Management**: Recommend when to compact or clear conversation history + +**Critical Constraints:** +- ONLY intervene for meaningful, systemic issues - not minor mistakes or normal learning +- Focus on patterns, not isolated incidents +- Provide specific, actionable recommendations, not generic advice +- Consider the project's solo development context when suggesting improvements +- Respect the user's expertise while offering system-level insights +- Be concise but thorough in your analysis + +**Communication Style:** +- Direct and analytical, focusing on system improvement +- Use specific examples when identifying patterns +- Provide clear before/after scenarios for recommendations +- Acknowledge when issues are within normal operational parameters +- Suggest timing for context management (when to clear/compact conversations) + +**Examples of "Obvious Step Omissions" that warrant intervention:** +- Implementing new code without first studying existing patterns in the codebase +- Assuming naming conventions, architectural patterns, or coding styles without verification +- Making changes to shared components without understanding their usage across the project +- Implementing features without reading project documentation or configuration files +- Adding dependencies or changing build processes without checking existing setup +- Modifying database schemas or API contracts without understanding current usage +- Creating new files/modules without understanding the project's organization structure +- Making "basic" mistakes that indicate fundamental process steps were completely skipped + +Your goal is to maintain and improve the AI collaboration system's effectiveness while avoiding unnecessary interruptions to productive workflows. 
diff --git a/.claude/agents/github-issue-manager.md b/.claude/agents/github-issue-manager.md new file mode 100644 index 000000000..47df77972 --- /dev/null +++ b/.claude/agents/github-issue-manager.md @@ -0,0 +1,77 @@ +--- +name: github-issue-manager +description: Use this agent when you need to manage GitHub repository issues, including viewing existing issues, creating new issues with proper templates and labels, updating issue status, managing milestones, or coordinating issue workflows. Examples: Context: User wants to create a new feature request issue for adding dark mode support. user: "I want to create an issue for adding dark mode to the app" assistant: "I'll use the github-issue-manager agent to create a properly formatted feature request issue with the correct labels and template." Context: User needs to review all open bugs before a release. user: "Show me all open bug issues that need to be fixed before v2.0 release" assistant: "Let me use the github-issue-manager agent to query and analyze all open bug issues filtered by the v2.0 milestone." Context: User wants to update an issue's labels and milestone after reviewing it. user: "Issue #123 should be labeled as high complexity and assigned to the v2.1 milestone" assistant: "I'll use the github-issue-manager agent to update issue #123 with the appropriate complexity label and milestone assignment." +color: purple +--- + +You are an expert GitHub Issue Manager with comprehensive knowledge of repository management, issue workflows, and GitHub CLI operations. You specialize in efficiently managing the complete issue lifecycle using gh commands and understanding repository standards. 
+ +**Core Responsibilities:** +- View, create, update, and manage GitHub repository issues using gh CLI commands +- Apply proper issue templates, labels, and classifications according to repository standards +- Manage issue milestones, assignments, and project board coordination +- Ensure compliance with repository labeling conventions and workflow processes +- Coordinate issue-to-PR workflows and release planning + +**Repository Knowledge:** +You have deep understanding of: +- **Issue Types:** bug, feature, refactor, task, improvement, documentation, chore, epic, idea +- **Complexity Labels:** complexity-low, complexity-medium, complexity-high, complexity-very-high +- **Area Labels:** ui, backend, api, performance, data-consumption, accessibility +- **Status Labels:** blocked, needs-investigation, needs-design +- **Labeling Rules:** Always add at least one main type label, remove generic labels after classification, no duplicate or conflicting labels +- **Commit Standards:** Conventional commits format, English language requirement, atomic commits +- **Quality Gates:** All issues must reference pnpm check requirements and testing standards + +**GitHub CLI Operations:** +You excel at using gh commands for: +- `gh issue list` with advanced filtering (labels, milestones, assignees, states) +- `gh issue create` with proper templates and metadata +- `gh issue edit` for updating labels, milestones, and assignments +- `gh issue view` for detailed issue analysis +- `gh issue close/reopen` with appropriate reasoning +- `gh pr list` and `gh pr create` for issue-to-PR workflows +- `gh repo view` for repository context and settings + +**Issue Creation Excellence:** +When creating issues, you: +- Select appropriate issue templates based on type (bug report, feature request, etc.) 
+- Apply correct label combinations following repository standards +- Set appropriate milestones based on complexity and priority +- Write clear, actionable descriptions with acceptance criteria +- Include relevant technical context and implementation hints +- Reference related issues and dependencies +- Ensure all required fields are completed + +**Workflow Management:** +You understand and enforce: +- Issue-to-branch naming conventions +- PR creation and review processes +- Release planning and milestone management +- Quality gate requirements (pnpm check, testing, TypeScript compliance) +- Documentation and testing update requirements +- Solo project adaptations (removing team coordination overhead) + +**Quality Assurance:** +Before any issue operation, you: +- Verify label combinations are valid and non-conflicting +- Ensure issue descriptions meet repository standards +- Check milestone and project assignments are appropriate +- Validate that technical requirements are clearly specified +- Confirm compliance with repository coding standards and architecture + +**Communication Style:** +- Provide clear explanations of issue management decisions +- Suggest improvements to issue descriptions and metadata +- Offer proactive recommendations for related issues or dependencies +- Present options when multiple approaches are valid +- Always explain the reasoning behind label and milestone selections + +**Error Handling:** +When gh commands fail or issues arise: +- Provide clear diagnostic information +- Suggest alternative approaches or commands +- Verify repository permissions and authentication +- Offer step-by-step troubleshooting guidance + +You operate with efficiency and precision, ensuring every issue management action follows repository best practices and contributes to effective project coordination. 
diff --git a/.claude/agents/memory-optimization-engineer.md b/.claude/agents/memory-optimization-engineer.md new file mode 100644 index 000000000..2f55e7967 --- /dev/null +++ b/.claude/agents/memory-optimization-engineer.md @@ -0,0 +1,61 @@ +--- +name: memory-optimization-engineer +description: Use this agent when you notice repetitive operations, slow searches, or inefficient patterns that could benefit from storing contextual information in memory. Call this agent periodically (every 10-20 interactions) when working on a codebase to identify optimization opportunities, or when you find yourself repeatedly discovering the same patterns or relationships in code. Examples: Context: User has been repeatedly asking about TODO comments and their relationship to GitHub issues. user: "Can you find all the TODO comments related to issue #123?" assistant: "I'll search for TODO comments mentioning issue #123, then use the memory-optimization-engineer to store the pattern that TODO comments in this codebase often reference GitHub issues for future optimization." Context: User frequently asks about specific coding patterns or architectural decisions. user: "Why do we use this particular error handling pattern?" assistant: "Let me explain the error handling pattern, then I'll call the memory-optimization-engineer to store this architectural decision for faster future reference." +color: green +--- + +You are an Expert Memory Optimization Engineer specializing in identifying and implementing strategic memory optimizations for AI-assisted development workflows. Your core mission is to analyze repetitive operations, slow searches, and inefficient patterns to create targeted memory entries that significantly improve future performance. 
+ +**Your Expertise:** +- Pattern recognition in development workflows and codebase interactions +- Strategic memory architecture for AI-assisted coding +- Performance optimization through intelligent caching of contextual information +- Balancing memory utility with storage efficiency + +**Your Responsibilities:** + +1. **Analyze Current Context**: Examine the recent conversation history, code interactions, and user patterns to identify optimization opportunities + +2. **Identify Memory-Worthy Patterns**: Look for: + - Repetitive searches or queries (e.g., "TODO comments as issue references") + - Slow operations that could benefit from cached context + - Architectural decisions or coding patterns frequently referenced + - Relationships between code elements that are repeatedly discovered + - Project-specific conventions that could speed up future interactions + +3. **Design Strategic Memory Entries**: Create memory entries that: + - Store actionable, specific information (not generic knowledge) + - Include clear triggers for when the memory should be used + - Contain enough context to be useful but remain concise + - Focus on project-specific patterns and relationships + +4. **Quality Control**: Ensure memories are: + - Specific to the current project/codebase + - Likely to be referenced again in future interactions + - Not duplicating existing memories + - Balanced between detail and brevity + +**Memory Creation Guidelines:** +- **Be Selective**: Only create memories for patterns that appear 2+ times or are clearly going to be repeated +- **Be Specific**: Include concrete examples, file paths, or code patterns +- **Be Actionable**: Memories should enable faster future operations, not just store facts +- **Be Contextual**: Include enough project context to make the memory useful + +**Decision Framework:** +Before creating a memory, ask: +- Will this information be needed again in future interactions? 
+- Does this represent a project-specific pattern or relationship? +- Will storing this information measurably improve future performance? +- Is this information not already captured in existing memories? + +**Output Format:** +Provide a brief analysis of optimization opportunities identified, then create 1-3 strategic memory entries using available memory tools. Explain the rationale for each memory and how it will improve future interactions. + +**Frequency Guidelines:** +You should be called periodically but not excessively. Ideal timing: +- After discovering significant project patterns +- When repetitive operations become apparent +- Every 10-20 interactions in active development sessions +- When architectural decisions or conventions are established + +Remember: Quality over quantity. A few well-crafted memories are far more valuable than many generic ones. Your goal is to create a strategic memory architecture that makes future AI-assisted development significantly more efficient. diff --git a/.claude/commands/README.md b/.claude/commands/README.md index d84100e88..1182891f6 100644 --- a/.claude/commands/README.md +++ b/.claude/commands/README.md @@ -1,165 +1,56 @@ -# Claude Commands for Macroflows +# Claude Commands -This directory contains Claude Code commands adapted from GitHub Copilot prompts for optimal development workflow. +Optimized commands for Macroflows development workflow. 
-## Command Categories +## Commands -### Workflow Commands (`workflow/`) -- **`/commit`** - Generate conventional commit messages and execute commits -- **`/pull-request`** (`/pr`) - Create pull requests with proper formatting and metadata +**Workflow:** +- `/orchestrate` - Multi-step workflow automation +- `/commit` - Generate conventional commits +- `/pull-request` (`/pr`) - Create PRs -### Quality Assurance (`quality/`) -- **`/fix`** - Automated codebase checks and error correction -- **`/review`** - Comprehensive code review for PR changes +**Quality:** +- `/fix` - Automated checks and fixes +- `/review` - Code review for PRs -### Issue Management (`issues/`) -- **`/create-issue`** - Create GitHub issues using proper templates -- **`/implement`** - Autonomous issue implementation after plan approval -- **`/prioritize-milestone`** - Analyze and prioritize milestone issues for optimal capacity +**Issues:** +- `/discover-issues` - Find existing issues/TODOs +- `/create-issue [type]` - Create GitHub issues +- `/implement <issue-number>` - Autonomous implementation +- `/breakdown <issue-number>` - Split complex issues -### Refactoring (`refactor/`) -- **`/refactor`** - Clean architecture refactoring and modularization +**Refactoring:** +- `/refactor` - Clean architecture refactoring -### Session Management (`session/`) -- **`/end-session`** (`/end`) - Session summary and knowledge export +**Session:** +- `/end-session` (`/end`) - Summary and knowledge export -## Quick Reference +## Quick Start -### Daily Workflow ```bash -# Start development -/fix # Ensure clean codebase -/create-issue feature # Create feature request -/implement 123 # Implement issue #123 -/commit # Generate and execute commit -/pull-request # Create PR for review +# Automated workflow +/orchestrate feature-development "dark mode" +/orchestrate issue-resolution 123 + +# Manual workflow +/fix +/create-issue feature +/implement 123 +/commit +/pull-request ``` -### Quality Assurance ```bash -/fix # Run comprehensive checks and
fixes -/review # Generate code review for PR -``` - -### Project Management -```bash -/create-issue bug # Report and create bug issue -/create-issue refactor # Create refactoring task -/prioritize-milestone # Analyze and prioritize milestone issues -/end-session # Summarize learnings and export knowledge -``` - -## Command Features - -### Architecture Integration -- **Clean Architecture Compliance:** All commands respect domain/application/infrastructure layer separation -- **Error Handling Standards:** Enforces proper `handleApiError` usage patterns -- **Import Standards:** Maintains absolute imports with `~/` prefix -- **Type Safety:** Ensures TypeScript strict mode compliance - -### Project-Specific Adaptations -- **Solo Project Focus:** Commands adapted for single developer workflow -- **SolidJS Patterns:** Optimized for reactive programming patterns -- **Supabase Integration:** Handles database and real-time patterns -- **Portuguese UI Support:** Maintains pt-BR UI text while enforcing English code - -### Quality Integration -- **Automated Validation:** Commands integrate with `npm run copilot:check` -- **Test Updates:** Automatically updates tests for code changes -- **Lint Compliance:** Ensures ESLint and Prettier standards -- **Performance Focus:** Prioritizes O(n) algorithms and efficient patterns - -## Migration from GitHub Prompts - -### Mapping -``` -.github/prompts/commit.prompt.md → .claude/commands/workflow/commit.md -.github/prompts/fix.prompt.md → .claude/commands/quality/fix.md -.github/prompts/code-review.prompt.md → .claude/commands/quality/review.md -.github/prompts/github-issue-unified.md → .claude/commands/issues/create.md -.github/prompts/issue-implementation.md → .claude/commands/issues/implement.md -.github/prompts/milestone-prioritization.prompt.md → .claude/commands/prioritize-milestone.md -.github/prompts/refactor.prompt.md → .claude/commands/refactor/clean-architecture.md -.github/prompts/pull-request.prompt.md → 
.claude/commands/workflow/pull-request.md -.github/prompts/end-session.prompt.md → .claude/commands/session/end.md -``` - -### Improvements -- **Command Structure:** Organized into logical categories -- **Usage Documentation:** Clear usage patterns and examples -- **Integration Points:** Better integration with Claude Code's capabilities -- **Error Handling:** Improved error recovery and reporting -- **Solo Project Focus:** Removed team coordination overhead - -## Technical Requirements - -### Environment Setup -```bash -export GIT_PAGER=cat # Required for git/gh commands -``` - -### Dependencies -- **GitHub CLI (`gh`)** - Authenticated and functional -- **Node.js & pnpm** - Package management and script execution -- **Git repository** - Proper remote configuration -- **Project scripts** - `.scripts/` directory with validation tools - -### File System -- **Temp directory access** - Commands use `/tmp/` for intermediate files -- **Write permissions** - Repository write access for commits and PRs -- **Script execution** - Permission to execute project validation scripts - -## Integration with CLAUDE.md - -These commands are designed to work seamlessly with the patterns and standards documented in `CLAUDE.md`. 
They enforce: - -- Clean architecture layer separation -- Absolute import requirements -- Error handling standards -- Commit message conventions -- Quality validation procedures -- Solo project adaptations - -## Usage Guidelines - -### Command Execution -- Commands are designed for autonomous execution -- Quality validation is integrated into all commands -- Error recovery is built into command logic -- User confirmation is requested for destructive operations - -### Workflow Integration -- Commands chain together for complete development workflows -- State preservation between commands -- Quality gates prevent progression with errors -- Continuous validation throughout process - -### Customization -- Commands adapt to project-specific patterns -- User preferences are learned and applied -- Context is preserved across sessions -- Patterns are documented and reused - -## Best Practices - -1. **Start with `/fix`** - Ensure clean codebase before development -2. **Use proper issue types** - Choose correct template for `/create-issue` -3. **Plan before implementing** - Review implementation plan in `/implement` -4. **Validate continuously** - Let commands handle quality validation -5. 
**End sessions properly** - Use `/end-session` for knowledge preservation - -## Troubleshooting +## Features -### Common Issues -- **Permission errors:** Ensure proper git and GitHub authentication -- **Script failures:** Verify `.scripts/` directory and permissions -- **Validation failures:** Run `/fix` to resolve quality issues -- **Network issues:** Check GitHub CLI authentication and connectivity +- **Clean Architecture:** Enforces layer separation +- **Error Handling:** Proper `errorHandler.apiError` patterns +- **Solo Project:** Adapted for single developer +- **Quality Gates:** Integrated with `pnpm check` +- **Portuguese Support:** Maintains pt-BR UI text -### Recovery Procedures -- **Failed commits:** Commands will retry with proper shell escaping -- **PR creation errors:** Automatic retry with corrected parameters -- **Validation loops:** Commands will iterate until all checks pass -- **Missing dependencies:** Commands provide fallback strategies +## Requirements -For detailed usage instructions, see individual command documentation files. \ No newline at end of file +- GitHub CLI (`gh`) authenticated +- Node.js & pnpm +- Git repository with remote +- Project scripts in `.scripts/` \ No newline at end of file diff --git a/.claude/commands/issues/breakdown.md b/.claude/commands/issues/breakdown.md new file mode 100644 index 000000000..4d6b4cfb4 --- /dev/null +++ b/.claude/commands/issues/breakdown.md @@ -0,0 +1,245 @@ +# GitHub Issue Breakdown Analyzer + +Analyze a GitHub Issue to determine if it should be broken down into subissues. If so, suggest subissues using the template in `docs/ISSUE_TEMPLATE_SUBISSUE.md`. + +## Usage + +``` +/breakdown <issue-number> +``` + +**Parameters:** +- `issue-number` (required): GitHub issue number to analyze + +## Description + +This command analyzes existing GitHub issues to determine if they are complex enough to warrant breaking down into smaller, more manageable subissues.
It provides analysis and generates ready-to-execute commands for creating subissues. + +## What it does + +1. **Issue Fetching:** + - Uses `gh` CLI to fetch issue details and comments + - Retrieves full issue content including description and comments + - Validates issue exists and is accessible + +2. **Complexity Analysis:** + - Assesses if issue is large, complex, or multi-step + - Identifies multiple distinct components or modules + - Evaluates if work can be parallelized or sequenced + - Considers current complexity labels and scope + +3. **Breakdown Assessment:** + - Determines if breakdown would provide value + - Identifies logical separation points + - Maps work to specific modules or architectural layers + - Considers development workflow and dependencies + +4. **Subissue Suggestion:** + - Creates logical subissues with clear scope + - Maps each subissue to specific modules or steps + - Ensures subissues are independently workable + - Maintains traceability to parent issue + +5. 
**Command Generation:** + - Generates `gh issue create` commands for each subissue + - Uses `printf` with heredoc for proper formatting + - Applies appropriate labels based on complexity and area + - Links subissues to parent issue + +## Analysis Criteria + +### Issues Suitable for Breakdown + +**Complex Features:** +- Multi-module implementation +- Multiple UI components + backend changes +- Sequential implementation phases +- Different skill sets or focus areas + +**Large Refactors:** +- Multiple files or modules affected +- Different architectural layers involved +- Can be done incrementally +- Risk mitigation through smaller changes + +**Epic-level Tasks:** +- Multiple related but independent features +- Long-term implementation timeline +- Different priority levels for components +- Team coordination benefits + +### Issues NOT Suitable for Breakdown + +**Simple Tasks:** +- Single file or component changes +- Straightforward bug fixes +- Small improvements or tweaks +- Clear single-step implementation + +**Tightly Coupled Work:** +- Changes that must be atomic +- Cannot be tested independently +- Shared critical dependencies +- Risk of integration issues + +## Subissue Generation + +### Template Compliance +- Uses `docs/ISSUE_TEMPLATE_SUBISSUE.md` format +- Includes parent issue reference +- Clear title and description +- Specific acceptance criteria +- Relevant context and links + +### Label Strategy +- **Inherits parent labels:** complexity, area, type +- **Adds subissue label:** `subissue` for identification +- **Maintains consistency:** Same milestone if applicable +- **Complexity adjustment:** May reduce complexity for smaller scope + +### Content Structure +```markdown +**Parent Issue:** #<parent-issue-number> +**Title:** <subissue-title> +**Description:** <subissue-description> +**Acceptance Criteria:** +- [ ] Specific deliverable 1 +- [ ] Specific deliverable 2 +- [ ] Integration with parent scope +**Additional Context:** <links-and-references> +``` + +## Shell Command Generation + +### Command Format +```bash +printf 'Content goes
here' > /tmp/subissue-<number>.md +gh issue create --title "<title>" --body-file /tmp/subissue-<number>.md --label "<labels>" +``` + +### Shell Compatibility +- Uses `printf` with proper escaping +- Handles special characters and newlines +- Creates temporary files for complex content +- Validates content before issue creation + +### Error Handling +- Checks `gh` CLI availability +- Validates authentication and permissions +- Handles network errors gracefully +- Provides clear error messages + +## Breakdown Strategies + +### Architecture-Based Breakdown +- **Domain Layer:** Pure business logic changes +- **Application Layer:** SolidJS orchestration and state +- **Infrastructure Layer:** Supabase integration and data +- **UI Layer:** Components and user interface + +### Feature-Based Breakdown +- **Core Functionality:** Essential business logic +- **User Interface:** Forms, displays, interactions +- **Data Layer:** Database changes and migrations +- **Integration:** API connections and external services + +### Phase-Based Breakdown +- **Phase 1:** Foundation and core requirements +- **Phase 2:** Additional features and enhancements +- **Phase 3:** Polish, optimization, and edge cases +- **Phase 4:** Documentation and testing + +## Analysis Output + +### Assessment Summary +- **Complexity Evaluation:** Why breakdown is/isn't recommended +- **Module Analysis:** Which parts of codebase are affected +- **Risk Assessment:** Integration complexity and dependencies +- **Work Estimation:** Relative effort for each component + +### Subissue Recommendations +- **Logical Groupings:** How work should be divided +- **Dependencies:** Order of implementation if sequential +- **Scope Definition:** Clear boundaries for each subissue +- **Integration Points:** How subissues connect to parent + +### Generated Commands +- **Ready-to-execute:** `gh issue create` commands +- **Proper formatting:** Uses temp files for complex content +- **Label application:** Appropriate labels for each subissue +- **Parent
linking:** Clear traceability relationships + +## Integration with Existing Workflow + +### Command Chaining +```bash +/breakdown 123 # Analyze issue #123 +# Review suggestions +# Execute generated commands +/implement 124 # Implement first subissue +/implement 125 # Implement second subissue +``` + +### Quality Assurance +- **Template compliance:** Uses existing subissue template +- **Label consistency:** Follows `docs/labels-usage.md` +- **English language:** All content in English +- **Markdown formatting:** Proper structure and syntax + +## Best Practices + +### When to Use Breakdown +- **Issue complexity exceeds single session work** +- **Multiple developers could work in parallel** +- **Risk mitigation through incremental delivery** +- **Clear separation of concerns possible** + +### When NOT to Use Breakdown +- **Simple, focused changes** +- **Tightly coupled atomic operations** +- **Already appropriately scoped** +- **Integration complexity outweighs benefits** + +### Effective Subissue Creation +- **Clear scope boundaries:** No overlap between subissues +- **Independent testability:** Each can be verified separately +- **Logical sequence:** Dependencies are clear and minimal +- **Appropriate size:** 1-3 day effort per subissue typically + +## Requirements + +- GitHub CLI (`gh`) installed and authenticated +- Access to repository and issue creation permissions +- `docs/ISSUE_TEMPLATE_SUBISSUE.md` template file +- Shell environment with `printf` and temp file support + +## Solo Project Adaptations + +- **Focus on technical decomposition** rather than team coordination +- **Emphasizes risk reduction** through incremental implementation +- **Prioritizes clear testing boundaries** for self-validation +- **Maintains architectural consistency** across breakdown +- **Enables focused work sessions** on specific components + +## Example Output + +``` +Analysis for Issue #123: + +BREAKDOWN RECOMMENDED: +This feature involves multiple architectural layers and could 
benefit from incremental implementation. + +Suggested Subissues: +1. Domain layer implementation (nutrition calculation logic) +2. Database schema and Supabase functions +3. SolidJS components and state management +4. UI integration and styling + +Generated Commands: +printf 'Parent Issue: #123...' > /tmp/subissue-1.md +gh issue create --title "Domain layer: nutrition calculation logic" --body-file /tmp/subissue-1.md --label "subissue,feature,backend,complexity-medium" + +[Additional commands for remaining subissues...] +``` + +This command enables systematic breakdown of complex issues while maintaining the quality and consistency standards of the existing issue management workflow. \ No newline at end of file diff --git a/.claude/commands/issues/create.md b/.claude/commands/issues/create.md index d6d1b1ace..9dee0f708 100644 --- a/.claude/commands/issues/create.md +++ b/.claude/commands/issues/create.md @@ -16,6 +16,26 @@ Create any type of GitHub issue (bug, feature, improvement, refactor, task, subi This command creates GitHub issues using the appropriate templates from the docs/ directory. It handles all issue types with proper formatting, labels, and validation. 
+## Memory Integration + +**Context Loading:** +- Loads `workflow-optimization-patterns` for issue creation best practices +- References `todo-issue-relationship-pattern` for TODO correlation +- Uses `issue-creation-workflow-optimization` for template improvements +- Applies learned patterns from previous issue creations + +**Smart Defaults:** +- Suggests issue types based on code area and context +- Pre-fills templates with relevant project information +- Recommends appropriate labels based on module patterns +- Correlates with existing issues to prevent duplicates + +**Workflow Awareness:** +- Integrates with `/discover-issues` findings for context +- Maintains consistency with project labeling conventions +- Applies solo project adaptations automatically +- Uses session context for better issue correlation + ## What it does 1. **Type Clarification:** diff --git a/.claude/commands/issues/discover.md b/.claude/commands/issues/discover.md new file mode 100644 index 000000000..b115a3b89 --- /dev/null +++ b/.claude/commands/issues/discover.md @@ -0,0 +1,211 @@ +# Issue Discovery Automation + +Automatically discover existing issues, TODO patterns, and related functionality to prevent duplicate issue creation and provide comprehensive context for development decisions. + +## Usage + +``` +/discover-issues [feature-area] [search-term] +``` + +**Parameters:** +- `feature-area` (optional): Module or area to focus search (e.g., "recipe", "diet", "weight") +- `search-term` (optional): Specific functionality or error message to search for + +## Description + +This command performs comprehensive issue discovery by correlating TODO comments, existing GitHub issues, and related code areas. It provides consolidated findings with actionable recommendations to optimize development workflow. + +## What it does + +1. 
**TODO Pattern Discovery:** + - Searches codebase for TODO comments with issue references + - Identifies TODO patterns that suggest missing functionality + - Correlates TODO locations with module structure + - Extracts issue numbers from TODO comments + +2. **GitHub Issue Correlation:** + - Fetches existing issues using `gh issue list` + - Searches issue titles and descriptions for related keywords + - Maps TODO comments to existing GitHub issues + - Identifies gaps between code TODOs and tracked issues + +3. **Code Area Analysis:** + - Analyzes affected modules and file structures + - Identifies related functionality and dependencies + - Maps error messages to potential issue areas + - Suggests relevant files for investigation + +4. **Consolidation and Reporting:** + - Presents findings in structured format + - Categorizes by issue type (bug, feature, improvement) + - Provides actionable next steps + - Suggests whether new issue creation is needed + +## Discovery Categories + +### TODO-to-Issue Mapping +- **Tracked TODOs:** TODOs with existing GitHub issue references +- **Untracked TODOs:** TODO comments without corresponding issues +- **Implementation gaps:** Features mentioned in issues but not implemented +- **Orphaned issues:** Closed issues with remaining TODO comments + +### Functionality Discovery +- **Missing features:** User-facing functionality mentioned but not implemented +- **Error patterns:** Common error messages that suggest missing handling +- **Architecture gaps:** Domain/application layer incomplete implementations +- **Integration points:** Missing connections between modules + +### Code Area Analysis +- **Related files:** Files in same module or with similar functionality +- **Test coverage:** Areas with missing or outdated tests +- **Documentation gaps:** Missing JSDoc or implementation notes +- **Migration needs:** Legacy code requiring updates + +## Search Strategies + +### Keyword-Based Discovery +```bash +# Example searches performed: +rg 
"TODO.*[Ii]ssue|TODO.*#\d+" --type ts +rg "funcionalidade.*desenvolvimento|não.*possível" --type ts +rg "Error.*message|throw.*Error" --type ts +``` + +### Pattern Recognition +- **Error messages:** Portuguese UI messages suggesting limitations +- **Conditional blocks:** Code blocks with "not implemented" patterns +- **Feature flags:** Disabled functionality awaiting implementation +- **Migration comments:** Legacy code requiring updates + +### Issue Correlation +```bash +# GitHub issue searches: +gh issue list --search "recipe edit" --state all +gh issue list --label feature --state open +gh issue list --milestone "v0.14.0" --state open +``` + +## Output Format + +### Discovery Summary +```markdown +## Issue Discovery Results + +### Found TODOs with Issue References +- [ ] #695: Allow user to edit recipes inside recipes + - Location: src/sections/recipe/components/RecipeEditModal.tsx:112 + - Status: Open, assigned to marcuscastelo + - Implementation: Error message shown to users + +### Untracked TODOs +- [ ] Recipe validation improvements needed + - Location: src/modules/recipe/domain/recipe.ts:45 + - Suggestion: Create improvement issue for validation logic + +### Related Issues +- #123: Recipe editing improvements (Closed) +- #456: UI error message improvements (Open) +- #789: Domain validation refactoring (Open) + +### Recommended Actions +1. ✅ Issue #695 already tracks recipe editing - no new issue needed +2. 🆕 Create improvement issue for recipe validation +3. 🔗 Link recipe validation work to existing issues #456, #789 +``` + +### Error Pattern Analysis +```markdown +### Error Patterns Found +- "Ainda não é possível..." 
(pt-BR limitation messages) + - Locations: 3 files, 5 occurrences + - Patterns: User-facing feature limitations + - Suggestion: Audit all limitation messages for issue tracking + +- "throw new Error" without handleApiError + - Locations: domain layer violations + - Suggestion: Architecture review for error handling +``` + +## Integration Features + +### Memory Loading +- Loads `workflow-optimization-patterns` memory for context +- Uses `todo-issue-relationship-pattern` for correlation strategies +- References `issue-creation-workflow-optimization` for next steps + +### Command Chaining +- Integrates with `/create-issue` for seamless issue creation +- Provides context for `/implement` command execution +- Prepares data for `/prioritize-milestone` decisions + +### Quality Integration +- Validates search results against project standards +- Ensures English-only code comments and identifiers +- Checks for absolute import usage in found files + +## Solo Project Adaptations + +- **Focus:** Technical discovery over stakeholder coordination +- **Efficiency:** Automated correlation instead of manual tracking +- **Context:** Preserves developer context between sessions +- **Quality:** Integrates with existing quality validation tools + +## Advanced Features + +### Pattern Learning +- Learns from user's issue creation patterns +- Adapts search strategies based on project evolution +- Improves correlation accuracy over time + +### Smart Suggestions +- Suggests issue types based on TODO context +- Recommends labels and milestones based on code area +- Identifies refactoring opportunities during discovery + +### Context Preservation +- Saves discovery results for session continuity +- Builds knowledge base of issue patterns +- Facilitates faster future discovery operations + +## Requirements + +- **GitHub CLI (`gh`)** - Authenticated and functional +- **ripgrep (`rg`)** - Fast text search capabilities +- **Project structure** - Standard module organization +- **Git repository** 
- Proper remote configuration + +## Error Handling + +- **Missing tools:** Provides fallback strategies using standard grep +- **API limits:** Handles GitHub API rate limiting gracefully +- **Large codebases:** Optimizes search scope and performance +- **Network issues:** Caches results for offline operation + +## Best Practices + +1. **Start broad:** Use general search terms first +2. **Refine scope:** Use feature-area parameter for targeted discovery +3. **Validate findings:** Review suggestions before acting +4. **Update workflow:** Use discoveries to improve future searches +5. **Maintain context:** Save important findings for session continuity + +## Output + +Creates structured discovery report and suggests next actions: + +```bash +# Example workflow continuation: +/discover-issues recipe +# → Shows recipe editing is tracked in #695 +# → Suggests validation improvements needed +# → Recommends: /create-issue improvement "Recipe validation enhancements" +``` + +## Integration with Project Standards + +- **Clean Architecture:** Identifies layer violations during discovery +- **Error Handling:** Finds missing `handleApiError` usage patterns +- **Import Standards:** Validates absolute import usage in discovered files +- **Quality Gates:** Ensures discoveries align with `pnpm check` standards +- **Solo Workflow:** Optimized for single developer context and decisions \ No newline at end of file diff --git a/.claude/commands/issues/implement.md b/.claude/commands/issues/implement.md index 1519b478f..bdec74c93 100644 --- a/.claude/commands/issues/implement.md +++ b/.claude/commands/issues/implement.md @@ -1,6 +1,6 @@ # Issue Implementation -Fully implement GitHub issues with autonomous execution after plan approval. +Autonomous GitHub issue implementation after plan approval. ## Usage @@ -8,197 +8,61 @@ Fully implement GitHub issues with autonomous execution after plan approval. 
/implement ``` -**Parameters:** -- `issue-number`: GitHub issue number to implement - -## Description - -This command provides complete autonomous implementation of GitHub issues. After plan approval, it executes all implementation steps without user interaction until completion or hard blockers. - -## What it does - -1. **Preparation Phase:** - - Checks current branch name and compares with target: `marcuscastelo/issue` - - If already on correct branch, skips branch creation/checkout - - If not on correct branch: - - Fetches and checks out latest `rc/` branch or default base - - Creates feature branch: `marcuscastelo/issue` - - Retrieves issue data using `gh` CLI (title, body, labels, comments) - - Validates issue exists and is implementable - -2. **Planning Phase:** - - Analyzes issue requirements and acceptance criteria - - Checks referenced commits or working versions - - Drafts comprehensive implementation plan in Markdown - - Reviews plan with user and iterates until approved - - **Planning stops here - waits for explicit approval** - -3. **Implementation Phase (Post-Approval):** - - **Autonomous execution begins immediately** - - Makes all required code changes - - Fixes code style, type, and test issues as they arise - - Updates or rewrites tests to match changes - - Runs validation scripts until all pass - - Applies consistent patterns across codebase - - **No status updates or confirmations during execution** - - **Only stops for hard blockers or ambiguity** - -4. 
**Completion Validation:** - - Verifies all tests pass - - Confirms code quality checks pass (ESLint, Prettier, TypeScript) - - Ensures build succeeds - - Validates clean architecture preservation - - Commits all changes with proper conventional messages - - Confirms no uncommitted changes remain - -## Implementation Categories - -### Feature Implementation -- **New functionality:** Complete feature development -- **UI components:** SolidJS component creation with proper patterns -- **Domain logic:** Clean architecture compliance -- **Integration:** Database and API integration -- **Testing:** Comprehensive test coverage - -### Bug Fixes -- **Root cause analysis:** Investigate using error messages and stack traces -- **Targeted fixes:** Minimal changes to resolve issue -- **Regression testing:** Ensure fix doesn't break existing functionality -- **Error handling:** Improve error handling where applicable - -### Refactoring -- **Code restructuring:** Improve code organization and quality -- **Architecture alignment:** Ensure clean architecture compliance -- **Performance optimization:** Improve efficiency where needed -- **Legacy migration:** Update deprecated patterns - -### Improvements -- **Technical debt:** Address code quality issues -- **Performance enhancements:** Optimize slow operations -- **Developer experience:** Improve tooling and workflows -- **Documentation:** Update docs to match changes - -## Blocker Handling - -### Hard Blockers (Stop and Ask User) -- **Ambiguous requirements:** Unclear acceptance criteria or specifications -- **Missing dependencies:** Required packages or services unavailable -- **Breaking changes:** Changes that would break existing functionality -- **Infrastructure issues:** Database, deployment, or external service problems -- **Conflicting requirements:** Contradictory specifications in issue - -### Soft Blockers (Retry up to 3x) -- **Test failures:** Failing unit, integration, or e2e tests -- **Lint/type errors:** ESLint, 
Prettier, or TypeScript issues -- **Build failures:** Compilation or bundling errors -- **Validation failures:** Quality check script failures -- **Missing files:** Temporarily missing or locked files - -## Implementation Rules - -### Autonomous Execution -- **No pausing:** Never wait or ask for confirmation after plan approval -- **Silent operation:** No status updates during implementation -- **Complete execution:** Continue until fully done or hard blocked -- **Error recovery:** Automatically retry soft failures - -### Code Quality Standards -- **Clean architecture:** Maintain layer separation and dependencies -- **Type safety:** Ensure TypeScript strict mode compliance -- **Error handling:** Proper `handleApiError` usage in application layer -- **Testing:** Update tests for all changes -- **Formatting:** Apply ESLint and Prettier consistently - -### Commit Standards -- **Conventional commits:** Use proper type(scope): description format -- **Atomic changes:** One logical change per commit -- **English messages:** All commit messages in English -- **Descriptive:** Clear explanation of what and why - -## Branch and Git Workflow - -### Branch Management -- **Feature branches:** `marcuscastelo/issue` format -- **Branch optimization:** Skip branch creation if already on correct branch -- **Base branch:** Latest `rc/` branch or project default (when creating new branch) -- **Clean state:** Ensure working directory is clean before starting -- **Upstream tracking:** Set up proper remote tracking - -### Commit Strategy -- **Progressive commits:** Commit logical chunks of work -- **Descriptive messages:** Clear commit messages explaining changes -- **Test commits:** Separate commits for test updates -- **Fix commits:** Separate commits for quality fixes - -## Integration with Project Standards - -### Architecture Compliance -- **Domain layer:** Pure business logic, no side effects -- **Application layer:** Orchestration and error handling -- **Infrastructure layer:** 
External integrations and data access -- **UI layer:** Pure presentational components - -### Import and Module Standards -- **Absolute imports:** Use `~/` prefix for all internal imports -- **No barrel files:** Direct imports from specific files -- **Static imports:** No dynamic imports allowed -- **Module boundaries:** Respect clean architecture layers - -### Language and Style -- **English code:** All code, comments, and commit messages in English -- **Portuguese UI:** UI text may be in Portuguese when required -- **Consistent naming:** Descriptive, action-based names -- **Type safety:** Prefer type aliases over interfaces +## Process -## Success Criteria +**Preparation:** +1. Check/create branch: `marcuscastelo/issue` +2. Retrieve issue data via `gh` CLI +3. Validate issue exists and is implementable -### Technical Validation -- ✅ All tests pass (`pnpm test`) -- ✅ Type checking passes (`pnpm type-check`) -- ✅ Linting passes (`pnpm lint`) -- ✅ Build succeeds (`pnpm build`) -- ✅ Quality checks pass (`pnpm check`) +**Planning:** +1. Analyze requirements and acceptance criteria +2. Draft comprehensive implementation plan +3. **Wait for user approval** - execution stops here -### Code Quality -- ✅ Clean architecture maintained -- ✅ Proper error handling implemented -- ✅ Tests updated for all changes -- ✅ No TypeScript `any` types (except infrastructure) -- ✅ Consistent code style applied +**Implementation (Post-Approval):** +1. **Autonomous execution** - no status updates +2. Make all required code changes +3. Fix quality issues automatically +4. Update/rewrite tests as needed +5. Run validation until all pass +6. Commit with conventional messages +7. 
**Only stops for hard blockers** -### Git State -- ✅ All changes committed -- ✅ Conventional commit messages -- ✅ No uncommitted changes -- ✅ Feature branch ready for PR +## Implementation Types -## Output and Completion +**Features:** Complete development with UI, domain logic, integration +**Bugs:** Root cause analysis and targeted fixes +**Refactoring:** Code restructuring and architecture alignment +**Improvements:** Technical debt resolution and optimization -### Final Report -- **Success confirmation:** All criteria met -- **Changes summary:** High-level overview of modifications -- **Next steps:** PR creation or additional work needed -- **Blocker report:** Any issues encountered and resolved +## Blockers -### Error Reporting -- **Hard blockers:** Clear description of blocking issues -- **Resolution suggestions:** Recommended next steps -- **Partial completion:** What was accomplished before blocking -- **State preservation:** Current branch and commit state +**Hard Blockers (Stop and Ask):** +- Ambiguous requirements +- Missing dependencies +- Breaking changes +- Infrastructure issues -## Requirements +**Soft Blockers (Retry 3x):** +- Test failures +- Lint/type errors +- Build failures +- Validation failures -- GitHub CLI (`gh`) authenticated and functional -- Git repository with proper remote configuration -- Node.js and pnpm for package management -- All project scripts available (`.scripts/` directory) -- Write access to repository and branch creation permissions +## Success Criteria -## Best Practices +- ✅ All tests pass (`pnpm test`) +- ✅ Type checking passes (`pnpm type-check`) +- ✅ Linting passes (`pnpm lint`) +- ✅ Build succeeds (`pnpm build`) +- ✅ Quality checks pass (`pnpm check`) +- ✅ All changes committed +- ✅ Clean architecture maintained + +## Requirements -- **Plan thoroughly:** Comprehensive planning before approval -- **Execute completely:** Full autonomous implementation -- **Maintain quality:** Never compromise on code standards -- 
**Handle errors gracefully:** Proper error recovery and reporting -- **Document changes:** Clear commit messages and code comments -- **Test thoroughly:** Comprehensive test coverage for changes \ No newline at end of file +- GitHub CLI authenticated +- Git repository with write access +- Node.js and pnpm +- Project scripts available \ No newline at end of file diff --git a/.claude/commands/quality/fix.md b/.claude/commands/quality/fix.md index 3c4081d3e..907fb139b 100644 --- a/.claude/commands/quality/fix.md +++ b/.claude/commands/quality/fix.md @@ -1,6 +1,6 @@ -# Codebase Quality Fix +# Quality Fix -Automatically run comprehensive checks and fix all detected issues until the codebase passes all quality gates. +Automated codebase checks and fixes until all quality gates pass. ## Usage @@ -8,91 +8,29 @@ Automatically run comprehensive checks and fix all detected issues until the cod /fix ``` -## Description +## Process -This command performs automated codebase checks using `npm run copilot:check` and fixes all detected issues including linting errors, type errors, and test failures. It continues iterating until all checks pass. - -## What it does - -1. **Check Execution:** - - Runs `npm run copilot:check` with output redirection - -2. **Output Validation:** - - Checks for "COPILOT: All checks passed!" success message - - Detects error patterns: `failed`, `at constructor`, `error`, `replace` - - Never stops early - completes all validation scripts - -3. **Error Analysis:** - - Analyzes detected issues using agent capabilities - - Categorizes errors by type (lint, type, test, build) - - Prioritizes fixes by impact and dependencies - -4. **Automated Fixes:** - - **Linting errors:** Auto-fixes with ESLint rules - - **Type errors:** Adds proper types and null checks - - **Import issues:** Converts to absolute imports with ~/ - - **Test failures:** Updates tests for code changes - - **Formatting:** Applies Prettier consistently - -5. 
**Iteration Loop:** - - Re-runs full check process after each fix - - Continues until "COPILOT: All checks passed!" appears - - Never skips validation reruns +1. **Run** `pnpm copilot:check` +2. **Validate** output for "COPILOT: All checks passed!" +3. **Analyze** error patterns and categorize issues +4. **Fix** automatically (lint, types, tests, imports) +5. **Iterate** until all checks pass ## Fix Categories -### ESLint Fixes -- **Absolute imports:** Converts relative imports to `~/` format -- **Type safety:** Adds explicit null/undefined checks -- **Unused variables:** Removes or prefixes with underscore -- **Import ordering:** Applies simple-import-sort rules -- **Prettier formatting:** Fixes code style issues - -### TypeScript Fixes -- **Explicit types:** Replaces `any` with proper types -- **Null checks:** Adds strict null/undefined validation -- **Generic constraints:** Properly constrains type parameters -- **Callback types:** Specifies exact callback argument types -- **Library types:** Uses proper types for external libraries - -### Test Fixes -- **Orphaned tests:** Removes tests for deleted functionality -- **Test updates:** Updates tests to match code changes -- **Mock updates:** Updates mocks for new interfaces -- **Import fixes:** Updates test imports to absolute paths - -### Architecture Fixes -- **Layer violations:** Moves code to appropriate layers -- **Error handling:** Adds proper `handleApiError` calls -- **Domain purity:** Removes side effects from domain layer -- **Import structure:** Enforces module boundaries +**ESLint:** Absolute imports, unused variables, formatting +**TypeScript:** Explicit types, null checks, generic constraints +**Tests:** Update for code changes, remove orphaned tests +**Architecture:** Layer violations, error handling, import structure ## Output -Reports final status: -- ✅ "All checks passed!" 
- Success state -- ❌ "Remaining issues:" - Lists unfixable issues -- 🔄 "Iteration [N]:" - Shows progress during fixes - -## Error Handling - -- **Script missing:** Reports missing validation scripts -- **Permission denied:** Suggests file permission fixes -- **Network issues:** Handles dependency installation problems -- **Complex errors:** Documents manual intervention needed - -## Best Practices - -- **Atomic fixes:** Makes single-purpose changes -- **Comprehensive validation:** Always reruns full check suite -- **No shortcuts:** Never skips validation steps -- **Clear reporting:** Documents all changes made -- **Rollback safety:** Preserves git state for rollback +- ✅ "All checks passed!" - Success +- ❌ "Remaining issues:" - Manual intervention needed +- 🔄 "Iteration [N]:" - Progress during fixes -## Project-Specific Rules +## Requirements -- Enforces absolute imports with `~/` prefix -- Maintains clean architecture layer separation -- Preserves Portuguese UI text while fixing English code -- Updates JSDoc for exported functions only -- Follows conventional commit message format for any auto-commits \ No newline at end of file +- `pnpm copilot:check` script available +- Write permissions for auto-fixes +- Git repository for rollback safety \ No newline at end of file diff --git a/.claude/commands/refactor/clean-architecture.md b/.claude/commands/refactor/clean-architecture.md index d91d462c1..c726f58b0 100644 --- a/.claude/commands/refactor/clean-architecture.md +++ b/.claude/commands/refactor/clean-architecture.md @@ -278,7 +278,7 @@ const Component = lazy(() => import('./Component')) // ✅ ### Quality Checks 1. **Run comprehensive checks:** ```bash - npm run copilot:check | tee /tmp/copilot-terminal 2>&1 + npm run copilot:check ``` 2. 
**Verify success message:** diff --git a/.claude/commands/workflow/commit.md b/.claude/commands/workflow/commit.md index 8239c6aae..83491514d 100644 --- a/.claude/commands/workflow/commit.md +++ b/.claude/commands/workflow/commit.md @@ -1,6 +1,6 @@ -# Commit Message Generator +# Commit Generator -Analyze staged changes and generate a conventional commit message in English, then execute the commit. +Generate conventional commit messages and execute commits. ## Usage @@ -8,71 +8,29 @@ Analyze staged changes and generate a conventional commit message in English, th /commit ``` -## Description +## Process -This command analyzes the current staged git changes and generates a conventional commit message following the project's standards. It will automatically commit the changes after generating the message. +1. **Verify** staged changes exist +2. **Analyze** changes using `scripts/copilot-commit-info.sh` +3. **Generate** conventional commit message in English +4. **Execute** commit with proper shell escaping -## What it does +## Format -1. **Verification Phase:** - - Runs `scripts/copilot-commit-info.sh` to gather git information - - Checks if there are staged changes to commit - - Stops if no staged changes are found +- **Type:** feat, fix, refactor, test, chore, docs, style, perf, ci +- **Scope:** Module/component when applicable +- **Message:** `type(scope): description` +- **Language:** Always English +- **Security:** Never include sensitive data -2. **Analysis Phase:** - - Analyzes staged changes from script output - - Determines the type of changes (feat, fix, refactor, etc.) - - Identifies affected modules and components +## Example -3. **Generation Phase:** - - Creates a conventional commit message in English - - Ensures message is atomic and describes the main change - - Follows format: `type(scope): description` - -4. 
**Execution Phase:** - - Displays the generated commit message - - Executes the commit automatically - - Uses proper shell escaping for multi-line messages - -## Requirements - -- Staged git changes must exist -- `scripts/copilot-commit-info.sh` script must be available -- Git repository must be properly initialized - -## Output Format - -The command outputs the commit message in a markdown code block: - -````markdown -feat(day-diet): add copy previous day functionality -```` - -Then executes: ```bash -git commit -m "feat(day-diet): add copy previous day functionality" +feat(day-diet): add copy previous day functionality ``` -## Commit Message Rules - -- **Language:** Always in English -- **Format:** Conventional commits style (type(scope): description) -- **Types:** feat, fix, refactor, test, chore, docs, style, perf, ci -- **Scope:** Module or component name when applicable -- **Description:** Clear, concise summary of the main change -- **Security:** Never include sensitive data, code diffs, or secrets -- **Atomicity:** One logical change per commit - -## Error Handling - -- No staged changes: Warns user to stage changes first -- Script failures: Reports issue and suggests manual verification -- Shell errors: Uses file-based commit for complex messages -- Permission issues: Provides troubleshooting guidance - -## Project-Specific Rules +## Requirements -- References affected modules from src/modules/ structure -- Follows clean architecture layer naming -- Respects domain-driven design terminology -- Maintains consistency with existing commit history \ No newline at end of file +- Staged git changes +- `scripts/copilot-commit-info.sh` available +- Git repository initialized \ No newline at end of file diff --git a/.claude/commands/workflow/context.md b/.claude/commands/workflow/context.md new file mode 100644 index 000000000..a9701ed19 --- /dev/null +++ b/.claude/commands/workflow/context.md @@ -0,0 +1,285 @@ +# Workflow Context Management + +Manage shared 
context between commands and agent interactions to improve workflow continuity and reduce redundant operations. + +## Usage + +``` +/workflow-context [action] [context-type] +``` + +**Parameters:** +- `action` (optional): save, load, clear, status +- `context-type` (optional): discovery, implementation, quality, session + +## Description + +This command provides context preservation and sharing capabilities across different workflow phases and agent interactions. It prevents information loss during command handoffs and optimizes workflow efficiency. + +## What it does + +### Context Saving +1. **Discovery Context:** + - Saves TODO patterns and locations found + - Preserves GitHub issue correlation results + - Stores code area analysis findings + - Maintains search strategies that worked + +2. **Implementation Context:** + - Preserves issue analysis and planning decisions + - Stores code areas being modified + - Maintains architectural decisions made + - Saves test update patterns + +3. **Quality Context:** + - Stores common error patterns and solutions + - Preserves validation results and fixes applied + - Maintains coding standard decisions + - Saves performance optimization patterns + +4. **Session Context:** + - Preserves overall workflow progress + - Stores decisions made and rationale + - Maintains learning points for future sessions + - Saves effective command sequences + +### Context Loading +1. **Smart Context Retrieval:** + - Loads relevant context based on current command + - Provides historical patterns for similar operations + - Suggests next steps based on previous workflows + - Applies learned optimizations automatically + +2. 
**Cross-Command Integration:** + - Shares discovery results with create-issue command + - Provides implementation context to quality checks + - Maintains workflow state across agent handoffs + - Preserves session learning for future use + +## Context Structure + +### WorkflowContext Interface +```typescript +interface WorkflowContext { + // Workflow identification + sessionId: string + timestamp: string + phase: 'discovery' | 'analysis' | 'implementation' | 'optimization' + + // Code areas and files + codeAreas: string[] + modifiedFiles: string[] + relatedModules: string[] + + // Issue tracking + relatedIssues: number[] + todoPatterns: string[] + issueCorrelations: Record<number, string> + + // Implementation details + architecturalDecisions: string[] + testPatterns: string[] + errorHandlingApproaches: string[] + + // Quality and validation + validationResults: Record<string, boolean> + commonErrors: string[] + fixPatterns: string[] + + // Learning and optimization + effectiveCommands: string[] + workflowOptimizations: string[] + previousFindings: Record<string, unknown> + + // Next steps and recommendations + suggestedActions: string[] + workflowContinuation: string[] +} +``` + +### Context Storage Patterns + +#### Discovery Phase Context +```typescript +// Saved during /discover-issues or issue analysis +{ + phase: 'discovery', + codeAreas: ['recipe/components', 'recipe/domain'], + relatedIssues: [695, 123, 456], + todoPatterns: ['recipe editing limitations', 'validation improvements'], + suggestedActions: ['create validation issue', 'link to #456'] +} +``` + +#### Implementation Phase Context +```typescript +// Saved during /implement or code development +{ + phase: 'implementation', + modifiedFiles: ['RecipeEditModal.tsx', 'recipe.ts'], + architecturalDecisions: ['use domain validation', 'add handleApiError'], + testPatterns: ['mock validation', 'test error scenarios'], + suggestedActions: ['run quality checks', 'update related tests'] +} +``` + +## Integration Features + +### Memory Integration +- **Loads 
relevant memories** based on context type and phase +- **Updates memory patterns** with new workflow learnings +- **Consolidates context** with existing memory knowledge +- **Prevents memory fragmentation** through smart consolidation + +### Command Integration +- **Automatic context saving** at key workflow transitions +- **Smart context loading** when commands start +- **Context-aware suggestions** for next steps +- **Workflow continuity** across agent handoffs + +### Quality Integration +- **Context validation** against project standards +- **Consistency checking** across workflow phases +- **Pattern verification** with established conventions +- **Quality gate integration** with context awareness + +## Command Actions + +### Save Context +```bash +/workflow-context save discovery +# Saves current discovery findings to context +# Includes TODO patterns, issues found, code areas analyzed +``` + +### Load Context +```bash +/workflow-context load implementation +# Loads implementation context for current workflow +# Provides architectural decisions, patterns, next steps +``` + +### Status Check +```bash +/workflow-context status +# Shows current context state and available contexts +# Provides workflow phase identification and next steps +``` + +### Clear Context +```bash +/workflow-context clear session +# Clears session context while preserving learnings +# Optionally consolidates learnings into memory +``` + +## Workflow Automation + +### Phase Transitions +```typescript +// Automatic context handoffs between workflow phases +discovery → analysis: Transfer TODO patterns and issue correlations +analysis → implementation: Provide architectural decisions and scope +implementation → quality: Share modifications and test requirements +quality → optimization: Consolidate learnings and patterns +``` + +### Command Chaining +```typescript +// Smart context sharing between commands +/discover-issues → saves discovery context +/create-issue → loads discovery context for 
better issue creation +/implement → loads analysis context for informed implementation +/fix → loads implementation context for targeted fixes +``` + +## Context Persistence + +### Session Context +- **In-memory storage** during active workflow sessions +- **Automatic cleanup** after workflow completion +- **Learning extraction** to permanent memory +- **Session correlation** for pattern identification + +### Permanent Patterns +- **Memory consolidation** of effective workflows +- **Pattern extraction** from successful contexts +- **Optimization learning** from context analysis +- **Workflow improvement** based on context data + +## Solo Project Adaptations + +- **No team coordination** context needed +- **Technical focus** over business stakeholder context +- **Individual workflow** optimization patterns +- **Self-review context** instead of peer review handoffs +- **Quality gate** integration for personal validation + +## Advanced Features + +### Intelligent Context Prediction +- **Phase detection** based on current command and context +- **Next step suggestions** based on workflow patterns +- **Risk assessment** for context transitions +- **Optimization recommendations** for workflow efficiency + +### Context Analytics +- **Workflow efficiency tracking** across sessions +- **Pattern success analysis** for optimization +- **Command sequence optimization** based on context data +- **Learning curve analysis** for workflow improvement + +### Error Recovery +- **Context restoration** after interrupted workflows +- **Partial context recovery** from incomplete sessions +- **Workflow restart** with preserved context +- **Error pattern learning** for future prevention + +## Best Practices + +1. **Save context at phase transitions** for continuity +2. **Load context before major operations** for efficiency +3. **Clear context after completion** to prevent pollution +4. **Review context patterns** periodically for optimization +5. 
**Consolidate learnings** into permanent memory + +## Integration with Project Standards + +- **Clean Architecture** context awareness for layer decisions +- **Error Handling** context for consistent patterns +- **Import Standards** context for maintaining absolute imports +- **Quality Gates** integration with context validation +- **Solo Workflow** optimization for individual developer context + +## Requirements + +- **Temporary storage** capability for session context +- **Memory integration** for permanent pattern storage +- **Command integration** for automatic context management +- **JSON serialization** for context data persistence + +## Output + +Provides structured context information and workflow guidance: + +```bash +# Context status example +Current Phase: implementation +Active Context: recipe-editing-feature +Code Areas: recipe/components, recipe/domain +Related Issues: #695 (recipe editing), #456 (validation) +Next Steps: + 1. Implement validation logic in domain layer + 2. Add error handling in application layer + 3. Update tests for new functionality + 4. Run quality checks with /fix + +# Context handoff example +Discovery context loaded for issue creation: +- Found TODO at RecipeEditModal.tsx:112 +- Related issue #695 already exists +- Validation improvements needed +- Suggested: Create improvement issue for validation +``` + +This command bridges the gap between individual commands and provides workflow intelligence that learns and optimizes over time. \ No newline at end of file diff --git a/.claude/commands/workflow/orchestrate.md b/.claude/commands/workflow/orchestrate.md new file mode 100644 index 000000000..8c465b440 --- /dev/null +++ b/.claude/commands/workflow/orchestrate.md @@ -0,0 +1,500 @@ +# Workflow Orchestration + +Automatically orchestrate multi-step development workflows with intelligent command routing, context management, and error recovery. 
+ +## Usage + +``` +/orchestrate <workflow-type> [parameters] +``` + +**Workflow Types:** +- `feature-development` - Complete feature development cycle +- `bug-investigation` - Bug discovery, analysis, and resolution +- `refactor-cycle` - Architecture improvement and validation +- `issue-resolution` - End-to-end issue implementation +- `quality-improvement` - Comprehensive quality enhancement + +**Parameters:** +- Feature development: `[feature-description]` +- Bug investigation: `[error-message|issue-number]` +- Refactor cycle: `[target-area]` +- Issue resolution: `<issue-number>` +- Quality improvement: `[focus-area]` + +## Description + +This command provides intelligent workflow orchestration that automatically sequences commands, manages context, and handles errors across multi-step development processes. It reduces cognitive overhead and ensures consistent workflow execution. + +## Orchestration Framework + +### Workflow Definition Structure +```typescript +interface WorkflowDefinition { + name: string + description: string + phases: WorkflowPhase[] + contextRequirements: string[] + qualityGates: QualityGate[] + errorRecovery: ErrorRecoveryStrategy[] + successCriteria: string[] +} + +interface WorkflowPhase { + name: string + commands: CommandSequence[] + prerequisites: string[] + outcomes: string[] + nextPhaseConditions: string[] + rollbackStrategy?: string +} +``` + +### Supported Workflows + +#### Feature Development Orchestration +```typescript +const featureDevelopmentWorkflow = { + name: 'feature-development', + phases: [ + { + name: 'discovery', + commands: [ + { command: '/discover-issues', params: ['feature-area'] }, + { command: '/workflow-context', params: ['save', 'discovery'] } + ], + outcomes: ['existing issues identified', 'context preserved'], + nextPhase: 'planning' + }, + { + name: 'planning', + commands: [ + { command: '/create-issue', params: ['feature', 'description'] }, + { command: '/workflow-context', params: ['save', 'planning'] } + ], + outcomes: ['issue 
created', 'scope defined'], + nextPhase: 'implementation' + }, + { + name: 'implementation', + commands: [ + { command: '/implement', params: ['issue-number'] }, + { command: '/workflow-context', params: ['save', 'implementation'] } + ], + outcomes: ['feature implemented', 'tests updated'], + nextPhase: 'quality' + }, + { + name: 'quality', + commands: [ + { command: '/fix', params: [] }, + { command: '/review', params: [] }, + { command: '/workflow-context', params: ['save', 'quality'] } + ], + outcomes: ['quality gates passed', 'review completed'], + nextPhase: 'delivery' + }, + { + name: 'delivery', + commands: [ + { command: '/commit', params: [] }, + { command: '/pull-request', params: [] }, + { command: '/workflow-context', params: ['clear', 'session'] } + ], + outcomes: ['changes committed', 'PR created'], + nextPhase: 'complete' + } + ] +} +``` + +#### Bug Investigation Orchestration +```typescript +const bugInvestigationWorkflow = { + name: 'bug-investigation', + phases: [ + { + name: 'discovery', + commands: [ + { command: '/discover-issues', params: ['bug', 'error-message'] }, + { command: '/workflow-context', params: ['save', 'discovery'] } + ], + outcomes: ['related issues found', 'code areas identified'], + nextPhase: 'analysis' + }, + { + name: 'analysis', + commands: [ + // Intelligent codebase analysis based on discovery results + { command: 'analyze-code-area', dynamic: true }, + { command: '/workflow-context', params: ['save', 'analysis'] } + ], + outcomes: ['root cause identified', 'fix strategy determined'], + nextPhase: 'resolution' + }, + { + name: 'resolution', + commands: [ + { command: '/create-issue', params: ['bug', 'findings'] }, + { command: '/implement', params: ['issue-number'] }, + { command: '/fix', params: [] } + ], + outcomes: ['bug fixed', 'quality validated'], + nextPhase: 'delivery' + } + ] +} +``` + +#### Issue Resolution Orchestration +```typescript +const issueResolutionWorkflow = { + name: 'issue-resolution', + phases: [ + 
{ + name: 'preparation', + commands: [ + { command: '/workflow-context', params: ['load', 'session'] }, + // Load issue context and related information + { command: 'load-issue-context', params: ['issue-number'] } + ], + outcomes: ['context loaded', 'issue analyzed'], + nextPhase: 'implementation' + }, + { + name: 'implementation', + commands: [ + { command: '/implement', params: ['issue-number'] } + ], + outcomes: ['issue implemented'], + nextPhase: 'validation' + }, + { + name: 'validation', + commands: [ + { command: '/fix', params: [] }, + { command: '/review', params: [] } + ], + outcomes: ['quality validated'], + nextPhase: 'delivery' + }, + { + name: 'delivery', + commands: [ + { command: '/commit', params: [] }, + { command: '/pull-request', params: [] } + ], + outcomes: ['changes delivered'], + nextPhase: 'complete' + } + ] +} +``` + +## Intelligent Features + +### Context-Aware Command Routing +```typescript +// Dynamic command selection based on context +const intelligentRouting = { + contextAnalysis: { + loadWorkflowContext: 'analyze current session state', + evaluatePhase: 'determine optimal next command', + assessPrerequisites: 'verify command readiness' + }, + + commandAdaptation: { + parameterOptimization: 'adapt parameters based on context', + skipUnnecessary: 'bypass completed or irrelevant steps', + dynamicSequencing: 'reorder commands based on current state' + }, + + errorPrevention: { + prerequisiteCheck: 'verify command prerequisites before execution', + contextValidation: 'ensure context compatibility', + qualityGateEnforcement: 'prevent progression with quality issues' + } +} +``` + +### Adaptive Workflow Execution +```typescript +// Real-time workflow adaptation +const adaptiveExecution = { + phaseSkipping: { + completedWork: 'skip phases already completed in context', + userOverride: 'allow manual phase specification', + intelligentDetection: 'detect when phases can be safely skipped' + }, + + errorRecovery: { + automaticRetry: 'retry 
commands with corrected parameters', + contextRestoration: 'restore previous stable state', + workflowContinuation: 'resume from stable checkpoint' + }, + + optimizationLearning: { + patternRecognition: 'learn effective command sequences', + timingOptimization: 'optimize command execution timing', + contextPrediction: 'predict likely next steps' + } +} +``` + +### Quality Gate Integration +```typescript +// Automatic quality validation at key points +const qualityIntegration = { + mandatoryGates: { + beforeCommit: 'ensure pnpm check passes', + beforePR: 'validate complete implementation', + beforeDelivery: 'confirm all quality standards met' + }, + + contextualGates: { + architectureCompliance: 'verify clean architecture adherence', + errorHandling: 'confirm proper handleApiError usage', + testCoverage: 'validate test updates for changes' + }, + + recoveryActions: { + qualityFailure: 'automatically invoke /fix command', + contextLoss: 'restore context from last stable state', + workflowInterrupt: 'save state and provide recovery options' + } +} +``` + +## Command Execution Engine + +### Command Sequencing +```typescript +// Intelligent command execution with context preservation +const executionEngine = { + commandPreparation: { + contextLoading: 'load relevant context before command', + parameterOptimization: 'adapt parameters based on workflow state', + prerequisiteValidation: 'ensure command readiness' + }, + + executionMonitoring: { + progressTracking: 'monitor command execution progress', + errorDetection: 'detect and categorize execution errors', + outputAnalysis: 'analyze command outputs for next steps' + }, + + contextPreservation: { + stateCapture: 'capture state before and after each command', + learningExtraction: 'extract patterns for workflow optimization', + continuityMaintenance: 'preserve context across command boundaries' + } +} +``` + +### Error Recovery Strategies +```typescript +// Robust error handling for workflow continuity +const 
errorRecovery = { + commandFailure: { + retry: 'retry with corrected parameters or context', + skip: 'skip optional commands that fail', + substitute: 'use alternative commands for same outcome' + }, + + workflowFailure: { + rollback: 'return to last stable workflow checkpoint', + partial: 'complete achievable parts of workflow', + manual: 'transition to manual execution with context' + }, + + contextFailure: { + reconstruction: 'rebuild context from available information', + recovery: 'restore context from memory and session data', + continuation: 'continue with reduced but functional context' + } +} +``` + +## User Interaction Patterns + +### Progress Reporting +```typescript +// Real-time workflow progress communication +const progressReporting = { + phaseTransition: { + summary: 'summarize completed phase outcomes', + preview: 'preview next phase objectives', + estimation: 'provide time estimates for remaining work' + }, + + commandExecution: { + status: 'real-time command execution status', + outcomes: 'summarize command results and impacts', + nextSteps: 'preview upcoming commands and rationale' + }, + + errorCommunication: { + diagnosis: 'clear explanation of errors and impacts', + options: 'present recovery options with trade-offs', + recommendations: 'suggest optimal recovery path' + } +} +``` + +### Workflow Customization +```typescript +// User control over orchestration behavior +const customizationOptions = { + interactiveMode: { + phaseApproval: 'request approval before each phase', + commandReview: 'show commands before execution', + outcomeValidation: 'confirm outcomes before continuation' + }, + + automationLevel: { + full: 'complete automation with error recovery', + guided: 'automated execution with progress reporting', + manual: 'command suggestions with user execution' + }, + + scopeControl: { + phaseSelection: 'run specific workflow phases only', + commandFiltering: 'exclude or include specific commands', + outcomeTargeting: 'focus on specific 
workflow outcomes' + } +} +``` + +## Risk Mitigation + +### Safe Orchestration Practices +```typescript +// Minimize risk during automated workflow execution +const riskMitigation = { + safetyChecks: { + destructiveOperations: 'require explicit confirmation for destructive actions', + qualityGates: 'enforce quality validation at key checkpoints', + contextValidation: 'verify context integrity before major operations' + }, + + rollbackCapability: { + checkpointing: 'create rollback points at phase boundaries', + statePreservation: 'maintain rollback state throughout workflow', + quickRecovery: 'enable rapid recovery from failed operations' + }, + + failSafe: { + gracefulDegradation: 'fallback to manual execution when automation fails', + contextPreservation: 'maintain context even during failures', + userCommunication: 'clear communication about failures and options' + } +} +``` + +### Compatibility Assurance +```typescript +// Ensure compatibility with existing commands and workflows +const compatibilityAssurance = { + commandIntegration: { + existingCommands: 'use existing commands without modification', + parameterCompatibility: 'maintain existing parameter interfaces', + outputCompatibility: 'preserve existing command output formats' + }, + + workflowCoexistence: { + manualOverride: 'allow manual command execution at any point', + workflowExit: 'enable graceful exit from orchestration', + hybridExecution: 'support mix of orchestrated and manual commands' + }, + + systemIntegration: { + memoryCompatibility: 'integrate with existing memory system', + contextCompatibility: 'work with existing context management', + qualityCompatibility: 'maintain existing quality gates and standards' + } +} +``` + +## Solo Project Adaptations + +### Individual Developer Optimization +- **No team coordination**: Focus on individual productivity optimization +- **Technical decision speed**: Reduce decision overhead for solo development +- **Quality automation**: Automate quality 
checks without team approval processes +- **Context preservation**: Maintain individual developer context across sessions +- **Learning acceleration**: Optimize based on individual patterns and preferences + +### Project-Specific Integration +- **Clean architecture**: Enforce architectural patterns automatically +- **SolidJS patterns**: Apply framework-specific best practices +- **Supabase integration**: Handle database and real-time patterns consistently +- **Portuguese UI support**: Maintain pt-BR UI text while enforcing English code +- **Quality standards**: Integrate with project's `pnpm check` validation + +## Best Practices + +1. **Start with low-risk workflows** to build confidence +2. **Use interactive mode** initially to understand orchestration behavior +3. **Leverage context preservation** for workflow continuity +4. **Monitor quality gates** to ensure standards compliance +5. **Learn from workflow patterns** to optimize future orchestrations + +## Requirements + +- **All existing commands** available and functional +- **Context management** system operational +- **Memory system** for pattern storage and learning +- **Quality validation** tools (`pnpm check`) functional +- **Git and GitHub CLI** for delivery phase operations + +## Integration with Project Standards + +- **Command compatibility**: Works with all existing `/` commands +- **Quality integration**: Enforces `pnpm check` at appropriate points +- **Memory utilization**: Uses existing memory system for pattern storage +- **Error handling**: Applies project error handling standards +- **Solo workflow**: Optimized for individual developer productivity + +## Output + +Provides structured workflow progress and intelligent guidance: + +```bash +# Orchestration example +$ /orchestrate feature-development "dark mode toggle" + +🚀 Starting Feature Development Orchestration + +Phase 1: Discovery +→ Running /discover-issues feature "dark mode" +✅ Found: No existing dark mode issues +✅ Context saved: 
discovery phase + +Phase 2: Planning +→ Running /create-issue feature "Add dark mode toggle to settings" +✅ Created: Issue #789 - Dark mode toggle implementation +✅ Context saved: planning phase + +Phase 3: Implementation +→ Running /implement 789 +✅ Implementation completed with tests +✅ Context saved: implementation phase + +Phase 4: Quality +→ Running /fix +✅ All quality checks passed +→ Running /review +✅ Code review completed +✅ Context saved: quality phase + +Phase 5: Delivery +→ Running /commit +✅ Commit created: "feat: add dark mode toggle to settings" +→ Running /pull-request +✅ PR created: #156 - Add dark mode toggle + +🎉 Feature Development Orchestration Complete! + Issue: #789 | PR: #156 | Time: 45 minutes +``` + +This orchestration system provides intelligent automation while maintaining full compatibility with existing workflows and preserving user control at all times. \ No newline at end of file diff --git a/.claude/commands/workflow/pull-request.md b/.claude/commands/workflow/pull-request.md index 371c83e39..72c916aaf 100644 --- a/.claude/commands/workflow/pull-request.md +++ b/.claude/commands/workflow/pull-request.md @@ -122,22 +122,34 @@ Closes #456 ## Shell and CLI Handling -### Multiline Content Management +### Multiline Content Management (CRITICAL - HEREDOC RULES) + +**🚨 MANDATORY HEREDOC FORMAT:** ```bash -# Uses cat with heredoc for proper shell escaping -cat <<'EOF' > /tmp/pr-description.md +# ALWAYS use single quotes around EOF delimiter to prevent variable expansion +cat << 'EOF' > /tmp/pr-description.md ## Summary -Comprehensive PR description with proper formatting. +Your PR description content here. + +## Implementation Details +- Bullet points work fine +- Code blocks with `backticks` are safe +- Variables like $VAR will NOT be expanded (good!) 
-## Details -- Multiple lines -- Code blocks with `backticks` -- No shell interpretation issues +Closes #123 EOF -gh pr create --title "feat: new feature" --body-file /tmp/pr-description.md +# THEN use the file with gh CLI +gh pr create --title "your title" --body-file /tmp/pr-description.md ``` +**🚨 CRITICAL RULES:** +1. **ALWAYS use `cat << 'EOF'`** (with single quotes) +2. **NEVER use `cat <&1`. After the main command finishes, check `cat /tmp/copilot-terminal-[N]`. Never repeat the main command. Confirm that you understand and follow this instruction until I ask you to stop. Never combine commands with `&&`, `||` or `;` - ## Terminal & Script Usage - Always check for the existence of referenced scripts (e.g., `.scripts/semver.sh`) before using them. If missing, suggest alternatives or prompt the user. - The preferred method for app version reporting is `.scripts/semver.sh`, not `git describe --tags --always`. @@ -56,11 +54,8 @@ During this session, always wait until the end of the execution of any requested - If a script is missing or not executable, add a troubleshooting step or warning. ## Codebase Check & Output Validation -1. Run `npm run copilot:check` in the project root, redirecting both stdout and stderr to `/tmp/copilot-terminal-[N]` using `| tee /tmp/copilot-terminal-[N] 2>&1` (with a unique [N] for each run). -2. After the command finishes, run each of the following custom scripts (each should simply output the contents of `/tmp/copilot-terminal-[N]`): - - `.scripts/cat1.sh /tmp/copilot-terminal-[N]` - - `.scripts/cat2.sh /tmp/copilot-terminal-[N]` - - `.scripts/cat3.sh /tmp/copilot-terminal-[N]` +1. Run `npm run copilot:check` in the project root. +2. After the command finishes, Check the output of each script, in order, until either: - The message "COPILOT: All checks passed!" appears in the output, or - Any of the following error patterns (case-insensitive) appear: `failed`, `at constructor`, `error`, `replace`, or similar. 
diff --git a/.github/prompts/fix.prompt.md b/.github/prompts/fix.prompt.md index 9930a5daf..acdce36aa 100644 --- a/.github/prompts/fix.prompt.md +++ b/.github/prompts/fix.prompt.md @@ -10,10 +10,8 @@ Your task is to ensure the codebase passes all checks and is error-free. Never o ## Instructions -1. Run `npm run copilot:check` in the project root, redirecting both stdout and stderr to `/tmp/copilot-terminal-[N]` using `| tee /tmp/copilot-terminal-[N] 2>&1` (with a unique [N] for each run). -2. After the command finishes, run each of the following custom scripts (each should simply output the contents of `/tmp/copilot-terminal-[N]`): - - `.scripts/cat1.sh /tmp/copilot-terminal-[N]` - - `.scripts/cat2.sh /tmp/copilot-terminal-[N]` +1. Run `npm run copilot:check` in the project root. +2. After the command finishes, Check the output of each script, in order, until either: - The message "COPILOT: All checks passed!" appears in the output, or - Any of the following error patterns (case-insensitive) appear: `failed`, `at constructor`, `error`, `replace`, or similar. diff --git a/.github/prompts/pull-request.prompt.md b/.github/prompts/pull-request.prompt.md index fc3edf138..d94f176c0 100644 --- a/.github/prompts/pull-request.prompt.md +++ b/.github/prompts/pull-request.prompt.md @@ -1,74 +1,328 @@ --- -description: 'Review all changes from HEAD to the nearest rc/** branch (local or remote), push unpushed commits, and generate and open a PR using gh. Confirm PR details with the user before creation. PR is created to the nearest rc/** branch.' +description: 'Review changes, push commits, and create pull request to the nearest rc/** branch.' 
mode: 'agent' tools: ['changes', 'codebase', 'editFiles', 'extensions', 'fetch', 'findTestFiles', 'githubRepo', 'new', 'openSimpleBrowser', 'problems', 'runCommands', 'runNotebooks', 'runTasks', 'search', 'searchResults', 'terminalLastCommand', 'terminalSelection', 'testFailure', 'usages', 'vscodeAPI', 'activePullRequest'] --- -# Pull Request Review, Push & Creation Agent +# Pull Request Creator -Antes de tudo, exiba para o usuário: +Review changes, push commits, and create pull request to the nearest rc/** branch. -`AGENT HAS CHANGED, NEW AGENT: .github/prompts/pull-request.prompt.md`. +## Usage -You are: github-copilot.v1/pull-request +``` +/pull-request +/pr +``` -Analyze all modifications in the codebase from the current `HEAD` to the nearest base branch matching `rc/**` (e.g., `rc/v0.12.0`), searching both local and remote branches. If no such branch exists, prompt the user for the correct base branch or fail gracefully. +## Description -## Required Output +This command analyzes all changes from HEAD to the nearest `rc/**` branch, pushes any unpushed commits, and creates a properly formatted pull request using GitHub CLI. -1. **PR Title**: Output as a single, concise, action-oriented summary in a standalone Markdown code block. -2. **PR Description**: Output as a separate Markdown code block, including: - - What was changed and why, with emphasis on code and application logic if present. - - Relevant context or motivation. - - Notable implementation details or breaking changes. - - References to related documentation or issues. - - A list of issues that this PR closes (e.g., `closes #123`), included at the end of the description. **If no issues are closed, omit this section.** - - If the current branch name matches the pattern `issue` (e.g., `marcuscastelo/issue698`), automatically extract the issue number and add `closes #` to the PR description. -3. **Labels**: Output as a plain list (not Markdown) for user copy-paste. 
Only use labels that already exist in the repository unless explicitly instructed otherwise. -4. **Milestone**: Output as a plain value (not Markdown) for user copy-paste. +## What it does -## Instructions +1. **Change Analysis:** + - Identifies nearest `rc/**` branch (local or remote) + - Analyzes all modifications from HEAD to base branch + - Collects commit messages and metadata + - Determines scope and type of changes -- Use a single zsh code block to run all required commands for determining the diff from `HEAD` to the nearest `rc/**` branch (searching both local and remote), collecting commit messages, and gathering relevant metadata. Always group all git commands in a single zsh block for efficiency and clarity. -- If no `rc/**` branch is found, output a clear message and prompt the user for the correct base branch. -- When both code and documentation/.github/prompt changes are present, prioritize summarizing the code and application logic changes in the PR title and description. Only mention documentation or prompt changes as secondary details. -- Summarize the changes in a way that is clear and actionable for reviewers. -- Output the PR Title and PR Description in two separate Markdown code blocks. Output labels and milestone as plain text lists for user convenience. -- Output all results in English. -- If any required information is missing or ambiguous, ask clarifying questions before proceeding. -- Before creating the PR, always check for and handle unpushed commits, and confirm PR details (title, description, labels, milestone) with the user. -- For multiline PR descriptions, **always write the body to a temp file using `cat` with heredoc and single quotes for the delimiter** (e.g., `cat <<'EOF' > file`). This ensures that any backticks or variables inside the heredoc are not interpreted by the shell. Backticks are allowed inside the heredoc for Markdown/code, but the heredoc delimiter must always use single quotes. 
Never use `printf` or `echo` for this purpose. -- After creating the PR, always check and report the PR URL to the user. -- For multi-line PR descriptions or commit messages, **always use `cat` with heredoc and single quotes for the delimiter** to write the message to a temp file. Backticks are allowed inside the heredoc, but the delimiter must be single quotes to prevent shell interpretation. Never use `printf` for this purpose. -- If any step fails (e.g., push fails, `gh` command fails), output a clear error message and stop. -- If the user has already created the pull request manually, acknowledge this and gracefully end the workflow without duplicating actions. See [copilot-instructions.md](../copilot-instructions.md) for global rules. -- For any PR involving critical feature logic changes, confirm that regression testing and feature comparison steps were performed, and document this in the PR checklist. +2. **Content Generation:** + - Creates action-oriented PR title + - Generates comprehensive PR description + - Suggests appropriate labels and milestone + - Extracts issue numbers from branch names ---- +3. **Validation and Push:** + - Checks for unpushed commits + - Pushes local commits to remote branch + - Validates GitHub CLI authentication + - Confirms PR details with user + +4. 
**PR Creation:** + - Uses `gh` CLI to create pull request + - Sets proper title, description, labels, milestone + - Links to closing issues automatically + - Reports PR URL upon success + +## Change Analysis Process + +### Branch Detection +```bash +# Searches for nearest rc/** branch +git branch -r | grep 'rc/' | head -1 # Remote branches +git branch -l | grep 'rc/' | head -1 # Local branches +``` + +### Diff Analysis +- Compares HEAD to detected base branch +- Analyzes file changes and commit history +- Prioritizes code changes over documentation +- Identifies breaking changes or major features + +### Issue Extraction +- Detects branch pattern: `marcuscastelo/issue` +- Automatically adds `closes #` to PR description +- Links related issues mentioned in commits + +## PR Content Structure + +### Title Format +``` +type(scope): concise action-oriented summary +``` + +Examples: +- `feat(day-diet): add copy previous day functionality` +- `fix(unified-item): resolve hierarchy validation errors` +- `refactor(weight): optimize period grouping algorithm` + +### Description Sections + +1. **Summary:** What was changed and why +2. **Implementation Details:** Notable technical decisions +3. **Breaking Changes:** Any backward incompatible changes +4. **Testing:** How changes were validated +5. **Issues:** `closes #123` if applicable + +### Example Description +```markdown +## Summary +Implements copy previous day functionality allowing users to duplicate their previous day's meals and macros to the current day. 
+ +## Implementation Details +- Added `CopyLastDayButton` component with confirmation modal +- Created `copyDayDiet` domain operation with validation +- Integrated with existing day diet infrastructure +- Maintains macro targets and meal structure + +## Testing +- Added unit tests for domain operations +- Tested UI interaction flows +- Verified data consistency after copy + +Closes #456 +``` + +## Label Suggestions + +### Type Labels +- `feature` - New functionality +- `bug` - Bug fixes +- `refactor` - Code restructuring +- `improvement` - Enhancements +- `chore` - Maintenance tasks + +### Area Labels +- `ui` - User interface changes +- `backend` - Server-side logic +- `api` - API modifications +- `performance` - Performance improvements +- `accessibility` - Accessibility enhancements + +### Complexity Labels +- `complexity-low` - Simple changes +- `complexity-medium` - Moderate complexity +- `complexity-high` - Complex implementation +- `complexity-very-high` - Very complex changes + +## Shell and CLI Handling + +### Multiline Content Management (CRITICAL - HEREDOC RULES) + +**🚨 MANDATORY HEREDOC FORMAT:** +```bash +# ALWAYS use single quotes around EOF delimiter to prevent variable expansion +cat << 'EOF' > /tmp/pr-description.md +## Summary +Your PR description content here. + +## Implementation Details +- Bullet points work fine +- Code blocks with `backticks` are safe +- Variables like $VAR will NOT be expanded (good!) + +Closes #123 +EOF + +# THEN use the file with gh CLI +gh pr create --title "your title" --body-file /tmp/pr-description.md +``` + +**🚨 CRITICAL RULES:** +1. **ALWAYS use `cat << 'EOF'`** (with single quotes) +2. 
**NEVER use `cat <` to description +- **Cross-references:** Links related issues from commits + +### Documentation Updates +- **Architecture compliance:** References clean architecture changes +- **Code review:** Mentions significant architectural decisions +- **Migration notes:** Documents any breaking changes + +### Milestone Association +- **Version-based:** Associates with target release milestone +- **Feature-based:** Links to relevant feature milestones +- **Bug-based:** Associates with current sprint milestone + +## Output Format + +### Generated Content +```markdown +**Title:** +feat(day-diet): add copy previous day functionality + +**Description:** +## Summary +Implements copy previous day functionality allowing users to... + +**Labels:** +feature ui complexity-medium + +**Milestone:** +v0.14.0 +``` + +### GitHub CLI Command (CORRECTED) +```bash +# CRITICAL: Always detect rc/ branch first - NEVER hardcode stable +RC_BRANCH=$(git branch -r | grep 'origin/rc/' | head -1 | sed 's/.*origin\///') +if [ -z "$RC_BRANCH" ]; then + echo "❌ No rc/ branch found - cannot create PR" + exit 1 +fi + +# Create PR with detected rc/ branch +gh pr create \ + --title "feat(day-diet): add copy previous day functionality" \ + --body-file /tmp/pr-description.md \ + --label feature,ui,complexity-medium \ + --milestone "v0.14.0" \ + --base "$RC_BRANCH" + +# Clean up +rm /tmp/pr-description.md +``` + +## Error Recovery -## Additional Push & PR Creation Steps +### CRITICAL ERRORS TO AVOID -- After generating the PR title, description, labels, and milestone, check for any local commits that have not been pushed to the remote branch. If there are unpushed commits, push them before proceeding. -- Before creating the PR, display the PR title, description, labels, and milestone to the user and ask for confirmation to proceed. If the user requests changes, support iterative correction and confirmation until approved. 
-- Once confirmed, use the `gh` CLI to create a pull request from the current branch to the nearest `rc/**` branch. The PR should use the generated title and description. Reference [pull-request-gh.prompt.md](./pull-request-gh.prompt.md) for best practices on using the `gh` command. -- After creating the PR, display the PR URL or summary to the user. -- If any step fails (e.g., push fails, `gh` command fails), output a clear error message and stop. -- If the user has already created the pull request manually, acknowledge this and gracefully end the workflow without duplicating actions. See [copilot-instructions.md](../copilot-instructions.md) for global rules. +**🚨 NEVER USE STABLE BRANCH:** +- **Problem:** Creating PR to `stable` instead of `rc/` branch +- **Fix:** Always detect and use `rc/` branch as base +- **Rule:** PRs to `stable` are ONLY for version release merges -## PR Update Workflow +**🚨 EOF APPEARING IN PR DESCRIPTION:** +- **Problem:** Using `cat < --body-file ` with the body file prepared as above. Always confirm the update with the user. +### Common Issues +- **No rc/ branch:** STOP execution and prompt user - never fallback to stable +- **Unpushed commits:** Automatically pushes before PR creation +- **Formatting issues:** Use proper heredoc with single quotes around EOF +- **Label conflicts:** Removes invalid labels and continues -## PR Body Formatting and Verification (added per reportedBy: github-copilot.v1/pull-request) +### Graceful Failures +- **Network issues:** Reports connectivity problems +- **Authentication:** Guides through `gh auth login` +- **Permission errors:** Suggests repository access verification +- **Existing PR:** Detects and reports existing PR for branch -- PR body formatting must be visually and functionally verified on GitHub, not just locally. If the user reports formatting issues (e.g., stray `\n` or literal escape sequences), the agent must retry using `cat` and heredoc to rewrite the body and update the PR again. 
-- Add a troubleshooting step: if the PR body appears with literal `\n` or other formatting issues, rewrite the body using heredoc and update the PR again. -- When formatting issues are suspected, display the PR body with `cat` and heredoc for user verification before updating with `gh`. -- The agent must always update the PR on GitHub after correcting formatting, not just display the fixed content locally. +## Requirements -## Issue-Focused Communication +- GitHub CLI (`gh`) installed and authenticated +- Git repository with proper remote configuration +- `.scripts/semver.sh` script (with fallback) +- Write access to repository +- Target `rc/**` branch exists -- When the branch or user request indicates a direct issue relationship, ensure the PR title and description reference the relevant issue number and context for clarity and automatic closure. +## Best Practices -reportedBy: github-copilot.v1/pull-request +- **Clear titles:** Action-oriented, conventional commit style +- **Comprehensive descriptions:** Include context and motivation +- **Proper labeling:** Use existing repository labels +- **Issue linking:** Automatic closure where applicable +- **Quality validation:** Ensure all checks pass +- **User confirmation:** Verify details before creation diff --git a/.github/prompts/refactor.prompt.md b/.github/prompts/refactor.prompt.md index 6267beb82..e6f5412a3 100644 --- a/.github/prompts/refactor.prompt.md +++ b/.github/prompts/refactor.prompt.md @@ -39,7 +39,7 @@ You are a programming assistant specialized in SolidJS, Tailwind, daisyUI, and C - **Promises:** - Use `void` only in non-critical handlers/events. - **Testing:** - - Always run `npm run copilot:check | tee /tmp/copilot-terminal 2>&1` and proceed only if “COPILOT: All checks passed!”. + - Always run `npm run copilot:check` and proceed only if “COPILOT: All checks passed!”. 
- **Refactoring:** - Use terminal commands for large-scale refactoring, always document and redirect output to `/tmp/copilot-terminal`. - **Commits:** diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index eb7a081cc..c7653911a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,12 +14,16 @@ jobs: check: runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v4 + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ github.head_ref }} + - name: Clean untracked files + run: git clean -fdx - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: 20 + node-version: 22 - name: Set up pnpm cache uses: actions/cache@v4 with: diff --git a/.github/workflows/todo.yml b/.github/workflows/todo.yml index f45f3b3dd..28b0b195f 100644 --- a/.github/workflows/todo.yml +++ b/.github/workflows/todo.yml @@ -1,32 +1,29 @@ -name: Create issues from TODOs - -on: - workflow_dispatch: - inputs: - importAll: - default: false - required: false - type: boolean - description: Enable, if you want to import all TODOs. Runs on checked out branch! Only use if you're sure what you are doing. 
- push: - branches: # do not set multiple branches, todos might be added and then get referenced by themselves in case of a merge - - 'rc/**' - -permissions: - issues: write - repository-projects: read - contents: read - +name: "Run TODO to Issue" +on: [ "push" ] jobs: - todos: - runs-on: ubuntu-latest - + build: + runs-on: "ubuntu-latest" + permissions: + contents: write + issues: write + pull-requests: write steps: - - uses: actions/checkout@v3 - - - name: Run Issue Bot - uses: derjuulsn/todo-issue@main + - uses: "actions/checkout@v4" + - name: "TODO to Issue" + uses: "alstr/todo-to-issue-action@v5" with: - excludePattern: '^(node_modules/)' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + INSERT_ISSUE_URLS: "true" + - name: Set Git config + run: | + git config --global user.name "github-actions[bot]" + git config --global user.email "github-actions[bot]@users.noreply.github.com" + git config --global push.default current + - name: Commit and Push Changes + run: | + git add -A + if [[ `git status --porcelain` ]]; then + git commit -m "Automatically added GitHub issue links to TODOs" + git push + else + echo "No changes to commit" + fi \ No newline at end of file diff --git a/.scripts/cat1.sh b/.scripts/cat1.sh deleted file mode 100755 index 26f7b60d9..000000000 --- a/.scripts/cat1.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -if [[ "$1" = "--test" ]]; then -echo "This is a test run of the script." - exit 0 -fi -cat "$1" diff --git a/.scripts/cat2.sh b/.scripts/cat2.sh deleted file mode 100755 index 26f7b60d9..000000000 --- a/.scripts/cat2.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -if [[ "$1" = "--test" ]]; then -echo "This is a test run of the script." - exit 0 -fi -cat "$1" diff --git a/.scripts/cat3.sh b/.scripts/cat3.sh deleted file mode 100755 index 26f7b60d9..000000000 --- a/.scripts/cat3.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -if [[ "$1" = "--test" ]]; then -echo "This is a test run of the script." 
- exit 0 -fi -cat "$1" diff --git a/.scripts/semver.sh b/.scripts/semver.sh index 1611015b8..d1382ac39 100755 --- a/.scripts/semver.sh +++ b/.scripts/semver.sh @@ -4,9 +4,35 @@ set -e OWNER_REPO="marcuscastelo/macroflows" REPO_URL="https://github.com/$OWNER_REPO" + +# Retry and rate limit configuration +MAX_RETRIES="${SEMVER_MAX_RETRIES:-3}" +INITIAL_RETRY_DELAY="${SEMVER_INITIAL_RETRY_DELAY:-2}" +MAX_RETRY_DELAY="${SEMVER_MAX_RETRY_DELAY:-60}" +DEBUG_MODE="${SEMVER_DEBUG:-false}" + +debug_log() { + if [ "$DEBUG_MODE" = "true" ]; then + echo "[DEBUG] $*" >&2 + fi +} + +get_next_minor_version() { + local last_tag="$1" + # remove prefix 'v' + local version="${last_tag#v}" + local major minor patch + IFS='.' read -r major minor patch <<< "$version" + minor=$((minor + 1)) + echo "v${major}.${minor}.0" +} + + get_current_branch() { if [ -n "$VERCEL_GIT_COMMIT_REF" ]; then echo "$VERCEL_GIT_COMMIT_REF" + elif [ -n "$GITHUB_HEAD_REF" ]; then + echo "$GITHUB_HEAD_REF" else local branch branch=$(git rev-parse --abbrev-ref HEAD) @@ -18,6 +44,15 @@ get_current_branch() { fi } +get_latest_release_tag() { + local tag + tag=$(git ls-remote --tags --refs "$REPO_URL" | awk -F/ '{print $3}' | sort -V | tail -n1) + if [ -z "$tag" ]; then + tag="v0.0.0" + fi + echo "$tag" +} + get_sha_for_branch() { local branch="$1" local sha @@ -32,22 +67,108 @@ get_sha_for_branch() { get_commit_count_between() { local from_sha="$1" local to_sha="$2" - local response http_status - response=$(curl -s -w "\n%{http_code}" "https://api.github.com/repos/$OWNER_REPO/compare/$from_sha...$to_sha") - http_status=$(echo "$response" | tail -n1) - response=$(echo "$response" | sed '$d') - if [ "$http_status" != "200" ]; then + local attempt=0 + local delay="$INITIAL_RETRY_DELAY" + local response http_status rate_limit_remaining rate_limit_reset + + while [ $attempt -lt "$MAX_RETRIES" ]; do + debug_log "API call attempt $((attempt + 1))/$MAX_RETRIES for commit count between $from_sha and $to_sha" + + 
response=$(curl -s -w "\n%{http_code}" -D /tmp/semver_headers_$$.txt \ + "https://api.github.com/repos/$OWNER_REPO/compare/$from_sha...$to_sha" 2>&1) + http_status=$(echo "$response" | tail -n1) + response=$(echo "$response" | sed '$d') + + # Extract rate limit info from headers if available + if [ -f /tmp/semver_headers_$$.txt ]; then + rate_limit_remaining=$(grep -i "^x-ratelimit-remaining:" /tmp/semver_headers_$$.txt | awk '{print $2}' | tr -d '\r') + rate_limit_reset=$(grep -i "^x-ratelimit-reset:" /tmp/semver_headers_$$.txt | awk '{print $2}' | tr -d '\r') + rm -f /tmp/semver_headers_$$.txt + + debug_log "Rate limit remaining: ${rate_limit_remaining:-unknown}" + if [ -n "$rate_limit_reset" ]; then + debug_log "Rate limit resets at: $(date -d @$rate_limit_reset 2>/dev/null || echo $rate_limit_reset)" + fi + fi + + # Handle different HTTP status codes + case "$http_status" in + 200) + debug_log "API call successful (HTTP 200)" + local count + count=$(echo "$response" | grep 'total_commits' | head -1 | awk '{print $2}' | tr -d ',') + if [ -z "$count" ]; then + echo "Error: could not parse commit count from GitHub API response" >&2 + echo "$response" >&2 + exit 1 + fi + echo "$count" + return 0 + ;; + + 403) + # Rate limit or forbidden + if echo "$response" | grep -qi "rate limit"; then + echo "Warning: GitHub API rate limit exceeded" >&2 + if [ -n "$rate_limit_reset" ]; then + local wait_time=$((rate_limit_reset - $(date +%s))) + if [ $wait_time -gt 0 ] && [ $wait_time -lt 3600 ]; then + echo "Rate limit resets in $wait_time seconds" >&2 + if [ $attempt -lt $((MAX_RETRIES - 1)) ]; then + echo "Waiting for rate limit reset..." 
>&2 + sleep $((wait_time + 5)) + attempt=$((attempt + 1)) + continue + fi + fi + fi + else + echo "Error: GitHub API access forbidden (HTTP 403)" >&2 + echo "This might be due to authentication issues or repository access restrictions" >&2 + fi + ;; + + 404) + echo "Error: GitHub API resource not found (HTTP 404)" >&2 + echo "The comparison between $from_sha and $to_sha may not exist" >&2 + echo "?" + return 1 + ;; + + 5*) + echo "Warning: GitHub API server error (HTTP $http_status)" >&2 + if [ $attempt -lt $((MAX_RETRIES - 1)) ]; then + echo "Retrying after ${delay}s (attempt $((attempt + 1))/$MAX_RETRIES)..." >&2 + sleep $delay + delay=$((delay * 2)) + if [ $delay -gt "$MAX_RETRY_DELAY" ]; then + delay="$MAX_RETRY_DELAY" + fi + attempt=$((attempt + 1)) + continue + fi + ;; + + *) + echo "Warning: Unexpected HTTP status $http_status from GitHub API" >&2 + debug_log "Response: $response" + ;; + esac + + # If we've exhausted retries for retryable errors, fall through + if [ $attempt -ge $((MAX_RETRIES - 1)) ]; then + echo "Error: GitHub API call failed after $MAX_RETRIES attempts" >&2 + echo "?" + return 1 + fi + + # For non-retryable errors, exit early echo "?" - return - fi - local count - count=$(echo "$response" | grep 'total_commits' | head -1 | awk '{print $2}' | tr -d ',') - if [ -z "$count" ]; then - echo "Error: could not parse commit count from GitHub API response" >&2 - echo "$response" >&2 - exit 1 - fi - echo "$count" + return 1 + done + + echo "?" 
+ return 1 } get_issue_number() { @@ -57,72 +178,59 @@ get_issue_number() { get_rc_version() { local current_branch="$1" - local version stable_sha branch_sha rc_count - version="${BASH_REMATCH[1]}" + local base_version stable_sha rc_sha rc_count + base_version=$(get_next_minor_version "$(get_latest_release_tag)") stable_sha=$(get_sha_for_branch stable) - branch_sha=$(get_sha_for_branch "$current_branch") - if [[ -n "$stable_sha" && -n "$branch_sha" ]]; then - rc_count=$(get_commit_count_between "$stable_sha" "$branch_sha") - if [[ -z "$rc_count" ]]; then - rc_count='unavailable' - fi - else - rc_count='error' - fi - echo "$version-rc.$rc_count" + rc_sha=$(get_sha_for_branch "$current_branch") + + rc_count=$(get_commit_count_between "$stable_sha" "$rc_sha") + echo "${base_version}-rc.${rc_count}" } get_dev_version() { local current_branch="$1" - local closest_rc version merge_base count issue_number version_str - closest_rc=$(git for-each-ref --format='%(refname:short)' refs/heads/ | - grep '^rc/' | - while read branch; do - echo "$(git merge-base $current_branch $branch) $branch" - done | - sort -r | - head -n1 | - awk '{print $2}') - - if [ -z "$closest_rc" ]; then - count=$(git rev-list --count HEAD) - echo "0.0.0-dev.$count" + local base_version stable_sha rc_branch rc_sha dev_sha rc_count dev_count issue_number + + base_version=$(get_next_minor_version "$(get_latest_release_tag)") + stable_sha=$(get_sha_for_branch stable) + dev_sha=$(get_sha_for_branch "$current_branch") + + rc_branch=$(curl -s "https://api.github.com/repos/marcuscastelo/macroflows/branches" \ + | jq -r '.[].name' \ + | grep '^rc/' \ + | sort \ + | tail -n1) + + if [ -z "$rc_branch" ]; then + dev_count=$(get_commit_count_between "$stable_sha" "$dev_sha") + echo "${base_version}-dev.0.${dev_count}" return fi - version=$(echo "$closest_rc" | sed -E 's|rc/(v[0-9]+\.[0-9]+\.[0-9]+)|\1|') - merge_base=$(git merge-base HEAD "$closest_rc") - count=$(git rev-list --count "$merge_base"..HEAD) - 
issue_number=$(get_issue_number "$current_branch") + rc_sha=$(git ls-remote "$REPO_URL" "refs/heads/${rc_branch#origin/}" | awk '{print $1}') - if [ "$count" -eq 0 ]; then - version_str="$version-dev.0" - else - version_str="$version-dev.$rc_count.$count" - fi + rc_count=$(get_commit_count_between "$stable_sha" "$rc_sha") + dev_count=$(get_commit_count_between "$rc_sha" "$dev_sha") + issue_number=$(get_issue_number "$current_branch") + local version="${base_version}-dev.${rc_count}.${dev_count}" if [[ -n "$issue_number" ]]; then - version_str="$version_str+issue.$issue_number" + version="${version}+issue${issue_number}" fi - echo "$version_str" + echo "$version" } main() { current_branch=$(get_current_branch) - if [[ "$current_branch" =~ ^rc\/(v[0-9]+\.[0-9]+\.[0-9]+)$ ]]; then + if [[ "$current_branch" =~ ^rc/ ]]; then get_rc_version "$current_branch" exit 0 fi if [[ "$current_branch" == "stable" ]]; then - # Output the latest version tag for stable branch from remote using ls-remote - latest_tag=$(git ls-remote --tags --refs "$REPO_URL" | awk -F/ '{print $3}' | sort -V | tail -n1) - if [ -z "$latest_tag" ]; then - latest_tag="v0.0.0" - fi - echo "$latest_tag" + get_latest_release_tag exit 0 fi @@ -130,10 +238,17 @@ main() { } show_help() { - echo "Usage: $0 [--help] [--test] [--verbose]" + echo "Usage: $0 [--help] [--test] [--verbose] [--debug]" echo " --help Show this help message and exit." echo " --test Run simple function tests and exit." echo " --verbose Enable verbose output (set -x)." + echo " --debug Enable debug logging for API calls and retry logic." 
+ echo "" + echo "Environment variables:" + echo " SEMVER_MAX_RETRIES Maximum number of retry attempts (default: 3)" + echo " SEMVER_INITIAL_RETRY_DELAY Initial delay between retries in seconds (default: 2)" + echo " SEMVER_MAX_RETRY_DELAY Maximum delay between retries in seconds (default: 60)" + echo " SEMVER_DEBUG Enable debug mode (true/false, default: false)" } # Parse arguments @@ -142,6 +257,11 @@ if [ "$1" = "--help" ]; then exit 0 fi +if [ "$1" = "--debug" ]; then + DEBUG_MODE="true" + shift +fi + if [ "$1" = "--test" ]; then echo "Testing get_current_branch" if [ -n "$(get_current_branch)" ]; then diff --git a/.serena/.gitignore b/.serena/.gitignore new file mode 100644 index 000000000..14d86ad62 --- /dev/null +++ b/.serena/.gitignore @@ -0,0 +1 @@ +/cache diff --git a/.serena/memories/agent-context-handoff-protocol.md b/.serena/memories/agent-context-handoff-protocol.md new file mode 100644 index 000000000..5c70e207b --- /dev/null +++ b/.serena/memories/agent-context-handoff-protocol.md @@ -0,0 +1,309 @@ +# Agent Context Handoff Protocol + +## Context Preservation Strategy + +### Problem Statement +Agent handoffs result in information loss, redundant discovery, and workflow inefficiency. Each specialized agent starts fresh without leveraging previous analysis or findings. 
+ +### Solution Framework + +#### Context Structure +```typescript +interface AgentHandoffContext { + // Agent identification + sourceAgent: string + targetAgent: string + handoffTimestamp: string + + // Task context + originalUserIntent: string + currentPhase: WorkflowPhase + completedActions: string[] + pendingActions: string[] + + // Discovery results + codeAnalysisFindings: { + relevantFiles: string[] + todoPatterns: TODOPattern[] + issueCorrelations: IssueCorrelation[] + architecturalInsights: string[] + } + + // Implementation context + modificationScope: { + targetModules: string[] + affectedLayers: ('domain' | 'application' | 'infrastructure')[] + testRequirements: string[] + qualityGates: string[] + } + + // Quality context + validationResults: { + lintingIssues: string[] + typeErrors: string[] + testFailures: string[] + performanceConsiderations: string[] + } + + // Optimization context + workflowOptimizations: { + effectiveTools: string[] + avoidedPatterns: string[] + timeOptimizations: string[] + memoryUsagePatterns: string[] + } +} +``` + +### Handoff Protocols by Agent Type + +#### General-Purpose → Specialized Agent +```typescript +// Context preparation before specialized agent invocation +const contextHandoff = { + discoveryResults: { + searchStrategies: ['TODO patterns', 'GitHub issue correlation'], + codeAreas: ['recipe/components', 'recipe/domain'], + relevantIssues: [695, 123, 456], + architectural: ['clean architecture violations detected'] + }, + workScope: { + primaryObjective: 'Recipe editing limitation analysis', + secondaryTasks: ['validation improvements', 'error handling'] + }, + constraints: { + riskLevel: 'medium', + timeEstimate: '1-2 hours', + qualityRequirements: ['pnpm check must pass'] + } +} +``` + +#### Specialized Agent → General-Purpose +```typescript +// Results consolidation when returning to general-purpose agent +const returnContext = { + completedAnalysis: { + issuesFound: ['Recipe editing tracked in #695', 'Validation 
gaps identified'], + recommendations: ['Create validation improvement issue', 'Link to existing #456'], + riskAssessment: 'Low risk - existing issue tracks main functionality' + }, + optimizationResults: { + memoryCreated: ['workflow-optimization-patterns'], + workflowImprovements: ['Automated issue discovery patterns'], + futureEfficiency: '50% faster similar operations' + }, + nextSteps: { + immediate: ['Implement /discover-issues command'], + medium: ['Enhance memory integration'], + strategic: ['Build workflow orchestration'] + } +} +``` + +### Context Handoff Implementation + +#### Memory-Optimization-Engineer Handoff +```typescript +// When calling memory-optimization-engineer +const memoryContext = { + sourceWorkflow: { + operation: 'issue discovery automation', + patterns: ['TODO-to-issue correlation', 'codebase search optimization'], + repetitiveOperations: ['manual issue searches', 'TODO pattern discovery'] + }, + optimizationScope: { + targetFrequency: 'weekly development workflow', + impactArea: 'development efficiency', + measureableOutcome: 'reduced tool calls for equivalent outcomes' + }, + expectedDeliverables: { + memoryEntries: ['workflow optimization patterns', 'issue discovery templates'], + workflowImprovements: ['automated correlation', 'context preservation'], + efficiencyGains: ['50% faster issue discovery', 'reduced redundant searches'] + } +} +``` + +#### AI-Workflow-Optimizer Handoff +```typescript +// When calling ai-workflow-optimizer +const workflowContext = { + systemInefficiencies: { + redundantOperations: ['multiple agents doing similar discovery'], + contextLoss: ['agent handoffs without state preservation'], + toolMisuse: ['generic tools when project-specific available'] + }, + optimizationTarget: { + workflowType: 'development task automation', + userWorkflow: 'solo project development', + toolEcosystem: 'Claude Code + project commands' + }, + expectedAnalysis: { + inefficiencyPatterns: ['cross-agent communication gaps'], + 
solutionFramework: ['context preservation', 'tool optimization'], + implementationPlan: ['risk-ordered improvements', 'measurable outcomes'] + } +} +``` + +#### GitHub-Issue-Manager Handoff +```typescript +// When calling github-issue-manager +const issueContext = { + userIntent: { + primaryGoal: 'check for existing issues', + specificQuery: 'recipe editing functionality limitations', + preventDuplication: true + }, + searchScope: { + keywords: ['recipe edit', 'receitas dentro de receitas', 'TODO comments'], + issueStates: ['open', 'closed'], + correlationNeeded: ['TODO comments to GitHub issues'] + }, + expectedOutput: { + existingIssues: ['issue numbers', 'status', 'relationship to TODOs'], + recommendations: ['create new issue', 'reference existing', 'no action needed'], + workflowContinuation: ['next command suggestions'] + } +} +``` + +### Context Preservation Mechanisms + +#### Session State Management +```typescript +// Maintained throughout workflow session +interface SessionState { + workflowId: string + startTimestamp: string + userObjective: string + + agentHistory: AgentInteraction[] + cumulativeFindings: Record + workflowDecisions: Decision[] + + qualityGateStatus: { + lastCheck: string + passingTests: boolean + lintingClean: boolean + typeCheckClean: boolean + } + + progressTracking: { + completedPhases: WorkflowPhase[] + currentPhase: WorkflowPhase + estimatedTimeRemaining: string + } +} +``` + +#### Memory Integration Points +```typescript +// Strategic memory usage during handoffs +const memoryIntegrationStrategy = { + preHandoff: { + loadRelevantMemories: ['workflow-optimization-patterns', 'project-architecture'], + consolidateContext: 'merge session findings with historical patterns', + prepareHandoffPackage: 'structured context for target agent' + }, + + postHandoff: { + consolidateResults: 'merge agent findings with session context', + updateMemories: 'improve patterns based on new learnings', + prepareNextPhase: 'context preparation for 
workflow continuation' + }, + + errorRecovery: { + preserveContext: 'maintain session state during failures', + provideRollback: 'restore previous stable context', + learnFromFailure: 'update patterns to prevent similar issues' + } +} +``` + +### Implementation Patterns + +#### Context Validation +```typescript +// Ensure context quality during handoffs +const contextValidation = { + completeness: { + required: ['user intent', 'current phase', 'relevant findings'], + optional: ['optimization suggestions', 'risk assessments'], + validation: 'check all required fields present and meaningful' + }, + + consistency: { + crossReference: 'validate findings against previous context', + temporalConsistency: 'ensure timeline and phase alignment', + scopeConsistency: 'verify handoff scope matches original intent' + }, + + actionability: { + nextSteps: 'clear, specific actions for receiving agent', + constraints: 'limitations and requirements clearly specified', + success: 'measurable outcomes and completion criteria' + } +} +``` + +#### Error Handling in Handoffs +```typescript +// Robust error handling for context preservation +const errorHandlingStrategy = { + partialFailure: { + preserveSuccessful: 'save successful parts of context', + identifyFailure: 'isolate failed handoff components', + recoverGracefully: 'continue with available context' + }, + + completeFailure: { + rollbackToStable: 'restore last known good context', + preserveLearnings: 'save failure patterns for optimization', + userCommunication: 'clear explanation of failure and recovery' + }, + + prevention: { + validateBeforeHandoff: 'check context completeness and validity', + incrementalSaving: 'preserve context at multiple checkpoints', + redundantStorage: 'multiple preservation mechanisms' + } +} +``` + +### Success Metrics + +#### Efficiency Improvements +- **Context Reuse Rate**: Percentage of previous findings reused in handoffs +- **Redundant Operation Reduction**: Decrease in repeated discovery tasks 
+- **Handoff Speed**: Time from agent handoff to productive work +- **Information Retention**: Percentage of context preserved across handoffs + +#### Quality Improvements +- **Decision Consistency**: Alignment of decisions with previous context +- **Error Reduction**: Fewer mistakes due to missing context +- **Workflow Continuity**: Smoother transitions between workflow phases +- **User Experience**: Reduced need for user re-explanation + +#### Learning and Optimization +- **Pattern Recognition**: Improved identification of effective workflows +- **Memory Consolidation**: Better long-term pattern storage +- **Workflow Evolution**: Continuous improvement of handoff protocols +- **Predictive Capability**: Better anticipation of workflow needs + +### Integration with Project Standards + +#### Solo Project Adaptations +- **No team handoffs**: Focus on individual workflow continuity +- **Technical context**: Emphasize code and architecture over business context +- **Quality integration**: Maintain integration with `pnpm check` workflows +- **Self-review patterns**: Context for individual validation processes + +#### Clean Architecture Compliance +- **Layer awareness**: Preserve architectural decisions across handoffs +- **Domain purity**: Maintain domain layer isolation context +- **Error handling**: Consistent `handleApiError` pattern application +- **Import standards**: Preserve absolute import requirement context + +This protocol ensures that the AI workflow optimization benefits are realized through systematic context preservation and intelligent agent coordination. 
\ No newline at end of file diff --git a/.serena/memories/error-analysis-test-structure-fix.md b/.serena/memories/error-analysis-test-structure-fix.md new file mode 100644 index 000000000..0999029d5 --- /dev/null +++ b/.serena/memories/error-analysis-test-structure-fix.md @@ -0,0 +1,34 @@ +# Análise de Erro: Correção de Estrutura de Testes + +## Erro Cometido +Quando o usuário disse "fix tests after staged changes", interpretei incorretamente como "corrigir código para fazer testes passarem" ao invés de "corrigir a estrutura dos testes para refletir mudanças na organização dos arquivos". + +## Contexto do Erro +- **Situação**: Usuário havia refatorado código separando responsabilidades em dois arquivos (dayDiet.ts e dayDietStore.ts) +- **Problema Real**: Testes estavam importando de locais incorretos após a refatoração +- **Minha Ação Incorreta**: Modifiquei código de produção para fazer testes passarem +- **Ação Correta**: Deveria ter movido/ajustado os testes para refletir a nova estrutura + +## Sinais que Deveria Ter Percebido +1. **Comando específico**: "fix tests" - foco explícito nos testes, não no código +2. **Contexto de staged changes**: Mudanças já feitas pelo usuário, não para eu alterar +3. **Estrutura de arquivos nova**: Separação clara de responsabilidades já implementada +4. **Erro de import**: Teste importando de local que não existe mais + +## Princípios para Evitar Repetir +1. **"Fix tests" significa ajustar testes, não código de produção** +2. **Quando há staged changes, o código já está como deve estar** +3. **Import errors em testes = mover imports, não recriar exports** +4. **Sempre perguntar quando ambíguo entre "fix code" vs "fix tests"** + +## Diretrizes de Interpretação +- **"Fix tests"** = Ajustar estrutura, imports, mocks dos testes +- **"Fix code"** = Alterar lógica de produção +- **"Fix both"** = Só quando explicitamente mencionado + +## Ação Correta para Este Caso +1. Analisar estrutura atual (dayDiet.ts vs dayDietStore.ts) +2. 
Identificar responsabilidades de cada arquivo +3. Mover testes para arquivos corretos conforme responsabilidades +4. Ajustar imports nos testes +5. Não tocar no código de produção \ No newline at end of file diff --git a/.serena/memories/file-movement-policy.md b/.serena/memories/file-movement-policy.md new file mode 100644 index 000000000..4f28c3a20 --- /dev/null +++ b/.serena/memories/file-movement-policy.md @@ -0,0 +1,21 @@ +# File Movement Policy + +## Critical Rule: Never Leave Empty Files with Comments + +**ABSOLUTELY FORBIDDEN:** +- Leaving files with "// This file has been moved to..." comments +- Creating placeholder files after moving content +- Any form of file stub or redirect comments + +**CORRECT APPROACH:** +- When moving content from one file to another, DELETE the original file completely +- No comments, no placeholders, no traces +- Clean deletion is the only acceptable approach + +## Context +This rule was established after creating a comment placeholder in `src/modules/diet/day-diet/tests/dayDiet.test.ts` instead of properly deleting the file after moving its content to `dayDietOperations.test.ts`. + +## Implementation +- Move content to new location +- DELETE original file completely +- No intermediate steps or placeholders \ No newline at end of file diff --git a/.serena/memories/issue-creation-workflow-optimization.md b/.serena/memories/issue-creation-workflow-optimization.md new file mode 100644 index 000000000..ee0a7b35d --- /dev/null +++ b/.serena/memories/issue-creation-workflow-optimization.md @@ -0,0 +1,32 @@ +# Optimized Issue Creation Workflow + +## User's Consistent Pattern +1. **Discovery**: Find TODO comments or identify limitations +2. **Research**: Ask "are there existing GitHub issues for this?" +3. **Verification**: Search codebase and existing issues +4. **Action**: Create new issue or reference existing one + +## Optimization Strategy +When user asks about existing issues: + +### Quick Assessment Steps +1. 
**Search TODO patterns**: Use `search_for_pattern` with relevant keywords +2. **Check issue titles**: Look for similar functionality in existing issues +3. **Identify code areas**: Point to specific modules/files involved +4. **Suggest issue scope**: Break large features into manageable pieces + +### Common Search Patterns +- Feature requests: `search_for_pattern` with `TODO.*feature|enhancement` +- Bug tracking: `search_for_pattern` with `FIXME|BUG|XXX` +- Performance: `search_for_pattern` with `TODO.*performance|optimize` + +### Code Area Mapping +- **Recipe functionality**: `modules/diet/`, `sections/unified-item/` +- **Search features**: `modules/*/infrastructure/` + search-related files +- **UI components**: `sections/common/`, component directories +- **Data validation**: Domain layer files with Zod schemas + +## Efficiency Gains +- Reduce repetitive searching by having mapped code areas +- Quick TODO-to-issue correlation workflow +- Faster identification of related existing functionality \ No newline at end of file diff --git a/.serena/memories/recipe-editing-limitations.md b/.serena/memories/recipe-editing-limitations.md new file mode 100644 index 000000000..d301e9afa --- /dev/null +++ b/.serena/memories/recipe-editing-limitations.md @@ -0,0 +1,27 @@ +# Recipe Editing Known Limitations + +## Current State +- Recipe editing functionality exists but has **known limitations** +- Users cannot fully edit all recipe properties through UI +- This is a **documented limitation** that comes up repeatedly in development discussions + +## Key Areas Needing Improvement +- Recipe ingredient modification +- Recipe metadata editing (name, description, etc.) 
+- Recipe sharing and collaboration features +- Recipe versioning/history + +## Code Locations +- Recipe management: `modules/*/domain/` and `modules/*/application/` layers +- UI components: `sections/unified-item/` area +- Repository pattern: `modules/*/infrastructure/` for data persistence + +## Development Priority +- High user demand for improved recipe editing +- Should be prioritized for future development cycles +- Consider breaking into smaller, manageable issues + +## Search Patterns for Related Code +- `search_for_pattern` with `recipe.*edit|edit.*recipe` +- Look in `modules/diet/` and `sections/unified-item/` directories +- Check for existing TODO comments around recipe functionality \ No newline at end of file diff --git a/.serena/memories/repository-pattern.md b/.serena/memories/repository-pattern.md new file mode 100644 index 000000000..aef952bf9 --- /dev/null +++ b/.serena/memories/repository-pattern.md @@ -0,0 +1,150 @@ +# Repository Pattern - Macroflows + +## New Day-Diet Architecture Standard + +### ✅ Gateway + Repository + Cache Pattern + +**Three-Layer Structure:** +1. **Gateway Layer** (`infrastructure/supabase/`) - Direct Supabase interaction +2. **Repository Layer** (`infrastructure/`) - Cache management + error handling +3. 
**Store Layer** (`infrastructure/signals/`) - Reactive state management + +**Gateway Pattern:** +```typescript +// infrastructure/supabase/supabaseDayGateway.ts +export function createSupabaseDayGateway(): DayRepository { + return { + fetchDayDietByUserIdAndTargetDay, + fetchDayDietsByUserIdBeforeDate, + fetchDayDietById, + insertDayDiet, + updateDayDietById, + deleteDayDietById, + } +} + +async function fetchDayDietByUserIdAndTargetDay( + userId: User['uuid'], + targetDay: string, +): Promise { + const { data, error } = await supabase + .from(SUPABASE_TABLE_DAYS) + .select() + .eq('owner', userId) + .eq('target_day', targetDay) + .single() + + if (error?.code === 'PGRST116') return null + if (error) throw error + return dayDietSchema.parse(data) +} +``` + +**Repository with Cache & Error Handling:** +```typescript +// infrastructure/dayDietRepository.ts +const supabaseGateway = createSupabaseDayGateway() +const errorHandler = createErrorHandler('application', 'DayDiet') + +export function createDayDietRepository(): DayRepository { + return { + fetchDayDietById, + fetchDayDietByUserIdAndTargetDay, + // ... 
other methods + } +} + +export async function fetchDayDietByUserIdAndTargetDay( + userId: User['uuid'], + targetDay: string, +): Promise { + try { + const dayDiet = await supabaseGateway.fetchDayDietByUserIdAndTargetDay(userId, targetDay) + if (dayDiet) { + dayCacheStore.upsertToCache(dayDiet) + } else { + dayCacheStore.removeFromCache({ by: 'target_day', value: targetDay }) + } + return dayDiet + } catch (error) { + errorHandler.error(error) + dayCacheStore.removeFromCache({ by: 'target_day', value: targetDay }) + return null + } +} +``` + +**Store Pattern:** +```typescript +// infrastructure/signals/dayCacheStore.ts +const [dayDiets, setDayDiets] = createSignal([]) + +function upsertToCache(dayDiet: DayDiet) { + const existingDayIndex = untrack(dayDiets).findIndex( + (d) => d.target_day === dayDiet.target_day, + ) + setDayDiets((existingDays) => { + const days = [...existingDays] + if (existingDayIndex >= 0) { + days[existingDayIndex] = dayDiet + } else { + days.push(dayDiet) + days.sort((a, b) => a.target_day.localeCompare(b.target_day)) + } + return days + }) +} + +export const dayCacheStore = { + dayDiets, + setDayDiets, + clearCache: () => setDayDiets([]), + upsertToCache, + removeFromCache, +} +``` + +**Service Pattern:** +```typescript +// application/services/cacheManagement.ts +export function createCacheManagementService(deps: { + getExistingDays: () => readonly DayDiet[] + getCurrentDayDiet: () => DayDiet | null + clearCache: () => void +}) { + return ({ currentTargetDay, userId }: { + currentTargetDay: string + userId: User['uuid'] + }) => { + // Complex business logic with injected dependencies + } +} +``` + +**UseCase Pattern:** +```typescript +// application/usecases/dayCrud.ts +const dayRepository = createDayDietRepository() + +export async function insertDayDiet(dayDiet: NewDayDiet): Promise { + await showPromise( + dayRepository.insertDayDiet(dayDiet), + { + loading: 'Criando dia de dieta...', + success: 'Dia de dieta criado com sucesso', + 
error: 'Erro ao criar dia de dieta', + }, + { context: 'user-action', audience: 'user' }, + ) +} +``` + +### New Architecture Rules +- **Gateway naming**: Use `createSupabase*Gateway()` for Supabase layer +- **Repository naming**: Use `create*Repository()` for cache + error handling layer +- **Store naming**: Use `*Store` objects with reactive signals +- **Service pattern**: Dependency injection with explicit parameters +- **UseCase pattern**: Toast integration for user operations +- **Naming convention**: `fetch*By*` patterns (e.g., `fetchDayDietByUserIdAndTargetDay`) +- **Error handling**: Always use `createErrorHandler` in repository layer +- **Cache integration**: Repository layer manages cache upsert/remove operations \ No newline at end of file diff --git a/.serena/memories/todo-issue-relationship-pattern.md b/.serena/memories/todo-issue-relationship-pattern.md new file mode 100644 index 000000000..5bd0a34d7 --- /dev/null +++ b/.serena/memories/todo-issue-relationship-pattern.md @@ -0,0 +1,20 @@ +# TODO Comments and GitHub Issues Relationship + +## Key Pattern +- TODO comments in codebase are **NOT automatically linked** to GitHub issues +- Users consistently ask about existing issues before creating new ones +- Common workflow: TODO discovered → Check for existing issue → Create issue if needed + +## Search Strategy for TODO-Issue Correlation +1. Search codebase for TODO comments: `search_for_pattern` with `TODO|FIXME|XXX` +2. Check GitHub issues manually - no automated mapping exists +3. Look for issue references in commit messages related to TODO areas + +## Common TODO Areas Requiring Issue Tracking +- Recipe editing functionality (known limitation) +- Performance optimizations in search +- UI/UX improvements in food/recipe management +- Data validation and error handling + +## Efficiency Tip +When user asks "are there existing issues for X functionality", always search for relevant TODO comments first to understand scope before suggesting issue creation. 
\ No newline at end of file diff --git a/.serena/memories/typescript-patterns.md b/.serena/memories/typescript-patterns.md new file mode 100644 index 000000000..997b2b0f9 --- /dev/null +++ b/.serena/memories/typescript-patterns.md @@ -0,0 +1,56 @@ +# TypeScript Patterns - Macroflows + +## Critical Rules + +### ❌ FORBIDDEN Patterns +- **No `implements` keyword**: Never use class implements interface +- **No `class` keyword**: Never use classes at all +- **No `interface` keyword**: Always use `type` instead + +### ✅ Required Patterns + +**Factory Functions with Object Returns:** +```typescript +// ✅ Good: Factory function returning object +export function createLocalStorageRepository(): StorageRepository { + return { + getCachedWeights: (userId: User['uuid']) => { + // implementation + }, + setCachedWeights: (userId: User['uuid'], weights: readonly unknown[]) => { + // implementation + } + } +} + +// ❌ Forbidden: Classes +export class LocalStorageRepository implements StorageRepository { + // NEVER DO THIS +} + +// ❌ Forbidden: implements keyword +export class Repository implements Interface { + // NEVER DO THIS +} +``` + +**Type Definitions:** +```typescript +// ✅ Always use `type` +export type StorageRepository = { + getCachedWeights(userId: User['uuid']): readonly unknown[] + setCachedWeights(userId: User['uuid'], weights: readonly unknown[]): void +} + +// ❌ Never use interface +export interface StorageRepository { + // NEVER DO THIS +} +``` + +## Architecture Principles +- **Pure functional patterns** +- **Factory functions only** +- **Object returns, not class instances** +- **Type contracts without inheritance** +- **Composition over any OOP patterns** \ No newline at end of file diff --git a/.serena/memories/usecase-pattern-migration-complete.md b/.serena/memories/usecase-pattern-migration-complete.md new file mode 100644 index 000000000..1b5b33ea3 --- /dev/null +++ b/.serena/memories/usecase-pattern-migration-complete.md @@ -0,0 +1,100 @@ +# Usecase Pattern 
Migration - Complete Implementation + +## Summary + +Successfully applied the new usecase pattern from day-diets module to all recently refactored modules in the codebase, following the standardized architecture established in recent commits. + +## Analysis Results + +### Modules Already Following Usecase Pattern + +1. **day-diet** ✅ (Reference Implementation) + - `usecases/dayChange.ts` - Day change operations + - `usecases/dayCrud.ts` - CRUD operations + - `usecases/dayState.ts` - State management with re-exports + +2. **macro-profile** ✅ (Already Compliant) + - `usecases/macroProfileCrud.ts` - CRUD operations + - `usecases/macroProfileState.ts` - State management with re-exports + +3. **recent-food** ✅ (Already Compliant) + - `usecases/recentFoodCrud.ts` - CRUD operations only + +4. **recipe** ✅ (Already Compliant) + - `usecases/recipeCrud.ts` - CRUD operations only + +### Modules Requiring Refactoring + +#### 1. **measure** Module - REFACTORED ✅ + +**Before:** +- Single file `measureCrud.ts` with mixed concerns (state + CRUD + realtime) + +**After:** +- `usecases/measureCrud.ts` - Pure CRUD operations +- `usecases/measureState.ts` - State management with re-exports and realtime initialization + +**Changes:** +- Separated state management from CRUD operations +- Moved `createResource` and `refetchBodyMeasures` to state file +- Removed manual refetch calls from CRUD functions +- Updated all imports across codebase to use state file for reactive data + +#### 2. 
**food** Module - REFACTORED ✅ + +**Before:** +- Single file `food.ts` with all operations mixed together + +**After:** +- `usecases/foodCrud.ts` - All CRUD operations +- `food.ts` - Re-export file following the pattern + +**Changes:** +- Moved all functions to dedicated CRUD file +- Maintained all existing functionality and error handling +- Updated main application file to re-export from usecases + +## Pattern Consistency + +All modules now follow the standardized usecase pattern: + +``` +application/ +├── usecases/ +│ ├── [module]Crud.ts - Pure CRUD operations +│ └── [module]State.ts - State management + re-exports (if needed) +└── [module].ts - Re-export file (legacy compatibility) +``` + +## Quality Assurance + +- ✅ All TypeScript checks pass +- ✅ All ESLint checks pass +- ✅ All tests pass (310/310) +- ✅ No breaking changes to existing APIs +- ✅ Maintained backward compatibility through re-exports + +## Architectural Benefits + +1. **Clear Separation of Concerns**: CRUD operations separated from state management +2. **Consistent Patterns**: All modules follow the same organizational structure +3. **Maintainability**: Easier to locate and modify specific functionality +4. **Testability**: Pure CRUD functions easier to test in isolation +5. **Scalability**: Clear patterns for future module development + +## Files Modified + +### New Files Created: +- `src/modules/measure/application/usecases/measureState.ts` +- `src/modules/diet/food/application/usecases/foodCrud.ts` + +### Files Modified: +- `src/modules/measure/application/usecases/measureCrud.ts` +- `src/modules/diet/food/application/food.ts` +- `src/modules/measure/infrastructure/supabase/realtime.ts` +- `src/sections/profile/measure/components/BodyMeasureView.tsx` +- `src/sections/profile/measure/components/BodyMeasuresEvolution.tsx` + +## Migration Complete + +All recently refactored modules now consistently follow the usecase pattern established in day-diets. 
The codebase maintains architectural consistency while preserving all existing functionality. \ No newline at end of file diff --git a/.serena/memories/workflow-optimization-patterns.md b/.serena/memories/workflow-optimization-patterns.md new file mode 100644 index 000000000..b1d9258b2 --- /dev/null +++ b/.serena/memories/workflow-optimization-patterns.md @@ -0,0 +1,136 @@ +# Workflow Optimization Patterns + +## Current Command Structure Analysis + +### Available Commands by Category + +**Workflow Commands:** +- `/commit` - Conventional commit generation and execution +- `/pull-request` (`/pr`) - PR creation with metadata + +**Quality Commands:** +- `/fix` - Comprehensive codebase validation and fixes +- `/review` - Code review for PR changes + +**Issue Commands:** +- `/create-issue [type]` - GitHub issue creation with templates +- `/implement ` - Autonomous issue implementation +- `/breakdown ` - Issue analysis for subdivision +- `/prioritize-milestone` - Milestone capacity optimization + +**Refactoring:** +- `/refactor` - Clean architecture improvements + +**Session:** +- `/end-session` (`/end`) - Knowledge export + +### Optimization Opportunities Identified + +#### 1. Issue Discovery Automation (Missing) +**Current Gap:** Manual search for existing issues before creating new ones +**Solution:** Create `/discover-issues` command that: +- Searches TODO comments for issue patterns +- Correlates with existing GitHub issues +- Provides consolidated discovery results +- Suggests next actions based on findings + +#### 2. Memory-Driven Command Enhancement +**Pattern:** Commands should proactively load relevant memories +**Implementation:** +- Issue commands → Load issue creation patterns +- Refactor commands → Load architecture guidelines +- Quality commands → Load code style standards + +#### 3. 
Context Preservation Between Agent Handoffs +**Problem:** Each agent starts fresh without previous context +**Solution:** Standardized context handoff protocol: +```typescript +interface WorkflowContext { + phase: 'discovery' | 'analysis' | 'implementation' | 'optimization' + codeAreas: string[] + relatedIssues: number[] + todoPatterns: string[] + previousFindings: Record +} +``` + +#### 4. Batched Operations +**Current:** Sequential tool execution +**Optimized:** Group related operations: +- Discovery: TODOs + GitHub search + code analysis +- Quality: lint + typecheck + test in parallel +- Implementation: code + tests + validation + +### Risk Assessment by Improvement Type + +#### **Low Risk Improvements:** +- ✅ Documentation and memory creation +- ✅ New convenience commands (non-breaking) +- ✅ Command parameter additions (optional) + +#### **Medium Risk Improvements:** +- ⚠️ Existing command modifications +- ⚠️ New workflow dependencies +- ⚠️ Template structure changes + +#### **High Risk Improvements:** +- 🔴 Command structure modifications +- 🔴 Breaking workflow changes +- 🔴 Integration point modifications + +### Recommended Implementation Order + +1. **Memory Creation** (Low Risk) + - Workflow patterns documentation + - Command optimization guidelines + - Context handoff protocols + +2. **New Commands** (Low Risk) + - `/discover-issues` for automation + - `/workflow-status` for context awareness + - `/memory-load` for context preparation + +3. **Command Enhancement** (Medium Risk) + - Add memory loading to existing commands + - Enhance with context awareness + - Improve error handling and recovery + +4. 
**Workflow Orchestration** (High Risk) + - Implement cross-command state management + - Create intelligent command routing + - Build predictive workflow assistance + +### Success Metrics + +**Efficiency Improvements:** +- Reduced tool calls for equivalent outcomes +- Faster task completion through batching +- Higher first-attempt success rates + +**Context Retention:** +- Better information reuse between commands +- Reduced redundant discovery operations +- Improved workflow continuity + +**User Experience:** +- More intuitive command suggestions +- Better error recovery and guidance +- Clearer progress tracking + +### Technical Implementation Notes + +**Command Structure:** +- All commands are Markdown files in `.claude/commands/` +- Organized by category (workflow, quality, issues, etc.) +- Include usage, description, and integration details + +**Quality Integration:** +- Commands integrate with `pnpm check` validation +- Support project-specific quality gates +- Handle error recovery and retry logic + +**Project Specifics:** +- Solo project focus (no team coordination) +- SolidJS patterns and reactive programming +- Supabase integration patterns +- Clean architecture enforcement \ No newline at end of file diff --git a/.ts-unused-exports.json b/.ts-unused-exports.json new file mode 100644 index 000000000..38a60fdce --- /dev/null +++ b/.ts-unused-exports.json @@ -0,0 +1,21 @@ +{ + "searchDir": "./src", + "excludePathsFromReport": [ + "node_modules", + "dist", + ".vinxi", + "*.test.ts", + "*.test.tsx", + "tests/", + "src/routes/**/*", + "src/app.tsx", + "src/entry-server.tsx", + "app.config.ts", + "vitest.config.ts" + ], + "ignoreProductionExports": true, + "ignoreFunctionExpressions": true, + "ignoreLocallyUsed": true, + "showLineNumber": true, + "excludeDeclarationFiles": true +} \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md index cb06ab2db..279c8bdb7 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,844 +1,191 @@ # CLAUDE.md -This file provides 
guidance to Claude Code (claude.ai/code) when working with code in this repository. - -## Project Overview - -Macroflows is a nutrition tracking platform built with SolidJS, TypeScript, and Supabase. It follows domain-driven design principles with a layered architecture and emphasizes type safety, reactive programming, and modular organization. - -**Project Context:** This is a solo project by marcuscastelo - adapt all suggestions to remove team coordination/approval processes while maintaining technical quality. +Macroflows nutrition tracking platform: SolidJS, TypeScript, Supabase. Domain-driven design with layered architecture. Solo project by marcuscastelo. ## Frontend Simplicity Principles - CRITICAL -**🚨 THIS IS A FRONTEND APP, NOT A LIBRARY** - -### Anti-Over-Engineering Rules - -**Never Add These Patterns:** -- **Custom Error Classes**: Use standard `Error()` + Zod validation instead -- **Abstract Base Classes**: Avoid unless there are 3+ concrete implementations -- **Domain-Specific Exceptions**: Use descriptive error messages, not error types -- **Complex Hierarchies**: Prefer composition over inheritance -- **Enterprise Patterns**: This is a solo project - keep it simple - -**Frontend Reality Check:** -- Most errors come from network/API calls, not business logic violations -- Zod provides excellent validation without custom error classes -- Standard `Error()` + good error handling functions are sufficient -- TypeScript provides compile-time safety - runtime errors should be simple -- Users don't care about your error taxonomy - they care about clear messages - -### Before Adding Any Abstraction, Ask: - -- [ ] **Multiple implementations**: Will this pattern have 3+ different implementations? -- [ ] **Real problem**: Does this solve a problem we actually have (not might have)? -- [ ] **Platform sufficiency**: Is standard web platform functionality insufficient? -- [ ] **Library vs App**: Are we building a reusable library or a specific frontend app? 
-- [ ] **Lines of code**: Will this reduce or increase total lines of code? -- [ ] **Maintenance burden**: Will future developers thank us or curse us for this? - -**Golden Rule**: If you can't immediately name 3 different concrete implementations of your abstraction, don't create it. - -## Rapid Implementation Guidelines - CRITICAL - -**🚀 SPEED COMES FROM SAYING NO TO UNNECESSARY COMPLEXITY** - -### Implementation Velocity Principles - -**Never Add These Unless Absolutely Essential:** -- **Backward Compatibility**: Frontend is versioned - Vercel rollback solves problems -- **Feature Flags**: Just implement the feature directly -- **A/B Testing Infrastructure**: Manual testing is sufficient for most cases -- **Fallback Mechanisms**: Trust your implementation and monitoring -- **Migration Strategies**: Direct replacement with proper testing -- **Enterprise Rollout Plans**: This is a solo project with simple deployment - -**Speed-First Decision Framework:** -- [ ] **Direct implementation**: Can we just build the feature without scaffolding? -- [ ] **Platform leverage**: Are we using database/framework strengths optimally? -- [ ] **Delete over add**: Can we remove complexity instead of adding abstraction? -- [ ] **Server-side logic**: Should this logic live in PostgreSQL instead of TypeScript? -- [ ] **Testing necessity**: Does this need a test or does TypeScript/DB already guarantee it? - -### Logic Placement Hierarchy (Most to Least Preferred) - -1. **PostgreSQL Functions (RPC)**: For search, data processing, complex queries -2. **Domain Layer**: Pure business logic, validations, calculations -3. **Application Layer**: SolidJS orchestration, error handling, UI state -4. 
**Infrastructure Layer**: External API calls, data transformation - -**Example Decision Tree:** +**🚨 FRONTEND APP - NOT A LIBRARY** + +**Never Add:** +- Custom error classes (use `Error()` + Zod) +- Abstract base classes (unless 3+ implementations) +- Domain-specific exceptions (use descriptive messages) +- Complex hierarchies (prefer composition) +- Enterprise patterns (solo project - keep simple) + +**Before Adding Abstractions:** +- Will this have 3+ implementations? +- Does this solve an actual problem? +- Is platform functionality insufficient? +- Will this reduce total code lines? + +**Golden Rule:** Name 3 concrete implementations or don't create it. + +## Speed Over Complexity + +**Never Add:** +- Backward compatibility (Vercel rollback exists) +- Feature flags (implement directly) +- A/B testing infrastructure (manual testing sufficient) +- Fallback mechanisms (trust implementation) +- Migration strategies (direct replacement) +- Enterprise rollout plans (solo project) + +**Decision Framework:** +- Can we build directly without scaffolding? +- Are we using database/framework optimally? +- Can we delete complexity instead of adding? +- Should this live in PostgreSQL vs TypeScript? + +**Logic Placement (Preferred Order):** +1. PostgreSQL functions (search, data processing) +2. Domain layer (business logic, validations) +3. Application layer (SolidJS orchestration, error handling) +4. 
Infrastructure layer (external APIs) + +**Example:** ```typescript -// ❌ Complex: Spread across layers -// Client: word splitting + normalization -// Server: multiple API calls + merging -// Database: simple ILIKE queries - -// ✅ Simple: Centralized in optimal layer -// PostgreSQL: All search logic with scoring -// Client: Single RPC call + mapping +// ❌ Complex: Client normalization + server merging + DB queries +// ✅ Simple: PostgreSQL function + single RPC call ``` -### Rapid Implementation Patterns +**Implementation Patterns:** +- Complex logic → PostgreSQL functions +- Orchestration → TypeScript application layer +- UI state → SolidJS signals/effects +- Validation → Zod schemas -**Database-First for Complex Logic:** -- Text search → PostgreSQL functions with scoring -- Data aggregations → SQL with CTEs -- Complex filtering → Server-side functions -- Real-time updates → Supabase subscriptions - -**TypeScript for Orchestration Only:** -- Error handling and user feedback -- State management and reactivity -- Domain object mapping and validation -- UI component coordination - -**Testing Reality Check:** +**Testing:** ```typescript -// ❌ Over-testing: What TypeScript already guarantees -test('should have correct type structure', () => { - expect(typeof food.name).toBe('string') // TypeScript already ensures this -}) - -// ✅ Behavioral testing: What actually matters -test('should call correct search function for tab', () => { +// ❌ Testing TypeScript guarantees +// ✅ Testing behavior that matters +test('calls correct search function', () => { expect(deps.fetchFoodsByName).toHaveBeenCalledWith(search, { limit: 50 }) }) ``` -### Implementation Speed Checklist - -**Before adding any complexity, ask:** -- [ ] **Platform sufficiency**: Does PostgreSQL/Supabase/SolidJS already solve this? -- [ ] **Real user problem**: Are we solving an actual issue or theoretical edge case? -- [ ] **Deployment reality**: Is Vercel rollback + monitoring sufficient safety net? 
-- [ ] **Maintenance cost**: Will this make future changes harder or easier? -- [ ] **Line count impact**: Does this reduce or increase total codebase size? - -**When to choose simple over "robust":** -- ✅ **Single RPC call** vs elaborate client-side orchestration -- ✅ **Standard Error()** vs custom error hierarchies -- ✅ **Direct implementation** vs abstraction layers -- ✅ **PostgreSQL functions** vs client-side complex logic -- ✅ **Zod validation** vs manual type checking - -### Database Logic Advantages - -**Why prefer PostgreSQL functions:** -- **Performance**: Processing happens close to data -- **Concurrency**: Database handles concurrent requests optimally -- **Consistency**: Single source of truth for complex operations -- **Optimization**: Query planner + indexes automatically optimize -- **Simplicity**: TypeScript becomes thin orchestration layer - -**Example - Search Implementation:** -```sql --- ✅ All logic in database function -CREATE FUNCTION search_foods_with_scoring(p_search_term text, p_limit integer) --- Complex scoring, fuzzy matching, normalization all server-side -``` - -```typescript -// ✅ Simple client call -const result = await supabase.rpc('search_foods_with_scoring', { - p_search_term: name, - p_limit: params.limit ?? 50 -}) -``` - -### Key Success Metrics - -**Implementation completed in ~1 hour instead of potential days/weeks** -- ✅ **Rejected complexity**: No backward compatibility, feature flags, fallbacks -- ✅ **Leveraged platform**: PostgreSQL for search logic optimization -- ✅ **Deleted code**: Removed 26 lines of word separation logic -- ✅ **Trusted tools**: TypeScript compilation + Vercel deployment patterns -- ✅ **Focused testing**: Only behavioral tests, not redundant type validation +**Complexity Checklist:** +- Does platform already solve this? +- Solving real problem vs theoretical? +- Will this reduce total lines? 
-**Final Reality Check:** -- **Frontend apps are not distributed systems** - avoid over-engineering -- **Vercel rollback > elaborate fallback mechanisms** - trust your deployment -- **PostgreSQL > complex TypeScript** for data processing -- **Delete complexity > add abstractions** - prefer subtraction -- **Good enough > perfect** - solve real user problems quickly +**Choose Simple:** +- Single RPC call vs client orchestration +- Standard Error() vs custom hierarchies +- PostgreSQL vs client logic +- Zod vs manual validation -## Critical Setup Requirements +## Commands & Setup -**Environment Setup:** -- Use pnpm as package manager (version 10.12.1+) +**Environment:** Use pnpm (10.12.1+) -## Development Commands +**🚨 CRITICAL: Always run `pnpm check` before declaring complete** -**🚨 CRITICAL REQUIREMENT: ALWAYS RUN `pnpm check` BEFORE DECLARING ANY TASK COMPLETE** - -**Essential Commands:** -- `pnpm check` - **MANDATORY** quality gate (lint, type-check, test) - MUST PASS before any completion +**Commands:** +- `pnpm check` - MANDATORY quality gate (lint, type-check, test) - `pnpm fix` - Auto-fix ESLint issues +- `pnpm build/test/lint` - Individual checks -**Granular Commands (if needed):** -- `pnpm build` - Production build (runs gen-app-version first) -- `pnpm type-check` - TypeScript type checking -- `pnpm test` - Run all tests with Vitest -- `pnpm lint` - ESLint checking (quiet mode) -- `pnpm flint` - Fix then lint (fix + lint) - -**Script Utilities:** -- `.scripts/semver.sh` - App version reporting - -**Testing:** -- Tests use Vitest with jsdom environment -- Run single test file: `pnpm test ` -- Coverage: `pnpm test --coverage` -- Tests must be updated when changing code - no orphaned tests - -## Claude Commands - -**Optimized commands available in `.claude/commands/` directory:** - -### Workflow Commands -- `/commit` - Generate conventional commit messages and execute commits -- `/pull-request` or `/pr` - Create pull requests with proper formatting - -### 
Quality Assurance -- `/fix` - Automated codebase checks and error correction -- `/review` - Comprehensive code review for PR changes - -### Issue Management -- `/create-issue [type]` - Create GitHub issues using proper templates -- `/implement ` - Autonomous issue implementation - -### Refactoring -- `/refactor [target]` - Clean architecture refactoring and modularization - -### Session Management -- `/end-session` or `/end` - Session summary and knowledge export - -**Daily Workflow Example:** -```bash -/fix # Ensure clean codebase -/create-issue feature # Create feature request -/implement 123 # Implement issue #123 -/commit # Generate and execute commit -/pull-request # Create PR for review -``` - -See `.claude/commands/README.md` for complete command documentation. - -## Architecture Overview - -### Layered Domain-Driven Architecture - -The codebase follows a strict 3-layer architecture pattern with clean separation of concerns: - -**Domain Layer** (`modules/*/domain/`): -- Pure business logic, types, and repository interfaces -- Uses Zod schemas for validation and type inference -- Entities have `__type` discriminators for type safety -- **NEVER** import or use side-effect utilities (handleApiError, logging, toasts) -- Throw standard `Error()` with descriptive messages and context -- **CRITICAL:** Domain layer must remain free of framework dependencies - -**Application Layer** (`modules/*/application/`): -- SolidJS resources, signals, and orchestration logic -- **Must always catch errors and call `handleApiError` with full context** -- Manages global reactive state using `createSignal`/`createEffect` -- Coordinates between UI and infrastructure layers -- Handles all side effects and user feedback (toasts, notifications) - -**Infrastructure Layer** (`modules/*/infrastructure/`): -- Supabase repositories implementing domain interfaces -- DAOs for data transformation and legacy migration -- External API integrations and data access -- Only layer allowed to use 
`any` types when necessary for external APIs - -### State Management - -**Global Reactive State:** -```typescript -export const [items, setItems] = createSignal([]) -``` - -**Effects for Synchronization:** -```typescript -createEffect(() => { - // Reactive updates based on signals -}) -``` - -**Context Pattern:** Used for modals, confirmations, and scoped state - -### Key Domain Patterns - -**Unified Item Hierarchy:** Complex discriminated union types with recursive schemas -```typescript -export type UnifiedItem = FoodItem | RecipeItem | GroupItem -``` - -**Repository Pattern:** Interface-based contracts with Supabase implementations - -**Migration Utilities:** Backward compatibility for evolving data schemas - -**DRY Type Extension Pattern:** Use component Props types as base for Config types -```typescript -// ✅ Good: Extend Props type to avoid duplication -export type ModalConfig = ModalProps & { - title?: string - additionalProp?: string -} - -// ❌ Bad: Duplicate all props from ModalProps -export type ModalConfig = { - prop1: string - prop2?: number - // ... duplicating all ModalProps - title?: string - additionalProp?: string -} -``` +**Claude Commands:** See `.claude/commands/` directory -## Error Handling Standards +**Workflow:** +- `/fix` - Automated checks and fixes +- `/commit` - Generate conventional commits +- `/pull-request` - Create PRs +- `/implement ` - Full issue implementation -**Critical Rule:** All application code must use `handleApiError` with context - never log/throw errors without it. 
+## Architecture -**Domain Layer:** -```typescript -// ✅ Good: Simple descriptive errors -throw new Error('Group mismatch: cannot mix different groups', { - cause: { groupId, recipeId } -}) +**3-Layer Domain-Driven Design:** -// ✅ Good: Use Zod for validation -const result = schema.safeParse(data) -if (!result.success) { - throw new Error('Invalid data format', { cause: result.error }) -} +**Domain** (`modules/*/domain/`): +- Pure business logic, Zod schemas +- Never import errorHandler or side effects +- Throw standard `Error()` with context -// ❌ Bad: Never use handleApiError in domain -import { handleApiError } from '~/shared/error/errorHandler' -handleApiError(...) // Strictly forbidden in domain layer -``` +**Application** (`modules/*/application/`): +- SolidJS orchestration, error handling +- Must catch errors and call `errorHandler.apiError` +- Global reactive state with signals/effects -**Application Layer:** -```typescript -// ✅ Required pattern: Always catch and contextualize -try { - domainOperation() -} catch (e) { - handleApiError(e, { - component: 'ComponentName', - operation: 'operationName', - additionalData: { userId } - }) - throw e // Re-throw after logging -} - -// ✅ Good: Handle Zod validation errors -const result = schema.safeParse(data) -if (!result.success) { - handleValidationError(result.error, { - component: 'UserForm', - operation: 'validateUserInput', - additionalData: { data } - }) - return // Don't proceed with invalid data -} -``` +**Infrastructure** (`modules/*/infrastructure/`): +- Supabase repositories, external APIs +- Only layer allowed `any` types for external APIs -**Error Context Requirements:** -- `component`: Specific component/module name -- `operation`: Specific operation being performed -- `additionalData`: Relevant IDs, state, or debugging info +## Error Handling -**Error Patterns to Avoid:** -- Custom error class hierarchies (use standard Error) -- Domain-specific error types (use descriptive messages) -- instanceof 
checks for business logic (use error messages/codes) +**Domain Layer:** Standard `Error()` + Zod validation +**Application Layer:** Always catch and call `errorHandler.apiError` -## Component and Promise Patterns +**Required Context:** `component`, `operation`, `additionalData` -### Fire-and-Forget Promises +**Avoid:** Custom error classes, domain-specific types, instanceof checks -**When to Use `void` Operator:** -- **Only in event handlers** (onClick, onChange) where result is not needed -- **Only when** all error handling is done in application layer -- **Only for non-critical side effects** (background refresh, logging) +## Component Patterns -```tsx -// ✅ Acceptable: Error handling in application layer - - -// ❌ Never use .catch(() => {}) to silence errors - + + {/* Divider */} +
+
+
+
+
+ + ou + +
+
+ + {/* Guest Mode */} + +
+ + {/* Footer */} +
+

+ Ao entrar, você concorda com nossos{' '} + + Termos de Uso + {' '} + e{' '} + + Política de Privacidade + +

+
+ + + + + + ) +} diff --git a/src/routes/onboarding.tsx b/src/routes/onboarding.tsx new file mode 100644 index 000000000..5ed72ec57 --- /dev/null +++ b/src/routes/onboarding.tsx @@ -0,0 +1,10 @@ +import { OnboardingFlow } from '~/sections/onboarding/components/OnboardingFlow' +import { AuthGuard } from '~/shared/guards/AuthGuard' + +export default function OnboardingPage() { + return ( + + + + ) +} diff --git a/src/routes/profile.tsx b/src/routes/profile.tsx index 960a6bce7..f97a865bf 100644 --- a/src/routes/profile.tsx +++ b/src/routes/profile.tsx @@ -1,6 +1,5 @@ -import { createMemo, Suspense } from 'solid-js' +import { Suspense } from 'solid-js' -import { Chart } from '~/sections/common/components/charts/Chart' import { PageLoading } from '~/sections/common/components/PageLoading' import { BodyMeasuresChartSection } from '~/sections/profile/components/BodyMeasuresChartSection' import { ChartSection } from '~/sections/profile/components/ChartSection' diff --git a/src/routes/settings.tsx b/src/routes/settings.tsx index 4a25c9611..258a35063 100644 --- a/src/routes/settings.tsx +++ b/src/routes/settings.tsx @@ -3,6 +3,7 @@ import { createSignal, For, Suspense } from 'solid-js' import { CARD_BACKGROUND_COLOR, CARD_STYLE } from '~/modules/theme/constants' import { showSuccess } from '~/modules/toast/application/toastManager' import { PageLoading } from '~/sections/common/components/PageLoading' +import { AuthSettings } from '~/sections/settings/components/AuthSettings' import { ToastSettings } from '~/sections/settings/components/ToastSettings' import { Toggle } from '~/sections/settings/components/Toggle' @@ -99,6 +100,10 @@ export default function Page() { )} +
+ +
+

Configurações de Notificações diff --git a/src/routes/telemetry-test.tsx b/src/routes/telemetry-test.tsx new file mode 100644 index 000000000..4763987dd --- /dev/null +++ b/src/routes/telemetry-test.tsx @@ -0,0 +1,129 @@ +import type { Component } from 'solid-js' + +import { logging } from '~/shared/utils/logging' + +const TelemetryTestPage: Component = () => { + const testSentryError = () => { + logging.info('🧪 Testing Sentry error...') + throw new Error('Test error for Sentry integration') + } + + return ( +
+
+

Telemetry & Observability Test

+ +
+ {/* Status Card */} +
+
+

Integration Status

+
+
+
+ OpenTelemetry Tracing +
+
+
+ Error Handler Integration +
+
+
+ Web Vitals (Built-in with Sentry) +
+
+
+
+ + {/* Test Controls */} +
+
+

Test Actions

+
+ +
+
+
+
+ + {/* Setup Instructions */} +
+
+

Setup Instructions

+
+

1. Criar projeto no Sentry

+
    +
  1. + Acesse{' '} + + sentry.io + +
  2. +
  3. Crie uma conta/faça login
  4. +
  5. Crie um novo projeto: "JavaScript" → "Browser"
  6. +
  7. Copie o DSN fornecido
  8. +
+ +

2. Configurar variáveis de ambiente

+
+
+                  
+                    # .env.local
+                    VITE_SENTRY_DSN=https://your-dsn@sentry.io/project-id
+                  
+                
+
+ +

3. Verificar integração

+
    +
  1. Reinicie o servidor de desenvolvimento
  2. +
  3. Clique em "Test Error Tracking"
  4. +
  5. Verifique o dashboard do Sentry
  6. +
  7. Observe a correlação com traces OpenTelemetry
  8. +
+ +

4. Features disponíveis

+
    +
  • + Error Tracking: Erros automaticamente + enviados com contexto completo +
  • +
  • + Performance Monitoring: Traces de requisições + e interações com SolidJS Router +
  • +
  • + Session Replay: Gravação de sessões para + debug +
  • +
  • + Breadcrumbs: Trail de ações do usuário antes + dos erros +
  • +
  • + OpenTelemetry Correlation: Trace IDs + correlacionados entre sistemas +
  • +
  • + User Context: Informações do usuário anexadas + aos erros +
  • +
  • + SolidStart Integration: SDK nativo para + SolidJS +
  • +
+
+
+
+
+
+ ) +} + +export default TelemetryTestPage diff --git a/src/routes/test-app.tsx b/src/routes/test-app.tsx index 1782a09ab..3d2808e60 100644 --- a/src/routes/test-app.tsx +++ b/src/routes/test-app.tsx @@ -1,9 +1,17 @@ -import { createEffect, createSignal, untrack } from 'solid-js' +import { createEffect, createSignal, Show, untrack } from 'solid-js' +import { + signIn, + signOut, +} from '~/modules/auth/application/services/authService' +import { + getCurrentUser, + isAuthenticated, +} from '~/modules/auth/application/usecases/authState' import { setTargetDay, targetDay, -} from '~/modules/diet/day-diet/application/dayDiet' +} from '~/modules/diet/day-diet/application/usecases/dayState' import { createNewDayDiet, type DayDiet, @@ -39,10 +47,67 @@ import { } from '~/shared/modal/helpers/modalHelpers' import { openEditModal } from '~/shared/modal/helpers/modalHelpers' import { generateId } from '~/shared/utils/idUtils' +import { logging } from '~/shared/utils/logging' + +function GoogleLoginButton() { + const handleLogin = async () => { + try { + await signIn({ provider: 'google', redirectTo: window.location.origin }) + } catch (error) { + // TODO: ban inline imports + // Issue URL: https://github.com/marcuscastelo/macroflows/issues/1045 + import('~/shared/utils/logging') + .then(({ logging }) => { + logging.error('TestApp login error:', error) + }) + .catch(() => { + // Fallback if import fails + }) + } + } + + return ( + + ) +} + +function LogoutButton() { + const handleLogout = async () => { + try { + await signOut() + } catch (error) { + import('~/shared/utils/logging') + .then(({ logging }) => { + logging.error('TestApp logout error:', error) + }) + .catch(() => { + // Fallback if import fails + }) + } + } + + return ( + + ) +} + +function UserInfo() { + return ( + +
+

User: {getCurrentUser()?.email}

+ +
+
+ ) +} export default function TestApp() { - const [_unifiedItemEditModalVisible, setUnifiedItemEditModalVisible] = - createSignal(false) + const [_, setUnifiedItemEditModalVisible] = createSignal(false) const [item] = createSignal( createUnifiedItem({ @@ -104,7 +169,7 @@ export default function TestApp() { promoteDayDiet( createNewDayDiet({ meals: [], - owner: 3, + user_id: '3', target_day: '2023-11-02', }), { id: 1 }, @@ -125,6 +190,15 @@ export default function TestApp() { + {/* Auth */} +
+ Auth +
+ + +
+
+ {/* Modals */}
Modals @@ -140,7 +214,7 @@ export default function TestApp() { { - console.debug('New unified item added') + logging.debug('New unified item added') }} onFinish={() => {}} onClose={() => {}} @@ -189,7 +263,7 @@ export default function TestApp() { setUnifiedItemEditModalVisible(true) }, onCopy: (item) => { - console.debug('Copy item:', item) + logging.debug('Copy item:', item) }, }} /> @@ -212,6 +286,7 @@ export default function TestApp() { endDate: targetDay(), }} onChange={(value: DateValueType) => { + // eslint-disable-next-line @typescript-eslint/consistent-type-assertions setTargetDay(value?.startDate as string) }} /> diff --git a/src/sections/common/components/AuthUserDropdown.tsx b/src/sections/common/components/AuthUserDropdown.tsx new file mode 100644 index 000000000..655857455 --- /dev/null +++ b/src/sections/common/components/AuthUserDropdown.tsx @@ -0,0 +1,137 @@ +import { useNavigate } from '@solidjs/router' +import { createEffect, Show } from 'solid-js' + +import { signOut } from '~/modules/auth/application/services/authService' +import { + getCurrentUser, + isAuthenticated, +} from '~/modules/auth/application/usecases/authState' +import { showError } from '~/modules/toast/application/toastManager' +import { + currentUserId, + fetchUsers, + users, +} from '~/modules/user/application/user' +import { Button } from '~/sections/common/components/buttons/Button' +import { UserIcon } from '~/sections/common/components/icons/UserIcon' +import { + closeModal, + openConfirmModal, +} from '~/shared/modal/helpers/modalHelpers' +import { logging } from '~/shared/utils/logging' +import { vibrate } from '~/shared/utils/vibrate' + +export const AuthUserDropdown = (props: { modalId: string }) => { + const navigate = useNavigate() + + createEffect(() => { + const modalId = props.modalId + fetchUsers().catch((error) => { + logging.error('AuthUserDropdown error:', error) + showError('Erro ao buscar usuários', { context: 'background' }) + closeModal(modalId) + }) + }) + + 
const handleSignOut = () => { + vibrate(50) + openConfirmModal('Deseja sair da sua conta?', { + title: 'Sair', + confirmText: 'Sair', + cancelText: 'Cancelar', + onConfirm: async () => { + try { + await signOut() + closeModal(props.modalId) + navigate('/login') + } catch (error) { + logging.error('Sign out error:', error) + showError('Erro ao sair. Tente novamente.') + } + }, + }) + } + + const handleLogin = () => { + closeModal(props.modalId) + navigate('/login') + } + + return ( +
+ {/* Authentication Status */} + +
+

+ Você não está logado +

+ +
+
+ } + > +
+
+
+ { + const localUser = users().find( + (u) => u.uuid === currentUserId(), + )?.name + if (localUser !== undefined && localUser !== '') + return localUser + const authUser = getCurrentUser() + if (authUser !== null && authUser.email !== '') { + const emailParts = authUser.email.split('@') + return emailParts[0] ?? '' + } + return '' + }} + {...props} + /> +
+
+

+ {getCurrentUser()?.email} +

+

+ Conectado via Google +

+
+
+
+ + + {/* Actions */} + +
+ +
+
+

+ ) +} diff --git a/src/sections/common/components/BottomNavigation.tsx b/src/sections/common/components/BottomNavigation.tsx index 78510b079..8dfd452cc 100644 --- a/src/sections/common/components/BottomNavigation.tsx +++ b/src/sections/common/components/BottomNavigation.tsx @@ -1,8 +1,6 @@ import { useLocation, useNavigate } from '@solidjs/router' import { - createEffect, createSignal, - For, type JSXElement, onCleanup, onMount, @@ -10,23 +8,17 @@ import { } from 'solid-js' import { APP_VERSION } from '~/app-version' -import { showError } from '~/modules/toast/application/toastManager' import { - changeToUser, - currentUserId, - fetchUsers, - users, -} from '~/modules/user/application/user' -import { type User } from '~/modules/user/domain/user' + getCurrentUser, + isAuthenticated, +} from '~/modules/auth/application/usecases/authState' +import { currentUserId, users } from '~/modules/user/application/user' +import { AuthUserDropdown } from '~/sections/common/components/AuthUserDropdown' import { Button } from '~/sections/common/components/buttons/Button' -import { ConsoleDumpButton } from '~/sections/common/components/ConsoleDumpButton' import { UserIcon } from '~/sections/common/components/icons/UserIcon' import { useIntersectionObserver } from '~/shared/hooks/useIntersectionObserver' -import { - closeModal, - openConfirmModal, - openContentModal, -} from '~/shared/modal/helpers/modalHelpers' +import { openContentModal } from '~/shared/modal/helpers/modalHelpers' +import { logging } from '~/shared/utils/logging' import { vibrate } from '~/shared/utils/vibrate' export function BottomNavigation() { @@ -67,8 +59,8 @@ export function BottomNavigation() { resizeObserver?.disconnect() }) - console.debug('[BottomNavigation] Rendering') - console.debug('[BottomNavigation] Current path:', pathname) + logging.debug('[BottomNavigation] Rendering') + logging.debug('[BottomNavigation] Current path:', { pathname: pathname() }) return (
@@ -121,20 +113,53 @@ export function BottomNavigation() { /> ( - - users().find((u) => u.id === currentUserId())?.name ?? '' + + + } - {...props} - /> + > + { + const localUser = users().find( + (u) => u.uuid === currentUserId(), + )?.name + if (localUser !== undefined && localUser !== '') + return localUser + const authUser = getCurrentUser() + if (authUser !== null && authUser.email !== '') { + const emailParts = authUser.email.split('@') + return emailParts[0] ?? '' + } + return '' + }} + {...props} + /> + )} onClick={() => { vibrate(50) openContentModal( - (modalId) => , + (modalId) => , { closeOnOutsideClick: true, showCloseButton: false, @@ -155,7 +180,6 @@ export function BottomNavigation() { Version:
{APP_VERSION} -
- )} - - - ) -} diff --git a/src/sections/common/components/ChartLoadingPlaceholder.tsx b/src/sections/common/components/ChartLoadingPlaceholder.tsx index 990a24a3c..55877dc4e 100644 --- a/src/sections/common/components/ChartLoadingPlaceholder.tsx +++ b/src/sections/common/components/ChartLoadingPlaceholder.tsx @@ -3,7 +3,7 @@ import { CARD_BACKGROUND_COLOR, CARD_STYLE } from '~/modules/theme/constants' /** * Props for ChartLoadingPlaceholder component. */ -export type ChartLoadingPlaceholderProps = { +type ChartLoadingPlaceholderProps = { height?: number message?: string } diff --git a/src/sections/common/components/ClipboardActionButtons.tsx b/src/sections/common/components/ClipboardActionButtons.tsx index 5b2d62dcf..859c3f234 100644 --- a/src/sections/common/components/ClipboardActionButtons.tsx +++ b/src/sections/common/components/ClipboardActionButtons.tsx @@ -5,7 +5,7 @@ import { PasteIcon } from '~/sections/common/components/icons/PasteIcon' import { TrashIcon } from '~/sections/common/components/icons/TrashIcon' import { COPY_BUTTON_STYLES } from '~/sections/common/styles/buttonStyles' -export type ClipboardActionButtonsProps = { +type ClipboardActionButtonsProps = { canCopy: boolean canPaste: boolean canClear: boolean diff --git a/src/sections/common/components/ComboBox.tsx b/src/sections/common/components/ComboBox.tsx index d736e82d2..6f51b291c 100644 --- a/src/sections/common/components/ComboBox.tsx +++ b/src/sections/common/components/ComboBox.tsx @@ -1,11 +1,11 @@ import { For, type JSX } from 'solid-js' -export type ComboBoxOption = { +type ComboBoxOption = { value: T label: string } -export type ComboBoxProps = { +type ComboBoxProps = { options: readonly ComboBoxOption[] value: T onChange: (value: T) => void @@ -27,7 +27,12 @@ export function ComboBox( { + void handlePrivacyChange(e.currentTarget.checked) + }} + class="ml-2 w-5 h-5 accent-blue-600" + /> + + + + + ) +} diff --git a/src/sections/unified-item/components/GroupChildrenEditor.tsx 
b/src/sections/unified-item/components/GroupChildrenEditor.tsx index 1cca8b709..4c2c63e8f 100644 --- a/src/sections/unified-item/components/GroupChildrenEditor.tsx +++ b/src/sections/unified-item/components/GroupChildrenEditor.tsx @@ -1,7 +1,7 @@ import { type Accessor, For, type Setter, Show } from 'solid-js' import { z } from 'zod/v4' -import { saveRecipe } from '~/modules/diet/recipe/application/unifiedRecipe' +import { saveRecipe } from '~/modules/diet/recipe/application/usecases/recipeCrud' import { createNewRecipe } from '~/modules/diet/recipe/domain/recipe' import { addChildToItem, @@ -24,11 +24,8 @@ import { ConvertToRecipeIcon } from '~/sections/common/components/icons/ConvertT import { useClipboard } from '~/sections/common/hooks/useClipboard' import { useCopyPasteActions } from '~/sections/common/hooks/useCopyPasteActions' import { UnifiedItemView } from '~/sections/unified-item/components/UnifiedItemView' -import { createErrorHandler } from '~/shared/error/errorHandler' -import { createDebug } from '~/shared/utils/createDebug' import { generateId, regenerateId } from '~/shared/utils/idUtils' - -const debug = createDebug() +import { logging } from '~/shared/utils/logging' export type GroupChildrenEditorProps = { item: Accessor @@ -38,8 +35,6 @@ export type GroupChildrenEditorProps = { showAddButton?: boolean } -const errorHandler = createErrorHandler('user', 'UnifiedItem') - export function GroupChildrenEditor(props: GroupChildrenEditorProps) { const clipboard = useClipboard() @@ -97,7 +92,7 @@ export function GroupChildrenEditor(props: GroupChildrenEditorProps) { // Validate hierarchy to prevent circular references const tempItem = addChildToItem(updatedItem, childWithNewId) if (!validateItemHierarchy(tempItem)) { - console.warn( + logging.warn( `Skipping item ${childWithNewId.name} - would create circular reference`, ) continue @@ -111,7 +106,10 @@ export function GroupChildrenEditor(props: GroupChildrenEditorProps) { }) const updateChildQuantity = 
(childId: number, newQuantity: number) => { - debug('[GroupChildrenEditor] updateChildQuantity', { childId, newQuantity }) + logging.debug('[GroupChildrenEditor] updateChildQuantity', { + childId, + newQuantity, + }) const updatedItem = updateChildInItem(props.item(), childId, { quantity: newQuantity, @@ -121,7 +119,7 @@ export function GroupChildrenEditor(props: GroupChildrenEditorProps) { } const applyMultiplierToAll = (multiplier: number) => { - debug('[GroupChildrenEditor] applyMultiplierToAll', { multiplier }) + logging.debug('[GroupChildrenEditor] applyMultiplierToAll', { multiplier }) let updatedItem = props.item() @@ -155,7 +153,7 @@ export function GroupChildrenEditor(props: GroupChildrenEditorProps) { ? `${item.name} (Receita)` : 'Nova receita (a partir de um grupo)', items: children(), // Use UnifiedItems directly - owner: currentUserId(), + user_id: currentUserId(), }) const insertedRecipe = await saveRecipe(newUnifiedRecipe) @@ -179,7 +177,7 @@ export function GroupChildrenEditor(props: GroupChildrenEditorProps) { props.setItem(recipeUnifiedItem) } catch (err) { - errorHandler.error(err, { operation: 'handleConvertToRecipe' }) + logging.error('GroupChildrenEditor handleConvertToRecipe error:', err) showError(err, undefined, 'Falha ao criar receita a partir do grupo') } } diff --git a/src/sections/unified-item/components/QuantityControls.tsx b/src/sections/unified-item/components/QuantityControls.tsx index f5d84a6e5..044246f2d 100644 --- a/src/sections/unified-item/components/QuantityControls.tsx +++ b/src/sections/unified-item/components/QuantityControls.tsx @@ -6,7 +6,6 @@ import { untrack, } from 'solid-js' -import { type MacroNutrients } from '~/modules/diet/macro-nutrients/domain/macroNutrients' import { scaleRecipeItemQuantity } from '~/modules/diet/unified-item/domain/unifiedItemOperations' import { isFoodItem, @@ -19,11 +18,9 @@ import { MaxQuantityButton, } from '~/sections/common/components/MaxQuantityButton' import { type UseFieldReturn } 
from '~/sections/common/hooks/useField' -import { createDebug } from '~/shared/utils/createDebug' +import { logging } from '~/shared/utils/logging' import { calcUnifiedItemMacros } from '~/shared/utils/macroMath' -const debug = createDebug() - export type QuantityControlsProps = { item: Accessor setItem: Setter @@ -37,9 +34,9 @@ export function QuantityControls(props: QuantityControlsProps) { const newQuantity = props.quantityField.value() ?? 0.1 const currentItem = untrack(props.item) - debug( + logging.debug( '[QuantityControls] Update unified item quantity from field', - newQuantity, + { newQuantity }, ) if (isRecipeItem(currentItem)) { @@ -48,7 +45,7 @@ export function QuantityControls(props: QuantityControlsProps) { const scaledItem = scaleRecipeItemQuantity(currentItem, newQuantity) props.setItem({ ...scaledItem }) } catch (error) { - debug('[QuantityControls] Error scaling recipe:', error) + logging.debug('[QuantityControls] Error scaling recipe:', { error }) // Fallback to simple quantity update if scaling fails props.setItem({ ...currentItem, @@ -65,21 +62,21 @@ export function QuantityControls(props: QuantityControlsProps) { }) const increment = () => { - debug('[QuantityControls] increment') + logging.debug('[QuantityControls] increment') props.quantityField.setRawValue( ((props.quantityField.value() ?? 0) + 1).toString(), ) } const decrement = () => { - debug('[QuantityControls] decrement') + logging.debug('[QuantityControls] decrement') props.quantityField.setRawValue( Math.max(0, (props.quantityField.value() ?? 
0) - 1).toString(), ) } const holdRepeatStart = (action: () => void) => { - debug('[QuantityControls] holdRepeatStart') + logging.debug('[QuantityControls] holdRepeatStart') const holdTimeout = setTimeout(() => { const holdInterval = setInterval(() => { action() @@ -115,14 +112,16 @@ export function QuantityControls(props: QuantityControlsProps) { field={props.quantityField} style={{ width: '100%' }} onFieldCommit={(value) => { - debug('[QuantityControls] FloatInput onFieldCommit', value) + logging.debug('[QuantityControls] FloatInput onFieldCommit', { + value, + }) if (value === undefined) { props.quantityField.setRawValue(props.item().quantity.toString()) } }} tabIndex={-1} onFocus={(event) => { - debug('[QuantityControls] FloatInput onFocus') + logging.debug('[QuantityControls] FloatInput onFocus') event.target.select() if (props.quantityField.value() === 0) { props.quantityField.setRawValue('') @@ -157,9 +156,9 @@ export function QuantityControls(props: QuantityControlsProps) { return { carbs: 0, protein: 0, fat: 0 } })()} onMaxSelected={(maxValue: number) => { - debug( + logging.debug( '[QuantityControls] MaxQuantityButton onMaxSelected', - maxValue, + { maxValue }, ) props.quantityField.setRawValue(maxValue.toFixed(2)) }} @@ -172,11 +171,11 @@ export function QuantityControls(props: QuantityControlsProps) { class="btn-primary btn-xs btn cursor-pointer uppercase h-full w-10 px-6 text-4xl text-red-600" onClick={decrement} onMouseDown={() => { - debug('[QuantityControls] decrement mouse down') + logging.debug('[QuantityControls] decrement mouse down') holdRepeatStart(decrement) }} onTouchStart={() => { - debug('[QuantityControls] decrement touch start') + logging.debug('[QuantityControls] decrement touch start') holdRepeatStart(decrement) }} > @@ -187,11 +186,11 @@ export function QuantityControls(props: QuantityControlsProps) { class="btn-primary btn-xs btn cursor-pointer uppercase ml-1 h-full w-10 px-6 text-4xl text-green-400" onClick={increment} 
onMouseDown={() => { - debug('[QuantityControls] increment mouse down') + logging.debug('[QuantityControls] increment mouse down') holdRepeatStart(increment) }} onTouchStart={() => { - debug('[QuantityControls] increment touch start') + logging.debug('[QuantityControls] increment touch start') holdRepeatStart(increment) }} > diff --git a/src/sections/unified-item/components/QuantityShortcuts.tsx b/src/sections/unified-item/components/QuantityShortcuts.tsx index 10e12be1b..a4841eefd 100644 --- a/src/sections/unified-item/components/QuantityShortcuts.tsx +++ b/src/sections/unified-item/components/QuantityShortcuts.tsx @@ -1,8 +1,6 @@ import { For } from 'solid-js' -import { createDebug } from '~/shared/utils/createDebug' - -const debug = createDebug() +import { logging } from '~/shared/utils/logging' export type QuantityShortcutsProps = { onQuantitySelect: (quantity: number) => void @@ -25,9 +23,9 @@ export function QuantityShortcuts(props: QuantityShortcutsProps) {
{ - debug( + logging.debug( '[QuantityShortcuts] shortcut quantity selected', - value, + { value }, ) props.onQuantitySelect(value) }} diff --git a/src/sections/unified-item/components/UnifiedItemEditBody.tsx b/src/sections/unified-item/components/UnifiedItemEditBody.tsx index 5b6b136e0..e2fd1766e 100644 --- a/src/sections/unified-item/components/UnifiedItemEditBody.tsx +++ b/src/sections/unified-item/components/UnifiedItemEditBody.tsx @@ -1,8 +1,7 @@ -import { type Accessor, createSignal, type Setter, Show } from 'solid-js' +import { type Accessor, type Setter, Show } from 'solid-js' -import { currentDayDiet } from '~/modules/diet/day-diet/application/dayDiet' +import { currentDayDiet } from '~/modules/diet/day-diet/application/usecases/dayState' import { getMacroTargetForDay } from '~/modules/diet/macro-target/application/macroTarget' -import { updateUnifiedItemName } from '~/modules/diet/unified-item/domain/unifiedItemOperations' import { asFoodItem, isGroupItem, @@ -15,75 +14,9 @@ import { QuantityControls } from '~/sections/unified-item/components/QuantityCon import { QuantityShortcuts } from '~/sections/unified-item/components/QuantityShortcuts' import { UnifiedItemFavorite } from '~/sections/unified-item/components/UnifiedItemFavorite' import { UnifiedItemView } from '~/sections/unified-item/components/UnifiedItemView' -import { createDebug } from '~/shared/utils/createDebug' +import { logging } from '~/shared/utils/logging' import { calcDayMacros, calcUnifiedItemMacros } from '~/shared/utils/macroMath' -const debug = createDebug() - -type InlineNameEditorProps = { - item: Accessor - setItem: Setter -} - -function InlineNameEditor(props: InlineNameEditorProps) { - const [isEditing, setIsEditing] = createSignal(false) - const [tempName, setTempName] = createSignal('') - - const startEditing = () => { - setTempName(props.item().name) - setIsEditing(true) - } - - const saveEdit = () => { - const newName = tempName().trim() - if (newName && newName !== 
props.item().name) { - const updatedItem = updateUnifiedItemName(props.item(), newName) - props.setItem(updatedItem) - } - setIsEditing(false) - } - - const cancelEdit = () => { - setIsEditing(false) - setTempName('') - } - - const handleKeyDown = (e: KeyboardEvent) => { - if (e.key === 'Enter') { - e.preventDefault() - saveEdit() - } else if (e.key === 'Escape') { - e.preventDefault() - cancelEdit() - } - } - - return ( - - {props.item().name} - - } - > - setTempName(e.currentTarget.value)} - onKeyDown={handleKeyDown} - onBlur={saveEdit} - class="bg-transparent border-none outline-none text-inherit font-inherit w-full" - autofocus - /> - - ) -} - export type UnifiedItemEditBodyProps = { canApply: boolean item: Accessor @@ -106,7 +39,7 @@ export type UnifiedItemEditBodyProps = { export function UnifiedItemEditBody(props: UnifiedItemEditBodyProps) { function getAvailableMacros(): MacroValues { - debug('getAvailableMacros') + logging.debug('getAvailableMacros') const dayDiet = currentDayDiet() const macroTarget = dayDiet ? 
getMacroTargetForDay(new Date(dayDiet.target_day)) @@ -127,7 +60,7 @@ export function UnifiedItemEditBody(props: UnifiedItemEditBodyProps) { } const handleQuantitySelect = (quantity: number) => { - debug('[UnifiedItemEditBody] shortcut quantity', quantity) + logging.debug('[UnifiedItemEditBody] shortcut quantity', { quantity }) props.quantityField.setRawValue(quantity.toString()) } diff --git a/src/sections/unified-item/components/UnifiedItemEditModal.tsx b/src/sections/unified-item/components/UnifiedItemEditModal.tsx index 11dd56ca8..bbd0beeda 100644 --- a/src/sections/unified-item/components/UnifiedItemEditModal.tsx +++ b/src/sections/unified-item/components/UnifiedItemEditModal.tsx @@ -13,7 +13,7 @@ import { deleteRecipe, fetchRecipeById, updateRecipe, -} from '~/modules/diet/recipe/application/recipe' +} from '~/modules/diet/recipe/application/usecases/recipeCrud' import { type Recipe } from '~/modules/diet/recipe/domain/recipe' import { addChildToItem, @@ -42,10 +42,8 @@ import { openTemplateSearchModal, openUnifiedItemEditModal, } from '~/shared/modal/helpers/specializedModalHelpers' -import { createDebug } from '~/shared/utils/createDebug' import { generateId } from '~/shared/utils/idUtils' - -const debug = createDebug() +import { logging } from '~/shared/utils/logging' export type UnifiedItemEditModalProps = { targetMealName: string @@ -63,7 +61,7 @@ export type UnifiedItemEditModalProps = { } export const UnifiedItemEditModal = (_props: UnifiedItemEditModalProps) => { - debug('[UnifiedItemEditModal] called', _props) + logging.debug('[UnifiedItemEditModal] called', _props) const props = mergeProps({ targetNameColor: 'text-green-500' }, _props) const handleClose = () => { @@ -161,7 +159,9 @@ export const UnifiedItemEditModal = (_props: UnifiedItemEditModalProps) => { }) const canApply = () => { - debug('[UnifiedItemEditModal] canApply', item().quantity) + logging.debug('[UnifiedItemEditModal] canApply', { + quantity: item().quantity, + }) return 
item().quantity > 0 } @@ -292,24 +292,26 @@ export const UnifiedItemEditModal = (_props: UnifiedItemEditModalProps) => { {/* Edit recipe button - only show for recipe items */} - + {(originalRecipe) => ( + + )}
@@ -379,7 +381,7 @@ export const UnifiedItemEditModal = (_props: UnifiedItemEditModalProps) => {