diff --git a/cli/src/commands/__tests__/insights.test.ts b/cli/src/commands/__tests__/insights.test.ts index 7498fbf..7e4f55c 100644 --- a/cli/src/commands/__tests__/insights.test.ts +++ b/cli/src/commands/__tests__/insights.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import Database from 'better-sqlite3'; import { runMigrations } from '../../db/migrate.js'; @@ -421,3 +421,218 @@ describe('syncSingleFile', () => { expect(mockInsertMessages).not.toHaveBeenCalled(); }); }); + +// ── insightsCheckCommand tests ──────────────────────────────────────────────── + +describe('insightsCheckCommand — count-based behavior', () => { + let stdoutSpy: ReturnType<typeof vi.spyOn>; + let consoleSpy: ReturnType<typeof vi.spyOn>; + + beforeEach(() => { + mockDb = new Database(':memory:'); + runMigrations(mockDb); + mockRunAnalysis.mockReset(); + mockValidate.mockReset(); + mockFromConfig.mockReset(); + mockProviderRunAnalysis.mockReset(); + stdoutSpy = vi.spyOn(process.stdout, 'write').mockImplementation(() => true); + consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + }); + + afterEach(() => { + stdoutSpy.mockRestore(); + consoleSpy.mockRestore(); + }); + + function seedSessions(db: Database.Database, count: number, analyzedCount = 0): void { + db.exec(`INSERT OR IGNORE INTO projects (id, name, path, last_activity) VALUES ('pc1', 'proj', '/p', datetime('now'));`); + for (let i = 0; i < count; i++) { + const sid = `chk-sess-${i}`; + db.exec(`INSERT OR IGNORE INTO sessions (id, project_id, project_name, project_path, started_at, ended_at, message_count) VALUES ('${sid}', 'pc1', 'proj', '/p', datetime('now', '-${i} minutes'), datetime('now', '-${i} minutes'), 10);`); + if (i < analyzedCount) { + db.exec(`INSERT OR IGNORE INTO analysis_usage (session_id, analysis_type, provider, model) VALUES ('${sid}', 'session', 'openai', 'gpt-4');`); + } + } + } + + it('exits silently when 0 
unanalyzed sessions', async () => { + seedSessions(mockDb, 2, 2); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: false }); + expect(consoleSpy).not.toHaveBeenCalled(); + expect(stdoutSpy).not.toHaveBeenCalled(); + }); + + it('--quiet outputs just the count for unanalyzed sessions', async () => { + seedSessions(mockDb, 5, 0); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: true }); + const written = (stdoutSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join(''); + expect(written.trim()).toBe('5'); + expect(consoleSpy).not.toHaveBeenCalled(); + }); + + it('--quiet exits silently when 0 unanalyzed sessions', async () => { + seedSessions(mockDb, 3, 3); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: true }); + expect(stdoutSpy).not.toHaveBeenCalled(); + }); + + it('prints count and suggest --analyze for 3-10 unanalyzed sessions', async () => { + seedSessions(mockDb, 5, 0); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: false }); + const output = (consoleSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join('\n'); + expect(output).toContain('5'); + expect(output).toMatch(/insights check --analyze/i); + // No time estimate for < 11 sessions + expect(output).not.toMatch(/~\d+ min/i); + }); + + it('prints count + time estimate for 11+ unanalyzed sessions', async () => { + seedSessions(mockDb, 12, 0); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: false }); + const output = (consoleSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join('\n'); + expect(output).toContain('12'); + expect(output).toMatch(/insights check --analyze/i); + // Should have time estimate (~X min) + expect(output).toMatch(/~\d/); + }); + + it('respects --days 
lookback window', async () => { + mockDb.exec(`INSERT OR IGNORE INTO projects (id, name, path, last_activity) VALUES ('pd1', 'proj', '/p', datetime('now'));`); + mockDb.exec(`INSERT OR IGNORE INTO sessions (id, project_id, project_name, project_path, started_at, ended_at, message_count) VALUES ('old-s', 'pd1', 'proj', '/p', datetime('now', '-8 days'), datetime('now', '-8 days'), 10);`); + mockDb.exec(`INSERT OR IGNORE INTO sessions (id, project_id, project_name, project_path, started_at, ended_at, message_count) VALUES ('new-s', 'pd1', 'proj', '/p', datetime('now', '-1 days'), datetime('now', '-1 days'), 10);`); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: true }); + const written = (stdoutSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join(''); + expect(written.trim()).toBe('1'); + }); +}); + +describe('insightsCheckCommand — auto-analyze (1-2 sessions)', () => { + let consoleSpy: ReturnType<typeof vi.spyOn>; + let consoleErrSpy: ReturnType<typeof vi.spyOn>; + + beforeEach(() => { + mockDb = new Database(':memory:'); + runMigrations(mockDb); + mockRunAnalysis.mockReset(); + mockValidate.mockReset(); + mockProviderRunAnalysis.mockReset(); + consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + consoleErrSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + }); + + afterEach(() => { + consoleSpy.mockRestore(); + consoleErrSpy.mockRestore(); + }); + + function seedOne(db: Database.Database, id: string): void { + db.exec(`INSERT OR IGNORE INTO projects (id, name, path, last_activity) VALUES ('pa1', 'proj', '/p', datetime('now'));`); + db.exec(`INSERT OR IGNORE INTO sessions (id, project_id, project_name, project_path, started_at, ended_at, message_count) VALUES ('${id}', 'pa1', 'proj', '/p', datetime('now'), datetime('now'), 10);`); + } + + it('auto-analyzes 1 unanalyzed session using native runner', async () => { + seedOne(mockDb, 'auto-1'); + mockRunAnalysis + .mockResolvedValueOnce({ rawJson: 
makeAnalysisResponse(), durationMs: 500, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }) + .mockResolvedValueOnce({ rawJson: makePQResponse(), durationMs: 400, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: false }); + expect(mockValidate).toHaveBeenCalledTimes(1); + expect(mockRunAnalysis).toHaveBeenCalledTimes(2); + }); + + it('auto-analyzes 2 unanalyzed sessions using native runner', async () => { + seedOne(mockDb, 'auto-2a'); + seedOne(mockDb, 'auto-2b'); + mockRunAnalysis + .mockResolvedValueOnce({ rawJson: makeAnalysisResponse(), durationMs: 500, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }) + .mockResolvedValueOnce({ rawJson: makePQResponse(), durationMs: 400, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }) + .mockResolvedValueOnce({ rawJson: makeAnalysisResponse(), durationMs: 500, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }) + .mockResolvedValueOnce({ rawJson: makePQResponse(), durationMs: 400, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: false }); + expect(mockValidate).toHaveBeenCalled(); + expect(mockRunAnalysis).toHaveBeenCalledTimes(4); + }); +}); + +describe('insightsCheckCommand — --analyze flag', () => { + let consoleSpy: ReturnType<typeof vi.spyOn>; + let consoleErrSpy: ReturnType<typeof vi.spyOn>; + let stdoutSpy: ReturnType<typeof vi.spyOn>; + + beforeEach(() => { + mockDb = new Database(':memory:'); + runMigrations(mockDb); + mockRunAnalysis.mockReset(); + mockValidate.mockReset(); + mockProviderRunAnalysis.mockReset(); + consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + consoleErrSpy = 
vi.spyOn(console, 'error').mockImplementation(() => {}); + stdoutSpy = vi.spyOn(process.stdout, 'write').mockImplementation(() => true); + }); + + afterEach(() => { + consoleSpy.mockRestore(); + consoleErrSpy.mockRestore(); + stdoutSpy.mockRestore(); + }); + + function seedSessions(db: Database.Database, count: number): void { + db.exec(`INSERT OR IGNORE INTO projects (id, name, path, last_activity) VALUES ('pb1', 'proj', '/p', datetime('now'));`); + for (let i = 0; i < count; i++) { + db.exec(`INSERT OR IGNORE INTO sessions (id, project_id, project_name, project_path, started_at, ended_at, message_count) VALUES ('an-sess-${i}', 'pb1', 'proj', '/p', datetime('now', '-${i} minutes'), datetime('now', '-${i} minutes'), 10);`); + } + } + + it('processes all sessions with --analyze and shows [N/total] progress', async () => { + seedSessions(mockDb, 3); + for (let i = 0; i < 3; i++) { + mockRunAnalysis + .mockResolvedValueOnce({ rawJson: makeAnalysisResponse(), durationMs: 1000, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }) + .mockResolvedValueOnce({ rawJson: makePQResponse(), durationMs: 800, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }); + } + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: false, analyze: true }); + // Progress lines go to process.stdout.write + const stdoutOutput = (stdoutSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join(''); + expect(stdoutOutput).toMatch(/\[1\/3\]/); + expect(stdoutOutput).toMatch(/\[2\/3\]/); + expect(stdoutOutput).toMatch(/\[3\/3\]/); + // Summary line goes to console.log + const logOutput = (consoleSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join('\n'); + expect(logOutput).toMatch(/Analyzed 3 session/i); + }); + + it('continues processing after one session fails', async () => { + seedSessions(mockDb, 3); + mockRunAnalysis + .mockRejectedValueOnce(new 
Error('fail on session 0')) + .mockResolvedValueOnce({ rawJson: makeAnalysisResponse(), durationMs: 1000, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }) + .mockResolvedValueOnce({ rawJson: makePQResponse(), durationMs: 800, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }) + .mockResolvedValueOnce({ rawJson: makeAnalysisResponse(), durationMs: 1000, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }) + .mockResolvedValueOnce({ rawJson: makePQResponse(), durationMs: 800, inputTokens: 0, outputTokens: 0, model: 'claude-native', provider: 'claude-code-native' }); + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: false, analyze: true }); + const stdoutOutput = (stdoutSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join(''); + const errOutput = (consoleErrSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join('\n'); + const logOutput = (consoleSpy.mock.calls as Array<[unknown]>).map(c => String(c[0])).join('\n'); + expect(stdoutOutput).toMatch(/\[1\/3\]/); + expect(errOutput).toMatch(/fail on session 0/i); + expect(logOutput).toMatch(/Analyzed 2 session/i); + }); + + it('exits silently with --analyze when 0 unanalyzed sessions', async () => { + const { insightsCheckCommand } = await import('../insights.js'); + await insightsCheckCommand({ days: 7, quiet: false, analyze: true }); + expect(mockRunAnalysis).not.toHaveBeenCalled(); + expect(consoleSpy).not.toHaveBeenCalled(); + expect(stdoutSpy).not.toHaveBeenCalled(); + }); +}); diff --git a/cli/src/commands/insights.ts b/cli/src/commands/insights.ts index a93809b..641b3ca 100644 --- a/cli/src/commands/insights.ts +++ b/cli/src/commands/insights.ts @@ -97,6 +97,8 @@ export interface InsightsCommandOptions { force?: boolean; quiet?: boolean; source?: string; + /** Pre-built runner to reuse across batch calls. 
Skips runner construction and validate(). */ + _runner?: AnalysisRunner; } // ── Core logic ──────────────────────────────────────────────────────────────── @@ -109,9 +111,11 @@ export interface InsightsCommandOptions { export async function runInsightsCommand(options: InsightsCommandOptions): Promise<void> { const log = options.quiet ? () => {} : console.log.bind(console); - // 1. Build the runner + // 1. Build the runner (or reuse a pre-built one from batch callers) let runner: AnalysisRunner; - if (options.native) { + if (options._runner) { + runner = options._runner; + } else if (options.native) { ClaudeNativeRunner.validate(); runner = new ClaudeNativeRunner(); } else { @@ -320,9 +324,17 @@ export async function insightsCommand( // ── Subcommand: insights check ──────────────────────────────────────────────── -export function insightsCheckCommand(opts: { days?: number; quiet?: boolean }): void { +// Seconds per session estimate (15-30s each; use 22s as mid-range) +const SECONDS_PER_SESSION = 22; + +export async function insightsCheckCommand(opts: { + days?: number; + quiet?: boolean; + analyze?: boolean; +}): Promise<void> { const days = opts.days ?? 7; const quiet = opts.quiet ?? false; + const analyze = opts.analyze ?? false; const log = quiet ? () => {} : console.log.bind(console); try { @@ -330,14 +342,14 @@ export function insightsCheckCommand(opts: { days?: number; quiet?: boolean }): const cutoff = new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString(); const rows = db.prepare(` - SELECT s.id + SELECT s.id, s.generated_title, s.custom_title, s.started_at, s.message_count FROM sessions s LEFT JOIN analysis_usage au ON au.session_id = s.id AND au.analysis_type = 'session' WHERE s.started_at >= ? 
AND s.deleted_at IS NULL - AND au.analysis_type IS NULL + AND au.session_id IS NULL ORDER BY s.started_at DESC - `).all(cutoff) as Array<{ id: string }>; + `).all(cutoff) as Array<{ id: string; generated_title: string | null; custom_title: string | null; started_at: string; message_count: number }>; const count = rows.length; @@ -351,8 +363,61 @@ export function insightsCheckCommand(opts: { days?: number; quiet?: boolean }): return; } + // --analyze: process all found sessions with progress output + if (analyze) { + ClaudeNativeRunner.validate(); + const runner = new ClaudeNativeRunner(); + let successCount = 0; + + for (let i = 0; i < rows.length; i++) { + const row = rows[i]; + const label = row.custom_title ?? row.generated_title ?? row.id; + const position = `[${i + 1}/${count}]`; + process.stdout.write(`${position} ${label} ... `); + const start = Date.now(); + try { + await runInsightsCommand({ sessionId: row.id, native: true, quiet: true, _runner: runner }); + const elapsed = Math.round((Date.now() - start) / 1000); + process.stdout.write(`done (${elapsed}s)\n`); + successCount++; + } catch (err) { + process.stdout.write('failed\n'); + console.error(chalk.red(` [Code Insights] ${err instanceof Error ? err.message : 'Analysis failed'}`)); + } + } + + log(chalk.green(`Analyzed ${successCount} session${successCount !== 1 ? 's' : ''}.`)); + return; + } + + // Auto-analyze silently when 1-2 unanalyzed sessions + if (count <= 2) { + ClaudeNativeRunner.validate(); + const runner = new ClaudeNativeRunner(); + for (const row of rows) { + try { + await runInsightsCommand({ sessionId: row.id, native: true, quiet: true, _runner: runner }); + } catch { + // Silently ignore auto-analyze errors for 1-2 sessions + } + } + return; + } + + // 3-10: print count + suggestion + if (count <= 10) { + log(chalk.yellow(`[Code Insights] ${count} unanalyzed session${count > 1 ? 
's' : ''} in the last ${days} days.`)); + log(chalk.dim(` Run: code-insights insights check --analyze to process them`)); + return; + } + + // 11+: print count + time estimate + const estimateSecs = count * SECONDS_PER_SESSION; + const estimateMins = Math.round(estimateSecs / 60); + const timeLabel = estimateMins < 2 ? `~${estimateSecs}s` : `~${estimateMins} min`; log(chalk.yellow(`[Code Insights] ${count} unanalyzed session${count > 1 ? 's' : ''} in the last ${days} days.`)); - log(chalk.dim(` Run: code-insights insights --native to analyze the most recent session.`)); + log(chalk.dim(` Estimated time: ${timeLabel} (~${SECONDS_PER_SESSION}s each)`)); + log(chalk.dim(` Run: code-insights insights check --analyze to process them`)); } catch (error) { if (!quiet) { console.error(chalk.red(`[Code Insights] ${error instanceof Error ? error.message : 'Check failed'}`)); diff --git a/cli/src/index.ts b/cli/src/index.ts index be9535c..35cbf90 100644 --- a/cli/src/index.ts +++ b/cli/src/index.ts @@ -138,8 +138,13 @@ insightsCmd .description('Check for unanalyzed sessions in the last N days') .option('--days <n>', 'Lookback window in days', '7') .option('-q, --quiet', 'Machine-readable output (just count)') - .action((opts) => { - insightsCheckCommand({ days: opts.days ? parseInt(opts.days, 10) : 7, quiet: opts.quiet }); + .option('--analyze', 'Process all found sessions sequentially') + .action(async (opts) => { + await insightsCheckCommand({ + days: opts.days ? parseInt(opts.days, 10) : 7, + quiet: opts.quiet, + analyze: opts.analyze, + }); }); // Default action: running `code-insights` with no arguments opens the dashboard.