From 6bf7349153f6827fe04c89d009aa50fe93423dc6 Mon Sep 17 00:00:00 2001 From: aparupganguly Date: Thu, 22 Jan 2026 00:30:27 +0530 Subject: [PATCH] Add health lens --- health-lens/.gitignore | 43 + health-lens/README.md | 53 + health-lens/app/api/chat/route.ts | 120 + health-lens/app/api/classify/route.ts | 102 + health-lens/app/api/files/route.ts | 252 + health-lens/app/api/generate-title/route.ts | 46 + health-lens/app/api/prescrape/route.ts | 55 + health-lens/app/api/search/route.ts | 40 + health-lens/app/api/studies/scrape/route.ts | 41 + health-lens/app/globals.css | 81 + health-lens/app/layout.tsx | 34 + health-lens/app/page.tsx | 5 + health-lens/components/app-layout.tsx | 24 + health-lens/components/health-screen.tsx | 1054 ++ health-lens/components/sidebar.tsx | 714 ++ health-lens/components/sources-sidebar.tsx | 225 + health-lens/components/streaming-text.tsx | 46 + health-lens/components/thinking-animation.tsx | 158 + health-lens/components/ui/button.tsx | 54 + health-lens/components/ui/dialog.tsx | 119 + health-lens/components/ui/input.tsx | 24 + health-lens/components/ui/scroll-area.tsx | 45 + health-lens/components/ui/tabs.tsx | 52 + health-lens/components/ui/textarea.tsx | 23 + health-lens/eslint.config.mjs | 18 + health-lens/lib/hyperbrowser.ts | 283 + health-lens/lib/memory.ts | 435 + health-lens/lib/research-cache.ts | 166 + health-lens/lib/research-query-generator.ts | 95 + health-lens/lib/search-cache.ts | 144 + health-lens/lib/utils.ts | 6 + health-lens/next.config.ts | 7 + health-lens/package-lock.json | 9334 +++++++++++++++++ health-lens/package.json | 49 + health-lens/postcss.config.mjs | 7 + health-lens/public/file.svg | 1 + health-lens/public/globe.svg | 1 + .../public/hyperbrowser_symbol-DARK.svg | 4 + health-lens/public/next.svg | 1 + health-lens/public/vercel.svg | 1 + health-lens/public/window.svg | 1 + health-lens/tsconfig.json | 34 + 42 files changed, 13997 insertions(+) create mode 100644 health-lens/.gitignore create mode 100644 health-lens/README.md create mode 100644 health-lens/app/api/chat/route.ts create mode 100644 health-lens/app/api/classify/route.ts create mode 100644 health-lens/app/api/files/route.ts create mode 100644 health-lens/app/api/generate-title/route.ts create mode 100644 health-lens/app/api/prescrape/route.ts create mode 100644 health-lens/app/api/search/route.ts create mode 100644 health-lens/app/api/studies/scrape/route.ts create mode 100644 health-lens/app/globals.css create mode 100644 health-lens/app/layout.tsx create mode 100644 health-lens/app/page.tsx create mode 100644 health-lens/components/app-layout.tsx create mode 100644 health-lens/components/health-screen.tsx create mode 100644 health-lens/components/sidebar.tsx create mode 100644 health-lens/components/sources-sidebar.tsx create mode 100644 health-lens/components/streaming-text.tsx create mode 100644 health-lens/components/thinking-animation.tsx create mode 100644 health-lens/components/ui/button.tsx create mode 100644 health-lens/components/ui/dialog.tsx create mode 100644 health-lens/components/ui/input.tsx create mode 100644 health-lens/components/ui/scroll-area.tsx create mode 100644 health-lens/components/ui/tabs.tsx create mode 100644 health-lens/components/ui/textarea.tsx create mode 100644 health-lens/eslint.config.mjs create mode 100644 health-lens/lib/hyperbrowser.ts create mode 100644 health-lens/lib/memory.ts create mode 100644 health-lens/lib/research-cache.ts create mode 100644 health-lens/lib/research-query-generator.ts create mode 100644 
health-lens/lib/search-cache.ts create mode 100644 health-lens/lib/utils.ts create mode 100644 health-lens/next.config.ts create mode 100644 health-lens/package-lock.json create mode 100644 health-lens/package.json create mode 100644 health-lens/postcss.config.mjs create mode 100644 health-lens/public/file.svg create mode 100644 health-lens/public/globe.svg create mode 100755 health-lens/public/hyperbrowser_symbol-DARK.svg create mode 100644 health-lens/public/next.svg create mode 100644 health-lens/public/vercel.svg create mode 100644 health-lens/public/window.svg create mode 100644 health-lens/tsconfig.json diff --git a/health-lens/.gitignore b/health-lens/.gitignore new file mode 100644 index 0000000..45254b6 --- /dev/null +++ b/health-lens/.gitignore @@ -0,0 +1,43 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/versions + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# env files (can opt-in for committing if needed) +.env* + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts + +/app/generated/prisma diff --git a/health-lens/README.md b/health-lens/README.md new file mode 100644 index 0000000..5bf941c --- /dev/null +++ b/health-lens/README.md @@ -0,0 +1,53 @@ +**Built with [Hyperbrowser](https://hyperbrowser.ai)** + +# HealthLens + +Upload lab reports, chat with AI, get research-backed health insights. + +## Setup + +**Get an API key** at https://hyperbrowser.ai + +Install and configure: + +```bash +npm install +``` + +Create `.env`: + +```env +HYPERBROWSER_API_KEY=your_key_here +ANTHROPIC_API_KEY=your_key_here +``` + +Run: + +```bash +npm run dev +``` + +Open http://localhost:3000 + +## How It Works + +1. Upload PDF lab report +2. AI extracts health markers (cholesterol, glucose, etc.) +3. Searches PubMed API for relevant research +4. Chat about your results with research context + +## Tech + +- Next.js 16 + TypeScript +- Anthropic Claude Sonnet 4.5 +- PubMed E-utilities API +- Hyperbrowser SDK (for article scraping) +- localStorage (no database) + +## Disclaimer + +Informational only. Not medical advice. Consult your doctor. + +--- + +Follow @hyperbrowser for updates. diff --git a/health-lens/app/api/chat/route.ts b/health-lens/app/api/chat/route.ts new file mode 100644 index 0000000..db4414d --- /dev/null +++ b/health-lens/app/api/chat/route.ts @@ -0,0 +1,120 @@ +import { streamText } from "ai"; +import { anthropic } from "@ai-sdk/anthropic"; +import { openai } from "@ai-sdk/openai"; +import type { NextRequest } from "next/server"; + +// Using Node.js runtime for better compatibility + +function getTextFromParts(parts: Array<{ type: string; text?: string }>): string { + return parts + .filter((p) => p.type === "text" && typeof p.text === "string") + .map((p) => p.text) + .join(""); +} + +const SYSTEM_PROMPT = `You are a knowledgeable health AI assistant. Analyze health data and provide insights based on medical research. + +IMPORTANT: You have access to the user's uploaded medical files (lab reports, health records, etc.) in the context below. When a user asks about their lab results or health data, USE THE FILE CONTENT provided in the context to give specific, personalized answers based on their actual data. + +When responding: +1. 
Reference specific values from their uploaded files when relevant +2. Cite evidence from medical literature when relevant +3. Note limitations and uncertainties +4. Suggest questions for their doctor +5. Provide actionable next steps + +Format your response with clear sections using markdown: +- Use **bold** for emphasis +- Use bullet points for lists +- DO NOT use emojis in your response. Keep the tone professional, clean, and clinical. + +Always include a short **Data references** section at the end: +- List the specific uploaded file names and values you used +- List research sources with URLs when available +- If no external sources were used, state "Data references: None" + +CRITICAL: Always remind users this is informational only and not medical advice. Lab results should be discussed with their healthcare provider.`; + +export async function POST(req: NextRequest) { + try { + const body = await req.json(); + const uiMessages = body?.messages; + const researchContext = body?.researchContext; + const memoryContext = body?.memoryContext; + + if (!Array.isArray(uiMessages)) { + return new Response("messages must be an array", { status: 400 }); + } + + const provider = + process.env.ANTHROPIC_API_KEY ? "anthropic" : process.env.OPENAI_API_KEY ? "openai" : null; + + if (!provider) { + return new Response("API key not configured", { status: 500 }); + } + + const coreMessages = uiMessages + .filter((m: any) => m && (m.role === "user" || m.role === "assistant")) + .map((m: any) => ({ + role: m.role, + content: + typeof m.content === "string" + ? m.content + : Array.isArray(m.parts) + ? getTextFromParts(m.parts) + : "", + })) + .filter((m: any) => m.content && m.content.trim().length > 0); + + // Build enhanced system prompt with research context and memory + let systemPrompt = SYSTEM_PROMPT; + + // Add memory context (files + conversation history) + if (memoryContext && typeof memoryContext === "string" && memoryContext.trim().length > 0) { + systemPrompt += `\n\n## Context from User's Health History:\n${memoryContext}\n\nUse this context to provide personalized responses and remember previous discussions.`; + } + + // Add research context + if (researchContext && Array.isArray(researchContext) && researchContext.length > 0) { + const researchSummary = researchContext + .map((result: any) => { + const studies = result.studies || []; + return `\n**${result.source}:**\n${studies + .map((s: any) => { + let details = `- **${s.title || "Study"}** ${s.year ? `(${s.year})` : ""}`; + if (s.abstract) details += `\n _Abstract/Snippet:_ ${s.abstract}`; + if (s.keyOutcomes) details += `\n _Key Outcomes:_ ${s.keyOutcomes}`; + return details; + }) + .join("\n")}`; + }) + .join("\n"); + + // Debug: Log what research is being passed to LLM + const totalStudies = researchContext.reduce((acc: number, r: any) => acc + (r.studies?.length || 0), 0); + const studiesWithAbstracts = researchContext.reduce((acc: number, r: any) => + acc + (r.studies?.filter((s: any) => s.abstract)?.length || 0), 0); + console.log(`📚 Passing ${totalStudies} studies to LLM (${studiesWithAbstracts} with abstracts)`); + + systemPrompt += `\n\n## Recent Research Found:\n${researchSummary}\n\nPlease reference these sources in your response when relevant. Use the abstract/outcomes to provide specific details.`; + } + + const model = + provider === "anthropic" + ? 
anthropic("claude-sonnet-4-5-20250929") + : openai("gpt-4o-mini"); + + const result = await streamText({ + model, + system: systemPrompt, + messages: coreMessages, + temperature: 0.5, + maxOutputTokens: 1500, + }); + + return result.toTextStreamResponse(); + } catch (error: any) { + console.error("Chat API error:", error); + return new Response(error?.message || "Internal server error", { status: 500 }); + } +} diff --git a/health-lens/app/api/classify/route.ts b/health-lens/app/api/classify/route.ts new file mode 100644 index 0000000..1ca901b --- /dev/null +++ b/health-lens/app/api/classify/route.ts @@ -0,0 +1,102 @@ +import { NextRequest, NextResponse } from "next/server"; +import { anthropic } from "@ai-sdk/anthropic"; +import { openai } from "@ai-sdk/openai"; +import { generateText } from "ai"; + +// Using Node.js runtime for better compatibility + +const CLASSIFICATION_PROMPT = `You are a medical question classifier. Analyze if a health question needs research backing from medical databases. + +Questions that NEED research: +- Specific medical conditions, treatments, or medications +- Health claims about supplements, diet, exercise +- "What does research say about..." +- "Is there evidence for..." +- Questions about effectiveness, safety, or outcomes + +Questions that DON'T need research: +- General symptom descriptions without specific questions +- Personal health tracking ("I feel tired today") +- Appointment scheduling or logistics +- Simple clarifications + +Respond ONLY with valid JSON in this exact format: +{ + "needsSearch": true/false, + "searchTerms": ["term1", "term2"], + "confidence": 0.0-1.0 +}`; + +interface ClassificationResult { + needsSearch: boolean; + searchTerms: string[]; + confidence: number; +} + +export async function POST(req: NextRequest) { + try { + const body = await req.json(); + const { question } = body; + + if (!question || typeof question !== "string") { + return NextResponse.json( + { error: "Question is required" }, + { status: 400 } + ); + } + + const provider = process.env.ANTHROPIC_API_KEY + ? "anthropic" + : process.env.OPENAI_API_KEY + ? "openai" + : null; + + if (!provider) { + return NextResponse.json( + { error: "API key not configured" }, + { status: 500 } + ); + } + + const model = + provider === "anthropic" + ? 
anthropic("claude-sonnet-4-5-20250929") + : openai("gpt-4o-mini"); + + const result = await generateText({ + model, + messages: [ + { role: "system", content: CLASSIFICATION_PROMPT }, + { role: "user", content: question }, + ], + temperature: 0.3, + maxOutputTokens: 200, + }); + + // Parse the JSON response + const text = result.text.trim(); + const jsonMatch = text.match(/\{[\s\S]*\}/); + + if (!jsonMatch) { + // Default to not searching if parsing fails + return NextResponse.json({ + needsSearch: false, + searchTerms: [], + confidence: 0, + }); + } + + const classification: ClassificationResult = JSON.parse(jsonMatch[0]); + + return NextResponse.json(classification); + } catch (error) { + console.error("Classification error:", error); + // Default to not searching on error + return NextResponse.json({ + needsSearch: false, + searchTerms: [], + confidence: 0, + }); + } +} + diff --git a/health-lens/app/api/files/route.ts b/health-lens/app/api/files/route.ts new file mode 100644 index 0000000..4988b8f --- /dev/null +++ b/health-lens/app/api/files/route.ts @@ -0,0 +1,252 @@ +import { NextRequest, NextResponse } from "next/server"; +import Papa from "papaparse"; +import { anthropic } from "@ai-sdk/anthropic"; +import { generateText } from "ai"; +import { generateResearchQueries } from "@/lib/research-query-generator"; +import { searchAllSources } from "@/lib/hyperbrowser"; + +// Optimized duration for PDF processing + background research +export const maxDuration = 30; // PDF extraction + single Hyperbrowser search (optimized) + +async function extractTextFromPdf(buffer: Buffer): Promise { + const PDFParser = require("pdf2json"); + + return new Promise((resolve, reject) => { + const pdfParser = new PDFParser(null, 1); // rawTextMode = true + + pdfParser.on("pdfParser_dataReady", (pdfData: any) => { + try { + // Extract text from parsed data + let text = ""; + if (pdfData && pdfData.Pages) { + pdfData.Pages.forEach((page: any) => { + if (page.Texts) { + page.Texts.forEach((textItem: any) => { + if (textItem.R) { + textItem.R.forEach((run: any) => { + if (run.T) { + // Decode URI encoded text + text += decodeURIComponent(run.T) + " "; + } + }); + } + }); + text += "\n"; + } + }); + } + resolve(text.trim()); + } catch (error) { + reject(error); + } + }); + + pdfParser.on("pdfParser_dataError", (error: any) => { + reject(error); + }); + + pdfParser.parseBuffer(buffer); + }); +} + +export async function POST(request: NextRequest) { + try { + const formData = await request.formData(); + const file = formData.get("file") as File; + + if (!file) { + return NextResponse.json({ error: "No file provided" }, { status: 400 }); + } + + const filename = file.name; + const fileType = file.type; + const fileSize = file.size; + const arrayBuffer = await file.arrayBuffer(); + const buffer = Buffer.from(arrayBuffer); + + let extractedData: any = { + filename, + fileType, + fileSize, + uploadedAt: new Date().toISOString(), + markers: [], + rawText: "", + }; + + // Parse PDF files + if (fileType === "application/pdf" || filename.endsWith(".pdf")) { + try { + const text = await extractTextFromPdf(buffer); + extractedData.rawText = text; + + // Debug: log extracted text (first 500 chars) + console.log("PDF extracted text preview:", text.substring(0, 500)); + + // AI-powered marker extraction using Claude Sonnet 4.5 + const markers = await extractHealthMarkers(text); + console.log("Extracted markers:", markers); + extractedData.markers = markers; + } catch (error) { + console.error("PDF parsing error:", error); + 
extractedData.error = "Failed to parse PDF"; + } + } + + // Parse CSV files + else if (fileType === "text/csv" || filename.endsWith(".csv")) { + const text = buffer.toString("utf-8"); + const parsed = Papa.parse(text, { header: true }); + extractedData.rawText = text; + extractedData.csvData = parsed.data; + + // Try to extract health markers from CSV + const markers = extractHealthMarkersFromCSV(parsed.data); + extractedData.markers = markers; + } + + // Parse text files + else if (fileType === "text/plain" || filename.endsWith(".txt")) { + const text = buffer.toString("utf-8"); + extractedData.rawText = text; + const markers = await extractHealthMarkers(text); + extractedData.markers = markers; + } + + // Generate a unique file ID for caching research + const fileId = `file_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`; + extractedData.id = fileId; + + // Perform research synchronously and return results + let researchResults = null; + if (extractedData.rawText && extractedData.rawText.length > 100) { + try { + console.log(`🔍 Starting research for file ${fileId}...`); + + // Generate single health topic using AI + const topics = await generateResearchQueries(extractedData.markers || [], extractedData.rawText); + console.log(`📋 Health topic: "${topics[0] || 'none'}"`); + + if (topics.length > 0) { + // Scrape MedlinePlus for health information + const results = await searchAllSources(topics); + console.log(`✅ Research completed for file ${fileId}: Health info retrieved`); + + researchResults = { + fileId, + queries: topics, + results, + status: "completed" + }; + extractedData.researchStatus = "completed"; + } else { + extractedData.researchStatus = "failed"; + } + } catch (err) { + console.error("Research failed:", err); + extractedData.researchStatus = "failed"; + } + } + + return NextResponse.json({ + success: true, + file: extractedData, + research: researchResults, // Include research results in response + }); + } catch (error) { + console.error("File upload error:", error); + return NextResponse.json( + { error: "Failed to process file" }, + { status: 500 } + ); + } +} + +// Extract health markers from text using AI (Claude Sonnet 4.5) +async function extractHealthMarkers(text: string): Promise { + try { + // Use full text (Claude Sonnet 4.5 has 200K context window) + const prompt = `You are a medical data extraction expert. Analyze this lab report and extract ALL health markers/test results. 
+ +Lab Report Text: +${text} + +Instructions: +- Extract EVERY test name, value, and unit you find +- Use the exact test names as they appear in the report +- Be thorough - don't miss any markers +- If there are 50+ markers, extract all of them +- Return ONLY a valid JSON array, no other text + +Return format: +[ + {"name": "Total Cholesterol", "value": "170", "unit": "mg/dL"}, + {"name": "HDL Cholesterol", "value": "96", "unit": "mg/dL"} +] + +JSON array:`; + + const result = await generateText({ + model: anthropic("claude-sonnet-4-5-20250929"), + prompt, + temperature: 0.1, + maxOutputTokens: 8192, // Allow large responses for comprehensive lab reports + }); + + // Parse the JSON response + const jsonMatch = result.text.match(/\[[\s\S]*\]/); + if (!jsonMatch) { + console.error("AI extraction failed: no JSON found in response"); + return []; + } + + const markers = JSON.parse(jsonMatch[0]); + + // Add timestamp to each marker + return markers.map((m: any) => ({ + ...m, + date: new Date().toISOString(), + })); + } catch (error) { + console.error("AI extraction error:", error); + // Fallback to empty array if AI extraction fails + return []; + } +} + +// Extract health markers from CSV data +function extractHealthMarkersFromCSV(data: any[]): any[] { + const markers: any[] = []; + + // Look for common column names + const markerColumns = ["test", "marker", "name", "measurement", "lab_test"]; + const valueColumns = ["value", "result", "measurement"]; + const unitColumns = ["unit", "units", "uom"]; + const dateColumns = ["date", "test_date", "collection_date"]; + + data.forEach((row: any) => { + const markerName = markerColumns.find((col) => row[col]) + ? row[markerColumns.find((col) => row[col]) as string] + : null; + const value = valueColumns.find((col) => row[col]) + ? row[valueColumns.find((col) => row[col]) as string] + : null; + const unit = unitColumns.find((col) => row[col]) + ? row[unitColumns.find((col) => row[col]) as string] + : null; + const date = dateColumns.find((col) => row[col]) + ? row[dateColumns.find((col) => row[col]) as string] + : new Date().toISOString(); + + if (markerName && value) { + markers.push({ + name: markerName, + value: value.toString(), + unit: unit || "", + date, + }); + } + }); + + return markers; +} + diff --git a/health-lens/app/api/generate-title/route.ts b/health-lens/app/api/generate-title/route.ts new file mode 100644 index 0000000..18590d5 --- /dev/null +++ b/health-lens/app/api/generate-title/route.ts @@ -0,0 +1,46 @@ +import { NextRequest, NextResponse } from "next/server"; +import { Anthropic } from "@anthropic-ai/sdk"; + +const client = new Anthropic({ + apiKey: process.env.ANTHROPIC_API_KEY, +}); + +export async function POST(request: NextRequest) { + try { + const { userMessage, assistantResponse } = await request.json(); + + if (!userMessage) { + return NextResponse.json({ error: "No message provided" }, { status: 400 }); + } + + const prompt = `Based on this health-related conversation, generate a short, descriptive title (max 6 words). + +User: ${userMessage} +${assistantResponse ? `Assistant: ${assistantResponse.substring(0, 200)}...` : ''} + +Generate only the title, nothing else. Make it specific to the health topic discussed.`; + + const message = await client.messages.create({ + model: "claude-3-5-sonnet-20241022", + max_tokens: 30, + temperature: 0.7, + messages: [ + { + role: "user", + content: prompt, + }, + ], + }); + + const titleText = message.content[0].type === "text" ? 
message.content[0].text : ""; + const cleanTitle = titleText.trim().replace(/^["']|["']$/g, ''); + + return NextResponse.json({ title: cleanTitle }); + } catch (error) { + console.error("Title generation error:", error); + return NextResponse.json( + { error: "Failed to generate title" }, + { status: 500 } + ); + } +} diff --git a/health-lens/app/api/prescrape/route.ts b/health-lens/app/api/prescrape/route.ts new file mode 100644 index 0000000..de74d56 --- /dev/null +++ b/health-lens/app/api/prescrape/route.ts @@ -0,0 +1,55 @@ +import { NextRequest, NextResponse } from "next/server"; +import { searchPubMedAPI, SearchResult } from "@/lib/hyperbrowser"; + +// Pre-scrape trending health topics for quick access +export const maxDuration = 30; + +const TRENDING_HEALTH_TOPICS = [ + "blood glucose diabetes management 2024", + "cholesterol heart disease prevention", + "vitamin D deficiency symptoms treatment", + "complete blood count interpretation", + "thyroid function test results", +]; + +export async function GET(req: NextRequest) { + try { + // Get optional topic from query params + const { searchParams } = new URL(req.url); + const customTopic = searchParams.get("topic"); + + const topics = customTopic + ? [customTopic] + : TRENDING_HEALTH_TOPICS.slice(0, 2); // Default to 2 topics for speed + + const results: SearchResult[] = []; + + for (const topic of topics) { + console.log(`🔄 Pre-scraping: ${topic}`); + + const studies = await searchPubMedAPI(topic); + + results.push({ + source: "PubMed", + studies, + searchTerms: [topic], + timestamp: Date.now(), + }); + + console.log(`✅ Pre-scraped ${studies.length} studies for: ${topic}`); + } + + return NextResponse.json({ + success: true, + results, + totalStudies: results.reduce((acc, r) => acc + r.studies.length, 0), + timestamp: Date.now(), + }); + } catch (error: any) { + console.error("Pre-scrape error:", error); + return NextResponse.json( + { error: error?.message || "Pre-scrape failed" }, + { status: 500 } + ); + } +} diff --git a/health-lens/app/api/search/route.ts b/health-lens/app/api/search/route.ts new file mode 100644 index 0000000..667abd7 --- /dev/null +++ b/health-lens/app/api/search/route.ts @@ -0,0 +1,40 @@ +import { NextRequest, NextResponse } from "next/server"; +import { searchAllSources, type SearchResult } from "@/lib/hyperbrowser"; + +// Using Node.js runtime for Hyperbrowser SDK compatibility +export const maxDuration = 15; // Single PubMed search: 10s + buffer + +export async function POST(req: NextRequest) { + try { + const body = await req.json(); + const { terms } = body; + + if (!terms || !Array.isArray(terms) || terms.length === 0) { + return NextResponse.json( + { error: "Search terms are required" }, + { status: 400 } + ); + } + + // Search all sources in parallel + // Note: In a production environment with SSE, we would stream results here. + // For now, we wait for all to complete but the client will simulate progress + // or we could implement a streaming response if needed. 
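The note above marks server-sent streaming as future work. A minimal sketch of that variant, assuming `searchAllSources` behaves the same when invoked with a single-term array, would push one SSE event per term as its results resolve instead of waiting for the whole batch:

```ts
// Sketch only: a streaming counterpart to this route, under the assumptions stated above.
import { NextRequest } from "next/server";
import { searchAllSources } from "@/lib/hyperbrowser";

export async function POST(req: NextRequest) {
  const { terms } = await req.json();
  const encoder = new TextEncoder();

  const stream = new ReadableStream({
    async start(controller) {
      for (const term of terms as string[]) {
        // Search one term at a time and emit each batch as an SSE `data:` event.
        const partial = await searchAllSources([term]);
        controller.enqueue(encoder.encode(`data: ${JSON.stringify(partial)}\n\n`));
      }
      controller.enqueue(encoder.encode("data: [DONE]\n\n"));
      controller.close();
    },
  });

  return new Response(stream, {
    headers: {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache",
    },
  });
}
```

The blocking JSON response kept below is what the current client expects, so the sketch is an alternative rather than a drop-in replacement.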
+ const results: SearchResult[] = await searchAllSources(terms); + + return NextResponse.json({ + results, + searchTerms: terms, + timestamp: Date.now(), + totalSources: results.length, + totalStudies: results.reduce((acc, r) => acc + r.studies.length, 0), + }); + } catch (error: any) { + console.error("Search API error:", error); + return NextResponse.json( + { error: error?.message || "Search failed" }, + { status: 500 } + ); + } +} + diff --git a/health-lens/app/api/studies/scrape/route.ts b/health-lens/app/api/studies/scrape/route.ts new file mode 100644 index 0000000..ece6a4b --- /dev/null +++ b/health-lens/app/api/studies/scrape/route.ts @@ -0,0 +1,41 @@ +import { NextRequest, NextResponse } from "next/server"; +import { scrapePubMedArticle } from "@/lib/hyperbrowser"; + +export async function POST(request: NextRequest) { + try { + const body = await request.json(); + const { url, pmid } = body; + + if (!url && !pmid) { + return NextResponse.json({ error: "URL or PMID is required" }, { status: 400 }); + } + + // Extract PMID from URL if not provided directly + const articlePmid = pmid || url?.match(/\/(\d{7,8})\/?/)?.[1]; + + if (!articlePmid) { + return NextResponse.json( + { error: "Could not extract PMID from URL" }, + { status: 400 } + ); + } + + // Scrape the PubMed article using Hyperbrowser + const scrapedData = await scrapePubMedArticle(articlePmid); + + if (!scrapedData) { + return NextResponse.json( + { error: "Failed to scrape study" }, + { status: 500 } + ); + } + + return NextResponse.json(scrapedData); + } catch (error) { + console.error("Error scraping study:", error); + return NextResponse.json( + { error: "Failed to scrape study" }, + { status: 500 } + ); + } +} diff --git a/health-lens/app/globals.css b/health-lens/app/globals.css new file mode 100644 index 0000000..bf8621c --- /dev/null +++ b/health-lens/app/globals.css @@ -0,0 +1,81 @@ +@import "tailwindcss"; + +@layer base { + :root { + --background: 0 0% 100%; + --foreground: 0 0% 0%; + + /* Refined Neutral Scale - Slightly warmer/softer for premium feel */ + --neutral-50: 0 0% 99%; + --neutral-100: 0 0% 97%; + --neutral-200: 0 0% 92%; + --neutral-300: 0 0% 88%; + --neutral-400: 0 0% 75%; + --neutral-500: 0 0% 55%; + --neutral-600: 0 0% 40%; + --neutral-700: 0 0% 28%; + --neutral-800: 0 0% 18%; + --neutral-900: 0 0% 10%; + --neutral-950: 0 0% 5%; + } + + * { + @apply border-neutral-200; + } + + body { + @apply bg-white text-neutral-900 antialiased tracking-tight selection:bg-neutral-900 selection:text-white; + font-feature-settings: "ss01", "ss02", "cv01", "cv02"; + } + + h1, h2, h3, h4, h5, h6 { + @apply font-semibold tracking-tight text-neutral-900; + } +} + +@layer components { + /* Premium Input Styles */ + .input-premium { + @apply h-10 w-full rounded-lg border border-neutral-200 bg-white px-3 py-2 text-sm ring-offset-white file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-neutral-400 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-neutral-900 focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 transition-all duration-200 hover:border-neutral-300; + } + + /* Premium Card Styles */ + .card-premium { + @apply rounded-xl border border-neutral-200 bg-white text-neutral-900 shadow-sm transition-all duration-300 hover:shadow-md hover:-translate-y-0.5; + } + + /* Glassmorphism */ + .glass { + @apply bg-white/80 backdrop-blur-md border border-white/20; + } +} + +@layer utilities { + .text-balance { + text-wrap: balance; + } + + /* Custom 
Scrollbar */ + .scrollbar-thin::-webkit-scrollbar { + width: 6px; + height: 6px; + } + + .scrollbar-thin::-webkit-scrollbar-track { + background: transparent; + } + + .scrollbar-thin::-webkit-scrollbar-thumb { + @apply bg-neutral-200 rounded-full hover:bg-neutral-300 transition-colors; + } + + /* Hide scrollbar but keep functionality */ + .scrollbar-hide { + -ms-overflow-style: none; + scrollbar-width: none; + } + + .scrollbar-hide::-webkit-scrollbar { + display: none; + } +} diff --git a/health-lens/app/layout.tsx b/health-lens/app/layout.tsx new file mode 100644 index 0000000..32cb42a --- /dev/null +++ b/health-lens/app/layout.tsx @@ -0,0 +1,34 @@ +import type { Metadata } from "next"; +import { Manrope } from "next/font/google"; +import "./globals.css"; + +const manrope = Manrope({ + subsets: ["latin"], + variable: "--font-manrope", + display: "swap", +}); + +export const metadata: Metadata = { + title: "HealthLens - Built with Hyperbrowser", + description: + "Compare your health data with recent medical studies. Powered by Hyperbrowser AI web scraping. Not for medical diagnosis.", + icons: { + icon: "/hyperbrowser_symbol-DARK.svg", + shortcut: "/hyperbrowser_symbol-DARK.svg", + apple: "/hyperbrowser_symbol-DARK.svg", + }, +}; + +export default function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { + return ( + + + {children} + + + ); +} diff --git a/health-lens/app/page.tsx b/health-lens/app/page.tsx new file mode 100644 index 0000000..3c2bffd --- /dev/null +++ b/health-lens/app/page.tsx @@ -0,0 +1,5 @@ +import HealthScreen from "@/components/health-screen"; + +export default function Home() { + return ; +} diff --git a/health-lens/components/app-layout.tsx b/health-lens/components/app-layout.tsx new file mode 100644 index 0000000..f0ef865 --- /dev/null +++ b/health-lens/components/app-layout.tsx @@ -0,0 +1,24 @@ +"use client"; + +import { motion } from "framer-motion"; + +interface AppLayoutProps { + children: React.ReactNode; +} + +export function AppLayout({ children }: AppLayoutProps) { + return ( +
+      {/* animated wrapper (framer-motion) rendering {children}; surrounding markup lost in extraction */}
+ ); +} diff --git a/health-lens/components/health-screen.tsx b/health-lens/components/health-screen.tsx new file mode 100644 index 0000000..d98a317 --- /dev/null +++ b/health-lens/components/health-screen.tsx @@ -0,0 +1,1054 @@ +"use client"; + +import { useEffect, useRef, useState, useMemo } from "react"; +import { Button } from "@/components/ui/button"; +import { Textarea } from "@/components/ui/textarea"; +import { ArrowRight, Sparkles, Loader2, Plus, FileText, FlaskConical, Heart, CheckCircle2, File } from "lucide-react"; +import { motion, AnimatePresence } from "framer-motion"; +import { StreamingText } from "@/components/streaming-text"; +import { getCachedSearch, cacheSearch } from "@/lib/search-cache"; +import { SearchResult } from "@/lib/hyperbrowser"; +import { getActiveConversation, addMessage, addFile, buildContextForAI, getAllFiles, updateConversationTitle, type ChatMessage } from "@/lib/memory"; +import { Sidebar } from "@/components/sidebar"; +import { getCachedResearchForFile, getCachedResearchForFiles } from "@/lib/research-cache"; + +interface Message { + id: string; + role: "user" | "assistant"; + content: string; + timestamp: number; + fileAttachment?: { + filename: string; + fileType: string; + fileSize: number; + markers: Array<{ + name: string; + value: string; + unit?: string; + }>; + }; +} + +export default function HealthScreen() { + const scrollRef = useRef(null); + const fileInputRef = useRef(null); + const [input, setInput] = useState(""); + const [messages, setMessages] = useState([]); + const [isLoading, setIsLoading] = useState(false); + const [isSearching, setIsSearching] = useState(false); + const [searchResults, setSearchResults] = useState(undefined); + const [currentResponse, setCurrentResponse] = useState(""); + const [isStreaming, setIsStreaming] = useState(false); + const [uploadStatus, setUploadStatus] = useState(null); + const [pendingFiles, setPendingFiles] = useState; + researchStatus?: "pending" | "completed" | "failed"; + isExtracting?: boolean; + }>>([]); + const [conversationKey, setConversationKey] = useState(0); + const [userHasScrolled, setUserHasScrolled] = useState(false); + const [isBackgroundResearch, setIsBackgroundResearch] = useState(false); + const [processingStage, setProcessingStage] = useState<"extracting" | "researching" | "completed" | null>(null); + const [prescrapedResearch, setPrescrapedResearch] = useState([]); + const [isPrescrapingDone, setIsPrescrapingDone] = useState(false); + + const isResearchBlocking = processingStage === "extracting" || processingStage === "researching"; + + // Load messages from active conversation + const loadActiveConversation = () => { + const activeConv = getActiveConversation(); + if (activeConv && activeConv.messages.length > 0) { + // Reconstruct messages with file attachments + const loadedMessages: Message[] = activeConv.messages.map(m => { + const baseMessage: Message = { + id: m.id, + role: m.role, + content: m.content, + timestamp: m.timestamp, + }; + + // Check if this is a file upload message and reconstruct the attachment + if (m.role === "user" && m.content.startsWith("Uploaded ")) { + const filename = m.content.replace("Uploaded ", ""); + const file = activeConv.files.find(f => f.filename === filename); + + if (file) { + baseMessage.fileAttachment = { + filename: file.filename, + fileType: file.fileType, + fileSize: 0, // Not stored in FileMemory, but not critical + markers: file.markers || [], + }; + } + } + + return baseMessage; + }); + + setMessages(loadedMessages); + } else { 
+ setMessages([]); + } + setPendingFiles([]); + setSearchResults(undefined); + }; + + // Load on mount - will be empty if no active conversation + useEffect(() => { + loadActiveConversation(); + }, []); + + // Pre-scrape trending health research on app load + useEffect(() => { + const prescrapeResearch = async () => { + try { + console.log("🔄 Pre-scraping health research..."); + const response = await fetch("/api/prescrape"); + + if (response.ok) { + const data = await response.json(); + if (data.results && data.results.length > 0) { + setPrescrapedResearch(data.results); + console.log(`✅ Pre-scraped ${data.totalStudies} studies`); + } + } + } catch (error) { + console.error("Pre-scrape failed:", error); + } finally { + setIsPrescrapingDone(true); + } + }; + + // Only pre-scrape if we don't have cached research + if (!isPrescrapingDone && prescrapedResearch.length === 0) { + prescrapeResearch(); + } + }, []); + + const handleSubmit = async (e?: { preventDefault?: () => void }) => { + e?.preventDefault?.(); + const text = input.trim(); + if (!text || isLoading) return; + if (isResearchBlocking) { + setUploadStatus("Research is running. Chat unlocks when complete."); + setTimeout(() => setUploadStatus(null), 2500); + return; + } + + setInput(""); + setUserHasScrolled(false); // Reset scroll flag on submit + + // Add pending file messages first (if any) + const fileMessages: Message[] = pendingFiles.map(file => ({ + id: `file-${file.id}`, + role: "user" as const, + content: `Uploaded ${file.filename}`, + timestamp: Date.now(), + fileAttachment: file, + })); + + // Add user text message + const userMessage: Message = { + id: Date.now().toString(), + role: "user", + content: text, + timestamp: Date.now(), + }; + + // Combine file messages and user message + const newMessages = [...fileMessages, userMessage]; + setMessages(prev => [...prev, ...newMessages]); + + // Clear pending files + setPendingFiles([]); + + // Save file messages to memory + fileMessages.forEach(msg => { + addMessage({ + id: msg.id, + role: msg.role, + content: msg.content, + timestamp: msg.timestamp, + }); + }); + + // Save user message to memory + addMessage({ + id: userMessage.id, + role: userMessage.role, + content: userMessage.content, + timestamp: userMessage.timestamp, + }); + + setIsLoading(true); + setCurrentResponse(""); + setIsStreaming(false); + + try { + let researchContext: SearchResult[] | null = null; + + // Step 1: Check for cached research from uploaded files + const allFiles = getAllFiles(); + const fileIds = allFiles.map(f => f.id); + const cachedFileResearch = getCachedResearchForFiles(fileIds); + + if (cachedFileResearch.length > 0) { + // Use cached research from files + console.log("Using cached research from uploaded files:", cachedFileResearch.length, "sources"); + researchContext = cachedFileResearch; + setSearchResults(cachedFileResearch); + } else { + // Step 2: Fall back to classify + search flow + const classifyResponse = await fetch("/api/classify", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ question: text }), + }); + + if (classifyResponse.ok) { + const classification = await classifyResponse.json(); + + if (classification.needsSearch && classification.searchTerms.length > 0) { + // Step 3: Check search cache + const cached = getCachedSearch(classification.searchTerms); + + if (cached) { + researchContext = cached; + setSearchResults(cached); + } else { + // Step 4: Perform deep search + setIsSearching(true); + setSearchResults(undefined); + 
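`getCachedSearch` and `cacheSearch` come from `lib/search-cache.ts`, which is not shown in this excerpt. A plausible shape for those helpers, assuming the localStorage persistence the README describes and an illustrative one-hour TTL, is:

```ts
// Sketch of the cache helpers' likely shape; the key format and TTL are assumptions.
import type { SearchResult } from "@/lib/hyperbrowser";

const TTL_MS = 60 * 60 * 1000; // assumed: one hour

function keyFor(terms: string[]): string {
  // Normalize and sort terms so equivalent queries share one cache entry.
  return "healthlens:search:" + terms.map((t) => t.trim().toLowerCase()).sort().join("|");
}

export function getCachedSearch(terms: string[]): SearchResult[] | null {
  if (typeof window === "undefined") return null;
  const raw = window.localStorage.getItem(keyFor(terms));
  if (!raw) return null;
  try {
    const { results, cachedAt } = JSON.parse(raw) as { results: SearchResult[]; cachedAt: number };
    return Date.now() - cachedAt < TTL_MS ? results : null;
  } catch {
    return null;
  }
}

export function cacheSearch(terms: string[], results: SearchResult[]): void {
  if (typeof window === "undefined") return;
  window.localStorage.setItem(keyFor(terms), JSON.stringify({ results, cachedAt: Date.now() }));
}
```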
+ const searchResponse = await fetch("/api/search", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ terms: classification.searchTerms }), + }); + + if (searchResponse.ok) { + const searchData = await searchResponse.json(); + researchContext = searchData.results; + setSearchResults(searchData.results); + + // Cache the results + cacheSearch(classification.searchTerms, searchData.results); + } + + setIsSearching(false); + } + } + } + } + + // Step 4: Stream chat response with research context and memory + setIsStreaming(true); + + const chatMessages = messages.concat(userMessage).map(m => ({ + role: m.role, + parts: [{ type: "text", text: m.content }], + })); + + // Build context from memory (files + recent conversation) + const memoryContext = buildContextForAI(); + + const chatResponse = await fetch("/api/chat", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + messages: chatMessages, + researchContext, + memoryContext, + }), + }); + + if (!chatResponse.ok) { + throw new Error("Chat failed"); + } + + const reader = chatResponse.body?.getReader(); + const decoder = new TextDecoder(); + let accumulatedText = ""; + + if (reader) { + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value); + accumulatedText += chunk; + setCurrentResponse(accumulatedText); + } + } + + // Add assistant message + const assistantMessage: Message = { + id: (Date.now() + 1).toString(), + role: "assistant", + content: accumulatedText, + timestamp: Date.now(), + }; + setMessages(prev => [...prev, assistantMessage]); + + // Save assistant message to memory + addMessage({ + id: assistantMessage.id, + role: assistantMessage.role, + content: assistantMessage.content, + timestamp: assistantMessage.timestamp, + hasResearch: researchContext !== null && researchContext.length > 0, + }); + + // Generate smart title for new conversations (after first exchange) + const activeConv = getActiveConversation(); + if (activeConv && activeConv.title === "New Chat" && activeConv.messages.length >= 2) { + try { + const titleResponse = await fetch("/api/generate-title", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + userMessage: text, + assistantResponse: accumulatedText, + }), + }); + + if (titleResponse.ok) { + const { title } = await titleResponse.json(); + if (title) { + updateConversationTitle(activeConv.id, title); + // Trigger sidebar update + setConversationKey(prev => prev + 1); + } + } + } catch (error) { + console.error("Failed to generate title:", error); + } + } + + // Trigger sidebar refresh + setConversationKey(prev => prev + 1); + + setCurrentResponse(""); + + } catch (error) { + console.error("Error:", error); + const errorMessage: Message = { + id: (Date.now() + 1).toString(), + role: "assistant", + content: "Sorry, I encountered an error. 
Please try again.", + timestamp: Date.now(), + }; + setMessages(prev => [...prev, errorMessage]); + + // Save error message to memory + addMessage({ + id: errorMessage.id, + role: errorMessage.role, + content: errorMessage.content, + timestamp: errorMessage.timestamp, + }); + } finally { + setIsLoading(false); + setIsSearching(false); + setIsStreaming(false); + // Don't clear searchResults - keep them visible for reference in sidebar + } + }; + + const handleFileUpload = async (e: React.ChangeEvent) => { + const files = e.target.files; + if (!files || files.length === 0) return; + + // Check if files already exist in global profile + const existingFiles = getAllFiles(); + const newFiles = Array.from(files).filter(file => { + const isDuplicate = existingFiles.some(f => f.filename === file.name); + if (isDuplicate) { + setUploadStatus(`"${file.name}" already uploaded`); + setTimeout(() => setUploadStatus(null), 3000); + } + return !isDuplicate; + }); + + if (newFiles.length === 0) { + if (fileInputRef.current) fileInputRef.current.value = ""; + return; + } + + setUploadStatus(newFiles.length === 1 ? "Uploading..." : `Uploading ${newFiles.length} files...`); + setProcessingStage("extracting"); + console.log("🔬 Starting file extraction and research..."); + + for (const file of newFiles) { + const formData = new FormData(); + formData.append("file", file); + + try { + const response = await fetch("/api/files", { + method: "POST", + body: formData, + }); + + if (response.ok) { + const data = await response.json(); + + // Convert file to base64 for storage + const reader = new FileReader(); + const fileId = data.file.id || Date.now().toString() + Math.random().toString(36).substr(2, 9); + + reader.onloadend = () => { + const base64data = reader.result as string; + + // Save file to memory with full content + addFile({ + id: fileId, + filename: data.file.filename, + fileType: data.file.fileType, + uploadedAt: Date.now(), + rawText: data.file.rawText || "", // Include full text content + fileData: base64data, // Store base64 encoded file + markers: data.file.markers || [], + summary: data.file.markers && data.file.markers.length > 0 + ? 
`Extracted ${data.file.markers.length} health markers` + : undefined, + researchStatus: data.file.researchStatus, + }); + }; + reader.readAsDataURL(file); + + // Cache research results client-side if included in response + if (data.research && data.research.status === "completed") { + import("@/lib/research-cache").then(({ cacheResearchForFile }) => { + cacheResearchForFile(fileId, data.research.queries, data.research.results); + console.log("✅ Research cached for file:", fileId); + }); + } + + // Add to pending files + setPendingFiles(prev => [...prev, { + id: fileId, + filename: data.file.filename, + fileType: data.file.fileType, + fileSize: data.file.fileSize || 0, + markers: data.file.markers || [], + researchStatus: data.file.researchStatus, + isExtracting: false, + }]); + + // Research is now completed immediately in the API + if (data.file.researchStatus === "completed") { + console.log("✅ File uploaded and research completed"); + setProcessingStage("completed"); + // Update search results immediately + if (data.research?.results) { + setSearchResults(data.research.results); + } + // Auto-clear after 2 seconds + setTimeout(() => { + setProcessingStage(null); + }, 2000); + } else if (data.file.researchStatus === "failed") { + console.log("⚠️ Research failed for file"); + setProcessingStage(null); + } + + } else { + console.error(`Upload failed for ${file.name}`); + } + } catch (err) { + console.error(`Upload failed for ${file.name}`, err); + } + } + + setUploadStatus(null); + if (fileInputRef.current) fileInputRef.current.value = ""; + }; + + // Detect user scroll + useEffect(() => { + const scrollContainer = scrollRef.current; + if (!scrollContainer) return; + + const handleScroll = () => { + const isAtBottom = scrollContainer.scrollHeight - scrollContainer.scrollTop - scrollContainer.clientHeight < 100; + setUserHasScrolled(!isAtBottom); + }; + + scrollContainer.addEventListener('scroll', handleScroll); + return () => scrollContainer.removeEventListener('scroll', handleScroll); + }, []); + + // Auto-scroll only when appropriate + useEffect(() => { + if (scrollRef.current && !userHasScrolled) { + const scrollContainer = scrollRef.current; + const isNearBottom = scrollContainer.scrollHeight - scrollContainer.scrollTop - scrollContainer.clientHeight < 100; + + // Only auto-scroll if user hasn't manually scrolled up and is near bottom + if (isNearBottom) { + scrollContainer.scrollTop = scrollContainer.scrollHeight; + } + } + }, [messages, currentResponse, isSearching, userHasScrolled]); + + // Clean up pending files when no longer needed + useEffect(() => { + // Auto-clear pending files after they're acknowledged + if (pendingFiles.length === 0 && processingStage !== "extracting") { + setIsBackgroundResearch(false); + } + }, [pendingFiles, processingStage]); + + // Calculate all research results to display in sidebar + const allResearchResults = useMemo(() => { + const results: SearchResult[] = []; + const seenKeys = new Set(); + + const addResults = (items: SearchResult[]) => { + items.forEach(r => { + // Use source + first study title as unique key + const key = `${r.source}-${r.studies[0]?.title || r.searchTerms.join(',')}`; + if (!seenKeys.has(key)) { + results.push(r); + seenKeys.add(key); + } + }); + }; + + // 1. Active search results (highest priority) + if (searchResults) addResults(searchResults); + + // 2. 
Pending files research + if (pendingFiles.length > 0) { + const pendingIds = pendingFiles.map(f => f.id); + const pendingResearch = getCachedResearchForFiles(pendingIds); + addResults(pendingResearch); + } + + // 3. Active conversation files research + const activeConv = getActiveConversation(); + if (activeConv && activeConv.files && activeConv.files.length > 0) { + const fileIds = activeConv.files.map(f => f.id); + const fileResearch = getCachedResearchForFiles(fileIds); + addResults(fileResearch); + } + + // 4. Pre-scraped research (shown when no other results) + if (results.length === 0 && prescrapedResearch.length > 0) { + addResults(prescrapedResearch); + } + + return results; + }, [searchResults, pendingFiles, messages, conversationKey, prescrapedResearch]); + + return ( +
+ +
+
+
+
+ {messages.length === 0 ? ( +
+ {/* Floating Heart */} + +
+
+ +
+ + +

+ How are you feeling? +

+

+ I can help analyze your health data, explain lab results, or just chat about your wellness. +

+

+ Powered by Hyperbrowser +

+ + {/* Pending Files Preview */} + {pendingFiles.length > 0 && ( +
+ {pendingFiles.map((file) => ( + + {/* Subtle background pattern */} +
+
+
+ + {/* File info */} +
+
+ + PDF +
+
+
+ + {file.fileType === "application/pdf" ? "PDF" : "DOC"} + + | + + {(file.fileSize / 1024).toFixed(1)} KB + +
+

+ {file.filename} +

+

+ {`>`} + Ready to analyze +

+
+ +
+ + {/* Extracted markers */} + {file.markers && file.markers.length > 0 && ( +
+
+
+
+ +
+ + {file.markers.length} MARKERS EXTRACTED + +
+ {file.markers.length > 4 && ( + + [Top 4] + + )} +
+
+ {file.markers.slice(0, 4).map((marker, idx) => ( +
+ {marker.name} +
+ + {marker.value} + + {marker.unit && ( + {marker.unit} + )} +
+
+ ))} + {file.markers.length > 4 && ( +
+ +{file.markers.length - 4} more... +
+ )} +
+
+ )} + + ))} +
+ )} + + {/* Search Bar Area */} +
+
+
+ + + setInput(e.target.value)} + onKeyDown={(e) => { + if (e.key === "Enter") handleSubmit(); + }} + placeholder={isResearchBlocking ? "Research in progress..." : "Ask anything about your health..."} + disabled={isResearchBlocking} + className="flex-1 bg-transparent border-none outline-none text-lg placeholder:text-neutral-400 text-neutral-900 h-12 disabled:text-neutral-400 disabled:cursor-not-allowed" + /> +
+ {input.trim() ? ( + + ) : ( +
+ )} +
+
+
+ + {/* Upload Status */} + + {uploadStatus && ( + + {uploadStatus.includes("Uploading") && ( + + )} + {uploadStatus} + + )} + + + {/* Get Started Cards */} +
+
+ + + +
+
+
+ ) : ( +
+ + {messages.map((message, index) => ( + + {message.role === "user" ? ( +
+ {message.fileAttachment ? ( + // File attachment message +
+ {/* Subtle background pattern */} +
+
+
+ + {/* File info */} +
+
+ + PDF +
+
+
+ + {message.fileAttachment.fileType === "application/pdf" ? "PDF" : "DOC"} + + | + + {(message.fileAttachment.fileSize / 1024).toFixed(1)} KB + +
+

+ {message.fileAttachment.filename} +

+

+ {`>`} + Successfully uploaded and analyzed +

+
+
+ + {/* Extracted markers */} + {message.fileAttachment.markers && message.fileAttachment.markers.length > 0 && ( +
+
+
+
+ +
+ + {message.fileAttachment.markers.length} MARKERS EXTRACTED + +
+ {message.fileAttachment.markers.length > 4 && ( + + [Top 4] + + )} +
+
+ {message.fileAttachment.markers.slice(0, 4).map((marker, idx) => ( +
+ {marker.name} +
+ + {marker.value} + + {marker.unit && ( + {marker.unit} + )} +
+
+ ))} +
+
+ )} +
+ ) : ( + // Regular text message +
+

+ {message.content} +

+
+ )} +
+ ) : ( +
+
+ +
+
+
+ +
+
+
+ )} + + ))} + + + {/* Thinking Indicator */} + {isLoading && !isSearching && !isStreaming && ( + +
+ +
+
+
+ + + +
+ Thinking... +
+
+ )} + + {/* Deep Search Animation */} + {/* Streaming Response */} + {isStreaming && currentResponse && ( + +
+ +
+
+
+ +
+
+
+ )} +
+ )} +
+
+ + {/* Floating Input Area (only visible when chat has started) */} + {messages.length > 0 && ( +
+
+ {/* Pending Files Preview */} + {pendingFiles.length > 0 && ( +
+ {pendingFiles.map((file) => ( + +
+
+ + PDF +
+
+

+ {file.filename} +

+

+ {(file.fileSize / 1024).toFixed(1)} KB • Ready +

+
+ +
+
+ ))} +
+ )} +
+ + +