diff --git a/dream_layer_frontend/src/features/Txt2Img/Txt2ImgPage.tsx b/dream_layer_frontend/src/features/Txt2Img/Txt2ImgPage.tsx index 1d3f2045..6a64f787 100644 --- a/dream_layer_frontend/src/features/Txt2Img/Txt2ImgPage.tsx +++ b/dream_layer_frontend/src/features/Txt2Img/Txt2ImgPage.tsx @@ -58,6 +58,21 @@ const Txt2ImgPage: React.FC = ({ selectedModel, onTabChange }) const handleBatchSettingsChange = (batchSize: number, batchCount: number) => { updateCoreSettings({ batch_size: batchSize, batch_count: batchCount }); }; + const handleGenerateReport = async () => { + try { + const response = await fetch("/api/generate-report", { method: "POST" }); + if (!response.ok) throw new Error("Failed to generate report"); + const blob = await response.blob(); + const url = window.URL.createObjectURL(blob); + const link = document.createElement("a"); + link.href = url; + link.download = "report.zip"; + link.click(); + } catch (error) { + console.error("Error generating report:", error); + } +}; + const handleSamplingSettingsChange = (sampler: string, scheduler: string, steps: number, cfg: number) => { updateCoreSettings({ @@ -403,6 +418,7 @@ const Txt2ImgPage: React.FC = ({ selectedModel, onTabChange }) }; return ( +
{/* Left Column - Controls */}
@@ -410,6 +426,13 @@ const Txt2ImgPage: React.FC = ({ selectedModel, onTabChange })

Generation Settings

+
{isMobile && } diff --git a/tasks/report_bundle.zip b/tasks/report_bundle.zip new file mode 100644 index 00000000..f4bf0600 Binary files /dev/null and b/tasks/report_bundle.zip differ diff --git a/tasks/report_bundle/README.txt b/tasks/report_bundle/README.txt new file mode 100644 index 00000000..fbf72d5a --- /dev/null +++ b/tasks/report_bundle/README.txt @@ -0,0 +1,13 @@ +# Report Bundle + +Hi! +This report includes everything you need to review the image generation results. + +What’s inside: +- `results.csv` — a table with image scores and file paths +- `config.json` — the exact settings used during generation (models, prompts, etc.) +- Grid images — labeled previews of the outputs + +Feel free to explore or reuse any part of it. Let me know if anything’s missing! + +— Lial diff --git a/tasks/report_bundle/config.json b/tasks/report_bundle/config.json new file mode 100644 index 00000000..dd5d0128 --- /dev/null +++ b/tasks/report_bundle/config.json @@ -0,0 +1,8 @@ +{ + "task_id": "123", + "report_type": "Csv", + "data": { + "title": "My Report", + "content": "This is the content of the report." 
import os
import json
import csv
import zipfile
from PIL import Image
import torch
import clip
import lpips

RESULTS_FILE = 'results.csv'
CONFIG_FILE = 'config.json'
GRIDS_DIR = 'grids'
README_FILE = 'README.txt'
OUTPUT_ZIP = 'report.zip'

# Load models once at module import so repeated report runs reuse them.
device = "cuda" if torch.cuda.is_available() else "cpu"
clip_model, preprocess = clip.load("ViT-B/32", device=device)
lpips_model = lpips.LPIPS(net='alex').to(device)


def compute_scores(prompt, image_path, reference_image_path=None):
    """
    Compute the CLIP similarity between `prompt` and the image at `image_path`.

    If `reference_image_path` is provided and exists, also compute the LPIPS
    distance between the two images; otherwise the LPIPS score is 0.0
    (placeholder, as in the original design).

    Returns:
        (clip_score, lpips_score), each rounded to 6 decimal places.
    """
    img = Image.open(image_path).convert("RGB")
    image_tensor = preprocess(img).unsqueeze(0).to(device)
    text = clip.tokenize([prompt]).to(device)

    with torch.no_grad():
        image_features = clip_model.encode_image(image_tensor)
        text_features = clip_model.encode_text(text)
        clip_score = torch.cosine_similarity(image_features, text_features).item()

    lpips_score = None
    if reference_image_path and os.path.exists(reference_image_path):
        ref_img = Image.open(reference_image_path).convert("RGB")
        # LPIPS expects float tensors of shape [1, 3, H, W] in the range [-1, 1].
        from torchvision import transforms
        to_tensor = transforms.ToTensor()
        a = to_tensor(img).unsqueeze(0).mul(2).sub(1).to(device)      # [0,1] -> [-1,1]
        b = to_tensor(ref_img).unsqueeze(0).mul(2).sub(1).to(device)
        with torch.no_grad():
            lpips_score = float(lpips_model(a, b).item())
    else:
        lpips_score = 0.0  # placeholder when no reference image is given

    return round(clip_score, 6), round(lpips_score, 6)


def add_scores_to_csv(results_file):
    """
    Read results.csv (expects at least: id, prompt, image_path; optional:
    reference_image_path for LPIPS), compute clip_score / lpips_score for every
    row, derive a run_id from the id column (run_1, run_2, ...), and rewrite
    the CSV in place with the extra columns.

    Raises:
        FileNotFoundError: if any row's image_path is missing or does not exist.
    """
    rows = []
    with open(results_file, newline='', encoding='utf-8') as csvfile:
        reader = csv.DictReader(csvfile)
        fieldnames = list(reader.fieldnames)
        # Append the derived columns only when absent so re-running the
        # pipeline does not duplicate headers.
        for extra in ('clip_score', 'lpips_score', 'run_id'):
            if extra not in fieldnames:
                fieldnames.append(extra)

        for row in reader:
            img_path = row.get('image_path')
            prompt = row.get('prompt', '')  # optional column
            ref_path = row.get('reference_image_path')  # optional column
            if not img_path or not os.path.exists(img_path):
                raise FileNotFoundError(f"Image file not found: {img_path}")

            clip_s, lpips_s = compute_scores(prompt, img_path, reference_image_path=ref_path)
            row['clip_score'] = clip_s
            row['lpips_score'] = lpips_s
            # Derive run_id from id (e.g. run_1, run_2, ...).
            row['run_id'] = f"run_{row.get('id', '')}"
            rows.append(row)

    # Bug fix: the original wrote the CSV twice with two identical
    # back-to-back write blocks; a single rewrite is sufficient.
    with open(results_file, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)


def generate_config_from_frontend(config_file, settings_dict):
    """
    Write config.json from settings supplied by the frontend.

    settings_dict expected format:
        { "run_id1": { "prompt": "...", "seed": 123, "sampler": "ddim", ... }, ... }
    """
    with open(config_file, 'w', encoding='utf-8') as f:
        json.dump(settings_dict, f, indent=4, ensure_ascii=False)


def create_report_zip(output_zip=OUTPUT_ZIP):
    """Bundle the results CSV, config, README and grid images into `output_zip`."""
    with zipfile.ZipFile(output_zip, 'w') as zf:
        zf.write(RESULTS_FILE)
        zf.write(CONFIG_FILE)
        zf.write(README_FILE)
        for image_file in os.listdir(GRIDS_DIR):
            full_path = os.path.join(GRIDS_DIR, image_file)
            if os.path.isfile(full_path):
                zf.write(full_path, arcname=os.path.join('grids', image_file))


def run_report(settings_from_frontend):
    """
    Main entrypoint used by the API.

    settings_from_frontend: dict (see generate_config_from_frontend).

    Returns:
        Absolute path of the generated zip archive.

    Raises:
        FileNotFoundError: if any required input file or directory is missing.
    """
    # validations
    if not os.path.exists(RESULTS_FILE):
        raise FileNotFoundError(f"{RESULTS_FILE} not found")
    if not os.path.isdir(GRIDS_DIR):
        raise FileNotFoundError(f"{GRIDS_DIR} not found")
    if not os.path.exists(README_FILE):
        raise FileNotFoundError(f"{README_FILE} not found")

    # 1. compute scores and update CSV
    add_scores_to_csv(RESULTS_FILE)

    # 2. write config.json from frontend settings
    generate_config_from_frontend(CONFIG_FILE, settings_from_frontend)

    # 3. create zip
    create_report_zip()
    return os.path.abspath(OUTPUT_ZIP)
create zip + create_report_zip() + return os.path.abspath(OUTPUT_ZIP) diff --git a/tasks/report_bundle/grids/grid1.png b/tasks/report_bundle/grids/grid1.png new file mode 100644 index 00000000..18d16099 Binary files /dev/null and b/tasks/report_bundle/grids/grid1.png differ diff --git a/tasks/report_bundle/grids/grid2.png b/tasks/report_bundle/grids/grid2.png new file mode 100644 index 00000000..2b8b6583 Binary files /dev/null and b/tasks/report_bundle/grids/grid2.png differ diff --git a/tasks/report_bundle/report.zip b/tasks/report_bundle/report.zip new file mode 100644 index 00000000..1fb27bd4 Binary files /dev/null and b/tasks/report_bundle/report.zip differ diff --git a/tasks/report_bundle/results.csv b/tasks/report_bundle/results.csv new file mode 100644 index 00000000..f8d159b5 --- /dev/null +++ b/tasks/report_bundle/results.csv @@ -0,0 +1,3 @@ +id,image_path,score,clip_score,lpips_score,run_id +1,grids/grid1.png,0.95,0.214652,0.0,run_1 +2,grids/grid2.png,0.88,0.237358,0.0,run_2 diff --git a/tasks/report_bundle/server.py b/tasks/report_bundle/server.py new file mode 100644 index 00000000..01e58f3b --- /dev/null +++ b/tasks/report_bundle/server.py @@ -0,0 +1,40 @@ +from fastapi import FastAPI, Body, HTTPException +from fastapi.responses import FileResponse, HTMLResponse +from fastapi.middleware.cors import CORSMiddleware +import os +from generate_report import run_report + +app = FastAPI() + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +@app.get("/", response_class=HTMLResponse) +def read_root(): + html_content = """ + + + DreamLayer Report API + + +

Welcome to DreamLayer Report API

+

Use the /generate_report POST endpoint to generate reports.

+ + + """ + return html_content + +@app.post("/generate_report") +async def generate_report_endpoint(frontend_settings: dict = Body(...)): + try: + zip_path = run_report(frontend_settings) + if not os.path.exists(zip_path): + raise HTTPException(status_code=500, detail=f"{zip_path} not found after generation.") + return FileResponse(zip_path, filename="report.zip", media_type="application/zip") + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) diff --git a/tasks/report_bundle/src/components/GenerateReportButton.jsx b/tasks/report_bundle/src/components/GenerateReportButton.jsx new file mode 100644 index 00000000..df619e68 --- /dev/null +++ b/tasks/report_bundle/src/components/GenerateReportButton.jsx @@ -0,0 +1,76 @@ +// src/components/GenerateReportButton.jsx +import React, { useState } from "react"; + +/** + * props: + * - frontendSettings: object like { run_001: { prompt, seed, sampler }, ... } + * - className: optional CSS classes (reuse your "Generate Images" classes) + * - apiUrl: optional backend url (default http://localhost:8000/generate_report) + */ +export default function GenerateReportButton({ + frontendSettings = null, + getFrontendSettings = null, + className = "", + apiUrl = "http://localhost:8000/generate_report", +}) { + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + + // helper to get settings either from prop or by calling a function passed from parent + const resolveSettings = () => { + if (typeof getFrontendSettings === "function") return getFrontendSettings(); + return frontendSettings; + }; + + const handleClick = async () => { + setLoading(true); + setError(null); + + try { + const settings = resolveSettings(); + if (!settings || Object.keys(settings).length === 0) { + throw new Error("No frontend settings provided. 
Pass frontendSettings or getFrontendSettings."); + } + + const res = await fetch(apiUrl, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(settings), + }); + + if (!res.ok) { + const txt = await res.text(); + throw new Error(txt || `Server responded ${res.status}`); + } + + // response is a zip file — download it + const blob = await res.blob(); + const url = window.URL.createObjectURL(blob); + const a = document.createElement("a"); + a.href = url; + a.download = "report.zip"; + document.body.appendChild(a); + a.click(); + a.remove(); + window.URL.revokeObjectURL(url); + } catch (err) { + console.error("GenerateReport error:", err); + setError(err.message || "Failed to generate report"); + } finally { + setLoading(false); + } + }; + + return ( +
+ + {error &&
{error}
} +
+ ); +} diff --git a/tasks/report_bundle/test_schema.py b/tasks/report_bundle/test_schema.py new file mode 100644 index 00000000..f5838bdb --- /dev/null +++ b/tasks/report_bundle/test_schema.py @@ -0,0 +1,14 @@ +import csv + +def test_csv_schema(filename): + required_columns = {'id', 'image_path', 'score'} + with open(filename, newline='') as csvfile: + reader = csv.DictReader(csvfile) + headers = set(reader.fieldnames) + missing = required_columns - headers + if missing: + raise ValueError(f"CSV is missing required columns: {missing}") + print("✅ CSV schema test passed!") + +if __name__ == "__main__": + test_csv_schema('results.csv')