Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
65 changes: 38 additions & 27 deletions flask_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -296,20 +296,6 @@ def execute_plan():
extracted_content = plan_data.get('extracted_content') # Retrieve extracted content

# Use API key from request if provided (stateless execution)
# However, for consistency, if the user provided an API key during plan generation, we should probably stick to it or ask for it again.
# Ideally, we should receive it again here or store it in cache (not recommended for secrets).
# Let's assume the user has to provide it if not in env, or it's passed in data.
# But `html_ui` currently only sends `plan_id`.
# I'll stick to env var for now unless I update `execute` frontend call too.
# Wait, I should update frontend `approvePlan` to send API key if it was set in settings.
# But `approvePlan` logic is separate.
# Let's rely on `orchestrator`'s API key.
# Actually, `plans_cache` is in-memory. I can store the API key there TEMPORARILY for the session?
# A better practice is to pass it from frontend.

# Retrieve potential API key from plans_cache if I decided to store it there (I didn't).
# So I will check if data has api_key (I need to update frontend to send it).

api_key = data.get('api_key') or os.getenv('OPENAI_API_KEY')

logger.info(f"🚀 Executing plan {plan_id}")
Expand Down Expand Up @@ -344,7 +330,8 @@ def execute_plan():
'path': output_path,
'topic': query,
'template': template_key,
'plan_id': plan_id
'plan_id': plan_id,
'api_key': api_key # Cache API key for regeneration
}

logger.info(f"✅ Slides generated: {report_id}")
Expand Down Expand Up @@ -431,17 +418,43 @@ def chat_slide():
if not report_id or not instruction:
return jsonify({'error': 'Missing parameters'}), 400

if report_id not in slides_cache:
return jsonify({'error': 'Report not found'}), 404

logger.info(f"💬 Chat for {report_id} slide {slide_idx}: {instruction}")

# Placeholder response for demo purposes
return jsonify({
'success': True,
'message': 'Slide updated based on instruction',
'updated_content': {
'title': f"Updated Slide {slide_idx}",
'bullets': ["Refined bullet 1", "Refined bullet 2"]
}
})
cached = slides_cache[report_id]
output_path = cached['path']
template_key = cached['template']
api_key = cached.get('api_key') or os.getenv('OPENAI_API_KEY')

log_path = str(output_path).replace('.pptx', '.execution.json')

# Re-initialize orchestrator just for regeneration
# We need the template path
template_file = GlobalConfig.PPTX_TEMPLATE_FILES[template_key]['file']
orchestrator = ExecutionOrchestrator(
api_key=api_key,
template_path=template_file
)

# Call regeneration
updated_content = orchestrator.regenerate_slide_content(
slide_idx=int(slide_idx),
instruction=instruction,
execution_log_path=log_path,
output_path=str(output_path)
)

if updated_content:
return jsonify({
'success': True,
'message': 'Slide updated based on instruction',
'updated_content': updated_content
})
else:
return jsonify({'error': 'Could not update slide content'}), 500

except Exception as e:
logger.error(f"Chat failed: {e}", exc_info=True)
return jsonify({'error': str(e)}), 500
Expand All @@ -465,9 +478,7 @@ def preview_report(report_id):
with open(log_path, 'r') as f:
execution_log = json.load(f)

# Add Title Slide (usually implicit or first in log? logic says it's manually added before loop)
# The execution log only contains content slides generated in loop.
# We add title slide manually to preview.
# Add Title Slide
slides.append({
'title': cached.get('topic', 'Title Slide'),
'type': 'title',
Expand Down
223 changes: 114 additions & 109 deletions src/slidedeckai/agents/content_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"""
import logging
import json
from typing import List, Dict
from typing import List, Dict, Any
from openai import OpenAI
from slidedeckai.global_config import GlobalConfig

Expand Down Expand Up @@ -43,26 +43,21 @@ def generate_subtitle(self, slide_title: str, purpose: str,

Return ONLY the subtitle text, nothing else."""

try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate concise subtitles."},
{"role": "user", "content": prompt}
],
temperature=0.4,
max_tokens=20
)

subtitle = response.choices[0].message.content.strip().strip('"\'')
return subtitle if subtitle else "Key Insights"

except Exception as e:
logger.error(f"Subtitle generation failed: {e}")
return "Analysis"
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate concise subtitles."},
{"role": "user", "content": prompt}
],
temperature=0.4,
max_tokens=20
)

subtitle = response.choices[0].message.content.strip().strip('"\'')
return subtitle

def generate_bullets(self, slide_title: str, purpose: str,
search_facts: List[str], max_bullets: int = 5) -> List[str]:
search_facts: List[str], max_bullets: int = 5, max_words_per_bullet: int = 20) -> List[str]:
"""
Generate bullet points from search facts
"""
Expand All @@ -79,34 +74,29 @@ def generate_bullets(self, slide_title: str, purpose: str,

Requirements:
- Generate EXACTLY {max_bullets} bullet points
- Each bullet: 10-20 words
- CRITICAL: Each bullet MUST be under {max_words_per_bullet} words.
- Include QUANTITATIVE data (numbers, percentages)
- Professional, executive-level tone
- NO preamble, ONLY bullet points

Return as plain text, one bullet per line."""

try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate concise, data-driven bullet points."},
{"role": "user", "content": prompt}
],
temperature=0.3,
max_tokens=300
)

content = response.choices[0].message.content.strip()
bullets = [line.strip('- ').strip() for line in content.split('\n')
if line.strip() and not line.startswith('```')]

logger.info(f" ✓ {len(bullets)} bullets")
return bullets[:max_bullets]

except Exception as e:
logger.error(f"Bullet generation failed: {e}")
return [f"Analysis of {slide_title}", "Key findings pending", "Data review in progress"]
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate concise, data-driven bullet points."},
{"role": "user", "content": prompt}
],
temperature=0.3,
max_tokens=300
)

content = response.choices[0].message.content.strip()
bullets = [line.strip('- ').strip() for line in content.split('\n')
if line.strip() and not line.startswith('```')]

logger.info(f" ✓ {len(bullets)} bullets")
return bullets[:max_bullets]

def generate_chart(self, slide_title: str, purpose: str,
search_facts: List[str], chart_type: str = 'column') -> Dict:
Expand Down Expand Up @@ -140,30 +130,20 @@ def generate_chart(self, slide_title: str, purpose: str,
]
}}"""

try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate chart data in JSON format. Return ONLY valid JSON."},
{"role": "user", "content": prompt}
],
temperature=0.2,
max_tokens=400,
response_format={"type": "json_object"}
)

chart_data = json.loads(response.choices[0].message.content)
logger.info(f" ✓ Chart: {len(chart_data.get('categories', []))} cats")
return chart_data

except Exception as e:
logger.error(f"Chart generation failed: {e}")
return {
"title": slide_title,
"type": chart_type,
"categories": ["Q1", "Q2", "Q3", "Q4"],
"series": [{"name": "Data", "values": [100, 120, 140, 160]}]
}
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate chart data in JSON format. Return ONLY valid JSON."},
{"role": "user", "content": prompt}
],
temperature=0.2,
max_tokens=400,
response_format={"type": "json_object"}
)

chart_data = json.loads(response.choices[0].message.content)
logger.info(f" ✓ Chart: {len(chart_data.get('categories', []))} cats")
return chart_data

def generate_table(self, slide_title: str, purpose: str,
search_facts: List[str]) -> Dict:
Expand Down Expand Up @@ -195,31 +175,20 @@ def generate_table(self, slide_title: str, purpose: str,
]
}}"""

try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate table data in JSON. Return ONLY valid JSON."},
{"role": "user", "content": prompt}
],
temperature=0.2,
max_tokens=500,
response_format={"type": "json_object"}
)

table_data = json.loads(response.choices[0].message.content)
logger.info(f" ✓ Table: {len(table_data.get('headers', []))} cols")
return table_data

except Exception as e:
logger.error(f"Table generation failed: {e}")
return {
"headers": ["Metric", "Value", "Change"],
"rows": [
["Revenue", "$XXB", "+X%"],
["Profit", "$XXB", "+X%"]
]
}
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate table data in JSON. Return ONLY valid JSON."},
{"role": "user", "content": prompt}
],
temperature=0.2,
max_tokens=500,
response_format={"type": "json_object"}
)

table_data = json.loads(response.choices[0].message.content)
logger.info(f" ✓ Table: {len(table_data.get('headers', []))} cols")
return table_data

def generate_kpi(self, slide_title: str, fact: str) -> Dict:
"""
Expand All @@ -242,22 +211,58 @@ def generate_kpi(self, slide_title: str, fact: str) -> Dict:
"label": "Q4 Revenue"
}}"""

try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Extract KPI data. Return ONLY valid JSON."},
{"role": "user", "content": prompt}
],
temperature=0.1,
max_tokens=100,
response_format={"type": "json_object"}
)

kpi_data = json.loads(response.choices[0].message.content)
logger.info(f" ✓ KPI: {kpi_data.get('label', 'N/A')}")
return kpi_data

except Exception as e:
logger.error(f"KPI generation failed: {e}")
return {"value": "N/A", "label": slide_title[:20]}
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Extract KPI data. Return ONLY valid JSON."},
{"role": "user", "content": prompt}
],
temperature=0.1,
max_tokens=100,
response_format={"type": "json_object"}
)

kpi_data = json.loads(response.choices[0].message.content)
logger.info(f" ✓ KPI: {kpi_data.get('label', 'N/A')}")
return kpi_data

def generate_pictogram_data(self, slide_title: str, purpose: str, facts: List[str], count: int = 4) -> List[Dict]:
    """
    Generate data for a pictogram/icon grid slide.

    Asks the LLM for `count` items, each carrying a short label, a brief
    supporting description, and a one-word icon keyword derived from the
    supplied facts.

    Args:
        slide_title: Title of the slide the icon grid belongs to.
        purpose: What the slide is meant to convey.
        facts: Source facts the grid items should be distilled from.
        count: Maximum number of items to return (default 4).

    Returns:
        A list of dicts with 'label', 'description' and 'icon_keyword'
        keys, truncated to at most `count` entries. May be empty if the
        model returns an unrecognized payload shape.

    Raises:
        json.JSONDecodeError: If the model response is not valid JSON.
    """
    facts_text = "\n".join(facts)

    prompt = f"""Generate {count} key points for a visual icon grid:

Title: {slide_title}
Purpose: {purpose}
Facts: {facts_text}

For each point provide:
- label: Short bold title (2-4 words)
- description: Brief supporting text (10-15 words)
- icon_keyword: A single word visual metaphor (e.g. 'growth', 'money', 'users')

Return ONLY valid JSON:
[
    {{"label": "Title", "description": "Desc", "icon_keyword": "keyword"}}
]"""

    response = self.client.chat.completions.create(
        model=self.model,
        messages=[
            {"role": "system", "content": "Generate icon grid data. Return JSON array."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.3,
        max_tokens=400,
        response_format={"type": "json_object"}
    )

    data = json.loads(response.choices[0].message.content)

    # json_object mode normally yields a dict, but guard the bare-list case
    # FIRST: calling .get() on a list raises AttributeError, which made the
    # original list fallback unreachable. NOTE(review): the prompt asks for a
    # JSON array while response_format forces an object — the model is
    # expected to wrap items under 'points' or 'items'; confirm in practice.
    if isinstance(data, list):
        items = data
    else:
        items = data.get('points', data.get('items', []))

    logger.info(f" ✓ Pictogram: {len(items)} items")
    return items[:count]
Loading