Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
133 changes: 109 additions & 24 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,9 @@
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
import io
import json

# ═══════════════════════════════════════════════════════════════
# PAGE CONFIGURATION
Expand Down Expand Up @@ -39,7 +35,7 @@
},
'system_parameters': {
'length_km': 96,
'lifetime_years': 100,
'lifetime_years': 50,
Copy link

Copilot AI Apr 22, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

CHEN_2022_BENCHMARK['system_parameters']['lifetime_years'] was changed to 50, which appears to be the tool’s assessment lifetime rather than the Chen 2022 benchmark lifetime. In this repo the benchmark metadata elsewhere still uses 100 years (see SD_LCA_LCCA_Enhanced.py), so this update risks misrepresenting the Chen reference and confusing validation. Consider keeping the benchmark lifetime aligned with the source (or the existing benchmark metadata) and using ASSESSMENT_LIFETIME_YEARS only for the tool’s configurable assessment horizon.

Suggested change
'lifetime_years': 50,
'lifetime_years': 100,

Copilot uses AI. Check for mistakes.
'functional_unit': '1 passenger-km',
'system_boundary': 'Cradle-to-grave'
},
Expand Down Expand Up @@ -77,6 +73,28 @@
REF_FRP = 1000    # reference FRP material quantity — presumably tons; TODO confirm units against benchmark
REF_GLASS = 500   # reference glass material quantity — presumably tons; TODO confirm units
REF_LENGTH = 96   # reference system length (km); matches the benchmark 'length_km' of 96
# Tool's configurable assessment horizon in years. Deliberately distinct from the
# Chen 2022 benchmark 'lifetime_years' — use this constant (not a literal) wherever
# the app computes lifetime maintenance or total cost.
ASSESSMENT_LIFETIME_YEARS = 50

# Centralized environmental scoring thresholds (replaces scattered magic numbers).
ENVIRONMENTAL_REFERENCES = {
    'co2_min_tons': 5000.0,                   # best-case total CO2 (tons): maps to score 100
    'co2_max_tons': 50000.0,                  # worst-case total CO2 (tons): maps to score 0
    'renewable_target_pct': 40.0,             # target renewable share (%) — appears unused so far; wire in or remove
    'noise_target_db': 15.0,                  # noise-reduction target (dB) — appears unused so far; noise multiplier still hard-codes 15.0
    'land_efficiency_target_pax_ha': 5000.0   # land-use target (pax/ha) — appears unused here; operational dict has its own copy
}

# Centralized operational scoring thresholds.
OPERATIONAL_REFERENCES = {
    'time_savings_target_hours_day': 2500.0,     # effective daily time savings (hours/day) that earns a full score
    'availability_target_pct': 98.0,             # availability target (%) — appears unused so far; wire in or remove
    'energy_efficiency_baseline_kwh_pkm': 0.20,  # baseline energy use (kWh per passenger-km); lower earns an energy bonus
    'land_efficiency_target_pax_ha': 5000.0      # land-use efficiency (pax/ha) that earns a full land score
}

ECONOMIC_REFERENCES = {
'jobs_target': 12000.0,
'multiplier_target': 3.5,
'max_maintenance_ratio': 1.0
Comment on lines +80 to +96
Copy link

Copilot AI Apr 22, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Several keys in the new reference dictionaries are currently unused (renewable_target_pct, noise_target_db, land_efficiency_target_pax_ha in ENVIRONMENTAL_REFERENCES; availability_target_pct in OPERATIONAL_REFERENCES; max_maintenance_ratio in ECONOMIC_REFERENCES). Keeping unused reference targets makes it harder to tell which thresholds actually drive scoring. Either wire these values into the corresponding calculations (replacing remaining literals) or remove the unused entries until they’re needed.

Suggested change
'co2_max_tons': 50000.0,
'renewable_target_pct': 40.0,
'noise_target_db': 15.0,
'land_efficiency_target_pax_ha': 5000.0
}
OPERATIONAL_REFERENCES = {
'time_savings_target_hours_day': 2500.0,
'availability_target_pct': 98.0,
'energy_efficiency_baseline_kwh_pkm': 0.20,
'land_efficiency_target_pax_ha': 5000.0
}
ECONOMIC_REFERENCES = {
'jobs_target': 12000.0,
'multiplier_target': 3.5,
'max_maintenance_ratio': 1.0
'co2_max_tons': 50000.0
}
OPERATIONAL_REFERENCES = {
'time_savings_target_hours_day': 2500.0,
'energy_efficiency_baseline_kwh_pkm': 0.20,
'land_efficiency_target_pax_ha': 5000.0
}
ECONOMIC_REFERENCES = {
'jobs_target': 12000.0,
'multiplier_target': 3.5

Copilot uses AI. Check for mistakes.
}

# ═══════════════════════════════════════════════════════════════
# EMBODIED ENERGY COEFFICIENTS (MJ per kg)
Expand Down Expand Up @@ -147,6 +165,28 @@ def calculate_intensity_score(value, ref_value, min_scale=0.5, max_scale=1.5):
return 100


def calculate_threshold_score(value, bad_value, good_value, higher_is_better=True):
    """Score ``value`` on a 0-100 scale between explicit reference thresholds.

    ``bad_value`` maps to 0 and ``good_value`` maps to 100; values outside the
    interval are clamped. With ``higher_is_better=False`` the orientation flips
    so that smaller values score higher.

    Returns a float in [0.0, 100.0]. A degenerate interval (bad == good)
    yields a perfect 100.0.
    """
    span = abs(good_value - bad_value)
    if span == 0:
        # No usable reference interval — treat any value as a perfect score.
        return 100.0

    delta = (value - bad_value) if higher_is_better else (bad_value - value)
    return float(np.clip(delta / span, 0, 1) * 100.0)


def classify_factor_status(score):
    """Map a numeric factor score (0-100) onto a qualitative label.

    >= 75 -> "Good", >= 50 -> "Moderate", otherwise "Poor".
    """
    # Ordered cutoff table: first matching threshold wins.
    for cutoff, label in ((75, "Good"), (50, "Moderate")):
        if score >= cutoff:
            return label
    return "Poor"


def calculate_material_score_integrated(material_data):
"""Integrated material score with recycling bonuses"""
concrete_intensity = material_data['concrete_intensity']
Expand Down Expand Up @@ -193,9 +233,12 @@ def calculate_environmental_score_integrated(env_data):
noise_quality_multiplier = 1.0 + (noise_reduction / 15.0) * 0.10
Copy link

Copilot AI Apr 22, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

noise_quality_multiplier still hard-codes 15.0 even though ENVIRONMENTAL_REFERENCES defines noise_target_db. This partially defeats the goal of centralizing thresholds and can lead to drift if references change. Use the reference value in this normalization to keep scoring behavior consistent with the configured targets.

Suggested change
noise_quality_multiplier = 1.0 + (noise_reduction / 15.0) * 0.10
noise_quality_multiplier = 1.0 + (
noise_reduction / ENVIRONMENTAL_REFERENCES['noise_target_db']
) * 0.10

Copilot uses AI. Check for mistakes.
noise_quality_multiplier = np.clip(noise_quality_multiplier, 1.0, 1.10)

co2_normalized = (total_co2_adjusted - 5000.0) / (50000.0 - 5000.0)
co2_normalized = np.clip(co2_normalized, 0, 1)
co2_score = (1.0 - co2_normalized) * 100.0
co2_score = calculate_threshold_score(
total_co2_adjusted,
ENVIRONMENTAL_REFERENCES['co2_max_tons'],
ENVIRONMENTAL_REFERENCES['co2_min_tons'],
higher_is_better=False
)

environmental_score = co2_score * noise_quality_multiplier
environmental_score = np.clip(environmental_score, 0, 100)
Expand All @@ -211,13 +254,25 @@ def calculate_operational_score_integrated(op_data):

availability_factor = availability / 100.0
effective_time_savings = time_savings * availability_factor
time_score = min(100.0, (effective_time_savings / 2500.0) * 100.0)
time_score = calculate_threshold_score(
effective_time_savings,
0.0,
OPERATIONAL_REFERENCES['time_savings_target_hours_day'],
higher_is_better=True
)

energy_bonus = (0.20 - energy_efficiency) / 0.20
energy_bonus = (
OPERATIONAL_REFERENCES['energy_efficiency_baseline_kwh_pkm'] - energy_efficiency
) / OPERATIONAL_REFERENCES['energy_efficiency_baseline_kwh_pkm']
energy_bonus = np.clip(energy_bonus, 0, 1)

adjusted_land_efficiency = land_efficiency * (1.0 + energy_bonus * 0.20)
land_score = min(100.0, (adjusted_land_efficiency / 5000.0) * 100.0)
land_score = calculate_threshold_score(
adjusted_land_efficiency,
0.0,
OPERATIONAL_REFERENCES['land_efficiency_target_pax_ha'],
higher_is_better=True
)

synergy_multiplier = 1.0 + (availability_factor * float(energy_bonus) * 0.10)
availability_score = availability * synergy_multiplier
Expand Down Expand Up @@ -253,12 +308,22 @@ def calculate_economic_score_integrated(econ_data):
synergy_factor = 1.0 + (economic_multiplier - 1.0) * 0.20
adjusted_jobs = total_economic_impact * synergy_factor

jobs_score = min(100.0, (adjusted_jobs / 12000.0) * 100.0)
jobs_score = calculate_threshold_score(
adjusted_jobs,
0.0,
ECONOMIC_REFERENCES['jobs_target'],
higher_is_better=True
)

cost_efficiency_score = 100.0 * maintenance_efficiency * (1.0 + job_efficiency_bonus)
cost_efficiency_score = min(100.0, cost_efficiency_score)

multiplier_score = min(100.0, (economic_multiplier / 3.5) * 100.0)
multiplier_score = calculate_threshold_score(
economic_multiplier,
1.0,
ECONOMIC_REFERENCES['multiplier_target'],
higher_is_better=True
)

economic_score = (
jobs_score * 0.50 +
Expand Down Expand Up @@ -355,7 +420,7 @@ def run_full_assessment(params):

effective_carbon_intensity = carbon_intensity * (1 - renewable_share / 100.0)
raw_carbon_intensity_val = energy_per_pax_km_calibrated * effective_carbon_intensity * 0.800
calibrated_carbon_intensity = raw_carbon_intensity_val
calibrated_carbon_intensity = apply_chen_calibration(raw_carbon_intensity_val, 'operational_carbon')
annual_co2_operational = calibrated_carbon_intensity * daily_pax_km * 365 / 1000
Comment on lines 421 to 424
Copy link

Copilot AI Apr 22, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

raw_carbon_intensity_val is already computed from the Chen-calibrated operational energy (energy_per_pax_km_calibrated) and the user-provided grid carbon intensity (plus the existing 0.800 Chen factor). Applying operational_carbon_factor again here likely double-calibrates operational carbon and will cause the benchmark carbon intensity (0.0352 kgCO₂/pkm) to be under-shot when using the default grid carbon intensity. Consider removing this extra calibration step or re-deriving the operational carbon calibration approach so energy/carbon aren’t calibrated twice.

Copilot uses AI. Check for mistakes.

steel_recycle_rate = params['steel_recycle'] / 100.0
Expand All @@ -372,9 +437,10 @@ def run_full_assessment(params):
steel_recycling_credit_ee = ee_steel * (params['steel_recycle'] / 100) * 0.70
aluminum_recycling_credit_ee = ee_aluminum * (params['aluminum_recycle'] / 100) * 0.85

total_ee = (ee_concrete + ee_steel + ee_aluminum +
ee_wood + ee_frp + ee_glass -
steel_recycling_credit_ee - aluminum_recycling_credit_ee)
total_ee_raw = (ee_concrete + ee_steel + ee_aluminum +
ee_wood + ee_frp + ee_glass -
steel_recycling_credit_ee - aluminum_recycling_credit_ee)
total_ee = apply_chen_calibration(total_ee_raw, 'embodied_energy')
Copy link

Copilot AI Apr 22, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

total_ee_raw is calculated using COEFFICIENTS that are already labeled as Chen-calibrated in-file (see the coefficient block header). Multiplying by apply_chen_calibration(..., 'embodied_energy') here likely applies calibration twice, which will skew embodied energy results and break the Chen benchmark validation (which compares against already-calibrated benchmark totals). Prefer a single calibration strategy: either keep base coefficients and apply calibration once, or keep calibrated coefficients and do not apply an additional embodied-energy adjustment factor.

Suggested change
total_ee = apply_chen_calibration(total_ee_raw, 'embodied_energy')
# COEFFICIENTS are already Chen-calibrated; avoid applying embodied-energy calibration twice.
total_ee = total_ee_raw

Copilot uses AI. Check for mistakes.

# Embodied Carbon
carbon_concrete = concrete_volume * DENSITIES['concrete'] * EMISSION_FACTORS['concrete']
Expand All @@ -390,6 +456,7 @@ def run_full_assessment(params):
total_carbon_raw = (carbon_concrete + carbon_steel + carbon_aluminum +
carbon_wood + carbon_frp + carbon_glass -
steel_recycling_credit_c - aluminum_recycling_credit_c)
total_carbon_raw = apply_chen_calibration(total_carbon_raw, 'embodied_carbon')
Copy link

Copilot AI Apr 22, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

total_carbon_raw is computed from EMISSION_FACTORS that are already documented as Chen-calibrated, but it is then overwritten with apply_chen_calibration(..., 'embodied_carbon'). This likely double-applies calibration and will inflate embodied carbon (and thus total CO₂) relative to the Chen benchmark totals used elsewhere in the app. Consider removing the extra embodied_carbon calibration here (or switching emission factors back to uncalibrated values and calibrating only once).

Suggested change
total_carbon_raw = apply_chen_calibration(total_carbon_raw, 'embodied_carbon')

Copilot uses AI. Check for mistakes.

total_embodied_co2 = total_carbon_raw / 1000
total_co2 = annual_co2_operational + total_embodied_co2
Expand All @@ -404,7 +471,7 @@ def run_full_assessment(params):
jobs_created = params['jobs_created']
economic_multiplier = params['economic_multiplier']
total_jobs = jobs_created * economic_multiplier
total_maintenance_cost = annual_maintenance * 30
total_maintenance_cost = annual_maintenance * ASSESSMENT_LIFETIME_YEARS

noise_reduction = params['noise_reduction']
land_use_efficiency = params['land_use']
Expand Down Expand Up @@ -438,7 +505,7 @@ def run_full_assessment(params):
'maintenance_cost': annual_maintenance,
'jobs_created': jobs_created,
'economic_multiplier': economic_multiplier,
'lifetime': 50
'lifetime': ASSESSMENT_LIFETIME_YEARS
}

material_score = calculate_material_score_integrated(material_data)
Expand All @@ -463,6 +530,13 @@ def run_full_assessment(params):
adjusted_economic_score * 0.20
)
overall_score = float(np.clip(overall_score, 0, 100))
factor_scores = {
'materials': material_score,
'environmental': environmental_score,
'operational': operational_score,
'economic': economic_score
}
factor_status = {k: classify_factor_status(v) for k, v in factor_scores.items()}

return {
'material_score': material_score,
Expand Down Expand Up @@ -500,7 +574,7 @@ def run_full_assessment(params):
'noise_reduction': noise_reduction,
'land_use_efficiency': land_use_efficiency,
'sustainability_score': overall_score,
'total_cost': construction_cost * 1e6 + annual_maintenance * 1e6 * 30,
'total_cost': construction_cost * 1e6 + annual_maintenance * 1e6 * ASSESSMENT_LIFETIME_YEARS,
'carbon_intensity': carbon_intensity,
'ee_concrete': ee_concrete,
'ee_steel': ee_steel,
Expand All @@ -514,6 +588,8 @@ def run_full_assessment(params):
'carbon_wood': carbon_wood,
'carbon_frp': carbon_frp,
'carbon_glass': carbon_glass,
'factor_scores': factor_scores,
'factor_status': factor_status,
}


Expand Down Expand Up @@ -767,8 +843,8 @@ def run_full_assessment(params):
with m4:
st.markdown(f"""
<div class="metric-card">
<div class="metric-value">${params['construction_cost'] + params['maintenance_cost']*30:,.0f}M</div>
<div class="metric-label">30-Year Cost</div>
<div class="metric-value">${params['construction_cost'] + params['maintenance_cost']*ASSESSMENT_LIFETIME_YEARS:,.0f}M</div>
<div class="metric-label">{ASSESSMENT_LIFETIME_YEARS}-Year Cost</div>
<div class="metric-delta">Jobs: {results['total_jobs']:,.0f}</div>
</div>""", unsafe_allow_html=True)
with m5:
Expand Down Expand Up @@ -834,6 +910,15 @@ def run_full_assessment(params):
)
st.plotly_chart(fig_radar, use_container_width=True)

st.markdown("#### 🧭 Factor Quality Classification")
factor_status_df = pd.DataFrame([
{'Factor': 'Materials', 'Score': f"{results['factor_scores']['materials']:.1f}", 'Status': results['factor_status']['materials']},
{'Factor': 'Environmental', 'Score': f"{results['factor_scores']['environmental']:.1f}", 'Status': results['factor_status']['environmental']},
{'Factor': 'Operational', 'Score': f"{results['factor_scores']['operational']:.1f}", 'Status': results['factor_status']['operational']},
{'Factor': 'Economic', 'Score': f"{results['factor_scores']['economic']:.1f}", 'Status': results['factor_status']['economic']},
])
st.dataframe(factor_status_df, use_container_width=True, hide_index=True)

# Interaction Analysis
st.markdown("#### 🔄 Cross-Category Interaction Effects")
effects = results['interaction_effects']
Expand Down Expand Up @@ -921,7 +1006,7 @@ def run_full_assessment(params):
• Construction Cost: ${params['construction_cost']:.1f} million
• Annual Maintenance: ${params['maintenance_cost']:.1f} million
• Total Jobs Created: {results['total_jobs']:.0f}
30-Year Maintenance: ${results['total_maintenance_cost']:.1f} million
{ASSESSMENT_LIFETIME_YEARS}-Year Maintenance: ${results['total_maintenance_cost']:.1f} million

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Expand Down Expand Up @@ -955,7 +1040,7 @@ def run_full_assessment(params):
{'Category': 'Environmental', 'Metric': 'Embodied Energy', 'Value': f"{results['total_ee']:.0f}", 'Unit': 'MJ'},
{'Category': 'Operational', 'Metric': 'Operational Score', 'Value': f"{results['operational_score']:.1f}", 'Unit': '/100'},
{'Category': 'Economic', 'Metric': 'Total Jobs', 'Value': f"{results['total_jobs']:.0f}", 'Unit': ''},
{'Category': 'Economic', 'Metric': '30-Year Cost', 'Value': f"{params['construction_cost'] + params['maintenance_cost']*30:.0f}", 'Unit': '$M'},
{'Category': 'Economic', 'Metric': f'{ASSESSMENT_LIFETIME_YEARS}-Year Cost', 'Value': f"{params['construction_cost'] + params['maintenance_cost']*ASSESSMENT_LIFETIME_YEARS:.0f}", 'Unit': '$M'},
]).to_csv(index=False)
st.download_button("📥 Download CSV", csv_data,
file_name=f"monorail_data_{datetime.now().strftime('%Y%m%d')}.csv")
Expand Down