From 300d0eab6301d6aab68fb967f80b27d95db95df7 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Thu, 19 Feb 2026 15:41:17 +0200 Subject: [PATCH 01/24] Web-UI, Web-API - Add what-if scenario simulation to Decision Journey view - New backend POST /method/enautilus/simulate endpoint runs E-NAUTILUS greedily to completion for a chosen objective (best or worst), ephemeral with no DB writes - New ENautilusSimulateRequest/Response/StepResult models with deprioritize flag for worst-case selection - Frontend what-if buttons in iteration detail panel: "best" (blue) and "worst" (red) per objective - Chart overlay renders simulated path as dashed lines with hollow circle markers - Comparison table shows actual vs what-if final values with colored deltas - Extract normalizePoint, buildNormalizationMaps, computeSimulatedJourneySteps helpers in journey-utils - Highlight selected iteration in chart with vertical band - Default visualization pane to 65% height for more chart room - Regenerate frontend client --- desdeo/api/models/__init__.py | 6 + desdeo/api/models/enautilus.py | 34 + desdeo/api/routers/enautilus.py | 166 +++ uv.lock | 2 +- .../decision-journey/decision-journey.svelte | 154 ++- .../custom/decision-journey/journey-utils.ts | 93 ++ .../objective-evolution-chart.svelte | 101 +- .../custom/method_layout/base-layout.svelte | 4 +- webui/src/lib/gen/endpoints/DESDEOFastAPI.ts | 171 +++- .../src/lib/gen/endpoints/DESDEOFastAPIzod.ts | 954 +++++++++++++++--- .../BodyAddProblemJsonProblemAddJsonPost.ts | 10 + .../gen/models/ENautilusSimulateRequest.ts | 21 + .../gen/models/ENautilusSimulateResponse.ts | 21 + ...sSimulateResponseFinalIntermediatePoint.ts | 9 + .../gen/models/ENautilusSimulateStepResult.ts | 22 + ...imulateStepResultIntermediatePointsItem.ts | 9 + ...NautilusSimulateStepResultSelectedPoint.ts | 12 + webui/src/lib/gen/models/SolutionReference.ts | 6 + .../lib/gen/models/SolutionReferenceLite.ts | 3 + .../SolutionReferenceLiteObjectiveValues.ts | 3 + 
.../SolutionReferenceObjectiveValues.ts | 3 + .../models/SolutionReferenceVariableValues.ts | 3 + webui/src/lib/gen/models/index.ts | 13 +- .../interactive_methods/E-NAUTILUS/handler.ts | 25 +- 24 files changed, 1693 insertions(+), 152 deletions(-) create mode 100644 webui/src/lib/gen/models/ENautilusSimulateRequest.ts create mode 100644 webui/src/lib/gen/models/ENautilusSimulateResponse.ts create mode 100644 webui/src/lib/gen/models/ENautilusSimulateResponseFinalIntermediatePoint.ts create mode 100644 webui/src/lib/gen/models/ENautilusSimulateStepResult.ts create mode 100644 webui/src/lib/gen/models/ENautilusSimulateStepResultIntermediatePointsItem.ts create mode 100644 webui/src/lib/gen/models/ENautilusSimulateStepResultSelectedPoint.ts diff --git a/desdeo/api/models/__init__.py b/desdeo/api/models/__init__.py index f7b1fda1c..f8b8c23f5 100644 --- a/desdeo/api/models/__init__.py +++ b/desdeo/api/models/__init__.py @@ -15,6 +15,9 @@ "ENautilusFinalState", "ENautilusRepresentativeSolutionsResponse", "ENautilusSessionTreeResponse", + "ENautilusSimulateRequest", + "ENautilusSimulateResponse", + "ENautilusSimulateStepResult", "ENautilusState", "ENautilusStateResponse", "ENautilusStepRequest", @@ -151,6 +154,9 @@ ENautilusFinalizeResponse, ENautilusRepresentativeSolutionsResponse, ENautilusSessionTreeResponse, + ENautilusSimulateRequest, + ENautilusSimulateResponse, + ENautilusSimulateStepResult, ENautilusStateResponse, ENautilusStepRequest, ENautilusStepResponse, diff --git a/desdeo/api/models/enautilus.py b/desdeo/api/models/enautilus.py index 7485bb788..d9c964a68 100644 --- a/desdeo/api/models/enautilus.py +++ b/desdeo/api/models/enautilus.py @@ -131,6 +131,40 @@ class ENautilusTreeNodeResponse(SQLModel): ) +class ENautilusSimulateRequest(SQLModel): + """Run E-NAUTILUS greedily from a state to completion.""" + + state_id: int = Field(description="Starting ENautilusState to branch from.") + preferred_objective: str = Field(description="Objective symbol to favor 
(e.g., 'f_1').") + deprioritize: bool = Field( + default=False, + description="If True, always pick the WORST value for the objective instead of the best.", + ) + number_of_intermediate_points: int = Field( + default=3, description="Number of intermediate points per simulated step." + ) + + +class ENautilusSimulateStepResult(SQLModel): + """One step in the simulated path.""" + + iteration: int + iterations_left: int + selected_point: dict[str, float] = Field(sa_column=Column(JSON), description="The auto-picked intermediate point.") + selected_point_index: int + intermediate_points: list[dict[str, float]] = Field(sa_column=Column(JSON)) + closeness_measures: list[float] + + +class ENautilusSimulateResponse(SQLModel): + """Result of greedy E-NAUTILUS simulation.""" + + preferred_objective: str + steps: list[ENautilusSimulateStepResult] + final_solution: SolverResults = Field(description="Projected Pareto-optimal solution.") + final_intermediate_point: dict[str, float] = Field(sa_column=Column(JSON)) + + class ENautilusDecisionEventResponse(SQLModel): """A decision event capturing a transition from parent to child node.""" diff --git a/desdeo/api/routers/enautilus.py b/desdeo/api/routers/enautilus.py index fab033cf8..7bd5dc1a2 100644 --- a/desdeo/api/routers/enautilus.py +++ b/desdeo/api/routers/enautilus.py @@ -15,6 +15,9 @@ ENautilusFinalState, ENautilusRepresentativeSolutionsResponse, ENautilusSessionTreeResponse, + ENautilusSimulateRequest, + ENautilusSimulateResponse, + ENautilusSimulateStepResult, ENautilusState, ENautilusStateResponse, ENautilusStepRequest, @@ -509,6 +512,169 @@ def get_session_tree( ) +@router.post("/simulate") +def simulate( + request: ENautilusSimulateRequest, + db_session: Annotated[Session, Depends(get_session)], +) -> ENautilusSimulateResponse: + """Run E-NAUTILUS greedily from a state to completion. 
+ + Given a starting state, this endpoint greedily selects the best intermediate + point for the preferred objective at each iteration until iterations_left == 0, + then projects to the Pareto front. No database writes are performed. + """ + # Load starting state + state_db: StateDB | None = db_session.exec(select(StateDB).where(StateDB.id == request.state_id)).first() + if state_db is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=f"StateDB with id={request.state_id} not found." + ) + + if not isinstance(state_db.state, ENautilusState): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, detail="The referenced state is not an ENautilusState." + ) + + enautilus_state: ENautilusState = state_db.state + result: ENautilusResult = enautilus_state.enautilus_results + + # Load problem + problem_db = db_session.exec(select(ProblemDB).where(ProblemDB.id == state_db.problem_id)).first() + if problem_db is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=f"ProblemDB with id={state_db.problem_id} not found." + ) + + problem = Problem.from_problemdb(problem_db) + + # Validate preferred_objective + obj_symbols = [obj.symbol for obj in problem.objectives] + if request.preferred_objective not in obj_symbols: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"'{request.preferred_objective}' is not a valid objective. Valid: {obj_symbols}", + ) + + # Determine if the preferred objective is maximized. + # When deprioritize is True, invert the selection logic (pick worst instead of best). 
+ pref_obj = next(obj for obj in problem.objectives if obj.symbol == request.preferred_objective) + pref_maximize = pref_obj.maximize if not request.deprioritize else (not pref_obj.maximize) + + # Load non-dominated solutions + non_dom_db = db_session.exec( + select(RepresentativeNonDominatedSolutions).where( + RepresentativeNonDominatedSolutions.id == enautilus_state.non_dominated_solutions_id + ) + ).first() + if non_dom_db is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"RepresentativeNonDominatedSolutions with id={enautilus_state.non_dominated_solutions_id} not found.", + ) + + non_dom_solutions = non_dom_db.solution_data + + # Start from the existing result's intermediate points + current_intermediate_points = result.intermediate_points + current_reachable_indices = result.reachable_point_indices + current_closeness = result.closeness_measures + current_iteration = result.current_iteration + iterations_left = result.iterations_left + + steps: list[ENautilusSimulateStepResult] = [] + + while iterations_left > 0: + # Greedy selection: pick the intermediate point best for preferred_objective + best_idx = _pick_best_for_objective(current_intermediate_points, request.preferred_objective, pref_maximize) + + selected_point = current_intermediate_points[best_idx] + reachable_for_selected = current_reachable_indices[best_idx] + + steps.append( + ENautilusSimulateStepResult( + iteration=current_iteration, + iterations_left=iterations_left, + selected_point=selected_point, + selected_point_index=best_idx, + intermediate_points=current_intermediate_points, + closeness_measures=current_closeness, + ) + ) + + if iterations_left == 0: + break + + # Run E-NAUTILUS step (core computation, no DB writes) + next_result: ENautilusResult = enautilus_step( + problem=problem, + non_dominated_points=non_dom_solutions, + current_iteration=current_iteration, + iterations_left=iterations_left, + selected_point=selected_point, + 
number_of_intermediate_points=request.number_of_intermediate_points, + reachable_point_indices=reachable_for_selected, + ) + + current_intermediate_points = next_result.intermediate_points + current_reachable_indices = next_result.reachable_point_indices + current_closeness = next_result.closeness_measures + current_iteration = next_result.current_iteration + iterations_left = next_result.iterations_left + + # Final selection at iterations_left == 0 + final_best_idx = _pick_best_for_objective(current_intermediate_points, request.preferred_objective, pref_maximize) + final_intermediate_point = current_intermediate_points[final_best_idx] + + steps.append( + ENautilusSimulateStepResult( + iteration=current_iteration, + iterations_left=iterations_left, + selected_point=final_intermediate_point, + selected_point_index=final_best_idx, + intermediate_points=current_intermediate_points, + closeness_measures=current_closeness, + ) + ) + + # Build a result object for projection + final_result = ENautilusResult( + current_iteration=current_iteration, + iterations_left=iterations_left, + intermediate_points=current_intermediate_points, + reachable_best_bounds=[], # not needed for projection + reachable_worst_bounds=[], + closeness_measures=current_closeness, + reachable_point_indices=current_reachable_indices, + ) + + non_dom_df = pl.DataFrame(non_dom_solutions) + representative_solutions = enautilus_get_representative_solutions(problem, final_result, non_dom_df) + final_solution = representative_solutions[final_best_idx] + + return ENautilusSimulateResponse( + preferred_objective=request.preferred_objective, + steps=steps, + final_solution=final_solution, + final_intermediate_point=final_intermediate_point, + ) + + +def _pick_best_for_objective( + intermediate_points: list[dict[str, float]], + objective: str, + maximize: bool, +) -> int: + """Pick the index of the intermediate point with the best value for an objective.""" + best_idx = 0 + best_val = 
intermediate_points[0][objective] + for i in range(1, len(intermediate_points)): + val = intermediate_points[i][objective] + if (maximize and val > best_val) or (not maximize and val < best_val): + best_val = val + best_idx = i + return best_idx + + def _match_chosen_point( chosen: dict[str, float] | None, options: list[dict[str, float]], diff --git a/uv.lock b/uv.lock index 94214f931..f153954fc 100644 --- a/uv.lock +++ b/uv.lock @@ -694,7 +694,7 @@ wheels = [ [[package]] name = "desdeo" -version = "2.2.1" +version = "2.2.2" source = { editable = "." } dependencies = [ { name = "bayesian-optimization" }, diff --git a/webui/src/lib/components/custom/decision-journey/decision-journey.svelte b/webui/src/lib/components/custom/decision-journey/decision-journey.svelte index 2b296299d..def11184d 100644 --- a/webui/src/lib/components/custom/decision-journey/decision-journey.svelte +++ b/webui/src/lib/components/custom/decision-journey/decision-journey.svelte @@ -1,8 +1,9 @@ + + + + + +
+ {#if loading} +
+ Loading map... +
+ {/if} + {#if error} +
+ {error} +
+ {/if} +
+
diff --git a/webui/src/lib/components/custom/decision-journey/decision-journey.svelte b/webui/src/lib/components/custom/decision-journey/decision-journey.svelte index def11184d..9a05193c4 100644 --- a/webui/src/lib/components/custom/decision-journey/decision-journey.svelte +++ b/webui/src/lib/components/custom/decision-journey/decision-journey.svelte @@ -1,5 +1,5 @@ diff --git a/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts b/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts index 465d7c62d..20e067b6a 100644 --- a/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts +++ b/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts @@ -6736,7 +6736,9 @@ export const BuildMapSiteSelectionMapPostResponse = zod }) .describe('A coverage connection edge between two nodes.') ), - center: zod.array(zod.number()) + center: zod.array(zod.number()), + site_variable_symbols: zod.array(zod.string()), + site_node_names: zod.array(zod.string()) }) .describe('Response body for the site selection map endpoint.'); diff --git a/webui/src/lib/gen/models/SiteSelectionMapResponse.ts b/webui/src/lib/gen/models/SiteSelectionMapResponse.ts index 231123d74..2ab425fe4 100644 --- a/webui/src/lib/gen/models/SiteSelectionMapResponse.ts +++ b/webui/src/lib/gen/models/SiteSelectionMapResponse.ts @@ -15,4 +15,6 @@ export interface SiteSelectionMapResponse { nodes: SiteSelectionMapNode[]; edges: SiteSelectionMapEdge[]; center: number[]; + site_variable_symbols: string[]; + site_node_names: string[]; } diff --git a/webui/src/routes/interactive_methods/E-NAUTILUS/+page.svelte b/webui/src/routes/interactive_methods/E-NAUTILUS/+page.svelte index 1e48e3502..6e981262c 100644 --- a/webui/src/routes/interactive_methods/E-NAUTILUS/+page.svelte +++ b/webui/src/routes/interactive_methods/E-NAUTILUS/+page.svelte @@ -1,8 +1,9 @@ {#snippet ColumnHeader({ column, title, colorIdx }: { column: Column; title: string; colorIdx?: number })} @@ -708,7 +819,47 @@ {/if} {#if finalView === 'map'} {#if finalSolution} - +
+
+ +
+ {#if constraintSummary || rpmResult || resolveError} +
+
+ {#if constraintSummary} + Site constraints: {constraintSummary} + {/if} + {#if siteFixings.length > 0} + + {/if} + {#if rpmResult} + + {/if} + {#if rpmResult || siteFixings.length > 0} + + {/if} +
+ {#if resolveError} +
{resolveError}
+ {/if} +
+ {/if} +
{:else}
No solution available for map. @@ -735,8 +886,8 @@ currentPreferenceValues={[]} previousPreferenceType={''} currentPreferenceType={''} - solutionsObjectiveValues={representativeObjectiveValues.length > 0 ? [representativeObjectiveValues[final_selected_index]] : []} - previousObjectiveValues={[]} + solutionsObjectiveValues={finalSolution ? [objective_keys.map(k => { const v = finalSolution.optimal_objectives[k]; return Array.isArray(v) ? v[0] : v as number; })] : []} + previousObjectiveValues={adoptedSolution && originalFinalSolution ? [objective_keys.map(k => { const v = originalFinalSolution.optimal_objectives[k]; return Array.isArray(v) ? v[0] : v as number; })] : []} externalSelectedIndexes={[0]} /> {/if} @@ -753,9 +904,69 @@ + {:else if finalView === 'map' && rpmResult && rpmResult.solver_results.length > 0 && problem_info && originalFinalSolution} +
+
+ + +
+
+ {#if comparisonTab === 'chart'} +
+ { const v = rpmResult.solver_results[0].optimal_objectives[k]; return Array.isArray(v) ? v[0] : v as number; })]} + previousObjectiveValues={[objective_keys.map(k => { const v = originalFinalSolution.optimal_objectives[k]; return Array.isArray(v) ? v[0] : v as number; })]} + externalSelectedIndexes={[0]} + referenceDataLabels={{ previousSolutionLabels: ['Original E-NAUTILUS'] }} + lineLabels={{ '0': 'Constrained' }} + /> +
+ {:else} +
+ + + + + + + + + + + {#each problem_info.objectives as obj, i} + {@const origVal = Array.isArray(originalFinalSolution.optimal_objectives[obj.symbol]) ? originalFinalSolution.optimal_objectives[obj.symbol][0] : originalFinalSolution.optimal_objectives[obj.symbol]} + {@const newVal = Array.isArray(rpmResult.solver_results[0].optimal_objectives[obj.symbol]) ? rpmResult.solver_results[0].optimal_objectives[obj.symbol][0] : rpmResult.solver_results[0].optimal_objectives[obj.symbol]} + {@const delta = (newVal as number) - (origVal as number)} + {@const improved = obj.maximize ? delta > 0 : delta < 0} + + + + + + + {/each} + +
ObjectiveOriginalConstrainedΔ
{obj.name}{formatNumber(origVal as number, 2)}{formatNumber(newVal as number, 2)} + {delta > 0 ? '+' : ''}{formatNumber(delta, 2)} + {improved ? '↑' : delta === 0 ? '' : '↓'} +
+
+ {/if} +
+
{/if} {/snippet} diff --git a/webui/src/routes/interactive_methods/E-NAUTILUS/handler.ts b/webui/src/routes/interactive_methods/E-NAUTILUS/handler.ts index ffebd2159..e8483c2cd 100644 --- a/webui/src/routes/interactive_methods/E-NAUTILUS/handler.ts +++ b/webui/src/routes/interactive_methods/E-NAUTILUS/handler.ts @@ -1,6 +1,6 @@ -import type { ENautilusRepresentativeSolutionsResponse, ENautilusSessionTreeResponse, ENautilusSimulateResponse, ENautilusStateResponse, ENautilusStepRequest, ENautilusStepResponse, ProblemInfo } from "$lib/gen/models"; +import type { ENautilusRepresentativeSolutionsResponse, ENautilusSessionTreeResponse, ENautilusSimulateResponse, ENautilusStateResponse, ENautilusStepRequest, ENautilusStepResponse, ProblemInfo, VariableFixing, RPMState } from "$lib/gen/models"; import type { getRepresentativeMethodEnautilusGetRepresentativeStateIdGetResponse, getSessionTreeMethodEnautilusSessionTreeSessionIdGetResponse, getStateMethodEnautilusGetStateStateIdGetResponse, simulateMethodEnautilusSimulatePostResponse, stepMethodEnautilusStepPostResponse } from "$lib/gen/endpoints/DESDEOFastAPI"; -import { stepMethodEnautilusStepPost, getProblemProblemProblemIdGet, getStateMethodEnautilusGetStateStateIdGet, getRepresentativeMethodEnautilusGetRepresentativeStateIdGet, getSessionTreeMethodEnautilusSessionTreeSessionIdGet, simulateMethodEnautilusSimulatePost } from "$lib/gen/endpoints/DESDEOFastAPI"; +import { stepMethodEnautilusStepPost, getProblemProblemProblemIdGet, getStateMethodEnautilusGetStateStateIdGet, getRepresentativeMethodEnautilusGetRepresentativeStateIdGet, getSessionTreeMethodEnautilusSessionTreeSessionIdGet, simulateMethodEnautilusSimulatePost, createConstrainedVariantProblemProblemIdConstrainedVariantPost, solveSolutionsMethodRpmSolvePost, deleteProblemProblemProblemIdDelete } from "$lib/gen/endpoints/DESDEOFastAPI"; import type { getProblemProblemProblemIdGetResponse } from "$lib/gen/endpoints/DESDEOFastAPI"; import { fetch_sessions, 
create_session } from '../../methods/sessions/handler'; export { fetch_sessions, create_session }; @@ -134,3 +134,93 @@ export async function simulate_enautilus( return response.data; } + +export async function resolveWithSiteConstraints( + problem_id: number, + fixings: VariableFixing[], + reference_point: Record, + solver?: string, +): Promise<{ constrained_problem_id: number; rpm_result: RPMState } | null> { + // Step 1: Create constrained variant + const variantResp = await createConstrainedVariantProblemProblemIdConstrainedVariantPost( + problem_id, + { variable_fixings: fixings } + ); + + if (variantResp.status !== 200) { + console.error("Failed to create constrained variant:", variantResp.status); + return null; + } + + const constrained_problem_id = variantResp.data.problem_id; + + // Step 2: Solve with RPM using E-NAUTILUS final objectives as reference point + try { + const rpmResp = await solveSolutionsMethodRpmSolvePost({ + problem_id: constrained_problem_id, + preference: { + preference_type: "reference_point", + aspiration_levels: reference_point, + }, + solver: solver ?? undefined, + }); + + if (rpmResp.status !== 200) { + console.error("RPM solve failed:", rpmResp.status); + // Cleanup on failure + await cleanupConstrainedVariant(constrained_problem_id); + return null; + } + + return { + constrained_problem_id, + rpm_result: rpmResp.data, + }; + } catch (e) { + // Cleanup on error + await cleanupConstrainedVariant(constrained_problem_id); + throw e; + } +} + +/** + * Unroll tensor variables in a SolverResults object. + * RPM returns tensor variables as e.g. {"sv": [[v1], [v2], ...]} for shape [N, 1]. + * The map and other components expect unrolled names: {"sv_1": v1, "sv_2": v2, ...}. + * Scalar values and already-unrolled variables are passed through unchanged. 
+ */ +export function unrollTensorVariables( + variables: Record +): Record { + const result: Record = {}; + for (const [key, value] of Object.entries(variables)) { + if (Array.isArray(value) && value.length > 0 && Array.isArray(value[0])) { + // Nested list (tensor variable) — flatten with 1-based indexing + let flatIdx = 1; + const flatten = (arr: unknown[]): void => { + for (const el of arr) { + if (Array.isArray(el)) { + flatten(el); + } else { + result[`${key}_${flatIdx}`] = el; + flatIdx++; + } + } + }; + flatten(value); + } else { + result[key] = value; + } + } + return result; +} + +export async function cleanupConstrainedVariant(constrained_problem_id: number): Promise { + try { + await deleteProblemProblemProblemIdDelete(constrained_problem_id); + } catch (e) { + console.warn("Failed to clean up constrained variant:", e); + } +} + +export type { VariableFixing }; From e99678d16989551fa3e168d0d9af068bcbc08631 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Thu, 26 Mar 2026 16:57:48 +0200 Subject: [PATCH 09/24] Rahti deployment testing. 
--- .s2i/bin/assemble | 110 +++++++----------------------- .s2i/environment | 24 ++++++- deploy/api-buildconfig.yaml | 85 +++++++++++++++++++++++ deploy/api-deployment.yaml | 124 ++++++++++++++++++++++++++++++++++ deploy/api-imagestream.yaml | 18 +++++ deploy/db-init-job.yaml | 63 +++++++++++++++++ deploy/postgres.yaml | 108 +++++++++++++++++++++++++++++ deploy/secrets-template.yaml | 56 +++++++++++++++ deploy/webui-buildconfig.yaml | 62 +++++++++++++++++ deploy/webui-deployment.yaml | 98 +++++++++++++++++++++++++++ deploy/webui-imagestream.yaml | 11 +++ desdeo/api/app.py | 6 ++ desdeo/api/db_init_prod.py | 84 +++++++++++++++++++++++ webui/Dockerfile | 71 +++++++++++++++++++ 14 files changed, 833 insertions(+), 87 deletions(-) create mode 100644 deploy/api-buildconfig.yaml create mode 100644 deploy/api-deployment.yaml create mode 100644 deploy/api-imagestream.yaml create mode 100644 deploy/db-init-job.yaml create mode 100644 deploy/postgres.yaml create mode 100644 deploy/secrets-template.yaml create mode 100644 deploy/webui-buildconfig.yaml create mode 100644 deploy/webui-deployment.yaml create mode 100644 deploy/webui-imagestream.yaml create mode 100644 desdeo/api/db_init_prod.py create mode 100644 webui/Dockerfile diff --git a/.s2i/bin/assemble b/.s2i/bin/assemble index d6d56e973..7acaa3c6c 100644 --- a/.s2i/bin/assemble +++ b/.s2i/bin/assemble @@ -1,98 +1,36 @@ #!/bin/bash - -function is_django_installed() { - python -c "import django" &>/dev/null -} - -function should_collectstatic() { - is_django_installed && [[ -z "$DISABLE_COLLECTSTATIC" ]] -} - -function virtualenv_bin() { - # New versions of Python (>3.6) should use venv module - # from stdlib instead of virtualenv package - python3.12 -m venv $1 -} - -# Install pipenv or micropipenv to the separate virtualenv to isolate it -# from system Python packages and packages in the main -# virtualenv. Executable is simlinked into ~/.local/bin -# to be accessible. 
This approach is inspired by pipsi -# (pip script installer). -function install_tool() { - echo "---> Installing $1 packaging tool ..." - VENV_DIR=$HOME/.local/venvs/$1 - virtualenv_bin "$VENV_DIR" - # First, try to install the tool without --isolated which means that if you - # have your own PyPI mirror, it will take it from there. If this try fails, try it - # again with --isolated which ignores external pip settings (env vars, config file) - # and installs the tool from PyPI (needs internet connetion). - # $1$2 combines package name with [extras] or version specifier if is defined as $2``` - if ! $VENV_DIR/bin/pip install -U $1$2; then - echo "WARNING: Installation of $1 failed, trying again from official PyPI with pip --isolated install" - $VENV_DIR/bin/pip install --isolated -U $1$2 # Combines package name with [extras] or version specifier if is defined as $2``` - fi - mkdir -p $HOME/.local/bin - ln -s $VENV_DIR/bin/$1 $HOME/.local/bin/$1 -} - +# S2I assemble script for DESDEO API (FastAPI / gunicorn + uvicorn). +# +# Key differences from the original script: +# - Uses uv instead of pip for dependency installation. +# - UV_PROJECT_ENVIRONMENT points uv at the S2I-managed virtualenv +# (/opt/app-root) so it does NOT create a separate .venv directory. +# - --frozen → reproduces exactly what is pinned in uv.lock. +# - --no-dev → skips dev-only deps (pytest, ruff, etc.). +# - --group web --group server → pulls in FastAPI, gunicorn, uvicorn, etc. +# - Django collectstatic block removed (not applicable here). +# set -e - -# First of all, check that we don't have disallowed combination of ENVs -if [[ ! -z "$ENABLE_PIPENV" && ! -z "$ENABLE_MICROPIPENV" ]]; then - echo "ERROR: Pipenv and micropipenv cannot be enabled at the same time!" - # podman/buildah does not relay this exit code but it will be fixed hopefuly - # https://github.com/containers/buildah/issues/2305 - exit 3 -fi - shopt -s dotglob + echo "---> Installing application source ..." 
mv /tmp/src/* "$HOME" -# set permissions for any installed artifacts +# Restore permissions after source injection. fix-permissions /opt/app-root -P +echo "---> Installing uv ..." +pip install -q --upgrade pip +pip install -q uv -if [[ ! -z "$UPGRADE_PIP_TO_LATEST" ]]; then - echo "---> Upgrading pip, setuptools and wheel to latest version ..." - if ! pip install -U pip setuptools wheel; then - echo "WARNING: Installation of the latest pip, setuptools and wheel failed, trying again from official PyPI with pip --isolated install" - pip install --isolated -U pip setuptools wheel - fi -fi - -pip install $DESDEO_INSTALL - - -if should_collectstatic; then - ( - echo "---> Collecting Django static files ..." - - APP_HOME=$(readlink -f "${APP_HOME:-.}") - # Change the working directory to APP_HOME - PYTHONPATH="$(pwd)${PYTHONPATH:+:$PYTHONPATH}" - cd "$APP_HOME" - - # Look for 'manage.py' in the current directory - manage_file=./manage.py - - if [[ ! -f "$manage_file" ]]; then - echo "WARNING: seems that you're using Django, but we could not find a 'manage.py' file." - echo "'manage.py collectstatic' ignored." - exit - fi - - if ! python $manage_file collectstatic --dry-run --noinput &> /dev/null; then - echo "WARNING: could not run 'manage.py collectstatic'. To debug, run:" - echo " $ python $manage_file collectstatic --noinput" - echo "Ignore this warning if you're not serving static files with Django." - exit - fi +echo "---> Syncing Python dependencies via uv ..." +# UV_PROJECT_ENVIRONMENT: use the existing S2I venv instead of creating .venv. +# UV_PYTHON_PREFERENCE: do not let uv download its own Python interpreter. +UV_PROJECT_ENVIRONMENT="${VIRTUAL_ENV:-/opt/app-root}" \ +UV_PYTHON_PREFERENCE=only-system \ + uv sync --frozen --no-dev --group web --group server - python $manage_file collectstatic --noinput - ) -fi +echo "---> Dependencies installed." -# set permissions for any installed artifacts +# Restore permissions for any artifacts written during install. 
fix-permissions /opt/app-root -P diff --git a/.s2i/environment b/.s2i/environment index 30e018bca..96b178108 100644 --- a/.s2i/environment +++ b/.s2i/environment @@ -1,5 +1,27 @@ +# S2I environment variables for the DESDEO API build and runtime. +# These are read both during the S2I *build* (assemble) and at *runtime*. +# +# UPGRADE_PIP_TO_LATEST is no longer needed because the assemble script +# upgrades pip explicitly before installing uv. Kept here as a no-op for +# compatibility with any base-image hooks that check for it. UPGRADE_PIP_TO_LATEST=1 + +# Entry point: Gunicorn loads the FastAPI app from this module path. APP_MODULE=desdeo.api.app:app + +# Gunicorn flags. +# --workers=1 Single worker is safe for the current single-pod setup. +# Increase to 2-4 if the pod gets >1 CPU allocated. +# --worker-class uvicorn.workers.UvicornWorker gives async support. +# --bind Must be 0.0.0.0:8080 to match the Service targetPort. +# --access-logfile - Log to stdout so OpenShift can capture it. GUNICORN_CMD_ARGS=--bind=0.0.0.0:8080 --workers=1 --access-logfile=- --worker-class uvicorn.workers.UvicornWorker -DESDEO_INSTALL=. --group web --group server + +# Passed to `uv sync` in the assemble script. +# Format: flags passed after the implicit project root (.). +# --group web --group server: include the FastAPI/gunicorn/uvicorn dependency groups. +# NOTE: This variable is used in assemble, not by pip directly. +DESDEO_INSTALL=--group web --group server + +# Runtime default; override via Deployment env or Secret. DEBUG=false diff --git a/deploy/api-buildconfig.yaml b/deploy/api-buildconfig.yaml new file mode 100644 index 000000000..7e70ba950 --- /dev/null +++ b/deploy/api-buildconfig.yaml @@ -0,0 +1,85 @@ +# deploy/api-buildconfig.yaml +# +# BuildConfig for the DESDEO API using OpenShift S2I strategy. +# +# Builder image: python:3.12-ubi9 from the shared 'openshift' namespace. 
+# The custom .s2i/bin/assemble script in the repository overrides the default +# assemble behaviour and uses uv to install dependencies. +# +# Triggers: +# - GitHub webhook (push to DEPLOY_BRANCH) ← main CI/CD trigger +# - ImageChange on the builder image ← rebuilds on Python security patches +# +# To get the webhook URL after applying: +# oc describe bc/desdeo-api | grep -A2 Webhook +--- +apiVersion: build.openshift.io/v1 +kind: BuildConfig +metadata: + name: desdeo-api + labels: + app: desdeo-api +spec: + # ── Source ─────────────────────────────────────────────────────────────── + source: + type: Git + git: + uri: https://github.com/industrial-optimization-group/DESDEO.git + # Replace with the branch you want to track (e.g. master, deploy, main). + ref: + # No contextDir: the pyproject.toml and .s2i/ are at the repo root. + + # ── Build strategy ─────────────────────────────────────────────────────── + strategy: + type: Source + sourceStrategy: + # Python 3.12 UBI9 S2I builder available in every Rahti project via the + # shared 'openshift' ImageStream. Verify with: + # oc get is python -n openshift + from: + kind: ImageStreamTag + name: python:3.12-ubi9 + namespace: openshift + # Environment variables available during the S2I *build* (assemble). + # Runtime env vars go in the Deployment, not here. + env: + - name: UPGRADE_PIP_TO_LATEST + value: "1" + - name: DESDEO_INSTALL + value: "--group web --group server" + - name: DEBUG + value: "false" + + # ── Output ─────────────────────────────────────────────────────────────── + output: + to: + kind: ImageStreamTag + name: desdeo-api:latest + + # ── Triggers ───────────────────────────────────────────────────────────── + triggers: + # Rebuild when the Python builder image is updated (security patches). + - type: ImageChange + imageChange: {} + # GitHub webhook — authenticates with WEBHOOK_SECRET_API from the Secret. 
+ - type: GitHub + github: + secretReference: + name: desdeo-secrets + # Key in the Secret that holds the webhook secret string. + # Note: this uses secretReference (key in existing Secret) rather than + # an inline secret so we don't store the value in this manifest. + # Manual / API trigger. + - type: ConfigChange + + # ── Run policy ─────────────────────────────────────────────────────────── + runPolicy: Serial + + # ── Resource limits for the build pod ──────────────────────────────────── + resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "2Gi" + cpu: "1" diff --git a/deploy/api-deployment.yaml b/deploy/api-deployment.yaml new file mode 100644 index 000000000..1d4cae5e3 --- /dev/null +++ b/deploy/api-deployment.yaml @@ -0,0 +1,124 @@ +# deploy/api-deployment.yaml +# +# Deployment, Service, and Route for the DESDEO FastAPI backend. +# +# The Deployment has an ImageChange trigger: whenever the BuildConfig pushes a +# new image to the desdeo-api ImageStream, the pod is replaced with a rolling +# update automatically. +# +# All secrets are consumed as environment variables from the desdeo-secrets +# Secret; no secret values live in this file. +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: desdeo-api + labels: + app: desdeo-api + annotations: + # This annotation tells the OpenShift image change controller to update the + # Deployment when a new image is pushed to the ImageStream. + image.openshift.io/triggers: > + [{"from":{"kind":"ImageStreamTag","name":"desdeo-api:latest"}, + "fieldPath":"spec.template.spec.containers[0].image"}] +spec: + replicas: 1 + selector: + matchLabels: + app: desdeo-api + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 # zero-downtime: new pod must be ready before old is removed + maxSurge: 1 + template: + metadata: + labels: + app: desdeo-api + spec: + containers: + - name: api + # Placeholder: the image change annotation above overwrites this on deploy. 
+ image: desdeo-api:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + name: http + protocol: TCP + env: + # ── Pulled from Secret ──────────────────────────────────────────── + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DATABASE_URL + - name: SECRET_KEY + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: SECRET_KEY + # ── Static runtime config ───────────────────────────────────────── + - name: DEBUG + value: "false" + # CORS: allow the public webui origin. + - name: ALLOWED_ORIGINS + value: "https://gialmisi-desdeo-webui.rahtiapp.fi" + # ── Gunicorn (matches .s2i/environment) ─────────────────────────── + - name: APP_MODULE + value: "desdeo.api.app:app" + - name: GUNICORN_CMD_ARGS + value: "--bind=0.0.0.0:8080 --workers=1 --access-logfile=- --worker-class uvicorn.workers.UvicornWorker" + resources: + requests: + memory: "512Mi" + cpu: "250m" + limits: + memory: "1Gi" + cpu: "1" + readinessProbe: + httpGet: + path: /health # adjust if the FastAPI app exposes a different health endpoint + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 10 + failureThreshold: 3 + livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 20 + failureThreshold: 3 +--- +apiVersion: v1 +kind: Service +metadata: + name: desdeo-api + labels: + app: desdeo-api +spec: + selector: + app: desdeo-api + ports: + - name: http + port: 8080 + targetPort: 8080 +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: desdeo-api + labels: + app: desdeo-api +spec: + host: gialmisi-desdeo-api.rahtiapp.fi + to: + kind: Service + name: desdeo-api + port: + targetPort: http + tls: + # edge: TLS terminated at the Rahti HAProxy; traffic to the pod is plain HTTP. + termination: edge + # Redirect any accidental HTTP requests to HTTPS. 
+ insecureEdgeTerminationPolicy: Redirect diff --git a/deploy/api-imagestream.yaml b/deploy/api-imagestream.yaml new file mode 100644 index 000000000..69fdfb647 --- /dev/null +++ b/deploy/api-imagestream.yaml @@ -0,0 +1,18 @@ +# deploy/api-imagestream.yaml +# +# ImageStream for the DESDEO API container image. +# The BuildConfig writes new image revisions here; the Deployment reads from here. +# Separating build output from deployment allows rolling updates to trigger +# automatically when a new image is pushed. +--- +apiVersion: image.openshift.io/v1 +kind: ImageStream +metadata: + name: desdeo-api + labels: + app: desdeo-api +spec: + lookupPolicy: + # Allow Deployments in this namespace to reference the image by its + # ImageStreamTag name without needing the full registry URL. + local: true diff --git a/deploy/db-init-job.yaml b/deploy/db-init-job.yaml new file mode 100644 index 000000000..afd9aadc5 --- /dev/null +++ b/deploy/db-init-job.yaml @@ -0,0 +1,63 @@ +# deploy/db-init-job.yaml +# +# One-shot Kubernetes Job that runs desdeo/api/db_init_prod.py using the API +# image. Run this: +# - After the very first deployment (creates tables + seeds admin user). +# - After a deliberate database wipe (the script is idempotent on re-run). +# +# The Job is NOT re-triggered automatically on each new image build. +# If you add new tables in a later release use a proper migration tool +# (Alembic) instead of re-running this Job. +# +# Usage: +# # Replace with the actual image pullspec, e.g.: +# # image-registry.openshift-image-registry.svc:5000//desdeo-api:latest +# oc create -f deploy/db-init-job.yaml +# +# # Watch it: +# oc logs -f job/desdeo-db-init +# +# # Clean up after success: +# oc delete job desdeo-db-init +# +# If you need to re-run it (e.g. to re-seed after a wipe), delete the old Job +# first then create it again. 
+--- +apiVersion: batch/v1 +kind: Job +metadata: + name: desdeo-db-init + labels: + app: desdeo-api + component: db-init +spec: + # Do not restart the pod on success. + backoffLimit: 3 + # Auto-clean completed Job pods after 1 hour. + ttlSecondsAfterFinished: 3600 + template: + metadata: + labels: + app: desdeo-api + component: db-init + spec: + restartPolicy: Never + containers: + - name: db-init + # Use the same image as the API Deployment. + # Replace with your Rahti project name. + image: image-registry.openshift-image-registry.svc:5000//desdeo-api:latest + # Run the production init script. + command: ["python", "desdeo/api/db_init_prod.py"] + envFrom: + # Inject DATABASE_URL, SECRET_KEY, DESDEO_ADMIN_USERNAME, + # DESDEO_ADMIN_PASSWORD from the shared Secret. + - secretRef: + name: desdeo-secrets + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" diff --git a/deploy/postgres.yaml b/deploy/postgres.yaml new file mode 100644 index 000000000..1ed80fea1 --- /dev/null +++ b/deploy/postgres.yaml @@ -0,0 +1,108 @@ +# deploy/postgres.yaml +# +# In-cluster PostgreSQL using the Bitnami image, which is designed to run +# as a non-root user and is therefore compatible with OpenShift / Rahti SCCs. +# +# Apply with: +# oc apply -f deploy/postgres.yaml +# +# Bitnami env var names differ from the official postgres image: +# POSTGRESQL_USERNAME → creates a non-superuser (matches DATABASE_URL user) +# POSTGRESQL_PASSWORD → password for POSTGRESQL_USERNAME +# POSTGRESQL_DATABASE → database to create on first start +# POSTGRESQL_POSTGRES_PASSWORD → password for the postgres superuser +# +# Data is persisted in the PVC mounted at /bitnami/postgresql. +--- +apiVersion: v1 +kind: Service +metadata: + name: desdeo-postgres + labels: + app: desdeo-postgres +spec: + # ClusterIP only — Postgres is not exposed outside the cluster. 
+ clusterIP: None # headless service for StatefulSet DNS + selector: + app: desdeo-postgres + ports: + - name: postgres + port: 5432 + targetPort: 5432 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: desdeo-postgres + labels: + app: desdeo-postgres +spec: + selector: + matchLabels: + app: desdeo-postgres + serviceName: desdeo-postgres + replicas: 1 + template: + metadata: + labels: + app: desdeo-postgres + spec: + containers: + - name: postgres + image: docker.io/bitnami/postgresql:16 + ports: + - containerPort: 5432 + name: postgres + env: + - name: POSTGRESQL_USERNAME + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: POSTGRES_USER + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: POSTGRES_PASSWORD + - name: POSTGRESQL_DATABASE + value: desdeo + - name: POSTGRESQL_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: POSTGRES_SUPERUSER_PASSWORD + volumeMounts: + - name: postgres-data + mountPath: /bitnami/postgresql + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + livenessProbe: + exec: + command: + - /bin/sh + - -c + - pg_isready -U $POSTGRESQL_USERNAME -d desdeo + initialDelaySeconds: 30 + periodSeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - pg_isready -U $POSTGRESQL_USERNAME -d desdeo + initialDelaySeconds: 5 + periodSeconds: 5 + volumeClaimTemplates: + - metadata: + name: postgres-data + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 2Gi diff --git a/deploy/secrets-template.yaml b/deploy/secrets-template.yaml new file mode 100644 index 000000000..fa04e0699 --- /dev/null +++ b/deploy/secrets-template.yaml @@ -0,0 +1,56 @@ +# deploy/secrets-template.yaml +# +# DO NOT commit real values to git. +# +# Usage: +# 1. Copy this file: cp deploy/secrets-template.yaml deploy/secrets.yaml +# 2. Fill in all placeholders in deploy/secrets.yaml +# 3. 
Apply: oc apply -f deploy/secrets.yaml +# 4. Keep deploy/secrets.yaml out of version control. +# +# All values must be base64-encoded. Quick helper: +# echo -n 'myvalue' | base64 +# +# Or use --from-literal to skip manual encoding: +# oc create secret generic desdeo-secrets \ +# --from-literal=POSTGRES_USER=desdeo \ +# --from-literal=POSTGRES_PASSWORD= \ +# --from-literal=POSTGRES_SUPERUSER_PASSWORD= \ +# --from-literal=DATABASE_URL='postgresql://desdeo:@desdeo-postgres:5432/desdeo' \ +# --from-literal=SECRET_KEY= \ +# --from-literal=DESDEO_ADMIN_USERNAME=admin \ +# --from-literal=DESDEO_ADMIN_PASSWORD= \ +# --dry-run=client -o yaml > deploy/secrets.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: desdeo-secrets +type: Opaque +stringData: + # ── PostgreSQL ────────────────────────────────────────────────────────── + # Application user (non-superuser). Must match DATABASE_URL below. + POSTGRES_USER: desdeo + POSTGRES_PASSWORD: + # Superuser password (used internally by Bitnami image). + POSTGRES_SUPERUSER_PASSWORD: + + # ── API runtime ───────────────────────────────────────────────────────── + # Full DSN consumed by the FastAPI app and db_init_prod.py. + # Host must be the Kubernetes Service name defined in postgres.yaml. + DATABASE_URL: "postgresql://desdeo:@desdeo-postgres:5432/desdeo" + + # JWT / session signing key — generate with: + # python -c "import secrets; print(secrets.token_hex(64))" + SECRET_KEY: + + # ── DB init job ───────────────────────────────────────────────────────── + # Credentials for the initial analyst user seeded by the db-init Job. + DESDEO_ADMIN_USERNAME: admin + DESDEO_ADMIN_PASSWORD: + + # ── Webhook secrets (BuildConfigs) ────────────────────────────────────── + # Random strings used to authenticate GitHub webhook payloads. 
+ # Generate with: python -c "import secrets; print(secrets.token_hex(24))" + WEBHOOK_SECRET_API: + WEBHOOK_SECRET_WEBUI: diff --git a/deploy/webui-buildconfig.yaml b/deploy/webui-buildconfig.yaml new file mode 100644 index 000000000..0420ce945 --- /dev/null +++ b/deploy/webui-buildconfig.yaml @@ -0,0 +1,62 @@ +# deploy/webui-buildconfig.yaml +# +# BuildConfig for the DESDEO web UI using Docker build strategy. +# +# The Dockerfile lives at webui/Dockerfile in the repository. +# contextDir: webui → all COPY paths in the Dockerfile are relative to webui/. +# +# VITE_API_URL is baked into the client-side bundle at build time. +# It must be the *public* HTTPS URL of the API Route. +# If you ever change the API hostname, you must trigger a new webui build. +--- +apiVersion: build.openshift.io/v1 +kind: BuildConfig +metadata: + name: desdeo-webui + labels: + app: desdeo-webui +spec: + # ── Source ─────────────────────────────────────────────────────────────── + source: + type: Git + git: + uri: https://github.com/industrial-optimization-group/DESDEO.git + ref: + # Only the webui/ subtree is needed as the Docker build context. + contextDir: webui + + # ── Build strategy ─────────────────────────────────────────────────────── + strategy: + type: Docker + dockerStrategy: + dockerfilePath: Dockerfile + # Build arguments — passed as ARG to the Dockerfile. + buildArgs: + - name: VITE_API_URL + value: "https://gialmisi-desdeo-api.rahtiapp.fi" + + # ── Output ─────────────────────────────────────────────────────────────── + output: + to: + kind: ImageStreamTag + name: desdeo-webui:latest + + # ── Triggers ───────────────────────────────────────────────────────────── + triggers: + - type: GitHub + github: + secretReference: + name: desdeo-secrets + - type: ConfigChange + + # ── Build pod resources ─────────────────────────────────────────────────── + # npm ci + vite build is memory-hungry; 2Gi is usually sufficient. 
+ resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "2Gi" + cpu: "1" + + runPolicy: Serial diff --git a/deploy/webui-deployment.yaml b/deploy/webui-deployment.yaml new file mode 100644 index 000000000..9589327cc --- /dev/null +++ b/deploy/webui-deployment.yaml @@ -0,0 +1,98 @@ +# deploy/webui-deployment.yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: desdeo-webui + labels: + app: desdeo-webui + annotations: + image.openshift.io/triggers: > + [{"from":{"kind":"ImageStreamTag","name":"desdeo-webui:latest"}, + "fieldPath":"spec.template.spec.containers[0].image"}] +spec: + replicas: 1 + selector: + matchLabels: + app: desdeo-webui + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + template: + metadata: + labels: + app: desdeo-webui + spec: + containers: + - name: webui + image: desdeo-webui:latest + imagePullPolicy: Always + ports: + - containerPort: 3000 + name: http + protocol: TCP + env: + # adapter-node reads PORT to know which port to listen on. + - name: PORT + value: "3000" + # Server-side +server.ts proxy routes use this to reach the API + # over the internal cluster network (avoids the public ingress). + - name: API_URL + value: "http://desdeo-api:8080" + # ORIGIN is required by SvelteKit for CSRF protection when behind a proxy. 
+ - name: ORIGIN + value: "https://gialmisi-desdeo-webui.rahtiapp.fi" + resources: + requests: + memory: "128Mi" + cpu: "50m" + limits: + memory: "256Mi" + cpu: "500m" + readinessProbe: + httpGet: + path: / + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + livenessProbe: + httpGet: + path: / + port: 3000 + initialDelaySeconds: 20 + periodSeconds: 20 + failureThreshold: 3 +--- +apiVersion: v1 +kind: Service +metadata: + name: desdeo-webui + labels: + app: desdeo-webui +spec: + selector: + app: desdeo-webui + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: desdeo-webui + labels: + app: desdeo-webui +spec: + host: gialmisi-desdeo-webui.rahtiapp.fi + to: + kind: Service + name: desdeo-webui + port: + targetPort: http + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect diff --git a/deploy/webui-imagestream.yaml b/deploy/webui-imagestream.yaml new file mode 100644 index 000000000..0348a28bc --- /dev/null +++ b/deploy/webui-imagestream.yaml @@ -0,0 +1,11 @@ +# deploy/webui-imagestream.yaml +--- +apiVersion: image.openshift.io/v1 +kind: ImageStream +metadata: + name: desdeo-webui + labels: + app: desdeo-webui +spec: + lookupPolicy: + local: true diff --git a/desdeo/api/app.py b/desdeo/api/app.py index 7b71e1032..8dbf6bf37 100644 --- a/desdeo/api/app.py +++ b/desdeo/api/app.py @@ -40,6 +40,12 @@ app.include_router(enautilus.router) app.include_router(gdm_score_bands_routers.router) + +@app.get("/health") +def health(): + return {"status": "ok"} + + origins = AuthConfig.cors_origins app.add_middleware( diff --git a/desdeo/api/db_init_prod.py b/desdeo/api/db_init_prod.py new file mode 100644 index 000000000..d86126427 --- /dev/null +++ b/desdeo/api/db_init_prod.py @@ -0,0 +1,84 @@ +"""Production database initialisation script. + +Run once as a Kubernetes Job after the first deployment (or after a full +database wipe). 
It is intentionally idempotent: running it multiple times +against the same database is safe. + +What it does +------------ +1. Creates all SQLModel tables if they do not already exist. + (Uses create_all which is a no-op for tables that are present.) +2. Seeds an initial analyst user whose credentials come from env vars. + If the user already exists the step is skipped. + +Environment variables required +------------------------------- +DATABASE_URL PostgreSQL DSN, e.g. + postgresql://desdeo:@desdeo-postgres:5432/desdeo +DESDEO_ADMIN_USERNAME Username for the seeded analyst account. +DESDEO_ADMIN_PASSWORD Password for the seeded analyst account. + +Optional +-------- +DESDEO_ADMIN_GROUP Group name for the seeded user (default: "admin"). +""" + +import os +import sys + +from sqlmodel import Session, SQLModel, select + +# Import the engine after DATABASE_URL is in the environment so the config +# module picks it up correctly. +from desdeo.api.db import engine +from desdeo.api.models import User, UserRole +from desdeo.api.routers.user_authentication import get_password_hash + + +def create_tables() -> None: + print("[db-init] Creating database tables (create_all is a no-op for existing tables)...") + SQLModel.metadata.create_all(engine) + print("[db-init] Tables ready.") + + +def seed_admin_user() -> None: + username = os.environ.get("DESDEO_ADMIN_USERNAME") + password = os.environ.get("DESDEO_ADMIN_PASSWORD") + group = os.environ.get("DESDEO_ADMIN_GROUP", "admin") + + if not username or not password: + print("[db-init] WARNING: DESDEO_ADMIN_USERNAME or DESDEO_ADMIN_PASSWORD not set — skipping user seed.") + return + + with Session(engine) as session: + existing = session.exec(select(User).where(User.username == username)).first() + + if existing: + print(f"[db-init] User '{username}' already exists — skipping.") + return + + user = User( + username=username, + password_hash=get_password_hash(password), + role=UserRole.analyst, + group=group, + ) + session.add(user) 
+ session.commit() + print(f"[db-init] Created user '{username}' (role=analyst, group={group}).") + + +def main() -> None: + database_url = os.environ.get("DATABASE_URL") + if not database_url: + print("[db-init] ERROR: DATABASE_URL is not set.", file=sys.stderr) + sys.exit(1) + + print(f"[db-init] Using database: {database_url.split('@')[-1]}") # hide credentials + create_tables() + seed_admin_user() + print("[db-init] Done.") + + +if __name__ == "__main__": + main() diff --git a/webui/Dockerfile b/webui/Dockerfile new file mode 100644 index 000000000..aeae76740 --- /dev/null +++ b/webui/Dockerfile @@ -0,0 +1,71 @@ +# Multi-stage Dockerfile for the DESDEO web UI. +# Build context: webui/ (set via BuildConfig contextDir: webui) +# +# Two URL variables control how the frontend talks to the backend API: +# +# VITE_API_URL (build-time ARG / ENV) +# Baked into the client-side JavaScript bundle by Vite. +# Used by browser code: import.meta.env.VITE_API_URL +# Must be the *public* HTTPS route of the API. +# Default: https://gialmisi-desdeo-api.rahtiapp.fi +# +# API_URL (runtime ENV in the container / Deployment env) +# Used by SvelteKit server-side route handlers (+server.ts) that +# proxy requests to the backend. +# Should point to the *internal cluster* service so traffic stays +# inside the cluster and avoids the public ingress. +# Default here: http://desdeo-api:8080 +# Override in the Deployment manifest if the service name differs. + +# --------------------------------------------------------------------------- +# Stage 1 – build +# --------------------------------------------------------------------------- +FROM node:20-alpine AS build + +WORKDIR /app + +# Install dependencies first for layer-cache efficiency. +COPY package*.json ./ +RUN npm ci + +# Copy the rest of the source. +COPY . . + +# VITE_API_URL is baked into the bundle at build time. 
+ARG VITE_API_URL=https://gialmisi-desdeo-api.rahtiapp.fi +ENV VITE_API_URL=$VITE_API_URL + +# NPM_RUN=start:production makes svelte.config.js select adapter-node. +RUN NPM_RUN=start:production npm run build + +# Prune devDependencies so we can copy node_modules into the runtime stage. +RUN npm prune --omit=dev + +# --------------------------------------------------------------------------- +# Stage 2 – runtime +# --------------------------------------------------------------------------- +FROM node:20-alpine + +WORKDIR /app + +# Copy the compiled adapter-node output and the pruned dependencies. +COPY --from=build /app/build ./build +COPY --from=build /app/node_modules ./node_modules +COPY --from=build /app/package.json ./package.json + +# OpenShift runs containers with an arbitrary UID in GID 0 (root group). +# Setting group ownership to 0 with group-write allows the process to run +# under any UID without permission errors. +RUN chgrp -R 0 /app && chmod -R g=u /app + +# adapter-node default port; can be overridden at runtime via PORT env. +EXPOSE 3000 +ENV PORT=3000 + +# Server-side proxy routes use this to reach the backend inside the cluster. +ENV API_URL=http://desdeo-api:8080 + +# Run as a non-root UID (required by Rahti / OpenShift SCC). +USER 1001 + +CMD ["node", "build"] From e88332ea810410c9a8b697f724c5d7d4e88618a0 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Fri, 27 Mar 2026 09:53:59 +0200 Subject: [PATCH 10/24] Added secrets to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index fb17e8ea0..6719b8e2c 100644 --- a/.gitignore +++ b/.gitignore @@ -161,3 +161,4 @@ scratch/ # Surrogate models surrogatemodels/ utopia_forest.json +deploy/secrets.yaml From e5c8606b3e68d69cc57a65e48077dec53b323cd3 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Fri, 27 Mar 2026 13:04:19 +0200 Subject: [PATCH 11/24] Updated build and Dockerfiles. 
--- deploy/api-buildconfig.yaml | 4 ++-- deploy/postgres.yaml | 29 ++++++++++++----------------- deploy/webui-buildconfig.yaml | 11 +++++++---- webui/Dockerfile | 4 ++-- 4 files changed, 23 insertions(+), 25 deletions(-) diff --git a/deploy/api-buildconfig.yaml b/deploy/api-buildconfig.yaml index 7e70ba950..6df1242da 100644 --- a/deploy/api-buildconfig.yaml +++ b/deploy/api-buildconfig.yaml @@ -24,9 +24,9 @@ spec: source: type: Git git: - uri: https://github.com/industrial-optimization-group/DESDEO.git + uri: https://github.com/gialmisi/DESDEO.git # Replace with the branch you want to track (e.g. master, deploy, main). - ref: + ref: rahti-deploy # No contextDir: the pyproject.toml and .s2i/ are at the repo root. # ── Build strategy ─────────────────────────────────────────────────────── diff --git a/deploy/postgres.yaml b/deploy/postgres.yaml index 1ed80fea1..795f13874 100644 --- a/deploy/postgres.yaml +++ b/deploy/postgres.yaml @@ -21,8 +21,7 @@ metadata: labels: app: desdeo-postgres spec: - # ClusterIP only — Postgres is not exposed outside the cluster. 
- clusterIP: None # headless service for StatefulSet DNS + clusterIP: None selector: app: desdeo-postgres ports: @@ -49,12 +48,12 @@ spec: spec: containers: - name: postgres - image: docker.io/bitnami/postgresql:16 + image: image-registry.openshift-image-registry.svc:5000/openshift/postgresql:16-el10 ports: - containerPort: 5432 name: postgres env: - - name: POSTGRESQL_USERNAME + - name: POSTGRESQL_USER valueFrom: secretKeyRef: name: desdeo-secrets @@ -66,14 +65,9 @@ spec: key: POSTGRES_PASSWORD - name: POSTGRESQL_DATABASE value: desdeo - - name: POSTGRESQL_POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: POSTGRES_SUPERUSER_PASSWORD volumeMounts: - name: postgres-data - mountPath: /bitnami/postgresql + mountPath: /var/lib/pgsql/data resources: requests: memory: "256Mi" @@ -81,23 +75,24 @@ spec: limits: memory: "512Mi" cpu: "500m" - livenessProbe: + readinessProbe: exec: command: - /bin/sh - -c - - pg_isready -U $POSTGRESQL_USERNAME -d desdeo - initialDelaySeconds: 30 + - psql -U $POSTGRESQL_USER -d desdeo -c "SELECT 1" + initialDelaySeconds: 15 periodSeconds: 10 failureThreshold: 6 - readinessProbe: + livenessProbe: exec: command: - /bin/sh - -c - - pg_isready -U $POSTGRESQL_USERNAME -d desdeo - initialDelaySeconds: 5 - periodSeconds: 5 + - psql -U $POSTGRESQL_USER -d desdeo -c "SELECT 1" + initialDelaySeconds: 30 + periodSeconds: 20 + failureThreshold: 3 volumeClaimTemplates: - metadata: name: postgres-data diff --git a/deploy/webui-buildconfig.yaml b/deploy/webui-buildconfig.yaml index 0420ce945..fbe83cd56 100644 --- a/deploy/webui-buildconfig.yaml +++ b/deploy/webui-buildconfig.yaml @@ -20,8 +20,8 @@ spec: source: type: Git git: - uri: https://github.com/industrial-optimization-group/DESDEO.git - ref: + uri: https://github.com/gialmisi/DESDEO.git + ref: rahti-deploy # Only the webui/ subtree is needed as the Docker build context. 
contextDir: webui @@ -34,6 +34,9 @@ spec: buildArgs: - name: VITE_API_URL value: "https://gialmisi-desdeo-api.rahtiapp.fi" + env: + - name: NODE_OPTIONS + value: "--max-old-space-size=3072" # ── Output ─────────────────────────────────────────────────────────────── output: @@ -53,10 +56,10 @@ spec: # npm ci + vite build is memory-hungry; 2Gi is usually sufficient. resources: requests: - memory: "1Gi" + memory: "2Gi" cpu: "500m" limits: - memory: "2Gi" + memory: "4Gi" cpu: "1" runPolicy: Serial diff --git a/webui/Dockerfile b/webui/Dockerfile index aeae76740..7ca72aba3 100644 --- a/webui/Dockerfile +++ b/webui/Dockerfile @@ -20,7 +20,7 @@ # --------------------------------------------------------------------------- # Stage 1 – build # --------------------------------------------------------------------------- -FROM node:20-alpine AS build +FROM node:24-alpine AS build WORKDIR /app @@ -44,7 +44,7 @@ RUN npm prune --omit=dev # --------------------------------------------------------------------------- # Stage 2 – runtime # --------------------------------------------------------------------------- -FROM node:20-alpine +FROM node:24-alpine WORKDIR /app From 4245365eec0e36e454dcf8b2300f3ae2af965a8c Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Fri, 27 Mar 2026 13:13:10 +0200 Subject: [PATCH 12/24] Changed npm ci to npm install in webui/Dockerfile --- webui/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui/Dockerfile b/webui/Dockerfile index 7ca72aba3..a8e757c63 100644 --- a/webui/Dockerfile +++ b/webui/Dockerfile @@ -26,7 +26,7 @@ WORKDIR /app # Install dependencies first for layer-cache efficiency. COPY package*.json ./ -RUN npm ci +RUN npm install # Copy the rest of the source. COPY . . From f88bf154f130abe0f72ae0344bab7843dcf1fc5c Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Fri, 27 Mar 2026 14:01:59 +0200 Subject: [PATCH 13/24] Updated deployment files. 
--- deploy/api-deployment.yaml | 42 +++++++++++++++++++++++++----------- deploy/db-init-job.yaml | 5 ++++- deploy/webui-deployment.yaml | 2 +- 3 files changed, 35 insertions(+), 14 deletions(-) diff --git a/deploy/api-deployment.yaml b/deploy/api-deployment.yaml index 1d4cae5e3..b77ee79a0 100644 --- a/deploy/api-deployment.yaml +++ b/deploy/api-deployment.yaml @@ -46,24 +46,42 @@ spec: name: http protocol: TCP env: - # ── Pulled from Secret ──────────────────────────────────────────── - - name: DATABASE_URL + - name: DESDEO_PRODUCTION + value: "true" + - name: AUTHJWT_SECRET valueFrom: secretKeyRef: name: desdeo-secrets - key: DATABASE_URL - - name: SECRET_KEY + key: AUTHJWT_SECRET + - name: DB_HOST valueFrom: secretKeyRef: name: desdeo-secrets - key: SECRET_KEY - # ── Static runtime config ───────────────────────────────────────── - - name: DEBUG - value: "false" - # CORS: allow the public webui origin. - - name: ALLOWED_ORIGINS - value: "https://gialmisi-desdeo-webui.rahtiapp.fi" - # ── Gunicorn (matches .s2i/environment) ─────────────────────────── + key: DB_HOST + - name: DB_PORT + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_PORT + - name: DB_NAME + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_NAME + - name: DB_USER + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_USER + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_PASSWORD + - name: CORS_ORIGINS + value: '["https://gialmisi-desdeo-webui.rahtiapp.fi"]' + - name: COOKIE_DOMAIN + value: "rahtiapp.fi" - name: APP_MODULE value: "desdeo.api.app:app" - name: GUNICORN_CMD_ARGS diff --git a/deploy/db-init-job.yaml b/deploy/db-init-job.yaml index afd9aadc5..4e6d96b8e 100644 --- a/deploy/db-init-job.yaml +++ b/deploy/db-init-job.yaml @@ -46,9 +46,12 @@ spec: - name: db-init # Use the same image as the API Deployment. # Replace with your Rahti project name. 
- image: image-registry.openshift-image-registry.svc:5000//desdeo-api:latest + image: image-registry.openshift-image-registry.svc:5000/gialmisi-desdeo/desdeo-api:latest # Run the production init script. command: ["python", "desdeo/api/db_init_prod.py"] + env: + - name: DESDEO_PRODUCTION + value: "true" envFrom: # Inject DATABASE_URL, SECRET_KEY, DESDEO_ADMIN_USERNAME, # DESDEO_ADMIN_PASSWORD from the shared Secret. diff --git a/deploy/webui-deployment.yaml b/deploy/webui-deployment.yaml index 9589327cc..884bd255e 100644 --- a/deploy/webui-deployment.yaml +++ b/deploy/webui-deployment.yaml @@ -47,7 +47,7 @@ spec: resources: requests: memory: "128Mi" - cpu: "50m" + cpu: "100m" limits: memory: "256Mi" cpu: "500m" From af2e7e5e73943e3a290b880c62a9fdc584ee968f Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Fri, 27 Mar 2026 14:47:06 +0200 Subject: [PATCH 14/24] Web-GUI - Updated api client to use correct url's for api calls in debug/deploy. --- webui/src/lib/api/new-client.ts | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/webui/src/lib/api/new-client.ts b/webui/src/lib/api/new-client.ts index 3e8f365c0..de8c3879c 100644 --- a/webui/src/lib/api/new-client.ts +++ b/webui/src/lib/api/new-client.ts @@ -21,16 +21,21 @@ const getBody = async (c: Response | Request): Promise => { return (c as Response).text() as Promise; }; -// NOTE: Update just base url const getUrl = (contextUrl: string): string => { const url = new URL(contextUrl); - const origin = url.origin; const pathname = url.pathname; const search = url.search; - const requestUrl = new URL(`${origin}${pathname}${search}`); + const base = + typeof process !== 'undefined' && process.env?.API_BASE_URL + ? process.env.API_BASE_URL + : (import.meta.env.VITE_API_URL ?? 'http://localhost:8000'); - return requestUrl.toString(); + // base may be a relative path (e.g. 
'/api' in local dev via Vite proxy) + // in that case, fall back to localhost as the origin + const absoluteBase = base.startsWith('http') ? base : `http://localhost:8000`; + + return new URL(`${absoluteBase}${pathname}${search}`).toString(); }; const getHeaders = (headers?: HeadersInit): HeadersInit => { From 5324b5b66932129dd97a96218fd980c955466f91 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Fri, 27 Mar 2026 15:16:39 +0200 Subject: [PATCH 15/24] Web-GUI - Fixed some URL resolving issues and passing access token cookies correctly in the UI (mostly affecting deployment). --- deploy/webui-deployment.yaml | 2 +- webui/src/hooks.server.ts | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/deploy/webui-deployment.yaml b/deploy/webui-deployment.yaml index 884bd255e..ed2f4bf54 100644 --- a/deploy/webui-deployment.yaml +++ b/deploy/webui-deployment.yaml @@ -39,7 +39,7 @@ spec: value: "3000" # Server-side +server.ts proxy routes use this to reach the API # over the internal cluster network (avoids the public ingress). - - name: API_URL + - name: API_BASE_URL value: "http://desdeo-api:8080" # ORIGIN is required by SvelteKit for CSRF protection when behind a proxy. - name: ORIGIN diff --git a/webui/src/hooks.server.ts b/webui/src/hooks.server.ts index a214db051..1f5b957b8 100644 --- a/webui/src/hooks.server.ts +++ b/webui/src/hooks.server.ts @@ -5,7 +5,17 @@ import { dev } from '$app/environment'; // const API = process.env.API_BASE_URL ?? 
'/'; export const handleFetch: HandleFetch = async ({ event, request, fetch }) => { - // TODO: check that the request originates from our app, instead of being a third party + // Forward access_token cookie to all API requests + const accessToken = event.cookies.get("access_token"); + if (accessToken) { + request = new Request(request, { + headers: new Headers({ + ...Object.fromEntries(request.headers.entries()), + cookie: `access_token=${accessToken}`, + }), + }); + } + const originalRequest = request.clone(); let res = await fetch(request); From 92ddc5c9836b9ba56b0eb19a63a4de5e9cbfcd45 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Fri, 27 Mar 2026 15:59:04 +0200 Subject: [PATCH 16/24] Web-UI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add catch-all proxy route /api/[...path] that forwards browser API calls to the upstream API using event.fetch, so handleFetch intercepts for cookie injection and 401/token-refresh handling - Fix getUrl to return relative /api/ on the browser side instead of a direct cross-origin URL, routing all browser traffic through the proxy - Switch server-side detection in getUrl from process.env check to typeof window === 'undefined' with http://localhost:8000 fallback - Remove client-side 401 retry logic from customFetch — now handled server-side by handleFetch via the proxy - Remove dead Vite /api proxy config from vite.config.ts --- webui/src/lib/api/new-client.ts | 33 ++++++-------------- webui/src/routes/api/[...path]/+server.ts | 38 +++++++++++++++++++++++ webui/vite.config.ts | 12 +------ 3 files changed, 48 insertions(+), 35 deletions(-) create mode 100644 webui/src/routes/api/[...path]/+server.ts diff --git a/webui/src/lib/api/new-client.ts b/webui/src/lib/api/new-client.ts index de8c3879c..54b4b3ba1 100644 --- a/webui/src/lib/api/new-client.ts +++ b/webui/src/lib/api/new-client.ts @@ -26,16 +26,15 @@ const getUrl = (contextUrl: string): string => { const pathname = 
url.pathname; const search = url.search; - const base = - typeof process !== 'undefined' && process.env?.API_BASE_URL - ? process.env.API_BASE_URL - : (import.meta.env.VITE_API_URL ?? 'http://localhost:8000'); - - // base may be a relative path (e.g. '/api' in local dev via Vite proxy) - // in that case, fall back to localhost as the origin - const absoluteBase = base.startsWith('http') ? base : `http://localhost:8000`; + // Server-side (Node.js): call API directly. Falls back to localhost if + // API_BASE_URL is not in process.env (Vite dev doesn't populate it automatically). + if (typeof window === 'undefined') { + const base = process.env.API_BASE_URL ?? 'http://localhost:8000'; + return new URL(`${base}${pathname}${search}`).toString(); + } - return new URL(`${absoluteBase}${pathname}${search}`).toString(); + // Browser-side: route through the SvelteKit proxy so cookies stay on one domain + return `/api${pathname}${search}`; }; const getHeaders = (headers?: HeadersInit): HeadersInit => { @@ -61,22 +60,8 @@ export const customFetch = async ( }; const request = new Request(requestUrl, requestInit); - const retryRequest = request.clone(); - - let response = await f(request); - - if (response.status === 401) { - const refreshUrl = new URL("/refresh", requestUrl).toString(); - const refreshResponse = await f(refreshUrl, { - method: "POST", - credentials: "include", - }); - - if (refreshResponse.ok) { - response = await f(retryRequest); - } - } + const response = await f(request); const data = await getBody(response); return { status: response.status, data, headers: response.headers } as T; diff --git a/webui/src/routes/api/[...path]/+server.ts b/webui/src/routes/api/[...path]/+server.ts new file mode 100644 index 000000000..b1554751d --- /dev/null +++ b/webui/src/routes/api/[...path]/+server.ts @@ -0,0 +1,38 @@ +import type { RequestHandler } from './$types'; + +const API_BASE_URL = process.env.API_BASE_URL ?? 
'http://localhost:8000'; + +const handler: RequestHandler = async ({ request, params, fetch }) => { + const path = params.path; + const search = new URL(request.url).search; + const upstreamUrl = `${API_BASE_URL}/${path}${search}`; + + const headers = new Headers(request.headers); + headers.delete('host'); + + const upstreamRequest = new Request(upstreamUrl, { + method: request.method, + headers, + body: ['GET', 'HEAD'].includes(request.method) ? undefined : request.body, + // @ts-expect-error — duplex is required for streaming bodies in Node 18+ + duplex: 'half', + }); + + // Use event.fetch (not global fetch) so handleFetch intercepts for 401/refresh + const response = await fetch(upstreamRequest); + + // Forward response headers so getBody can detect content-type correctly + const responseHeaders = new Headers(response.headers); + responseHeaders.delete('set-cookie'); // SvelteKit manages cookies separately + + return new Response(response.body, { + status: response.status, + headers: responseHeaders, + }); +}; + +export const GET = handler; +export const POST = handler; +export const PUT = handler; +export const PATCH = handler; +export const DELETE = handler; diff --git a/webui/vite.config.ts b/webui/vite.config.ts index c800a69cd..3356e81f0 100644 --- a/webui/vite.config.ts +++ b/webui/vite.config.ts @@ -13,15 +13,5 @@ export default defineConfig({ }, optimizeDeps: { exclude: ['mathlive'] - }, - server: { - proxy: { - '/api': { - target: 'http://127.0.0.1:8000', - changeOrigin: true, - secure: false, - rewrite: (path) => path.replace(/^\/api/, '') - } - } } -}); \ No newline at end of file +}); From fef36bf6fc57e970dacd1031885505c4931bb68f Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Fri, 27 Mar 2026 17:39:21 +0200 Subject: [PATCH 17/24] Docs and deploy - Updated docs to include guide on how to setup fullstack on OpenShift using the oc tool from a terminal. - Polished deployment files. 
--- deploy/api-buildconfig.yaml | 73 ++-- deploy/api-deployment.yaml | 148 ++++---- deploy/builder-buildconfig.yaml | 61 ++++ deploy/builder-imagestream.yaml | 19 + deploy/postgres.yaml | 118 +++---- deploy/secrets-template.yaml | 4 - deploy/webui-buildconfig.yaml | 25 +- docs/howtoguides/deploying_on_openshift.md | 387 +++++++++++++++++++++ docs/howtoguides/index.md | 1 + mkdocs.yml | 1 + 10 files changed, 643 insertions(+), 194 deletions(-) create mode 100644 deploy/builder-buildconfig.yaml create mode 100644 deploy/builder-imagestream.yaml create mode 100644 docs/howtoguides/deploying_on_openshift.md diff --git a/deploy/api-buildconfig.yaml b/deploy/api-buildconfig.yaml index 6df1242da..88f2f6b88 100644 --- a/deploy/api-buildconfig.yaml +++ b/deploy/api-buildconfig.yaml @@ -2,16 +2,21 @@ # # BuildConfig for the DESDEO API using OpenShift S2I strategy. # -# Builder image: python:3.12-ubi9 from the shared 'openshift' namespace. -# The custom .s2i/bin/assemble script in the repository overrides the default -# assemble behaviour and uses uv to install dependencies. +# Builder image: desdeo-builder ImageStream (custom image built from +# desdeo-s2i-buildimage.Dockerfile). This includes Python 3.12 on UBI8 +# plus COIN-OR solvers (bonmin, ipopt, cbc) and ca-certificates. 
# -# Triggers: -# - GitHub webhook (push to DEPLOY_BRANCH) ← main CI/CD trigger -# - ImageChange on the builder image ← rebuilds on Python security patches +# To use the base Python image without solvers instead, +# replace the sourceStrategy.from block with: +# from: +# kind: ImageStreamTag +# name: python:3.12-ubi9 +# namespace: openshift # -# To get the webhook URL after applying: -# oc describe bc/desdeo-api | grep -A2 Webhook +# Triggers: +# - ImageChange on desdeo-builder -> rebuilds API when builder is updated +# - GitHub webhook (push to DEPLOY_BRANCH) <- main CI/CD trigger +# - ConfigChange --- apiVersion: build.openshift.io/v1 kind: BuildConfig @@ -20,62 +25,46 @@ metadata: labels: app: desdeo-api spec: - # ── Source ─────────────────────────────────────────────────────────────── source: type: Git git: uri: https://github.com/gialmisi/DESDEO.git - # Replace with the branch you want to track (e.g. master, deploy, main). ref: rahti-deploy - # No contextDir: the pyproject.toml and .s2i/ are at the repo root. - # ── Build strategy ─────────────────────────────────────────────────────── strategy: type: Source sourceStrategy: - # Python 3.12 UBI9 S2I builder available in every Rahti project via the - # shared 'openshift' ImageStream. Verify with: - # oc get is python -n openshift + # Reference the custom builder ImageStream produced by builder-buildconfig.yaml. + # This image has Python 3.12 + COIN-OR solvers pre-installed. from: kind: ImageStreamTag - name: python:3.12-ubi9 - namespace: openshift - # Environment variables available during the S2I *build* (assemble). - # Runtime env vars go in the Deployment, not here. 
+ name: desdeo-builder:latest env: - - name: UPGRADE_PIP_TO_LATEST - value: "1" - - name: DESDEO_INSTALL - value: "--group web --group server" - - name: DEBUG - value: "false" + - name: UPGRADE_PIP_TO_LATEST + value: "1" + - name: DESDEO_INSTALL + value: "--group web --group server" + - name: DEBUG + value: "false" - # ── Output ─────────────────────────────────────────────────────────────── output: to: kind: ImageStreamTag name: desdeo-api:latest - # ── Triggers ───────────────────────────────────────────────────────────── triggers: - # Rebuild when the Python builder image is updated (security patches). - - type: ImageChange - imageChange: {} - # GitHub webhook — authenticates with WEBHOOK_SECRET_API from the Secret. - - type: GitHub - github: - secretReference: - name: desdeo-secrets - # Key in the Secret that holds the webhook secret string. - # Note: this uses secretReference (key in existing Secret) rather than - # an inline secret so we don't store the value in this manifest. - # Manual / API trigger. - - type: ConfigChange + # Rebuild API when the builder image is updated (solver or base OS updates). + - type: ImageChange + imageChange: {} + # GitHub webhook, push to DEPLOY_BRANCH triggers a new API build. 
+ - type: GitHub + github: + secretReference: + name: desdeo-secrets + - type: ConfigChange - # ── Run policy ─────────────────────────────────────────────────────────── runPolicy: Serial - # ── Resource limits for the build pod ──────────────────────────────────── resources: requests: memory: "1Gi" diff --git a/deploy/api-deployment.yaml b/deploy/api-deployment.yaml index b77ee79a0..8b55d5eab 100644 --- a/deploy/api-deployment.yaml +++ b/deploy/api-deployment.yaml @@ -29,7 +29,7 @@ spec: strategy: type: RollingUpdate rollingUpdate: - maxUnavailable: 0 # zero-downtime: new pod must be ready before old is removed + maxUnavailable: 0 maxSurge: 1 template: metadata: @@ -37,76 +37,76 @@ spec: app: desdeo-api spec: containers: - - name: api - # Placeholder: the image change annotation above overwrites this on deploy. - image: desdeo-api:latest - imagePullPolicy: Always - ports: - - containerPort: 8080 - name: http - protocol: TCP - env: - - name: DESDEO_PRODUCTION - value: "true" - - name: AUTHJWT_SECRET - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: AUTHJWT_SECRET - - name: DB_HOST - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: DB_HOST - - name: DB_PORT - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: DB_PORT - - name: DB_NAME - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: DB_NAME - - name: DB_USER - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: DB_USER - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: DB_PASSWORD - - name: CORS_ORIGINS - value: '["https://gialmisi-desdeo-webui.rahtiapp.fi"]' - - name: COOKIE_DOMAIN - value: "rahtiapp.fi" - - name: APP_MODULE - value: "desdeo.api.app:app" - - name: GUNICORN_CMD_ARGS - value: "--bind=0.0.0.0:8080 --workers=1 --access-logfile=- --worker-class uvicorn.workers.UvicornWorker" - resources: - requests: - memory: "512Mi" - cpu: "250m" - limits: - memory: "1Gi" - cpu: "1" - readinessProbe: - httpGet: - path: /health # adjust if the FastAPI 
app exposes a different health endpoint - port: 8080 - initialDelaySeconds: 15 - periodSeconds: 10 - failureThreshold: 3 - livenessProbe: - httpGet: - path: /health - port: 8080 - initialDelaySeconds: 30 - periodSeconds: 20 - failureThreshold: 3 + - name: api + # Placeholder: the image change annotation above overwrites this on deploy. + image: desdeo-api:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + name: http + protocol: TCP + env: + - name: DESDEO_PRODUCTION + value: "true" + - name: AUTHJWT_SECRET + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: AUTHJWT_SECRET + - name: DB_HOST + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_HOST + - name: DB_PORT + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_PORT + - name: DB_NAME + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_NAME + - name: DB_USER + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_USER + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: DB_PASSWORD + - name: CORS_ORIGINS + value: '["https://gialmisi-desdeo-webui.rahtiapp.fi"]' + - name: COOKIE_DOMAIN + value: "rahtiapp.fi" + - name: APP_MODULE + value: "desdeo.api.app:app" + - name: GUNICORN_CMD_ARGS + value: "--bind=0.0.0.0:8080 --workers=1 --access-logfile=- --worker-class uvicorn.workers.UvicornWorker" + resources: + requests: + memory: "512Mi" + cpu: "250m" + limits: + memory: "1Gi" + cpu: "1" + readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 10 + failureThreshold: 3 + livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 20 + failureThreshold: 3 --- apiVersion: v1 kind: Service @@ -118,9 +118,9 @@ spec: selector: app: desdeo-api ports: - - name: http - port: 8080 - targetPort: 8080 + - name: http + port: 8080 + targetPort: 8080 --- apiVersion: route.openshift.io/v1 kind: Route diff --git a/deploy/builder-buildconfig.yaml 
b/deploy/builder-buildconfig.yaml new file mode 100644 index 000000000..fe58d7727 --- /dev/null +++ b/deploy/builder-buildconfig.yaml @@ -0,0 +1,61 @@ +# deploy/builder-buildconfig.yaml +# +# BuildConfig that produces the custom DESDEO S2I builder image. +# Uses Docker strategy to build desdeo-s2i-buildimage.Dockerfile +# from the repository root. +# +# This only needs to be (re)built when: +# - The Dockerfile changes +# - The solver binaries release is updated +# - The base UBI8 image receives a security update (ImageChange trigger) +# +# To trigger a manual rebuild: +# oc start-build desdeo-builder --follow +# +# The output image is stored in the desdeo-builder ImageStream and +# referenced by the api-buildconfig.yaml as the S2I builder. +--- +apiVersion: build.openshift.io/v1 +kind: BuildConfig +metadata: + name: desdeo-builder + labels: + app: desdeo-api + component: builder-image +spec: + source: + type: Git + git: + uri: https://github.com/gialmisi/DESDEO.git + ref: rahti-deploy + + strategy: + type: Docker + dockerStrategy: + dockerfilePath: desdeo-s2i-buildimage.Dockerfile + # The Dockerfile temporarily switches to root to install packages, + # then switches back to UID 1001. This is expected and required. + noCache: false + + output: + to: + kind: ImageStreamTag + name: desdeo-builder:latest + + triggers: + # Rebuild when the base UBI8 image receives updates (security patches). + - type: ImageChange + imageChange: {} + # Manual / API trigger. + - type: ConfigChange + + # The builder image download includes solver binaries (~100MB). + resources: + requests: + memory: "512Mi" + cpu: "250m" + limits: + memory: "1Gi" + cpu: "1" + + runPolicy: Serial diff --git a/deploy/builder-imagestream.yaml b/deploy/builder-imagestream.yaml new file mode 100644 index 000000000..6fb5c7ffc --- /dev/null +++ b/deploy/builder-imagestream.yaml @@ -0,0 +1,19 @@ +# deploy/builder-imagestream.yaml +# +# ImageStream for the custom DESDEO S2I builder image. 
+# This image extends the base Python 3.12 UBI8 image with: +# - COIN-OR solvers (bonmin, ipopt, cbc) from a GitHub release +# - ca-certificates for managing Gurobi licences +# +# The API BuildConfig references this stream as its S2I builder, +# so whenever this image is updated the API is automatically rebuilt. +--- +apiVersion: image.openshift.io/v1 +kind: ImageStream +metadata: + name: desdeo-builder + labels: + app: desdeo-api +spec: + lookupPolicy: + local: true diff --git a/deploy/postgres.yaml b/deploy/postgres.yaml index 795f13874..5378696fc 100644 --- a/deploy/postgres.yaml +++ b/deploy/postgres.yaml @@ -7,10 +7,10 @@ # oc apply -f deploy/postgres.yaml # # Bitnami env var names differ from the official postgres image: -# POSTGRESQL_USERNAME → creates a non-superuser (matches DATABASE_URL user) -# POSTGRESQL_PASSWORD → password for POSTGRESQL_USERNAME -# POSTGRESQL_DATABASE → database to create on first start -# POSTGRESQL_POSTGRES_PASSWORD → password for the postgres superuser +# POSTGRESQL_USERNAME: creates a non-superuser (matches DATABASE_URL user) +# POSTGRESQL_PASSWORD: password for POSTGRESQL_USERNAME +# POSTGRESQL_DATABASE: database to create on first start +# POSTGRESQL_POSTGRES_PASSWORD: password for the postgres superuser # # Data is persisted in the PVC mounted at /bitnami/postgresql. 
--- @@ -25,9 +25,9 @@ spec: selector: app: desdeo-postgres ports: - - name: postgres - port: 5432 - targetPort: 5432 + - name: postgres + port: 5432 + targetPort: 5432 --- apiVersion: apps/v1 kind: StatefulSet @@ -47,57 +47,57 @@ spec: app: desdeo-postgres spec: containers: - - name: postgres - image: image-registry.openshift-image-registry.svc:5000/openshift/postgresql:16-el10 - ports: - - containerPort: 5432 - name: postgres - env: - - name: POSTGRESQL_USER - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: POSTGRES_USER - - name: POSTGRESQL_PASSWORD - valueFrom: - secretKeyRef: - name: desdeo-secrets - key: POSTGRES_PASSWORD - - name: POSTGRESQL_DATABASE - value: desdeo - volumeMounts: - - name: postgres-data - mountPath: /var/lib/pgsql/data + - name: postgres + image: image-registry.openshift-image-registry.svc:5000/openshift/postgresql:16-el10 + ports: + - containerPort: 5432 + name: postgres + env: + - name: POSTGRESQL_USER + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: POSTGRES_USER + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: desdeo-secrets + key: POSTGRES_PASSWORD + - name: POSTGRESQL_DATABASE + value: desdeo + volumeMounts: + - name: postgres-data + mountPath: /var/lib/pgsql/data + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "512Mi" + cpu: "500m" + readinessProbe: + exec: + command: + - /bin/sh + - -c + - psql -U $POSTGRESQL_USER -d desdeo -c "SELECT 1" + initialDelaySeconds: 15 + periodSeconds: 10 + failureThreshold: 6 + livenessProbe: + exec: + command: + - /bin/sh + - -c + - psql -U $POSTGRESQL_USER -d desdeo -c "SELECT 1" + initialDelaySeconds: 30 + periodSeconds: 20 + failureThreshold: 3 + volumeClaimTemplates: + - metadata: + name: postgres-data + spec: + accessModes: ["ReadWriteOnce"] resources: requests: - memory: "256Mi" - cpu: "100m" - limits: - memory: "512Mi" - cpu: "500m" - readinessProbe: - exec: - command: - - /bin/sh - - -c - - psql -U $POSTGRESQL_USER -d desdeo -c 
"SELECT 1" - initialDelaySeconds: 15 - periodSeconds: 10 - failureThreshold: 6 - livenessProbe: - exec: - command: - - /bin/sh - - -c - - psql -U $POSTGRESQL_USER -d desdeo -c "SELECT 1" - initialDelaySeconds: 30 - periodSeconds: 20 - failureThreshold: 3 - volumeClaimTemplates: - - metadata: - name: postgres-data - spec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 2Gi + storage: 2Gi diff --git a/deploy/secrets-template.yaml b/deploy/secrets-template.yaml index fa04e0699..cd1a37493 100644 --- a/deploy/secrets-template.yaml +++ b/deploy/secrets-template.yaml @@ -28,14 +28,12 @@ metadata: name: desdeo-secrets type: Opaque stringData: - # ── PostgreSQL ────────────────────────────────────────────────────────── # Application user (non-superuser). Must match DATABASE_URL below. POSTGRES_USER: desdeo POSTGRES_PASSWORD: # Superuser password (used internally by Bitnami image). POSTGRES_SUPERUSER_PASSWORD: - # ── API runtime ───────────────────────────────────────────────────────── # Full DSN consumed by the FastAPI app and db_init_prod.py. # Host must be the Kubernetes Service name defined in postgres.yaml. DATABASE_URL: "postgresql://desdeo:@desdeo-postgres:5432/desdeo" @@ -44,12 +42,10 @@ stringData: # python -c "import secrets; print(secrets.token_hex(64))" SECRET_KEY: - # ── DB init job ───────────────────────────────────────────────────────── # Credentials for the initial analyst user seeded by the db-init Job. DESDEO_ADMIN_USERNAME: admin DESDEO_ADMIN_PASSWORD: - # ── Webhook secrets (BuildConfigs) ────────────────────────────────────── # Random strings used to authenticate GitHub webhook payloads. 
# Generate with: python -c "import secrets; print(secrets.token_hex(24))" WEBHOOK_SECRET_API: diff --git a/deploy/webui-buildconfig.yaml b/deploy/webui-buildconfig.yaml index fbe83cd56..1e1e15c93 100644 --- a/deploy/webui-buildconfig.yaml +++ b/deploy/webui-buildconfig.yaml @@ -16,7 +16,6 @@ metadata: labels: app: desdeo-webui spec: - # ── Source ─────────────────────────────────────────────────────────────── source: type: Git git: @@ -25,35 +24,31 @@ spec: # Only the webui/ subtree is needed as the Docker build context. contextDir: webui - # ── Build strategy ─────────────────────────────────────────────────────── strategy: type: Docker dockerStrategy: dockerfilePath: Dockerfile # Build arguments — passed as ARG to the Dockerfile. buildArgs: - - name: VITE_API_URL - value: "https://gialmisi-desdeo-api.rahtiapp.fi" + - name: VITE_API_URL + value: "https://gialmisi-desdeo-api.rahtiapp.fi" env: - - name: NODE_OPTIONS - value: "--max-old-space-size=3072" + - name: NODE_OPTIONS + value: "--max-old-space-size=3072" - # ── Output ─────────────────────────────────────────────────────────────── output: to: kind: ImageStreamTag name: desdeo-webui:latest - # ── Triggers ───────────────────────────────────────────────────────────── triggers: - - type: GitHub - github: - secretReference: - name: desdeo-secrets - - type: ConfigChange + - type: GitHub + github: + secretReference: + name: desdeo-secrets + - type: ConfigChange - # ── Build pod resources ─────────────────────────────────────────────────── - # npm ci + vite build is memory-hungry; 2Gi is usually sufficient. + # npm install + vite build is memory-hungry; 2Gi is usually sufficient. 
resources: requests: memory: "2Gi" diff --git a/docs/howtoguides/deploying_on_openshift.md b/docs/howtoguides/deploying_on_openshift.md new file mode 100644 index 000000000..5a7d503ef --- /dev/null +++ b/docs/howtoguides/deploying_on_openshift.md @@ -0,0 +1,387 @@ +# How to deploy DESDEO on OpenShift + +## Overview + +This guide walks through deploying the full DESDEO stack: FastAPI backend, +SvelteKit web UI, and PostgreSQL, on an OpenShift/OKD cluster. [CSC +Rahti](https://rahti.csc.fi/) is used as the concrete example throughout; values +specific to Rahti (hostnames, API endpoint, image registry URL) are marked so +readers on other OpenShift clusters can substitute their own. + +This guide uses YAML manifests and the `oc` CLI exclusively. Every deployment +step is reproducible and version-controlled. OpenShift is a Kubernetes +distribution with extra features layered on top, this guide uses +OpenShift-specific objects (BuildConfig, ImageStream, Route) that do not exist +in vanilla Kubernetes. If you are deploying on plain Kubernetes, consult your +platform's CI/CD documentation instead. + +The files you will work with live in two places in the DESDEO repository: + +- `deploy/`: all OpenShift manifests (ImageStreams, BuildConfigs, Deployments, + StatefulSet, Routes, Job). 
+- Several application-level files added or modified to support production + deployment, described under [Repository preparation](#repository-preparation) + +## Prerequisites + +- A CSC account with an active Rahti project (see [Rahti access](https://docs.csc.fi/cloud/rahti/access/)) +- `oc` CLI installed (see [Using the Rahti CLI](https://docs.csc.fi/cloud/rahti/usage/cli/)) +- Logged in to the cluster: + ```bash + oc login https://api.2.rahti.csc.fi:6443 --token= + ``` +- Switched to your project: + ```bash + oc project + ``` +- A fork or branch of the [DESDEO + repository](https://github.com/industrial-optimization-group/DESDEO) with the + `deploy/` files committed and pushed + +## Architecture + +Four components are deployed and wired together: + +1. `desdeo-api` Deployment: FastAPI served by gunicorn+uvicorn, listening on + port 8080. Built in-cluster using OpenShift's Source-to-Image (S2I) strategy + from a Python builder image. + +2. `desdeo-webui` Deployment: SvelteKit with adapter-node, listening on port 3000. + Built using the Docker strategy from `webui/Dockerfile`. All browser API + calls are routed through a `/api/[...path]` proxy route baked into the + SvelteKit app, this keeps cookies same-origin and avoids CORS complications. + +3. `desdeo-postgres` StatefulSet: PostgreSQL running on the built-in Rahti image, + backed by a PersistentVolumeClaim. + +4. OpenShift Routes: TLS-terminated at Rahti's HAProxy ingress. Certificates for + `*.rahtiapp.fi` are provisioned automatically. + +### URL environment variables + +Two env vars control how the API is reached, and they intentionally point to different targets: + +| Variable | Value | Used by | +|---|---|---| +| `VITE_API_URL` | `/api` | Baked into the client-side JS bundle at build time. Browser requests go to `/api/...`, which the SvelteKit proxy handles. | +| `API_BASE_URL` | `http://desdeo-api:8080` | Set at runtime on the webui pod. 
SvelteKit's server-side proxy uses the internal cluster DNS name to reach the API, never exposed to the browser. | + +Do not set `VITE_API_URL` to the API's external Route URL. The proxy architecture means the browser never talks directly to the API. + +## Repository preparation + +The following files must be present in the repository before deploying. All manifests live under `deploy/`. +These are provided in the master branch. + +| File | Purpose | +|---|---| +| `deploy/postgres.yaml` | StatefulSet, Service, and PVC for PostgreSQL | +| `deploy/builder-imagestream.yaml` | ImageStream that tracks the custom S2I builder image | +| `deploy/builder-buildconfig.yaml` | BuildConfig — Docker strategy, builds the solver-enabled S2I builder image | +| `deploy/api-imagestream.yaml` | ImageStream that tracks built API images | +| `deploy/webui-imagestream.yaml` | ImageStream that tracks built webui images | +| `deploy/api-buildconfig.yaml` | BuildConfig: S2I using `desdeo-builder:latest`, GitHub webhook trigger | +| `deploy/webui-buildconfig.yaml` | BuildConfig: Docker strategy, GitHub webhook trigger | +| `deploy/api-deployment.yaml` | Deployment, Service, and Route for the API | +| `deploy/webui-deployment.yaml` | Deployment, Service, and Route for the web UI | +| `deploy/db-init-job.yaml` | One-shot Job that creates tables and seeds the initial user | + +In addition, several non-manifest files are required: + +- `.s2i/bin/assemble`: Custom S2I assemble script that uses `uv sync --frozen`. + +- `.s2i/environment`: Sets S2I environment variables such as `APP_MODULE`, `GUNICORN_CMD_ARGS`, and the port. + +- `desdeo/api/db_init_prod.py`: Production database initialization script. The + `db_init.py` debug branch must not be used in production; this separate script + creates all SQLModel tables and seeds the initial analyst user. + +- `webui/Dockerfile`: Multi-stage Node 24 build. The `NPM_RUN=start:production` + build arg selects the adapter-node start script.
+ +- `webui/src/routes/api/[...path]/+server.ts`: The SvelteKit proxy route. It + forwards all `/api/*` requests to the API using `event.fetch`, so the + `handleFetch` hook in `hooks.server.ts` can intercept 401 responses and handle + token refresh transparently. + +- `desdeo-s2i-buildimage.Dockerfile`: Builds the custom S2I builder image that + extends the Python 3.12 UBI8 base with COIN-OR solvers (`bonmin`, `ipopt`, `cbc`). + +## Step 1: Prepare secrets + +All credentials are stored in a single Secret named `desdeo-secrets` (NOT under +version control). Create it with `oc create secret generic` rather than from a +YAML file. This avoids ever writing credentials to disk or committing them to +the repository. + +```bash +oc create secret generic desdeo-secrets \ + --from-literal=POSTGRES_USER=desdeo \ + --from-literal=POSTGRES_PASSWORD= \ + --from-literal=DB_HOST=desdeo-postgres \ + --from-literal=DB_PORT=5432 \ + --from-literal=DB_NAME=desdeo \ + --from-literal=DB_USER=desdeo \ + --from-literal=DB_PASSWORD= \ + --from-literal=AUTHJWT_SECRET=$(python3 -c "import secrets; print(secrets.token_hex(64))") \ + --from-literal=DESDEO_ADMIN_USERNAME=admin \ + --from-literal=DESDEO_ADMIN_PASSWORD= \ + --from-literal=WEBHOOK_SECRET_API=$(python3 -c "import secrets; print(secrets.token_hex(24))") \ + --from-literal=WEBHOOK_SECRET_WEBUI=$(python3 -c "import secrets; print(secrets.token_hex(24))") +``` + +Key reference: + +| Key | Description | +|---|---| +| `POSTGRES_USER` / `DB_USER` | PostgreSQL application user name | +| `POSTGRES_PASSWORD` / `DB_PASSWORD` | Password for the above | +| `DB_HOST` | Kubernetes Service name, always `desdeo-postgres` | +| `DB_PORT` | `5432` | +| `DB_NAME` | Database name | +| `AUTHJWT_SECRET` | JWT signing key, generate a fresh value, never reuse | +| `DESDEO_ADMIN_USERNAME` | Initial analyst account username | +| `DESDEO_ADMIN_PASSWORD` | Initial analyst account password | +| `WEBHOOK_SECRET_API` | GitHub webhook secret for the API 
BuildConfig | +| `WEBHOOK_SECRET_WEBUI` | GitHub webhook secret for the webui BuildConfig | + +!!! note + `DESDEO_PRODUCTION=true` is set directly in the Deployment manifest, not in + this Secret, because it is not sensitive. + +## Step 2: Deploy PostgreSQL + +```bash +oc apply -f deploy/postgres.yaml +oc rollout status statefulset/desdeo-postgres +``` + +The StatefulSet uses the built-in Rahti PostgreSQL image: + +``` +image-registry.openshift-image-registry.svc:5000/openshift/postgresql:16-el10 +``` + +To check which tags are available on your cluster: + +```bash +oc get is postgresql -n openshift -o jsonpath='{.spec.tags[*].name}' +``` + +Data is stored at `/var/lib/pgsql/data` in the PVC. + +!!! note + The env vars that initialize the database are `POSTGRESQL_USER`, + `POSTGRESQL_PASSWORD`, and `POSTGRESQL_DATABASE` (the `POSTGRESQL_` prefix, + not `POSTGRES_`). The manifests map these from the Secret keys `DB_USER`, + `DB_PASSWORD`, and `DB_NAME`. + +!!! note + An alternative to in-cluster PostgreSQL is [CSC Pukki + DBaaS](https://docs.csc.fi/cloud/dbaas/), a managed PostgreSQL service. + Pukki removes the operational overhead of managing the database yourself but + adds setup steps not covered in this guide. + +## Step 3: Create ImageStreams and BuildConfigs + +An ImageStream is an OpenShift object that tracks versions of a container image. +When a BuildConfig pushes a new image to an ImageStream, any Deployment watching +that stream automatically triggers a rolling update, no external registry or CI +system required. + +Apply the ImageStreams first: + +```bash +oc apply -f deploy/builder-imagestream.yaml +oc apply -f deploy/api-imagestream.yaml +oc apply -f deploy/webui-imagestream.yaml +``` + +Before applying the BuildConfigs, open each file and substitute the placeholder +`` with the branch you want to build from (e.g. `master`). Ensure +the git URI uses HTTPS, not SSH, the build pod does not have SSH credentials. 
+ +The API BuildConfig uses the S2I strategy with `desdeo-builder:latest` as its +builder image — the custom image built from `desdeo-s2i-buildimage.Dockerfile` +that includes COIN-OR solvers. The webui BuildConfig uses the Docker strategy +with `webui/Dockerfile`. The build arg `VITE_API_URL=/api` is passed explicitly, +this is intentional, as browser requests go through the SvelteKit proxy rather +than directly to the API. + +```bash +oc apply -f deploy/builder-buildconfig.yaml +oc apply -f deploy/api-buildconfig.yaml +oc apply -f deploy/webui-buildconfig.yaml +``` + +## Step 4: Trigger first builds + +The builder image must be ready before the API build can start, as +`api-buildconfig.yaml` references `desdeo-builder:latest` as its S2I base. + +```bash +# Build the solver-enabled builder image first +oc start-build desdeo-builder --follow + +# Then build the API and webui (can run in parallel once the builder is done) +oc start-build desdeo-api --follow +oc start-build desdeo-webui --follow +``` + +The first build takes longer than subsequent ones because there is no layer +cache. Expect roughly 6 minutes for the builder, 4 minutes for the API, and +5 minutes for the webui. + +Once the API pod is running, verify the solvers are present: + +```bash +oc exec deployment/desdeo-api -- which bonmin ipopt cbc +``` + +All three should return paths under `/opt/solver_binaries/`. + +!!! warning + If the webui build fails with `exit status 137`, the build pod ran out of memory. Increase the build pod memory limit in `webui-buildconfig.yaml`: + ```yaml + spec: + resources: + limits: + memory: 4Gi + ``` + Also add `NODE_OPTIONS=--max-old-space-size=3072` to `dockerStrategy.env` in the same file, then re-apply and re-trigger the build. +--- + +## Step 5: Deploy API and web UI + +```bash +oc apply -f deploy/api-deployment.yaml +oc apply -f deploy/webui-deployment.yaml +oc rollout status deployment/desdeo-api +oc rollout status deployment/desdeo-webui +``` + +!!! 
warning + Rahti enforces a maximum CPU limit-to-request ratio of 5:1. If your + `resources.limits.cpu` divided by `resources.requests.cpu` exceeds this, the + ReplicaSet will silently fail to create pods. The error does not appear + in pod logs, look in the ReplicaSet events: + ```bash + oc describe replicaset + ``` + The manifests in `deploy/` are set within the allowed ratio. If you + customize resource settings, check the ratio before applying. + +The following env vars must be present on the API pod at runtime (sourced from `desdeo-secrets` via `secretKeyRef` in the Deployment): + +| Variable | Source | +|---|---| +| `DESDEO_PRODUCTION` | Set to `true` directly in the Deployment manifest | +| `DB_HOST`, `DB_PORT`, `DB_NAME`, `DB_USER`, `DB_PASSWORD` | From `desdeo-secrets` | +| `AUTHJWT_SECRET` | From `desdeo-secrets` | +| `CORS_ORIGINS` | Set in the Deployment to `["https://your-webui.rahtiapp.fi"]` | + +!!! note + With the SvelteKit proxy architecture, `COOKIE_DOMAIN` on the API is + irrelevant. Cookies are owned by the webui host and forwarded server-side. + Leave `COOKIE_DOMAIN` unset. + +## Step 6: Initialize the database + +`db_init_prod.py` creates all SQLModel tables and seeds the initial analyst user +defined by `DESDEO_ADMIN_USERNAME` and `DESDEO_ADMIN_PASSWORD`. Safe to re-run if needed. + +The script runs as a one-shot Kubernetes Job using the API image. Before +applying, open `deploy/db-init-job.yaml` and replace `` with your Rahti +project name (used to construct the image pull reference). + +```bash +oc apply -f deploy/db-init-job.yaml +oc logs -f job/desdeo-db-init +``` + +Expected output: + +``` +[db-init] Tables ready. +[db-init] Created user 'admin' (role=analyst, group=admin). +[db-init] Done. 
+
```

Once the job completes successfully, delete it:

```bash
oc delete job desdeo-db-init
```


## Step 7: Verify

```bash
curl https://your-api.rahtiapp.fi/health
# → {"status":"ok"}

curl -I https://your-webui.rahtiapp.fi/
# → HTTP/2 200
# (a 307 redirect to /home is also normal)
```

Routes are TLS-terminated at Rahti's HAProxy ingress. Certificates for
`*.rahtiapp.fi` are provisioned automatically; no manual certificate work is
required.

## Step 8: Set up GitHub webhooks

BuildConfigs include GitHub webhook triggers. Once configured, every push to the
deploy branch triggers a rebuild of the affected component, which then rolls out
automatically via the ImageStream trigger on the Deployment.

Retrieve the webhook URLs. The webhook secret is embedded in the URL itself;
the GitHub "Secret" field should be left blank:

```bash
oc get bc/desdeo-api -o jsonpath='{.spec.triggers}' | python3 -m json.tool
```

Find the `github` trigger entry and copy the `secret` value. Then construct the
full webhook URL:

```
https://api.2.rahti.csc.fi:6443/apis/build.openshift.io/v1/namespaces/<project>/buildconfigs/desdeo-api/webhooks/<secret>/github
```

In GitHub, go to your fork: **Settings → Webhooks → Add webhook**

- Payload URL: the URL constructed above
- Content type: `application/json` — this is required; the default `x-www-form-urlencoded` will be rejected by OpenShift
- Secret: leave blank
- Events: Just the push event

Repeat for `desdeo-webui` using `bc/desdeo-webui`.

!!! note
    `oc describe bc/desdeo-api` always shows `<secret>` as a placeholder in the
    webhook URL regardless of how the secret is stored; this is a display-only
    mask. Always use `oc get bc -o jsonpath` as shown above to retrieve the
    actual secret value. 
+

## Troubleshooting

| Symptom | Cause | Fix |
|---|---|---|
| API pod crashes on startup with `ValidationError: authjwt_secret_key` | `AUTHJWT_SECRET` env var missing or key name wrong | Verify the key name in the Secret matches exactly what the Deployment references |
| API pod crashes with DB connection error | `DB_HOST`, `DB_PORT`, `DB_NAME`, `DB_USER`, or `DB_PASSWORD` missing or incorrect | Run `oc describe secret desdeo-secrets` and compare key names with the Deployment's `secretKeyRef` fields |
| Webui pod never starts; `FailedCreate` in ReplicaSet events | CPU limit-to-request ratio exceeds 5:1 | Adjust `resources.requests.cpu` so that `limits.cpu / requests.cpu ≤ 5` |
| Login returns 500; logs show `TypeError: Invalid URL` | `API_BASE_URL` env var not set on the webui pod | Set `API_BASE_URL=http://desdeo-api:8080` in the webui Deployment |
| Build fails with `exit status 137` | Build pod out of memory | Set `spec.resources.limits.memory: 4Gi` in the BuildConfig and add `NODE_OPTIONS=--max-old-space-size=3072` to `dockerStrategy.env` |
| Build fails with `pip install --group` error | Default S2I assemble script used instead of the custom one | Ensure `.s2i/bin/assemble` is present in the repo and uses `uv sync --frozen` |
| `uv sync` fails with lockfile conflict | `uv.lock` is out of sync with `pyproject.toml` | Run `uv lock` locally and commit the updated lockfile |
| Database init job fails with import errors | `DESDEO_PRODUCTION` not set, API falls back to SQLite debug mode | Ensure `DESDEO_PRODUCTION=true` is set in the Job's env spec |
| GitHub webhook returns 401 | Content type set to `application/x-www-form-urlencoded` | Change content type to `application/json` in the GitHub webhook settings |


## Known limitations

Schema migrations: `db_init_prod.py` uses `SQLModel.metadata.create_all`,
which creates missing tables but does not ALTER existing ones. 
If the data +model changes in a later release, tables must be migrated manually or via, e.g., +Alembic before redeploying. diff --git a/docs/howtoguides/index.md b/docs/howtoguides/index.md index f366caa71..3653ca79d 100644 --- a/docs/howtoguides/index.md +++ b/docs/howtoguides/index.md @@ -33,4 +33,5 @@ Guides are goal-oriented and are meant to direct users towards specific goals wh - **[Running the web-API and web-GUI](api_and_gui.md):** How to run the web-API and web-GUI - **[Hosting on Kubernetes](kubernetes.md):** How to host a DESDEO web application on Kubernetes +- **[Deploying on OpenShift](deploying_on_openshift.md):** How to deploy DESDEO on OpenShift/OKD using `oc` CLI and YAML manifests (Rahti example) - **[Implementing method interfaces](implementing_method_interfaces.md):** How to implement new interactive method interfaces in the Web-GUI diff --git a/mkdocs.yml b/mkdocs.yml index ff05c53c8..4b2483e4d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -36,6 +36,7 @@ nav: - "How to define multiobjective optimization problems": "howtoguides/problem.md" - "How to define a multiobjective optimization problem": "howtoguides/how_to_define_a_problem.ipynb" - "How to host a DESDEO web application on Kubernetes": "howtoguides/kubernetes.md" + - "How to deploy DESDEO on OpenShift": "howtoguides/deploying_on_openshift.md" - "How to run DESDEO's web-API and web-GUI": "howtoguides/api_and_gui.md" - "How to implement method interfaces in the Web-GUI": "howtoguides/implementing_method_interfaces.md" - "How to utilize 'MCDM' methods": "howtoguides/how_to_utilize_mcdm_methods.ipynb" From 5ea4719a37c41aa47c09f659e64f1255cfffa76f Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Wed, 1 Apr 2026 09:18:27 +0300 Subject: [PATCH 18/24] Updated the webui README --- webui/README.md | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/webui/README.md b/webui/README.md index 37e96b0c7..5a69afda1 100644 --- a/webui/README.md 
+++ b/webui/README.md @@ -2,37 +2,34 @@ ## Environment variables -For the frontend to work correctly, there are some environmental variables -that should be set in an`.env` file at the root level. These variables are: +For the frontend to work correctly, there are some environment variables that +should be set in a `.env` file at the root of the `webui/` directory. These +variables are: -- `VITE_API_URL` which should be defined to be '/api' for the proxy to work correctly. I.e.: +- `VITE_API_URL` — set to `"/api"` so that client-side code routes requests + through the SvelteKit catch-all proxy at `src/routes/api/[...path]/+server.ts`: ```bash VITE_API_URL="/api" ``` -- `API_URL` which should be defined to be 'http://localhost:8000 or the path of the server' +- `API_BASE_URL` — the URL of the running DESDEO web-API, used by server-side + route handlers and by `orval.config.mjs` when generating the OpenAPI client: ```bash -API_URL=http://localhost:8000 +API_BASE_URL=http://localhost:8000 ``` -Check also the file `vite.config.ts`, where in the server setting - -```toml - server: { - proxy: { - '/api': { - target: 'http://127.0.0.1:8000', - changeOrigin: true, - secure: false, - rewrite: (path) => path.replace(/^\/api/, '') - } - } - } +A minimal `.env` for local development therefore looks like: + +```bash +API_BASE_URL="http://localhost:8000" +VITE_API_URL="/api" ``` -the `target` should point to the local URL that can be used to access the DESDEO web-API. +> **Note:** `VITE_API_URL` is baked into the client bundle at build time by +> Vite, so changing it after a build has no effect. `API_BASE_URL` is read at +> runtime by the Node.js server process. ## Installing @@ -92,8 +89,10 @@ npm run dev -- --open When the web-API is updated, it is important to update the OpenAPI clients, which automatically use the schemas defined in the web-API on the GUI side. 
To -generate them, make sure the web-API is running on the URL defined in `OPENAPI_URL` in the file -`orval.config.mjs`, and issue the command: +generate them, make sure the web-API is running at the URL defined in +`OPENAPI_URL` inside `orval.config.mjs` (defaults to +`http://localhost:8000/openapi.json`), and that `API_BASE_URL` is set in your +`.env` file, then run: ```bash npm run generate:client From 774eb8d17ef20a0cdc08dc8427119687bd0156a3 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Wed, 1 Apr 2026 14:30:31 +0300 Subject: [PATCH 19/24] Web-API, Web-GUI - Add /manage-users page for analysts and admins to create DM and analyst accounts. - Guard the route server-side; restrict POST /add_new_dm API endpoint to analyst/admin (was public) - Populate auth store on topbar mount via /user_info to enable role-based nav visibility - Enable Orval Zod body schema generation and regenerate client - Fix API_BASE_URL in webui README --- desdeo/api/routers/user_authentication.py | 13 +- webui/orval.config.mjs | 11 ++ .../lib/components/ui/topbar/topbar.svelte | 29 +++- webui/src/lib/gen/endpoints/DESDEOFastAPI.ts | 24 +++ .../src/lib/gen/endpoints/DESDEOFastAPIzod.ts | 5 + webui/src/routes/manage-users/+layout.svelte | 10 ++ webui/src/routes/manage-users/+page.server.ts | 26 +++ webui/src/routes/manage-users/+page.svelte | 159 ++++++++++++++++++ webui/src/routes/manage-users/handler.ts | 53 ++++++ webui/src/routes/manage-users/userSchema.ts | 13 ++ 10 files changed, 340 insertions(+), 3 deletions(-) create mode 100644 webui/src/routes/manage-users/+layout.svelte create mode 100644 webui/src/routes/manage-users/+page.server.ts create mode 100644 webui/src/routes/manage-users/+page.svelte create mode 100644 webui/src/routes/manage-users/handler.ts create mode 100644 webui/src/routes/manage-users/userSchema.ts diff --git a/desdeo/api/routers/user_authentication.py b/desdeo/api/routers/user_authentication.py index f2a0402f4..f9cb3f5c1 100644 --- 
a/desdeo/api/routers/user_authentication.py +++ b/desdeo/api/routers/user_authentication.py @@ -475,12 +475,14 @@ def refresh_access_token( @router.post("/add_new_dm") def add_new_dm( + user: Annotated[User, Depends(get_current_user)], form_data: Annotated[OAuth2PasswordRequestForm, Depends()], session: Annotated[Session, Depends(get_session)], ) -> JSONResponse: - """Add a new user of the role Decision Maker to the database. Requires no login. + """Add a new user of the role Decision Maker to the database. Requires a logged in analyst or an admin. Args: + user: Annotated[User, Depends(get_current_user)]: Logged in user with the role "analyst" or "admin". form_data (Annotated[OAuth2PasswordRequestForm, Depends()]): The user credentials to add to the database. session (Annotated[Session, Depends(get_session)]): the database session. @@ -488,8 +490,15 @@ def add_new_dm( JSONResponse: A JSON response Raises: - HTTPException: if username is already in use or if saving to the database fails for some reason. + HTTPException: if the logged in user is not an analyst or an admin or if + username is already in use or if saving to the database fails for some reason. 
""" + if user.role not in (UserRole.analyst, UserRole.admin): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Logged in user has insufficient rights.", + ) + add_user_to_database( form_data=form_data, role=UserRole.dm, diff --git a/webui/orval.config.mjs b/webui/orval.config.mjs index 2b9466487..09533549d 100644 --- a/webui/orval.config.mjs +++ b/webui/orval.config.mjs @@ -36,6 +36,17 @@ export default defineConfig({ target: 'src/lib/gen/endpoints', namingConvention: "PascalCase", fileExtension: 'zod.ts', + override: { + zod: { + generate: { + body: true, + param: true, + query: true, + header: true, + response: true, + }, + }, + }, }, hooks: { afterAllFilesWrite: 'prettier --write', diff --git a/webui/src/lib/components/ui/topbar/topbar.svelte b/webui/src/lib/components/ui/topbar/topbar.svelte index 498291397..9a6c9a432 100644 --- a/webui/src/lib/components/ui/topbar/topbar.svelte +++ b/webui/src/lib/components/ui/topbar/topbar.svelte @@ -21,11 +21,15 @@ import Problem from '@lucide/svelte/icons/puzzle'; import Archive from '@lucide/svelte/icons/archive'; import HelpCircle from '@lucide/svelte/icons/circle-help'; - import { Button } from '$lib/components/ui/button/index.js'; + import UserPlus from '@lucide/svelte/icons/user-plus'; + import * as DropdownMenu from '$lib/components/ui/dropdown-menu/index.js'; import { goto } from '$app/navigation'; + import { onMount } from 'svelte'; + import { get } from 'svelte/store'; import { auth } from '../../../../stores/auth'; import { derived } from 'svelte/store'; + import { getCurrentUserInfoUserInfoGet } from '$lib/gen/endpoints/DESDEOFastAPI'; import desdeo_logo from '$lib/assets/desdeo_logo.svg'; async function logout() { @@ -43,12 +47,25 @@ goto('/home'); } + onMount(async () => { + if (!get(auth).user) { + const response = await getCurrentUserInfoUserInfoGet(); + if (response.status === 200) { + auth.setAuth('authenticated', response.data); + } + } + }); + const userDisplay = derived(auth, 
($auth) => { if ($auth.user) { return `${$auth.user.username} (${$auth.user.role})`; } return ''; }); + + const canManageUsers = derived(auth, ($auth) => + $auth.user?.role === 'analyst' || $auth.user?.role === 'admin' + );
@@ -89,6 +106,16 @@ + {#if $canManageUsers} + + + Users + + {/if} + { + return `http://localhost:8000/health`; +}; + +export const healthHealthGet = async (options?: RequestInit): Promise => { + return customFetch(getHealthHealthGetUrl(), { + ...options, + method: 'GET' + }); +}; diff --git a/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts b/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts index 94d5381c0..ad16a2a94 100644 --- a/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts +++ b/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts @@ -8006,3 +8006,8 @@ export const ConfigureGdmGdmScoreBandsConfigurePostBody = zod .describe('Configuration for the SCORE bands based GDM.'); export const ConfigureGdmGdmScoreBandsConfigurePostResponse = zod.unknown(); + +/** + * @summary Health + */ +export const HealthHealthGetResponse = zod.unknown(); diff --git a/webui/src/routes/manage-users/+layout.svelte b/webui/src/routes/manage-users/+layout.svelte new file mode 100644 index 000000000..6fd05e623 --- /dev/null +++ b/webui/src/routes/manage-users/+layout.svelte @@ -0,0 +1,10 @@ + + +
+ + +
+ diff --git a/webui/src/routes/manage-users/+page.server.ts b/webui/src/routes/manage-users/+page.server.ts new file mode 100644 index 000000000..a974e54d3 --- /dev/null +++ b/webui/src/routes/manage-users/+page.server.ts @@ -0,0 +1,26 @@ +import { redirect } from '@sveltejs/kit'; +import type { PageServerLoad } from './$types'; +import { getCurrentUserInfoUserInfoGet } from '$lib/gen/endpoints/DESDEOFastAPI'; + +export const load: PageServerLoad = async ({ cookies }) => { + const refreshToken = cookies.get('refresh_token'); + if (!refreshToken) { + return redirect(307, '/home'); + } + + const accessToken = cookies.get('access_token'); + const response = await getCurrentUserInfoUserInfoGet({ + headers: { Authorization: `Bearer ${accessToken}` }, + }); + + if (response.status !== 200) { + return redirect(307, '/home'); + } + + const role = response.data.role; + if (role !== 'analyst' && role !== 'admin') { + return redirect(307, '/dashboard'); + } + + return {}; +}; diff --git a/webui/src/routes/manage-users/+page.svelte b/webui/src/routes/manage-users/+page.svelte new file mode 100644 index 000000000..d5b5ece5b --- /dev/null +++ b/webui/src/routes/manage-users/+page.svelte @@ -0,0 +1,159 @@ + + + + Manage Users | DESDEO + + +
+

Manage Users

+

Create new user accounts.

+ +
+ + + + Add Decision Maker + Create a new decision maker account. + + +
+
+ + +
+
+ + +
+ + {#if dmResult} +

+ {dmResult.message} +

+ {/if} +
+
+
+ + + {#if isAnalystOrAdmin} + + + Add Analyst + Create a new analyst account. + + +
+
+ + +
+
+ + +
+ + {#if analystResult} +

+ {analystResult.message} +

+ {/if} +
+
+
+ {/if} +
+
diff --git a/webui/src/routes/manage-users/handler.ts b/webui/src/routes/manage-users/handler.ts new file mode 100644 index 000000000..b706ad9bb --- /dev/null +++ b/webui/src/routes/manage-users/handler.ts @@ -0,0 +1,53 @@ +import { + addNewDmAddNewDmPost, + addNewAnalystAddNewAnalystPost, +} from '$lib/gen/endpoints/DESDEOFastAPI'; +import type { + BodyAddNewDmAddNewDmPost, + BodyAddNewAnalystAddNewAnalystPost, +} from '$lib/gen/endpoints/DESDEOFastAPI'; + +export type UserCreateResult = { success: boolean; message: string }; + +// Note: /add_new_dm and /add_new_analyst use FastAPI's OAuth2PasswordRequestForm +// (application/x-www-form-urlencoded). Authentication for the analyst endpoint is +// handled transparently by the SvelteKit proxy and hooks.server.ts, which attach +// the access_token cookie to outgoing requests — no manual token passing needed here. + +export async function addDm(username: string, password: string): Promise { + const body: BodyAddNewDmAddNewDmPost = { username, password, scope: '' }; + const response = await addNewDmAddNewDmPost(body); + const status = response.status as number; + + if (status === 401) { + return { success: false, message: 'Unauthorized: analyst or admin role required.' }; + } + if (status === 409) { + return { success: false, message: 'A user with that username already exists.' }; + } + if (status !== 201) { + console.error('addDm failed.', status); + return { success: false, message: 'Failed to create user. Please try again.' }; + } + + return { success: true, message: 'Decision maker created successfully.' }; +} + +export async function addAnalyst(username: string, password: string): Promise { + const body: BodyAddNewAnalystAddNewAnalystPost = { username, password, scope: '' }; + const response = await addNewAnalystAddNewAnalystPost(body); + const status = response.status as number; + + if (status === 401) { + return { success: false, message: 'Unauthorized: analyst or admin role required.' 
}; + } + if (status === 409) { + return { success: false, message: 'A user with that username already exists.' }; + } + if (status !== 201) { + console.error('addAnalyst failed.', status); + return { success: false, message: 'Failed to create analyst. Please try again.' }; + } + + return { success: true, message: 'Analyst created successfully.' }; +} diff --git a/webui/src/routes/manage-users/userSchema.ts b/webui/src/routes/manage-users/userSchema.ts new file mode 100644 index 000000000..7ca19df06 --- /dev/null +++ b/webui/src/routes/manage-users/userSchema.ts @@ -0,0 +1,13 @@ +import { z } from "zod"; + +// The /add_new_dm and /add_new_analyst endpoints use FastAPI's OAuth2PasswordRequestForm, +// which sends data as application/x-www-form-urlencoded — not a JSON body. Orval does not +// generate Zod schemas for form-encoded request bodies, so this schema is defined manually. +// The generated TypeScript interfaces (BodyAddNewDmAddNewDmPost, BodyAddNewAnalystAddNewAnalystPost) +// are still used when constructing the actual API call body. +export const newUserSchema = z.object({ + username: z.string().min(1, "Username is required"), + password: z.string().min(1, "Password is required"), +}); + +export type FormMessage = { success: boolean; text: string }; From 6ed3f018b3034cefe173ed8cb84bb9fe1fef83a1 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Wed, 1 Apr 2026 15:06:59 +0300 Subject: [PATCH 20/24] Web-API - Added endpoint to get a list of DM users. - Modified endpoints for adding problems so that admin and analyst users may add problems for other users. - Tested the new and modified endpoints. 
--- desdeo/api/routers/problem.py | 38 ++++- desdeo/api/routers/user_authentication.py | 26 ++++ desdeo/api/tests/test_problem_on_behalf.py | 164 +++++++++++++++++++++ desdeo/api/tests/test_routes.py | 16 +- uv.lock | 2 +- 5 files changed, 238 insertions(+), 8 deletions(-) create mode 100644 desdeo/api/tests/test_problem_on_behalf.py diff --git a/desdeo/api/routers/problem.py b/desdeo/api/routers/problem.py index b1924fe61..df74cc542 100644 --- a/desdeo/api/routers/problem.py +++ b/desdeo/api/routers/problem.py @@ -116,12 +116,15 @@ def get_problem( def add_problem( request: Annotated[Problem, Depends(parse_problem_json)], context: Annotated[SessionContext, Depends(SessionContextGuard().post)], + target_user_id: int | None = None, ) -> ProblemInfo: """Add a newly defined problem to the database. Args: request (Problem): the JSON representation of the problem. context (Annotated[SessionContext, Depends): the session context. + target_user_id (int | None): if provided, assign the problem to this user instead of + the caller. Only analysts and admins may use this parameter. Note: Users with the role 'guest' may not add new problems. 
@@ -141,8 +144,22 @@ def add_problem( detail="Guest users are not allowed to add new problems.", ) + effective_user = user + if target_user_id is not None: + if user.role not in (UserRole.analyst, UserRole.admin): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Only analysts and admins can add problems on behalf of other users.", + ) + effective_user = db_session.get(User, target_user_id) + if effective_user is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id={target_user_id} not found.", + ) + try: - problem_db = ProblemDB.from_problem(request, user=user) + problem_db = ProblemDB.from_problem(request, user=effective_user) except Exception as e: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, @@ -160,12 +177,15 @@ def add_problem( def add_problem_json( json_file: UploadFile, context: Annotated[SessionContext, Depends(SessionContextGuard().post)], + target_user_id: int | None = None, ) -> ProblemInfo: """Adds a problem to the database based on its JSON definition. Args: json_file (UploadFile): a file in JSON format describing the problem. context (Annotated[SessionContext, Depends): the session context. + target_user_id (int | None): if provided, assign the problem to this user instead of + the caller. Only analysts and admins may use this parameter. Raises: HTTPException: if the provided `json_file` is empty. 
@@ -177,6 +197,20 @@ def add_problem_json( user = context.user db_session = context.db_session + effective_user = user + if target_user_id is not None: + if user.role not in (UserRole.analyst, UserRole.admin): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Only analysts and admins can add problems on behalf of other users.", + ) + effective_user = db_session.get(User, target_user_id) + if effective_user is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id={target_user_id} not found.", + ) + raw = json_file.file.read() if not raw: @@ -188,7 +222,7 @@ def add_problem_json( raise HTTPException(status_code=400, detail="Invalid JSON.") from e problem = Problem.model_validate_json(raw, by_name=True) - problem_db = ProblemDB.from_problem(problem, user=user) + problem_db = ProblemDB.from_problem(problem, user=effective_user) db_session.add(problem_db) db_session.commit() diff --git a/desdeo/api/routers/user_authentication.py b/desdeo/api/routers/user_authentication.py index f9cb3f5c1..dcaa7941b 100644 --- a/desdeo/api/routers/user_authentication.py +++ b/desdeo/api/routers/user_authentication.py @@ -335,6 +335,32 @@ def add_user_to_database( ) +@router.get("/users/dms") +def get_dm_users( + user: Annotated[User, Depends(get_current_user)], + session: Annotated[Session, Depends(get_session)], +) -> list[UserPublic]: + """Return all users with the decision maker role. Requires analyst or admin. + + Args: + user (Annotated[User, Depends]): the current user. + session (Annotated[Session, Depends]): the database session. + + Returns: + list[UserPublic]: public information for all DM users. + + Raises: + HTTPException: if the current user is not an analyst or admin. 
+ """ + if user.role not in (UserRole.analyst, UserRole.admin): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Only analysts and admins can list users.", + ) + statement = select(User).where(User.role == UserRole.dm) + return list(session.exec(statement).all()) + + @router.get("/user_info") def get_current_user_info(user: Annotated[User, Depends(get_current_user)]) -> UserPublic: """Return information about the current user. diff --git a/desdeo/api/tests/test_problem_on_behalf.py b/desdeo/api/tests/test_problem_on_behalf.py new file mode 100644 index 000000000..85ecdb308 --- /dev/null +++ b/desdeo/api/tests/test_problem_on_behalf.py @@ -0,0 +1,164 @@ +"""Tests for analyst adding problems on behalf of decision makers.""" + +from fastapi import status +from fastapi.testclient import TestClient + +from desdeo.api.models import ProblemInfo, UserPublic, UserRole +from desdeo.problem.testproblems import simple_knapsack_vectors + +from .conftest import get_json, login, post_file_multipart, post_json + + +def _add_dm(client: TestClient, analyst_token: str, username: str, password: str) -> None: + """Helper: create a DM user via the API.""" + response = client.post( + "/add_new_dm", + data={"username": username, "password": password, "grant_type": "password"}, + headers={"Authorization": f"Bearer {analyst_token}", "content-type": "application/x-www-form-urlencoded"}, + ) + assert response.status_code == status.HTTP_201_CREATED + + +def test_list_dms_as_analyst(client: TestClient): + """Analyst can retrieve the list of DM users.""" + analyst_token = login(client) + + # No DMs yet — list should be empty + response = get_json(client, "/users/dms", analyst_token) + assert response.status_code == status.HTTP_200_OK + assert response.json() == [] + + # Create two DM users + _add_dm(client, analyst_token, "dm_one", "dm_one") + _add_dm(client, analyst_token, "dm_two", "dm_two") + + response = get_json(client, "/users/dms", analyst_token) + assert 
response.status_code == status.HTTP_200_OK + + dms = [UserPublic.model_validate(u) for u in response.json()] + usernames = {dm.username for dm in dms} + assert usernames == {"dm_one", "dm_two"} + assert all(dm.role == UserRole.dm for dm in dms) + + +def test_list_dms_as_dm_forbidden(client: TestClient): + """DM users cannot list other DM users.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_user", "dm_user") + + dm_token = login(client, username="dm_user", password="dm_user") # noqa: S106 + response = get_json(client, "/users/dms", dm_token) + assert response.status_code == status.HTTP_403_FORBIDDEN + + +def test_list_dms_unauthenticated(client: TestClient): + """Unauthenticated requests to /users/dms are rejected.""" + response = client.get("/users/dms") + assert response.status_code == status.HTTP_401_UNAUTHORIZED + + +def test_add_problem_for_dm_as_analyst(client: TestClient, session_and_user: dict): + """Analyst can add a problem that is owned by a DM.""" + analyst_token = login(client) + + # Create a DM + _add_dm(client, analyst_token, "target_dm", "target_dm") + dm_token = login(client, username="target_dm", password="target_dm") # noqa: S106 + + # Fetch DM id from /users/dms + dms = get_json(client, "/users/dms", analyst_token).json() + dm_id = next(u["id"] for u in dms if u["username"] == "target_dm") + + # Analyst submits a problem on behalf of the DM + problem = simple_knapsack_vectors() + response = post_json(client, f"/problem/add?target_user_id={dm_id}", problem.model_dump(), analyst_token) + assert response.status_code == status.HTTP_200_OK + + info = ProblemInfo.model_validate(response.json()) + assert info.name == "Simple two-objective Knapsack problem" + + # DM should now own the problem + dm_problems = get_json(client, "/problem/all", dm_token).json() + assert any(p["name"] == "Simple two-objective Knapsack problem" for p in dm_problems) + + # Analyst should NOT own the problem + analyst_problems = get_json(client, 
"/problem/all", analyst_token).json() + assert not any(p["name"] == "Simple two-objective Knapsack problem" for p in analyst_problems) + + +def test_add_problem_for_dm_as_dm_forbidden(client: TestClient): + """A DM cannot add a problem on behalf of another user.""" + analyst_token = login(client) + + # Create two DMs + _add_dm(client, analyst_token, "dm_a", "dm_a") + _add_dm(client, analyst_token, "dm_b", "dm_b") + + # Fetch dm_b's id + dms = get_json(client, "/users/dms", analyst_token).json() + dm_b_id = next(u["id"] for u in dms if u["username"] == "dm_b") + + # dm_a tries to add a problem for dm_b — should be forbidden + dm_a_token = login(client, username="dm_a", password="dm_a") # noqa: S106 + problem = simple_knapsack_vectors() + response = post_json(client, f"/problem/add?target_user_id={dm_b_id}", problem.model_dump(), dm_a_token) + assert response.status_code == status.HTTP_403_FORBIDDEN + + +def test_add_problem_for_nonexistent_user(client: TestClient): + """Adding a problem for a non-existent target_user_id returns 404.""" + analyst_token = login(client) + problem = simple_knapsack_vectors() + response = post_json(client, "/problem/add?target_user_id=99999", problem.model_dump(), analyst_token) + assert response.status_code == status.HTTP_404_NOT_FOUND + + +def test_add_problem_json_for_dm_as_analyst(client: TestClient): + """Analyst can upload a JSON problem file on behalf of a DM.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "json_dm", "json_dm") + dm_token = login(client, username="json_dm", password="json_dm") # noqa: S106 + + dms = get_json(client, "/users/dms", analyst_token).json() + dm_id = next(u["id"] for u in dms if u["username"] == "json_dm") + + # Serialize a problem to JSON bytes + problem = simple_knapsack_vectors() + problem_bytes = problem.model_dump_json(by_alias=True).encode() + + response = post_file_multipart( + client, + f"/problem/add_json?target_user_id={dm_id}", + problem_bytes, + analyst_token, + ) + 
assert response.status_code == status.HTTP_200_OK + + info = ProblemInfo.model_validate(response.json()) + assert info.name == "Simple two-objective Knapsack problem" + + # DM should own the problem + dm_problems = get_json(client, "/problem/all", dm_token).json() + assert any(p["name"] == "Simple two-objective Knapsack problem" for p in dm_problems) + + +def test_add_problem_json_for_dm_as_dm_forbidden(client: TestClient): + """A DM cannot upload a JSON problem on behalf of another user.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "jdm_a", "jdm_a") + _add_dm(client, analyst_token, "jdm_b", "jdm_b") + + dms = get_json(client, "/users/dms", analyst_token).json() + jdm_b_id = next(u["id"] for u in dms if u["username"] == "jdm_b") + + jdm_a_token = login(client, username="jdm_a", password="jdm_a") # noqa: S106 + problem = simple_knapsack_vectors() + problem_bytes = problem.model_dump_json(by_alias=True).encode() + + response = post_file_multipart( + client, + f"/problem/add_json?target_user_id={jdm_b_id}", + problem_bytes, + jdm_a_token, + ) + assert response.status_code == status.HTTP_403_FORBIDDEN diff --git a/desdeo/api/tests/test_routes.py b/desdeo/api/tests/test_routes.py index 74df63135..70fbbd1ec 100644 --- a/desdeo/api/tests/test_routes.py +++ b/desdeo/api/tests/test_routes.py @@ -683,11 +683,13 @@ def test_nimbus_save_and_delete_save(client: TestClient): def test_add_new_dm(client: TestClient): """Test that adding a decision maker works.""" + access_token = login(client) + # Create a new user to the database good_response = client.post( "/add_new_dm", data={"username": "new_dm", "password": "new_dm", "grant_type": "password"}, - headers={"content-type": "application/x-www-form-urlencoded"}, + headers={"Authorization": f"Bearer {access_token}", "content-type": "application/x-www-form-urlencoded"}, ) assert good_response.status_code == status.HTTP_201_CREATED @@ -695,7 +697,7 @@ def test_add_new_dm(client: TestClient): bad_response = 
client.post( "/add_new_dm", data={"username": "new_dm", "password": "new_dm", "grant_type": "password"}, - headers={"content-type": "application/x-www-form-urlencoded"}, + headers={"Authorization": f"Bearer {access_token}", "content-type": "application/x-www-form-urlencoded"}, ) assert bad_response.status_code == status.HTTP_409_CONFLICT @@ -713,10 +715,14 @@ def test_add_new_analyst(client: TestClient): assert nologin_response.status_code == status.HTTP_401_UNAUTHORIZED # Try to create an analyst using a dm account. + analyst_token_for_setup = login(client) response = client.post( "/add_new_dm", data={"username": "new_dm", "password": "new_dm", "grant_type": "password"}, - headers={"content-type": "application/x-www-form-urlencoded"}, + headers={ + "Authorization": f"Bearer {analyst_token_for_setup}", + "content-type": "application/x-www-form-urlencoded", + }, ) assert response.status_code == status.HTTP_201_CREATED @@ -836,7 +842,7 @@ def get_user_info(token: str): response = client.post( "/add_new_dm", data={"username": "new_dm", "password": "new_dm", "grant_type": "password"}, - headers={"content-type": "application/x-www-form-urlencoded"}, + headers={"Authorization": f"Bearer {access_token}", "content-type": "application/x-www-form-urlencoded"}, ) assert response.status_code == status.HTTP_201_CREATED @@ -1047,7 +1053,7 @@ def test_gdm_score_bands(client: TestClient): response = client.post( "/add_new_dm", data={"username": "dm", "password": "dm", "grant_type": "password"}, - headers={"content-type": "application/x-www-form-urlencoded"}, + headers={"Authorization": f"Bearer {access_token}", "content-type": "application/x-www-form-urlencoded"}, ) assert response.status_code == 201 diff --git a/uv.lock b/uv.lock index ed589fa76..c7b8be916 100644 --- a/uv.lock +++ b/uv.lock @@ -694,7 +694,7 @@ wheels = [ [[package]] name = "desdeo" -version = "2.2.2" +version = "2.3.0" source = { editable = "." 
} dependencies = [ { name = "bayesian-optimization" }, From 59c89e80ec27046e53788d13b6f947660d80e969 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Wed, 1 Apr 2026 15:58:13 +0300 Subject: [PATCH 21/24] Web-API, Web-GUI - Analysts and admins can view, delete, and manage all users' problems - Added fetch_problem_with_role_check helper in utils.py (role-aware ownership bypass) - Problem listing endpoints return all problems for analysts, own only for DMs - Problem action endpoints (delete, solver, JSON, repr sets) allow analyst access to any problem - Problems UI: user filter dropdown (default "Myself"), Owner field in detail panel - Problems define page: DM selector for analyst to assign problem to a DM on creation - Fixed test_delete_problem_unauthorized (analyst role changed to DM) - Regenerated orval client with new DM users endpoint and target_user_id params --- desdeo/api/routers/problem.py | 56 ++++++--- desdeo/api/routers/utils.py | 54 ++++---- desdeo/api/tests/test_delete_problem.py | 6 +- desdeo/api/tests/test_problem_on_behalf.py | 119 +++++++++++++++++- webui/src/lib/gen/endpoints/DESDEOFastAPI.ts | 59 ++++++++- .../src/lib/gen/endpoints/DESDEOFastAPIzod.ts | 46 ++++++- webui/src/routes/problems/+page.svelte | 76 ++++++++++- webui/src/routes/problems/+page.ts | 15 ++- webui/src/routes/problems/define/+page.svelte | 40 +++++- webui/src/routes/problems/define/handler.ts | 14 ++- 10 files changed, 407 insertions(+), 78 deletions(-) diff --git a/desdeo/api/routers/problem.py b/desdeo/api/routers/problem.py index df74cc542..5222d309a 100644 --- a/desdeo/api/routers/problem.py +++ b/desdeo/api/routers/problem.py @@ -5,8 +5,9 @@ from fastapi import APIRouter, Depends, HTTPException, Request, UploadFile, status from fastapi.responses import JSONResponse -from sqlmodel import Session +from sqlmodel import Session, select +from desdeo.api.db import get_session from desdeo.api.models import ( ForestProblemMetaData, ProblemDB, @@ -64,28 +65,40 @@ async def 
parse_problem_json(request: Request) -> Problem: @router.get("/all") -def get_problems(user: Annotated[User, Depends(get_current_user)]) -> list[ProblemInfoSmall]: - """Get information on all the current user's problems. +def get_problems( + user: Annotated[User, Depends(get_current_user)], + db_session: Annotated[Session, Depends(get_session)], +) -> list[ProblemInfoSmall]: + """Get information on problems. Analysts and admins see all users' problems. Args: user (Annotated[User, Depends): the current user. + db_session (Annotated[Session, Depends]): the database session. Returns: - list[ProblemInfoSmall]: a list of information on all the problems. + list[ProblemInfoSmall]: a list of information on the problems. """ + if user.role in (UserRole.analyst, UserRole.admin): + return list(db_session.exec(select(ProblemDB)).all()) return user.problems @router.get("/all_info") -def get_problems_info(user: Annotated[User, Depends(get_current_user)]) -> list[ProblemInfo]: - """Get detailed information on all the current user's problems. +def get_problems_info( + user: Annotated[User, Depends(get_current_user)], + db_session: Annotated[Session, Depends(get_session)], +) -> list[ProblemInfo]: + """Get detailed information on problems. Analysts and admins see all users' problems. Args: user (Annotated[User, Depends): the current user. + db_session (Annotated[Session, Depends]): the database session. Returns: - list[ProblemInfo]: a list of the detailed information on all the problems. + list[ProblemInfo]: a list of the detailed information on the problems. 
""" + if user.role in (UserRole.analyst, UserRole.admin): + return list(db_session.exec(select(ProblemDB)).all()) return user.problems @@ -299,10 +312,10 @@ def select_solver( ) # Auth the user - if user.id != problem_db.user_id: + if user.role not in (UserRole.analyst, UserRole.admin) and user.id != problem_db.user_id: raise HTTPException( detail="Unauthorized user!", - status_code=status.HTTP_401_UNAUTHORIZED, + status_code=status.HTTP_403_FORBIDDEN, ) # All good, get on with it. @@ -406,8 +419,8 @@ def get_all_representative_solution_sets( raise HTTPException(status_code=404, detail=f"Problem with ID {problem_id} not found.") # Check the user - if problem_db.user_id != user.id: - raise HTTPException(status_code=401, detail="Unauthorized user.") + if user.role not in (UserRole.analyst, UserRole.admin) and problem_db.user_id != user.id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Unauthorized user.") # Fetch metadata problem_metadata = problem_db.problem_metadata @@ -442,8 +455,11 @@ def get_representative_solution_set( raise HTTPException(status_code=404, detail=f"Representative set with ID {set_id} not found.") # Check the user - if repr_set.metadata_instance.problem.user_id != context.user.id: - raise HTTPException(status_code=401, detail="Unauthorized user.") + if ( + context.user.role not in (UserRole.analyst, UserRole.admin) + and repr_set.metadata_instance.problem.user_id != context.user.id + ): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Unauthorized user.") # Return all fields as a dict return RepresentativeSolutionSetFull( @@ -471,10 +487,10 @@ def delete_representative_solution_set( if repr_metadata is None: raise HTTPException(status_code=404, detail=f"Representative solution set with ID {set_id} not found.") - # Ensure the user owns the problem this set belongs to + # Ensure the user owns the problem this set belongs to (analysts/admins are exempt) problem_metadata = repr_metadata.metadata_instance - 
if problem_metadata.problem.user_id != user.id: - raise HTTPException(status_code=401, detail="Unauthorized user.") + if user.role not in (UserRole.analyst, UserRole.admin) and problem_metadata.problem.user_id != user.id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Unauthorized user.") # Delete the set db_session.delete(repr_metadata) @@ -494,8 +510,8 @@ def delete_problem( if problem_db is None: raise HTTPException(status_code=404, detail=f"Problem with ID {problem_id} not found.") - if problem_db.user_id != user.id: - raise HTTPException(status_code=401, detail="Unauthorized user.") + if user.role not in (UserRole.analyst, UserRole.admin) and problem_db.user_id != user.id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Unauthorized user.") db_session.delete(problem_db) db_session.commit() @@ -514,8 +530,8 @@ def get_problem_json( if problem_db is None: raise HTTPException(status_code=404, detail=f"Problem with ID {problem_id} not found.") - if problem_db.user_id != user.id: - raise HTTPException(status_code=401, detail="Unauthorized user.") + if user.role not in (UserRole.analyst, UserRole.admin) and problem_db.user_id != user.id: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Unauthorized user.") problem = Problem.from_problemdb(problem_db) return JSONResponse(content=json.loads(problem.model_dump_json()), status_code=status.HTTP_200_OK) diff --git a/desdeo/api/routers/utils.py b/desdeo/api/routers/utils.py index 7f2caba46..e8cb6db44 100644 --- a/desdeo/api/routers/utils.py +++ b/desdeo/api/routers/utils.py @@ -19,6 +19,7 @@ RPMSolveRequest, StateDB, User, + UserRole, ) from desdeo.api.models.session import CreateSessionRequest from desdeo.api.routers.user_authentication import get_current_user @@ -26,6 +27,27 @@ RequestType = RPMSolveRequest | ENautilusStepRequest | CreateSessionRequest +def fetch_problem_with_role_check(user: User, problem_id: int, session: Session) -> ProblemDB | None: + 
"""Fetch a ProblemDB by id, bypassing ownership for analysts and admins. + + Args: + user (User): the requesting user. + problem_id (int): id of the problem to fetch. + session (Session): the database session. + + Returns: + ProblemDB | None: the matching problem, or None if not found. + """ + if user.role in (UserRole.analyst, UserRole.admin): + statement = select(ProblemDB).where(ProblemDB.id == problem_id) + else: + statement = select(ProblemDB).where( + ProblemDB.user_id == user.id, + ProblemDB.id == problem_id, + ) + return session.exec(statement).first() + + def fetch_interactive_session( user: User, session: Session, @@ -218,18 +240,11 @@ def post( parent_state = None if request is not None: - if hasattr(request, "problem_id"): - problem_db = fetch_user_problem(user, request, db_session) + if hasattr(request, "problem_id") and request.problem_id is not None: + problem_db = fetch_problem_with_role_check(user, request.problem_id, db_session) if problem_db is None and problem_id is not None: - - class _ProblemOnly: - def __init__(self, problem_id: int): - self.problem_id = problem_id - self.session_id = None - self.parent_state_id = None - - problem_db = fetch_user_problem(user, _ProblemOnly(problem_id), db_session) + problem_db = fetch_problem_with_role_check(user, problem_id, db_session) if hasattr(request, "interactive_session_id") or hasattr(request, "problem_id"): interactive_session = fetch_interactive_session(user, db_session, request) @@ -242,15 +257,7 @@ def __init__(self, problem_id: int): interactive_session=interactive_session, ) elif problem_id is not None: - - class _ProblemOnly: - def __init__(self, problem_id: int): - self.problem_id = problem_id - self.session_id = None - self.parent_state_id = None - - pseudo_request = _ProblemOnly(problem_id) - problem_db = fetch_user_problem(user, pseudo_request, db_session) + problem_db = fetch_problem_with_role_check(user, problem_id, db_session) context = SessionContext( user=user, @@ -275,14 +282,7 @@ 
def get( interactive_session = None if problem_id is not None: - - class _ProblemOnly: - def __init__(self, problem_id: int): - self.problem_id = problem_id - self.session_id = None - self.parent_state_id = None - - problem_db = fetch_user_problem(user, _ProblemOnly(problem_id), db_session) + problem_db = fetch_problem_with_role_check(user, problem_id, db_session) if session_id is not None or (problem_id is not None): interactive_session = fetch_interactive_session( diff --git a/desdeo/api/tests/test_delete_problem.py b/desdeo/api/tests/test_delete_problem.py index 20ddda0d0..99c723e60 100644 --- a/desdeo/api/tests/test_delete_problem.py +++ b/desdeo/api/tests/test_delete_problem.py @@ -100,11 +100,11 @@ def test_delete_problem_unauthorized(client: TestClient, session_and_user: dict) session: Session = session_and_user["session"] user: User = session_and_user["user"] - # Create a second user + # Create a second user (DM — cannot delete other users' problems) other_user = User( username="other", password_hash=get_password_hash("other"), - role=UserRole.analyst, + role=UserRole.dm, group="test", ) session.add(other_user) @@ -126,7 +126,7 @@ def test_delete_problem_unauthorized(client: TestClient, session_and_user: dict) headers={"Authorization": f"Bearer {other_token}"}, ) - assert response.status_code == status.HTTP_401_UNAUTHORIZED + assert response.status_code == status.HTTP_403_FORBIDDEN # Problem should still exist assert session.get(ProblemDB, problem_id) is not None diff --git a/desdeo/api/tests/test_problem_on_behalf.py b/desdeo/api/tests/test_problem_on_behalf.py index 85ecdb308..cc65753b3 100644 --- a/desdeo/api/tests/test_problem_on_behalf.py +++ b/desdeo/api/tests/test_problem_on_behalf.py @@ -81,9 +81,10 @@ def test_add_problem_for_dm_as_analyst(client: TestClient, session_and_user: dic dm_problems = get_json(client, "/problem/all", dm_token).json() assert any(p["name"] == "Simple two-objective Knapsack problem" for p in dm_problems) - # Analyst should 
NOT own the problem - analyst_problems = get_json(client, "/problem/all", analyst_token).json() - assert not any(p["name"] == "Simple two-objective Knapsack problem" for p in analyst_problems) + # Analyst sees all problems (including the DM's), but user_id must belong to the DM + all_problems = get_json(client, "/problem/all_info", analyst_token).json() + created = next(p for p in all_problems if p["name"] == "Simple two-objective Knapsack problem") + assert created["user_id"] == dm_id def test_add_problem_for_dm_as_dm_forbidden(client: TestClient): @@ -162,3 +163,115 @@ def test_add_problem_json_for_dm_as_dm_forbidden(client: TestClient): jdm_a_token, ) assert response.status_code == status.HTTP_403_FORBIDDEN + + +def _add_problem_as_dm(client: TestClient, dm_token: str) -> int: + """Helper: DM adds a problem and returns its id.""" + problem = simple_knapsack_vectors() + response = post_json(client, "/problem/add", problem.model_dump(), dm_token) + assert response.status_code == status.HTTP_200_OK + return response.json()["id"] + + +def test_analyst_sees_all_problems(client: TestClient): + """Analyst sees problems from all users in GET /problem/all_info.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "vis_dm", "vis_dm") + dm_token = login(client, username="vis_dm", password="vis_dm") # noqa: S106 + + dm_problem_id = _add_problem_as_dm(client, dm_token) + + response = get_json(client, "/problem/all_info", analyst_token) + assert response.status_code == status.HTTP_200_OK + ids = [p["id"] for p in response.json()] + assert dm_problem_id in ids + + +def test_dm_only_sees_own_problems(client: TestClient): + """A DM cannot see another DM's problems in GET /problem/all_info.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_x", "dm_x") + _add_dm(client, analyst_token, "dm_y", "dm_y") + + dm_x_token = login(client, username="dm_x", password="dm_x") # noqa: S106 + dm_y_token = login(client, username="dm_y", password="dm_y") # 
noqa: S106 + + dm_x_problem_id = _add_problem_as_dm(client, dm_x_token) + + # dm_y lists problems — must not see dm_x's problem + response = get_json(client, "/problem/all_info", dm_y_token) + assert response.status_code == status.HTTP_200_OK + ids = [p["id"] for p in response.json()] + assert dm_x_problem_id not in ids + + +def test_analyst_can_get_problem_json_for_dm(client: TestClient): + """Analyst can download a DM's problem as JSON.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "json_owner", "json_owner") + dm_token = login(client, username="json_owner", password="json_owner") # noqa: S106 + + dm_problem_id = _add_problem_as_dm(client, dm_token) + + response = get_json(client, f"/problem/{dm_problem_id}/json", analyst_token) + assert response.status_code == status.HTTP_200_OK + + +def test_dm_cannot_get_problem_json_of_other_dm(client: TestClient): + """A DM cannot download another DM's problem JSON.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "owner_dm", "owner_dm") + _add_dm(client, analyst_token, "thief_dm", "thief_dm") + + owner_token = login(client, username="owner_dm", password="owner_dm") # noqa: S106 + thief_token = login(client, username="thief_dm", password="thief_dm") # noqa: S106 + + owner_problem_id = _add_problem_as_dm(client, owner_token) + + response = get_json(client, f"/problem/{owner_problem_id}/json", thief_token) + assert response.status_code == status.HTTP_403_FORBIDDEN + + +def test_analyst_can_delete_dm_problem(client: TestClient): + """Analyst can delete a DM's problem.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "del_dm", "del_dm") + dm_token = login(client, username="del_dm", password="del_dm") # noqa: S106 + + dm_problem_id = _add_problem_as_dm(client, dm_token) + + response = client.delete( + f"/problem/{dm_problem_id}", + headers={"Authorization": f"Bearer {analyst_token}"}, + ) + assert response.status_code == status.HTTP_204_NO_CONTENT + + +def 
test_analyst_can_assign_solver_for_dm_problem(client: TestClient): + """Analyst can assign a solver to a DM's problem.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "solver_dm", "solver_dm") + dm_token = login(client, username="solver_dm", password="solver_dm") # noqa: S106 + + dm_problem_id = _add_problem_as_dm(client, dm_token) + + response = post_json( + client, + "/problem/assign_solver", + {"problem_id": dm_problem_id, "solver_string_representation": "scipy_minimize"}, + analyst_token, + ) + assert response.status_code == status.HTTP_200_OK + + +def test_analyst_can_get_repr_solution_sets_for_dm(client: TestClient): + """Analyst can list representative solution sets for a DM's problem.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "repr_dm", "repr_dm") + dm_token = login(client, username="repr_dm", password="repr_dm") # noqa: S106 + + dm_problem_id = _add_problem_as_dm(client, dm_token) + + response = get_json(client, f"/problem/{dm_problem_id}/all_representative_solution_sets", analyst_token) + assert response.status_code == status.HTTP_200_OK + assert response.json() == [] diff --git a/webui/src/lib/gen/endpoints/DESDEOFastAPI.ts b/webui/src/lib/gen/endpoints/DESDEOFastAPI.ts index 69f8a97a6..6c8cb020b 100644 --- a/webui/src/lib/gen/endpoints/DESDEOFastAPI.ts +++ b/webui/src/lib/gen/endpoints/DESDEOFastAPI.ts @@ -1715,10 +1715,12 @@ export type DeleteProblemProblemProblemIdDeleteParams = { }; export type AddProblemProblemAddPostParams = { + target_user_id?: number | null; problem_id?: number | null; }; export type AddProblemJsonProblemAddJsonPostParams = { + target_user_id?: number | null; problem_id?: number | null; }; @@ -1845,6 +1847,43 @@ export type ConfigureGdmGdmScoreBandsConfigurePostParams = { group_id: number; }; +/** + * Return all users with the decision maker role. Requires analyst or admin. + +Args: + user (Annotated[User, Depends]): the current user. 
+ session (Annotated[Session, Depends]): the database session. + +Returns: + list[UserPublic]: public information for all DM users. + +Raises: + HTTPException: if the current user is not an analyst or admin. + * @summary Get Dm Users + */ +export type getDmUsersUsersDmsGetResponse200 = { + data: UserPublic[]; + status: 200; +}; + +export type getDmUsersUsersDmsGetResponseSuccess = getDmUsersUsersDmsGetResponse200 & { + headers: Headers; +}; +export type getDmUsersUsersDmsGetResponse = getDmUsersUsersDmsGetResponseSuccess; + +export const getGetDmUsersUsersDmsGetUrl = () => { + return `http://localhost:8000/users/dms`; +}; + +export const getDmUsersUsersDmsGet = async ( + options?: RequestInit +): Promise => { + return customFetch(getGetDmUsersUsersDmsGetUrl(), { + ...options, + method: 'GET' + }); +}; + /** * Return information about the current user. @@ -2038,9 +2077,10 @@ export const refreshAccessTokenRefreshPost = async ( }; /** - * Add a new user of the role Decision Maker to the database. Requires no login. + * Add a new user of the role Decision Maker to the database. Requires a logged in analyst or an admin. Args: + user: Annotated[User, Depends(get_current_user)]: Logged in user with the role "analyst" or "admin". form_data (Annotated[OAuth2PasswordRequestForm, Depends()]): The user credentials to add to the database. session (Annotated[Session, Depends(get_session)]): the database session. @@ -2048,7 +2088,8 @@ Returns: JSONResponse: A JSON response Raises: - HTTPException: if username is already in use or if saving to the database fails for some reason. + HTTPException: if the logged in user is not an analyst or an admin or if + username is already in use or if saving to the database fails for some reason. * @summary Add New Dm */ export type addNewDmAddNewDmPostResponse200 = { @@ -2197,13 +2238,14 @@ export const addNewAnalystAddNewAnalystPost = async ( }; /** - * Get information on all the current user's problems. + * Get information on problems. 
Analysts and admins see all users' problems. Args: user (Annotated[User, Depends): the current user. + db_session (Annotated[Session, Depends]): the database session. Returns: - list[ProblemInfoSmall]: a list of information on all the problems. + list[ProblemInfoSmall]: a list of information on the problems. * @summary Get Problems */ export type getProblemsProblemAllGetResponse200 = { @@ -2230,13 +2272,14 @@ export const getProblemsProblemAllGet = async ( }; /** - * Get detailed information on all the current user's problems. + * Get detailed information on problems. Analysts and admins see all users' problems. Args: user (Annotated[User, Depends): the current user. + db_session (Annotated[Session, Depends]): the database session. Returns: - list[ProblemInfo]: a list of the detailed information on all the problems. + list[ProblemInfo]: a list of the detailed information on the problems. * @summary Get Problems Info */ export type getProblemsInfoProblemAllInfoGetResponse200 = { @@ -2403,6 +2446,8 @@ export const deleteProblemProblemProblemIdDelete = async ( Args: request (Problem): the JSON representation of the problem. context (Annotated[SessionContext, Depends): the session context. + target_user_id (int | None): if provided, assign the problem to this user instead of + the caller. Only analysts and admins may use this parameter. Note: Users with the role 'guest' may not add new problems. @@ -2474,6 +2519,8 @@ export const addProblemProblemAddPost = async ( Args: json_file (UploadFile): a file in JSON format describing the problem. context (Annotated[SessionContext, Depends): the session context. + target_user_id (int | None): if provided, assign the problem to this user instead of + the caller. Only analysts and admins may use this parameter. Raises: HTTPException: if the provided `json_file` is empty. 
diff --git a/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts b/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts index ad16a2a94..e8b5e8f29 100644 --- a/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts +++ b/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts @@ -7,6 +7,30 @@ */ import * as zod from 'zod'; +/** + * Return all users with the decision maker role. Requires analyst or admin. + +Args: + user (Annotated[User, Depends]): the current user. + session (Annotated[Session, Depends]): the database session. + +Returns: + list[UserPublic]: public information for all DM users. + +Raises: + HTTPException: if the current user is not an analyst or admin. + * @summary Get Dm Users + */ +export const GetDmUsersUsersDmsGetResponseItem = zod + .object({ + username: zod.string(), + id: zod.number(), + role: zod.enum(['guest', 'dm', 'analyst', 'admin']).describe('Possible user roles.'), + group_ids: zod.union([zod.array(zod.number()), zod.null()]) + }) + .describe('The object to handle public user information.'); +export const GetDmUsersUsersDmsGetResponse = zod.array(GetDmUsersUsersDmsGetResponseItem); + /** * Return information about the current user. @@ -80,9 +104,10 @@ Returns: export const RefreshAccessTokenRefreshPostResponse = zod.unknown(); /** - * Add a new user of the role Decision Maker to the database. Requires no login. + * Add a new user of the role Decision Maker to the database. Requires a logged in analyst or an admin. Args: + user: Annotated[User, Depends(get_current_user)]: Logged in user with the role "analyst" or "admin". form_data (Annotated[OAuth2PasswordRequestForm, Depends()]): The user credentials to add to the database. session (Annotated[Session, Depends(get_session)]): the database session. @@ -90,7 +115,8 @@ Returns: JSONResponse: A JSON response Raises: - HTTPException: if username is already in use or if saving to the database fails for some reason. 
+ HTTPException: if the logged in user is not an analyst or an admin or if + username is already in use or if saving to the database fails for some reason. * @summary Add New Dm */ export const AddNewDmAddNewDmPostResponse = zod.unknown(); @@ -114,13 +140,14 @@ Raises: export const AddNewAnalystAddNewAnalystPostResponse = zod.unknown(); /** - * Get information on all the current user's problems. + * Get information on problems. Analysts and admins see all users' problems. Args: user (Annotated[User, Depends): the current user. + db_session (Annotated[Session, Depends]): the database session. Returns: - list[ProblemInfoSmall]: a list of information on all the problems. + list[ProblemInfoSmall]: a list of information on the problems. * @summary Get Problems */ export const getProblemsProblemAllGetResponseProblemMetadataOneForestMetadataOneItemMetadataTypeDefault = `forest_problem_metadata`; @@ -209,13 +236,14 @@ export const GetProblemsProblemAllGetResponseItem = zod export const GetProblemsProblemAllGetResponse = zod.array(GetProblemsProblemAllGetResponseItem); /** - * Get detailed information on all the current user's problems. + * Get detailed information on problems. Analysts and admins see all users' problems. Args: user (Annotated[User, Depends): the current user. + db_session (Annotated[Session, Depends]): the database session. Returns: - list[ProblemInfo]: a list of the detailed information on all the problems. + list[ProblemInfo]: a list of the detailed information on the problems. * @summary Get Problems Info */ export const getProblemsInfoProblemAllInfoGetResponseObjectivesItemMaximizeDefault = false; @@ -1417,6 +1445,8 @@ export const DeleteProblemProblemProblemIdDeleteQueryParams = zod.object({ Args: request (Problem): the JSON representation of the problem. context (Annotated[SessionContext, Depends): the session context. + target_user_id (int | None): if provided, assign the problem to this user instead of + the caller. 
Only analysts and admins may use this parameter. Note: Users with the role 'guest' may not add new problems. @@ -1429,6 +1459,7 @@ Returns: * @summary Add Problem */ export const AddProblemProblemAddPostQueryParams = zod.object({ + target_user_id: zod.union([zod.number(), zod.null()]).optional(), problem_id: zod.union([zod.number(), zod.null()]).optional() }); @@ -2073,6 +2104,8 @@ export const AddProblemProblemAddPostResponse = zod Args: json_file (UploadFile): a file in JSON format describing the problem. context (Annotated[SessionContext, Depends): the session context. + target_user_id (int | None): if provided, assign the problem to this user instead of + the caller. Only analysts and admins may use this parameter. Raises: HTTPException: if the provided `json_file` is empty. @@ -2083,6 +2116,7 @@ Returns: * @summary Add Problem Json */ export const AddProblemJsonProblemAddJsonPostQueryParams = zod.object({ + target_user_id: zod.union([zod.number(), zod.null()]).optional(), problem_id: zod.union([zod.number(), zod.null()]).optional() }); diff --git a/webui/src/routes/problems/+page.svelte b/webui/src/routes/problems/+page.svelte index 9afbb683e..e89a44db5 100644 --- a/webui/src/routes/problems/+page.svelte +++ b/webui/src/routes/problems/+page.svelte @@ -43,7 +43,8 @@ import * as Tabs from '$lib/components/ui/tabs'; import * as Table from '$lib/components/ui/table/index.js'; import { Button } from '$lib/components/ui/button'; - import type { ProblemInfo } from '$lib/gen/endpoints/DESDEOFastAPI'; + import type { ProblemInfo, UserPublic } from '$lib/gen/endpoints/DESDEOFastAPI'; + import { auth } from '../../stores/auth'; import { methodSelection } from '../../stores/methodSelection'; import { invalidateAll } from '$app/navigation'; import { deleteProblem, downloadProblemJson, getAssignedSolver, getAvailableSolvers, assignSolver, addRepresentativeSolutionSet } from './handler'; @@ -58,6 +59,41 @@ let { data }: PageProps = $props(); let problemList = 
$derived(data.problemList); + let dmUsers = $derived(data.dmUsers as UserPublic[]); + + const isAnalystOrAdmin = $derived( + $auth.user?.role === 'analyst' || $auth.user?.role === 'admin' + ); + + const ownerMap = $derived( + Object.fromEntries(dmUsers.map((u: UserPublic) => [u.id, u.username])) + ); + + function getOwnerLabel(userId: number): string { + if (userId === $auth.user?.id) return $auth.user?.username ?? String(userId); + return ownerMap[userId] ?? `User #${userId}`; + } + + // Unique users that have at least one problem in the list (for the filter dropdown) + const usersWithProblems = $derived( + isAnalystOrAdmin + ? [...new Map(problemList.map((p: ProblemInfo) => [p.user_id, p.user_id])).keys()].map((id) => ({ + id, + label: getOwnerLabel(id) + })) + : [] + ); + + // 'me' = current user, 'all' = everyone, '' = specific user id + let selectedFilter = $state('me'); + + const filteredProblemList = $derived( + selectedFilter === 'all' + ? problemList + : selectedFilter === 'me' + ? problemList.filter((p: ProblemInfo) => p.user_id === $auth.user?.id) + : problemList.filter((p: ProblemInfo) => p.user_id === Number(selectedFilter)) + ); let selectedProblem = $state(undefined); let expandedObjectives = $state(new Set()); let expandedConstraints = $state(new Set()); @@ -263,12 +299,39 @@ of preferences you want to utilize.

{#if problemList.length === 0} -

You have not defined any problems yet.

+

+ {isAnalystOrAdmin ? 'No problems have been defined yet.' : 'You have not defined any problems yet.'} +

{:else} + {#if isAnalystOrAdmin && usersWithProblems.length > 1} +
+ + { + selectedFilter = v || 'me'; + selectedProblem = undefined; + }} + > + + {selectedFilter === 'me' ? ($auth.user?.username ?? 'Myself') : selectedFilter === 'all' ? 'All users' : getOwnerLabel(Number(selectedFilter))} + + + {$auth.user?.username ?? 'Myself'} + All users + {#each usersWithProblems.filter((u) => u.id !== $auth.user?.id) as u} + {u.label} + {/each} + + +
+ {/if} +
{ selectedProblem = e; console.log('Selected problem:', selectedProblem.id); @@ -337,6 +400,13 @@ {/if}
+ {#if isAnalystOrAdmin} +
+
+
Owner
+
{getOwnerLabel(selectedProblem.user_id)}
+
+ {/if}
diff --git a/webui/src/routes/problems/+page.ts b/webui/src/routes/problems/+page.ts index c788907c3..e22e3dd21 100644 --- a/webui/src/routes/problems/+page.ts +++ b/webui/src/routes/problems/+page.ts @@ -1,14 +1,21 @@ import type { PageLoad } from './$types'; -import { getProblemsInfoProblemAllInfoGet } from '$lib/gen/endpoints/DESDEOFastAPI'; +import { getDmUsersUsersDmsGet, getProblemsInfoProblemAllInfoGet } from '$lib/gen/endpoints/DESDEOFastAPI'; +import type { UserPublic } from '$lib/gen/endpoints/DESDEOFastAPI'; export const load: PageLoad = async () => { - const res = await getProblemsInfoProblemAllInfoGet(); + const [problemsRes, dmsRes] = await Promise.all([ + getProblemsInfoProblemAllInfoGet(), + getDmUsersUsersDmsGet().catch(() => null) + ]); - if (res.status !== 200) { + if (problemsRes.status !== 200) { throw new Error('Failed to fetch problems'); } + const dmUsers: UserPublic[] = dmsRes?.status === 200 ? (dmsRes.data as UserPublic[]) : []; + return { - problemList: res.data + problemList: problemsRes.data, + dmUsers }; }; diff --git a/webui/src/routes/problems/define/+page.svelte b/webui/src/routes/problems/define/+page.svelte index 4b33d2b0f..c88ba3587 100644 --- a/webui/src/routes/problems/define/+page.svelte +++ b/webui/src/routes/problems/define/+page.svelte @@ -17,6 +17,9 @@ VariableTypeEnum } from '$lib/gen/endpoints/DESDEOFastAPI'; import { createProblem, fetchProblem, type ProblemPayload, uploadProblemJson } from './handler'; + import { getDmUsersUsersDmsGet } from '$lib/gen/endpoints/DESDEOFastAPI'; + import type { UserPublic } from '$lib/gen/endpoints/DESDEOFastAPI'; + import { auth } from '../../../stores/auth'; type VariableForm = { name: string; @@ -135,6 +138,13 @@ let jsonFile = $state(null); + let dms = $state([]); + let selectedDmId = $state(''); + const isAnalystOrAdmin = $derived( + $auth.user?.role === 'analyst' || $auth.user?.role === 'admin' + ); + const targetUserId = $derived(selectedDmId ? 
Number(selectedDmId) : null); + const parseNumber = (value: string): number | null => { if (value.trim() === '') return null; const parsed = Number(value); @@ -354,7 +364,7 @@ } isSubmitting = true; - const response = await createProblem(buildPayload()); + const response = await createProblem(buildPayload(), targetUserId); isSubmitting = false; if (!response.ok) { @@ -378,7 +388,7 @@ } isSubmitting = true; - const response = await uploadProblemJson({ json_file: jsonFile }); + const response = await uploadProblemJson({ json_file: jsonFile }, targetUserId); isSubmitting = false; if (!response.ok) { @@ -513,6 +523,13 @@ }; onMount(async () => { + if ($auth.user?.role === 'analyst' || $auth.user?.role === 'admin') { + const dmResponse = await getDmUsersUsersDmsGet(); + if (dmResponse.status === 200) { + dms = dmResponse.data; + } + } + const editId = page.url.searchParams.get('edit'); if (!editId) return; @@ -538,6 +555,25 @@

Problem Definition

+ {#if isAnalystOrAdmin && dms.length > 0} +
+ + + + {selectedDmId + ? (dms.find((dm) => String(dm.id) === selectedDmId)?.username ?? 'Unknown') + : 'Myself (default)'} + + + Myself (default) + {#each dms as dm} + {dm.username} + {/each} + + +
+ {/if} + (mode = value)}> Define via Form diff --git a/webui/src/routes/problems/define/handler.ts b/webui/src/routes/problems/define/handler.ts index 6ba39d8e2..a453c3101 100644 --- a/webui/src/routes/problems/define/handler.ts +++ b/webui/src/routes/problems/define/handler.ts @@ -33,9 +33,13 @@ export type ProblemResponse = | { ok: true; data: ProblemInfo } | { ok: false; error: string; status?: number }; -export async function createProblem(payload: ProblemPayload): Promise { +export async function createProblem( + payload: ProblemPayload, + targetUserId?: number | null +): Promise { try { - const response = await addProblemProblemAddPost(payload as any); + const params = targetUserId != null ? { target_user_id: targetUserId } : undefined; + const response = await addProblemProblemAddPost(payload as any, params); if (response.status !== 200) { return { ok: false, error: 'Failed to create problem.', status: response.status }; @@ -49,10 +53,12 @@ export async function createProblem(payload: ProblemPayload): Promise { try { - const response = await addProblemJsonProblemAddJsonPost(body); + const params = targetUserId != null ? 
{ target_user_id: targetUserId } : undefined; + const response = await addProblemJsonProblemAddJsonPost(body, params); if (response.status !== 200) { return { ok: false, error: 'Failed to upload problem JSON.', status: response.status }; From d4845b0bf8d8f5309ac7714368a02645bd695907 Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Wed, 1 Apr 2026 16:31:00 +0300 Subject: [PATCH 22/24] Web-API, Web-GUI - Analysts and admins can view, create, and delete interactive sessions for any user - Added fetch_interactive_session_with_role_check helper in utils.py (role-aware ownership bypass) - Session listing returns all sessions for analysts, own only for DMs; empty list returns 200+[] instead of 404 - POST /session/new accepts ?target_user_id= for analysts to create sessions on behalf of DMs - GET /session/get/{id} and DELETE /session/{id} now enforce ownership for DMs (fixes latent security bug) - Fixed InteractiveSessionBase missing from_attributes=True (POST /session/new was returning {}) - Sessions UI: user filter dropdown (default "Myself"), Owner column, "Create for" DM selector - Regenerated orval client with new session endpoint params --- desdeo/api/models/session.py | 3 + desdeo/api/routers/session.py | 62 ++++-- desdeo/api/routers/utils.py | 30 +++ desdeo/api/tests/test_routes.py | 7 +- desdeo/api/tests/test_session_management.py | 184 ++++++++++++++++++ webui/src/lib/gen/endpoints/DESDEOFastAPI.ts | 10 +- .../src/lib/gen/endpoints/DESDEOFastAPIzod.ts | 10 +- .../src/routes/methods/sessions/+page.svelte | 117 +++++++++-- webui/src/routes/methods/sessions/handler.ts | 10 +- 9 files changed, 391 insertions(+), 42 deletions(-) create mode 100644 desdeo/api/tests/test_session_management.py diff --git a/desdeo/api/models/session.py b/desdeo/api/models/session.py index f8386baad..a999e80d4 100644 --- a/desdeo/api/models/session.py +++ b/desdeo/api/models/session.py @@ -2,6 +2,7 @@ from typing import TYPE_CHECKING +from pydantic import ConfigDict from sqlmodel 
import Field, Relationship, SQLModel if TYPE_CHECKING: @@ -18,6 +19,8 @@ class CreateSessionRequest(SQLModel): class InteractiveSessionBase(SQLModel): """The base model for representing interactive sessions.""" + model_config = ConfigDict(from_attributes=True) + id: int | None user_id: int | None diff --git a/desdeo/api/routers/session.py b/desdeo/api/routers/session.py index 5b4a5e757..baf3d7b0f 100644 --- a/desdeo/api/routers/session.py +++ b/desdeo/api/routers/session.py @@ -11,9 +11,14 @@ InteractiveSessionDB, InteractiveSessionInfo, User, + UserRole, ) from desdeo.api.routers.user_authentication import get_current_user -from desdeo.api.routers.utils import SessionContext, SessionContextGuard, fetch_interactive_session +from desdeo.api.routers.utils import ( + SessionContext, + SessionContextGuard, + fetch_interactive_session_with_role_check, +) router = APIRouter(prefix="/session") @@ -22,13 +27,34 @@ def create_new_session( request: CreateSessionRequest, context: Annotated[SessionContext, Depends(SessionContextGuard().post)], + target_user_id: int | None = None, ) -> InteractiveSessionInfo: - """Creates a new interactive session.""" + """Creates a new interactive session. + + If ``target_user_id`` is provided, the session is created on behalf of that user. + Only analysts and admins may use this parameter. 
+ """ user = context.user db_session = context.db_session + if target_user_id is not None: + if user.role not in (UserRole.analyst, UserRole.admin): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Only analysts and admins may create sessions for other users.", + ) + target_user = db_session.get(User, target_user_id) + if target_user is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"User with id={target_user_id} not found.", + ) + owner = target_user + else: + owner = user + interactive_session = InteractiveSessionDB( - user_id=user.id, + user_id=owner.id, info=request.info, ) @@ -36,12 +62,12 @@ def create_new_session( db_session.commit() db_session.refresh(interactive_session) - user.active_session_id = interactive_session.id + owner.active_session_id = interactive_session.id - db_session.add(user) + db_session.add(owner) db_session.commit() - return interactive_session + return InteractiveSessionInfo.model_validate(interactive_session) @router.get("/get/{session_id}") @@ -50,8 +76,8 @@ def get_session( user: Annotated[User, Depends(get_current_user)], session: Annotated[Session, Depends(get_db_session)], ) -> InteractiveSessionInfo: - """Return an interactive session with a current user.""" - return fetch_interactive_session( + """Return an interactive session. 
Analysts and admins may access any session.""" + return fetch_interactive_session_with_role_check( user=user, session_id=session_id, session=session, @@ -63,17 +89,13 @@ def get_all_sessions( user: Annotated[User, Depends(get_current_user)], session: Annotated[Session, Depends(get_db_session)], ) -> list[InteractiveSessionInfo]: - """Return all interactive sessions of the current user.""" - statement = select(InteractiveSessionDB).where(InteractiveSessionDB.user_id == user.id) - result = session.exec(statement).all() - - if not result: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail="No interactive sessions found for the user.", - ) + """Return interactive sessions. Analysts and admins see all users' sessions; others see only their own.""" + if user.role in (UserRole.analyst, UserRole.admin): + statement = select(InteractiveSessionDB) + else: + statement = select(InteractiveSessionDB).where(InteractiveSessionDB.user_id == user.id) - return result + return list(session.exec(statement).all()) @router.delete("/{session_id}", status_code=status.HTTP_204_NO_CONTENT) @@ -82,8 +104,8 @@ def delete_session( user: Annotated[User, Depends(get_current_user)], session: Annotated[Session, Depends(get_db_session)], ) -> None: - """Delete an interactive session and all its related states.""" - interactive_session = fetch_interactive_session( + """Delete an interactive session and all its related states. 
Analysts and admins may delete any session.""" + interactive_session = fetch_interactive_session_with_role_check( user=user, session_id=session_id, session=session, diff --git a/desdeo/api/routers/utils.py b/desdeo/api/routers/utils.py index e8cb6db44..868e44c89 100644 --- a/desdeo/api/routers/utils.py +++ b/desdeo/api/routers/utils.py @@ -48,6 +48,36 @@ def fetch_problem_with_role_check(user: User, problem_id: int, session: Session) return session.exec(statement).first() +def fetch_interactive_session_with_role_check(user: User, session_id: int, session: Session) -> InteractiveSessionDB: + """Fetch an InteractiveSessionDB by id, bypassing ownership for analysts and admins. + + Args: + user (User): the requesting user. + session_id (int): id of the interactive session to fetch. + session (Session): the database session. + + Raises: + HTTPException: when the session is not found (or not owned by the user for non-analysts). + + Returns: + InteractiveSessionDB: the matching session. + """ + if user.role in (UserRole.analyst, UserRole.admin): + statement = select(InteractiveSessionDB).where(InteractiveSessionDB.id == session_id) + else: + statement = select(InteractiveSessionDB).where( + InteractiveSessionDB.id == session_id, + InteractiveSessionDB.user_id == user.id, + ) + result = session.exec(statement).first() + if result is None: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Could not find interactive session with id={session_id}.", + ) + return result + + def fetch_interactive_session( user: User, session: Session, diff --git a/desdeo/api/tests/test_routes.py b/desdeo/api/tests/test_routes.py index 70fbbd1ec..a60168ff2 100644 --- a/desdeo/api/tests/test_routes.py +++ b/desdeo/api/tests/test_routes.py @@ -264,8 +264,8 @@ def test_get_all_sessions_success(client: TestClient, session_and_user: dict): assert len(data) == 2 -def test_get_all_sessions_not_found(client: TestClient, session_and_user: dict): - """Test get_all returns 404 if 
user has no sessions.""" +def test_get_all_sessions_empty(client: TestClient, session_and_user: dict): + """Test get_all returns 200 + empty list if user has no sessions.""" access_token = login(client) response = client.get( @@ -273,7 +273,8 @@ def test_get_all_sessions_not_found(client: TestClient, session_and_user: dict): headers={"Authorization": f"Bearer {access_token}"}, ) - assert response.status_code == status.HTTP_404_NOT_FOUND + assert response.status_code == status.HTTP_200_OK + assert response.json() == [] def test_delete_session_success(client: TestClient, session_and_user: dict): diff --git a/desdeo/api/tests/test_session_management.py b/desdeo/api/tests/test_session_management.py new file mode 100644 index 000000000..82450a7cb --- /dev/null +++ b/desdeo/api/tests/test_session_management.py @@ -0,0 +1,184 @@ +"""Tests for analyst/admin management of other users' interactive sessions.""" + +from fastapi import status +from fastapi.testclient import TestClient + +from .conftest import get_json, login, post_json + + +def _add_dm(client: TestClient, analyst_token: str, username: str, password: str) -> None: + """Helper: create a DM user via the API.""" + response = client.post( + "/add_new_dm", + data={"username": username, "password": password, "grant_type": "password"}, + headers={"Authorization": f"Bearer {analyst_token}", "content-type": "application/x-www-form-urlencoded"}, + ) + assert response.status_code == status.HTTP_201_CREATED + + +def _create_session(client: TestClient, token: str, info: str | None = None) -> int: + """Helper: create a session and return its id.""" + response = post_json(client, "/session/new", {"info": info}, token) + assert response.status_code == status.HTTP_200_OK + return response.json()["id"] + + +def test_analyst_sees_all_sessions(client: TestClient): + """Analyst's GET /session/get_all includes sessions from DM users.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_list", "dm_list") + dm_token 
= login(client, username="dm_list", password="dm_list") # noqa: S106 + + dm_session_id = _create_session(client, dm_token, "DM's session") + + response = get_json(client, "/session/get_all", analyst_token) + assert response.status_code == status.HTTP_200_OK + ids = [s["id"] for s in response.json()] + assert dm_session_id in ids + + +def test_dm_sees_only_own_sessions(client: TestClient): + """DM cannot see another DM's sessions in GET /session/get_all.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_x", "dm_x") + _add_dm(client, analyst_token, "dm_y", "dm_y") + + dm_x_token = login(client, username="dm_x", password="dm_x") # noqa: S106 + dm_y_token = login(client, username="dm_y", password="dm_y") # noqa: S106 + + dm_x_session_id = _create_session(client, dm_x_token) + + response = get_json(client, "/session/get_all", dm_y_token) + assert response.status_code == status.HTTP_200_OK + ids = [s["id"] for s in response.json()] + assert dm_x_session_id not in ids + + +def test_empty_session_list_returns_ok(client: TestClient): + """GET /session/get_all returns 200 + empty list when user has no sessions.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_empty", "dm_empty") + dm_token = login(client, username="dm_empty", password="dm_empty") # noqa: S106 + + response = get_json(client, "/session/get_all", dm_token) + assert response.status_code == status.HTTP_200_OK + assert response.json() == [] + + +def test_analyst_creates_session_for_dm(client: TestClient): + """Analyst can create a session owned by a DM via ?target_user_id=.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_target", "dm_target") + dm_token = login(client, username="dm_target", password="dm_target") # noqa: S106 + + dms = get_json(client, "/users/dms", analyst_token).json() + dm_id = next(u["id"] for u in dms if u["username"] == "dm_target") + + response = post_json( + client, + f"/session/new?target_user_id={dm_id}", + {"info": 
"created by analyst"}, + analyst_token, + ) + assert response.status_code == status.HTTP_200_OK + session_data = response.json() + assert session_data["user_id"] == dm_id + + # DM should see it in their own listing + dm_sessions = get_json(client, "/session/get_all", dm_token).json() + assert any(s["id"] == session_data["id"] for s in dm_sessions) + + +def test_dm_cannot_create_for_other_dm(client: TestClient): + """DM cannot create a session for another user via ?target_user_id=.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_a", "dm_a") + _add_dm(client, analyst_token, "dm_b", "dm_b") + + dms = get_json(client, "/users/dms", analyst_token).json() + dm_b_id = next(u["id"] for u in dms if u["username"] == "dm_b") + + dm_a_token = login(client, username="dm_a", password="dm_a") # noqa: S106 + response = post_json( + client, + f"/session/new?target_user_id={dm_b_id}", + {"info": "should fail"}, + dm_a_token, + ) + assert response.status_code == status.HTTP_403_FORBIDDEN + + +def test_create_session_for_nonexistent_user(client: TestClient): + """Creating a session with a nonexistent target_user_id returns 404.""" + analyst_token = login(client) + response = post_json(client, "/session/new?target_user_id=99999", {"info": None}, analyst_token) + assert response.status_code == status.HTTP_404_NOT_FOUND + + +def test_analyst_can_get_dm_session_by_id(client: TestClient): + """Analyst can GET a DM's session by its ID.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_get", "dm_get") + dm_token = login(client, username="dm_get", password="dm_get") # noqa: S106 + + dm_session_id = _create_session(client, dm_token) + + response = get_json(client, f"/session/get/{dm_session_id}", analyst_token) + assert response.status_code == status.HTTP_200_OK + assert response.json()["id"] == dm_session_id + + +def test_dm_cannot_get_other_dm_session_by_id(client: TestClient): + """DM cannot GET another DM's session by ID.""" + analyst_token = 
login(client) + _add_dm(client, analyst_token, "dm_owner", "dm_owner") + _add_dm(client, analyst_token, "dm_thief", "dm_thief") + + owner_token = login(client, username="dm_owner", password="dm_owner") # noqa: S106 + thief_token = login(client, username="dm_thief", password="dm_thief") # noqa: S106 + + owner_session_id = _create_session(client, owner_token) + + response = get_json(client, f"/session/get/{owner_session_id}", thief_token) + assert response.status_code == status.HTTP_404_NOT_FOUND + + +def test_analyst_can_delete_dm_session(client: TestClient): + """Analyst can delete a DM's session.""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_del", "dm_del") + dm_token = login(client, username="dm_del", password="dm_del") # noqa: S106 + + dm_session_id = _create_session(client, dm_token) + + response = client.delete( + f"/session/{dm_session_id}", + headers={"Authorization": f"Bearer {analyst_token}"}, + ) + assert response.status_code == status.HTTP_204_NO_CONTENT + + # Confirm it's gone + response = get_json(client, f"/session/get/{dm_session_id}", analyst_token) + assert response.status_code == status.HTTP_404_NOT_FOUND + + +def test_dm_cannot_delete_other_dm_session(client: TestClient): + """DM cannot delete another DM's session (regression: latent ownership bug).""" + analyst_token = login(client) + _add_dm(client, analyst_token, "dm_keep", "dm_keep") + _add_dm(client, analyst_token, "dm_attacker", "dm_attacker") + + keep_token = login(client, username="dm_keep", password="dm_keep") # noqa: S106 + attacker_token = login(client, username="dm_attacker", password="dm_attacker") # noqa: S106 + + target_session_id = _create_session(client, keep_token) + + response = client.delete( + f"/session/{target_session_id}", + headers={"Authorization": f"Bearer {attacker_token}"}, + ) + assert response.status_code == status.HTTP_404_NOT_FOUND + + # Session should still exist + response = get_json(client, f"/session/get/{target_session_id}", 
analyst_token) + assert response.status_code == status.HTTP_200_OK diff --git a/webui/src/lib/gen/endpoints/DESDEOFastAPI.ts b/webui/src/lib/gen/endpoints/DESDEOFastAPI.ts index 6c8cb020b..0db0a6f75 100644 --- a/webui/src/lib/gen/endpoints/DESDEOFastAPI.ts +++ b/webui/src/lib/gen/endpoints/DESDEOFastAPI.ts @@ -1752,6 +1752,7 @@ export type GetProblemJsonProblemProblemIdJsonGetParams = { }; export type CreateNewSessionSessionNewPostParams = { + target_user_id?: number | null; problem_id?: number | null; }; @@ -3089,6 +3090,9 @@ export const getProblemJsonProblemProblemIdJsonGet = async ( /** * Creates a new interactive session. + +If ``target_user_id`` is provided, the session is created on behalf of that user. +Only analysts and admins may use this parameter. * @summary Create New Session */ export type createNewSessionSessionNewPostResponse200 = { @@ -3149,7 +3153,7 @@ export const createNewSessionSessionNewPost = async ( }; /** - * Return an interactive session with a current user. + * Return an interactive session. Analysts and admins may access any session. * @summary Get Session */ export type getSessionSessionGetSessionIdGetResponse200 = { @@ -3193,7 +3197,7 @@ export const getSessionSessionGetSessionIdGet = async ( }; /** - * Return all interactive sessions of the current user. + * Return interactive sessions. Analysts and admins see all users' sessions; others see only their own. * @summary Get All Sessions */ export type getAllSessionsSessionGetAllGetResponse200 = { @@ -3224,7 +3228,7 @@ export const getAllSessionsSessionGetAllGet = async ( }; /** - * Delete an interactive session and all its related states. + * Delete an interactive session and all its related states. Analysts and admins may delete any session. 
* @summary Delete Session */ export type deleteSessionSessionSessionIdDeleteResponse204 = { diff --git a/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts b/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts index e8b5e8f29..7f1252f4e 100644 --- a/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts +++ b/webui/src/lib/gen/endpoints/DESDEOFastAPIzod.ts @@ -3003,9 +3003,13 @@ export const GetProblemJsonProblemProblemIdJsonGetResponse = zod.unknown(); /** * Creates a new interactive session. + +If ``target_user_id`` is provided, the session is created on behalf of that user. +Only analysts and admins may use this parameter. * @summary Create New Session */ export const CreateNewSessionSessionNewPostQueryParams = zod.object({ + target_user_id: zod.union([zod.number(), zod.null()]).optional(), problem_id: zod.union([zod.number(), zod.null()]).optional() }); @@ -3024,7 +3028,7 @@ export const CreateNewSessionSessionNewPostResponse = zod .describe('The base model for representing interactive sessions.'); /** - * Return an interactive session with a current user. + * Return an interactive session. Analysts and admins may access any session. * @summary Get Session */ export const GetSessionSessionGetSessionIdGetParams = zod.object({ @@ -3040,7 +3044,7 @@ export const GetSessionSessionGetSessionIdGetResponse = zod .describe('The base model for representing interactive sessions.'); /** - * Return all interactive sessions of the current user. + * Return interactive sessions. Analysts and admins see all users' sessions; others see only their own. * @summary Get All Sessions */ export const GetAllSessionsSessionGetAllGetResponseItem = zod @@ -3055,7 +3059,7 @@ export const GetAllSessionsSessionGetAllGetResponse = zod.array( ); /** - * Delete an interactive session and all its related states. + * Delete an interactive session and all its related states. Analysts and admins may delete any session. 
* @summary Delete Session */ export const DeleteSessionSessionSessionIdDeleteParams = zod.object({ diff --git a/webui/src/routes/methods/sessions/+page.svelte b/webui/src/routes/methods/sessions/+page.svelte index cdcd18582..caf9d623b 100644 --- a/webui/src/routes/methods/sessions/+page.svelte +++ b/webui/src/routes/methods/sessions/+page.svelte @@ -1,16 +1,20 @@ @@ -125,11 +169,29 @@ Create a new session - - Optional info/label. - + Optional info/label. + {#if isAnalystOrAdmin && dms.length > 0} +
+ + (selectedTargetDmId = v)} + > + + {selectedTargetDmId ? getOwnerLabel(Number(selectedTargetDmId)) : ($auth.user?.username ?? 'Myself')} + + + {$auth.user?.username ?? 'Myself'} + {#each dms as dm} + {dm.username} + {/each} + + +
+ {/if}
Existing sessions - These are your current interactive sessions. Deleting a session deletes the session and its related states. + {isAnalystOrAdmin + ? 'All interactive sessions. Deleting a session removes it and its related states.' + : 'Your interactive sessions. Deleting a session removes it and its related states.'} - {#if sessions.length === 0} + {#if isAnalystOrAdmin && usersWithSessions.length > 1} +
+ + (selectedFilter = v || 'me')} + > + + {selectedFilter === 'me' + ? ($auth.user?.username ?? 'Myself') + : selectedFilter === 'all' + ? 'All users' + : getOwnerLabel(Number(selectedFilter))} + + + {$auth.user?.username ?? 'Myself'} + All users + {#each usersWithSessions.filter((u) => u.id !== $auth.user?.id) as u} + {u.label} + {/each} + + +
+ {/if} + {#if filteredSessions.length === 0}
No sessions found. Create one above.
{:else} @@ -157,14 +246,20 @@ ID Info + {#if isAnalystOrAdmin} + Owner + {/if} Actions - {#each sessions as s (s.id)} + {#each filteredSessions as s (s.id)} {s.id} - {s.info ?? '—'} + {s.info ?? '—'} + {#if isAnalystOrAdmin} + {getOwnerLabel(s.user_id)} + {/if}
diff --git a/webui/src/routes/methods/sessions/handler.ts b/webui/src/routes/methods/sessions/handler.ts index 28f3a657b..7b24dea6b 100644 --- a/webui/src/routes/methods/sessions/handler.ts +++ b/webui/src/routes/methods/sessions/handler.ts @@ -22,9 +22,15 @@ export async function fetch_sessions(): Promise return response.data; } -export async function create_session(info: string | null): Promise { +export async function create_session( + info: string | null, + targetUserId?: number | null +): Promise { const payload: CreateSessionRequest = { info: info ?? null }; - const response: createNewSessionSessionNewPostResponse = await createNewSessionSessionNewPost(payload); + const response: createNewSessionSessionNewPostResponse = await createNewSessionSessionNewPost( + payload, + targetUserId != null ? { target_user_id: targetUserId } : undefined + ); if (response.status !== 200) { console.error('create_session failed.', response.status); From 3cda67694b0e1bf644b164e9bb4224730fa7594c Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Thu, 2 Apr 2026 10:40:47 +0300 Subject: [PATCH 23/24] Deployment - Updated config files. 
--- deploy/api-buildconfig.yaml | 3 +- deploy/api-deployment.yaml | 8 +++- deploy/db-init-job.yaml | 30 ++++++-------- deploy/postgres.yaml | 21 ++++++---- deploy/secrets-template.yaml | 76 ++++++++++++++++++++++++++--------- deploy/webui-buildconfig.yaml | 16 +++++--- 6 files changed, 99 insertions(+), 55 deletions(-) diff --git a/deploy/api-buildconfig.yaml b/deploy/api-buildconfig.yaml index 88f2f6b88..23abe9b9a 100644 --- a/deploy/api-buildconfig.yaml +++ b/deploy/api-buildconfig.yaml @@ -28,6 +28,7 @@ spec: source: type: Git git: + # Github repo and deploy branch uri: https://github.com/gialmisi/DESDEO.git ref: rahti-deploy @@ -60,7 +61,7 @@ spec: - type: GitHub github: secretReference: - name: desdeo-secrets + name: desdeo-webhook-api - type: ConfigChange runPolicy: Serial diff --git a/deploy/api-deployment.yaml b/deploy/api-deployment.yaml index 8b55d5eab..6ab48e36e 100644 --- a/deploy/api-deployment.yaml +++ b/deploy/api-deployment.yaml @@ -78,10 +78,14 @@ spec: secretKeyRef: name: desdeo-secrets key: DB_PASSWORD + # CORS: allow requests from the webui Route. + # Update this if the webui hostname changes. - name: CORS_ORIGINS value: '["https://gialmisi-desdeo-webui.rahtiapp.fi"]' - - name: COOKIE_DOMAIN - value: "rahtiapp.fi" + # COOKIE_DOMAIN is intentionally not set. + # With the SvelteKit proxy architecture, cookies are owned by the + # webui host and forwarded server-side. Setting a shared domain here + # is unnecessary and can cause authentication issues. - name: APP_MODULE value: "desdeo.api.app:app" - name: GUNICORN_CMD_ARGS diff --git a/deploy/db-init-job.yaml b/deploy/db-init-job.yaml index 4e6d96b8e..ec6b063b7 100644 --- a/deploy/db-init-job.yaml +++ b/deploy/db-init-job.yaml @@ -1,27 +1,23 @@ # deploy/db-init-job.yaml # # One-shot Kubernetes Job that runs desdeo/api/db_init_prod.py using the API -# image. Run this: +# image. Run this: # - After the very first deployment (creates tables + seeds admin user). 
# - After a deliberate database wipe (the script is idempotent on re-run). # # The Job is NOT re-triggered automatically on each new image build. -# If you add new tables in a later release use a proper migration tool +# If you add new tables in a later release, use a proper migration tool # (Alembic) instead of re-running this Job. # # Usage: -# # Replace with the actual image pullspec, e.g.: -# # image-registry.openshift-image-registry.svc:5000//desdeo-api:latest -# oc create -f deploy/db-init-job.yaml -# -# # Watch it: +# # Replace with your Rahti project name before applying. +# oc apply -f deploy/db-init-job.yaml # oc logs -f job/desdeo-db-init -# -# # Clean up after success: # oc delete job desdeo-db-init # -# If you need to re-run it (e.g. to re-seed after a wipe), delete the old Job -# first then create it again. +# To re-run after a database wipe, delete the old Job first: +# oc delete job desdeo-db-init +# oc apply -f deploy/db-init-job.yaml --- apiVersion: batch/v1 kind: Job @@ -31,7 +27,6 @@ metadata: app: desdeo-api component: db-init spec: - # Do not restart the pod on success. backoffLimit: 3 # Auto-clean completed Job pods after 1 hour. ttlSecondsAfterFinished: 3600 @@ -44,17 +39,16 @@ spec: restartPolicy: Never containers: - name: db-init - # Use the same image as the API Deployment. # Replace with your Rahti project name. - image: image-registry.openshift-image-registry.svc:5000/gialmisi-desdeo/desdeo-api:latest - # Run the production init script. + image: image-registry.openshift-image-registry.svc:5000//desdeo-api:latest command: ["python", "desdeo/api/db_init_prod.py"] - end: + env: - name: DESDEO_PRODUCTION value: "true" envFrom: - # Inject DATABASE_URL, SECRET_KEY, DESDEO_ADMIN_USERNAME, - # DESDEO_ADMIN_PASSWORD from the shared Secret. + # Injects DB_HOST, DB_PORT, DB_NAME, DB_USER, DB_PASSWORD, + # AUTHJWT_SECRET, DESDEO_ADMIN_USERNAME, DESDEO_ADMIN_PASSWORD + # from the shared Secret. 
- secretRef: name: desdeo-secrets resources: diff --git a/deploy/postgres.yaml b/deploy/postgres.yaml index 5378696fc..690690b4f 100644 --- a/deploy/postgres.yaml +++ b/deploy/postgres.yaml @@ -1,18 +1,23 @@ # deploy/postgres.yaml # -# In-cluster PostgreSQL using the Bitnami image, which is designed to run -# as a non-root user and is therefore compatible with OpenShift / Rahti SCCs. +# In-cluster PostgreSQL using the OpenShift built-in image. # # Apply with: # oc apply -f deploy/postgres.yaml # -# Bitnami env var names differ from the official postgres image: -# POSTGRESQL_USERNAME: creates a non-superuser (matches DATABASE_URL user) -# POSTGRESQL_PASSWORD: password for POSTGRESQL_USERNAME -# POSTGRESQL_DATABASE: database to create on first start -# POSTGRESQL_POSTGRES_PASSWORD: password for the postgres superuser +# The OpenShift PostgreSQL image uses these env var names: +# POSTGRESQL_USER — creates a non-superuser application account +# POSTGRESQL_PASSWORD — password for POSTGRESQL_USER +# POSTGRESQL_DATABASE — database to create on first start # -# Data is persisted in the PVC mounted at /bitnami/postgresql. +# These are sourced from desdeo-secrets (keys POSTGRES_USER and POSTGRES_PASSWORD). +# The database name is hardcoded to 'desdeo' here; change it if needed and update +# DB_NAME in the secret accordingly. +# +# Data is persisted in the PVC mounted at /var/lib/pgsql/data. +# +# To check available image tags on your cluster: +# oc get is postgresql -n openshift -o jsonpath='{.spec.tags[*].name}' --- apiVersion: v1 kind: Service diff --git a/deploy/secrets-template.yaml b/deploy/secrets-template.yaml index cd1a37493..9759df4a9 100644 --- a/deploy/secrets-template.yaml +++ b/deploy/secrets-template.yaml @@ -5,22 +5,34 @@ # Usage: # 1. Copy this file: cp deploy/secrets-template.yaml deploy/secrets.yaml # 2. Fill in all placeholders in deploy/secrets.yaml -# 3. Apply: oc apply -f deploy/secrets.yaml +# 3. Apply: oc apply -f deploy/secrets.yaml # 4. 
Keep deploy/secrets.yaml out of version control. # -# All values must be base64-encoded. Quick helper: -# echo -n 'myvalue' | base64 +# Generate passwords and keys: +# python -c "import secrets; print(secrets.token_hex(32))" # password +# python -c "import secrets; print(secrets.token_hex(64))" # JWT key +# python -c "import secrets; print(secrets.token_hex(24))" # webhook secret # -# Or use --from-literal to skip manual encoding: +# Or create the main secret directly without a file: # oc create secret generic desdeo-secrets \ # --from-literal=POSTGRES_USER=desdeo \ # --from-literal=POSTGRES_PASSWORD= \ -# --from-literal=POSTGRES_SUPERUSER_PASSWORD= \ -# --from-literal=DATABASE_URL='postgresql://desdeo:@desdeo-postgres:5432/desdeo' \ -# --from-literal=SECRET_KEY= \ +# --from-literal=DB_HOST=desdeo-postgres \ +# --from-literal=DB_PORT=5432 \ +# --from-literal=DB_NAME=desdeo \ +# --from-literal=DB_USER=desdeo \ +# --from-literal=DB_PASSWORD= \ +# --from-literal=AUTHJWT_SECRET=<64-char-hex> \ # --from-literal=DESDEO_ADMIN_USERNAME=admin \ # --from-literal=DESDEO_ADMIN_PASSWORD= \ -# --dry-run=client -o yaml > deploy/secrets.yaml +# --from-literal=WEBHOOK_SECRET_API=<24-char-hex> \ +# --from-literal=WEBHOOK_SECRET_WEBUI=<24-char-hex> +# +# Then create the webhook secrets separately: +# oc create secret generic desdeo-webhook-api \ +# --from-literal=WebHookSecretKey= +# oc create secret generic desdeo-webhook-webui \ +# --from-literal=WebHookSecretKey= --- apiVersion: v1 kind: Secret @@ -28,25 +40,49 @@ metadata: name: desdeo-secrets type: Opaque stringData: - # Application user (non-superuser). Must match DATABASE_URL below. + # Application user created by the OpenShift PostgreSQL image on first start. POSTGRES_USER: desdeo POSTGRES_PASSWORD: - # Superuser password (used internally by Bitnami image). - POSTGRES_SUPERUSER_PASSWORD: - # Full DSN consumed by the FastAPI app and db_init_prod.py. - # Host must be the Kubernetes Service name defined in postgres.yaml. 
- DATABASE_URL: "postgresql://desdeo:@desdeo-postgres:5432/desdeo" + # These are read individually by desdeo/api/config.py in production mode. + # DB_HOST must match the Kubernetes Service name in postgres.yaml. + DB_HOST: desdeo-postgres + DB_PORT: "5432" + DB_NAME: desdeo + DB_USER: desdeo + DB_PASSWORD: # same value as POSTGRES_PASSWORD above - # JWT / session signing key — generate with: - # python -c "import secrets; print(secrets.token_hex(64))" - SECRET_KEY: + # Generate with: python -c "import secrets; print(secrets.token_hex(64))" + # Never reuse across deployments. + AUTHJWT_SECRET: - # Credentials for the initial analyst user seeded by the db-init Job. + # Seeded by the db-init Job. Can be changed after first login. DESDEO_ADMIN_USERNAME: admin DESDEO_ADMIN_PASSWORD: - # Random strings used to authenticate GitHub webhook payloads. - # Generate with: python -c "import secrets; print(secrets.token_hex(24))" + # Generate with: python -c "import secrets; print(secrets.token_hex(24))" + # Use different values for API and webui. + # IMPORTANT: copy the same values into desdeo-webhook-api and + # desdeo-webhook-webui below — YAML does not support variable references. WEBHOOK_SECRET_API: WEBHOOK_SECRET_WEBUI: +--- +apiVersion: v1 +kind: Secret +metadata: + name: desdeo-webhook-api +type: Opaque +stringData: + # OpenShift's secretReference looks for exactly this key name. + # Must be the same value as WEBHOOK_SECRET_API above. + WebHookSecretKey: +--- +apiVersion: v1 +kind: Secret +metadata: + name: desdeo-webhook-webui +type: Opaque +stringData: + # OpenShift's secretReference looks for exactly this key name. + # Must be the same value as WEBHOOK_SECRET_WEBUI above. + WebHookSecretKey: diff --git a/deploy/webui-buildconfig.yaml b/deploy/webui-buildconfig.yaml index 1e1e15c93..55497d61e 100644 --- a/deploy/webui-buildconfig.yaml +++ b/deploy/webui-buildconfig.yaml @@ -6,8 +6,11 @@ # contextDir: webui → all COPY paths in the Dockerfile are relative to webui/. 
# # VITE_API_URL is baked into the client-side bundle at build time. -# It must be the *public* HTTPS URL of the API Route. -# If you ever change the API hostname, you must trigger a new webui build. +# It is set to '/api' so that browser requests go to the SvelteKit +# proxy route at /api/[...path], which forwards them to the API +# over the internal cluster network. Do NOT set this to the API's +# public Route URL, the proxy architecture means the browser never +# talks directly to the API. --- apiVersion: build.openshift.io/v1 kind: BuildConfig @@ -28,10 +31,11 @@ spec: type: Docker dockerStrategy: dockerfilePath: Dockerfile - # Build arguments — passed as ARG to the Dockerfile. buildArgs: + # '/api' routes browser requests through the SvelteKit server-side proxy. + # See webui/src/routes/api/[...path]/+server.ts. - name: VITE_API_URL - value: "https://gialmisi-desdeo-api.rahtiapp.fi" + value: "/api" env: - name: NODE_OPTIONS value: "--max-old-space-size=3072" @@ -45,10 +49,10 @@ spec: - type: GitHub github: secretReference: - name: desdeo-secrets + name: desdeo-webhook-webui - type: ConfigChange - # npm install + vite build is memory-hungry; 2Gi is usually sufficient. + # npm install + vite build is memory-hungry; 4Gi is usually sufficient. 
resources: requests: memory: "2Gi" From c3e02c203e8279fa44e8ece0e6373f1ca48bc17c Mon Sep 17 00:00:00 2001 From: Giovanni Misitano Date: Thu, 2 Apr 2026 15:48:50 +0300 Subject: [PATCH 24/24] Docs - Updated docs on how to deploy on Openshift --- docs/howtoguides/deploying_on_openshift.md | 465 +++++++++++++++------ docs/howtoguides/index.md | 4 +- 2 files changed, 342 insertions(+), 127 deletions(-) diff --git a/docs/howtoguides/deploying_on_openshift.md b/docs/howtoguides/deploying_on_openshift.md index 5a7d503ef..4924984b7 100644 --- a/docs/howtoguides/deploying_on_openshift.md +++ b/docs/howtoguides/deploying_on_openshift.md @@ -1,31 +1,45 @@ -# How to deploy DESDEO on OpenShift +# How to deploy DESDEO on OpenShift (Kubernetes) ## Overview -This guide walks through deploying the full DESDEO stack: FastAPI backend, -SvelteKit web UI, and PostgreSQL, on an OpenShift/OKD cluster. [CSC +This guide walks through deploying the full DESDEO stack, FastAPI backend, +SvelteKit web UI, and PostgreSQL database, on an OpenShift/OKD cluster. [CSC Rahti](https://rahti.csc.fi/) is used as the concrete example throughout; values specific to Rahti (hostnames, API endpoint, image registry URL) are marked so readers on other OpenShift clusters can substitute their own. -This guide uses YAML manifests and the `oc` CLI exclusively. Every deployment -step is reproducible and version-controlled. OpenShift is a Kubernetes -distribution with extra features layered on top, this guide uses -OpenShift-specific objects (BuildConfig, ImageStream, Route) that do not exist -in vanilla Kubernetes. If you are deploying on plain Kubernetes, consult your -platform's CI/CD documentation instead. +Two approaches are documented: + +- **CLI approach**: uses YAML manifests and the `oc` CLI (command-line interface) + exclusively. Every step is reproducible and version-controlled. The bulk of + this guide follows this approach. +- **Web console approach**: uses the Rahti web interface. 
Described under + [Alternative web console approach](#web-console-approach) for + users who prefer a graphical interface. + +OpenShift is a Kubernetes distribution with extra features layered on top. This +guide uses OpenShift-specific objects (BuildConfig, ImageStream, Route) that do +not exist in vanilla Kubernetes. If you are deploying on plain Kubernetes, +consult your platform's documentation instead. The files you will work with live in two places in the DESDEO repository: - `deploy/`: all OpenShift manifests (ImageStreams, BuildConfigs, Deployments, StatefulSet, Routes, Job). - Several application-level files added or modified to support production - deployment, described under [Repository preparation](#repository-preparation) + deployment, described under [Repository preparation](#repository-preparation). ## Prerequisites -- A CSC account with an active Rahti project (see [Rahti access](https://docs.csc.fi/cloud/rahti/access/)) -- `oc` CLI installed (see [Using the Rahti CLI](https://docs.csc.fi/cloud/rahti/usage/cli/)) +- A CSC account with an active computing project. +- Rahti access enabled for that project. Apply via + [MyCSC](https://my.csc.fi) -> your project -> Services -> Rahti -> Apply for + access. See [Rahti access](https://docs.csc.fi/cloud/rahti/access/) for + details. +- A Rahti project created in the [Rahti web console](https://console-openshift-console.apps.2.rahti.csc.fi/). + When creating the project, include your CSC computing project number in the + description field using the format `csc_project:#######`. +- `oc` CLI installed (see [Using the Rahti CLI](https://docs.csc.fi/cloud/rahti/usage/cli/)). 
- Logged in to the cluster: ```bash oc login https://api.2.rahti.csc.fi:6443 --token= @@ -34,50 +48,60 @@ The files you will work with live in two places in the DESDEO repository: ```bash oc project ``` -- A fork or branch of the [DESDEO - repository](https://github.com/industrial-optimization-group/DESDEO) with the - `deploy/` files committed and pushed +- A fork or branch of the [DESDEO repository](https://github.com/industrial-optimization-group/DESDEO) + with the `deploy/` files committed and pushed. + +!!! note + Newly created CSC computing projects can take some time to become visible + to Rahti. If project creation fails with an error, wait a few minutes and + try again. ## Architecture Four components are deployed and wired together: -1. `desdeo-api` Deployment: FastAPI served by gunicorn+uvicorn, listening on - port 8080. Built in-cluster using OpenShift's Source-to-Image (S2I) strategy - from a Python builder image. +1. **`desdeo-api` Deployment**: FastAPI served by gunicorn+uvicorn, listening on + port 8080. Built in-cluster using OpenShift's Source-to-Image (S2I) strategy + from a custom Python builder image that includes COIN-OR solvers. -2. `desdeo-webui` Deployment: SvelteKit with adapter-node, listening on port 3000. - Built using the Docker strategy from `webui/Dockerfile`. All browser API - calls are routed through a `/api/[...path]` proxy route baked into the - SvelteKit app, this keeps cookies same-origin and avoids CORS complications. +2. **`desdeo-webui` Deployment**: SvelteKit with adapter-node, listening on port + 3000. Built using the Docker strategy from `webui/Dockerfile`. All browser API + calls are routed through a `/api/[...path]` proxy route baked into the + SvelteKit app. This keeps cookies same-origin and avoids CORS complications. -3. `desdeo-postgres` StatefulSet: PostgreSQL running on the built-in Rahti image, - backed by a PersistentVolumeClaim. +3. 
**`desdeo-postgres` StatefulSet**: PostgreSQL running on the built-in OpenShift + image, backed by a PersistentVolumeClaim. Alternatively, [CSC Pukki DBaaS](#option-b-pukki-dbaas) + can be used instead. -4. OpenShift Routes: TLS-terminated at Rahti's HAProxy ingress. Certificates for - `*.rahtiapp.fi` are provisioned automatically. +4. **OpenShift Routes**: TLS-terminated at Rahti's HAProxy ingress. Certificates + for `*.rahtiapp.fi` are provisioned automatically. ### URL environment variables -Two env vars control how the API is reached, and they intentionally point to different targets: +Two env vars control how the API is reached, and they intentionally point to +different targets: | Variable | Value | Used by | |---|---|---| -| `VITE_API_URL` | `/api` | Baked into the client-side JS bundle at build time. Browser requests go to `/api/...`, which the SvelteKit proxy handles. | +| `VITE_API_URL` | `/api` | Baked into the client-side Javascript bundle at build time. Browser requests go to `/api/...`, which the SvelteKit proxy handles. | | `API_BASE_URL` | `http://desdeo-api:8080` | Set at runtime on the webui pod. SvelteKit's server-side proxy uses the internal cluster DNS name to reach the API, never exposed to the browser. | -Do not set `VITE_API_URL` to the API's external Route URL. The proxy architecture means the browser never talks directly to the API. +!!! warning + Do not set `VITE_API_URL` to the API's external Route URL. The proxy + architecture means the browser never talks directly to the API. Doing so + causes cross-origin cookie issues that prevent authentication from working. ## Repository preparation -The following files must be present in the repository before deploying. All manifests live under `deploy/`. -These ar provided in the master branch. +The following files must be present in the repository before deploying. All +manifests live under `deploy/`. 
| File | Purpose | |---|---| +| `deploy/secrets-template.yaml` | Template for creating credentials (never commit real values) | | `deploy/postgres.yaml` | StatefulSet, Service, and PVC for PostgreSQL | | `deploy/builder-imagestream.yaml` | ImageStream that tracks the custom S2I builder image | -| `deploy/builder-buildconfig.yaml` | BuildConfig — Docker strategy, builds the solver-enabled S2I builder image | +| `deploy/builder-buildconfig.yaml` | BuildConfig: Docker strategy, builds the solver-enabled S2I builder image | | `deploy/api-imagestream.yaml` | ImageStream that tracks built API images | | `deploy/webui-imagestream.yaml` | ImageStream that tracks built webui images | | `deploy/api-buildconfig.yaml` | BuildConfig: S2I using `desdeo-builder:latest`, GitHub webhook trigger | @@ -86,18 +110,21 @@ These ar provided in the master branch. | `deploy/webui-deployment.yaml` | Deployment, Service, and Route for the web UI | | `deploy/db-init-job.yaml` | One-shot Job that creates tables and seeds the initial user | -In addition, several non-manifest files are required: +In addition, several application-level files are required: -- `.s2i/bin/assemble`: Custom S2I assemble script that uses `uv sync --frozen`. +- `.s2i/bin/assemble`: Custom S2I assemble script that uses `uv sync --frozen` + to install Python dependencies. The default assemble script uses pip, which + does not understand uv's `--group` flag. -- `.s2i/environment`: Sets S2I environment variables such as `APP_MODULE`, `GUNICORN_CMD_ARGS`, and the port. +- `.s2i/environment`: Sets S2I environment variables such as `APP_MODULE`, + `GUNICORN_CMD_ARGS`, and the port. -- `desdeo/api/db_init_prod.py`: Production database initialization script. The - `db_init.py` debug branch is a no-no in production mode; this separate script +- `desdeo/api/db_init_prod.py`: Production database initialisation script. 
The + `db_init.py` debug branch does nothing in production mode; this separate script creates all SQLModel tables and seeds the initial analyst user. - `webui/Dockerfile`: Multi-stage Node 24 build. The `NPM_RUN=start:production` - build arg selects the adapter-node start script. + env var selects the adapter-node start script via `svelte.config.js`. - `webui/src/routes/api/[...path]/+server.ts`: The SvelteKit proxy route. It forwards all `/api/*` requests to the API using `event.fetch`, so the @@ -105,16 +132,52 @@ In addition, several non-manifest files are required: token refresh transparently. - `desdeo-s2i-buildimage.Dockerfile`: Builds the custom S2I builder image that - extends the Python 3.12 UBI8 base with COIN-OR solvers (`bonmin`, `ipopt`, `cbc`). + extends the Python 3.12 UBI8 base with COIN-OR solvers (`bonmin`, `ipopt`, + `cbc`). ## Step 1: Prepare secrets -All credentials are stored in a single Secret named `desdeo-secrets` (NOT under -version control). Create it with `oc create secret generic` rather than from a -YAML file. This avoids ever writing credentials to disk or committing them to -the repository. +All credentials are stored in a Secret named `desdeo-secrets`. +Two options are available, choose one and skip the other. + +Key reference of the stored secrets: + +| Key | Description | +|---|---| +| `POSTGRES_USER` / `DB_USER` | PostgreSQL application user name | +| `POSTGRES_PASSWORD` / `DB_PASSWORD` | Password for the above (same value) | +| `DB_HOST` | Kubernetes Service name: `desdeo-postgres` (or Pukki hostname) | +| `DB_PORT` | `5432` | +| `DB_NAME` | Database name | +| `AUTHJWT_SECRET` | JWT signing key, generate fresh, never reuse between deployments| +| `DESDEO_ADMIN_USERNAME` | Initial analyst account username | +| `DESDEO_ADMIN_PASSWORD` | Initial analyst account password | +| `WEBHOOK_SECRET_API` | GitHub webhook secret for the API BuildConfig | +| `WEBHOOK_SECRET_WEBUI` | GitHub webhook secret for the webui BuildConfig | + +!!! 
note
+    `DESDEO_PRODUCTION=true` is set directly in the Deployment manifest, not
+    as a Secret, because it is not sensitive.
+
+### Option A: From `secrets.yaml`
+
+Copy the template, fill in the values, then apply it:
+
+```bash
+cp deploy/secrets-template.yaml deploy/secrets.yaml
+# Edit deploy/secrets.yaml, replace every placeholder accordingly
+oc apply -f deploy/secrets.yaml
+```
+
+!!! warning
+    Make __absolutely sure__ that the file `secrets.yaml` is __never__ committed to git!
+
+### Option B: From literals
+Create the secret and the two dedicated webhook secrets using `oc create secret
+generic`:
 
 ```bash
+# Main application secret
 oc create secret generic desdeo-secrets \
     --from-literal=POSTGRES_USER=desdeo \
     --from-literal=POSTGRES_PASSWORD= \
@@ -128,41 +191,28 @@ oc create secret generic desdeo-secrets \
     --from-literal=DESDEO_ADMIN_PASSWORD= \
     --from-literal=WEBHOOK_SECRET_API=$(python3 -c "import secrets; print(secrets.token_hex(24))") \
     --from-literal=WEBHOOK_SECRET_WEBUI=$(python3 -c "import secrets; print(secrets.token_hex(24))")
-```
-Key reference:
+# Dedicated webhook secrets. OpenShift's secretReference requires the key
+# to be named exactly 'WebHookSecretKey'. Use the same values as above.
+oc create secret generic desdeo-webhook-api \ + --from-literal=WebHookSecretKey= +oc create secret generic desdeo-webhook-webui \ + --from-literal=WebHookSecretKey= +``` -| Key | Description | -|---|---| -| `POSTGRES_USER` / `DB_USER` | PostgreSQL application user name | -| `POSTGRES_PASSWORD` / `DB_PASSWORD` | Password for the above | -| `DB_HOST` | Kubernetes Service name, always `desdeo-postgres` | -| `DB_PORT` | `5432` | -| `DB_NAME` | Database name | -| `AUTHJWT_SECRET` | JWT signing key, generate a fresh value, never reuse | -| `DESDEO_ADMIN_USERNAME` | Initial analyst account username | -| `DESDEO_ADMIN_PASSWORD` | Initial analyst account password | -| `WEBHOOK_SECRET_API` | GitHub webhook secret for the API BuildConfig | -| `WEBHOOK_SECRET_WEBUI` | GitHub webhook secret for the webui BuildConfig | +## Step 2: Deploy PostgreSQL -!!! note - `DESDEO_PRODUCTION=true` is set directly in the Deployment manifest, not in - this Secret, because it is not sensitive. +Two options are available. Choose one and skip the other. -## Step 2: Deploy PostgreSQL +### Option A: In-cluster PostgreSQL (default) ```bash oc apply -f deploy/postgres.yaml oc rollout status statefulset/desdeo-postgres ``` -The StatefulSet uses the built-in Rahti PostgreSQL image: - -``` -image-registry.openshift-image-registry.svc:5000/openshift/postgresql:16-el10 -``` - -To check which tags are available on your cluster: +The StatefulSet uses the built-in Rahti PostgreSQL image. To check available +tags on your cluster: ```bash oc get is postgresql -n openshift -o jsonpath='{.spec.tags[*].name}' @@ -172,15 +222,47 @@ Data is stored at `/var/lib/pgsql/data` in the PVC. !!! note The env vars that initialize the database are `POSTGRESQL_USER`, - `POSTGRESQL_PASSWORD`, and `POSTGRESQL_DATABASE` (the `POSTGRESQL_` prefix, - not `POSTGRES_`). The manifests map these from the Secret keys `DB_USER`, - `DB_PASSWORD`, and `DB_NAME`. 
+ `POSTGRESQL_PASSWORD`, and `POSTGRESQL_DATABASE` (note the `POSTGRESQL_` + prefix). The manifests map these from the Secret keys `POSTGRES_USER`, + `POSTGRES_PASSWORD`, and the hardcoded value `desdeo`. -!!! note - An alternative to in-cluster PostgreSQL is [CSC Pukki - DBaaS](https://docs.csc.fi/cloud/dbaas/), a managed PostgreSQL service. - Pukki removes the operational overhead of managing the database yourself but - adds setup steps not covered in this guide. +### Option B: Pukki DBaaS + +[Pukki](https://pukki.dbaas.csc.fi) is CSC's managed PostgreSQL service. It +removes the need to deploy `deploy/postgres.yaml` entirely — skip that step if +using Pukki. + +**Prerequisites**: add the Pukki service to your CSC computing project via +MyCSC → your project → Services → Pukki → Apply for access. + +**Setup:** + +1. Log in to [pukki.dbaas.csc.fi](https://pukki.dbaas.csc.fi). +2. Click **Launch Instance**. Give it a name. Default Volume Size, Datastore, + and Flavor settings are fine for most deployments. +3. Under **Database Access**, add the Rahti egress IP: `86.50.229.150/32`. +4. Under **Initialize Databases**, create a database (e.g. `desdeo`) and set + an admin username and password. These become `DB_USER`, `DB_PASSWORD`, and + `DB_NAME` in the Secret. +5. Once the instance is running, copy the hostname from the Pukki dashboard. + This becomes `DB_HOST` in the Secret instead of `desdeo-postgres`. + +Update the secret with the Pukki hostname: + +```bash +oc create secret generic desdeo-secrets \ + ... \ + --from-literal=DB_HOST= \ + ... +``` + +Skip `oc apply -f deploy/postgres.yaml`. All subsequent steps are identical +regardless of which option you chose. + +!!! warning + The Rahti egress IP `86.50.229.150/32` must be added to the Pukki access + list before deploying. Without it the API pod cannot reach the database and + will crash on startup. 
## Step 3: Create ImageStreams and BuildConfigs @@ -197,15 +279,15 @@ oc apply -f deploy/api-imagestream.yaml oc apply -f deploy/webui-imagestream.yaml ``` -Before applying the BuildConfigs, open each file and substitute the placeholder -`` with the branch you want to build from (e.g. `master`). Ensure -the git URI uses HTTPS, not SSH, the build pod does not have SSH credentials. +Before applying the BuildConfigs, open each file and substitute `` +with the branch you want to build from (e.g. `master`). Ensure the git URI uses +HTTPS, not SSH, because build pods do not have SSH credentials. The API BuildConfig uses the S2I strategy with `desdeo-builder:latest` as its -builder image — the custom image built from `desdeo-s2i-buildimage.Dockerfile` +builder image, the custom image built from `desdeo-s2i-buildimage.Dockerfile` that includes COIN-OR solvers. The webui BuildConfig uses the Docker strategy -with `webui/Dockerfile`. The build arg `VITE_API_URL=/api` is passed explicitly, -this is intentional, as browser requests go through the SvelteKit proxy rather +with `webui/Dockerfile`. The build arg `VITE_API_URL=/api` is passed explicitly. +This is intentional, as browser requests go through the SvelteKit proxy rather than directly to the API. ```bash @@ -220,7 +302,9 @@ The builder image must be ready before the API build can start, as `api-buildconfig.yaml` references `desdeo-builder:latest` as its S2I base. ```bash -# Build the solver-enabled builder image first +# Build the solver-enabled builder image first (takes a few minutes). +# --follow does not always work, Rahti's web-based interface can also be +# used for monitoring progress. oc start-build desdeo-builder --follow # Then build the API and webui (can run in parallel once the builder is done) @@ -229,8 +313,8 @@ oc start-build desdeo-webui --follow ``` The first build takes longer than subsequent ones because there is no layer -cache. 
Expect roughly 6 minutes for the builder, 4 minutes for the API, and
-5 minutes for the webui.
+cache. Expect roughly a few minutes for the builder, and another few minutes for
+both the API and the webui.
 
 Once the API pod is running, verify the solvers are present:
 
@@ -241,15 +325,16 @@ oc exec deployment/desdeo-api -- which bonmin ipopt cbc
 All three should return paths under `/opt/solver_binaries/`.
 
 !!! warning
-    If the webui build fails with `exit status 137`, the build pod ran out of memory. Increase the build pod memory limit in `webui-buildconfig.yaml`:
+    If the webui build fails with `exit status 137`, the build pod ran out of
+    memory. Increase the build pod memory limit in `webui-buildconfig.yaml`:
     ```yaml
     spec:
       resources:
         limits:
           memory: 4Gi
     ```
-    Also add `NODE_OPTIONS=--max-old-space-size=3072` to `dockerStrategy.env` in the same file, then re-apply and re-trigger the build.
----
+    Also ensure `NODE_OPTIONS=--max-old-space-size=3072` is set in
+    `dockerStrategy.env`, then re-apply and re-trigger the build.
 
 ## Step 5: Deploy API and web UI
 
 ```bash
 oc apply -f deploy/api-deployment.yaml
 oc rollout status deployment/desdeo-api
 oc rollout status deployment/desdeo-webui
 ```
 
 !!! warning
-    Rahti enforces a maximum CPU limit-to-request ratio of 5:1. If your
-    `resources.limits.cpu` divided by `resources.requests.cpu` exceeds this, the
-    ReplicaSet will silently fail to create pods. The error does not appear
+    Rahti enforces a maximum CPU limit-to-request ratio of 5:1. If
+    `resources.limits.cpu` divided by `resources.requests.cpu` exceeds this,
+    the ReplicaSet will silently fail to create pods. The error does not appear
     in pod logs, look in the ReplicaSet events:
     ```bash
     oc describe replicaset
     ```
     The manifests in `deploy/` are set within the allowed ratio. If you
     customize resource settings, check the ratio before applying.
-The following env vars must be present on the API pod at runtime (sourced from `desdeo-secrets` via `secretKeyRef` in the Deployment): +The following env vars must be present on the API pod at runtime: | Variable | Source | |---|---| @@ -281,18 +366,18 @@ The following env vars must be present on the API pod at runtime (sourced from ` | `CORS_ORIGINS` | Set in the Deployment to `["https://your-webui.rahtiapp.fi"]` | !!! note - With the SvelteKit proxy architecture, `COOKIE_DOMAIN` on the API is - irrelevant. Cookies are owned by the webui host and forwarded server-side. - Leave `COOKIE_DOMAIN` unset. + `COOKIE_DOMAIN` is intentionally not set. With the SvelteKit proxy + architecture, cookies are owned by the webui host and forwarded + server-side — the API does not need to set a shared cookie domain. ## Step 6: Initialize the database -`db_init_prod.py` creates all SQLModel tables and seeds the initial analyst user -defined by `DESDEO_ADMIN_USERNAME` and `DESDEO_ADMIN_PASSWORD`. Safe to re-run if needed. +`db_init_prod.py` creates all SQLModel tables and seeds the initial analyst +user defined by `DESDEO_ADMIN_USERNAME` and `DESDEO_ADMIN_PASSWORD`. It is +safe to re-run — tables that already exist are not touched. -The script runs as a one-shot Kubernetes Job using the API image. Before -applying, open `deploy/db-init-job.yaml` and replace `` with your Rahti -project name (used to construct the image pull reference). +Before applying, open `deploy/db-init-job.yaml` and replace `` with +your Rahti project name. ```bash oc apply -f deploy/db-init-job.yaml @@ -313,15 +398,41 @@ Once the job completes successfully, delete it: oc delete job desdeo-db-init ``` +!!! note + Warnings about missing solvers (`bonmin`, `cbc`, `ipopt`) in the init job + logs are harmless if the solver builder image has not been used. Once the + API is rebuilt using `desdeo-builder:latest`, the warnings will disappear. 
+ +### Resetting the database + +To re-run the init job on an existing database (e.g. after adding new tables +in a release), simply apply the job again. Existing data is not affected. + +To wipe the database entirely and start fresh, **all users, problems, and +session data will be permanently deleted**: + +```bash +# Drop and recreate the public schema +oc exec -it statefulset/desdeo-postgres -- \ + psql -U desdeo -d desdeo -c "DROP SCHEMA public CASCADE; CREATE SCHEMA public;" + +# Re-run the init job +oc apply -f deploy/db-init-job.yaml +oc logs -f job/desdeo-db-init +oc delete job desdeo-db-init +``` + +!!! warning + The schema drop is irreversible. All data will be permanently lost. ## Step 7: Verify ```bash curl https://your-api.rahtiapp.fi/health -# → {"status":"ok"} +# -> {"status":"ok"} curl -I https://your-webui.rahtiapp.fi/ -# → HTTP/2 200 +# -> HTTP/2 200 # (a 307 redirect to /home is also normal) ``` @@ -335,53 +446,157 @@ BuildConfigs include GitHub webhook triggers. Once configured, every push to the deploy branch triggers a rebuild of the affected component, which then rolls out automatically via the ImageStream trigger on the Deployment. -Retrieve the webhook URLs. The webhook secret is embedded in the URL itself, -the GitHub "Secret" field should be left blank: +Retrieve the webhook secret values from the dedicated webhook secrets: ```bash -oc get bc/desdeo-api -o jsonpath='{.spec.triggers}' | python3 -m json.tool +oc get secret desdeo-webhook-api -o jsonpath='{.data.WebHookSecretKey}' | base64 -d +oc get secret desdeo-webhook-webui -o jsonpath='{.data.WebHookSecretKey}' | base64 -d ``` -Find the `github` trigger entry and copy the `secret` value. 
Then construct the
-full webhook URL:
+Construct the webhook URLs:
 
 ```
 https://api.2.rahti.csc.fi:6443/apis/build.openshift.io/v1/namespaces//buildconfigs/desdeo-api/webhooks//github
+https://api.2.rahti.csc.fi:6443/apis/build.openshift.io/v1/namespaces//buildconfigs/desdeo-webui/webhooks//github
 ```
 
-In GitHub, go to your fork: **Settings → Webhooks → Add webhook**
+In GitHub, go to your repository: **Settings -> Webhooks -> Add webhook**
 
 - Payload URL: the URL constructed above
-- Content type: `application/json`, this is required; the default `x-www-form-urlencoded` will be rejected by OpenShift
-- Secret: leave blank
+- Content type: `application/json`: required; `x-www-form-urlencoded` will be rejected
+- Secret: leave blank (the secret is embedded in the URL)
 - Events: Just the push event
 
-Repeat for `desdeo-webui` using `bc/desdeo-webui`.
+Add one webhook per BuildConfig.
 
 !!! note
-    `oc describe bc/desdeo-api` always shows `` as a placeholder in the
-    webhook URL regardless of how the secret is stored, this is a display-only
-    mask. Always use `oc get bc -o jsonpath` as shown above to retrieve the
-    actual secret value.
+    `oc describe bc/desdeo-api` always shows `` as a placeholder in
+    the webhook URL, this is a display-only mask. Always retrieve the actual
+    secret value from the Secret object as shown above.
+
+At this point, the DESDEO web-API and webui should be running on Rahti, and they should automatically
+update when new commits are pushed to the deployment branch.
## Troubleshooting | Symptom | Cause | Fix | |---|---|---| -| API pod crashes on startup with `ValidationError: authjwt_secret_key` | `AUTHJWT_SECRET` env var missing or key name wrong | Verify the key name in the Secret matches exactly what the Deployment references | -| API pod crashes with DB connection error | `DB_HOST`, `DB_PORT`, `DB_NAME`, `DB_USER`, or `DB_PASSWORD` missing or incorrect | Run `oc describe secret desdeo-secrets` and compare key names with the Deployment's `secretKeyRef` fields | +| API pod crashes with `ValidationError: authjwt_secret_key` | `AUTHJWT_SECRET` env var missing or key name wrong | Verify key names in the Secret match the Deployment's `secretKeyRef` fields | +| API pod crashes with DB connection error | `DB_HOST`, `DB_PORT`, `DB_NAME`, `DB_USER`, or `DB_PASSWORD` missing or incorrect | Run `oc describe secret desdeo-secrets` and compare key names | +| API pod crashes with connection timeout to Pukki | Rahti egress IP not whitelisted in Pukki access list | Add `86.50.229.150/32` to the Pukki instance's Database Access settings | | Webui pod never starts; `FailedCreate` in ReplicaSet events | CPU limit-to-request ratio exceeds 5:1 | Adjust `resources.requests.cpu` so that `limits.cpu / requests.cpu ≤ 5` | | Login returns 500; logs show `TypeError: Invalid URL` | `API_BASE_URL` env var not set on the webui pod | Set `API_BASE_URL=http://desdeo-api:8080` in the webui Deployment | -| Build fails with `exit status 137` | Build pod out of memory | Set `spec.resources.limits.memory: 4Gi` in the BuildConfig and add `NODE_OPTIONS=--max-old-space-size=3072` to `dockerStrategy.env` | +| Build fails with `exit status 137` | Build pod out of memory | Set `spec.resources.limits.memory: 4Gi` in the BuildConfig | | Build fails with `pip install --group` error | Default S2I assemble script used instead of the custom one | Ensure `.s2i/bin/assemble` is present in the repo and uses `uv sync --frozen` | | `uv sync` fails with lockfile conflict | 
`uv.lock` is out of sync with `pyproject.toml` | Run `uv lock` locally and commit the updated lockfile | -| Database init job fails with import errors | `DESDEO_PRODUCTION` not set, API falls back to SQLite debug mode | Ensure `DESDEO_PRODUCTION=true` is set in the Job's env spec | -| GitHub webhook returns 401 | Content type set to `application/x-www-form-urlencoded` | Change content type to `application/json` in the GitHub webhook settings | - +| Database init job fails with import errors | `DESDEO_PRODUCTION` not set; API falls back to SQLite mode | Ensure `DESDEO_PRODUCTION=true` is set in the Job env | +| GitHub webhook returns 401 | Wrong content type or secret mismatch | Set content type to `application/json`; verify webhook secret matches `WebHookSecretKey` in the dedicated Secret | ## Known limitations -Schema migrations:`db_init_prod.py` uses `SQLModel.metadata.create_all`, -which creates missing tables but does not ALTER existing ones. If the data -model changes in a later release, tables must be migrated manually or via, e.g., -Alembic before redeploying. +- **Schema migrations**: `db_init_prod.py` uses `SQLModel.metadata.create_all`, + which creates missing tables but does not ALTER existing ones. If the data + model changes in a later release, tables must be migrated manually or via, + e.g., Alembic, before redeploying. + +- **WebSocket connections**: The GDM-SCORE-bands and GNIMBUS features use + `VITE_API_URL` directly for `ws://` connections and are not proxied through + the SvelteKit `/api` route. These require separate handling not covered in + this guide. + +--- + +## Web console approach + +The steps above use the `oc` CLI and YAML manifests. Rahti also provides a web +console at [console-openshift-console.apps.2.rahti.csc.fi](https://console-openshift-console.apps.2.rahti.csc.fi/) +that lets you accomplish the same tasks through a graphical interface. This +section documents the web console approach as an alternative. + +!!! 
note + The web console approach is less reproducible than the CLI approach and + requires more manual steps on each redeployment. It is recommended for + one-shot deploymnets or first-time exploration, not for ongoing deployments. + +### Getting started + +Log in to the Rahti web console. Look for the **Create Project** button (you +may need to switch to the Administrator perspective to see it). Fill in the +project name and description. Include your CSC computing project number in the +description in the format `csc_project:#######`. + +### Deploying the API + +Navigate to **+Add -> Import from Git**. Enter the repository URL and branch +under **Show advanced Git options -> Git reference**. + +Under **Build**, add the following environment variables: + +``` +DESDEO_PRODUCTION = true +DB_HOST = +DB_PORT = 5432 +DB_NAME = desdeo +DB_USER = +DB_PASSWORD = +AUTHJWT_SECRET = <64-char hex> +CORS_ORIGINS = ["https://your-webui.rahtiapp.fi"] +``` + +The builder image should be set to `python:3.12-ubi9`. The S2I assemble script +(`.s2i/bin/assemble`) uses `uv sync --frozen` to install dependencies. + +Store sensitive values in an OpenShift Secret and reference them in the Build +configuration rather than entering them as plain text. + +### Deploying the web UI + +Add another resource via **+Add -> Import from Git** using the same repository +and branch. Under **Advanced Git Options**, set the **Context Dir** to `/webui`. + +Select **Docker build** as the build strategy (not S2I). The `webui/Dockerfile` +handles the Node 24 build internally. + +Set these build arguments and environment variables: + +``` +VITE_API_URL = /api +API_BASE_URL = http://:8080 +``` + +!!! warning + Do not set `VITE_API_URL` to the API's public Route URL. Browser requests + must go through the SvelteKit `/api` proxy, not directly to the API. 
+ +If the build fails with `exit status 137`, increase the build memory limit in +the BuildConfig YAML: + +```yaml +spec: + resources: + limits: + memory: 4000Mi +``` + +### PostgreSQL + +Use either Pukki DBaaS or a PostgreSQL image from the Rahti developer catalog. + +For Pukki, see [Option B: Pukki DBaaS](#option-b-pukki-dbaas) above, +the setup steps are the same regardless of whether you use the CLI or web console. + +For in-cluster PostgreSQL, navigate to **+Add -> Developer Catalog** and find +the PostgreSQL template. The correct env var names for the OpenShift image are +`POSTGRESQL_USER`, `POSTGRESQL_PASSWORD`, and `POSTGRESQL_DATABASE`. + +### Database initialization + +Once the API is running, use `db_init_prod.py` to create tables and seed the +initial user. The recommended approach is to run it as a Kubernetes Job using +the manifest in `deploy/db-init-job.yaml` (see [Step 6](#step-6-initialize-the-database)). + +Alternatively, you can exec into the API pod directly: + +```bash +oc exec -it deployment/desdeo-api -- python desdeo/api/db_init_prod.py +``` diff --git a/docs/howtoguides/index.md b/docs/howtoguides/index.md index 3653ca79d..9112bf050 100644 --- a/docs/howtoguides/index.md +++ b/docs/howtoguides/index.md @@ -32,6 +32,6 @@ Guides are goal-oriented and are meant to direct users towards specific goals wh ## Web-API and Web-GUI - **[Running the web-API and web-GUI](api_and_gui.md):** How to run the web-API and web-GUI -- **[Hosting on Kubernetes](kubernetes.md):** How to host a DESDEO web application on Kubernetes -- **[Deploying on OpenShift](deploying_on_openshift.md):** How to deploy DESDEO on OpenShift/OKD using `oc` CLI and YAML manifests (Rahti example) +- **[Hosting on Kubernetes (old)](kubernetes.md):** How to host a DESDEO web application on Kubernetes +- **[Deploying on OpenShift](deploying_on_openshift.md):** How to deploy DESDEO on OpenShift/Kubernetes (Rahti example) - **[Implementing method 
interfaces](implementing_method_interfaces.md):** How to implement new interactive method interfaces in the Web-GUI