diff --git a/app.py b/app.py index 7633d32..a6d21dc 100644 --- a/app.py +++ b/app.py @@ -1,1172 +1,1152 @@ -################################################################################ -### Loading the required modules -import json -import re - -import requests -import urllib.parse -from flask import Flask, abort, jsonify, render_template, request -from flask_caching import Cache -from jinja2 import TemplateNotFound -from werkzeug.routing import BaseConverter - -# from wikidataintegrator import wdi_core -from wikibaseintegrator import wbi_helpers - -# Import BioStudies extractor -from data.biostudies.search import BioStudiesExtractor -from data.zenodo.search import ZenodoExtractor -from data.mapping import normalize_all - -################################################################################ -CACHE_TIMEOUT = 60 * 60 * 24 * 5 # 5 days -- [Ozan] I created a separate - # timeout object for the tools page because - # a 5-day caching is too long for it. -CACHE_TIMEOUT_SERVICE = 60 # Separate timeout for the tools page -- 60 - # seconds. 
-### Configuration for BioStudies Integration -# Change these variables to switch between collections -BIOSTUDIES_COLLECTION = "VHP4Safety" # Replace with "EU-ToxRisk" to test -BIOSTUDIES_COLLECTION_NAME = "VHP4Safety" # Display name for the page -ZENODO_COMMUNITY = "vhp4safety" # zenodo community -ZENODO_RECORD_TYPE = "dataset" # only show datasets - -CASESTUDIES = ["thyroid", "kidney", "parkinson"] # List of valid case studies - -###Shared explanation dictionaries for filters (used in both tools and data page) -STAGE_EXPLANATIONS = { - "Chemical Characteristics and Hazard Identification": "A Safety Assessment Workflow Step that categorizes services that use molecular structures, chemical descriptors, and databases to predict or analyze the properties, behavior, and potential risks of chemical substances.", - "Exposure": "A Safety Assessment Workflow Step which categorizes services that evaluate and analyze the route, duration, magnitude and frequency of exposure of an organism or (sub)population to one or multiple chemicals.", - "Toxicokinetics": "A Safety Assessment Workflow Step which categorizes services that analyze the kinetics (absorption, distribution, metabolism and excretion) of chemicals and how these processes influence the internal dose.", - "Toxicodynamics": "A Safety Assessment Workflow Step which categorizes services that use or extend the (quantitative) AOP framework to analyze and assess the interaction of chemicals with biological targets.", - "Adverse Outcome": "A Safety Assessment Workflow Step which specifically refers to clinical and epidemiological effects. 
It categorizes services that provide information on the toxicological endpoints and adverse outcomes at a clinical or epidemiological level of chemical exposures.", - "Other": "Other or unknown category.", - # Legacy labels (kept for the data/methods pages until their data sources migrate) - "ADME": "Absorption, distribution, metabolism, and excretion of a substance (toxic or not) in a living organism, following exposure to this substance.", - "Hazard Assessment": "The process of assessing the intrinsic hazard a substance poses to human health and/or the environment", - "Chemical Information": "Information about chemical properties and identity.", - "General": "Not specific to a flow step.", - "(External) exposure": "External exposure assessment.", - "Generic": "Generic category.", -} -METHODS_URL = "https://raw.githubusercontent.com/VHP4Safety/cloud/refs/heads/main/cap/methods_index.json" -# TOOLS and SERVICES are synonymous -SERVICES_URL = "https://raw.githubusercontent.com/VHP4Safety/cloud/refs/heads/main/cap/service_index.json" - -REG_QUESTIONS = { - "reg_q_1a": { - "label": "Kidney Case Study (a)", - "explanation": "What is the safe cisplatin dose in cancer patients?", - }, - "reg_q_1b": { - "label": "Kidney Case Study (b)", - "explanation": "What is the intrinsic hazard of tacrolimus for nephrotoxicity?", - }, - "reg_q_2a": { - "label": "Parkinson Case Study (a)", - "explanation": "Can compound Dinoseb cause Parkinson's Disease?", - }, - "reg_q_2b": { - "label": "Parkinson Case Study (b)", - "explanation": "What level of exposure to compound Dinoseb leads to risk for developing Parkinson’s disease?", - }, - "reg_q_3a": { - "label": "Thyroid Case Study (a)", - "explanation": "What information about silychristin do we need to give an advice to women in their early pregnancy to decide whether the substance can be used?", - }, - "reg_q_3b": { - "label": "Thyroid Case Study (b)", - "explanation": "Does silychristin influence the thyroid-mediated brain development 
in the fetus resulting in cognitive impairment in children?", - }, -} - -# Derived: keep the old structure available for templates expecting {label: explanation} -REG_QUESTION_EXPLANATIONS = { - v["label"]: v["explanation"] for v in REG_QUESTIONS.values() -} - - -################################################################################ -class RegexConverter(BaseConverter): - """Converter for regular expression routes. - - References - ---------- - Scholia views.py - https://stackoverflow.com/questions/5870188 - - """ - - def __init__(self, url_map, *items): - """Set up regular expression matcher.""" - super(RegexConverter, self).__init__(url_map) - self.regex = items[0] - - -cache_config = { - "CACHE_TYPE": "SimpleCache", # Flask-Caching related configs - "CACHE_DEFAULT_TIMEOUT": CACHE_TIMEOUT, # 60 min chaching - "CACHE_SERVICE_TIMEOUT": CACHE_TIMEOUT_SERVICE -} -app = Flask(__name__) -app.config.from_mapping(cache_config) -cache = Cache(app) - - -@cache.memoize(timeout=CACHE_TIMEOUT) -def get_json_dict(url: str, timeout: int = 5) -> dict: - """Fetch xxxx_index.json from the cloud repo and return as a dictionary. - Return an empty dict on any error to avoid breaking pages that depend on it. - """ - try: - resp = requests.get(url, timeout=timeout) - if resp.status_code != 200: - return {} - data = resp.json() - if isinstance(data, dict): - return data - else: - return {} - except Exception: - return {} - - -# A separate get_json_dict function for the tools page with its own timeout. -@cache.memoize(timeout=CACHE_TIMEOUT_SERVICE) -def get_json_dict_service(url: str, timeout: int = 5) -> dict: - """Fetch xxxx_index.json from the cloud repo and return as a dictionary. - Return an empty dict on any error to avoid breaking pages that depend on it. 
- """ - try: - resp = requests.get(url, timeout=timeout) - if resp.status_code != 200: - return {} - data = resp.json() - if isinstance(data, dict): - return data - else: - return {} - except Exception: - return {} - - -@cache.memoize(timeout=CACHE_TIMEOUT) -def get_repository_data( - search_query: str, - page: int = 1, - page_size: int = 18, - filters: list | None = None, - load_metadata: bool = True, -) -> tuple[dict, dict]: - """ - Extract data from respositories - """ - # Initialize extractor for BIOSTUDIES - bs_extractor = BioStudiesExtractor(collection=BIOSTUDIES_COLLECTION) - - # Fetch data based on search query or list all - if search_query: - bs_results = bs_extractor.search_studies( - search_query, - page=page, - page_size=page_size, - filters=filters, - load_metadata=load_metadata, - ) - else: - bs_results = bs_extractor.list_studies( - page=page, - page_size=page_size, - include_urls=True, - filters=filters, - load_metadata=load_metadata, - ) - - # Initialize extractor for Zenodo - zen_extractor = ZenodoExtractor( - community=ZENODO_COMMUNITY, record_type=ZENODO_RECORD_TYPE - ) - - if not filters: - # We currently do no filter Zenodo datasets. - if search_query: - zen_result = zen_extractor.search_records( - search_query, page=page, size=page_size, load_metadata=load_metadata - ) - else: - # load metadata needed for is_rocrate filtering in template - zen_result = zen_extractor.list_records( - page=page, - size=page_size, - include_urls=True, - load_metadata=load_metadata, - ) - else: - zen_result = {"hits": [], "total": 0, "error": None} - - return bs_results, zen_result - - -# Provide methods list to all templates for the Methods dropdown in the navbar -@app.context_processor -def inject_methods_menu(): - """Fetch methods_index.json and expose a simple list of {id, title} to templates. - Return an empty list on any error to avoid breaking pages. 
- """ - data = get_json_dict(METHODS_URL) - if data: - items = [] - for key, val in data.items() if isinstance(data, dict) else []: - title = ( - val.get("method") - or val.get("method_name_content") - or val.get("method_name") - or key - ) - items.append({"id": key, "title": title}) - # sort by title - items = sorted(items, key=lambda x: x["title"].lower()) - return {"methods_menu": items} - else: - return {"methods_menu": []} - - -@app.context_processor -def inject_tools_menu(): - """Fetch methods_index.json and expose a simple list of {id, title} to templates. - Return an empty list on any error to avoid breaking pages. - """ - data = get_json_dict_service(SERVICES_URL) - if data: - items = [] - for key, val in data.items() if isinstance(data, dict) else []: - title = val.get("service") or key - items.append({"id": key, "title": title}) - # sort by title - items = sorted(items, key=lambda x: x["title"].lower()) - return {"tools_menu": items} - else: - return {"tools_menu": []} - - -@app.context_processor -def inject_data_menu(): - """Fetch methods_index.json and expose a simple list of {id, title} to templates. - Return an empty list on any error to avoid breaking pages. - """ - bs_results, zen_results = get_repository_data(search_query="") - hits: list = bs_results.get("hits", []) - hits.extend(zen_results.get("hits", [])) - if hits: - items = [] - for hit in hits: - title = hit.get("title") - id = hit.get("accession", "") or hit.get("doi_url", "") or hit.get("id", "") - url = hit.get("url", "") or hit.get("doi_url") - items.append({"id": id, "title": title, "url": url}) - # sort by title - items = sorted(items, key=lambda x: x["title"].lower()) - return {"data_menu": items} - else: - return {"data_menu": []} - - -################################################################################ -### The landing page -@app.route("/") -def home(): - try: - tools = get_json_dict_service( - SERVICES_URL - ) # Geting the service_list.json in the dictionary format. 
- tools = list(tools.values()) # Converting the dictionary to a list object. - except Exception as e: - return f"Error processing service data: {e}", 500 - num_tools = len(tools) - num_case_studies = len(CASESTUDIES) - bs_res, zen_res = get_repository_data(search_query="") - num_datasets = bs_res["total"] + zen_res["total"] - return render_template( - "home.html", - num_tools=num_tools, - num_case_studies=num_case_studies, - num_datasets=num_datasets, - ) - - -################################################################################ -### The sitemap.xml for search engines -@app.route("/sitemap.xml") -def sitemap(): - sitemapContent = """ - - - https://platform.vhp4safety.nl/ - - - https://platform.vhp4safety.nl/casestudies - - - https://platform.vhp4safety.nl/tools - - - https://platform.vhp4safety.nl/methods - - - https://platform.vhp4safety.nl/data - - -"""; - return Response(sitemapContent, mimetype='text/xml'); - - -################################################################################ -### Pages under 'Data' -@app.route("/data") -def data(): - # Get query parameters for pagination and search - page = request.args.get("page", 1, type=int) - page_size = request.args.get("page_size", 18, type=int) - search_query = request.args.get("query", "", type=str) - - # Get filter parameters - filter_case_study = request.args.get("filter_case_study", "", type=str) - filter_regulatory_question = request.args.get( - "filter_regulatory_question", "", type=str - ) - filter_flow_step = request.args.get("filter_flow_step", "", type=str) - - # Build filter list (only include non-empty filters) - filters = [] - if filter_case_study: - filters.append(("case_study", filter_case_study)) - if filter_regulatory_question: - filters.append(("regulatory_question", filter_regulatory_question)) - if filter_flow_step: - filters.append(("flow_step", filter_flow_step)) - - bs_results, zen_results = get_repository_data( - search_query, page, page_size, filters=filters - ) - - # 
Extract studies and metadata - studies = bs_results.get("hits", []) - bs_total = bs_results.get("total", 0) - bs_error: str | None = bs_results.get("error", None) - - # Extract datasets and metadata from Zenodo - datasets = zen_results.get("hits", []) - zen_total = zen_results.get("total", 0) - zen_error: str | None = zen_results.get("error", None) - - # enrich with normalized metadata mapping: - - # studies, datasets = normalize_all([studies],[datasets]) - - # combine totals for pagination - total = bs_total + zen_total - - # Get filtering metadata (if filters were applied) - filters_applied = bs_results.get("filters_applied", False) - hits_returned = bs_results.get("hits_returned", len(studies)) - pages_fetched = bs_results.get("pages_fetched", 1) - page_size_met = bs_results.get("page_size_met", True) - - # Calculate pagination info - has_next = (page * page_size) < total - has_prev = page > 1 - - # Pass data to template - return render_template( - "data/data.html", - studies=studies, - datasets=datasets, - total=total, - page=page, - page_size=page_size, - search_query=search_query, - collection_name=BIOSTUDIES_COLLECTION_NAME, - collection=BIOSTUDIES_COLLECTION, - errors={"zenodo": zen_error, "biostudies": bs_error}, - has_next=has_next, - has_prev=has_prev, - filter_case_study=filter_case_study, - filter_regulatory_question=filter_regulatory_question, - filter_flow_step=filter_flow_step, - filters_applied=filters_applied, - hits_returned=hits_returned, - pages_fetched=pages_fetched, - page_size_met=page_size_met, - stage_explanations=STAGE_EXPLANATIONS, - reg_question_explanations=REG_QUESTION_EXPLANATIONS, - ) - - -################################################################################ -### DataSet detail view - - -@app.template_filter("split_text_int") -def split_text_int(value: None|str) -> tuple[str, None|int]: - """ - Splits trailing integer from a string. 
- 'S-VHPS21' -> ('S-VHPS', 21) - 'ABC' -> ('ABC', None) - 'X-12A' -> ('X-12A', None) # only splits if digits are at the very end - """ - # used to construct ftp file link *POTENTIALLY BRITTLE* - if value is None: - return ("", None) - - s = str(value) - m = re.match(r"^(.*?)(\d+)$", s) - if not m: - return (s, None) - - return (m.group(1), int(m.group(2))) - - -@app.route("/data/") -def data_detail(dataid): - bs_results, zen_results = get_repository_data(dataid) - - studies = bs_results.get("hits", []) - bs_total = bs_results.get("total", 0) - bs_error: str | None = bs_results.get("error", None) - - # Extract datasets and metadata from Zenodo - datasets = zen_results.get("hits", []) - zen_total = zen_results.get("total", 0) - zen_error: str | None = zen_results.get("error", None) - - studies, datasets = normalize_all(studies, datasets) - - if bs_error and not zen_error: - if zen_total != 1: - return abort(404) - elif zen_error and not bs_error: - if bs_total != 1: - return abort(404) - if studies: - return render_template("data/data_details.html", data=studies[0]) - elif datasets: - return render_template("data/data_details.html", data=datasets[0]) - return abort(404) - -################################################################################ -### Pages under 'Models' -@app.route("/models_page") -def models(): - # Get query parameters for pagination and search - page = request.args.get("page", 1, type=int) - page_size = request.args.get("page_size", 18, type=int) - search_query = request.args.get("query", "", type=str) - - # Get filter parameters - filter_case_study = request.args.get("filter_case_study", "", type=str) - filter_regulatory_question = request.args.get( - "filter_regulatory_question", "", type=str - ) - filter_flow_step = request.args.get("filter_flow_step", "", type=str) - - # Build filter list (only include non-empty filters) - filters = [] - if filter_case_study: - filters.append(("case_study", filter_case_study)) - if 
filter_regulatory_question: - filters.append(("regulatory_question", filter_regulatory_question)) - if filter_flow_step: - filters.append(("flow_step", filter_flow_step)) - - # Initialize extractor - extractor = BioStudiesExtractor(collection=BIOSTUDIES_COLLECTION) - - # Fetch data based on search query or list all - if search_query: - results = extractor.search_studies( - search_query, page=page, page_size=page_size, filters=filters - ) - else: - results = extractor.list_studies( - page=page, page_size=page_size, include_urls=True, filters=filters - ) - - # Extract studies and metadata - studies = results.get("hits", []) - total = results.get("total", 0) - error = results.get("error", None) - - # Get filtering metadata (if filters were applied) - filters_applied = results.get("filters_applied", False) - hits_returned = results.get("hits_returned", len(studies)) - pages_fetched = results.get("pages_fetched", 1) - page_size_met = results.get("page_size_met", True) - - # Calculate pagination info - has_next = (page * page_size) < total - has_prev = page > 1 - - # Pass model data to template - return render_template( - "models_page.html", - studies=studies, - total=total, - page=page, - page_size=page_size, - search_query=search_query, - collection_name=BIOSTUDIES_COLLECTION_NAME, - collection=BIOSTUDIES_COLLECTION, - error=error, - has_next=has_next, - has_prev=has_prev, - filter_case_study=filter_case_study, - filter_regulatory_question=filter_regulatory_question, - filter_flow_step=filter_flow_step, - filters_applied=filters_applied, - hits_returned=hits_returned, - pages_fetched=pages_fetched, - page_size_met=page_size_met, - stage_explanations=STAGE_EXPLANATIONS, - reg_question_explanations=REG_QUESTION_EXPLANATIONS, - ) - - -################################################################################ -### Pages under 'Tools' - - -### Here begins the updated version for creating the tool list page. 
-@app.route("/tools") -def tools(): - try: - tools = get_json_dict_service( - SERVICES_URL - ) # Geting the service_list.json in the dictionary format. - tools = list(tools.values()) # Converting the dictionary to a list object. - - # Mapping the URLs with glossary IDs to their text values. - stage_mapping = { - "https://vhp4safety.github.io/glossary#VHP0000153": "Chemical Characteristics and Hazard Identification", - "https://vhp4safety.github.io/glossary#VHP0000154": "Exposure", - "https://vhp4safety.github.io/glossary#VHP0000155": "Toxicokinetics", - "https://vhp4safety.github.io/glossary#VHP0000156": "Toxicodynamics", - "https://vhp4safety.github.io/glossary#VHP0000158": "Adverse Outcome", - # Legacy mappings (superseded by the Process Flow Step URIs above) - "https://vhp4safety.github.io/glossary#VHP0000056": "ADME", - "https://vhp4safety.github.io/glossary#VHP0000102": "Hazard Assessment", - "https://vhp4safety.github.io/glossary#VHP0000148": "Chemical Information", - "https://vhp4safety.github.io/glossary#VHP0000149": "General", - } - - for tool in tools: - full_stage_url = tool.get("stage", "") - - # Writing the service name and stage values in the logs for troubleshooting. - # print(f"Tool: {tool['service']}, Stage URL: {full_stage_url}") # Log the full URL - - # Checking if the full URL is in the mapping and updating the stage. - if full_stage_url in stage_mapping: - # print(f"Mapping stage URL {full_stage_url} to {stage_mapping[full_stage_url]}") # Log the mapping - tool["stage"] = stage_mapping[full_stage_url] - elif tool["stage"] in ["NA", "Unknown"]: - tool["stage"] = ( - "Other" # Combining "NA" and "Unknown" stages in a single stage-type, "Other". 
- ) - - html_name = tool.get("html_name") - md_name = tool.get("md_file_name") - png_name = tool.get("png_file_name") - - tool["url"] = f"https://cloud.vhp4safety.nl/service/{html_name}" - tool["meta_data"] = ( - f"https://raw.githubusercontent.com/VHP4Safety/cloud/main/docs/service/{md_name}" - if md_name - else "md file not found" - ) - - # Check if the tool has the placeholder logo - placeholder_logo = "https://github.com/VHP4Safety/ui-design/blob/main/static/images/logo.png" - if png_name == placeholder_logo: - tool["png"] = None # set to None if it's the common placeholder - else: - tool["png"] = ( - f"https://raw.githubusercontent.com/VHP4Safety/cloud/main/docs/service/{png_name}" - if not png_name.startswith("http") - else png_name - ) - - inst_url = tool.get("inst_url", "no_url") - if not inst_url: # catches "" as well - inst_url = "no_url" - tool["inst_url"] = inst_url - - # Fetch per-tool detail JSON to check hosting status - tool_id = tool.get("id", "") - vhp_hosted = False - if inst_url != "no_url" and tool_id: - try: - detail_url = f"https://cloud.vhp4safety.nl/service/{tool_id}.json" - detail_resp = requests.get(detail_url, timeout=5) - if detail_resp.status_code == 200: - detail = detail_resp.json() - vhp_platform = detail.get("instance", {}).get("vhp-platform", "").lower() - vhp_hosted = vhp_platform not in ("external", "independent", "") - except Exception: - pass - tool["vhp_hosted"] = vhp_hosted - - # Getting selected stages from the URL. - selected_stages = request.args.getlist("stage") - - # Filtering tools by selected stages. - if selected_stages: - tools = [tool for tool in tools if tool.get("stage") in selected_stages] - - # Getting all unique stages from the tools for the filter options. - stages = sorted(set(tool.get("stage") for tool in tools if tool.get("stage"))) - - # Forcing "Other" to be the last item in the list of stages. 
- if "Other" in stages: - stages.remove("Other") - stages.append("Other") - - # Filtering over the regulatory questions. - reg_questions = {v["label"]: k for k, v in REG_QUESTIONS.items()} - - selected_questions = request.args.getlist("reg_q") - - for question in selected_questions: - field = reg_questions.get(question) - if field: - tools = [ - tool for tool in tools if str(tool.get(field, "")).lower() == "true" - ] - - # Getting the search query from URL to add a search bar based on tool names. - search_query = request.args.get("search", "").strip().lower() - - # Filtering tools by search query. - if search_query: - tools = [ - tool - for tool in tools - if search_query in tool.get("service", "").lower() - ] - - return render_template( - "tools/tools.html", - tools=tools, - stages=stages, - selected_stages=selected_stages, - reg_questions=reg_questions, - selected_questions=selected_questions, - stage_explanations=STAGE_EXPLANATIONS, - reg_question_explanations=REG_QUESTION_EXPLANATIONS, - ) - - except Exception as e: - return f"Error processing service data: {e}", 500 - - -### New route to list methods (similar to the tools page) -@app.route("/methods") -@app.route("/methods/") -def methods(): - """Fetch methods_index.json from the cloud repo, normalize fields and render a methods list page.""" - url = "https://raw.githubusercontent.com/VHP4Safety/cloud/refs/heads/main/cap/methods_index.json" - response = requests.get(url) - - if response.status_code != 200: - return f"Error fetching methods list: {response.status_code}", 503 - - try: - methods = response.json() - methods = list(methods.values()) # convert dict to list - - # Normalize fields for the template and collect stages - stages_set = set() - normalized = [] - for m in methods: - norm = {} - norm["id"] = m.get("id", "") - # template expects 'service' and 'description' - norm["service"] = ( - m.get("method") - or m.get("method_name_content") - or m.get("method_name") - or "" - ) - norm["description"] = ( - 
m.get("method_description_content") or m.get("method_description") or "" - ) - # main_url used for method webpage (catalog page) - norm["main_url"] = m.get("catalog_webpage_url") or "no_url" - # interactive instance not present in methods index - norm["inst_url"] = m.get("inst_url") or "no_url" - # metadata md file not available in index; keep empty string - norm["meta_data"] = m.get("meta_data") or "" - # placeholder/no png - norm["png"] = None - # keep original raw data for potential details page - norm["raw"] = m - - # collect stages (split comma-separated values) - stage_field = (m.get("vhp4safety_workflow_stage_content") or "").strip() - if stage_field: - for part in [s.strip() for s in stage_field.split(",")]: - if part: - stages_set.add(part) - - normalized.append(norm) - - # Apply search and filters similar to /tools - selected_stages = request.args.getlist("stage") - selected_questions = request.args.getlist("reg_q") - search_query = request.args.get("search", "").strip().lower() - - methods_filtered = normalized - - if selected_stages: - methods_filtered = [ - m - for m in methods_filtered - if any( - s - in ( - (m["raw"].get("vhp4safety_workflow_stage_content") or "").split( - "," - ) - ) - for s in selected_stages - ) - ] - - # Filter by regulatory questions if provided (REG_QUESTIONS keys map to internal fields) - reg_questions = {v["label"]: k for k, v in REG_QUESTIONS.items()} - if selected_questions: - for question in selected_questions: - field = reg_questions.get(question) - if field: - methods_filtered = [ - m - for m in methods_filtered - if str(m["raw"].get(field, "")).lower() == "true" - ] - - if search_query: - methods_filtered = [ - m - for m in methods_filtered - if search_query in m.get("service", "").lower() - ] - - stages = sorted(stages_set) - if "Other" in stages: - stages.remove("Other") - stages.append("Other") - - # Pass everything the template expects - return render_template( - "methods/methods.html", - methods=methods_filtered, - 
stages=stages, - selected_stages=selected_stages, - reg_questions=reg_questions, - selected_questions=selected_questions, - stage_explanations=STAGE_EXPLANATIONS, - reg_question_explanations=REG_QUESTION_EXPLANATIONS, - ) - - except Exception as e: - return f"Error processing methods data: {e}", 500 - - -@app.route("/methods/") -def method_page(methodid): - """Render a single method page using templates/methods/method.html - Method details are taken from methods_index.json (keyed by method id). - """ - try: - methods = get_json_dict(METHODS_URL) - # methods_index.json is a dict keyed by method id - if methodid not in methods: - abort(404) - method_details = methods[methodid] - except Exception as e: - return f"Error processing methods data: {e}", 500 - - # Try to load the full method JSON from the docs/methods folder (raw github) - method_json = None - # URL-encode the filename part to be safe - encoded = urllib.parse.quote(methodid, safe="") - raw_url = ( - "https://raw.githubusercontent.com/VHP4Safety/cloud/refs/heads/main/docs/methods/" - + f"{encoded}.json" - ) - try: - r = requests.get(raw_url, timeout=5) - if r.status_code == 200: - method_json = r.json() - else: - # fall back to using the index entry as minimal data - method_json = method_details - except Exception as exc: - # on any error, fall back to index entry - method_json = method_details - - # Pass both to the template: some templates expect method_json, others method_details - return render_template( - "methods/method.html", - method=method_details, - method_details=method_details, - method_json=method_json, - ) - - -@app.route("/tools/") -def tool_page(toolname): - # get the tools metadata: - try: - tools = get_json_dict_service(SERVICES_URL) - tools = dict(tools) - # Geting the service_list.json in the dictionary format. - # Converting the dictionary to a list object. 
- except Exception as e: - return f"Error processing service data: {e}", 500 - - # Map toolname to the correct JSON file in the new tool folder - if toolname not in tools: - abort(404) - - # get the tools metadata: - url = "https://cloud.vhp4safety.nl/service/" + toolname + ".json" - response = requests.get(url) - - if response.status_code != 200: - return f"Error fetching service list: {response.status_code}", 503 - - try: - tool_details = response.json() - tool_details = dict(tool_details) - # Geting the service_list.json in the dictionary format. - # Converting the dictionary to a list object. - except Exception as e: - return f"Error processing service data: {e}", 500 - - # Pass the json filename to the template (for JS to pick up) - return render_template( - "tools/tool.html", tool_json=tools[toolname], tool_details=tool_details - ) - - -################################################################################ -### Pages under 'Implementation' - -# General Explore our work -@app.route("/exploreourwork") -def exploreourwork(): - return render_template("exploreourwork.html") - -# General Training -@app.route("/training") -def training(): - return render_template("training.html") - -# General Impact -@app.route("/impact") -def impact(): - return render_template("impact.html") - -################################################################################ -### Pages under 'Process Flow' - - -# General Safety Assessment Workflow page -@app.route("/Safety_Assessment_Workflow") -def SafetyAssessmentWorkflow(): - return render_template("Safety_Assessment_Workflow.html") - - -################################################################################ -### Pages under 'Case Studies' - - -# General case studies page -@app.route("/casestudies") -def workflows(): - return render_template("case_studies/casestudies.html") - - -# Individual case study page, dynamically filled based on URL -@app.route("/casestudies/", defaults={"step": ""}) 
-@app.route("/casestudies//") -@app.route("/casestudies///") -# additional routes are parsed client side via js to allow smooth animation -def casestudy(case:str="", question:str="", step:str=""): - if case not in CASESTUDIES: - abort(404) - # JS will handle steps via the URL - return render_template("case_studies/casestudy.html", case=case) - - -@app.route("/workflow/") -def show(workflow): - try: - return render_template( - f"case_studies/parkinson/workflows/{workflow}_workflow.html" - ) - except TemplateNotFound: - abort(404) - - -################################################################################ -### Pages related to chemical compounds - - -def is_valid_qid(qid): - return re.fullmatch(r"Q\d+", qid) is not None - - -@app.route("/compound/") -def show_compound(cwid): - try: - return render_template(f"compound.html", cwid=cwid) - except TemplateNotFound: - abort(404) - - -@app.route("/get_compound_properties/") -def show_compounds_properties_as_json(cwid): - if not is_valid_qid(cwid): - return jsonify({"error": "Invalid compound identifier"}), 400 - compoundwikiEP = "https://compoundcloud.wikibase.cloud/query/sparql" - sparqlquery = ( - "PREFIX wd: \n" - "PREFIX wdt: \n\n" - "SELECT ?cmp ?cmpLabel ?formula ?mass ?inchi ?inchiKey ?SMILES WHERE {\n" - " VALUES ?cmp { wd:" + cwid + " }\n" - " ?cmp wdt:P9 ?inchi ;\n" - " wdt:P10 ?inchiKey .\n" - " OPTIONAL { ?cmp wdt:P2 ?mass }\n" - " OPTIONAL { ?cmp wdt:P3 ?formula }\n" - " OPTIONAL { ?cmp wdt:P7 ?chiralSMILES }\n" - " OPTIONAL { ?cmp wdt:P12 ?nonchiralSMILES }\n" - ' BIND (COALESCE(IF(BOUND(?chiralSMILES), ?chiralSMILES, 1/0), IF(BOUND(?nonchiralSMILES), ?nonchiralSMILES, 1/0), "") AS ?SMILES)\n' - ' SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". 
}\n' - "}" - ) - try: - compound_dat = wbi_helpers.execute_sparql_query( - sparqlquery, endpoint=compoundwikiEP - ) - except Exception as e: - return jsonify({"error": str(e)}), 500 - if not bool(compound_dat): - return jsonify({"error": "No data found"}), 404 - compound_dat = compound_dat["results"]["bindings"][0] - # return jsonify(compound_dat); - compound_list = [ - { - "wcid": compound_dat["cmp"]["value"], - "label": compound_dat["cmpLabel"]["value"], - "inchi": compound_dat["inchi"]["value"], - "inchikey": compound_dat["inchiKey"]["value"], - "SMILES": compound_dat["SMILES"]["value"], - "formula": compound_dat["formula"]["value"], - "mass": compound_dat["mass"]["value"], - } - ] - return jsonify(compound_list), 200 - - -@app.route("/get_compound_identifiers/") -def show_compounds_identifiers_as_json(cwid): - if not is_valid_qid(cwid): - return jsonify({"error": "Invalid compound identifier"}), 400 - compoundwikiEP = "https://compoundcloud.wikibase.cloud/query/sparql" - sparqlquery = ( - "PREFIX wd: \n" - "PREFIX wdt: \n\n" - "SELECT DISTINCT ?propertyLabel ?value ?formatterURL\n" - "WHERE {\n" - " VALUES ?property { wd:P13 wd:P22 wd:P23 wd:P26 wd:P27 wd:P28 wd:P36 wd:P41 wd:P43 wd:P44 wd:P45 }\n" - " ?property wikibase:directClaim ?valueProp .\n" - " OPTIONAL { wd:" + cwid + " ?valueProp ?value }\n" - " OPTIONAL { ?property wdt:P6 ?formatterURL }\n" - ' SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". 
}\n' - "}" - ) - try: - compound_dat = wbi_helpers.execute_sparql_query( - sparqlquery, endpoint=compoundwikiEP - ) - except Exception as e: - return jsonify({"error": str(e)}), 500 - if len(compound_dat["results"]["bindings"]) == 0: - return jsonify({"error": "No data found"}), 404 - compound_dat = compound_dat["results"]["bindings"] - # return jsonify(compound_dat) - - compound_list = [] - for expProp in compound_dat: - if "value" in expProp: - compound_list.append( - { - "propertyLabel": expProp["propertyLabel"]["value"], - "value": expProp["value"]["value"], - "formatterURL": expProp["formatterURL"]["value"], - } - ) - else: - compound_list.append( - { - "propertyLabel": expProp["propertyLabel"]["value"], - "value": "", - "formatterURL": "", - } - ) - return jsonify(compound_list), 200 - - -@app.route("/get_compound_toxicology/") -def show_compounds_toxicology_as_json(cwid): - if not is_valid_qid(cwid): - return jsonify({"error": "Invalid compound identifier"}), 400 - compoundwikiEP = "https://compoundcloud.wikibase.cloud/query/sparql" - sparqlquery = ( - "PREFIX wd: \n" - "PREFIX wdt: \n\n" - "SELECT DISTINCT ?propertyLabel ?value ?formatterURL\n" - "WHERE {\n" - " VALUES ?property { wd:P17 wd:P19 wd:P4 }\n" - " ?property wikibase:directClaim ?valueProp .\n" - " OPTIONAL { wd:" + cwid + " ?valueProp ?value }\n" - " OPTIONAL { ?property wdt:P6 ?formatterURL }\n" - ' SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". 
}\n' - "}" - ) - try: - compound_dat = wbi_helpers.execute_sparql_query( - sparqlquery, endpoint=compoundwikiEP - ) - except Exception as e: - return jsonify({"error": str(e)}), 500 - if len(compound_dat["results"]["bindings"]) == 0: - return jsonify({"error": "No data found"}), 404 - compound_dat = compound_dat["results"]["bindings"] - # return jsonify(compound_dat) - - compound_list = [] - for expProp in compound_dat: - print(expProp) - if "value" in expProp: - compound_list.append( - { - "propertyLabel": expProp["propertyLabel"]["value"], - "value": expProp["value"]["value"], - } - ) - else: - compound_list.append( - {"propertyLabel": expProp["propertyLabel"]["value"], "value": ""} - ) - return jsonify(compound_list), 200 - - -@app.route("/get_compound_expdata/") -def show_compounds_expdata_as_json(cwid): - if not is_valid_qid(cwid): - return jsonify({"error": "Invalid compound identifier"}), 400 - compoundwikiEP = "https://compoundcloud.wikibase.cloud/query/sparql" - sparqlquery = ( - "PREFIX wd: \n" - "PREFIX wdt: \n" - "PREFIX wid: \n" - "PREFIX widt: \n" - "PREFIX prov: \n\n" - "SELECT ?qid WHERE {\n" - " wd:P5 wikibase:directClaim ?identifierProp .\n" - " wd:" + cwid + " ?identifierProp ?wikidata .\n" - ' BIND (iri(CONCAT("http://www.wikidata.org/entity/", ?wikidata)) AS ?qid)\n' - "}" - ) - try: - compound_dat = wbi_helpers.execute_sparql_query( - sparqlquery, endpoint=compoundwikiEP - ) - except Exception as e: - return jsonify({"error": str(e)}), 500 - if not bool(compound_dat): - return jsonify({"error": "No data found"}), 404 - if len(compound_dat["results"]["bindings"]) == 0: - return jsonify({"error": "No data found"}), 404 - compound_dat = compound_dat["results"]["bindings"][0] - qid = compound_dat["qid"]["value"] - # the next query may be affected by https://github.com/ad-freiburg/qlever-control/issues/187 - sparqlquery = ( - "PREFIX wd: \n" - "PREFIX wdt: \n" - "PREFIX prov: \n" - "PREFIX rdfs: \n" - "PREFIX pr: \n" - "PREFIX wikibase: \n\n" - 
"SELECT DISTINCT ?propEntityLabel ?value ?unitsLabel ?source ?doi ?statement\n" - "WHERE {\n" - " <" + qid + "> ?propp ?statement .\n" - " ?statement a wikibase:BestRank ;\n" - " ?proppsv [ wikibase:quantityAmount ?value ; wikibase:quantityUnit ?units ] .\n" - " #OPTIONAL { ?statement prov:wasDerivedFrom/pr:P248 ?sourceTmp . OPTIONAL { ?sourceTmp wdt:P356 ?doiTmp . } }\n" - " ?property wikibase:claim ?propp ; wikibase:statementValue ?proppsv ; wdt:P1629 ?propEntity ; wdt:P31 wd:Q21077852 .\n" - " ?propEntity @en@rdfs:label ?propEntityLabel .\n" - " ?units @en@rdfs:label ?unitsLabel .\n" - ' BIND (COALESCE(IF(BOUND(?sourceTmp), ?sourceTmp, 1/0), "") AS ?source)\n' - ' BIND (COALESCE(IF(BOUND(?doiTmp), ?doiTmp, 1/0), "") AS ?doi)\n' - "}" - ) - # return sparqlquery - try: - sparqlqueryURL = ( - "https://qlever.cs.uni-freiburg.de/api/wikidata?format=json&query=" - + urllib.parse.quote_plus(sparqlquery) - ) - # return sparqlqueryURL - compound_dat = requests.get(sparqlqueryURL) - # return json.loads(compound_dat.content) - except Exception as e: - return jsonify({"error": str(e)}), 500 - if not bool(compound_dat): - return jsonify({"error": "No data found"}), 404 - compound_dat = json.loads(compound_dat.content)["results"]["bindings"] - # return jsonify(compound_dat) - compound_list = [] - for expProp in compound_dat: - # return jsonify(expProp) - compound_list.append( - { - "propEntityLabel": expProp["propEntityLabel"]["value"], - "value": expProp["value"]["value"], - "unitsLabel": expProp["unitsLabel"]["value"], - "source": expProp["source"]["value"], - "doi": expProp["doi"]["value"], - "seeAlso": expProp["statement"]["value"], - } - ) - return jsonify(compound_list), 200 - - -################################################################################ -### Pages under 'Legal' -@app.route("/legal/terms_of_service") -def terms_of_service(): - return render_template("legal/terms_of_service.html") - - -@app.route("/legal/privacypolicy") -def privacy_policy(): - 
return render_template("legal/privacypolicy.html") - - -if __name__ == "__main__": - app.run(host="0.0.0.0", port=5050, debug=True) - - \ No newline at end of file +################################################################################ +### Loading the required modules +import json +import re + +import requests +import urllib.parse +from flask import Flask, abort, jsonify, render_template, request, Response +from flask_caching import Cache +from jinja2 import TemplateNotFound +from werkzeug.routing import BaseConverter + +# from wikidataintegrator import wdi_core +from wikibaseintegrator import wbi_helpers + +# Import BioStudies extractor +from data.biostudies.search import BioStudiesExtractor +from data.zenodo.search import ZenodoExtractor +from data.mapping import normalize_all + +################################################################################ +CACHE_TIMEOUT = 60 * 60 * 24 * 5 # 5 days -- [Ozan] I created a separate + # timeout object for the tools page because + # a 5-day caching is too long for it. +CACHE_TIMEOUT_SERVICE = 60 # Separate timeout for the tools page -- 60 + # seconds. 
################################################################################
### Configuration for the BioStudies / Zenodo integration.
# Switch collections here (e.g. "EU-ToxRisk" for testing).
BIOSTUDIES_COLLECTION = "VHP4Safety"
BIOSTUDIES_COLLECTION_NAME = "VHP4Safety"  # display name for the page
ZENODO_COMMUNITY = "vhp4safety"  # Zenodo community
ZENODO_RECORD_TYPE = "dataset"  # only show datasets

# Slugs of the case studies that have a page.
CASESTUDIES = ["thyroid", "kidney", "parkinson"]

# Shared explanation texts for the stage filters (tools page and data page).
STAGE_EXPLANATIONS = {
    "Chemical Characteristics and Hazard Identification": "A Safety Assessment Workflow Step that categorizes services that use molecular structures, chemical descriptors, and databases to predict or analyze the properties, behavior, and potential risks of chemical substances.",
    "Exposure": "A Safety Assessment Workflow Step which categorizes services that evaluate and analyze the route, duration, magnitude and frequency of exposure of an organism or (sub)population to one or multiple chemicals.",
    "Toxicokinetics": "A Safety Assessment Workflow Step which categorizes services that analyze the kinetics (absorption, distribution, metabolism and excretion) of chemicals and how these processes influence the internal dose.",
    "Toxicodynamics": "A Safety Assessment Workflow Step which categorizes services that use or extend the (quantitative) AOP framework to analyze and assess the interaction of chemicals with biological targets.",
    "Adverse Outcome": "A Safety Assessment Workflow Step which specifically refers to clinical and epidemiological effects. It categorizes services that provide information on the toxicological endpoints and adverse outcomes at a clinical or epidemiological level of chemical exposures.",
    "Other": "Other or unknown category.",
    # Legacy labels (kept for the data/methods pages until their data sources migrate)
    "ADME": "Absorption, distribution, metabolism, and excretion of a substance (toxic or not) in a living organism, following exposure to this substance.",
    "Hazard Assessment": "The process of assessing the intrinsic hazard a substance poses to human health and/or the environment",
    "Chemical Information": "Information about chemical properties and identity.",
    "General": "Not specific to a flow step.",
    "(External) exposure": "External exposure assessment.",
    "Generic": "Generic category.",
}

# Index files in the cloud repo; TOOLS and SERVICES are synonymous.
METHODS_URL = "https://raw.githubusercontent.com/VHP4Safety/cloud/refs/heads/main/cap/methods_index.json"
SERVICES_URL = "https://raw.githubusercontent.com/VHP4Safety/cloud/refs/heads/main/cap/service_index.json"

# Regulatory questions: internal field name -> display label + explanation.
REG_QUESTIONS = {
    "reg_q_1a": {
        "label": "Kidney Case Study (a)",
        "explanation": "What is the safe cisplatin dose in cancer patients?",
    },
    "reg_q_1b": {
        "label": "Kidney Case Study (b)",
        "explanation": "What is the intrinsic hazard of tacrolimus for nephrotoxicity?",
    },
    "reg_q_2a": {
        "label": "Parkinson Case Study (a)",
        "explanation": "Can compound Dinoseb cause Parkinson's Disease?",
    },
    "reg_q_2b": {
        "label": "Parkinson Case Study (b)",
        "explanation": "What level of exposure to compound Dinoseb leads to risk for developing Parkinson’s disease?",
    },
    "reg_q_3a": {
        "label": "Thyroid Case Study (a)",
        "explanation": "What information about silychristin do we need to give an advice to women in their early pregnancy to decide whether the substance can be used?",
    },
    "reg_q_3b": {
        "label": "Thyroid Case Study (b)",
        "explanation": "Does silychristin influence the thyroid-mediated brain development in the fetus resulting in cognitive impairment in children?",
    },
}
# Derived: keep the old structure available for templates expecting {label: explanation}
REG_QUESTION_EXPLANATIONS = {
    v["label"]: v["explanation"] for v in REG_QUESTIONS.values()
}


################################################################################
class RegexConverter(BaseConverter):
    """Converter for regular expression routes.

    References
    ----------
    Scholia views.py
    https://stackoverflow.com/questions/5870188

    """

    def __init__(self, url_map, *items):
        """Set up regular expression matcher."""
        super(RegexConverter, self).__init__(url_map)
        self.regex = items[0]


cache_config = {
    "CACHE_TYPE": "SimpleCache",  # Flask-Caching related configs
    "CACHE_DEFAULT_TIMEOUT": CACHE_TIMEOUT,  # 5-day default (see CACHE_TIMEOUT)
    # Custom key; not read by Flask-Caching itself, kept for reference.
    "CACHE_SERVICE_TIMEOUT": CACHE_TIMEOUT_SERVICE,
}
app = Flask(__name__)
app.config.from_mapping(cache_config)
cache = Cache(app)


def _fetch_json_dict(url: str, timeout: int) -> dict:
    """Fetch a JSON document and return it as a dict; {} on any failure.

    Shared implementation for get_json_dict / get_json_dict_service (the two
    previously duplicated this code verbatim).
    """
    try:
        resp = requests.get(url, timeout=timeout)
        if resp.status_code != 200:
            return {}
        data = resp.json()
        return data if isinstance(data, dict) else {}
    except Exception:
        # Deliberate best-effort: pages depending on this must not break.
        return {}


@cache.memoize(timeout=CACHE_TIMEOUT)
def get_json_dict(url: str, timeout: int = 5) -> dict:
    """Fetch xxxx_index.json from the cloud repo as a dict ({} on error).

    Cached for CACHE_TIMEOUT (5 days).
    """
    return _fetch_json_dict(url, timeout)


@cache.memoize(timeout=CACHE_TIMEOUT_SERVICE)
def get_json_dict_service(url: str, timeout: int = 5) -> dict:
    """Same as get_json_dict, but with the short tools-page cache timeout."""
    return _fetch_json_dict(url, timeout)


@cache.memoize(timeout=CACHE_TIMEOUT)
def get_repository_data(
    search_query: str,
    page: int = 1,
    page_size: int = 18,
    filters: list | None = None,
    load_metadata: bool = True,
) -> tuple[dict, dict]:
    """Extract data from the repositories (BioStudies + Zenodo).

    Returns a (biostudies_results, zenodo_results) tuple of result dicts.
    Zenodo is not queried when filters are given (not supported there).
    """
    # Initialize extractor for BioStudies.
    bs_extractor = BioStudiesExtractor(collection=BIOSTUDIES_COLLECTION)

    # Fetch data based on search query, or list all.
    if search_query:
        bs_results = bs_extractor.search_studies(
            search_query,
            page=page,
            page_size=page_size,
            filters=filters,
            load_metadata=load_metadata,
        )
    else:
        bs_results = bs_extractor.list_studies(
            page=page,
            page_size=page_size,
            include_urls=True,
            filters=filters,
            load_metadata=load_metadata,
        )

    # Initialize extractor for Zenodo.
    zen_extractor = ZenodoExtractor(
        community=ZENODO_COMMUNITY, record_type=ZENODO_RECORD_TYPE
    )

    if not filters:
        # We currently do not filter Zenodo datasets.
        if search_query:
            zen_result = zen_extractor.search_records(
                search_query, page=page, size=page_size, load_metadata=load_metadata
            )
        else:
            # load_metadata needed for is_rocrate filtering in the template
            zen_result = zen_extractor.list_records(
                page=page,
                size=page_size,
                include_urls=True,
                load_metadata=load_metadata,
            )
    else:
        zen_result = {"hits": [], "total": 0, "error": None}

    return bs_results, zen_result


# Provide the methods list to all templates for the navbar dropdown.
@app.context_processor
def inject_methods_menu():
    """Expose methods_index.json as a list of {id, title} to templates.

    Returns an empty list on any error to avoid breaking pages.
    """
    data = get_json_dict(METHODS_URL)
    if not data:
        return {"methods_menu": []}
    items = []
    for key, val in data.items():
        title = (
            val.get("method")
            or val.get("method_name_content")
            or val.get("method_name")
            or key
        )
        items.append({"id": key, "title": title})
    items.sort(key=lambda x: x["title"].lower())  # sort by title
    return {"methods_menu": items}
+ """ + data = get_json_dict(METHODS_URL) + if data: + items = [] + for key, val in data.items() if isinstance(data, dict) else []: + title = ( + val.get("method") + or val.get("method_name_content") + or val.get("method_name") + or key + ) + items.append({"id": key, "title": title}) + # sort by title + items = sorted(items, key=lambda x: x["title"].lower()) + return {"methods_menu": items} + else: + return {"methods_menu": []} + + +@app.context_processor +def inject_tools_menu(): + """Fetch methods_index.json and expose a simple list of {id, title} to templates. + Return an empty list on any error to avoid breaking pages. + """ + data = get_json_dict_service(SERVICES_URL) + if data: + items = [] + for key, val in data.items() if isinstance(data, dict) else []: + title = val.get("service") or key + items.append({"id": key, "title": title}) + # sort by title + items = sorted(items, key=lambda x: x["title"].lower()) + return {"tools_menu": items} + else: + return {"tools_menu": []} + + +@app.context_processor +def inject_data_menu(): + """Fetch methods_index.json and expose a simple list of {id, title} to templates. + Return an empty list on any error to avoid breaking pages. + """ + bs_results, zen_results = get_repository_data(search_query="") + hits: list = bs_results.get("hits", []) + hits.extend(zen_results.get("hits", [])) + if hits: + items = [] + for hit in hits: + title = hit.get("title") + id = hit.get("accession", "") or hit.get("doi_url", "") or hit.get("id", "") + url = hit.get("url", "") or hit.get("doi_url") + items.append({"id": id, "title": title, "url": url}) + # sort by title + items = sorted(items, key=lambda x: x["title"].lower()) + return {"data_menu": items} + else: + return {"data_menu": []} + + +################################################################################ +### The landing page +@app.route("/") +def home(): + try: + tools = get_json_dict_service( + SERVICES_URL + ) # Geting the service_list.json in the dictionary format. 
+ tools = list(tools.values()) # Converting the dictionary to a list object. + except Exception as e: + return f"Error processing service data: {e}", 500 + num_tools = len(tools) + num_case_studies = len(CASESTUDIES) + bs_res, zen_res = get_repository_data(search_query="") + num_datasets = bs_res["total"] + zen_res["total"] + return render_template( + "home.html", + num_tools=num_tools, + num_case_studies=num_case_studies, + num_datasets=num_datasets, + ) + + +################################################################################ +### The sitemap.xml for search engines +@app.route("/sitemap.xml") +def sitemap(): + sitemapContent = """ + + + https://platform.vhp4safety.nl/ + + + https://platform.vhp4safety.nl/casestudies + + + https://platform.vhp4safety.nl/tools + + + https://platform.vhp4safety.nl/methods + + + https://platform.vhp4safety.nl/data + + +"""; + return Response(sitemapContent, mimetype='text/xml'); + + +################################################################################ +### Pages under 'Data' +@app.route("/data") +def data(): + # Get query parameters for pagination and search + page = request.args.get("page", 1, type=int) + page_size = request.args.get("page_size", 18, type=int) + search_query = request.args.get("query", "", type=str) + + # Get filter parameters + filter_case_study = request.args.get("filter_case_study", "", type=str) + filter_regulatory_question = request.args.get( + "filter_regulatory_question", "", type=str + ) + filter_flow_step = request.args.get("filter_flow_step", "", type=str) + + # Build filter list (only include non-empty filters) + filters = [] + if filter_case_study: + filters.append(("case_study", filter_case_study)) + if filter_regulatory_question: + filters.append(("regulatory_question", filter_regulatory_question)) + if filter_flow_step: + filters.append(("flow_step", filter_flow_step)) + + bs_results, zen_results = get_repository_data( + search_query, page, page_size, filters=filters + ) + + # 
Extract studies and metadata + studies = bs_results.get("hits", []) + bs_total = bs_results.get("total", 0) + bs_error: str | None = bs_results.get("error", None) + + # Extract datasets and metadata from Zenodo + datasets = zen_results.get("hits", []) + zen_total = zen_results.get("total", 0) + zen_error: str | None = zen_results.get("error", None) + + # enrich with normalized metadata mapping: + + # studies, datasets = normalize_all([studies],[datasets]) + + # combine totals for pagination + total = bs_total + zen_total + + # Get filtering metadata (if filters were applied) + filters_applied = bs_results.get("filters_applied", False) + hits_returned = bs_results.get("hits_returned", len(studies)) + pages_fetched = bs_results.get("pages_fetched", 1) + page_size_met = bs_results.get("page_size_met", True) + + # Calculate pagination info + has_next = (page * page_size) < total + has_prev = page > 1 + + # Pass data to template + return render_template( + "data/data.html", + studies=studies, + datasets=datasets, + total=total, + page=page, + page_size=page_size, + search_query=search_query, + collection_name=BIOSTUDIES_COLLECTION_NAME, + collection=BIOSTUDIES_COLLECTION, + errors={"zenodo": zen_error, "biostudies": bs_error}, + has_next=has_next, + has_prev=has_prev, + filter_case_study=filter_case_study, + filter_regulatory_question=filter_regulatory_question, + filter_flow_step=filter_flow_step, + filters_applied=filters_applied, + hits_returned=hits_returned, + pages_fetched=pages_fetched, + page_size_met=page_size_met, + stage_explanations=STAGE_EXPLANATIONS, + reg_question_explanations=REG_QUESTION_EXPLANATIONS, + ) + + +################################################################################ +### DataSet detail view + + +@app.template_filter("split_text_int") +def split_text_int(value: None|str) -> tuple[str, None|int]: + """ + Splits trailing integer from a string. 
@app.route("/data/<dataid>")
def data_detail(dataid):
    """Render the detail page for a single dataset, looked up by id across
    BioStudies and Zenodo; 404 when neither repository yields a usable hit."""
    bs_results, zen_results = get_repository_data(dataid)

    studies = bs_results.get("hits", [])
    bs_total = bs_results.get("total", 0)
    bs_error: str | None = bs_results.get("error", None)

    datasets = zen_results.get("hits", [])
    zen_total = zen_results.get("total", 0)
    zen_error: str | None = zen_results.get("error", None)

    studies, datasets = normalize_all(studies, datasets)

    # When exactly one backend errored, require exactly one hit from the other.
    if bs_error and not zen_error:
        if zen_total != 1:
            return abort(404)
    elif zen_error and not bs_error:
        if bs_total != 1:
            return abort(404)

    if studies:
        return render_template("data/data_details.html", data=studies[0])
    if datasets:
        return render_template("data/data_details.html", data=datasets[0])
    return abort(404)


################################################################################
### Pages under 'Models'
@app.route("/models_page")
def models():
    """Render the models page: BioStudies hits with search, filtering and
    pagination driven by query parameters."""
    # Pagination / search query parameters.
    page = request.args.get("page", 1, type=int)
    page_size = request.args.get("page_size", 18, type=int)
    search_query = request.args.get("query", "", type=str)

    # Filter parameters.
    filter_case_study = request.args.get("filter_case_study", "", type=str)
    filter_regulatory_question = request.args.get(
        "filter_regulatory_question", "", type=str
    )
    filter_flow_step = request.args.get("filter_flow_step", "", type=str)

    # Only non-empty filters are passed on.
    filters = []
    if filter_case_study:
        filters.append(("case_study", filter_case_study))
    if filter_regulatory_question:
        filters.append(("regulatory_question", filter_regulatory_question))
    if filter_flow_step:
        filters.append(("flow_step", filter_flow_step))

    # Query BioStudies directly (no Zenodo on this page).
    extractor = BioStudiesExtractor(collection=BIOSTUDIES_COLLECTION)
    if search_query:
        results = extractor.search_studies(
            search_query, page=page, page_size=page_size, filters=filters
        )
    else:
        results = extractor.list_studies(
            page=page, page_size=page_size, include_urls=True, filters=filters
        )

    studies = results.get("hits", [])
    total = results.get("total", 0)
    error = results.get("error", None)

    # Filtering metadata (if filters were applied).
    filters_applied = results.get("filters_applied", False)
    hits_returned = results.get("hits_returned", len(studies))
    pages_fetched = results.get("pages_fetched", 1)
    page_size_met = results.get("page_size_met", True)

    # Pagination info.
    has_next = (page * page_size) < total
    has_prev = page > 1

    return render_template(
        "models_page.html",
        studies=studies,
        total=total,
        page=page,
        page_size=page_size,
        search_query=search_query,
        collection_name=BIOSTUDIES_COLLECTION_NAME,
        collection=BIOSTUDIES_COLLECTION,
        error=error,
        has_next=has_next,
        has_prev=has_prev,
        filter_case_study=filter_case_study,
        filter_regulatory_question=filter_regulatory_question,
        filter_flow_step=filter_flow_step,
        filters_applied=filters_applied,
        hits_returned=hits_returned,
        pages_fetched=pages_fetched,
        page_size_met=page_size_met,
        stage_explanations=STAGE_EXPLANATIONS,
        reg_question_explanations=REG_QUESTION_EXPLANATIONS,
    )
+@app.route("/tools") +def tools(): + try: + tools = get_json_dict_service( + SERVICES_URL + ) # Geting the service_list.json in the dictionary format. + tools = list(tools.values()) # Converting the dictionary to a list object. + + # Mapping the URLs with glossary IDs to their text values. + stage_mapping = { + "https://vhp4safety.github.io/glossary#VHP0000153": "Chemical Characteristics and Hazard Identification", + "https://vhp4safety.github.io/glossary#VHP0000154": "Exposure", + "https://vhp4safety.github.io/glossary#VHP0000155": "Toxicokinetics", + "https://vhp4safety.github.io/glossary#VHP0000156": "Toxicodynamics", + "https://vhp4safety.github.io/glossary#VHP0000158": "Adverse Outcome", + # Legacy mappings (superseded by the Process Flow Step URIs above) + "https://vhp4safety.github.io/glossary#VHP0000056": "ADME", + "https://vhp4safety.github.io/glossary#VHP0000102": "Hazard Assessment", + "https://vhp4safety.github.io/glossary#VHP0000148": "Chemical Information", + "https://vhp4safety.github.io/glossary#VHP0000149": "General", + } + + for tool in tools: + full_stage_url = tool.get("stage", "") + + # Writing the service name and stage values in the logs for troubleshooting. + # print(f"Tool: {tool['service']}, Stage URL: {full_stage_url}") # Log the full URL + + # Checking if the full URL is in the mapping and updating the stage. + if full_stage_url in stage_mapping: + # print(f"Mapping stage URL {full_stage_url} to {stage_mapping[full_stage_url]}") # Log the mapping + tool["stage"] = stage_mapping[full_stage_url] + elif tool["stage"] in ["NA", "Unknown"]: + tool["stage"] = ( + "Other" # Combining "NA" and "Unknown" stages in a single stage-type, "Other". 
+ ) + + html_name = tool.get("html_name") + md_name = tool.get("md_file_name") + png_name = tool.get("png_file_name") + + tool["url"] = f"https://cloud.vhp4safety.nl/service/{html_name}" + tool["meta_data"] = ( + f"https://raw.githubusercontent.com/VHP4Safety/cloud/main/docs/service/{md_name}" + if md_name + else "md file not found" + ) + + # Check if the tool has the placeholder logo + placeholder_logo = "https://github.com/VHP4Safety/ui-design/blob/main/static/images/logo.png" + if png_name == placeholder_logo: + tool["png"] = None # set to None if it's the common placeholder + else: + tool["png"] = ( + f"https://raw.githubusercontent.com/VHP4Safety/cloud/main/docs/service/{png_name}" + if not png_name.startswith("http") + else png_name + ) + + inst_url = tool.get("inst_url", "no_url") + if not inst_url: # catches "" as well + inst_url = "no_url" + tool["inst_url"] = inst_url + + # Fetch per-tool detail JSON to check hosting status + tool_id = tool.get("id", "") + vhp_hosted = False + if inst_url != "no_url" and tool_id: + try: + detail_url = f"https://cloud.vhp4safety.nl/service/{tool_id}.json" + detail_resp = requests.get(detail_url, timeout=5) + if detail_resp.status_code == 200: + detail = detail_resp.json() + vhp_platform = detail.get("instance", {}).get("vhp-platform", "").lower() + vhp_hosted = vhp_platform not in ("external", "independent", "") + except Exception: + pass + tool["vhp_hosted"] = vhp_hosted + + # Getting selected stages from the URL. + selected_stages = request.args.getlist("stage") + + # Filtering tools by selected stages. + if selected_stages: + tools = [tool for tool in tools if tool.get("stage") in selected_stages] + + # Getting all unique stages from the tools for the filter options. + stages = sorted(set(tool.get("stage") for tool in tools if tool.get("stage"))) + + # Forcing "Other" to be the last item in the list of stages. 
+ if "Other" in stages: + stages.remove("Other") + stages.append("Other") + + # Filtering over the regulatory questions. + reg_questions = {v["label"]: k for k, v in REG_QUESTIONS.items()} + + selected_questions = request.args.getlist("reg_q") + + for question in selected_questions: + field = reg_questions.get(question) + if field: + tools = [ + tool for tool in tools if str(tool.get(field, "")).lower() == "true" + ] + + # Getting the search query from URL to add a search bar based on tool names. + search_query = request.args.get("search", "").strip().lower() + + # Filtering tools by search query. + if search_query: + tools = [ + tool + for tool in tools + if search_query in tool.get("service", "").lower() + ] + + return render_template( + "tools/tools.html", + tools=tools, + stages=stages, + selected_stages=selected_stages, + reg_questions=reg_questions, + selected_questions=selected_questions, + stage_explanations=STAGE_EXPLANATIONS, + reg_question_explanations=REG_QUESTION_EXPLANATIONS, + ) + + except Exception as e: + return f"Error processing service data: {e}", 500 + + +### New route to list methods (similar to the tools page) +@app.route("/methods") +@app.route("/methods/") +def methods(): + """Fetch methods_index.json from the cloud repo, normalize fields and render a methods list page.""" + url = "https://raw.githubusercontent.com/VHP4Safety/cloud/refs/heads/main/cap/methods_index.json" + response = requests.get(url) + + if response.status_code != 200: + return f"Error fetching methods list: {response.status_code}", 503 + + try: + methods = response.json() + methods = list(methods.values()) # convert dict to list + + # Normalize fields for the template and collect stages + stages_set = set() + normalized = [] + for m in methods: + norm = {} + norm["id"] = m.get("id", "") + # template expects 'service' and 'description' + norm["service"] = ( + m.get("method") + or m.get("method_name_content") + or m.get("method_name") + or "" + ) + norm["description"] = ( + 
m.get("method_description_content") or m.get("method_description") or "" + ) + # main_url used for method webpage (catalog page) + norm["main_url"] = m.get("catalog_webpage_url") or "no_url" + # interactive instance not present in methods index + norm["inst_url"] = m.get("inst_url") or "no_url" + # metadata md file not available in index; keep empty string + norm["meta_data"] = m.get("meta_data") or "" + # placeholder/no png + norm["png"] = None + # keep original raw data for potential details page + norm["raw"] = m + + # collect stages (split comma-separated values) + stage_field = (m.get("vhp4safety_workflow_stage_content") or "").strip() + if stage_field: + for part in [s.strip() for s in stage_field.split(",")]: + if part: + stages_set.add(part) + + normalized.append(norm) + + # Apply search and filters similar to /tools + selected_stages = request.args.getlist("stage") + selected_questions = request.args.getlist("reg_q") + search_query = request.args.get("search", "").strip().lower() + + methods_filtered = normalized + + if selected_stages: + methods_filtered = [ + m + for m in methods_filtered + if any( + s + in ( + (m["raw"].get("vhp4safety_workflow_stage_content") or "").split( + "," + ) + ) + for s in selected_stages + ) + ] + + # Filter by regulatory questions if provided (REG_QUESTIONS keys map to internal fields) + reg_questions = {v["label"]: k for k, v in REG_QUESTIONS.items()} + if selected_questions: + for question in selected_questions: + field = reg_questions.get(question) + if field: + methods_filtered = [ + m + for m in methods_filtered + if str(m["raw"].get(field, "")).lower() == "true" + ] + + if search_query: + methods_filtered = [ + m + for m in methods_filtered + if search_query in m.get("service", "").lower() + ] + + stages = sorted(stages_set) + if "Other" in stages: + stages.remove("Other") + stages.append("Other") + + # Pass everything the template expects + return render_template( + "methods/methods.html", + methods=methods_filtered, + 
stages=stages, + selected_stages=selected_stages, + reg_questions=reg_questions, + selected_questions=selected_questions, + stage_explanations=STAGE_EXPLANATIONS, + reg_question_explanations=REG_QUESTION_EXPLANATIONS, + ) + + except Exception as e: + return f"Error processing methods data: {e}", 500 + + +@app.route("/methods/") +def method_page(methodid): + """Render a single method page using templates/methods/method.html + Method details are taken from methods_index.json (keyed by method id). + """ + try: + methods = get_json_dict(METHODS_URL) + # methods_index.json is a dict keyed by method id + if methodid not in methods: + abort(404) + method_details = methods[methodid] + except Exception as e: + return f"Error processing methods data: {e}", 500 + + # Try to load the full method JSON from the docs/methods folder (raw github) + method_json = None + # URL-encode the filename part to be safe + encoded = urllib.parse.quote(methodid, safe="") + raw_url = ( + "https://raw.githubusercontent.com/VHP4Safety/cloud/refs/heads/main/docs/methods/" + + f"{encoded}.json" + ) + try: + r = requests.get(raw_url, timeout=5) + if r.status_code == 200: + method_json = r.json() + else: + # fall back to using the index entry as minimal data + method_json = method_details + except Exception as exc: + # on any error, fall back to index entry + method_json = method_details + + # Pass both to the template: some templates expect method_json, others method_details + return render_template( + "methods/method.html", + method=method_details, + method_details=method_details, + method_json=method_json, + ) + + +@app.route("/tools/") +def tool_page(toolname): + # get the tools metadata: + try: + tools = get_json_dict_service(SERVICES_URL) + tools = dict(tools) + # Geting the service_list.json in the dictionary format. + # Converting the dictionary to a list object. 
+ except Exception as e: + return f"Error processing service data: {e}", 500 + + # Map toolname to the correct JSON file in the new tool folder + if toolname not in tools: + abort(404) + + # get the tools metadata: + url = "https://cloud.vhp4safety.nl/service/" + toolname + ".json" + response = requests.get(url) + + if response.status_code != 200: + return f"Error fetching service list: {response.status_code}", 503 + + try: + tool_details = response.json() + tool_details = dict(tool_details) + # Geting the service_list.json in the dictionary format. + # Converting the dictionary to a list object. + except Exception as e: + return f"Error processing service data: {e}", 500 + + # Pass the json filename to the template (for JS to pick up) + return render_template( + "tools/tool.html", tool_json=tools[toolname], tool_details=tool_details + ) + + +################################################################################ +### Pages under 'Process Flow' + + +# General Safety Assessment Workflow page +@app.route("/Safety_Assessment_Workflow") +def SafetyAssessmentWorkflow(): + return render_template("Safety_Assessment_Workflow.html") + + +################################################################################ +### Pages under 'Case Studies' + + +# General case studies page +@app.route("/casestudies") +def workflows(): + return render_template("case_studies/casestudies.html") + + +# Individual case study page, dynamically filled based on URL +@app.route("/casestudies/", defaults={"step": ""}) +@app.route("/casestudies//") +@app.route("/casestudies///") +# additional routes are parsed client side via js to allow smooth animation +def casestudy(case:str="", question:str="", step:str=""): + if case not in CASESTUDIES: + abort(404) + # JS will handle steps via the URL + return render_template("case_studies/casestudy.html", case=case) + + +@app.route("/workflow/") +def show(workflow): + try: + return render_template( + 
f"case_studies/parkinson/workflows/{workflow}_workflow.html" + ) + except TemplateNotFound: + abort(404) + + +################################################################################ +### Pages related to chemical compounds + + +def is_valid_qid(qid): + return re.fullmatch(r"Q\d+", qid) is not None + + +@app.route("/compound/") +def show_compound(cwid): + try: + return render_template(f"compound.html", cwid=cwid) + except TemplateNotFound: + abort(404) + + +@app.route("/get_compound_properties/") +def show_compounds_properties_as_json(cwid): + if not is_valid_qid(cwid): + return jsonify({"error": "Invalid compound identifier"}), 400 + compoundwikiEP = "https://compoundcloud.wikibase.cloud/query/sparql" + sparqlquery = ( + "PREFIX wd: \n" + "PREFIX wdt: \n\n" + "SELECT ?cmp ?cmpLabel ?formula ?mass ?inchi ?inchiKey ?SMILES WHERE {\n" + " VALUES ?cmp { wd:" + cwid + " }\n" + " ?cmp wdt:P9 ?inchi ;\n" + " wdt:P10 ?inchiKey .\n" + " OPTIONAL { ?cmp wdt:P2 ?mass }\n" + " OPTIONAL { ?cmp wdt:P3 ?formula }\n" + " OPTIONAL { ?cmp wdt:P7 ?chiralSMILES }\n" + " OPTIONAL { ?cmp wdt:P12 ?nonchiralSMILES }\n" + ' BIND (COALESCE(IF(BOUND(?chiralSMILES), ?chiralSMILES, 1/0), IF(BOUND(?nonchiralSMILES), ?nonchiralSMILES, 1/0), "") AS ?SMILES)\n' + ' SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". 
}\n' + "}" + ) + try: + compound_dat = wbi_helpers.execute_sparql_query( + sparqlquery, endpoint=compoundwikiEP + ) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if not bool(compound_dat): + return jsonify({"error": "No data found"}), 404 + compound_dat = compound_dat["results"]["bindings"][0] + # return jsonify(compound_dat); + compound_list = [ + { + "wcid": compound_dat["cmp"]["value"], + "label": compound_dat["cmpLabel"]["value"], + "inchi": compound_dat["inchi"]["value"], + "inchikey": compound_dat["inchiKey"]["value"], + "SMILES": compound_dat["SMILES"]["value"], + "formula": compound_dat["formula"]["value"], + "mass": compound_dat["mass"]["value"], + } + ] + return jsonify(compound_list), 200 + + +@app.route("/get_compound_identifiers/") +def show_compounds_identifiers_as_json(cwid): + if not is_valid_qid(cwid): + return jsonify({"error": "Invalid compound identifier"}), 400 + compoundwikiEP = "https://compoundcloud.wikibase.cloud/query/sparql" + sparqlquery = ( + "PREFIX wd: \n" + "PREFIX wdt: \n\n" + "SELECT DISTINCT ?propertyLabel ?value ?formatterURL\n" + "WHERE {\n" + " VALUES ?property { wd:P13 wd:P22 wd:P23 wd:P26 wd:P27 wd:P28 wd:P36 wd:P41 wd:P43 wd:P44 wd:P45 }\n" + " ?property wikibase:directClaim ?valueProp .\n" + " OPTIONAL { wd:" + cwid + " ?valueProp ?value }\n" + " OPTIONAL { ?property wdt:P6 ?formatterURL }\n" + ' SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". 
}\n' + "}" + ) + try: + compound_dat = wbi_helpers.execute_sparql_query( + sparqlquery, endpoint=compoundwikiEP + ) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if len(compound_dat["results"]["bindings"]) == 0: + return jsonify({"error": "No data found"}), 404 + compound_dat = compound_dat["results"]["bindings"] + # return jsonify(compound_dat) + + compound_list = [] + for expProp in compound_dat: + if "value" in expProp: + compound_list.append( + { + "propertyLabel": expProp["propertyLabel"]["value"], + "value": expProp["value"]["value"], + "formatterURL": expProp["formatterURL"]["value"], + } + ) + else: + compound_list.append( + { + "propertyLabel": expProp["propertyLabel"]["value"], + "value": "", + "formatterURL": "", + } + ) + return jsonify(compound_list), 200 + + +@app.route("/get_compound_toxicology/") +def show_compounds_toxicology_as_json(cwid): + if not is_valid_qid(cwid): + return jsonify({"error": "Invalid compound identifier"}), 400 + compoundwikiEP = "https://compoundcloud.wikibase.cloud/query/sparql" + sparqlquery = ( + "PREFIX wd: \n" + "PREFIX wdt: \n\n" + "SELECT DISTINCT ?propertyLabel ?value ?formatterURL\n" + "WHERE {\n" + " VALUES ?property { wd:P17 wd:P19 wd:P4 }\n" + " ?property wikibase:directClaim ?valueProp .\n" + " OPTIONAL { wd:" + cwid + " ?valueProp ?value }\n" + " OPTIONAL { ?property wdt:P6 ?formatterURL }\n" + ' SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". 
}\n' + "}" + ) + try: + compound_dat = wbi_helpers.execute_sparql_query( + sparqlquery, endpoint=compoundwikiEP + ) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if len(compound_dat["results"]["bindings"]) == 0: + return jsonify({"error": "No data found"}), 404 + compound_dat = compound_dat["results"]["bindings"] + # return jsonify(compound_dat) + + compound_list = [] + for expProp in compound_dat: + print(expProp) + if "value" in expProp: + compound_list.append( + { + "propertyLabel": expProp["propertyLabel"]["value"], + "value": expProp["value"]["value"], + } + ) + else: + compound_list.append( + {"propertyLabel": expProp["propertyLabel"]["value"], "value": ""} + ) + return jsonify(compound_list), 200 + + +@app.route("/get_compound_expdata/") +def show_compounds_expdata_as_json(cwid): + if not is_valid_qid(cwid): + return jsonify({"error": "Invalid compound identifier"}), 400 + compoundwikiEP = "https://compoundcloud.wikibase.cloud/query/sparql" + sparqlquery = ( + "PREFIX wd: \n" + "PREFIX wdt: \n" + "PREFIX wid: \n" + "PREFIX widt: \n" + "PREFIX prov: \n\n" + "SELECT ?qid WHERE {\n" + " wd:P5 wikibase:directClaim ?identifierProp .\n" + " wd:" + cwid + " ?identifierProp ?wikidata .\n" + ' BIND (iri(CONCAT("http://www.wikidata.org/entity/", ?wikidata)) AS ?qid)\n' + "}" + ) + try: + compound_dat = wbi_helpers.execute_sparql_query( + sparqlquery, endpoint=compoundwikiEP + ) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if not bool(compound_dat): + return jsonify({"error": "No data found"}), 404 + if len(compound_dat["results"]["bindings"]) == 0: + return jsonify({"error": "No data found"}), 404 + compound_dat = compound_dat["results"]["bindings"][0] + qid = compound_dat["qid"]["value"] + # the next query may be affected by https://github.com/ad-freiburg/qlever-control/issues/187 + sparqlquery = ( + "PREFIX wd: \n" + "PREFIX wdt: \n" + "PREFIX prov: \n" + "PREFIX rdfs: \n" + "PREFIX pr: \n" + "PREFIX wikibase: \n\n" + 
"SELECT DISTINCT ?propEntityLabel ?value ?unitsLabel ?source ?doi ?statement\n" + "WHERE {\n" + " <" + qid + "> ?propp ?statement .\n" + " ?statement a wikibase:BestRank ;\n" + " ?proppsv [ wikibase:quantityAmount ?value ; wikibase:quantityUnit ?units ] .\n" + " #OPTIONAL { ?statement prov:wasDerivedFrom/pr:P248 ?sourceTmp . OPTIONAL { ?sourceTmp wdt:P356 ?doiTmp . } }\n" + " ?property wikibase:claim ?propp ; wikibase:statementValue ?proppsv ; wdt:P1629 ?propEntity ; wdt:P31 wd:Q21077852 .\n" + " ?propEntity @en@rdfs:label ?propEntityLabel .\n" + " ?units @en@rdfs:label ?unitsLabel .\n" + ' BIND (COALESCE(IF(BOUND(?sourceTmp), ?sourceTmp, 1/0), "") AS ?source)\n' + ' BIND (COALESCE(IF(BOUND(?doiTmp), ?doiTmp, 1/0), "") AS ?doi)\n' + "}" + ) + # return sparqlquery + try: + sparqlqueryURL = ( + "https://qlever.cs.uni-freiburg.de/api/wikidata?format=json&query=" + + urllib.parse.quote_plus(sparqlquery) + ) + # return sparqlqueryURL + compound_dat = requests.get(sparqlqueryURL) + # return json.loads(compound_dat.content) + except Exception as e: + return jsonify({"error": str(e)}), 500 + if not bool(compound_dat): + return jsonify({"error": "No data found"}), 404 + compound_dat = json.loads(compound_dat.content)["results"]["bindings"] + # return jsonify(compound_dat) + compound_list = [] + for expProp in compound_dat: + # return jsonify(expProp) + compound_list.append( + { + "propEntityLabel": expProp["propEntityLabel"]["value"], + "value": expProp["value"]["value"], + "unitsLabel": expProp["unitsLabel"]["value"], + "source": expProp["source"]["value"], + "doi": expProp["doi"]["value"], + "seeAlso": expProp["statement"]["value"], + } + ) + return jsonify(compound_list), 200 + + +################################################################################ +### Pages under 'Legal' +@app.route("/legal/terms_of_service") +def terms_of_service(): + return render_template("legal/terms_of_service.html") + + +@app.route("/legal/privacypolicy") +def privacy_policy(): + 
return render_template("legal/privacypolicy.html") + + +if __name__ == "__main__": + app.run(host="0.0.0.0", port=5050, debug=True) diff --git a/exploreourwork.html b/exploreourwork.html deleted file mode 100644 index ff83087..0000000 --- a/exploreourwork.html +++ /dev/null @@ -1,254 +0,0 @@ -{% extends "base.html" %} {% block content %} - - - - - - - - -
- -
- - -
-
-
-

Explore our work

-

A comprehensive set of work practices implemented in the VHP4Safety project

-
-
-
- - - -
-
- - - - - -
-
-
-

The Virtual Human Platform was developed in the VHP4Safety project. Here you can find more information about the VHP4Safety project.

-
- - - -
-

- -

-
-
-

VHP4Safety is a Dutch national project focused on developing the Virtual Human Platform (VHP) to transform safety assessment of chemicals and pharmaceuticals. The project integrates human-relevant data, innovative in vitro models, and in silico tools to enable transparent risk assessment based on human biology. VHP4Safety combines these innovations into structured workflows that support expert decision making.

-
-
-
- - - -
-

- -

-
-
-

Traditional safety testing relies on animal studies, which raise ethical concerns and often fail to predict human responses accurately. New Approach Methodologies (NAMs) and data science, including AI, offer new opportunities to perform human-relevant animal-free safety assessment.

-

VHP4Safety is developed in co-creation with risk assessors, regulators, industry, and societal stakeholders, to ensure usability and regulatory relevance. The consortium aims to establish a reliable, transparent, and sustainable platform for safety assessment, setting a new standard for human-relevant risk assessment.

-
-
-
- - - -
-

- -

-
-
-
-
-
-
- - - -
-

- -

-
-
-

The project is organised into three research lines:

-
  1. Building the VHP: Developing the ICT infrastructure, computational models, and user interface.
  2. -
  3. Feeding the VHP: Generating human-based data using in vitro models.
  4. -
  5. Implementing the VHP: Ensuring accessibility, usability, and acceptance through education, training, and stakeholder engagement.
  6. -
-
- Project structure diagram -
- -
-
-
- - - -
-

- -

-
-
-

The VHP4Safety project was funded by the Dutch Research Council (NWO) through the ‘Netherlands Research Agenda: Research on Routes by Consortia’ (NWA-ORC 1292.19.272). With a 9.9 million euro grant from NWO and additional contributions from foundations, government bodies, and private sector organizations, the project’s total funding amounted to 11.2 million euros.

-
-
-
- -
-
-
- - - - -
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

Solving complex integration challenges and transforming ideas into practical solutions requires focused, hands-on teamwork. VHP4Safety used Hackathons to accelerate innovation and foster rapid progress across disciplines.

-

Hackathons were organized in VHP4Safety several times a year. Hackathons were organized as short, intensive events where consortium partners worked side-by-side to address specific technical, scientific, or regulatory questions. In multidisciplinary teams, participants developed prototypes, integrated new data and tools, and tackled the VHP4Safety regulatory case studies. Preparation included prioritizing urgent topics that could only be addressed interdisciplinary, using the diversity of expertise within the VHP4Safety consortium.

-

Hackathons delivered working prototypes, integrated platform components, and direct feedback from users. Unfinished tasks were tracked for future sprints, keeping momentum high. These events not only sped up development of the Virtual Human Platform, but also strengthened collaboration and ensured that the platform continuously evolved to meet user needs.

-
-
-
- - - -
-

- -

-
-
-

To develop a future-proof platform for safety assessment for chemicals and pharmaceuticals without the need for animal testing, VHP4Safety organized Designathons as a way of interdisciplinary co-creation. Bringing together expertise from academia, industry, regulatory bodies, and societal organizations ensures that the Virtual Human Platform (VHP) is robust, relevant, and widely supported.

-

During the course of the project, VHP4Safety organized biannual Designathons. These were interactive, consortium-wide workshops where all partners collaborated intensively. During these sessions, participants co-designed the platform’s structure and workflows, aligned on shared goals, and discussed the needs and perspectives of different stakeholders. Special task forces were established to connect the project’s research lines, work packages and disciplines, while creative exercises helped shape a common vision for the VHP.

-

Designathons resulted in actionable workflow designs, a unified vision, and prioritized development steps for the platform. They promoted building the VHP4Safety community, thereby fostering open dialogue. Designathons helped to ensure that the Virtual Human Platform is a true product of its stakeholders, ready to meet the real-world needs of next-generation safety assessment.

-
-
-
- - - -
-

- -

-
-
-

The interdisciplinary research in VHP4Safety demands flexibility and continuous adaptation. Traditional project management often lacks the agility needed to respond to fast-changing scientific insights and evolving stakeholder needs. To address these challenges, VHP4Safety adopted the Scrum approach, ensuring the project remains dynamic and user-focused.

-

Inspired by the Scrum framework and agile working method, VHP4Safety worked in short, iterative cycles known as sprints. The team held weekly scrum meetings to coordinate efforts, set priorities, and quickly resolve obstacles. Every three weeks, sprint reviews enabled the team to evaluate progress and gather feedback, making it possible to adjust plans when necessary. Clearly defined roles, such as Product Owner and Scrum Master, provided the VHP4Safety agile working method with structure and clarity. The collaborative hackathons and designathons that were organised in VHP4Safety were integrated into the Scrum workflow, promoting co-creation, interdisciplinary alignment, and responsiveness to new developments.

-

The Scrum approach has significantly improved transparency, teamwork, and stakeholder engagement within VHP4Safety. It allowed for rapid integration of technological innovations and ensured that user feedback could be incorporated throughout the project. Team members experienced greater ownership and satisfaction, while the platform evolved iteratively to meet real needs of its end-users.

-
-
-
- - - -
-

- -

-
-
-

Constructive technology assessment is a way of researching into the design, desirability, costs and impacts of technologies. People interested, affected and influenced by a technology are put at the centre of definition, development and implementation and their active involvement is required from the start.

-

The virtual human platform has been developed with the contribution of multiple stakeholders from academia, industry and regulation. Through designathons and hackathons the perspectives of these stakeholders have been actively involved. Some of these stakeholders have also been addressed as potential users of the platform.

-

A matrix with 9 potential user profiles that could be served by the platform has been defined by focusing on expert users. In addition, expected transformations of the field through the implementation of the platform are proposed. Industry, regulators, and academia each have different needs from a shared platform. Meeting those needs collectively may enable a field-wide shift away from traditional animal testing toward New Approach Methodologies.

-
- Image showcasing a matrix of 9 user profiles -
-
-
-
- -
-
-
- - - - -
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

To add list of publications

-
-
-
- - -
-
-
- -
- -
-
- - -
-{% endblock %} diff --git a/impact.html b/impact.html deleted file mode 100644 index 243ed4d..0000000 --- a/impact.html +++ /dev/null @@ -1,273 +0,0 @@ -{% extends "base.html" %} {% block content %} - - - - - - - - -
- -
- - -
-
-
-

Impact

-

Strategies for future use of the Virtual Human Platform focusing on innovation, acceptance, and impact creation

-
-
-
- - - -
-
- - - - - -
-
-
-

In the VHP we identified five key routes to impact through which research and development projects on New Approach Methodologies (NAMs) pursue societal impact. Each route reflects a set of impact activities that target specific audiences.

-
- - - -
-

- -

-
-
-

The most prominent route is changing policy and regulation, targeting risk assessors, policymakers, and regulators. Community and capacity building focus on training and education to support wider adoption and use of NAMs. Advancing scientific development aims to strengthen the NAM ecosystem through coordination, infrastructure, and harmonization. In addition, projects engage in commercialization and industrial partnerships to bring innovations into industrial practice. Mobilizing civil society, though least common, seeks to involve the general public and civil society organizations.

-

Typically, projects organize 11–15 activities across multiple routes to impact. To maximize impact, however, projects should dedicate their efforts to specific routes, as time and capacities are often limited in projects. Moreover, projects should be encouraged to move beyond one-way dissemination toward two-way engagement, fostering active exchange with stakeholders—an area where many initiatives still have room to grow.

-
-
-
- - - - -
-

- -

-
-
-

Place for diagram

-
-
-
- - -
-
-
- - - - -
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

Moving towards human-relevant safety testing of chemicals and pharmaceuticals without the use of animals requires much more than scientific and technological innovation alone. Within the VHP4Safety project, an innovation system approach was developed in collaboration between innovation scholars and toxicologists to identify all the key processes needed to achieve this transition. The framework combines an established implementation curve for New Approach Methodologies (NAMs) with insights from innovation system theory. It can be used to assess for a specific NAM modality, sector or application context where efforts are currently concentrated, where gaps exist and where coordinated action is needed to accelerate the transition to animal-free and human relevant safety assessment.

-
-
-
- - - - -
-

- -

-
-
-

The result is the framework visualised in the figure below. The framework maps seven interconnected key processes that together shape progress towards human-relevant and animal-free safety assessment. Three of these processes being knowledge development, knowledge diffusion, and market formation align with the phases of the established implementation curve for NAMs. Four additional processes that the implementation curve does not capture are also included: resource mobilisation (funding, talent, and infrastructure), entrepreneurial experimentation (private and public actors pioneering new approaches and business models), legitimacy creation (building trust and shifting belief systems among stakeholders and the wider public), and providing directionality (formulating missions, policy goals, and regulatory requirements that guide collective action). Crucially, the framework presents these processes not as a linear sequence but as an interconnected web, in which positive feedback loops between processes can accelerate the transition, while bottlenecks in one area can hamper progress across other processes.

- -
- Innovation system framework diagram -
-
-
-
- - - - -
-

- -

-
-
-

The framework is designed to be used in workshop settings, bringing together a wide range of stakeholders to discuss both the structure and the functioning of the innovation system. To facilitate this, a masterclass was developed consisting of four interactive activities:

-
    -
  1. Mission specification and problem-solution diagnosis: to specify a mission in relation to human relevant safety assessment and to discuss which problem(s) the mission aims to solve and which (technical) solutions are proposed. This defines the scope for framework application in step 2-4.
  2. -
  3. Innovation system structure: to identify relevant actors, networks, and institutions (hard laws and regulations and softer norms) and to discuss the influence and interests of stakeholders in the identified mission.
  4. -
  5. Innovation system functioning: to assess the functioning of the innovation system based on the seven key processes, scored on a scale from 1 (function performs poorly and is hindering) to 5 (function performs well and is driving).
  6. -
  7. Towards action: to discuss activities that can contribute to strengthening the functioning of the innovation system and specifically how stakeholders participating in the workshop can contribute to realising progress towards human-relevant and animal-free safety assessment.
  8. -
-
-
-
- - - - -
-

- -

-
-
-

The framework has been applied to several cases in the VHP4Safety project including the three case-studies in the project. During one of the designathons, project participants and stakeholders also worked on their own cases ranging from very general missions (transition to human relevant chemical risk assessment) to very specific ones (e.g., serum-free medial compositions). Outside the VHP4Safety project, the framework has also been applied in the AFARA project which focuses on the identification of endocrine disrupting chemicals in regulatory contexts. The masterclass was also presented to the European Commission team developing the EU roadmap towards the phasing out of animal testing in chemical safety assessment.

-
-
-
- - - -
-
-
- - - - -
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

To be added later.

-
-
-
- - - -
-
-
- - - - -
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

To be added later.

-
-
-
- - - -
-
-
- - - - -
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

To add later

-
-
-
- - -
-
-
- -
- -
-
- - -
-{% endblock %} \ No newline at end of file diff --git a/static/images/implementation/Figure_User_profiles.png b/static/images/implementation/Figure_User_profiles.png deleted file mode 100644 index 73062ab..0000000 Binary files a/static/images/implementation/Figure_User_profiles.png and /dev/null differ diff --git a/static/images/implementation/Innovation system framework.png b/static/images/implementation/Innovation system framework.png deleted file mode 100644 index 085fce0..0000000 Binary files a/static/images/implementation/Innovation system framework.png and /dev/null differ diff --git a/static/images/implementation/project_structure.png b/static/images/implementation/project_structure.png deleted file mode 100644 index 9e9ec34..0000000 Binary files a/static/images/implementation/project_structure.png and /dev/null differ diff --git a/training.html b/training.html deleted file mode 100644 index 16e7cae..0000000 --- a/training.html +++ /dev/null @@ -1,146 +0,0 @@ -{% extends "base.html" %} {% block content %} - - - - - - - - -
- -
- - -
-
-
-

Training

-

Training and education to master the Virtual Human Platform and the principles of NAMs in human-based safety assessment

-
-
-
- - - -
-
- - - - - -
-
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

Content to be added

-
-
-
- - -
-
-
- - - - -
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

To be added later.

-
-
-
- - - -
-
-
- - - - -
-
-

Teaser text to be included later

-
- - - -
-

- -

-
-
-

To add later

-
-
-
- - -
-
-
- -
- -
-
- - -
-{% endblock %} \ No newline at end of file