diff --git a/ccsr/README.md b/ccsr/README.md
new file mode 100644
index 00000000..8786563a
--- /dev/null
+++ b/ccsr/README.md
@@ -0,0 +1,43 @@
+# CCSR Maprooms
+
+# Installation and Run Instructions
+
+## Creating a conda environment with this project's dependencies
+
+* see enacts' README
+
+## Running the application in a development environment
+
+* Activate the environment
+
+ `conda activate enactsmaproom`
+
+* Create a development configuration file by copying `config-dev-sample.yaml` to `config-dev.yaml` and editing it as needed. Be careful not to commit `config-dev.yaml`, since it might hold secrets.
+
+* Start the development server using your development config file with the iri or test-pycpt conf, e.g.:
+
+ `CONFIG=config-iri.yaml:config-dev.yaml python app.py`
+
+* Navigate your browser to the URL that is displayed when the server starts, e.g. `http://127.0.0.1:8050/python_maproom/`
+
+* When done using the server stop it with CTRL-C.
+
+# Development Overview
+
+see enacts' README
+
+## Adding or removing dependencies
+
+see enacts' README
+
+## Building the documentation
+
+see enacts' README
+
+# Docker Build Instructions
+
+TBD
+
+# Support
+
+* `help@iri.columbia.edu`
diff --git a/ccsr/app.py b/ccsr/app.py
new file mode 100644
index 00000000..03cff8d7
--- /dev/null
+++ b/ccsr/app.py
@@ -0,0 +1,37 @@
+import flask
+import importlib
+import os
+
+from globals_ import FLASK, GLOBAL_CONFIG
+import homepage
+import pingrid
+
+for name, config in GLOBAL_CONFIG['maprooms'].items():
+ if config is not None:
+ module = importlib.import_module(name)
+ if isinstance(config, list):
+ for c in config:
+ module.register(FLASK, c)
+
+
+@FLASK.route(f"{GLOBAL_CONFIG['url_path_prefix']}/health")
+def health_endpoint():
+ return flask.jsonify({'status': 'healthy', 'name': 'python_maproom'})
+
+
+if __name__ == "__main__":
+ if GLOBAL_CONFIG["mode"] != "prod":
+ import warnings
+ warnings.simplefilter("error")
+ debug = True
+ else:
+ debug = False
+
+ FLASK.run(
+ GLOBAL_CONFIG["dev_server_interface"],
+ GLOBAL_CONFIG["dev_server_port"],
+ debug=debug,
+ extra_files=os.environ["CONFIG"].split(":"),
+ processes=GLOBAL_CONFIG["dev_processes"],
+ threaded=False,
+ )
diff --git a/ccsr/config-defaults.yaml b/ccsr/config-defaults.yaml
new file mode 100644
index 00000000..1705a01a
--- /dev/null
+++ b/ccsr/config-defaults.yaml
@@ -0,0 +1,24 @@
+zoom: 2
+url_path_prefix: /python_maproom
+
+# DB configuration for the production docker environment. Override in
+# development environment.
+db:
+ host: postgres
+ port: 5432
+ user: ingrid
+ dbname: iridb
+
+maprooms:
+
+ flex_fcst:
+
+ # App set up
+ - title: flex_cst_precip
+ core_path: flex_fcst_precip
+ forecast_path: /Data/data21/iri/FD.3
+ variable: Precipitation
+ y_transform: False
+ method: elr # or pycpt
+ var: precip
+ issue_format: "%b %Y"
\ No newline at end of file
diff --git a/ccsr/config-dev-sample.yaml b/ccsr/config-dev-sample.yaml
new file mode 100644
index 00000000..f9b2875c
--- /dev/null
+++ b/ccsr/config-dev-sample.yaml
@@ -0,0 +1,9 @@
+# Configuration overrides for development. Copy this file to config-dev.yaml and edit as needed.
+
+mode: debug # debug, devel or prod
+dev_server_interface: 127.0.0.1
+dev_server_port: 8050
+dev_processes: 4
+db:
+ host: localhost # at IRI, set this to dlcomputemon1.iri.columbia.edu
+ password: null # at IRI, get the password from another developer
diff --git a/ccsr/config-iri.yaml b/ccsr/config-iri.yaml
new file mode 100644
index 00000000..990209e6
--- /dev/null
+++ b/ccsr/config-iri.yaml
@@ -0,0 +1,37 @@
+zoom: 2
+
+datasets:
+
+ shapes_adm:
+ - name: Countries
+ color: black
+ sql: select adm0_code as key, adm0_name as label,
+ ST_AsBinary(ST_SimplifyPreserveTopology(coarse_geom, 0.01)) as the_geom
+ from g2015_2012_0
+ is_checked: True
+ - name: 1st_order
+ color: grey
+ sql: select (adm0_code, adm1_code) as key, adm1_name as label,
+ ST_AsBinary(ST_SimplifyPreserveTopology(coarse_geom, 0.01)) as the_geom
+ from g2015_2014_1
+ is_checked: False
+ - name: 2nd_order
+ color: grey
+ sql: select (adm0_code, adm1_code) as key, adm1_name as label,
+ ST_AsBinary(ST_SimplifyPreserveTopology(coarse_geom, 0.01)) as the_geom
+ from g2015_2014_2
+ is_checked: False
+
+maprooms:
+
+ flex_fcst:
+
+ # App set up
+ - title: flex_cst_precip
+ core_path: flex_fcst_precip
+ forecast_path: /Data/data21/iri/FD.3
+ variable: Precipitation
+ y_transform: False
+ method: elr # or pycpt
+ var: precip
+ issue_format: "%b %Y"
diff --git a/ccsr/config-test-pycpt.yaml b/ccsr/config-test-pycpt.yaml
new file mode 100644
index 00000000..ff7e3837
--- /dev/null
+++ b/ccsr/config-test-pycpt.yaml
@@ -0,0 +1,28 @@
+zoom: 7
+
+datasets:
+ shapes_adm:
+ - name: Pays
+ color: black
+ sql: select id_0 as key, name_0 as label, ST_AsBinary(the_geom) as the_geom
+ from sen_adm0
+ is_checked: False
+ - name: Regions
+ color: black
+ sql: select id_1 as key, name_1 as label, ST_AsBinary(the_geom) as the_geom
+ from sen_adm1
+ is_checked: True
+
+maprooms:
+
+ flex_fcst:
+ - y_transform: false
+ variable: Precipitation
+ #Use one or the other to test the nc multiple/single cases
+ forecast_path: /data/remic/mydatafiles/ANACIM/cpt_demo/UCSBV3
+ #forecast_path: /data/remic/mydatafiles/ANACIM/cpt_demo/UCSBV3/Jul-Sep
+ title: flex_cst_precip
+ core_path: flex_fcst_precip
+    method: pycpt # or elr
+ var: precip
+ issue_format: "%b-%Y"
diff --git a/ccsr/fieldsets.py b/ccsr/fieldsets.py
new file mode 100644
index 00000000..9ae76a05
--- /dev/null
+++ b/ccsr/fieldsets.py
@@ -0,0 +1,388 @@
+import dash
+from dash import html
+import dash_bootstrap_components as dbc
+
+
+def Text(id, default):
+ """Provides input for text.
+
+ Auto-generates a dash bootstrap components
+ Input for selecting text.
+
+ Parameters
+ ----------
+ id : str
+ ID used for Dash callbacks.
+ default : str
+ Default value that is displayed in the input box when user loads the page.
+
+ Returns
+ -------
+ dbc.Input : component
+ dbc Input component with text inputs.
+ """
+ return dbc.Input(
+ id=id,
+ type="text",
+ size="sm",
+ class_name="m-0 p-0 d-inline-block w-auto",
+ debounce=True,
+ value=default,
+ )
+
+
+def Number(id, default=None, min=None, max=None, width="auto", debounce=True):
+ """Provides input for a number in a range.
+
+ Auto-generates a dash bootstrap components
+ Input for selecting a number from within a specified range.
+
+ Parameters
+ ----------
+ id : str
+ ID used for Dash callbacks.
+ default : float
+ Default value that is displayed in the input box when user loads the page.
+ min : float, optional
+ Minimum value the user can select from. Default is None.
+ max : float, optional
+        Maximum value the user can select from. Default is None.
+ width : string, optional
+ to control horizontal expansion of control. Accepts CSS entries. Default is "auto"
+
+ Returns
+ -------
+ dbc.Input : component
+ dbc Input component with numerical inputs.
+ """
+ return dbc.Input(
+ id=id,
+ type="number",
+ min=min,
+ max=max,
+ class_name="ps-1 pe-0 py-0 d-inline-block",
+ debounce=debounce,
+ value=str(default),
+ style={"font-size": "10pt", "width": width},
+ )
+
+
+def Month(id, default):
+ """Provides a selector for month.
+
+ Auto-generates a dash bootstrap components Input for selecting a month.
+
+ Parameters
+ ----------
+ id : str
+ ID used for Dash callbacks.
+ default : str
+ Default month value that is displayed in the input box when user loads the page.
+        Valid values are the first 3 letters of the month in English with initial in upper case.
+
+ Returns
+ -------
+ dbc.Select : component
+ dbc Select component with months of the year as options in dropdown.
+ """
+ options = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+ labels = [
+ "January",
+ "February",
+ "March",
+ "April",
+ "May",
+ "June",
+ "July",
+ "August",
+ "September",
+ "October",
+ "November",
+ "December",
+ ]
+ init = options.index(default)
+ return Select(id, options, labels=labels, init=init)
+
+
+def DateNoYear(id, defaultDay, defaultMonth):
+ """Provides a selector for date.
+
+ Auto-generates dash bootstrap components Input and Selector
+ for selecting a date as ('day', 'month').
+
+ Parameters
+ ----------
+ id : str
+ ID used for Dash callbacks.
+ defaultDay : int
+ Default value that is displayed in the input box when user loads the page.
+ defaultMonth : str
+ Default value that is displayed in the dropdown when user loads the page.
+        Valid values are the first 3 letters of the month in English with initial in upper case.
+
+ Returns
+ -------
+ [dbc.Input, dbc.Select] : list
+ List which includes dbc Input component with days of the month,
+ and dbc Select component with months of the year as options in dropdown.
+
+ See Also
+ --------
+ Month
+ """
+ idd = id + "day"
+ idm = id + "month"
+ return [
+ Number(idd, defaultDay, min=1, max=31, width="4em")[0],
+ Month(idm, defaultMonth),
+ ]
+
+def Sentence(*elems):
+ """Creates sentences with dash components.
+
+ Creates a sentence structure where any part of the sentence can be strings, Inputs, Dropdowns, etc.
+
+ Parameters
+ ----------
+ elems : list
+ A list of elements to be included in constructing the full sentence.
+
+ Returns
+ -------
+ dbc.Form : component
+ A dbc Form which formats all list elements into a sentence
+ where the user can interact with Inputs within the sentence.
+
+ Notes
+ ------
+ Still in development.
+ """
+ tail = (len(elems) % 2) == 1
+ groups = []
+ start = 0
+
+ if not isinstance(elems[0], str):
+ start = 1
+ tail = (len(elems) % 2) == 0
+ groups.extend(elems[0])
+
+ for i in range(start, len(elems) - (1 if tail else 0), 2):
+ assert (isinstance(elems[i], str) or isinstance(elems[i], html.Span))
+ groups.append(dbc.Label(elems[i], class_name="m-0"))
+ groups.extend(elems[i + 1])
+
+ if tail:
+ assert (isinstance(elems[-1], str) or isinstance(elems[-1], html.Span))
+ groups.append(dbc.Label(elems[-1], class_name="m-0"))
+
+ return dbc.Form(groups, class_name="py-0 d-inline-block", style={"font-size": "10pt"})
+
+def Block(
+ id,
+ title,
+ *body,
+ is_on=True,
+ width="auto",
+ border_color="grey",
+ button_id=None,
+):
+ """Separates out components in individual Fieldsets
+
+ Auto-generates a formatted block with a fieldset header and body.
+
+ Parameters
+ ----------
+ title : str
+ Title of the fieldset to be displayed.
+ body : str, dbc
+ Any number of elements which can be of various types to be
+ formatted as a sentence within the fieldset body.
+ is_on : boolean, optional
+ Fieldset is not displayed if False, default is True
+ width : str, optional
+ html style attribute value to determine width of the fieldset within its
+ parent container. Default `width` ="auto".
+ button_id : str, optional
+ name of id used to replace default Fieldset's Legend with a clickable button
+ Displays `title`
+
+ Returns
+ -------
+ html.Fieldset : component
+ An html Fieldset which has a pre-formatted title and body where the body can
+ be any number of elements.
+ """
+ if is_on:
+ the_display = "block"
+ else:
+ the_display = "none"
+ if button_id == None:
+ legend = html.Legend(
+ title,
+ className="position-absolute top-0 start-0 translate-middle-y",
+ style={
+ "font-size": "10pt",
+ "border-style": "outset",
+ "border-width": "2px",
+ "border-top-width": "0px",
+ "border-left-width": "0px",
+ "-moz-border-radius": "4px",
+ "border-radius": "4px",
+ "background-color": "WhiteSmoke",
+ "border-color": "LightGrey",
+ "padding-bottom": "1px",
+ "padding-left": "2px",
+ "padding-right": "2px",
+ "width": "auto",
+ }
+ )
+ else:
+ legend = dbc.Button(
+ id=button_id,
+ children=title,
+ class_name="position-absolute top-0 end-0 translate-middle-y",
+ style={
+ "font-size": "10pt",
+ "border-style": "outset",
+ "border-width": "2px",
+ "border-top-width": "0px",
+ "border-left-width": "0px",
+ "-moz-border-radius": "4px",
+ "border-radius": "4px",
+ "padding-bottom": "1px",
+ "padding-left": "2px",
+ "padding-right": "2px",
+ "width": "auto",
+ },
+ )
+ return html.Fieldset(
+ [
+ legend,
+ html.Div(
+ body,
+ className="pt-2 mt-0",
+ style={
+ "padding-left": "4px",
+ "padding-right": "4px",
+ "padding-bottom": "4px",
+ "margin": "0px",
+ "-moz-border-radius": "8px",
+ "border-radius": "8px",
+ "border-style": "inset",
+ "background-color": "LightGrey",
+ "border-color": border_color,
+ "display": "block",
+ "width": "auto",
+ "float": "left",
+
+ },
+ ),
+ ],
+ id=id,
+ className="p-0 mt-2 position-relative",
+ style={
+ "display": the_display,
+ "width": width,
+ "float": "left",
+ },
+ )
+
+def Options(options, labels=None):
+ """ Creates options for definition of different Dash components.
+
+ Creates a dictionary of 'labels' and 'values'
+ to be used as options for an element within different Dash components.
+
+ Parameters
+ ----------
+ options : list
+ List of values (str, int, float, etc.) which are the options of values to select from some data.
+ labels : list
+ List of values (str, int, float, etc.) which are labels representing the data values defined in `options`,
+ which do not have to be identical to the values in `options`.
+
+ Returns
+ -------
+ list of dicts
+ A list which holds a dictionary for each `options` value where key 'value' == `options` value,
+ and key 'label' == `labels` value if `label` != 'None'.
+
+ Notes
+ -----
+ The default `labels=None` will use `options` to define both the labels and the values.
+ If `labels` is populated with a list, the labels can be different from the data values.
+ In this case, the values must still match the actual data values, whereas the labels do not.
+ An error will be thrown if the number of elements in `options` list != `labels` list.
+ """
+ if labels == None:
+ return [
+ { "label": opt, "value": opt }
+ for opt in options
+ ]
+ else:
+ assert len(labels) == len(options), "The number of labels and values are not equal."
+ return [
+ { "label": label, "value":value }
+ for (label,value) in zip(labels,options)
+ ]
+
+def Select(id, options, labels=None, init=0):
+ """Provides a selector for a list of options.
+
+    Creates an auto-populated dash bootstrap components Select component.
+
+ Parameters
+ ----------
+ id : str
+ ID used for Dash callbacks.
+ options : list
+ List of values (str, int, float, etc.) which are the options of values to select from some data.
+ labels : list, optional
+ List of values (str, int, float, etc.) which are labels representing the data values defined in `options`,
+ which do not have to be identical to the values in `options` , which are the default.
+ init : int, optional
+ Index value which determines which value from the list of `options` will be displayed when user loads page.
+ Default is 0.
+
+ Returns
+ -------
+ dbc.Select : component
+ A dbc dropdown which is auto-populated with 'values' and 'labels' where key 'value' == `options` value,
+ and key 'label' == `labels` value if `label` != 'None'.
+
+ Notes
+ -----
+ If `labels` is populated with a list, the labels can be different from the data values.
+ In this case, the values must still match the actual data values, whereas the labels do not.
+ An error will be thrown if the number of elements in `options` list != `labels` list.
+ """
+ if labels == None:
+ opts = [ dict(label=opt, value=opt) for opt in options ]
+ else:
+ assert len(labels) == len(options), "The number of labels and values are not equal."
+ opts = [dict(label=label, value=opt) for (label,opt) in zip(labels,options)]
+ return dbc.Select(
+ id=id,
+ value=options[init],
+ class_name="d-inline-block w-auto py-0 pl-0 m-0",
+ options=opts,
+ style={"font-size": "10pt", "max-width": "100%", "white-space": "nowrap"},
+ )
+
+def PickPoint(width="auto"):
+ return Block("point_block", "Pick lat/lon",
+ Number(id="lat_input", width=width), "˚N",
+ dbc.Tooltip(
+ id="lat_input_tooltip",
+ target="lat_input",
+ className="tooltiptext",
+ ),
+ ", ",
+ Number(id="lng_input", width=width), "˚E",
+ dbc.Tooltip(
+ id="lng_input_tooltip",
+ target="lng_input",
+ className="tooltiptext",
+ ),
+ button_id="submit_lat_lng",
+ )
diff --git a/ccsr/flex_fcst/README.md b/ccsr/flex_fcst/README.md
new file mode 100644
index 00000000..fc594ab7
--- /dev/null
+++ b/ccsr/flex_fcst/README.md
@@ -0,0 +1,4 @@
+# Seasonal/Subseasonal Forecast Maproom
+
+This directory contains a maproom to visualize
+CPT seasonal and subseasonal forecasts (full distribution)
diff --git a/ccsr/flex_fcst/__init__.py b/ccsr/flex_fcst/__init__.py
new file mode 100644
index 00000000..0379666a
--- /dev/null
+++ b/ccsr/flex_fcst/__init__.py
@@ -0,0 +1 @@
+from .maproom import register
diff --git a/ccsr/flex_fcst/layout.py b/ccsr/flex_fcst/layout.py
new file mode 100644
index 00000000..88a6b9e7
--- /dev/null
+++ b/ccsr/flex_fcst/layout.py
@@ -0,0 +1,71 @@
+from dash import dcc
+from dash import html
+import dash_bootstrap_components as dbc
+import dash_leaflet as dlf
+from fieldsets import Block, Select, PickPoint, Month, Number
+import layout_utilities as lou
+
+from globals_ import GLOBAL_CONFIG
+
+IRI_BLUE = "rgb(25,57,138)"
+IRI_GRAY = "rgb(113,112,116)"
+LIGHT_GRAY = "#eeeeee"
+
+
+def app_layout():
+
+ return lou.app_1(
+
+ lou.navbar("Forecast",
+ Block("proba_block", "Probability", Select(
+ id="proba",
+ options=["exceeding", "non-exceeding"],
+ )),
+ Block("var_block", "Variable", Select(
+ id="variable",
+ options=["Percentile", "Value"],
+ )),
+ Block("perc_block", "Percentile", Select(
+ id="percentile",
+ options=range(10, 95, 5),
+ init=8,
+ ), " %-ile"),
+ Block("thresh_block", "Threshold",
+ Number(id="threshold", default=0, debounce=False),
+ html.Div(id='phys-units', style={"color": "white"}),
+ ),
+ Block("issue_block", "Issue", html.Div(id="start_div")),
+ Block("target_block", "Target Period", html.Div(id="lead_div")),
+ PickPoint(width="8em"),
+ ),
+
+ lou.description(
+ "Forecast",
+ """
+ This Maproom displays the full forecast distribution
+ in different flavors.
+ """,
+ html.P(
+ """
+ The map shows the probability of exceeding or non-exceeding
+ an observed historical percentile or a threshold in the variable physical units
+ for a given forecast (issue date and target period).
+ Use the controls in the top banner to choose presentation of the forecast to map
+ and to navigate through other forecast issues and targets.
+ """
+ ),
+ html.P(
+ """
+ Click the map to show forecast and observed
+ probability of exceeding and distribution
+ at the clicked location.
+ """
+ ),
+ ),
+
+ lou.map(GLOBAL_CONFIG["zoom"]),
+
+ lou.local_double_tabbed(
+ ["Probability of Exceedance", "Probability Distribution"],
+ ),
+ )
diff --git a/ccsr/flex_fcst/maproom.py b/ccsr/flex_fcst/maproom.py
new file mode 100644
index 00000000..74473f12
--- /dev/null
+++ b/ccsr/flex_fcst/maproom.py
@@ -0,0 +1,345 @@
+import dash
+import dash_bootstrap_components as dbc
+from dash.dependencies import Output, Input, State
+import pingrid
+from pingrid import CMAPS
+from . import layout
+import plotly.graph_objects as pgo
+import numpy as np
+import xarray as xr
+import flex_fcst_calc as ffc
+import maproom_utilities as mru
+from globals_ import GLOBAL_CONFIG
+from fieldsets import Select
+
+
+def register(FLASK, config):
+ PFX = f"{GLOBAL_CONFIG['url_path_prefix']}/{config['core_path']}"
+ TILE_PFX = f"{PFX}/tile"
+
+ # App
+
+ APP = dash.Dash(
+ __name__,
+ server=FLASK,
+ external_stylesheets=[
+ dbc.themes.BOOTSTRAP,
+ ],
+ url_base_pathname=f"{PFX}/",
+ meta_tags=[
+ {"name": "description", "content": "Forecast"},
+ {"name": "viewport", "content": "width=device-width, initial-scale=1.0"},
+ ],
+ )
+ APP.title = "Forecast"
+
+ APP.layout = layout.app_layout()
+
+ @APP.callback(
+ Output("phys-units" ,"children"),
+ Output("start_div", "children"),
+ Output("lead_div", "children"),
+ Output("target_block", "style"),
+ Output("lat_input", "min"),
+ Output("lat_input", "max"),
+ Output("lat_input_tooltip", "children"),
+ Output("lng_input", "min"),
+ Output("lng_input", "max"),
+ Output("lng_input_tooltip", "children"),
+ Output("map", "center"),
+ Input("location", "pathname"),
+ )
+ def initialize(path):
+ #Initialization for start date dropdown to get a list of start dates
+ # according to files available
+ fcst_ds, obs, start_dates, target_display = ffc.get_fcst(config)
+ phys_units = [" "+obs.attrs["units"]] if "units" in obs.attrs else ""
+ issue_select = Select("start_date", options=start_dates)
+ target_dict = ffc.get_targets_dict(config, fcst_ds, start_dates[0])
+ lead_select = Select("lead_time",
+ options=[ld["value"] for ld in target_dict],
+ labels=[ld["label"] for ld in target_dict],
+ )
+ lead_style = {"display": "block"} if target_display else {"display": "none"}
+ return (
+ phys_units, issue_select, lead_select, lead_style,
+ ) + mru.initialize_map(fcst_ds)
+
+ @APP.callback(
+ Output("perc_block", "style"),
+ Output("thresh_block", "style"),
+ Input("variable", "value")
+ )
+ def display_relevant_control(variable):
+ if variable == "Percentile":
+ style_percentile = {"display": "block"}
+ style_threshold = {"display": "none"}
+ else:
+ style_percentile = {"display": "none"}
+ style_threshold = {"display": "block"}
+ return style_percentile, style_threshold
+
+
+ @APP.callback(
+ Output("lead_time","options"),
+ Input("start_date","value"),
+ )
+ def target_range_options(start_date):
+ fcst_ds, _, _, _ = ffc.get_fcst(config)
+ return ffc.get_targets_dict(config, fcst_ds, start_date)
+
+
+ @APP.callback(
+ Output("map_title", "children"),
+ Input("start_date", "value"),
+ Input("lead_time", "value"),
+ Input("lead_time", "options"),
+ )
+ def write_map_title(start_date, lead_time, targets):
+ for option in targets :
+ if option["value"] == int(lead_time) :
+ target = option["label"]
+ return f'{target} {config["variable"]} Forecast issued {start_date}'
+
+
+ @APP.callback(
+ Output("loc_marker", "position"),
+ Output("lat_input", "value"),
+ Output("lng_input", "value"),
+ Input("submit_lat_lng","n_clicks"),
+ Input("map", "click_lat_lng"),
+ State("lat_input", "value"),
+ State("lng_input", "value")
+ )
+ def pick_location(n_clicks, click_lat_lng, latitude, longitude):
+ # Reading
+ fcst_ds, _, _, _ = ffc.get_fcst(config)
+ return mru.picked_location(
+ fcst_ds, [""], click_lat_lng, latitude, longitude
+ )
+
+
+ @APP.callback(
+ Output("local_graph_0", "figure"),
+ Output("local_graph_1", "figure"),
+ Input("loc_marker", "position"),
+ Input("start_date", "value"),
+ Input("lead_time", "value"),
+ Input("lead_time", "options"),
+ )
+ def local_plots(marker_pos, start_date, lead_time, targets):
+ # Time Units Errors handling
+ # Reading
+ lat = marker_pos[0]
+ lng = marker_pos[1]
+ fcst_ds, obs, _, _ = ffc.get_fcst(config, start_date=start_date, lead_time=lead_time)
+ # Errors handling
+ fcst_ds, obs, error_msg = ffc.errors_handling(config, fcst_ds, obs, lat, lng)
+ if error_msg is not None:
+ error_fig = pingrid.error_fig(error_msg)
+ return error_fig, error_fig
+ else:
+ # CDF from 499 quantiles
+ quantiles = np.arange(1, 500) / 500
+ quantiles = xr.DataArray(
+ quantiles, dims="percentile", coords={"percentile": quantiles}
+ )
+ (
+ cdf_fcst_x, cdf_fcst_y, cdf_obs_x, cdf_obs_y,
+ cdf_obs_emp_x, cdf_obs_emp_y,
+ pdf_fcst_x, pdf_fcst_y, pdf_obs_x, pdf_obs_y,
+ ) = ffc.distribution(quantiles, obs, fcst_ds, config)
+ phys_units = (" "+obs.attrs["units"]) if "units" in obs.attrs else ""
+ # Graph for CDF
+ cdf_graph = pgo.Figure()
+ cdf_graph.add_trace(
+ pgo.Scatter(
+ x=cdf_fcst_x.squeeze().values,
+ y=cdf_fcst_y,
+ hovertemplate="%{y:.0%} chance of exceeding"
+                    + "<br>%{x:.1f} "
+ + phys_units,
+ name="forecast",
+ line=pgo.scatter.Line(color="red"),
+ )
+ )
+ cdf_graph.add_trace(
+ pgo.Scatter(
+ x=cdf_obs_x.values,
+ y=cdf_obs_y,
+ hovertemplate="%{y:.0%} chance of exceeding"
+                    + "<br>%{x:.1f} "
+ + phys_units,
+ name="obs (parametric)",
+ line=pgo.scatter.Line(color="blue"),
+ )
+ )
+ if config["method"] == "pycpt" :
+ cdf_graph.add_trace(
+ pgo.Scatter(
+ x=cdf_obs_emp_x.values,
+ y=cdf_obs_emp_y,
+ hovertemplate="%{y:.0%} chance of exceeding"
+                        + "<br>%{x:.1f} "
+ + phys_units,
+ name="obs (empirical)",
+ line=pgo.scatter.Line(color="forestgreen"),
+ )
+ )
+ cdf_graph.update_traces(mode="lines", connectgaps=False)
+ for option in targets :
+ if option["value"] == int(lead_time) :
+ target = option["label"]
+ cdf_graph.update_layout(
+ xaxis_title=f'{config["variable"]} ({phys_units})',
+ yaxis_title="Probability of exceeding",
+ title={
+ "text": (
+ f'{target} forecast issued {start_date} '
+                    f'<br> at ({fcst_ds["Y"].values}N,{fcst_ds["X"].values}E)'
+ ),
+ "font": dict(size=14),
+ },
+ )
+ # Graph for PDF
+ pdf_graph = pgo.Figure()
+ pdf_graph.add_trace(
+ pgo.Scatter(
+ x=pdf_fcst_x.squeeze().values,
+ y=pdf_fcst_y.squeeze().values,
+ customdata=(quantiles["percentile"] * -1 + 1),
+ hovertemplate="%{customdata:.0%} chance of exceeding"
+                + "<br>%{x:.1f} "
+ + phys_units,
+ name="forecast",
+ line=pgo.scatter.Line(color="red"),
+ )
+ )
+ pdf_graph.add_trace(
+ pgo.Scatter(
+ x=pdf_obs_x.values,
+ y=pdf_obs_y.values,
+ customdata=(quantiles["percentile"] * -1 + 1),
+ hovertemplate="%{customdata:.0%} chance of exceeding"
+                + "<br>%{x:.1f} "
+ + phys_units,
+ name="obs",
+ line=pgo.scatter.Line(color="blue"),
+ )
+ )
+ pdf_graph.update_traces(mode="lines", connectgaps=False)
+ pdf_graph.update_layout(
+ xaxis_title=f'{config["variable"]} ({phys_units})',
+ yaxis_title="Probability density",
+ title={
+ "text": (
+ f'{target} forecast issued {start_date} '
+                    f'<br> at ({fcst_ds["Y"].values}N,{fcst_ds["X"].values}E)'
+ ),
+ "font": dict(size=14),
+ },
+ )
+ return cdf_graph, pdf_graph
+
+
+ def to_flexible(fcst_cdf, proba, variable, percentile):
+ if variable == "Percentile":
+ if proba == "exceeding":
+ fcst_cdf = 1 - fcst_cdf
+ percentile = 1 - percentile
+ color_scale = CMAPS["rain_poe"]
+ else:
+ color_scale = CMAPS["rain_pne"]
+ scale = [
+ 0,
+ (percentile - 0.05) * 1/3,
+ (percentile - 0.05) * 2/3,
+ percentile - 0.05,
+ percentile - 0.05,
+ percentile + 0.05,
+ percentile + 0.05,
+ percentile + 0.05 + (1 - (percentile + 0.05)) * 1/3,
+ percentile + 0.05 + (1 - (percentile + 0.05)) * 2/3,
+ 1,
+ ]
+ color_scale = pingrid.ColorScale(
+ color_scale.name, color_scale.colors, scale=scale,
+ )
+ else:
+ if proba == "exceeding":
+ fcst_cdf = 1 - fcst_cdf
+ color_scale = CMAPS["correlation"]
+ fcst_cdf.attrs["colormap"] = color_scale
+ fcst_cdf.attrs["scale_min"] = 0
+ fcst_cdf.attrs["scale_max"] = 1
+ return fcst_cdf
+
+
+ @APP.callback(
+ Output("colorbar", "colorscale"),
+ Input("proba", "value"),
+ Input("variable", "value"),
+ Input("percentile", "value")
+ )
+ def draw_colorbar(proba, variable, percentile):
+ return to_flexible(
+ xr.DataArray(), proba, variable, int(percentile)/100.,
+ ).attrs["colormap"].to_dash_leaflet()
+
+
+ @APP.callback(
+ Output("layers_control", "children"),
+ Input("proba", "value"),
+ Input("variable", "value"),
+ Input("percentile", "value"),
+ Input("threshold", "value"),
+ Input("start_date","value"),
+ Input("lead_time","value")
+ )
+ def make_map(proba, variable, percentile, threshold, start_date, lead_time):
+ try:
+ if variable != "Percentile":
+ if threshold is None:
+ url_str = ""
+ send_alarm = True
+ else:
+ send_alarm = False
+ url_str = (
+ f"{TILE_PFX}/{{z}}/{{x}}/{{y}}/{proba}/{variable}/"
+ f"{percentile}/{float(threshold)}/{start_date}/{lead_time}"
+ )
+ else:
+ send_alarm = False
+ url_str = (
+ f"{TILE_PFX}/{{z}}/{{x}}/{{y}}/{proba}/{variable}/"
+ f"{percentile}/0.0/{start_date}/{lead_time}"
+ )
+ except:
+ url_str= ""
+ return mru.layers_controls(
+ url_str, "flex_fcst", "Forecast",
+ GLOBAL_CONFIG["datasets"]["shapes_adm"], GLOBAL_CONFIG,
+ )
+
+
+ # Endpoints
+
+ @FLASK.route(
+ (
+            f"{TILE_PFX}/<int:tz>/<int:tx>/<int:ty>/<proba>/<variable>/"
+            f"<percentile>/<float:threshold>/<start_date>/<lead_time>"
+ ),
+ endpoint=f"{config['core_path']}"
+ )
+ def fcst_tiles(tz, tx, ty, proba, variable, percentile, threshold, start_date, lead_time):
+ # Reading
+ fcst_ds, obs, _, _ = ffc.get_fcst(config, start_date=start_date, lead_time=lead_time)
+ fcst_cdf = ffc.proba(variable, obs, int(percentile)/100., config, threshold, fcst_ds)
+ # Depending on choices:
+ # probabilities symmetry around percentile threshold
+ # choice of colorscale (dry to wet, wet to dry, or correlation)
+ fcst_cdf = to_flexible(
+ fcst_cdf, proba, variable, int(percentile)/100.
+ )
+ resp = pingrid.tile(fcst_cdf, tx, ty, tz)
+ return resp
diff --git a/ccsr/flex_fcst_calc.py b/ccsr/flex_fcst_calc.py
new file mode 100644
index 00000000..4d07b0fa
--- /dev/null
+++ b/ccsr/flex_fcst_calc.py
@@ -0,0 +1,150 @@
+import pycpt
+import iri_fd_elr as ife
+from pathlib import Path
+import pingrid
+import numpy as np
+import datetime
+
+
+def get_targets_dict(fcst_conf, fcst_ds, start_date):
+ if fcst_conf["method"] == "pycpt":
+ return pycpt.pycptv2_targets_dict(fcst_ds, start_date=start_date)
+ else:
+ S = datetime.datetime(
+ int(start_date[4:8]), ife.strftimeb2int(start_date[0:3]), 1
+ )
+ return ife.targets_dict(fcst_ds, S)
+
+
+def get_fcst(fcst_conf, start_date=None, lead_time=None):
+ if fcst_conf["method"] == "pycpt":
+ fcst_ds, obs = pycpt.get_fcst(fcst_conf)
+ start_dates = sorted(
+ fcst_ds["S"].dt.strftime(fcst_conf["issue_format"]).values, reverse=True
+ )
+ if "Li" in fcst_ds.dims :
+ target_display = True if (fcst_ds["Li"].size > 1) else False
+ else :
+ target_display = False
+ if start_date is not None:
+ fcst_ds = fcst_ds.sel(S=start_date)
+ if (("Li" in fcst_ds.dims) and (lead_time is not None)):
+ fcst_ds = fcst_ds.sel(Li=int(lead_time))
+ obs = obs.where(
+ obs["T"].dt.month == fcst_ds.squeeze()["T"].dt.month.values,
+ drop=True,
+ )
+ else:
+ S = None if start_date is None else (start_date[0:3] + start_date[4:8])
+ fcst_mean_ds, fcst_coeff_ds, fcst_clim_coeff_ds = ife.read_elr(
+ Path(fcst_conf["forecast_path"]), fcst_conf["var"], S=S
+ )
+ fcst_ds = (
+ fcst_mean_ds
+ .rename({"coeff": "fcst_mean", "prob": "mod"})
+ .merge(fcst_coeff_ds)
+ .rename({"longitude": "X", "latitude": "Y"})
+ )
+ obs = fcst_clim_coeff_ds.rename({"longitude": "X", "latitude": "Y"})
+ # This is needed because we don't want the map centered in the Pacific
+ # and because anyway leaflet (or pingrid?) is confused with X from 0 to 360
+ fcst_ds = fcst_ds.assign_coords(X=(((fcst_ds.X + 180) % 360) - 180)).sortby("X")
+ obs = obs.assign_coords(X=(((obs.X + 180) % 360) - 180)).sortby("X")
+ start_dates = [
+ sd.strftime(fcst_conf["issue_format"])
+ for sd in ife.get_elr_S(
+ Path(fcst_conf["forecast_path"]), fcst_conf["var"]
+ )
+ ]
+ target_display = True
+ if lead_time is not None:
+ fcst_ds = fcst_ds.sel(lead=int(lead_time))
+ obs = obs.sel(lead=int(lead_time))
+ return fcst_ds, obs, start_dates, target_display
+
+
+def errors_handling(config, fcst_ds, obs, lat, lng):
+ if config["method"] == "pycpt" :
+ fcst_ds_input1 = "deterministic"
+ fcst_ds_input2 = "prediction_error_variance"
+ else:
+ fcst_ds_input1 = "fcst_mean"
+ fcst_ds_input2 = "coeff"
+ error_msg = None
+ try:
+ if (
+ fcst_ds[fcst_ds_input1] is None
+ or fcst_ds[fcst_ds_input2] is None
+ or obs is None
+ ):
+ error_msg="Data missing for this issue and target"
+ else:
+ fcst_ds = pingrid.sel_snap(fcst_ds, lat, lng)
+ obs = pingrid.sel_snap(obs, lat, lng)
+ isnan = (
+ np.isnan(fcst_ds[fcst_ds_input1]).all()
+ or np.isnan(obs).any()
+ )
+ if config["y_transform"]:
+ if fcst_ds["hcst"] is None:
+ error_msg="Data missing for this issue and target"
+ else:
+ isnan_yt = np.isnan(fcst_ds["hcst"]).any()
+ isnan = isnan or isnan_yt
+ if isnan:
+ error_msg="Data missing at this location"
+ except KeyError:
+ error_msg="Grid box out of data domain"
+ return fcst_ds, obs, error_msg
+
+
+def distribution(quantiles, obs, fcst_ds, config):
+ if config["method"] == "pycpt" :
+ fcst_ppf, obs_ppf, obs_quant, fcst_pdf, obs_pdf = pycpt.distribution(
+ quantiles, obs, fcst_ds, config
+ )
+ poe = fcst_ppf["percentile"] * -1 + 1
+ cdf_fcst_x = fcst_ppf
+ cdf_fcst_y = poe
+ cdf_obs_x = obs_ppf
+ cdf_obs_y = poe
+ cdf_obs_emp_x = obs_quant
+ cdf_obs_emp_y = poe
+ pdf_fcst_x = fcst_ppf
+ pdf_fcst_y = fcst_pdf
+ pdf_obs_x = obs_ppf
+ pdf_obs_y = obs_pdf
+ else :
+ fcst_cdf, obs_cdf, obs_ppf, fcst_pdf, obs_pdf = ife.ELR_distribution(
+ quantiles, obs, fcst_ds
+ )
+ cdf_fcst_x = obs_ppf
+ cdf_fcst_y = fcst_cdf
+ cdf_obs_x = obs_ppf
+ cdf_obs_y = obs_cdf
+ cdf_obs_emp_x = None
+ cdf_obs_emp_y = None
+ pdf_fcst_x = obs_ppf
+ pdf_fcst_y = fcst_pdf
+ pdf_obs_x = obs_ppf
+ pdf_obs_y = obs_pdf
+ return (
+ cdf_fcst_x, cdf_fcst_y, cdf_obs_x, cdf_obs_y, cdf_obs_emp_x, cdf_obs_emp_y,
+ pdf_fcst_x, pdf_fcst_y, pdf_obs_x, pdf_obs_y,
+ )
+
+
+def proba(variable, obs, percentile, config, threshold, fcst_ds):
+ if config["method"] == "pycpt" :
+ return pycpt.proba(variable, obs, percentile, config, threshold, fcst_ds)
+ else:
+ if variable == "Percentile":
+ threshold = None
+ else:
+ percentile = None
+ if config["variable"] == "Precipitation" :
+ clipv = 0
+ return ife.proba(
+ fcst_ds, obs, percentile=percentile, threshold=threshold, clipv=clipv
+ ).rename(X="lon", Y="lat")
+
\ No newline at end of file
diff --git a/ccsr/globals_.py b/ccsr/globals_.py
new file mode 100644
index 00000000..848ad29c
--- /dev/null
+++ b/ccsr/globals_.py
@@ -0,0 +1,22 @@
+import flask
+import pingrid
+import os
+
# Layer the operational configuration (file path(s) from the CONFIG
# environment variable) on top of the repository defaults. The "maprooms"
# section is merged per-maproom below rather than wholesale.
defaultconfig = pingrid.load_config("config-defaults.yaml")
appconfig = pingrid.load_config(os.environ["CONFIG"])
GLOBAL_CONFIG = pingrid.deep_merge(
    {k: v for k, v in defaultconfig.items() if k != "maprooms"},
    {k: v for k, v in appconfig.items() if k != "maprooms"},
)
GLOBAL_CONFIG['maprooms'] = {}
for k, v in appconfig['maprooms'].items():
    # A list value configures several instances of the same maproom, each
    # merged with that maproom's defaults. A None value disables the
    # maproom (it is omitted from GLOBAL_CONFIG entirely).
    if isinstance(v, list):
        GLOBAL_CONFIG['maprooms'][k] = [pingrid.deep_merge(defaultconfig['maprooms'][k], v[i]) for i in range(len(v))]
    elif v is not None:
        GLOBAL_CONFIG['maprooms'][k] = pingrid.deep_merge(defaultconfig['maprooms'][k], v)


# Single Flask app shared by all maprooms; static assets are served under
# the configured URL prefix.
FLASK = flask.Flask(
    "ccsrmaprooms",
    static_url_path=f'{GLOBAL_CONFIG["url_path_prefix"]}/static',
)
diff --git a/ccsr/homepage.py b/ccsr/homepage.py
new file mode 100644
index 00000000..89eda6c3
--- /dev/null
+++ b/ccsr/homepage.py
@@ -0,0 +1,32 @@
+import flask
+import os
+
+from globals_ import FLASK, GLOBAL_CONFIG
+import pingrid
+
+template = '''
+{% for item in maprooms %}
+{{item["title"]}}
+{% endfor %}
+'''
+
maprooms = []

# Collect a (title, path) entry for every configured maproom instance so
# the homepage template can link to each of them.
for name, config in GLOBAL_CONFIG["maprooms"].items():
    if config is not None:
        # A maproom may be configured multiple times (list of configs)
        if not isinstance(config, list):
            config = [config]
        for c in config:
            try:
                one_config = {
                    "title": c['title'],
                    "path": f'{GLOBAL_CONFIG["url_path_prefix"]}/{c["core_path"]}',
                }
            except KeyError as e:
                # fail fast at import time rather than serving a broken link
                raise Exception(f'configuration of maproom "{name}" is incomplete') from e
            maprooms.append(one_config)
+
+
@FLASK.route(GLOBAL_CONFIG["url_path_prefix"] + "/")
def homepage():
    """Render the landing page listing links to all configured maprooms."""
    return flask.render_template_string(template, maprooms=maprooms)
diff --git a/ccsr/iri_fd_elr.py b/ccsr/iri_fd_elr.py
new file mode 100644
index 00000000..37f95f59
--- /dev/null
+++ b/ccsr/iri_fd_elr.py
@@ -0,0 +1,273 @@
+from pathlib import Path
+import xarray as xr
+import datetime
+import predictions
+from dateutil.relativedelta import relativedelta
+import numpy as np
+
+
def strftimeb2int(strftimeb):
    """Translate a three-letter English month abbreviation to its number.

    Parameters
    ----------
    strftimeb : str
        Month abbreviation as produced by strftime's %b ("Jan" .. "Dec").

    Returns
    -------
    int
        Month number between 1 and 12.

    Raises
    ------
    KeyError
        If `strftimeb` is not one of the twelve abbreviations.
    """
    month_numbers = {
        abbrev: number
        for number, abbrev in enumerate(
            (
                "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
            ),
            start=1,
        )
    }
    return month_numbers[strftimeb]
+
+
def get_elr_S(data_path, var):
    """List the available forecast issue dates, most recent first.

    Issue dates are parsed from file names of the form
    ``forecast_mean_{var}_{MonYYYY}.nc`` found under `data_path`.

    Parameters
    ----------
    data_path : Path
        Directory containing the forecast files.
    var : str
        Forecast variable as it appears in the file names.

    Returns
    -------
    list[datetime.datetime]
        First of each issue month, in reverse chronological order.
    """
    issues = []
    for f in data_path.glob(f"forecast_mean_{var}_*.nc"):
        # file name ends with e.g. "...Jan2026.nc":
        # month abbreviation at [-10:-7], year at [-7:-3]
        issues.append(datetime.datetime(
            int(f.name[-7:-3]),
            strftimeb2int(f.name[-10:-7]),
            1,
        ))
    issues.sort(reverse=True)
    return issues
+
+
def read_elr(data_path, var, S=None):
    """Open the forecast mean, ELR parameters, and climatological ELR
    parameters of one forecast issue.

    Parameters
    ----------
    data_path : Path
        Directory containing the forecast files.
    var : str
        Forecast variable as it appears in the file names.
    S : str, optional
        Issue date as it appears in the file names, e.g. "Jan2026".
        Defaults to the most recent issue found under `data_path`.

    Returns
    -------
    tuple
        (forecast mean xr.Dataset, ELR coefficients xr.Dataset,
        climatological ELR coefficients xr.DataArray -- the "clim"
        variable of the clim-coefficients file).
    """
    if S is None:
        # default to the latest available issue
        S = get_elr_S(data_path, var)[0].strftime('%b%Y')
    suffix = f"{var}_{S}"
    mean_ds = xr.open_dataset(data_path / f"forecast_mean_{suffix}.nc")
    coeff_ds = xr.open_dataset(data_path / f"forecast_coeff_{suffix}.nc")
    clim_coeff_da = xr.open_dataset(
        data_path / f"forecast_clim_coeff_{suffix}.nc"
    )["clim"]
    return mean_ds, coeff_ds, clim_coeff_da
+
+
def targets_dict(fcst_ds, S=None):
    """Build dropdown options describing each lead's target season.

    Parameters
    ----------
    fcst_ds : xr.Dataset
        Any ELR forecast dataset carrying a "lead" coordinate (months).
    S : datetime.datetime
        Forecast issue date.

    Returns
    -------
    list[dict]
        One {"label", "value"} dict per lead: the label is the formatted
        3-month target range starting `lead` months after `S`, the value
        is the lead itself.
    """
    options = []
    for lead in fcst_ds["lead"].values:
        months_ahead = int(lead)
        # each target spans 3 months: lead .. lead + 2
        options.append({
            "label": predictions.target_range_formatting(
                S + relativedelta(months=months_ahead),
                S + relativedelta(months=months_ahead + 2),
                "months",
            ),
            "value": lead,
        })
    return options
+
+
def ELR_poe(ELR_params, quantity, param_coords, ELR_mean=None):
    """ELR probability of exceedance.

    Parameters
    ----------
    ELR_params : xr.Dataset
        ELR parameters B.
    quantity : xr.DataArray
        Physical value whose probability of exceedance is wanted.
    param_coords : str
        Name of the coordinate indexing the B parameters.
    ELR_mean : xr.DataArray, optional
        ELR forecast mean. Omit for climatology, whose parameter set has
        no mean term.

    Returns
    -------
    xr.DataArray
        Probability of exceedance over `quantity`'s coordinates.

    Notes
    -----
    With parameters Bi for i={0, 1, 2},
    F = B0 + B1 * ELR_mean + B2 * quantity
    (the B1 term drops out for climatology), and
    poe = exp(F) / (1 + exp(F)), i.e. the logistic function of F.
    """
    if ELR_mean is None:
        mean_term = 0
    else:
        mean_term = ELR_params.isel({param_coords: 1}) * ELR_mean
    F = (
        ELR_params.isel({param_coords: 0})
        + mean_term
        + ELR_params.isel({param_coords: -1}) * quantity
    )
    return np.exp(F) / (1 + np.exp(F))
+
+
def ELR_quantity(ELR_clim_params, p):
    """Climatological physical value at percentile `p`.

    Parameters
    ----------
    ELR_clim_params : xr.DataArray
        The climatological ELR parameters, indexed by coordinate "coeff".
    p : xr.DataArray
        Percentile value(s) in (0, 1).

    Returns
    -------
    The physical value whose climatological probability is `p`.

    Notes
    -----
    This is basically the inverse of ELR_poe in the climatological case:
    it solves p = exp(B1 + B2*q) / (1 + exp(B1 + B2*q)) for q, where
    B1 is coeff 1 and B2 is coeff 2.
    """
    intercept = ELR_clim_params.sel(coeff=1)
    slope = ELR_clim_params.sel(coeff=2)
    return np.log(p / ((1 - p) * np.exp(intercept))) / slope
+
+
def ELR_pdf(cdf, quantity, dim="percentile"):
    """Probability density function derived from a CDF.

    Parameters
    ----------
    cdf : xr.DataArray
        CDF values along coordinate `dim`.
    quantity : xr.DataArray
        Physical values matching the CDF along `dim`.
    dim : str, optional
        Name of the coordinate shared by `cdf` and `quantity`.

    Returns
    -------
    PDF as a function of `dim`.

    Notes
    -----
    The PDF is dCDF/dquantity, computed by the chain rule:
    (dCDF/ddim) * (ddim/dquantity).
    """
    dcdf_ddim = cdf.differentiate(dim)
    ddim = cdf[dim].diff(dim)
    dquantity = quantity.diff(dim)
    return dcdf_ddim * ddim / dquantity
+
+
def ELR_distribution(quantiles, clim, fcst_ds):
    """Returns the CDF and PDF distribution values of forecast and
    observations for given quantiles.

    Parameters
    ----------
    quantiles : xr.DataArray
        Values between 0 and 1 along coords named "percentile".
    clim : xr.DataArray
        ELR climatological parameters.
    fcst_ds : xr.Dataset
        Forecast mean ("fcst_mean") and ELR parameters ("coeff").

    Returns
    -------
    tuple
        (forecast CDF, climatological CDF, physical quantile values,
        forecast PDF, climatological PDF); forecast curves are averaged
        over the model dimension "mod".
    """
    # physical values of the requested climatological quantiles
    obs_ppf = ELR_quantity(clim, quantiles)
    # NOTE(review): the forecast parameter coordinate is spelled "coff" here
    # (and in proba below) while the clim one is "coeff" -- presumably
    # matching the forecast files; confirm against the datasets.
    fcst_cdf = ELR_poe(
        fcst_ds["coeff"], obs_ppf, "coff", ELR_mean=fcst_ds["fcst_mean"]
    )
    obs_cdf = ELR_poe(clim, obs_ppf, "coeff")
    # Unfortunately while the clim ELR params return mm/month,
    # the fcst ones expect total mm, hence the factor 3 (targets span
    # 3 months -- see targets_dict). Rescaled AFTER the CDFs, BEFORE the PDFs.
    obs_ppf = 3 * obs_ppf
    fcst_pdf = ELR_pdf(fcst_cdf, obs_ppf)
    obs_pdf = ELR_pdf(obs_cdf, obs_ppf)
    # In the IRI Maprooms, the probabilities are computed for all the models
    # and then averaged
    return fcst_cdf.mean("mod"), obs_cdf, obs_ppf, fcst_pdf.mean("mod"), obs_pdf
+
+
def proba(fcst_ds, clim, percentile=None, threshold=None, clipv=None):
    """Forecast probability of exceeding a historical percentile or a
    physical threshold.

    Exactly one of `percentile` and `threshold` must be given.

    Parameters
    ----------
    fcst_ds : xr.Dataset
        Forecast mean ("fcst_mean") and ELR parameters ("coeff").
    clim : xr.DataArray
        Climatological ELR parameters.
    percentile : real, optional
        Historical percentile whose exceedance probability is wanted;
        converted to a physical threshold through the climatology.
    threshold : real, optional
        Physical threshold whose exceedance probability is wanted.
        Must support .clip when `clipv` is given.
    clipv : real, optional
        Lower bound applied to the threshold (e.g. 0 for precipitation).

    Returns
    -------
    Probability of exceedance averaged over the model dimension "mod".

    Raises
    ------
    AssertionError
        If both or neither of `percentile` and `threshold` are given.
    """
    # Bug fix: the original asserted a (condition, message) tuple, which is
    # always truthy so the check never fired, and the condition itself was
    # inverted. Require exactly one of the two inputs.
    assert (percentile is None) != (threshold is None), (
        "Please assign either percentile or threshold"
    )
    if threshold is None:
        threshold = ELR_quantity(clim, percentile)
    if clipv is not None:
        threshold = threshold.clip(min=clipv)
    fcst_cdf = ELR_poe(
        fcst_ds["coeff"], threshold, "coff", ELR_mean=fcst_ds["fcst_mean"]
    )
    return fcst_cdf.mean("mod")
diff --git a/ccsr/layout_utilities.py b/ccsr/layout_utilities.py
new file mode 100644
index 00000000..776e4238
--- /dev/null
+++ b/ccsr/layout_utilities.py
@@ -0,0 +1,204 @@
+import dash_bootstrap_components as dbc
+from dash import dcc
+from dash import html
+import dash_leaflet as dlf
+
+
+IRI_BLUE = "rgb(25,57,138)"
+IRI_GRAY = "rgb(113,112,116)"
+LIGHT_GRAY = "#eeeeee"
+
+
def app_1(navbar, description, map, local):
    """
    Page layout: controls in the navbar at the top, then two columns --
    a narrow left column with descriptions, and a right column stacking
    the map above the local graph.
    """
    description_column = dbc.Col(
        description, sm=12, md=4,
        style={
            "background-color": "white", "border-style": "solid",
            "border-color": LIGHT_GRAY,
            "border-width": "0px 1px 0px 0px",
        },
    )
    map_row = dbc.Row([dbc.Col(
        map, width=12, style={"background-color": "white"},
    )])
    local_row = dbc.Row([dbc.Col(
        local, width=12,
        style={
            "background-color": "white",
            "min-height": "100px",
            "border-style": "solid",
            "border-color": LIGHT_GRAY,
            "border-width": "1px 0px 0px 0px",
        },
    )])
    graphics_column = dbc.Col(
        [map_row, local_row],
        sm=12, md=8, style={"background-color": "white"},
    )
    return dbc.Container(
        [
            dcc.Location(id="location", refresh=True),
            navbar,
            dbc.Row([description_column, graphics_column]),
        ],
        fluid=True, style={"padding-left": "0px", "padding-right": "0px"},
    )
+
+
def navbar(title, *elems):
    """
    Top navigation bar: a brand title, followed by the given control
    elements, terminated by a dismissable (initially hidden) dbc.Alert
    with id "map_warning".
    """
    brand = html.A(dbc.Row(
        [dbc.Col(dbc.NavbarBrand(title, className="ml-2"))],
        align="center", style={"padding-left": "5px", "color": "white"},
    ))
    warning = dbc.Alert(
        "Something went wrong",
        color="danger",
        dismissable=True,
        is_open=False,
        id="map_warning",
        style={"margin-bottom": "8px"},
    )
    # idiom fix: was [elems[i] for i in range(len(elems))]
    return dbc.Nav(
        [brand, *elems, warning],
        style={"background-color": IRI_GRAY},
    )
+
+
def description(title, subtitle, *elems):
    """
    Description panel: an H5 title, a subtitle sentence, a loading
    placeholder for the map description, then any further paragraphs or
    other html elements.
    """
    # idiom fix: was [elems[i] for i in range(len(elems))]
    return dbc.Container(
        [
            html.H5([title]), html.P(subtitle),
            dcc.Loading(html.P(id="map_description"), type="dot"),
            *elems,
        ],
        fluid=True, className="scrollable-panel",
        style={"padding-bottom": "1rem", "padding-top": "1rem"},
    )
+
+
def map(
    default_zoom,
    layers_control_position="topleft", scale_control_position="bottomright",
    cb_nTicks=9, cb_opacity=1, cb_tooltip=True,
    cb_position="topright", cb_width=10, cb_height=300,
):
    """
    A dlf.Map topped with an H5 title. The map carries a positioned
    layers control, scale control and colorbar (configured by the cb_*
    arguments), plus a marker layer group for local point selection.
    """
    # NOTE: shadows the builtin `map`; kept unchanged for existing callers.
    title = dcc.Loading(html.H5(
        id="map_title",
        style={
            "text-align": "center", "border-width": "1px",
            "border-style": "solid", "border-color": "grey",
            "margin-top": "3px", "margin-bottom": "3px",
        },
    ), type="dot")
    colorbar = dlf.Colorbar(
        id="colorbar",
        nTicks=cb_nTicks,
        opacity=cb_opacity,
        tooltip=cb_tooltip,
        position=cb_position,
        width=cb_width,
        height=cb_height,
        className="p-1",
        style={
            "background": "white", "border-style": "inset",
            "-moz-border-radius": "4px", "border-radius": "4px",
            "border-color": "LightGrey",
        },
    )
    leaflet_map = dcc.Loading(dlf.Map(
        [
            dlf.LayersControl(
                id="layers_control", position=layers_control_position
            ),
            dlf.LayerGroup(
                [dlf.Marker(id="loc_marker", position=(0, 0))],
                id="layers_group"
            ),
            dlf.ScaleControl(
                imperial=False, position=scale_control_position
            ),
            colorbar,
        ],
        id="map",
        center=None,
        zoom=default_zoom,
        style={"width": "100%", "height": "50vh"},
    ), type="dot")
    return dbc.Container([title, leaflet_map], fluid=True)
+
+
def local_single_tabbed(label, download_button=False):
    """
    A single dbc.Tab holding the local graph ("local_graph"),
    optionally preceded by a download-data button wired to a
    dcc.Download target.
    """
    children = []
    if download_button:
        children.append(html.Div([
            dbc.Button("Download local data", id="btn_csv", size="sm"),
            dcc.Download(id="download-dataframe-csv"),
        ], className="d-grid justify-content-md-end"))
    children.append(html.Div([dbc.Spinner(dcc.Graph(id="local_graph"))]))
    return dbc.Tabs([dbc.Tab(children, label=label)])
+
def local_double_tabbed(label, download_button=False):
    """
    Double tabbed local graphs with or without a download data button.

    Parameters
    ----------
    label : sequence of two str
        Labels for the first and second tab, in order.
    download_button : bool, optional
        Whether to prepend a "Download local data" button to each tab.
    """
    if download_button:
        # NOTE(review): the same button components (ids "btn_csv" and
        # "download-dataframe-csv") are placed in both tabs below; Dash
        # component ids must be unique -- confirm this renders and fires
        # callbacks as intended.
        button = [html.Div([
            dbc.Button("Download local data", id="btn_csv", size="sm"),
            dcc.Download(id="download-dataframe-csv"),
        ], className="d-grid justify-content-md-end")]
    else:
        button = []
    return dbc.Tabs([
        dbc.Tab(
            button + [
                html.Div([dbc.Spinner(dcc.Graph(id="local_graph_0"))]),
            ],
            label=label[0]
        ),
        dbc.Tab(
            button + [
                html.Div([dbc.Spinner(dcc.Graph(id="local_graph_1"))]),
            ],
            label=label[1]
        )
    ])
diff --git a/ccsr/maproom_utilities.py b/ccsr/maproom_utilities.py
new file mode 100644
index 00000000..f056f91f
--- /dev/null
+++ b/ccsr/maproom_utilities.py
@@ -0,0 +1,417 @@
+import pingrid
+import pandas as pd
+import dash
+import dash_leaflet as dlf
+import psycopg2
+from psycopg2 import sql
+import shapely
+from shapely import wkb
+from shapely.geometry.multipolygon import MultiPolygon
+from shapely.geometry import Polygon
+
+
+# Mapping Utilities
+
+STD_TIME_FORMAT = "%Y-%m-%d"
+HUMAN_TIME_FORMAT = "%-d %b %Y"
+
+
def get_geom(level, conf, shapes_adm_name):
    """ Return administrative geometries from the database, or synthetic
    ones when the configuration only provides a bounding box.

    Parameters
    ----------
    level: int
        level from the enumeration of a suite of administrative
        boundaries listed in `conf`. Synthetic case limited to 0 and 1.
    conf: dict
        dictionary listing desired administrative shapes and their
        attributes.
    shapes_adm_name: str
        key under conf["datasets"] naming the admin shapes set.

    Returns
    -------
    df : pandas.DataFrame
        a pd.DF with columns "label" (dtype=string),
        "key" (string or int depending on the table),
        and "the_geom" (shapely.Geometry)

    See Also
    --------
    synthesize_geom, sql2geom
    """
    datasets = conf["datasets"]
    if "bbox" in datasets:
        # no database in this configuration: fabricate geometries from bbox
        return synthesize_geom(datasets["bbox"], level=level)
    return sql2geom(datasets[shapes_adm_name][level]["sql"], conf["db"])
+
+
def sql2GeoJSON(shapes_sql, db_config):
    """ Form a GeoJSON dict from an sql request to a database.

    Parameters
    ----------
    shapes_sql: str
        sql request
    db_config: dict
        dictionary with host, port, user and dbname information

    Returns
    -------
    features: dict
        dictionary with features as key and GeoJSON of shapes_sql as value

    See Also
    --------
    sql2geom, geom2GeoJSON

    Examples
    --------
    shapes_sql: select id_1 as key, name_1 as label,
        ST_AsBinary(the_geom) as the_geom from sen_adm1
    db_config:
        host: postgres
        port: 5432
        user: ingrid
        dbname: iridb
    """
    geom_df = sql2geom(shapes_sql, db_config)
    return geom2GeoJSON(geom_df)
+
+
def geom2GeoJSON(df):
    """ Form a GeoJSON dict from a DataFrame of geometries.

    Parameters
    ----------
    df: pandas.DataFrame
        must carry "the_geom" (shapely geometry) and "label" columns

    Returns
    -------
    features: dict
        dictionary with features as key and the GeoJSON mapping of each
        geometry (annotated with its "label") as value

    See Also
    --------
    sql2geom, shapely.MultiPolygon, shapely.geometry.mapping
    """
    # emit every feature uniformly as a MultiPolygon
    df["the_geom"] = df["the_geom"].apply(
        lambda geom: geom if isinstance(geom, MultiPolygon) else MultiPolygon([geom])
    )
    shapes = df["the_geom"].apply(shapely.geometry.mapping)
    # attach the admin label to each feature dict
    for i in df.index:
        shapes[i]['label'] = df['label'][i]
    return {"features": shapes}
+
+
def sql2geom(shapes_sql, db_config):
    """ Form a geometric object from an sql query to a database.

    Parameters
    ----------
    shapes_sql: str
        sql query; must expose key, label and the_geom (WKB) columns
    db_config: dict
        dictionary with host, port, user and dbname information

    Returns
    -------
    df : pandas.DataFrame
        a pd.DF with columns "label" (dtype=string),
        "key" (string or int depending on the table),
        and "the_geom" (shapely.Geometry)

    See Also
    --------
    psycopg2.connect, psycopg2.sql, pandas.read_sql, shapely.wkb

    Examples
    --------
    shapes_sql: select id_1 as key, name_1 as label,
        ST_AsBinary(the_geom) as the_geom from sen_adm1
    db_config:
        host: postgres
        port: 5432
        user: ingrid
        dbname: iridb
    """
    with psycopg2.connect(**db_config) as conn:
        # Wrap the configured query in a CTE so that exactly the three
        # expected columns are selected, whatever else the query returns.
        s = sql.Composed(
            [
                sql.SQL("with g as ("),
                sql.SQL(shapes_sql),
                sql.SQL(
                    """
                    )
                    select
                    g.label, g.key, g.the_geom
                    from g
                    """
                ),
            ]
        )
        df = pd.read_sql(s, conn)
        # WKB bytes -> shapely geometry
        df["the_geom"] = df["the_geom"].apply(lambda x: wkb.loads(x.tobytes()))
    return df
+
+
def synthesize_geom(bbox, level):
    """ Synthesize a geometric object from a bounding box

    Parameters
    ----------
    bbox : array
        coordinates of bounding box of spatial domain as [W, S, E, N]
    level : int
        0 or 1 to mimic a containing admin level (0) with 1 geometry
        roughly smaller than `bbox` or a contained admin level (1) with
        2 geometries partitioning level 0

    Returns
    -------
    df : pandas.DataFrame
        a pd.DF with columns "label" (dtype=string),
        "key" (string or int depending on the table),
        and "the_geom" (shapely.Geometry)

    Raises
    ------
    AssertionError
        If the bbox is too small to shrink by the margins used here.
    Exception
        If `level` is not 0 or 1.

    See Also
    --------
    shapely.geometry.Polygon

    Notes
    -----
    A level 0 contained into bbox is necessary to test the clipping
    feature since `bbox` is also used to generate the fake data.
    """
    west, south, east, north = bbox
    assert (south + 0.25) <= (north - 0.5), (
        "Please extend latitudinal domain of bbox"
    )
    if east < west:
        assert (west + 0.25) >= (east - 0.5), (
            "Please extend longitudinal domain of bbox"
        )
    else:
        assert (west + 0.25) <= (east - 0.5), (
            "Please extend longitudinal domain of bbox"
        )
    # shrink the box so the synthesized admin is strictly inside bbox
    west = west + 0.25
    south = south + 0.25
    east = east - 0.5
    # Bug fix: was `norht = north - 0.5` (typo), which left `north`
    # unshrunk so the geometries touched the top edge of bbox.
    north = north - 0.5
    if level == 0:
        df = pd.DataFrame({"label": ["Guyane"], "key": [0], "the_geom": [Polygon([
            [west, south], [west, north], [east, north], [east, south]
        ])]})
    elif level == 1:  # 2 triangles partitioning level-0 box at its SW-NE diagonal
        df = pd.DataFrame({"label": ["NW", "SE"], "key": [1, 2], "the_geom": [
            Polygon([[west, south], [west, north], [east, north]]),
            Polygon([[west, south], [east, north], [east, south]]),
        ]})
    else:
        raise Exception("level must be 0 or 1")
    return df
+
+
def make_adm_overlay(
    adm_name, adm_id, adm_geojson, adm_color, adm_lev, adm_weight, is_checked=False
):
    """ Build a dlf.Overlay wrapping a dlf.GeoJSON admin-boundary layer.

    Parameters
    ----------
    adm_name: str
        name to give the dlf.Overlay
    adm_id: str
        unique id of dlf.Overlay
    adm_geojson: dict
        GeoJSON of the admin
    adm_color: str
        color to give the dlf.GeoJSON
    adm_lev: int
        index used in the dlf.GeoJSON's pattern-matching id
    adm_weight: int
        weight to give the dlf.GeoJSON
    is_checked: boolean, optional
        whether dlf.Overlay is checked or not (default: not)

    Returns
    -------
    adm_layer : dlf.Overlay
        dlf.GeoJSON overlay with the given parameters

    See Also
    --------
    geom2GeoJSON for the format of `adm_geojson`,
    dash_leaflet.Overlay, dash_leaflet.GeoJSON
    """
    boundaries = dlf.GeoJSON(
        id={"type": "borders_adm", "index": adm_lev},
        data=adm_geojson,
        options={
            "fill": True,
            "color": adm_color,
            "weight": adm_weight,
            "fillOpacity": 0,
        },
    )
    return dlf.Overlay(
        boundaries,
        name=adm_name,
        id=adm_id,
        checked=is_checked,
    )
+
+
def initialize_map(data):
    """
    Map initialization values derived from `data`'s spatial domain.

    Parameters
    ----------
    data: xr.DataArray
        spatial data whose longitude and latitude coordinates are X and Y,
        assumed regularly gridded

    Returns
    -------
    tuple
        (lat_min, lat_max, lat_label, lon_min, lon_max, lon_label,
        center_of_the_map): min/max/labels (str) for the pick-a-point
        controls, and the map center as a [lat, lon] list
    """
    lats = data["Y"]
    lons = data["X"]
    center_of_the_map = [
        lats[int(lats.size / 2)].values,
        lons[int(lons.size / 2)].values,
    ]
    # grid resolutions; lat_res = Y[0] - Y[1] is positive only if Y is
    # ordered north-to-south -- assumed here, TODO confirm
    lat_res = (lats[0] - lats[1]).values
    lat_min = str((lats[-1] - lat_res / 2).values)
    lat_max = str((lats[0] + lat_res / 2).values)
    lon_res = (lons[1] - lons[0]).values
    lon_min = str((lons[0] - lon_res / 2).values)
    lon_max = str((lons[-1] + lon_res / 2).values)
    lat_label = f"{lat_min} to {lat_max} by {lat_res}˚"
    lon_label = f"{lon_min} to {lon_max} by {lon_res}˚"
    return (
        lat_min, lat_max, lat_label,
        lon_min, lon_max, lon_label,
        center_of_the_map
    )
+
+
def picked_location(
    data, initialization_cases, click_lat_lng, latitude, longitude
):
    """
    Resolve the selected location for the map loc_marker and the
    pick-a-point lat/lon controls.

    Parameters
    ----------
    data: xr.DataArray
        spatial data whose longitude and latitude coordinates are X and Y;
        the selection is snapped to its grid when possible
    initialization_cases: list[str]
        triggered-input ids whose changes reinitialize the map
    click_lat_lng: list
        [lat, lng] from clicking the map
    latitude: str
        Input from latitude pick-a-point control
    longitude: str
        Input from longitude pick-a-point control

    Returns
    -------
    [lat, lng], lat, lng
    """
    trigger = dash.ctx.triggered_id
    if trigger is None or trigger in initialization_cases:
        # (re)initialization: use the center of the data domain
        lat = data["Y"][int(data["Y"].size / 2)].values
        lng = data["X"][int(data["X"].size / 2)].values
    elif trigger == "map":
        lat = click_lat_lng[0]
        lng = click_lat_lng[1]
    else:
        lat = latitude
        lng = longitude
    try:
        nearest_grid = pingrid.sel_snap(data, lat, lng)
        lat = nearest_grid["Y"].values
        lng = nearest_grid["X"].values
    except KeyError:
        # point outside the data domain: keep the raw coordinates
        pass
    return [lat, lng], lat, lng
+
+
def layers_controls(
    url_str, url_id, url_name,
    adm_conf, global_conf, adm_id_suffix=None,
    street=True, topo=True,
):
    """
    Assemble the list of layers handed to dlf.LayersControl.

    Parameters
    ----------
    url_str: str
        dlf.TileLayer's url for the data overlay
    url_id: str
        dlf.Overlay's id where dlf.TileLayer is
    url_name: str
        dlf.Overlay's name where dlf.TileLayer is
    adm_conf: dict
        configuration of administrative boundaries overlays
    global_conf: dict
        configuration of maproom app
    adm_id_suffix: str, optional
        suffix in `adm_conf` dictionary that identifies the set of admin
        shapes to use
    street: boolean, optional
        include the cartodb street map as a dlf.BaseLayer
    topo: boolean, optional
        include the opentopomap topographic map as a dlf.BaseLayer

    Returns
    -------
    layers: list
        dlf.BaseLayer and dlf.Overlay items, ordered street, topo,
        admins, data overlay
    """
    suffix = "" if adm_id_suffix is None else adm_id_suffix
    admin_layers = []
    for i, adm in enumerate(adm_conf):
        geojson = geom2GeoJSON(get_geom(
            level=i,
            conf=global_conf,
            shapes_adm_name=f'shapes_adm{suffix}',
        ))
        admin_layers.append(make_adm_overlay(
            adm_name=adm["name"],
            adm_id=f'{adm["name"]}{suffix}',
            adm_geojson=geojson,
            adm_color=adm["color"],
            adm_lev=i + 1,
            # outer admin levels are drawn thicker
            adm_weight=len(adm_conf) - i,
            is_checked=adm["is_checked"],
        ))
    data_overlay = dlf.Overlay(
        dlf.TileLayer(url=url_str, opacity=1),
        id=url_id,
        name=url_name,
        checked=True,
    )
    base_layers = []
    if street:
        base_layers.append(dlf.BaseLayer(
            dlf.TileLayer(
                url="https://cartodb-basemaps-{s}.global.ssl.fastly.net/light_all/{z}/{x}/{y}.png",
            ),
            name="Street",
            checked=False,
        ))
    if topo:
        base_layers.append(dlf.BaseLayer(
            dlf.TileLayer(
                url="https://{s}.tile.opentopomap.org/{z}/{x}/{y}.png"
            ),
            name="Topo",
            checked=True,
        ))
    return base_layers + admin_layers + [data_overlay]
diff --git a/ccsr/pingrid b/ccsr/pingrid
new file mode 120000
index 00000000..70728dc2
--- /dev/null
+++ b/ccsr/pingrid
@@ -0,0 +1 @@
+../pingrid
\ No newline at end of file
diff --git a/ccsr/predictions.py b/ccsr/predictions.py
new file mode 100644
index 00000000..862aa9ad
--- /dev/null
+++ b/ccsr/predictions.py
@@ -0,0 +1,54 @@
+import pandas as pd
+
+
def target_range_format(leads_value, start_date, period_length, time_units):
    """ Format the forecast target range implied by a start date and lead.

    Parameters
    ----------
    leads_value : int
        Lead time, in `time_units`, from `start_date` to the beginning
        of the target period.
    start_date : Timestamp
        Start date for the forecast.
    period_length : int
        Length of the forecast target range period, in `time_units`.
    time_units: str
        Temporal unit of `leads_value` and `period_length`
        (a pandas DateOffset keyword such as "days" or "months").

    Returns
    -------
    date_range : str
        String of target date range.

    Notes
    -----
    If the provider represents lead time as something other than an int,
    convert it first. The output is the most concise form given which of
    the years and months coincide.
    """
    lead_offset = pd.offsets.DateOffset(**{time_units: leads_value})
    target_start = start_date + lead_offset
    # the period includes its first unit, hence period_length - 1
    length_offset = pd.offsets.DateOffset(**{time_units: period_length - 1})
    target_end = target_start + length_offset
    return target_range_formatting(target_start, target_end, time_units)
+
+
def target_range_formatting(target_start, target_end, time_units):
    """Render a start/end pair as a concise date range string.

    The day of month appears only for daily units; year (and month) are
    dropped from the start part when shared with the end part.
    """
    start = pd.Timestamp(target_start)
    end = pd.Timestamp(target_end)
    daily = time_units == "days"
    if start.year != end.year:
        start_fmt = "%-d %b %Y" if daily else "%b %Y"
    elif start.month != end.month:
        start_fmt = "%-d %b" if daily else "%b"
    else:
        # same month and year: the start contributes only its day
        start_fmt = "%-d"
    end_fmt = "%-d %b %Y" if daily else "%b %Y"
    return f"{start.strftime(start_fmt)}-{end.strftime(end_fmt)}"
+
diff --git a/ccsr/pycpt.py b/ccsr/pycpt.py
new file mode 100644
index 00000000..f8ba1b72
--- /dev/null
+++ b/ccsr/pycpt.py
@@ -0,0 +1,306 @@
+import glob
+import numpy as np
+import xarray as xr
+from pathlib import Path
+import predictions
+from scipy.stats import t, norm
+
+
def pycptv2_targets_dict(fcst_ds, start_date=None):
    """ Create a list of target options (labels and leads) for a given `start_date`

    Parameters
    ----------
    fcst_ds: xr.Dataset
        pycptv2-like xarray.Dataset
    start_date: str("%YYYY-%mm-%dd"), optional
        start date of the forecast. Default is None in which case
        returns dictionary of targets and leads for the first listed start.

    Returns
    -------
    targets : list[dict]
        list of dictionaries where keys are "label" and "value" and their
        values are respectively a formatted string of the target dates and a
        number for their lead times, for the given start date.

    See Also
    --------
    read_pycptv2dataset, predictions.target_range_formatting
    """
    if start_date is None:
        # NOTE(review): isel(S=[0]) picks the first S entry; whether that is
        # the latest start depends on S ordering — confirm against the data.
        fcst_ds = fcst_ds.isel(S=[0])
    else:
        fcst_ds = fcst_ds.sel(S=[start_date])
    if "Li" in fcst_ds.dims:
        # Multiple leads: keep only those whose target time T is defined
        # (NaT marks leads with no forecast for this start).
        valid_leads = fcst_ds["Li"].where(
            ~np.isnat(fcst_ds["T"].squeeze()), drop=True
        ).values
        targets = [
            {
                "label": predictions.target_range_formatting(
                    fcst_ds['Ti'].sel(Li=lead).squeeze().values,
                    fcst_ds['Tf'].sel(Li=lead).squeeze().values,
                    "months",
                ),
                "value": lead,
            }
            for lead in valid_leads
        ]
    else:
        # Single lead: derive it as whole months from start S to target
        # start Ti, wrapped into 0-11.
        targets = [{
            "label": predictions.target_range_formatting(
                fcst_ds['Ti'].squeeze().values,
                fcst_ds['Tf'].squeeze().values,
                "months"
            ),
            "value": (((
                fcst_ds['Ti'].dt.month - fcst_ds['S'].dt.month
            ) + 12) % 12).data[0]
        }]
    return targets
+
+
def get_fcst(fcst_conf):
    """ Create a forecast and obs dataset expected by maproom from a set of
    pyCPTv2 nc files

    Parameters
    ----------
    fcst_conf: dict
        dictionary indicating pyCPT datasets configuration (see config)

    Returns
    -------
    fcst_ds, obs : xr.Dataset, xr.DataArray
        dataset of forecast mean and variance and their obs

    Raises
    ------
    NotImplementedError
        if `fcst_conf` has no "forecast_path" key: synthetic (non file-backed)
        data is not supported yet.

    See Also
    --------
    read_pycptv2dataset
    """
    if "forecast_path" not in fcst_conf:
        # Only file-backed pyCPT datasets are supported for now.
        # NotImplementedError subclasses Exception, so existing callers
        # catching Exception still work.
        raise NotImplementedError("No synthetic case as of yet")
    fcst_ds, obs = read_pycptv2dataset(fcst_conf["forecast_path"])
    return fcst_ds, obs
+
+
def read_pycptv2dataset(data_path):
    """ Create a xr.Dataset from yearly pyCPT nc files for one or more targets of the
    year, for a forecast mean and variance variables, appending multiple Starts; and
    the corresponding xr.DataArray of the observations time series used to train
    those targets

    Parameters
    ----------
    data_path: str
        path of set of nc files, organized by targets if multiple ones.

    Returns
    -------
    fcst_ds, obs : xr.Dataset, xr.DataArray
        dataset of forecast mean and variance from `data_path` and their obs

    See Also
    --------
    read_pycptv2dataset_single_target

    Notes
    -----
    Whether `data_path` contains multiple targets or not is determined by whether the
    observation obs.nc file(s) is a direct child of `data_path` or under target-wise
    folders that have no restriction on how they are named.
    If multiple targets, `fcst_ds` is expanded in to a square with the L dimension,
    and so are the Ts coordinates that are turned into variables.
    If single target, `fcst_ds` remains 1D against S only and Ts remain coordinates
    of S.
    `obs` is in any case always appended and sorted into a natural 1D time series
    """
    data_path = Path(data_path)
    children = list(data_path.iterdir())
    # obs.nc directly under data_path means a single-target layout.
    if 'obs.nc' in map(lambda x: str(x.name), children):
        mu_slices, var_slices, obs = read_pycptv2dataset_single_target(data_path)
    else:
        # Only expand the lead dimension when there is more than one target.
        expand_L = len(children) > 1
        mu_slices, var_slices, obs = [], [], []
        for target in children:
            new_mu_slices, new_var_slices, new_obs = (
                read_pycptv2dataset_single_target(target, expand_L=expand_L)
            )
            mu_slices += new_mu_slices
            var_slices += new_var_slices
            obs.append(new_obs)
        obs = xr.concat(obs, "T")
    fcst_ds = xr.combine_by_coords(mu_slices).merge(xr.combine_by_coords(var_slices))
    obs = obs.sortby(obs["T"])
    return fcst_ds, obs
+
+
def read_pycptv2dataset_single_target(data_path, expand_L=False):
    """ Lists DataArrays from yearly pyCPT nc files for a single target of the
    year, for a forecast mean and variance variables, appending multiple Starts; and
    the corresponding xr.DataArray of the observations time series used to train that
    target

    Parameters
    ----------
    data_path: str
        path of set of nc files for a single target, organized under one or more
        Start month of the year folders mm:02
    expand_L: boolean, optional
        Expand xr.Dataset with a lead Li dimension. Default is False

    Returns
    -------
    mu_slices, var_slices, obs : tuple(list[xr.DataArray])
        tuple of list of forecast mean and list of forecast variance from `data_path`
        and their obs

    See Also
    --------
    open_var

    Notes
    -----
    To use in read_pycptv2dataset expecting multiple targets, expand_L must be True
    in which case T coordinates become variables and Li is expanded to all variables.
    If single target, expand_L must be False and Ts remain coordinates of S only.
    """
    mu_slices = []
    var_slices = []
    # Start-month folders are named "01" through "12"; collect only those
    # that exist for this target.
    for mm in range(1, 13):
        monthly_path = Path(data_path) / f'{mm:02}'
        if monthly_path.exists():
            mu_slices.append(open_var(
                monthly_path, 'MME_deterministic_forecast_*.nc', expand_L=expand_L
            ))
            var_slices.append(open_var(
                monthly_path,
                'MME_forecast_prediction_error_variance_*.nc',
                expand_L=expand_L,
            ))
    obs = xr.open_dataarray(data_path / "obs.nc")
    return mu_slices, var_slices, obs
+
+
def open_var(path, filepattern, expand_L=False):
    """ Build a xr.Dataset of yearly pyCPT nc files for a single Start of the year
    and target of the year (or lead), for a single variable

    Parameters
    ----------
    path: pathlib(Path)
        path where nc files are
    filepattern: str
        files name pattern with year replaced by a *
    expand_L: boolean, optional
        Expand xr.Dataset with a lead Li dimension. Default is False

    Returns
    -------
    ds : xr.Dataset
        dataset of `filepattern` variable

    Notes
    -----
    To use in read_pycptv2dataset expecting multiple targets, expand_L must be True
    in which case T coordinates become variables and Li is expanded to all variables.
    If single target, expand_L must be False and Ts remain coordinates of S only.
    """
    # One file per year; concatenate along target time then index by start S.
    yearly_slices = (xr.open_dataset(fname) for fname in path.glob(filepattern))
    ds = xr.concat(yearly_slices, 'T').swap_dims(T='S')
    if expand_L:
        first_start = ds.isel(S=[0])
        # Lead in whole months from start S to target start Ti, wrapped to 0-11.
        Li = (
            ((first_start["Ti"].dt.month - first_start["S"].dt.month) + 12) % 12
        ).data
        ds = ds.reset_coords(["T", "Ti", "Tf"]).expand_dims(dim={"Li": Li})
    return ds
+
+
def distribution(quantiles, obs, fcst_ds, config):
    """ Compute forecast and climatological (obs) distribution curves.

    Parameters
    ----------
    quantiles : xr.DataArray
        quantile levels at which to evaluate the distributions.
    obs : xr.DataArray
        observations time series (dimension T) used to train the forecast.
    fcst_ds : xr.Dataset
        forecast dataset providing "deterministic" and, depending on
        `config["y_transform"]`, either "hcst" or "prediction_error_variance".
    config : dict
        maproom configuration; reads keys "y_transform" and "variable".

    Returns
    -------
    fcst_ppf, obs_ppf, obs_quant, fcst_pdf, obs_pdf : tuple
        forecast (Student-t) and obs (normal) percent point functions,
        obs empirical quantiles, and the corresponding density functions.
    """
    obs_q, obs_mu = xr.broadcast(quantiles, obs.mean(dim="T"))
    obs_stddev = obs.std(dim="T")
    obs_ppf = xr.apply_ufunc(
        norm.ppf, obs_q, kwargs={"loc": obs_mu, "scale": obs_stddev},
    ).rename("obs_ppf")
    # Obs quantiles
    obs_quant = obs.quantile(quantiles, dim="T")
    # Forecast CDF
    fcst_q, fcst_mu = xr.broadcast(quantiles, fcst_ds["deterministic"])
    # Degrees of freedom from the data files when present and parseable,
    # else fall back to obs sample size - 1. Narrowed from a bare except so
    # KeyboardInterrupt/SystemExit and real bugs are not swallowed.
    try:
        fcst_dof = int(fcst_ds["fcst_var"].attrs["dof"])
    except (KeyError, ValueError, TypeError):
        fcst_dof = obs["T"].size - 1
    if config["y_transform"]:
        hcst_err_var = (np.square(obs - fcst_ds["hcst"]).sum(dim="T")) / fcst_dof
        # fcst variance is hindcast variance weighted by (1+xvp)
        # but data files don't have xvp neither can we recompute it from them
        # thus xvp=0 is an approximation, acceptable dixit Simon Mason
        # The line below is thus just a reminder of the above
        xvp = 0
        fcst_var = hcst_err_var * (1 + xvp)
    else:
        fcst_var = fcst_ds["prediction_error_variance"]
    fcst_ppf = xr.apply_ufunc(
        t.ppf, fcst_q, fcst_dof,
        kwargs={"loc": fcst_mu, "scale": np.sqrt(fcst_var)},
    ).rename("fcst_ppf")
    if config["variable"] == "Precipitation":
        # Precipitation cannot be negative; clip both curves at 0.
        fcst_ppf = fcst_ppf.clip(min=0)
        obs_ppf = obs_ppf.clip(min=0)
    fcst_pdf = xr.apply_ufunc(
        t.pdf, fcst_ppf, fcst_dof,
        kwargs={"loc": fcst_ds["deterministic"], "scale": np.sqrt(fcst_var)},
    ).rename("fcst_pdf")
    obs_pdf = xr.apply_ufunc(
        norm.pdf, obs_ppf,
        kwargs={"loc": obs_mu, "scale": obs_stddev},
    ).rename("obs_pdf")
    return fcst_ppf, obs_ppf, obs_quant, fcst_pdf, obs_pdf
+
+
def proba(variable, obs, percentile, config, threshold, fcst_ds):
    """ Compute the forecast CDF evaluated at a threshold or obs percentile.

    Parameters
    ----------
    variable : str
        "Percentile" to evaluate at an obs percentile, anything else to
        evaluate at the raw `threshold` value.
    obs : xr.DataArray
        observations time series (dimension T) used to train the forecast.
    percentile : float
        percentile level used when `variable` is "Percentile".
    config : dict
        maproom configuration; reads keys "variable" and "y_transform".
    threshold : float
        physical threshold used when `variable` is not "Percentile".
    fcst_ds : xr.Dataset
        forecast dataset providing "deterministic" and, depending on
        `config["y_transform"]`, either "hcst" or "prediction_error_variance".

    Returns
    -------
    fcst_cdf : xr.DataArray
        2D (squeezed) probability field with lon/lat naming for pingrid.tile.
    """
    if variable == "Percentile":
        # Convert the percentile into a physical threshold via the obs
        # climatological normal distribution.
        obs_mu = obs.mean(dim="T")
        obs_stddev = obs.std(dim="T")
        obs_ppf = xr.apply_ufunc(
            norm.ppf,
            percentile,
            kwargs={"loc": obs_mu, "scale": obs_stddev},
        )
        if config["variable"] == "Precipitation":
            # Precipitation cannot be negative.
            obs_ppf = obs_ppf.clip(min=0)
    else:
        obs_ppf = threshold
    # Forecast CDF
    # Degrees of freedom from the data files when present and parseable,
    # else fall back to obs sample size - 1. Narrowed from a bare except so
    # KeyboardInterrupt/SystemExit and real bugs are not swallowed.
    try:
        fcst_dof = int(fcst_ds["fcst_var"].attrs["dof"])
    except (KeyError, ValueError, TypeError):
        fcst_dof = obs["T"].size - 1
    if config["y_transform"]:
        hcst_err_var = (np.square(obs - fcst_ds["hcst"]).sum(dim="T")) / fcst_dof
        # fcst variance is hindcast variance weighted by (1+xvp)
        # but data files don't have xvp neither can we recompute it from them
        # thus xvp=0 is an approximation, acceptable dixit Simon Mason
        # The line below is thus just a reminder of the above
        xvp = 0
        fcst_var = hcst_err_var * (1 + xvp)
    else:
        fcst_var = fcst_ds["prediction_error_variance"]

    fcst_cdf = xr.DataArray( # pingrid.tile expects a xr.DA but obs_ppf is never that
        data = xr.apply_ufunc(
            t.cdf,
            obs_ppf,
            fcst_dof,
            kwargs={
                "loc": fcst_ds["deterministic"],
                "scale": np.sqrt(fcst_var),
            },
        ),
        # Naming conventions for pingrid.tile
        coords = fcst_ds.rename({"X": "lon", "Y": "lat"}).coords,
        dims = fcst_ds.rename({"X": "lon", "Y": "lat"}).dims
    # pingrid.tile wants 2D data
    ).squeeze()
    return fcst_cdf