Skip to content
20 changes: 6 additions & 14 deletions argopy/extensions/canyon_b.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ def decorator(func):
delayed = None

from ..errors import InvalidDatasetStructure, DataNotFound
from ..utils import path2assets, to_list, point_in_polygon
from ..utils import to_list, point_in_polygon
from argopy.utils.assets import Asset
from . import register_argo_accessor, ArgoAccessorExtension


Expand Down Expand Up @@ -170,9 +171,6 @@ def __init__(self, *args, **kwargs):
if self._argo.N_POINTS == 0:
raise DataNotFound("Empty dataset, no data to transform !")

self.path2coef = Path(path2assets).joinpath(
"canyon-b"
) # Path to CANYON-B assets

def get_param_attrs(self, param: str) -> dict:
"""
Expand Down Expand Up @@ -447,22 +445,16 @@ def load_weights(self, param: str) -> pd.DataFrame:

Returns
-------
pd.DataFrame
:class:`pandas.DataFrame`
DataFrame containing the neural network weights for the specified parameter.
"""

if param in ["AT", "pCO2", "NO3", "PO4", "SiOH4"]:
weights = pd.read_csv(
self.path2coef.joinpath(f"wgts_{param}.txt"), header=None, sep="\t"
)
weights = Asset.load(f"wgts_{param}.txt", header=None, sep="\t")
elif param == "DIC":
weights = pd.read_csv(
self.path2coef.joinpath("wgts_CT.txt"), header=None, sep="\t"
)
weights = Asset.load("wgts_CT.txt", header=None, sep="\t")
else:
weights = pd.read_csv(
self.path2coef.joinpath("wgts_pH.txt"), header=None, sep="\t"
)
weights = Asset.load("wgts_pH.txt", header=None, sep="\t")

return weights

Expand Down
53 changes: 12 additions & 41 deletions argopy/extensions/canyon_med.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@
from typing import Union, List

from ..errors import InvalidDatasetStructure, DataNotFound
from ..utils import path2assets, to_list
from ..utils import to_list
from argopy.utils.assets import Asset
from . import register_argo_accessor, ArgoAccessorExtension


Expand Down Expand Up @@ -78,7 +79,6 @@ def __init__(self, *args, **kwargs):
raise DataNotFound("Empty dataset, no data to transform !")

self.n_list = 5
self.path2coef = Path(path2assets).joinpath("canyon-med")
self._input = None # Private CANYON-MED input dataframe

@property
Expand Down Expand Up @@ -186,50 +186,21 @@ def isin_medsea(row):
def load_normalisation_factors(self, param, subset="F"):
    """Load the CANYON-MED normalisation factors (mean, std) for a parameter.

    Parameters
    ----------
    param : str
        Target parameter name; mapped to an asset file suffix via
        :meth:`param2suff`.
    subset : str, default: "F"
        Training subset identifier used in the asset file names.

    Returns
    -------
    tuple
        ``(moy_sub, std_sub)`` numpy arrays holding the normalisation
        means and standard deviations.
    """
    suff = self.param2suff(param)

    # The asset text files are 3-space separated, hence the regex separator
    # and the python engine (the C engine does not support regex separators).
    moy_sub = Asset.load(
        f"canyon-med:moy_{suff}_{subset}.txt", sep=" {3}", header=None, engine="python"
    ).values
    std_sub = Asset.load(
        f"canyon-med:std_{suff}_{subset}.txt", sep=" {3}", header=None, engine="python"
    ).values

    return moy_sub, std_sub

def load_weights(self, param, subset, i):
suff = self.param2suff(param)

b1 = pd.read_csv(
self.path2coef.joinpath("poids_%s_b1_%s_%i.txt" % (suff, subset, i)),
header=None,
)
b2 = pd.read_csv(
self.path2coef.joinpath("poids_%s_b2_%s_%i.txt" % (suff, subset, i)),
header=None,
)
b3 = pd.read_csv(
self.path2coef.joinpath("poids_%s_b3_%s_%i.txt" % (suff, subset, i)),
header=None,
)
IW = pd.read_csv(
self.path2coef.joinpath("poids_%s_IW_%s_%i.txt" % (suff, subset, i)),
sep=r"\s+",
header=None,
)
LW1 = pd.read_csv(
self.path2coef.joinpath("poids_%s_LW1_%s_%i.txt" % (suff, subset, i)),
sep=r"\s+",
header=None,
)
LW2 = pd.read_csv(
self.path2coef.joinpath("poids_%s_LW2_%s_%i.txt" % (suff, subset, i)),
sep=r"\s+",
header=None,
)
b1 = Asset.load(f"canyon-med:poids_{suff}_b1_{subset}_{i}.txt", header=None)
b2 = Asset.load(f"canyon-med:poids_{suff}_b2_{subset}_{i}.txt", header=None)
b3 = Asset.load(f"canyon-med:poids_{suff}_b3_{subset}_{i}.txt", header=None)

IW = Asset.load(f"canyon-med:poids_{suff}_IW_{subset}_{i}.txt", header=None, sep=r"\s+")
LW1 = Asset.load(f"canyon-med:poids_{suff}_LW1_{subset}_{i}.txt", header=None, sep=r"\s+")
LW2 = Asset.load(f"canyon-med:poids_{suff}_LW2_{subset}_{i}.txt", header=None, sep=r"\s+")

# Using float128 arrays avoid the error or warning "overflow encountered in exp" raised by the
# activation function
Expand Down
17 changes: 5 additions & 12 deletions argopy/plot/plot.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,12 @@
#
import warnings
import logging
import os
import json
from copy import copy

import xarray as xr
import pandas as pd
import numpy as np
from typing import Union
import importlib

from ..options import OPTIONS
from ..utils.loggers import warnUnless
Expand All @@ -25,6 +22,7 @@
from ..utils.lists import subsample_list
from ..utils.casting import to_list
from ..errors import InvalidDatasetStructure
from argopy.utils.assets import Asset

from .utils import STYLE, has_seaborn, has_mpl, has_cartopy, has_ipython, has_ipywidgets
from .utils import axes_style, latlongrid, land_feature
Expand All @@ -50,13 +48,6 @@

log = logging.getLogger("argopy.plot.plot")

path2assets = importlib.util.find_spec(
"argopy.static.assets"
).submodule_search_locations[0]

with open(os.path.join(path2assets, "data_types.json"), "r") as f:
DATA_TYPES = json.load(f)


def guess_cmap(hue: str) -> str | None:
"""Try to guess the ArgoColors colormap name to use as a function of the variable to plot
Expand Down Expand Up @@ -756,6 +747,8 @@ def scatter_plot(
"""
warnUnless(has_mpl, "requires matplotlib installed")



#deprecation
if 'this_param' in kwargs:
warnings.warn(
Expand All @@ -779,8 +772,8 @@ def scatter_plot(
)
y = kwargs['this_y'] # Safe fallback on new argument

if param in DATA_TYPES["data"]["str"]:
raise ValueError("scatter_plot does not support parameter of string type (yet !)")
if param in Asset.load('data_types')["data"]["str"]:
raise ValueError("scatter_plot does not support string data type (yet !)")

# Transform the 'cmap' argument into a mpl.colors.Colormap instance
a_color = None
Expand Down
7 changes: 2 additions & 5 deletions argopy/related/argo_documentation.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,14 @@
import os
import json
import pandas as pd
from functools import lru_cache
import requests

from ..stores import httpstore, memorystore
from ..options import OPTIONS
from .utils import path2assets
from argopy.utils.assets import Asset


# Load the ADMT documentation catalogue once at import time, from the
# packaged static assets (via the Asset loader):
ADMT_CATALOGUE = Asset.load('admt_documentation_catalogue')['data']['catalogue']


class ArgoDocs:
Expand Down
7 changes: 3 additions & 4 deletions argopy/related/reference_tables.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
import pandas as pd
from functools import lru_cache
import collections
from pathlib import Path

from ..stores import httpstore, filestore
from ..stores import httpstore
from ..options import OPTIONS
from ..utils import path2assets
from argopy.utils.assets import Asset


# Valid NVS reference table names, loaded once at import time from the
# packaged static assets (via the Asset loader):
VALID_REF = Asset.load('nvs_reference_tables')['data']['valid_ref']


class ArgoNVSReferenceTables:
Expand Down
14 changes: 4 additions & 10 deletions argopy/related/utils.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,10 @@
import importlib
import os
import json
import logging

from argopy.utils.assets import Asset
from . import ArgoNVSReferenceTables


log = logging.getLogger("argopy.related.utils")
path2assets = importlib.util.find_spec(
"argopy.static.assets"
).submodule_search_locations[0]


def load_dict(ptype):
Expand All @@ -21,8 +17,7 @@ def load_dict(ptype):
profilers = dict(sorted(profilers.items()))
return profilers
except Exception:
with open(os.path.join(path2assets, "profilers.json"), "rb") as f:
jsdata = json.load(f)
jsdata = Asset.load('profilers')
log.debug(
"Failed to load the ArgoNVSReferenceTables R08 for profiler types, fall back on static assets last updated on %s"
% jsdata["last_update"]
Expand All @@ -37,8 +32,7 @@ def load_dict(ptype):
institutions = dict(sorted(institutions.items()))
return institutions
except Exception:
with open(os.path.join(path2assets, "institutions.json"), "rb") as f:
jsdata = json.load(f)
jsdata = Asset.load('institutions')
log.debug(
"Failed to load the ArgoNVSReferenceTables R04 for institutions name, fall back on static assets last updated on %s"
% jsdata["last_update"]
Expand Down
22 changes: 22 additions & 0 deletions argopy/tests/test_utils_assets.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import pandas as pd
import pytest
from argopy.utils.assets import Asset


class Test_Asset:
    """Unit tests for the :class:`Asset` static-assets loader."""

    # JSON-like assets: with/without the ".json" extension, and namespaced
    # ("schema:") identifiers.
    json_assets = [
        'gdac_servers.json',
        'data_types',
        'schema:argo.sensor.schema.json',
        'schema:argo.float.schema',
    ]

    @pytest.mark.parametrize(
        "asset", json_assets, indirect=False, ids=[str(a) for a in json_assets]
    )
    def test_load_json(self, asset):
        # A JSON asset must come back as a plain dictionary
        assert isinstance(Asset.load(asset), dict)

    # Tabular text assets, addressed with a "folder:" prefix.
    csv_assets = ['canyon-b:wgts_AT.txt']

    @pytest.mark.parametrize(
        "asset", csv_assets, indirect=False, ids=[str(a) for a in csv_assets]
    )
    def test_load_csv(self, asset):
        # Default load of a tabular asset yields a DataFrame
        assert isinstance(Asset.load(asset), pd.DataFrame)
        # Reader keyword arguments must be forwarded to the underlying parser
        assert isinstance(Asset.load(asset, header=None, sep="\t"), pd.DataFrame)
23 changes: 1 addition & 22 deletions argopy/tests/test_utils_locals.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
import os

import pandas as pd
import pytest
import io
import argopy
from argopy.utils.locals import modified_environ, Asset
from argopy.utils.locals import modified_environ


@pytest.mark.parametrize("conda", [False, True],
Expand All @@ -22,22 +20,3 @@ def test_modified_environ():
assert os.environ['DUMMY_ENV_ARGOPY'] == 'toto'
assert os.environ['DUMMY_ENV_ARGOPY'] == 'initial'
os.environ.pop('DUMMY_ENV_ARGOPY')


# NOTE(review): this class is the deletion side of the diff — the same tests
# now live in argopy/tests/test_utils_assets.py. Kept byte-identical here.
class Test_Asset():
# JSON-like assets: with/without extension, and namespaced ("schema:") ids.
assets = ['gdac_servers.json', 'data_types', 'schema:argo.sensor.schema.json', 'schema:argo.float.schema']
assets_id = [f"{a}" for a in assets]
@pytest.mark.parametrize("asset", assets, indirect=False, ids=assets_id)
def test_load_json(self, asset):
data = Asset.load(asset)
assert isinstance(data, dict)

# Tabular text asset addressed with a "folder:" prefix; the class attribute
# `assets` is deliberately re-bound: each parametrize captured its own list.
assets = ['canyon-b:wgts_AT.txt']
assets_id = [f"{a}" for a in assets]
@pytest.mark.parametrize("asset", assets, indirect=False, ids=assets_id)
def test_load_csv(self, asset):
data = Asset.load(asset)
assert isinstance(data, pd.DataFrame)

# Reader keyword arguments are forwarded to the underlying parser.
data = Asset.load(asset, header=None, sep="\t")
assert isinstance(data, pd.DataFrame)
8 changes: 1 addition & 7 deletions argopy/utils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,6 @@
modified_environ,
get_sys_info, # noqa: F401
netcdf_and_hdf5_versions, # noqa: F401
Asset,
)
from .monitors import monitor_status, badge, fetch_status # noqa: F401
from .geo import (
Expand All @@ -69,9 +68,6 @@
from . import optical_modeling
from .carbonate import calculate_uncertainties, error_propagation

import importlib
path2assets = importlib.util.find_spec('argopy.static.assets').submodule_search_locations[0]


__all__ = (
# Checkers:
Expand Down Expand Up @@ -121,12 +117,10 @@
# Accessories classes (specific objects):
"Registry",
"float_wmo",
# Locals (environments, versions, systems):
"path2assets",
# Locals (environments, versions, systems, assets):
"show_versions",
"show_options",
"modified_environ",
"Asset",
# Monitors
"monitor_status",
# Geo (space/time data utilities)
Expand Down
Loading
Loading