From 6a1a2b9e299a04422629b6ccbe9383b361128d16 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Fri, 26 Apr 2024 17:56:50 +0100 Subject: [PATCH 01/18] dev --- cfdm/core/meta/docstringrewrite.py | 62 ++++++++++++++++++++++++++---- 1 file changed, 54 insertions(+), 8 deletions(-) diff --git a/cfdm/core/meta/docstringrewrite.py b/cfdm/core/meta/docstringrewrite.py index cba9844d5..6954feea9 100644 --- a/cfdm/core/meta/docstringrewrite.py +++ b/cfdm/core/meta/docstringrewrite.py @@ -1,10 +1,21 @@ import inspect - +from re import compile from ..functions import CF _VN = CF() +p = compile("{{.*?}}") + +#def multiple_replace(replacements, text): +# # Create a regular expression from the dictionary keys +# regex = compile(f"{'|'.join(map(re.escape, replacements.keys()))}") +# # For each match, look-up corresponding value in dictionary +# return regex.sub(lambda mo: replacements[mo.group()], text) +aaa = [0] +xxx = [0] +yyy = [0] +zzz = [0] class DocstringRewriteMeta(type): """Modify docstrings at time of import. @@ -611,18 +622,33 @@ def _docstring_update( config: `dict` """ +# print(repr(f)) + zzz[0] += 1 if class_docstring is not None: doc = class_docstring else: doc = f.__doc__ if doc is None or "{{" not in doc: return doc - + yyy[0] += 1 + # ------------------------------------------------------------ # Do general substitutions first # ------------------------------------------------------------ - for key, value in config.items(): - # Substitute the key for the value + substitutions = p.findall(doc) + aaa[0] = max(aaa[0], len(substitutions)) + +# print (config.keys()) +# if config: +# doc = multiple_replace(config, doc) + + + for key in substitutions: + xxx[0] += 1 + value = config.get(key) + if value is None: + continue + try: # Compiled regular expression substitution doc = key.sub(value, doc) @@ -630,20 +656,40 @@ def _docstring_update( # String substitution doc = doc.replace(key, value) + + + #for key, value in config.items(): + # xxx[0] += 1 + # # Substitute the key for the value + # try: + # # Compiled regular expression substitution + # doc = key.sub(value, doc) + # except AttributeError: + # # String substitution + # doc = doc.replace(key, value) + # ------------------------------------------------------------ # Now do special substitutions # ------------------------------------------------------------ # Insert the name of the package - doc = doc.replace("{{package}}", package_name) + if "{{package}}" in substitutions: + xxx[0] += 1 + doc = doc.replace("{{package}}", package_name) # Insert the name of the class containing this method - doc = doc.replace("{{class}}", class_name) + if "{{class}}" in substitutions: + xxx[0] += 1 + doc = doc.replace("{{class}}", class_name) # Insert the lower case name of the class containing this method - doc = doc.replace("{{class_lower}}", class_name_lower) + if "{{class_lower}}" in substitutions: + xxx[0] += 1 + doc = doc.replace("{{class_lower}}", class_name_lower) # Insert the CF version - doc = doc.replace("{{VN}}", _VN) + if "{{VN}}" in substitutions: + xxx[0] += 1 + doc = doc.replace("{{VN}}", _VN) # ---------------------------------------------------------------- # Set the rewritten docstring on the method From 86709bcb95cc7cd00951c9d0b2d0b5c4a6119089 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Mon, 29 Apr 2024 16:13:41 +0100 Subject: [PATCH 02/18] dev --- cfdm/core/meta/docstringrewrite.py | 151 +++++++++++++++-------------- 1 file changed, 79 insertions(+), 72 deletions(-) diff --git a/cfdm/core/meta/docstringrewrite.py 
b/cfdm/core/meta/docstringrewrite.py index 6954feea9..64e5c6db1 100644 --- a/cfdm/core/meta/docstringrewrite.py +++ b/cfdm/core/meta/docstringrewrite.py @@ -368,21 +368,21 @@ def __new__(cls, class_name, parents, attrs): if doc_template is not None: doc = doc_template - if doc is not None and "{{" in doc: - doc_template = doc - doc = DocstringRewriteMeta._docstring_update( - package_name, - class_name, - class_name_lower, - None, - None, - docstring_rewrite, - class_docstring=doc, - ) - attrs["__doc__"] = doc +# if doc is not None and "{{" in doc: + doc_template = doc + doc = DocstringRewriteMeta._docstring_update( + package_name, + class_name, + class_name_lower, + None, + None, + docstring_rewrite, + class_docstring=doc, + ) + attrs["__doc__"] = doc - if set_doc_template_to_None: - doc_template = None + if set_doc_template_to_None: + doc_template = None attrs["__doc_template__"] = doc_template @@ -622,79 +622,86 @@ def _docstring_update( config: `dict` """ -# print(repr(f)) - zzz[0] += 1 +# zzz[0] += 1 if class_docstring is not None: doc = class_docstring else: doc = f.__doc__ - if doc is None or "{{" not in doc: - return doc - yyy[0] += 1 + + if doc is None: #or "{{" not in doc: + return doc + +# yyy[0] += 1 # ------------------------------------------------------------ # Do general substitutions first # ------------------------------------------------------------ substitutions = p.findall(doc) - aaa[0] = max(aaa[0], len(substitutions)) +# aaa[0] = max(aaa[0], len(substitutions)) + if substitutions: # print (config.keys()) # if config: # doc = multiple_replace(config, doc) - for key in substitutions: - xxx[0] += 1 - value = config.get(key) - if value is None: - continue - - try: - # Compiled regular expression substitution - doc = key.sub(value, doc) - except AttributeError: - # String substitution - doc = doc.replace(key, value) - + for key in substitutions: +# xxx[0] += 1 + value = config.get(key) + if value is None: + continue - - #for key, value in config.items(): - # xxx[0] += 1 - # # Substitute the key for the value - # try: - # # Compiled regular expression substitution - # doc = key.sub(value, doc) - # except AttributeError: - # # String substitution - # doc = doc.replace(key, value) - - # ------------------------------------------------------------ - # Now do special substitutions - # ------------------------------------------------------------ - # Insert the name of the package - if "{{package}}" in substitutions: - xxx[0] += 1 - doc = doc.replace("{{package}}", package_name) - - # Insert the name of the class containing this method - if "{{class}}" in substitutions: - xxx[0] += 1 - doc = doc.replace("{{class}}", class_name) - - # Insert the lower case name of the class containing this method - if "{{class_lower}}" in substitutions: - xxx[0] += 1 - doc = doc.replace("{{class_lower}}", class_name_lower) - - # Insert the CF version - if "{{VN}}" in substitutions: - xxx[0] += 1 - doc = doc.replace("{{VN}}", _VN) - - # ---------------------------------------------------------------- - # Set the rewritten docstring on the method - # ---------------------------------------------------------------- - if class_docstring is None: - f.__doc__ = doc + try: + # Compiled regular expression substitution + doc = key.sub(value, doc) + except AttributeError: + # String substitution + value = value.replace("{{package}}", package_name) + value = value.replace("{{class}}", class_name) + value = value.replace("{{class_lower}}", class_name_lower) + value = value.replace("{{VN}}", _VN) + doc = 
doc.replace(key, value) + + + + #for key, value in config.items(): + # xxx[0] += 1 + # # Substitute the key for the value + # try: + # # Compiled regular expression substitution + # doc = key.sub(value, doc) + # except AttributeError: + # # String substitution + # doc = doc.replace(key, value) + + # ------------------------------------------------------------ + # Do special substitutions after the general ones, in case + # the general one themselves contained special ones. + # ------------------------------------------------------------ + # Insert the name of the package +# if "{{package}}" in substitutions: +# xxx[0] += 1 +# doc = doc.replace("{{package}}", package_name) + + # Insert the name of the class containing this method +# if "{{class}}" in substitutions: +# xxx[0] += 1 + # doc = doc.replace("{{class}}", class_name) + + # Insert the lower case name of the class containing this method +# if "{{class_lower}}" in substitutions: +# xxx[0] += 1 + # doc = doc.replace("{{class_lower}}", class_name_lower) + + # Insert the CF version +# if "{{VN}}" in substitutions: +# xxx[0] += 1 + # doc = doc.replace("{{VN}}", _VN) + + # ---------------------------------------------------------------- + # Set the rewritten docstring on the method + # ---------------------------------------------------------------- + if class_docstring is None: + f.__doc__ = doc return doc From 5d0b48b710140c2a7c0de90ce613209289ee7017 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 30 Apr 2024 11:27:35 +0100 Subject: [PATCH 03/18] dev --- cfdm/core/cellconnectivity.py | 6 +- cfdm/core/docstring/docstring.py | 13 +- cfdm/core/meta/docstringrewrite.py | 188 +++++++++++------------------ cfdm/docstring/docstring.py | 10 +- cfdm/test/test_docstring.py | 5 + 5 files changed, 86 insertions(+), 136 deletions(-) diff --git a/cfdm/core/cellconnectivity.py b/cfdm/core/cellconnectivity.py index c9d9a2760..fcdb3ffd8 100644 --- a/cfdm/core/cellconnectivity.py +++ b/cfdm/core/cellconnectivity.py @@ -105,7 +105,7 @@ def construct_type(self): def del_connectivity(self, default=ValueError()): """Remove the connectivity. - {{{cell connectivity type}} + {{cell connectivity type}} .. versionadded:: (cfdm) 1.11.0.0 @@ -149,7 +149,7 @@ def del_connectivity(self, default=ValueError()): def has_connectivity(self): """Whether the connectivity type has been set. - {{{cell connectivity type}} + {{cell connectivity type}} .. versionadded:: (cfdm) 1.11.0.0 @@ -233,7 +233,7 @@ def get_connectivity(self, default=ValueError()): def set_connectivity(self, connectivity): """Set the connectivity type. - {{{cell connectivity type}} + {{cell connectivity type}} .. versionadded:: (cfdm) 1.11.0.0 diff --git a/cfdm/core/docstring/docstring.py b/cfdm/core/docstring/docstring.py index dc5428dad..93dbefbc8 100644 --- a/cfdm/core/docstring/docstring.py +++ b/cfdm/core/docstring/docstring.py @@ -9,13 +9,7 @@ Replacement text may not contain other non-special substitutions. -Keys must be a `str` or `re.Pattern` object: - -* If a key is a `str` then the corresponding value must be a string. - -* If a key is a `re.Pattern` object then the corresponding value must - be a string or a callable, as accepted by the `re.Pattern.sub` - method. +A key and its corresponding value must both be `str`. .. versionaddedd:: (cfdm) 1.8.7.0 @@ -94,7 +88,10 @@ "{{init data: data_like, optional}}": """data: data_like, optional Set the data. - {{data_like}} + A data_like object is any object that can be converted + to a `Data` object, i.e. 
`numpy` array_like objects, + `Data` objects, and {{package}} instances that contain + `Data` objects. The data also may be set after initialisation with the `set_data` method.""", diff --git a/cfdm/core/meta/docstringrewrite.py b/cfdm/core/meta/docstringrewrite.py index 64e5c6db1..8f681a89c 100644 --- a/cfdm/core/meta/docstringrewrite.py +++ b/cfdm/core/meta/docstringrewrite.py @@ -1,21 +1,9 @@ import inspect from re import compile -from ..functions import CF -_VN = CF() +base = compile("{{.*?}}") -p = compile("{{.*?}}") -#def multiple_replace(replacements, text): -# # Create a regular expression from the dictionary keys -# regex = compile(f"{'|'.join(map(re.escape, replacements.keys()))}") -# # For each match, look-up corresponding value in dictionary -# return regex.sub(lambda mo: replacements[mo.group()], text) - -aaa = [0] -xxx = [0] -yyy = [0] -zzz = [0] class DocstringRewriteMeta(type): """Modify docstrings at time of import. @@ -73,14 +61,6 @@ def __new__(cls, class_name, parents, attrs): if class_docstring_rewrite is not None: docstring_rewrite.update(class_docstring_rewrite(None)) - special = DocstringRewriteMeta._docstring_special_substitutions() - for key in special: - if key in docstring_rewrite: - raise ValueError( - f"Can't use {key!r} as a user-defined " - "docstring substitution." - ) - # ------------------------------------------------------------ # Find the package depth # ------------------------------------------------------------ @@ -368,21 +348,21 @@ def __new__(cls, class_name, parents, attrs): if doc_template is not None: doc = doc_template -# if doc is not None and "{{" in doc: - doc_template = doc - doc = DocstringRewriteMeta._docstring_update( - package_name, - class_name, - class_name_lower, - None, - None, - docstring_rewrite, - class_docstring=doc, - ) - attrs["__doc__"] = doc + if doc is not None and "{{" in doc: + doc_template = doc + doc = DocstringRewriteMeta._docstring_update( + package_name, + class_name, + class_name_lower, + None, + None, + docstring_rewrite, + class_docstring=doc, + ) + attrs["__doc__"] = doc - if set_doc_template_to_None: - doc_template = None + if set_doc_template_to_None: + doc_template = None attrs["__doc_template__"] = doc_template @@ -391,9 +371,6 @@ def __new__(cls, class_name, parents, attrs): # ------------------------------------------------------------ return super().__new__(cls, class_name, parents, attrs) - # ---------------------------------------------------------------- - # Private methods - # ---------------------------------------------------------------- @classmethod def _docstring_special_substitutions(cls): """Return the special docstring substitutions. @@ -423,7 +400,7 @@ def _docstring_special_substitutions(cls): The special docstring substitution identifiers. """ - return ("{{class}}", "{{class_lower}}", "{{package}}", "{{VN}}") + return ("{{class}}", "{{class_lower}}", "{{package}}") @staticmethod def _docstring_substitutions(cls): @@ -445,13 +422,7 @@ def _docstring_substitutions(cls): then the latter will *not* be replaced. This restriction is to prevent the possibility of infinite recursion. - A key must be either a `str` or a `re.Pattern` object. - - If a key is a `str` then the corresponding value must be a string. - - If a key is a `re.Pattern` object then the corresponding value - must be a string or a callable, as accepted by the - `re.Pattern.sub` method. + A key and its corresponding value must both be `str`. .. 
versionadded:: (cfdm) 1.8.7.0 @@ -605,103 +576,86 @@ def _docstring_update( config, class_docstring=None, ): - """Performs docstring substitutions on a method at import time. + """Perform docstring substitutions. + + Docstring substitutions are applied to a class or method at + import time. .. versionadded:: (cfdm) 1.8.7.0 :Parameters: package_name: `str` + The name of the package containing the class or + method. class_name: `str` + The name of the class. + + class_name_lower: `str` + The lower case name of the class. - f: class method + f: class method or `None` + The method, or `None` if a class docstring is being + updated. - method_name: `str` + method_name: `str` or `None` + The method name, or `None` if a class docstring is + being updated. config: `dict` + A dictionary containing the general docstring + substitutions. + + class_docstring, `str` or `None` + If docstring of a class, or `None` of a method + docstring is being updated. + + :Returns: + + `str` or `None` + The updated docstring, or `None` if there is no + docstring. """ -# zzz[0] += 1 if class_docstring is not None: doc = class_docstring else: doc = f.__doc__ - if doc is None: #or "{{" not in doc: - return doc - -# yyy[0] += 1 - - # ------------------------------------------------------------ - # Do general substitutions first - # ------------------------------------------------------------ - substitutions = p.findall(doc) -# aaa[0] = max(aaa[0], len(substitutions)) + if doc is None: + return + substitutions = base.findall(doc) if substitutions: -# print (config.keys()) -# if config: -# doc = multiple_replace(config, doc) - - + # Special substitutions + if "{{package}}" in substitutions: + # Insert the name of the package + doc = doc.replace("{{package}}", package_name) + + if "{{class}}" in substitutions: + # Insert the name of the class + doc = doc.replace("{{class}}", class_name) + + if "{{class_lower}}" in substitutions: + # Insert the lower case name of the class + doc = doc.replace("{{class_lower}}", class_name_lower) + + # General substitutions for key in substitutions: -# xxx[0] += 1 value = config.get(key) if value is None: continue - - try: - # Compiled regular expression substitution - doc = key.sub(value, doc) - except AttributeError: - # String substitution - value = value.replace("{{package}}", package_name) - value = value.replace("{{class}}", class_name) - value = value.replace("{{class_lower}}", class_name_lower) - value = value.replace("{{VN}}", _VN) - doc = doc.replace(key, value) - - - - #for key, value in config.items(): - # xxx[0] += 1 - # # Substitute the key for the value - # try: - # # Compiled regular expression substitution - # doc = key.sub(value, doc) - # except AttributeError: - # # String substitution - # doc = doc.replace(key, value) - - # ------------------------------------------------------------ - # Do special substitutions after the general ones, in case - # the general one themselves contained special ones. 
- # ------------------------------------------------------------ - # Insert the name of the package -# if "{{package}}" in substitutions: -# xxx[0] += 1 -# doc = doc.replace("{{package}}", package_name) - - # Insert the name of the class containing this method -# if "{{class}}" in substitutions: -# xxx[0] += 1 - # doc = doc.replace("{{class}}", class_name) - - # Insert the lower case name of the class containing this method -# if "{{class_lower}}" in substitutions: -# xxx[0] += 1 - # doc = doc.replace("{{class_lower}}", class_name_lower) - - # Insert the CF version -# if "{{VN}}" in substitutions: -# xxx[0] += 1 - # doc = doc.replace("{{VN}}", _VN) - - # ---------------------------------------------------------------- - # Set the rewritten docstring on the method - # ---------------------------------------------------------------- + + # Do special substitutions on the value + value = value.replace("{{package}}", package_name) + value = value.replace("{{class}}", class_name) + value = value.replace("{{class_lower}}", class_name_lower) + + doc = doc.replace(key, value) + if class_docstring is None: + # Set the rewritten docstring on the method f.__doc__ = doc return doc diff --git a/cfdm/docstring/docstring.py b/cfdm/docstring/docstring.py index 4eccd204d..106e5cada 100644 --- a/cfdm/docstring/docstring.py +++ b/cfdm/docstring/docstring.py @@ -9,13 +9,7 @@ Replacement text may not contain other non-special substitutions. -Keys must be a `str` or `re.Pattern` object: - -* If a key is a `str` then the corresponding value must be a string. - -* If a key is a `re.Pattern` object then the corresponding value must - be a string or a callable, as accepted by the `re.Pattern.sub` - method. +A key and its corresponding value must both be `str`. .. versionaddedd:: (cfdm) 1.8.7.0 @@ -379,7 +373,7 @@ parts are not set. Can't be used with the *update* parameter.""", # update - "{{update: (sequence of) `str`, optional}": """update: (sequence of) `str`, optional + "{{update: (sequence of) `str`, optional}}": """update: (sequence of) `str`, optional Add these original file names to those already stored. The original file names of any constituent parts are not updated. Can't be used with the *define* diff --git a/cfdm/test/test_docstring.py b/cfdm/test/test_docstring.py index 1c81ce477..090a00fe8 100644 --- a/cfdm/test/test_docstring.py +++ b/cfdm/test/test_docstring.py @@ -281,6 +281,11 @@ def test_docstring_docstring_substitutions(self): self.assertIsInstance(d, dict) self.assertIn("{{repr}}", d) + # Check that the special substitutions have not been + # overwritten + for key in x._docstring_special_substitutions(): + self.assertNotIn(key, d) + if __name__ == "__main__": print("Run date:", datetime.datetime.now()) From 3f37791b39e8313ee8962a6b7ad8f14fc9c014f8 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 30 Apr 2024 11:28:54 +0100 Subject: [PATCH 04/18] dev --- Changelog.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Changelog.rst b/Changelog.rst index 301219bae..f98c4f059 100644 --- a/Changelog.rst +++ b/Changelog.rst @@ -1,3 +1,13 @@ +Version NEXTVERSION +------------------- + +**2024-??-??** + +* Speed up time taken for import + (https://github.com/NCAS-CMS/cfdm/issues/???) 
+ +---- + Version 1.11.1.0 ---------------- From ba90ca22547fa02df605295496578ede894d5427 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Wed, 22 Oct 2025 10:57:59 +0100 Subject: [PATCH 05/18] dev --- Changelog.rst | 4 +- cfdm/__init__.py | 194 ------------------ cfdm/constants.py | 34 +-- cfdm/constructs.py | 8 +- cfdm/core/__init__.py | 51 +---- cfdm/core/data/abstract/array.py | 4 +- cfdm/core/data/data.py | 3 +- cfdm/core/meta/docstringrewrite.py | 19 +- cfdm/data/__init__.py | 15 -- cfdm/data/abstract/__init__.py | 11 - cfdm/data/abstract/array.py | 11 +- cfdm/data/abstract/compressedarray.py | 6 - cfdm/data/abstract/filearray.py | 13 +- cfdm/data/abstract/raggedarray.py | 4 - cfdm/data/aggregatedarray.py | 4 +- cfdm/data/creation.py | 5 +- cfdm/data/data.py | 61 +++--- cfdm/data/fragment/fragmentfilearray.py | 5 +- .../data/fragment/mixin/fragmentarraymixin.py | 3 +- cfdm/data/fullarray.py | 3 +- cfdm/data/h5netcdfarray.py | 8 +- cfdm/data/mixin/__init__.py | 7 - cfdm/data/mixin/arraymixin.py | 9 +- cfdm/data/mixin/compressedarraymixin.py | 7 +- cfdm/data/mixin/filearraymixin.py | 9 +- cfdm/data/mixin/indexmixin.py | 7 +- cfdm/data/netcdfindexer.py | 1 - .../subarray/abstract/subsampledsubarray.py | 3 +- .../data/subarray/cellconnectivitysubarray.py | 3 +- cfdm/data/subarray/mixin/pointtopology.py | 4 +- cfdm/data/subsampledarray.py | 3 +- cfdm/data/utils.py | 6 +- cfdm/data/zarrarray.py | 3 +- cfdm/functions.py | 9 +- cfdm/mixin/__init__.py | 14 -- cfdm/mixin/container.py | 6 - cfdm/mixin/netcdf.py | 8 +- cfdm/mixin/propertiesdata.py | 11 - cfdm/read_write/abstract/readwrite.py | 6 +- cfdm/read_write/netcdf/constants.py | 2 +- cfdm/read_write/netcdf/netcdfread.py | 24 ++- cfdm/read_write/netcdf/netcdfwrite.py | 28 +-- cfdm/read_write/read.py | 7 +- cfdm/units.py | 13 -- 44 files changed, 141 insertions(+), 515 deletions(-) diff --git a/Changelog.rst b/Changelog.rst index 474f15285..3feae8e63 100644 --- a/Changelog.rst +++ b/Changelog.rst @@ -3,11 +3,11 @@ Version NEXTVERSION **2025-12-??** -* Speed up time taken for import +* Reduce the time taken to import `cfdm` (https://github.com/NCAS-CMS/cfdm/issues/???) ---- - + Version 1.12.3.1 ---------------- diff --git a/cfdm/__init__.py b/cfdm/__init__.py index 25984eb33..31373db6f 100644 --- a/cfdm/__init__.py +++ b/cfdm/__init__.py @@ -37,197 +37,15 @@ """ -import time -s = time.time() import logging import sys -from packaging.version import Version - from . import core __date__ = core.__date__ __cf_version__ = core.__cf_version__ __version__ = core.__version__ -_requires = core._requires + ( - "cftime", - "netCDF4", - "dask", - "scipy", - "h5netcdf", - "zarr", - "s3fs", - "uritools", - "cfunits", -) - -_error0 = f"cfdm requires the modules {', '.join(_requires)}. " - -print('2 __init__', time.time() - s) - -# Check the version of cftime -try: - import cftime -except ImportError as error1: - raise ImportError(_error0 + str(error1)) -else: - _minimum_vn = "1.6.4" - if Version(cftime.__version__) < Version(_minimum_vn): - raise ValueError( - f"Bad cftime version: cfdm requires cftime>={_minimum_vn}. " - f"Got {cftime.__version__} at {cftime.__file__}" - ) - -print('cftime __init__', time.time() - s) - -## Check the version of netCDF4 -#try: -# import netCDF4 -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "1.7.2" -# if Version(netCDF4.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad netCDF4 version: cfdm requires netCDF4>={_minimum_vn}. 
" -# f"Got {netCDF4.__version__} at {netCDF4.__file__}" -# ) -# -#print('netCDF4 __init__', time.time() - s) -# -## Check the version of h5netcdf -#try: -# import h5netcdf -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "1.3.0" -# if Version(h5netcdf.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad h5netcdf version: cfdm requires h5netcdf>={_minimum_vn}. " -# f"Got {h5netcdf.__version__} at {h5netcdf.__file__}" -# ) -# -#print('h5netcdf __init__', time.time() - s) -# -## Check the version of h5py -#try: -# import h5py -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "3.12.0" -# if Version(h5py.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad h5py version: cfdm requires h5py>={_minimum_vn}. " -# f"Got {h5py.__version__} at {h5py.__file__}" -# ) -# -#print('h5py __init__', time.time() - s) -# -## Check the version of zarr -#try: -# import zarr -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "3.0.8" -# if Version(zarr.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad zarr version: cfdm requires zarr>={_minimum_vn}. " -# f"Got {zarr.__version__} at {zarr.__file__}" -# ) -# -#print('zarr __init__', time.time() - s) -# -## Check the version of s3fs -#try: -# import s3fs -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "2024.6.0" -# if Version(s3fs.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad s3fs version: cfdm requires s3fs>={_minimum_vn}. " -# f"Got {s3fs.__version__} at {s3fs.__file__}" -# ) -# -#print('s3fs __init__', time.time() - s) - -## Check the version of scipy -#try: -# import scipy -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "1.10.0" -# if Version(scipy.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad scipy version: cfdm requires scipy>={_minimum_vn}. " -# f"Got {scipy.__version__} at {scipy.__file__}" -# ) -# -#print('2 __init__', time.time() - s) -# -## Check the version of dask -#try: -# import dask -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "2025.5.1" -# if Version(dask.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad dask version: cfdm requires dask>={_minimum_vn}. " -# f"Got {dask.__version__} at {dask.__file__}" -# ) -# -## Check the version of distributed -#try: -# import distributed -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "2025.5.1" -# if Version(distributed.__version__) < Version(_minimum_vn): -# raise ValueError( -# "Bad distributed version: cfdm requires " -# f"distributed>={_minimum_vn}. " -# f"Got {distributed.__version__} at {distributed.__file__}" -# ) -# -## Check the version of uritools -#try: -# import uritools -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "4.0.3" -# if Version(uritools.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad uritools version: cfdm requires uritools>={_minimum_vn}. 
" -# f"Got {uritools.__version__} at {uritools.__file__}" -# ) -# -## Check the version of cfunits -#try: -# import cfunits -#except ImportError as error1: -# raise ImportError(_error0 + str(error1)) -#else: -# _minimum_vn = "3.3.7" -# if Version(cfunits.__version__) < Version(_minimum_vn): -# raise ValueError( -# f"Bad cfunits version: cfdm requires cfunits>={_minimum_vn}. " -# f"Got {cfunits.__version__} at {cfunits.__file__}" -# ) -# -#del _minimum_vn - -print('3 __init__', time.time() - s) - from .constants import masked # Internal ones passed on so they can be used in cf-python (see @@ -259,8 +77,6 @@ is_log_level_info, ) -print('4 __init__', time.time() - s) - # Though these are internal-use methods, include them in the namespace # (without documenting them) so that cf-python can use them internally # too: @@ -271,12 +87,8 @@ _display_or_return, ) -print('5 __init__', time.time() - s) - from .constructs import Constructs -print('6 __init__', time.time() - s) - from .data import ( Array, AggregatedArray, @@ -301,8 +113,6 @@ ZarrArray, ) -print('9 __init__', time.time() - s) - from .data import ( BiLinearSubarray, BiQuadraticLatitudeLongitudeSubarray, @@ -357,8 +167,6 @@ from .abstract import Container -print('__init__', time.time() - s) - # -------------------------------------------------------------------- # Set up basic logging for the full project with a root logger # -------------------------------------------------------------------- @@ -384,5 +192,3 @@ def detail(self, message, *args, **kwargs): logging.Logger.detail = detail - -print('__init__', time.time() - s) diff --git a/cfdm/constants.py b/cfdm/constants.py index 19ec70e70..6d1de4b57 100644 --- a/cfdm/constants.py +++ b/cfdm/constants.py @@ -1,45 +1,15 @@ -#import logging -#import sys from enum import Enum import numpy as np -#from dask import config -#from dask.utils import parse_bytes -# -#_CHUNKSIZE = "128 MiB" -#config.set({"array.chunk-size": _CHUNKSIZE}) +# -------------------------------------------------------------------- # A dictionary of useful constants. # # Whilst the dictionary may be modified directly, it is safer to # retrieve and set the values with the dedicated get-and-set # functions. - -# -#:Keys: -# -# ATOL: `float` -# The value of absolute tolerance for testing numerically tolerant -# equality. -# -# RTOL: `float` -# The value of relative tolerance for testing numerically tolerant -# equality. -# -# LOG_LEVEL: `str` -# The minimal level of seriousness for which log messages are -# shown. See `cfdm.log_level`. -# -# CHUNKSIZE: `int` -# The Dask chunk size (in bytes). See `cfdm.chunksize`. 
-# -#""" +# -------------------------------------------------------------------- CONSTANTS = {} -# "ATOL": sys.float_info.epsilon, -# "RTOL": sys.float_info.epsilon, -# "LOG_LEVEL": logging.getLevelName(logging.getLogger().level), -# "CHUNKSIZE": parse_bytes(_CHUNKSIZE), -#} # -------------------------------------------------------------------- diff --git a/cfdm/constructs.py b/cfdm/constructs.py index b5a1572ee..c8fa90d14 100644 --- a/cfdm/constructs.py +++ b/cfdm/constructs.py @@ -1,19 +1,15 @@ -import time -s = time.time() import logging from itertools import zip_longest from re import Pattern -print('constructs', time.time()-s) -from .mixin import Container -print('3 constructs', time.time()-s) from .core import Constructs as core_Constructs -print('4 constructs', time.time()-s) from .core.functions import deepcopy from .decorators import _manage_log_level_via_verbosity +from .mixin import Container logger = logging.getLogger(__name__) + class Constructs(Container, core_Constructs): """A container for metadata constructs. diff --git a/cfdm/core/__init__.py b/cfdm/core/__init__.py index d75140c63..7c8f87333 100644 --- a/cfdm/core/__init__.py +++ b/cfdm/core/__init__.py @@ -14,55 +14,13 @@ __date__ = "2025-10-15" __cf_version__ = "1.12" __version__ = "1.12.3.1" -tt=[0] -import time -s = time.time() -from platform import python_version -_requires = ("numpy", "packaging") -_error0 = f"cfdm.core requires the modules {', '.join(_requires)}. " - -# Check the version of packaging -try: - import packaging - from packaging.version import Version -except ImportError as error1: - raise ImportError(_error0 + str(error1)) -else: - _minimum_vn = "20.0" - if Version(packaging.__version__) < Version(_minimum_vn): - raise RuntimeError( - f"Bad packaging version: cf requires packaging>={_minimum_vn}. " - f"Got {packaging.__version__} at {packaging.__file__}" - ) - -# Check the version of python -_minimum_vn = "3.10.0" -if Version(python_version()) < Version(_minimum_vn): - raise ValueError( - f"Bad python version: cfdm.core requires python>={_minimum_vn}. " - f"Got {python_version()}" - ) - -# Check the version of numpy -try: - import numpy as np -except ImportError as error1: - raise ImportError(_error0 + str(error1)) -else: - _minimum_vn = "2.0.0" - if Version(np.__version__) < Version(_minimum_vn): - raise ValueError( - f"Bad numpy version: cfdm.core requires numpy>={_minimum_vn}. " - f"Got {np.__version__} at {np.__file__}" - ) - -del _minimum_vn +# Count the number of docstrings (first element), and the number which +# have docstring substitutions applied to them (second element). 
+_docstring_substitutions = [0, 0] from .constructs import Constructs - from .functions import CF, environment - from .data import Data, Array, NumpyArray from .bounds import Bounds @@ -95,6 +53,3 @@ ) from .meta import DocstringRewriteMeta -print('core/__init__', time.time() - s) -from .constructs import Constructs -print('CONSTRUCTS core/__init__', time.time() - s) diff --git a/cfdm/core/data/abstract/array.py b/cfdm/core/data/abstract/array.py index 314068c9c..48975f025 100644 --- a/cfdm/core/data/abstract/array.py +++ b/cfdm/core/data/abstract/array.py @@ -1,8 +1,8 @@ from functools import reduce from operator import mul -from ...abstract import Container -from ...utils import cached_property +from cfdm.core.abstract import Container +from cfdm.core.utils import cached_property class Array(Container): diff --git a/cfdm/core/data/data.py b/cfdm/core/data/data.py index 8983bbb5f..27fff1afe 100644 --- a/cfdm/core/data/data.py +++ b/cfdm/core/data/data.py @@ -1,6 +1,7 @@ import numpy as np -from .. import abstract +from cfdm.core import abstract + from .abstract import Array from .numpyarray import NumpyArray diff --git a/cfdm/core/meta/docstringrewrite.py b/cfdm/core/meta/docstringrewrite.py index 83add3a2e..b78ba2d0f 100644 --- a/cfdm/core/meta/docstringrewrite.py +++ b/cfdm/core/meta/docstringrewrite.py @@ -1,8 +1,13 @@ import inspect from re import compile +# Count the number of docstrings (first element of +# '_docstring_substitutions'), and the number which have docstring +# substitutions applied to them (second element). +from .. import _docstring_substitutions + base = compile("{{.*?}}") -import time + class DocstringRewriteMeta(type): """Modify docstrings at time of import. @@ -37,7 +42,6 @@ def __new__(cls, class_name, parents, attrs): taken from the class closest to the child class. """ - return super().__new__(cls, class_name, parents, attrs) class_name_lower = class_name.lower() docstring_rewrite = {} @@ -618,8 +622,8 @@ def _docstring_update( docstring. """ - from .. 
import tt - s = time.time() + _docstring_substitutions[0] += 1 + if class_docstring is not None: doc = class_docstring else: @@ -627,8 +631,11 @@ def _docstring_update( if doc is None: return + substitutions = base.findall(doc) if substitutions: + _docstring_substitutions[1] += 1 + # Special substitutions if "{{package}}" in substitutions: # Insert the name of the package @@ -658,7 +665,5 @@ def _docstring_update( if class_docstring is None: # Set the rewritten docstring on the method f.__doc__ = doc - - tt[0] += time.time() - s - print (tt) + return doc diff --git a/cfdm/data/__init__.py b/cfdm/data/__init__.py index de966e798..496075ddf 100644 --- a/cfdm/data/__init__.py +++ b/cfdm/data/__init__.py @@ -1,21 +1,8 @@ -import time -s = time.time() -print('0 data/__init__') - -#from .abstract import Array, CompressedArray, MeshArray, RaggedArray - from .abstract import Array - - from .abstract import CompressedArray - - from .abstract import MeshArray - - from .abstract import RaggedArray - from .subarray import ( BiLinearSubarray, BiQuadraticLatitudeLongitudeSubarray, @@ -51,5 +38,3 @@ from .zarrarray import ZarrArray from .data import Data - -print(' 9 data/__init__', time.time()-s) diff --git a/cfdm/data/abstract/__init__.py b/cfdm/data/abstract/__init__.py index 725bb1d9a..cd88bed13 100644 --- a/cfdm/data/abstract/__init__.py +++ b/cfdm/data/abstract/__init__.py @@ -1,16 +1,5 @@ -import time -s = time.time() -print('0 data/abstract/__init__') - from .array import Array - -print(' 5 data/abstract/__init__', time.time()-s); s = time.time() from .compressedarray import CompressedArray -print(' 6 data/abstract/__init__', time.time()-s); s = time.time() from .filearray import FileArray -print(' 7 data/abstract/__init__', time.time()-s); s = time.time() from .mesharray import MeshArray -print(' 8 data/abstract/__init__', time.time()-s); s = time.time() from .raggedarray import RaggedArray - -print(' 9 data/abstract/__init__', time.time()-s) diff --git a/cfdm/data/abstract/array.py b/cfdm/data/abstract/array.py index 1b0b1b2ce..711a45186 100644 --- a/cfdm/data/abstract/array.py +++ b/cfdm/data/abstract/array.py @@ -1,14 +1,7 @@ -import time -s = time.time() -print('0 data/abstract/array') -from ... import core - -from ...mixin import Container - -print(' 8 data/abstract/array', time.time()-s); s = time.time() +from cfdm import core +from cfdm.mixin import Container from .. 
import mixin -print(' 9 data/abstract/array', time.time()-s); s = time.time() class Array(mixin.ArrayMixin, Container, core.Array): diff --git a/cfdm/data/abstract/compressedarray.py b/cfdm/data/abstract/compressedarray.py index 6a0f75e91..423ae44d0 100644 --- a/cfdm/data/abstract/compressedarray.py +++ b/cfdm/data/abstract/compressedarray.py @@ -1,14 +1,8 @@ -import time -s = time.time() -print('0 compressedarray', time.time()-s) import numpy as np -print('1 compressedarray', time.time()-s) from ..netcdfindexer import netcdf_indexer -print('2 compressedarray', time.time()-s) from .array import Array -print('3 compressedarray', time.time()-s) class DeprecationError(Exception): """Deprecation error.""" diff --git a/cfdm/data/abstract/filearray.py b/cfdm/data/abstract/filearray.py index 21153743c..bdc11046e 100644 --- a/cfdm/data/abstract/filearray.py +++ b/cfdm/data/abstract/filearray.py @@ -1,12 +1,9 @@ from copy import deepcopy from os import sep from os.path import join -from urllib.parse import urlparse -#from s3fs import S3FileSystem -from uritools import isuri, urisplit +from cfdm.functions import abspath, dirname -from ...functions import abspath, dirname from . import Array @@ -419,7 +416,9 @@ def get_storage_options( and "endpoint_url" not in client_kwargs ): if parsed_filename is None: + from urllib.parse import urlparse if filename is None: + try: filename = self.get_filename(normalise=False) except AttributeError: @@ -461,6 +460,8 @@ def open(self, func, *args, **kwargs): the data within the file. """ + from urllib.parse import urlparse + filename = self.get_filename(normalise=True) url = urlparse(filename) if url.scheme == "file": @@ -469,7 +470,7 @@ def open(self, func, *args, **kwargs): elif url.scheme == "s3": # Create an openable S3 file object from s3fs import S3FileSystem - + storage_options = self.get_storage_options( create_endpoint_url=True, parsed_filename=url ) @@ -534,6 +535,8 @@ def replace_directory(self, old=None, new=None, normalise=False): filename = a.get_filename(normalise=normalise) if old or new: if normalise: + from uritools import isuri, urisplit + if not old: raise ValueError( "When 'normalise' is True and 'new' is a non-empty " diff --git a/cfdm/data/abstract/raggedarray.py b/cfdm/data/abstract/raggedarray.py index b7e2ce06d..4327588ec 100644 --- a/cfdm/data/abstract/raggedarray.py +++ b/cfdm/data/abstract/raggedarray.py @@ -1,13 +1,9 @@ -import time -s = time.time() -print('0 data/abstract/raggedarray') from itertools import accumulate from numbers import Number from ..subarray import RaggedSubarray from .compressedarray import CompressedArray -print(' 9 data/abstract/raggedarray', time.time()-s) class RaggedArray(CompressedArray): """An underlying ragged array. diff --git a/cfdm/data/aggregatedarray.py b/cfdm/data/aggregatedarray.py index 77f75ea2c..376def47f 100644 --- a/cfdm/data/aggregatedarray.py +++ b/cfdm/data/aggregatedarray.py @@ -2,7 +2,6 @@ from itertools import accumulate, product import numpy as np -from uritools import isuri, uricompose from ..functions import dirname from . import abstract @@ -10,6 +9,8 @@ from .netcdfindexer import netcdf_indexer from .utils import chunk_locations, chunk_positions +# from uritools import isuri, uricompose + class AggregatedArray(abstract.FileArray): """An array stored in a CF aggregation variable. 
@@ -704,6 +705,7 @@ def to_dask_array(self, chunks="auto"): import dask.array as da from dask.array.core import getter from dask.base import tokenize + from uritools import isuri, uricompose name = (f"{self.__class__.__name__}-{tokenize(self)}",) diff --git a/cfdm/data/creation.py b/cfdm/data/creation.py index 3e95710bf..dc8260f7a 100644 --- a/cfdm/data/creation.py +++ b/cfdm/data/creation.py @@ -1,8 +1,9 @@ """Functions used during the creation of `Data` objects.""" -#import dask.array as da +# import dask.array as da import numpy as np -#from dask.base import is_dask_collection + +# from dask.base import is_dask_collection def to_dask(array, chunks, **from_array_options): diff --git a/cfdm/data/data.py b/cfdm/data/data.py index 7a35e1122..23480aa72 100644 --- a/cfdm/data/data.py +++ b/cfdm/data/data.py @@ -1,6 +1,3 @@ -import time -s = time.time() -print('0 data') import logging import math import operator @@ -9,12 +6,7 @@ from numbers import Integral from os.path import commonprefix -#import dask.array as da import numpy as np -#from dask.base import collections_to_expr, is_dask_collection, tokenize -#from dask.optimization import cull -from netCDF4 import default_fillvals -#from scipy.sparse import issparse from .. import core from ..constants import masked @@ -42,7 +34,6 @@ cfdm_to_memory, cfdm_where, ) - from .utils import ( allclose, chunk_indices, @@ -58,8 +49,6 @@ logger = logging.getLogger(__name__) -print(' 9 data', time.time()-s) - class Data(Container, NetCDFAggregation, NetCDFChunks, Files, core.Data): """An N-dimensional data array with units and masked values. @@ -435,7 +424,7 @@ def __init__( # Is the array a sparse array? from scipy.sparse import issparse - + sparse_array = issparse(array) # Is the array data in memory? @@ -722,7 +711,7 @@ def __getitem__(self, indices): # Subspace axes which have list/1-d array indices import dask.array as da - + for axis in axes_with_list_indices: dx = da.take(dx, indices[axis], axis=axis) @@ -2080,7 +2069,7 @@ def _modify_dask_graph( if updated: import dask.array as da - + # The Dask graph was modified, so recast the dictionary # representation as a Dask array. dx = self.to_dask_array( @@ -3053,7 +3042,7 @@ def mask(self): """ import dask.array as da - + mask_data_obj = self.copy(array=False) dx = self.to_dask_array( @@ -3464,7 +3453,7 @@ def any(self, axis=None, keepdims=True, split_every=None): """ import dask.array as da - + d = self.copy(array=False) dx = self.to_dask_array( _force_mask_hardness=False, _force_to_memory=True @@ -3675,7 +3664,7 @@ def apply_masking( if mask is not None: import dask.array as da - + dx = da.ma.masked_where(mask, dx) CFA = self._CFA else: @@ -3864,7 +3853,7 @@ def compressed(self, inplace=False): """ import dask.array as da - + d = _inplace_enabled_define_and_cleanup(self) dx = d.to_dask_array(_force_mask_hardness=True, _force_to_memory=True) @@ -4041,8 +4030,6 @@ def concatenate( # Remove unnecessary components from the graph, which may # improve performance, and because complicated task graphs # can sometimes confuse da.concatenate. 
- from dask.optimization import cull - for d in data: d.cull_graph() @@ -4060,7 +4047,7 @@ def concatenate( return data0 import dask.array as da - + conformed_data = [data0] for data1 in data[1:]: # Turn any scalar array into a 1-d array @@ -4502,7 +4489,7 @@ def empty( """ import dask.array as da - + dx = da.empty(shape, dtype=dtype, chunks=chunks) return cls(dx, units=units, calendar=calendar) @@ -4586,7 +4573,7 @@ def equals( return False import dask.array as da - + self_dx = self.to_dask_array(_force_mask_hardness=False) other_dx = other.to_dask_array(_force_mask_hardness=False) @@ -4680,7 +4667,7 @@ def equals( # a masked object and if so, force the desired result (True). # # This early compute won't degrade performance because it - # would be performed towards result.compute() below anyway. + # would be performed towards result.compute() below anyway. data_comparison = da.all(self_dx == other_dx).compute() if data_comparison is np.ma.masked: data_comparison = True @@ -4784,6 +4771,8 @@ def filled(self, fill_value=None, inplace=False): if fill_value is None: fill_value = d.get_fill_value(None) if fill_value is None: # still... + from netCDF4 import default_fillvals + fill_value = default_fillvals.get(d.dtype.str[1:]) if fill_value is None and d.dtype.kind in ("SU"): fill_value = default_fillvals.get("S1", None) @@ -5052,7 +5041,7 @@ def full( """ import dask.array as da - + if dtype is None: # Need to explicitly set the default because dtype is not # a named keyword of da.full @@ -5933,7 +5922,7 @@ def masked_values(self, value, rtol=None, atol=None, inplace=False): """ import dask.array as da - + d = _inplace_enabled_define_and_cleanup(self) if rtol is None: @@ -5992,7 +5981,7 @@ def masked_where(self, condition, inplace=False): """ import dask.array as da - + d = _inplace_enabled_define_and_cleanup(self) array = cfdm_where(d.array, condition, masked, None, d.hardmask) @@ -6060,7 +6049,7 @@ def max( """ import dask.array as da - + d = _inplace_enabled_define_and_cleanup(self) d = collapse( da.max, @@ -6118,7 +6107,7 @@ def min(self, axes=None, squeeze=False, split_every=None, inplace=False): """ import dask.array as da - + d = _inplace_enabled_define_and_cleanup(self) d = collapse( da.min, @@ -6172,7 +6161,7 @@ def ones(cls, shape, dtype=None, units=None, calendar=None, chunks="auto"): """ import dask.array as da - + dx = da.ones(shape, dtype=dtype, chunks=chunks) return cls(dx, units=units, calendar=calendar) @@ -6229,7 +6218,7 @@ def pad_missing(self, axis, pad_width=None, to_size=None, inplace=False): """ import dask.array as da - + if not 0 <= axis < self.ndim: raise ValueError( f"'axis' must be a valid dimension position. 
Got {axis}" @@ -6503,7 +6492,7 @@ def replace_filenames(self, filenames): """ import dask.array as da - + filenames = np.ma.filled(filenames, "") if self.numblocks != filenames.shape: raise ValueError( @@ -6939,7 +6928,7 @@ def sum(self, axes=None, squeeze=False, split_every=None, inplace=False): """ import dask.array as da - + d = _inplace_enabled_define_and_cleanup(self) d = collapse( da.sum, @@ -7187,7 +7176,7 @@ def transpose(self, axes=None, inplace=False): """ import dask.array as da - + d = _inplace_enabled_define_and_cleanup(self) ndim = d.ndim @@ -7300,7 +7289,7 @@ def unique(self, inplace=False): """ import dask.array as da - + d = _inplace_enabled_define_and_cleanup(self) original_shape = self.shape @@ -7368,7 +7357,7 @@ def zeros( """ import dask.array as da - + dx = da.zeros(shape, dtype=dtype, chunks=chunks) return cls(dx, units=units, calendar=calendar) diff --git a/cfdm/data/fragment/fragmentfilearray.py b/cfdm/data/fragment/fragmentfilearray.py index f8bc37cac..0e4e9c07a 100644 --- a/cfdm/data/fragment/fragmentfilearray.py +++ b/cfdm/data/fragment/fragmentfilearray.py @@ -1,8 +1,7 @@ from os.path import join -from uritools import urisplit +from cfdm.functions import abspath -from ...functions import abspath from ..abstract import FileArray from ..mixin import IndexMixin from .mixin import FragmentArrayMixin @@ -225,6 +224,8 @@ def get_filename(self, normalise=False, default=AttributeError()): ) if normalise: + from uritools import urisplit + uri = urisplit(filename) # Convert the file name to an absolute URI diff --git a/cfdm/data/fragment/mixin/fragmentarraymixin.py b/cfdm/data/fragment/mixin/fragmentarraymixin.py index 9130fc5ab..d18cdc60d 100644 --- a/cfdm/data/fragment/mixin/fragmentarraymixin.py +++ b/cfdm/data/fragment/mixin/fragmentarraymixin.py @@ -2,7 +2,8 @@ import numpy as np -from ....units import Units +from cfdm.units import Units + from ...netcdfindexer import netcdf_indexer diff --git a/cfdm/data/fullarray.py b/cfdm/data/fullarray.py index bc1b34d5f..35da1bfef 100644 --- a/cfdm/data/fullarray.py +++ b/cfdm/data/fullarray.py @@ -1,6 +1,7 @@ import numpy as np -from ..functions import indices_shape, parse_indices +from cfdm.functions import indices_shape, parse_indices + from .abstract import Array from .mixin import IndexMixin from .mixin.arraymixin import array_implements diff --git a/cfdm/data/h5netcdfarray.py b/cfdm/data/h5netcdfarray.py index 30a7367b8..7e1b45de7 100644 --- a/cfdm/data/h5netcdfarray.py +++ b/cfdm/data/h5netcdfarray.py @@ -1,7 +1,5 @@ import logging -import h5netcdf - from . import abstract from .locks import netcdf_lock from .mixin import IndexMixin @@ -211,6 +209,6 @@ def open(self, **kwargs): within the file. 
""" - return super().open( - h5netcdf.File, mode="r", decode_vlen_strings=True, **kwargs - ) + from h5netcdf import File + + return super().open(File, mode="r", decode_vlen_strings=True, **kwargs) diff --git a/cfdm/data/mixin/__init__.py b/cfdm/data/mixin/__init__.py index 1bc208763..2c9e1ef28 100644 --- a/cfdm/data/mixin/__init__.py +++ b/cfdm/data/mixin/__init__.py @@ -1,10 +1,3 @@ -import time -s = time.time() -print('0 data/mixin/__init__') from .arraymixin import ArrayMixin -print(' 7 data/mixin/__init__', time.time()-s); s = time.time() from .compressedarraymixin import CompressedArrayMixin -print(' 8 data/mixin/__init__', time.time()-s); s = time.time() from .indexmixin import IndexMixin - -print(' 9 data/mixin/__init__', time.time()-s); s = time.time() diff --git a/cfdm/data/mixin/arraymixin.py b/cfdm/data/mixin/arraymixin.py index 6274d678a..b8eefb7e9 100644 --- a/cfdm/data/mixin/arraymixin.py +++ b/cfdm/data/mixin/arraymixin.py @@ -1,13 +1,8 @@ -import time -s = time.time() -print('0 data/abstract/array') from copy import deepcopy import numpy as np -#from cfunits import Units - -print(' 9 data/mixin/arraymixin', time.time()-s); s = time.time() +from cfdm.units import Units class ArrayMixin: @@ -142,8 +137,6 @@ def Units(self): .. versionadded:: (cfdm) 1.11.2.0 """ - from cfunits import Units - return Units(self.get_units(None), self.get_calendar(None)) def astype(self, dtype, **kwargs): diff --git a/cfdm/data/mixin/compressedarraymixin.py b/cfdm/data/mixin/compressedarraymixin.py index de698a45c..71fd4eed3 100644 --- a/cfdm/data/mixin/compressedarraymixin.py +++ b/cfdm/data/mixin/compressedarraymixin.py @@ -43,7 +43,7 @@ def _lock_file_read(self, array): pass else: import dask.array as da - + array = da.from_array(array, chunks=chunks, lock=True) return array @@ -74,13 +74,12 @@ def to_dask_array(self, chunks="auto"): from functools import partial import dask.array as da - from dask import config -# from dask.array.core import getter + + # from dask.array.core import getter from dask.base import tokenize getter = da.core.getter - from ..utils import normalize_chunks diff --git a/cfdm/data/mixin/filearraymixin.py b/cfdm/data/mixin/filearraymixin.py index bab8529ac..c62cca613 100644 --- a/cfdm/data/mixin/filearraymixin.py +++ b/cfdm/data/mixin/filearraymixin.py @@ -1,9 +1,6 @@ from copy import deepcopy -from urllib.parse import urlparse -#from s3fs import S3FileSystem - -from ...functions import abspath +from cfdm.functions import abspath class DeprecationError(Exception): @@ -284,6 +281,8 @@ def get_storage_options( and "endpoint_url" not in client_kwargs ): if parsed_filename is None: + from urllib.parse import urlparse + if filename is None: try: filename = self.get_filename() @@ -328,6 +327,8 @@ def open(self, func, *args, **kwargs): """ # Loop round the files, returning as soon as we find one that # works. 
+ from urllib.parse import urlparse + filenames = self.get_filenames() for filename, address in zip(filenames, self.get_addresses()): url = urlparse(filename) diff --git a/cfdm/data/mixin/indexmixin.py b/cfdm/data/mixin/indexmixin.py index c55c4d0cc..c2cdfcb0f 100644 --- a/cfdm/data/mixin/indexmixin.py +++ b/cfdm/data/mixin/indexmixin.py @@ -1,10 +1,8 @@ from numbers import Integral import numpy as np -#from dask.array.slicing import normalize_index -#from dask.base import is_dask_collection -from ...functions import indices_shape, parse_indices +from cfdm.functions import indices_shape, parse_indices class IndexMixin: @@ -103,7 +101,6 @@ def __getitem__(self, index): The subspaced data. """ - from dask.array.slicing import normalize_index from dask.base import is_dask_collection shape0 = self.shape @@ -378,6 +375,8 @@ def index(self, conform=True): # 2) Converting, where possible, sequences of integers to # slices. This helps when the parent class can't cope with # indices that are sequences of integers. + from dask.array.slicing import normalize_index + ind = list(ind) for n, (i, size) in enumerate(zip(ind[:], self.original_shape)): if isinstance(i, slice): diff --git a/cfdm/data/netcdfindexer.py b/cfdm/data/netcdfindexer.py index 8dc57d2c5..f1c932487 100644 --- a/cfdm/data/netcdfindexer.py +++ b/cfdm/data/netcdfindexer.py @@ -25,7 +25,6 @@ from numbers import Integral import numpy as np -#from dask.array.slicing import normalize_index from netCDF4 import chartostring, default_fillvals from netCDF4.utils import _safecast diff --git a/cfdm/data/subarray/abstract/subsampledsubarray.py b/cfdm/data/subarray/abstract/subsampledsubarray.py index 4ff535035..d4d879793 100644 --- a/cfdm/data/subarray/abstract/subsampledsubarray.py +++ b/cfdm/data/subarray/abstract/subsampledsubarray.py @@ -1,6 +1,7 @@ import numpy as np -from ....core.utils import cached_property +from cfdm.core.utils import cached_property + from .subarray import Subarray diff --git a/cfdm/data/subarray/cellconnectivitysubarray.py b/cfdm/data/subarray/cellconnectivitysubarray.py index 4fde29184..8dc83b3ff 100644 --- a/cfdm/data/subarray/cellconnectivitysubarray.py +++ b/cfdm/data/subarray/cellconnectivitysubarray.py @@ -1,6 +1,5 @@ import numpy as np -from ...functions import integer_dtype from .abstract import MeshSubarray @@ -30,6 +29,8 @@ def __getitem__(self, indices): .. versionadded:: (cfdm) 1.11.0.0 """ + from cfdm.functions import integer_dtype + start_index = self.start_index shape = self.shape start = 0 diff --git a/cfdm/data/subarray/mixin/pointtopology.py b/cfdm/data/subarray/mixin/pointtopology.py index 9d83ac6cc..4567b3ecc 100644 --- a/cfdm/data/subarray/mixin/pointtopology.py +++ b/cfdm/data/subarray/mixin/pointtopology.py @@ -1,7 +1,5 @@ import numpy as np -from ....functions import integer_dtype - class PointTopology: """Mixin class for point topology array compressed by UGRID. 
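The hunks around this point all apply the same deferred-import idiom: module-level
imports of heavy dependencies (dask, scipy, s3fs, uritools, urllib) are moved inside
the functions and methods that actually need them, so that ``import cfdm`` no longer
pays their cost up front. A minimal illustration of the idiom, assuming dask is
installed (the function name ``masked_mean`` is purely illustrative and not part of
the cfdm API)::

    def masked_mean(values):
        """Return the mean of the non-missing values."""
        # Deferred import: the heavy dependency is only imported the
        # first time this function is called, not at package import.
        import dask.array as da

        dx = da.ma.masked_invalid(da.asanyarray(values))
        return dx.mean().compute()
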
@@ -24,6 +22,8 @@ def __getitem__(self, indices): from scipy.sparse import csr_array + from cfdm.functions import integer_dtype + start_index = self.start_index node_connectivity = self._select_data(check_mask=False) diff --git a/cfdm/data/subsampledarray.py b/cfdm/data/subsampledarray.py index f1b0fff9e..97c720516 100644 --- a/cfdm/data/subsampledarray.py +++ b/cfdm/data/subsampledarray.py @@ -4,7 +4,8 @@ import numpy as np -from ..core.utils import cached_property +from cfdm.core.utils import cached_property + from .abstract import CompressedArray from .mixin import CompressedArrayMixin from .netcdfindexer import netcdf_indexer diff --git a/cfdm/data/utils.py b/cfdm/data/utils.py index f950e7ea0..4e79b7bbd 100644 --- a/cfdm/data/utils.py +++ b/cfdm/data/utils.py @@ -4,11 +4,9 @@ from itertools import product import cftime -#import dask.array as da import numpy as np -from dask.core import flatten -from ..units import Units +from cfdm.units import Units _default_calendar = "standard" @@ -66,6 +64,8 @@ def allclose(x, y, masked_equal=True, rtol=None, atol=None): # Dask's internal algorithms require these to be set as parameters. def allclose(a_blocks, b_blocks, rtol=rtol, atol=atol): """Run `ma.allclose` across multiple blocks over two arrays.""" + from dask.core import flatten + result = True # Handle scalars, including 0-d arrays, for which a_blocks and # b_blocks will have the corresponding type and hence not be iterable. diff --git a/cfdm/data/zarrarray.py b/cfdm/data/zarrarray.py index 194228903..cfa1ccf5c 100644 --- a/cfdm/data/zarrarray.py +++ b/cfdm/data/zarrarray.py @@ -1,6 +1,5 @@ from . import abstract from .mixin import IndexMixin -from .netcdfindexer import netcdf_indexer class ZarrArray(IndexMixin, abstract.FileArray): @@ -27,6 +26,8 @@ def _get_array(self, index=None): The subspace. """ + from .netcdfindexer import netcdf_indexer + if index is None: index = self.index() diff --git a/cfdm/functions.py b/cfdm/functions.py index aa8eeca13..97ab196f8 100644 --- a/cfdm/functions.py +++ b/cfdm/functions.py @@ -11,9 +11,6 @@ from os.path import join import numpy as np -#from dask import config as _config -#from dask.base import is_dask_collection -#from dask.utils import parse_bytes from uritools import uricompose, urisplit from . 
import __cf_version__, __file__, __version__, core @@ -1810,7 +1807,7 @@ def _parse(cls, arg): """ from dask import config from dask.utils import parse_bytes - + config.set({"array.chunk-size": arg}) return parse_bytes(arg) @@ -1889,7 +1886,7 @@ class log_level(ConstantAccess): _name = "LOG_LEVEL" _default = logging.getLevelName(logging.getLogger().level) - + # Define the valid log levels _ValidLogLevels = ValidLogLevels @@ -2244,7 +2241,7 @@ def indices_shape(indices, full_shape, keepdims=True): """ from dask.base import is_dask_collection - + shape = [] # i = 0 for index, full_size in zip(indices, full_shape): diff --git a/cfdm/mixin/__init__.py b/cfdm/mixin/__init__.py index 15e980d4a..027bc77e2 100644 --- a/cfdm/mixin/__init__.py +++ b/cfdm/mixin/__init__.py @@ -1,26 +1,16 @@ -import time -s = time.time() from .boundsmixin import BoundsMixin from .container import Container from .files import Files from .quantizationmixin import QuantizationMixin from .properties import Properties -print('1 mizin/__initi__', time.time()-s) from .propertiesdata import PropertiesData -print('2 mizin/__initi__', time.time()-s) from .propertiesdatabounds import PropertiesDataBounds -print('3 mizin/__initi__', time.time()-s) from .coordinate import Coordinate -print('mizin/__initi__', time.time()-s) from .topology import Topology -print('mizin/__initi__', time.time()-s) - from .parameters import Parameters from .parametersdomainancillaries import ParametersDomainAncillaries -print('mizin/__initi__', time.time()-s) - from .netcdf import ( NetCDFComponents, NetCDFGlobalAttributes, @@ -40,8 +30,4 @@ NetCDFVariable, ) -print('mizin/__initi__', time.time()-s) - from .fielddomain import FieldDomain - -print('mizin/__initi__', time.time()-s) diff --git a/cfdm/mixin/container.py b/cfdm/mixin/container.py index 4d2dbe6cd..2a4577652 100644 --- a/cfdm/mixin/container.py +++ b/cfdm/mixin/container.py @@ -1,19 +1,13 @@ - -import time -s = time.time() import logging import numpy as np -import time - from ..decorators import _manage_log_level_via_verbosity from ..docstring import _docstring_substitution_definitions from ..functions import atol, rtol logger = logging.getLogger(__name__) -print('mixin container', time.time()-s) class Container: """Mixin class for storing object components. diff --git a/cfdm/mixin/netcdf.py b/cfdm/mixin/netcdf.py index 40bf66368..eb02eaed5 100644 --- a/cfdm/mixin/netcdf.py +++ b/cfdm/mixin/netcdf.py @@ -1,15 +1,9 @@ -import time -s = time.time() -print('0 netcdf') from numbers import Integral from re import split -#from dask.utils import parse_bytes - from ..core.functions import deepcopy from ..functions import _DEPRECATION_ERROR_METHOD -print(' 9 netcdf', time.time()-s) class DeprecationError(Exception): """An error indicating a method is no longer available.""" @@ -2734,7 +2728,7 @@ def nc_set_dataset_chunksizes(self, chunksizes): if chunksizes != "contiguous": from dask.utils import parse_bytes - + try: chunksizes = parse_bytes(chunksizes) except ValueError: diff --git a/cfdm/mixin/propertiesdata.py b/cfdm/mixin/propertiesdata.py index 7f978e662..d2d91fba2 100644 --- a/cfdm/mixin/propertiesdata.py +++ b/cfdm/mixin/propertiesdata.py @@ -1,11 +1,6 @@ -import time -s = time.time() import logging -print('0 propertiesdata') - from ..data import Data - from ..decorators import ( _display_or_return, _inplace_enabled, @@ -13,17 +8,11 @@ _manage_log_level_via_verbosity, _test_decorator_args, ) - - from ..functions import _DEPRECATION_ERROR_METHOD from . 
import Properties - - logger = logging.getLogger(__name__) -print(' 9 propertiesdata', time.time()-s) - class PropertiesData(Properties): """Mixin class for a data array with descriptive properties. diff --git a/cfdm/read_write/abstract/readwrite.py b/cfdm/read_write/abstract/readwrite.py index d9558583d..afc61557e 100644 --- a/cfdm/read_write/abstract/readwrite.py +++ b/cfdm/read_write/abstract/readwrite.py @@ -1,8 +1,8 @@ from collections.abc import Iterable -from ...cfdmimplementation import implementation -from ...core import DocstringRewriteMeta -from ...docstring import _docstring_substitution_definitions +from cfdm.cfdmimplementation import implementation +from cfdm.core import DocstringRewriteMeta +from cfdm.docstring import _docstring_substitution_definitions class ReadWrite(metaclass=DocstringRewriteMeta): diff --git a/cfdm/read_write/netcdf/constants.py b/cfdm/read_write/netcdf/constants.py index e29b04dce..a0e1c972e 100644 --- a/cfdm/read_write/netcdf/constants.py +++ b/cfdm/read_write/netcdf/constants.py @@ -1,4 +1,4 @@ -from ...quantization import Quantization +from cfdm.quantization import Quantization CODE0 = { # Physically meaningful and corresponding to constructs diff --git a/cfdm/read_write/netcdf/netcdfread.py b/cfdm/read_write/netcdf/netcdfread.py index d5902d568..525371d6c 100644 --- a/cfdm/read_write/netcdf/netcdfread.py +++ b/cfdm/read_write/netcdf/netcdfread.py @@ -16,15 +16,11 @@ import netCDF4 import numpy as np -#from dask.array.core import normalize_chunks -#from dask.base import tokenize -from packaging.version import Version -#from s3fs import S3FileSystem -from uritools import urisplit - -from ...data.netcdfindexer import netcdf_indexer -from ...decorators import _manage_log_level_via_verbosity -from ...functions import abspath, is_log_level_debug, is_log_level_detail + +from cfdm.data.netcdfindexer import netcdf_indexer +from cfdm.decorators import _manage_log_level_via_verbosity +from cfdm.functions import abspath, is_log_level_debug, is_log_level_detail + from .. import IORead from ..exceptions import DatasetTypeError, ReadError from .constants import ( @@ -516,6 +512,8 @@ def file_open(self, dataset, flatten=True, verbose=None): >>> r.file_open('file.nc') """ + from uritools import urisplit + g = self.read_vars netcdf_backend = g["netcdf_backend"] @@ -549,7 +547,7 @@ def file_open(self, dataset, flatten=True, verbose=None): # An S3 file system with these options does not exist, # so create one. from s3fs import S3FileSystem - + file_system = S3FileSystem(**storage_options) file_systems[fs_key] = file_system @@ -828,6 +826,8 @@ def dataset_type(cls, dataset, allowed_dataset_types): * `None` for anything else. """ + from uritools import urisplit + # Assume that non-local URIs are netCDF or zarr u = urisplit(dataset) if u.scheme not in (None, "file"): @@ -1077,6 +1077,8 @@ def read( The field or domain constructs in the file. 
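Editorial aside, not part of the patch series: the hunks above repeatedly move imports such as `from uritools import urisplit`, `from s3fs import S3FileSystem` and `from packaging.version import Version` from module level into the functions that use them, so that their cost is paid on first call rather than at `import cfdm` time. A minimal, self-contained sketch of that deferral pattern follows; `parse_scheme` and the timing harness are invented for illustration and use only the standard library::

    import sys
    import time


    def parse_scheme(path):
        """Return the URI scheme of a path, importing the parser lazily."""
        # Deferred import: urllib.parse is imported on the first call,
        # not when the enclosing package is imported.
        from urllib.parse import urlparse

        return urlparse(path).scheme


    if __name__ == "__main__":
        t0 = time.perf_counter()
        parse_scheme("https://example.org/data/file.nc")  # pays the import cost
        t1 = time.perf_counter()
        parse_scheme("file:///tmp/file.nc")  # urllib.parse is now cached
        t2 = time.perf_counter()
        print(f"first call:  {t1 - t0:.6f} s")
        print(f"second call: {t2 - t1:.6f} s")
        print("cached:", "urllib.parse" in sys.modules)

After the first call the module sits in `sys.modules`, so later calls only pay a dictionary lookup, which is presumably why the series defers imports in set-up and I/O methods rather than in per-element code paths.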
""" + from packaging.version import Version + debug = is_log_level_debug(logger) # ------------------------------------------------------------ @@ -11365,7 +11367,7 @@ def _dask_chunks(self, array, ncvar, compressed, construct_type=None): # original Dask: (5, 15, 150, 5, 160) 9000000 # storage-aligned: (50, 100, 150, 20, 5) 75000000 # -------------------------------------------------------- - # 1) Initialise the Dask chunk shape + # 1) Initialise the Dask chunk shape from dask.array.core import normalize_chunks dask_chunks = normalize_chunks( diff --git a/cfdm/read_write/netcdf/netcdfwrite.py b/cfdm/read_write/netcdf/netcdfwrite.py index 34070df0d..a1a5b66f6 100644 --- a/cfdm/read_write/netcdf/netcdfwrite.py +++ b/cfdm/read_write/netcdf/netcdfwrite.py @@ -3,18 +3,13 @@ import os import re -#import dask.array as da import netCDF4 import numpy as np -from dask import config as dask_config -#from dask.array.core import normalize_chunks -#from dask.utils import parse_bytes -from packaging.version import Version -from uritools import uricompose, urisplit - -from ...data.dask_utils import cfdm_to_memory -from ...decorators import _manage_log_level_via_verbosity -from ...functions import abspath, dirname, integer_dtype + +from cfdm.data.dask_utils import cfdm_to_memory +from cfdm.decorators import _manage_log_level_via_verbosity +from cfdm.functions import abspath, dirname, integer_dtype + from .. import IOWrite from .constants import ( CF_QUANTIZATION_PARAMETER_LIMITS, @@ -3191,7 +3186,7 @@ def _write_data( # Still here? The write a normal (non-aggregation) variable # ------------------------------------------------------------ import dask.array as da - + if compressed: # Write data in its compressed form data = data.source().source() @@ -4971,6 +4966,8 @@ def write( See `cfdm.write` for examples. 
""" + from packaging.version import Version + logger.info(f"Writing to {fmt}") # pragma: no cover # Expand file name @@ -5112,7 +5109,7 @@ def write( # Parse the 'dataset_chunks' parameter if dataset_chunks != "contiguous": from dask.utils import parse_bytes - + try: self.write_vars["dataset_chunks"] = parse_bytes(dataset_chunks) except (ValueError, AttributeError): @@ -5317,6 +5314,8 @@ def _file_io_iteration( group, ): """Perform a file-writing iteration with the given settings.""" + from packaging.version import Version + # ------------------------------------------------------------ # Initiate file IO with given write variables # ------------------------------------------------------------ @@ -5682,8 +5681,9 @@ def _chunking_parameters(self, data, ncdimensions): d_dtype = d.dtype dtype = g["datatype"].get(d_dtype, d_dtype) + from dask import config as dask_config from dask.array.core import normalize_chunks - + with dask_config.set({"array.chunk-size": dataset_chunks}): chunksizes = normalize_chunks("auto", shape=d.shape, dtype=dtype) @@ -6202,6 +6202,8 @@ def _cfa_fragment_array_variables(self, data, cfvar): out = {"map": type(data)(aggregation_shape)} if data.nc_get_aggregation_fragment_type() == "uri": + from uritools import uricompose, urisplit + # -------------------------------------------------------- # Create 'uris' and 'idenftifiers' arrays # -------------------------------------------------------- diff --git a/cfdm/read_write/read.py b/cfdm/read_write/read.py index 350a46e99..6899ada3e 100644 --- a/cfdm/read_write/read.py +++ b/cfdm/read_write/read.py @@ -4,10 +4,9 @@ from os import walk from os.path import expanduser, expandvars, isdir, join -from uritools import urisplit +from cfdm.decorators import _manage_log_level_via_verbosity +from cfdm.functions import abspath, is_log_level_info -from ..decorators import _manage_log_level_via_verbosity -from ..functions import abspath, is_log_level_info from .abstract import ReadWrite from .exceptions import DatasetTypeError from .netcdf import NetCDFRead @@ -342,6 +341,8 @@ def _datasets(self): return + from uritools import urisplit + if followlinks and not recursive: raise ValueError( f"Can only set followlinks={followlinks!r} when " diff --git a/cfdm/units.py b/cfdm/units.py index 042e5a9cc..c5d598d2b 100644 --- a/cfdm/units.py +++ b/cfdm/units.py @@ -1,20 +1,7 @@ -import time -s = time.time() -print('0 units') -#from ctypes.util import find_library - from cfunits import Units as cfUnits from .core.meta import DocstringRewriteMeta -#_libpath = find_library("udunits2") -#if _libpath is None: -# raise FileNotFoundError( -# "cfdm UNIDATA UDUNITS-2. Can't find the 'udunits2' library." -# ) - - -print(' 9 units', time.time()-s); s = time.time() class Units(metaclass=DocstringRewriteMeta): """Store, combine, compare, and convert physical units. From 7821fc3024ab962c7fde0611fe0f85bab54806ce Mon Sep 17 00:00:00 2001 From: David Hassell Date: Wed, 22 Oct 2025 14:16:42 +0100 Subject: [PATCH 06/18] dev --- Changelog.rst | 2 +- cfdm/constants.py | 9 --- cfdm/core/meta/docstringrewrite.py | 3 + cfdm/data/abstract/filearray.py | 1 + cfdm/functions.py | 101 +++++++++++++++++------------ 5 files changed, 64 insertions(+), 52 deletions(-) diff --git a/Changelog.rst b/Changelog.rst index 3feae8e63..73bf1fb76 100644 --- a/Changelog.rst +++ b/Changelog.rst @@ -4,7 +4,7 @@ Version NEXTVERSION **2025-12-??** * Reduce the time taken to import `cfdm` - (https://github.com/NCAS-CMS/cfdm/issues/???) 
+ (https://github.com/NCAS-CMS/cfdm/issues/361) ---- diff --git a/cfdm/constants.py b/cfdm/constants.py index 6d1de4b57..1f359483d 100644 --- a/cfdm/constants.py +++ b/cfdm/constants.py @@ -2,15 +2,6 @@ import numpy as np -# -------------------------------------------------------------------- -# A dictionary of useful constants. -# -# Whilst the dictionary may be modified directly, it is safer to -# retrieve and set the values with the dedicated get-and-set -# functions. -# -------------------------------------------------------------------- -CONSTANTS = {} - # -------------------------------------------------------------------- # logging diff --git a/cfdm/core/meta/docstringrewrite.py b/cfdm/core/meta/docstringrewrite.py index b78ba2d0f..2de4be530 100644 --- a/cfdm/core/meta/docstringrewrite.py +++ b/cfdm/core/meta/docstringrewrite.py @@ -636,6 +636,9 @@ def _docstring_update( if substitutions: _docstring_substitutions[1] += 1 + # Remove duplicates + substitutions = set(substitutions) + # Special substitutions if "{{package}}" in substitutions: # Insert the name of the package diff --git a/cfdm/data/abstract/filearray.py b/cfdm/data/abstract/filearray.py index bdc11046e..99fec389b 100644 --- a/cfdm/data/abstract/filearray.py +++ b/cfdm/data/abstract/filearray.py @@ -417,6 +417,7 @@ def get_storage_options( ): if parsed_filename is None: from urllib.parse import urlparse + if filename is None: try: diff --git a/cfdm/functions.py b/cfdm/functions.py index 97ab196f8..c38447293 100644 --- a/cfdm/functions.py +++ b/cfdm/functions.py @@ -14,7 +14,7 @@ from uritools import uricompose, urisplit from . import __cf_version__, __file__, __version__, core -from .constants import CONSTANTS, ValidLogLevels +from .constants import ValidLogLevels from .core import DocstringRewriteMeta from .core.docstring import ( _docstring_substitution_definitions as _core_docstring_substitution_definitions, @@ -220,13 +220,6 @@ def _configuration(_Configuration, **kwargs): values are specified. """ - old = {name.lower(): val for name, val in CONSTANTS.items()} - - # Filter out 'None' kwargs from configuration() defaults. Note that this - # does not filter out '0' or 'True' values, which is important as the user - # might be trying to set those, as opposed to None emerging as default. - kwargs = {name: val for name, val in kwargs.items() if val is not None} - # Note values are the functions not the keyword arguments of same name: reset_mapping = { "new_atol": atol, @@ -235,6 +228,17 @@ def _configuration(_Configuration, **kwargs): "new_chunksize": chunksize, } + # Make sure that the constants dictionary is fully populated + for func in reset_mapping.values(): + func() + + old = ConstantAccess.constants(copy=True) + + # Filter out 'None' kwargs from configuration() defaults. Note that this + # does not filter out '0' or 'True' values, which is important as the user + # might be trying to set those, as opposed to None emerging as default. + kwargs = {name: val for name, val in kwargs.items() if val is not None} + old_values = {} try: @@ -1463,38 +1467,41 @@ def copy(self): class ConstantAccess(metaclass=DocstringRewriteMeta): - '''Base class to act as a function accessing package-wide constants. + """Base class to act as a function accessing package-wide constants. Subclasses must implement or inherit a method called `_parse` as - follows: + follows:: def _parse(cls, arg): - """Parse a new constant value. + '''Parse a new constant value. - :Parameter: + :Parameter: - cls: - This class. + cls: + This class. 
- arg: - The given new constant value. + arg: + The given new constant value. - :Returns: + :Returns: - A version of the new constant value suitable for - insertion into the `CONSTANTS` dictionary. + A version of the new constant value suitable for + insertion into the `_constants` dictionary. - """ + ''' - ''' + """ - # Define the dictionary that stores the constant values - _CONSTANTS = CONSTANTS + # Define the dictionary that stores all constant values. + # + # Sublasses must re-define this as an empty dictionary (unless + # it's OK for the child modify the parent's disctionary). + _constants = {} - # Define the `Constant` object that contains a constant value + # Define the `Constant` class that contains a constant value _Constant = Constant - # Define the key of the _CONSTANTS dictionary that contains the + # Define the key of the `_constants` dictionary that contains the # constant value _name = None @@ -1503,7 +1510,9 @@ def _parse(cls, arg): def __new__(cls, *arg): """Return a `Constant` instance during class creation.""" - old = cls._CONSTANTS.get(cls._name, cls._default) + name = cls._name + constants = cls.constants(copy=False) + old = constants.setdefault(name, cls._default) if arg: arg = arg[0] try: @@ -1512,7 +1521,7 @@ def __new__(cls, *arg): except AttributeError: pass - cls._CONSTANTS[cls._name] = cls._parse(cls, arg) + constants[name] = cls._parse(cls, arg) return cls._Constant(old, _func=cls) @@ -1547,6 +1556,15 @@ def __docstring_package_depth__(self): """ return 0 + @classmethod + def constants(cls, copy=True): + """TODO.""" + out = cls._constants + if copy: + out = out.copy() + + return out + class atol(ConstantAccess): """The numerical equality tolerance on absolute differences. @@ -1609,7 +1627,7 @@ class atol(ConstantAccess): """ - _name = "ATOL" + _name = "atol" _default = sys.float_info.epsilon def _parse(cls, arg): @@ -1628,7 +1646,7 @@ def _parse(cls, arg): :Returns: A version of the new constant value suitable for - insertion into the `CONSTANTS` dictionary. + insertion into the `_constants` dictionary. """ return float(arg) @@ -1695,7 +1713,7 @@ class rtol(ConstantAccess): """ - _name = "RTOL" + _name = "rtol" _default = sys.float_info.epsilon def _parse(cls, arg): @@ -1713,8 +1731,8 @@ def _parse(cls, arg): :Returns: - A version of the new constant value suitable for insertion - into the `CONSTANTS` dictionary. + A version of the new constant value suitable for + insertion into the `_constants` dictionary. """ return float(arg) @@ -1781,10 +1799,8 @@ class chunksize(ConstantAccess): """ - _name = "CHUNKSIZE" - - # 134217728 = 128 MiB - _default = 134217728 + _name = "chunksize" + _default = 134217728 # 134217728 = 128 MiB def _parse(cls, arg): """Parse a new constant value. @@ -1801,15 +1817,16 @@ def _parse(cls, arg): :Returns: - A version of the new constant value suitable for insertion - into the `CONSTANTS` dictionary. + A version of the new constant value suitable for + insertion into the `_constants` dictionary. """ from dask import config from dask.utils import parse_bytes + arg = parse_bytes(arg) config.set({"array.chunk-size": arg}) - return parse_bytes(arg) + return arg class log_level(ConstantAccess): @@ -1884,7 +1901,7 @@ class log_level(ConstantAccess): """ - _name = "LOG_LEVEL" + _name = "log_level" _default = logging.getLevelName(logging.getLogger().level) # Define the valid log levels @@ -1916,8 +1933,8 @@ def _parse(cls, arg): :Returns: - A version of the new constant value suitable for insertion - into the `CONSTANTS` dictionary. 
+ A version of the new constant value suitable for + insertion into the `_constants` dictionary. """ # Ensuring it is a valid level specifier to set & use, either From 82b6cb266d964eb25a735a0aa0ddb21e0273ded2 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Fri, 24 Oct 2025 11:18:32 +0100 Subject: [PATCH 07/18] dev --- cfdm/data/locks.py | 5 ++--- cfdm/data/netcdfindexer.py | 30 +++++++++++++++++------------- cfdm/functions.py | 5 ++++- 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/cfdm/data/locks.py b/cfdm/data/locks.py index f2309c14d..d159e4a65 100644 --- a/cfdm/data/locks.py +++ b/cfdm/data/locks.py @@ -1,4 +1,3 @@ -from dask.utils import SerializableLock +from threading import Lock -# Global lock for netCDFfile access -netcdf_lock = SerializableLock() +netcdf_lock = Lock() diff --git a/cfdm/data/netcdfindexer.py b/cfdm/data/netcdfindexer.py index f1c932487..f03c2c82a 100644 --- a/cfdm/data/netcdfindexer.py +++ b/cfdm/data/netcdfindexer.py @@ -3,20 +3,20 @@ Portions of this code were adapted from the `netCDF4` Python library, which carries the following MIT License: -Copyright 2008 Jeffrey Whitaker + Copyright 2008 Jeffrey Whitaker -https://opensource.org/license/mit + https://opensource.org/license/mit -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. """ @@ -25,8 +25,6 @@ from numbers import Integral import numpy as np -from netCDF4 import chartostring, default_fillvals -from netCDF4.utils import _safecast logger = logging.getLogger(__name__) @@ -274,6 +272,8 @@ def __getitem__(self, index): elif data.dtype.kind in "OSU": kind = data.dtype.kind if kind == "S": + from netCDF4 import chartostring # , default_fillvals + data = chartostring(data) # Assume that object arrays are arrays of strings @@ -364,6 +364,8 @@ def _check_safecast(self, attr, dtype, attributes): except ValueError: safe = False else: + from netCDF4.utils import _safecast + safe = _safecast(att, atta) if not safe: @@ -391,6 +393,8 @@ def _default_FillValue(self, dtype): The default ``_FillValue``. """ + from netCDF4 import default_fillvals + if dtype.kind in "OS": return default_fillvals["S1"] diff --git a/cfdm/functions.py b/cfdm/functions.py index c38447293..a88c4bf59 100644 --- a/cfdm/functions.py +++ b/cfdm/functions.py @@ -11,7 +11,6 @@ from os.path import join import numpy as np -from uritools import uricompose, urisplit from . 
import __cf_version__, __file__, __version__, core from .constants import ValidLogLevels @@ -584,6 +583,8 @@ def abspath(path, uri=None): ValueError: Can't set uri=False for path='http:///file.nc' """ + from uritools import uricompose, urisplit + u = urisplit(path) scheme = u.scheme path = u.path @@ -730,6 +731,8 @@ def dirname(path, normalise=False, uri=None, isdir=False, sep=False): '/data' """ + from uritools import uricompose, urisplit + u = urisplit(path) scheme = u.scheme path = u.path From 9909c2fc1a87900eb58217636435aa9f37b6b231 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Fri, 24 Oct 2025 13:10:58 +0100 Subject: [PATCH 08/18] dev --- cfdm/data/netcdf4array.py | 4 ++-- cfdm/data/utils.py | 9 ++++++++- cfdm/read_write/netcdf/netcdfread.py | 9 ++++++++- cfdm/read_write/netcdf/netcdfwrite.py | 5 ++++- 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/cfdm/data/netcdf4array.py b/cfdm/data/netcdf4array.py index 57a796c4d..5f515da71 100644 --- a/cfdm/data/netcdf4array.py +++ b/cfdm/data/netcdf4array.py @@ -1,5 +1,3 @@ -import netCDF4 - from . import abstract from .locks import netcdf_lock from .mixin import IndexMixin @@ -221,4 +219,6 @@ def open(self): address of the data within the file. """ + import netCDF4 + return super().open(netCDF4.Dataset, mode="r") diff --git a/cfdm/data/utils.py b/cfdm/data/utils.py index 4e79b7bbd..f77ba7dfb 100644 --- a/cfdm/data/utils.py +++ b/cfdm/data/utils.py @@ -3,7 +3,6 @@ from functools import lru_cache, partial from itertools import product -import cftime import numpy as np from cfdm.units import Units @@ -338,6 +337,8 @@ def convert_to_reftime(a, units=None, first_value=None): if first_value is not None: x = first_value else: + import cftime + x = cftime.DatetimeGregorian(1970, 1, 1) x_since = "days since " + "-".join(map(str, (x.year, x.month, x.day))) @@ -793,6 +794,8 @@ def dt2rt(array, units_out): [-- 685.5] """ + import cftime + isscalar = not np.ndim(array) array = cftime.date2num( @@ -846,6 +849,8 @@ def rt2dt(array, units_in): # mask return np.ma.masked_all((), dtype=object) + import cftime + units = units_in.units calendar = getattr(units_in, "calendar", "standard") @@ -890,6 +895,8 @@ def st2datetime(date_string, calendar=None): `cftime.datetime` """ + import cftime + if date_string.count("-") != 2: raise ValueError( "Input date-time string must contain at least a year, a month " diff --git a/cfdm/read_write/netcdf/netcdfread.py b/cfdm/read_write/netcdf/netcdfread.py index 525371d6c..34b0b28f8 100644 --- a/cfdm/read_write/netcdf/netcdfread.py +++ b/cfdm/read_write/netcdf/netcdfread.py @@ -14,7 +14,6 @@ from typing import Any from uuid import uuid4 -import netCDF4 import numpy as np from cfdm.data.netcdfindexer import netcdf_indexer @@ -599,6 +598,8 @@ def file_open(self, dataset, flatten=True, verbose=None): # If the file has a group structure then flatten it (CF>=1.8) # ------------------------------------------------------------ if flatten and self._dataset_has_groups(nc): + import netCDF4 + # Create a diskless, non-persistent container for the # flattened file flat_file = tempfile.NamedTemporaryFile( @@ -649,6 +650,8 @@ def _open_netCDF4(self, filename): `netCDF4.Dataset` """ + import netCDF4 + nc = netCDF4.Dataset(filename, "r") self.read_vars["file_opened_with"] = "netCDF4" return nc @@ -912,6 +915,8 @@ def default_netCDF_fill_value(self, ncvar): 9.969209968386869e+36 """ + import netCDF4 + data_type = self.read_vars["variables"][ncvar].dtype.str[-2:] return netCDF4.default_fillvals[data_type] @@ -11739,6 +11744,8 
@@ def _cache_data_elements(self, data, ncvar): # collapse (by concatenation) the outermost (fastest # varying) dimension. E.g. [['a','b','c']] becomes # ['abc'] + import netCDF4 + if dtype.kind == "U": value = value.astype("S") diff --git a/cfdm/read_write/netcdf/netcdfwrite.py b/cfdm/read_write/netcdf/netcdfwrite.py index a1a5b66f6..31900a4db 100644 --- a/cfdm/read_write/netcdf/netcdfwrite.py +++ b/cfdm/read_write/netcdf/netcdfwrite.py @@ -3,7 +3,6 @@ import os import re -import netCDF4 import numpy as np from cfdm.data.dask_utils import cfdm_to_memory @@ -2819,6 +2818,8 @@ def _write_netcdf_variable( if quantize_on_write: # Set "implemention" to this version of the netCDF-C # library + import netCDF4 + self.implementation.set_parameter( q, "implementation", @@ -4660,6 +4661,8 @@ def file_open(self, filename, mode, fmt, fields): A `netCDF4.Dataset` object for the file. """ + import netCDF4 + if fields and mode == "w": filename = os.path.abspath(filename) for f in fields: From ebf0c7775d58442b187273a119b048e7b7f41d94 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Mon, 27 Oct 2025 08:55:40 +0000 Subject: [PATCH 09/18] dev --- cfdm/core/functions.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/cfdm/core/functions.py b/cfdm/core/functions.py index e3e1423b8..b60694830 100644 --- a/cfdm/core/functions.py +++ b/cfdm/core/functions.py @@ -1,9 +1,4 @@ -import os -import platform -import sys -from pickle import dumps, loads - -from . import __cf_version__, __file__, __version__ +from . import __cf_version__ def environment(display=True, paths=True): @@ -46,8 +41,14 @@ def environment(display=True, paths=True): cfdm.core: 1.12.2.0 """ - import numpy as np + import os import packaging + import platform + import sys + + import numpy as np + + from . import __file__, __version__ dependency_version_paths_mapping = { "Platform": (platform.platform(), ""), @@ -104,4 +105,6 @@ def deepcopy(x): b) be "not slower, sometimes much faster" than `copy.deepcopy`. """ + from pickle import dumps, loads + return loads(dumps(x)) From af66327efa604e71d7fecb6d93288774593a3eca Mon Sep 17 00:00:00 2001 From: David Hassell Date: Mon, 27 Oct 2025 09:44:27 +0000 Subject: [PATCH 10/18] dev --- cfdm/core/functions.py | 2 +- cfdm/mixin/fielddomain.py | 5 ++++- cfdm/read_write/netcdf/flatten/flatten.py | 5 +++-- cfdm/read_write/netcdf/netcdfread.py | 10 ++++++++-- cfdm/read_write/netcdf/netcdfwrite.py | 5 ++++- 5 files changed, 20 insertions(+), 7 deletions(-) diff --git a/cfdm/core/functions.py b/cfdm/core/functions.py index b60694830..3ebf22e40 100644 --- a/cfdm/core/functions.py +++ b/cfdm/core/functions.py @@ -42,11 +42,11 @@ def environment(display=True, paths=True): """ import os - import packaging import platform import sys import numpy as np + import packaging from . 
import __file__, __version__ diff --git a/cfdm/mixin/fielddomain.py b/cfdm/mixin/fielddomain.py index 06b84bd5d..ed5363c57 100644 --- a/cfdm/mixin/fielddomain.py +++ b/cfdm/mixin/fielddomain.py @@ -1,5 +1,4 @@ import logging -import re from ..decorators import _manage_log_level_via_verbosity @@ -480,6 +479,8 @@ def _unique_construct_names(self): 'domainaxis2': 'key%domainaxis2'} """ + import re + key_to_name = {} ignore = self.constructs._ignore @@ -517,6 +518,8 @@ def _unique_domain_axis_identities(self): 'domainaxis2': 'time(1)'} """ + import re + key_to_name = {} name_to_keys = {} diff --git a/cfdm/read_write/netcdf/flatten/flatten.py b/cfdm/read_write/netcdf/flatten/flatten.py index 1ca120f25..2a4c16c5e 100644 --- a/cfdm/read_write/netcdf/flatten/flatten.py +++ b/cfdm/read_write/netcdf/flatten/flatten.py @@ -13,9 +13,7 @@ """ -import hashlib import logging -import re import warnings from .config import ( @@ -147,6 +145,7 @@ def parse_attribute(name, attribute): The parsed string. """ + import re def subst(s): """Substitute tokens for WORD and SEP.""" @@ -1616,6 +1615,8 @@ def generate_flattened_name(self, input_group, orig_name): The new valid name of the dimension or variable. """ + import hashlib + # If element is at root: no change if self.parent(input_group) is None: new_name = orig_name diff --git a/cfdm/read_write/netcdf/netcdfread.py b/cfdm/read_write/netcdf/netcdfread.py index 34b0b28f8..6942847f2 100644 --- a/cfdm/read_write/netcdf/netcdfread.py +++ b/cfdm/read_write/netcdf/netcdfread.py @@ -1,6 +1,5 @@ import logging import operator -import re import struct import subprocess import tempfile @@ -829,6 +828,8 @@ def dataset_type(cls, dataset, allowed_dataset_types): * `None` for anything else. """ + import re + from uritools import urisplit # Assume that non-local URIs are netCDF or zarr @@ -1082,6 +1083,8 @@ def read( The field or domain constructs in the file. """ + import re + from packaging.version import Version debug = is_log_level_debug(logger) @@ -7241,6 +7244,8 @@ def _parse_cell_methods(self, cell_methods_string, field_ncvar=None): ... 't: mean over ENSO years)') """ + import re + if field_ncvar: attribute = {field_ncvar + ":cell_methods": cell_methods_string} @@ -9485,6 +9490,7 @@ def _parse_x( # ============================================================ # Thanks to Alan Iwi for creating these regular expressions # ============================================================ + import re def subst(s): """Substitutes WORD and SEP tokens for regular expressions. @@ -11946,7 +11952,7 @@ def _set_quantization(self, parent, ncvar): :Returns: - `None`ppp + `None` """ g = self.read_vars diff --git a/cfdm/read_write/netcdf/netcdfwrite.py b/cfdm/read_write/netcdf/netcdfwrite.py index 31900a4db..7ffee840e 100644 --- a/cfdm/read_write/netcdf/netcdfwrite.py +++ b/cfdm/read_write/netcdf/netcdfwrite.py @@ -1,7 +1,6 @@ import copy import logging import os -import re import numpy as np @@ -3366,6 +3365,8 @@ def _write_field_or_domain( `None` """ + import re + g = self.write_vars ncdim_size_to_spanning_constructs = [] seen = g["seen"] @@ -4467,6 +4468,8 @@ def _write_global_attributes(self, fields): `None` """ + import re + g = self.write_vars # ------------------------------------------------------------ From 39a84f620078985606d12062b75f50e55a2a31c6 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 4 Nov 2025 17:45:49 +0000 Subject: [PATCH 11/18] Remove dead code Co-authored-by: Sadie L. 
Bartholomew --- cfdm/data/mixin/compressedarraymixin.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cfdm/data/mixin/compressedarraymixin.py b/cfdm/data/mixin/compressedarraymixin.py index 71fd4eed3..5b97fe515 100644 --- a/cfdm/data/mixin/compressedarraymixin.py +++ b/cfdm/data/mixin/compressedarraymixin.py @@ -75,8 +75,6 @@ def to_dask_array(self, chunks="auto"): import dask.array as da from dask import config - - # from dask.array.core import getter from dask.base import tokenize getter = da.core.getter From 1967d6c3e8b04a41f4b4de5370ac76dc504ecc12 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 4 Nov 2025 17:46:23 +0000 Subject: [PATCH 12/18] Remove dead code Co-authored-by: Sadie L. Bartholomew --- cfdm/data/aggregatedarray.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cfdm/data/aggregatedarray.py b/cfdm/data/aggregatedarray.py index 376def47f..0afbf070f 100644 --- a/cfdm/data/aggregatedarray.py +++ b/cfdm/data/aggregatedarray.py @@ -9,8 +9,6 @@ from .netcdfindexer import netcdf_indexer from .utils import chunk_locations, chunk_positions -# from uritools import isuri, uricompose - class AggregatedArray(abstract.FileArray): """An array stored in a CF aggregation variable. From 3af83fc075ca5851d098be6a0599ff74682732af Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 4 Nov 2025 17:46:49 +0000 Subject: [PATCH 13/18] Typo Co-authored-by: Sadie L. Bartholomew --- cfdm/core/meta/docstringrewrite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfdm/core/meta/docstringrewrite.py b/cfdm/core/meta/docstringrewrite.py index 2de4be530..3a1c7b12e 100644 --- a/cfdm/core/meta/docstringrewrite.py +++ b/cfdm/core/meta/docstringrewrite.py @@ -612,7 +612,7 @@ def _docstring_update( substitutions. class_docstring, `str` or `None` - If docstring of a class, or `None` of a method + If docstring of a class, or `None` if a method docstring is being updated. :Returns: From 929c9eadb579fbe74ae74d3d989e2472dd8cb26b Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 4 Nov 2025 17:49:22 +0000 Subject: [PATCH 14/18] typo Co-authored-by: Sadie L. Bartholomew --- cfdm/functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfdm/functions.py b/cfdm/functions.py index a88c4bf59..29a39cc57 100644 --- a/cfdm/functions.py +++ b/cfdm/functions.py @@ -1498,7 +1498,7 @@ def _parse(cls, arg): # Define the dictionary that stores all constant values. # # Sublasses must re-define this as an empty dictionary (unless - # it's OK for the child modify the parent's disctionary). + # it's OK for the child to modify the parent's dictionary). _constants = {} # Define the `Constant` class that contains a constant value From 767982707eafcc3277ec4a4a420acab0d6c26b98 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 4 Nov 2025 17:51:50 +0000 Subject: [PATCH 15/18] Remove dead code Co-authored-by: Sadie L. Bartholomew --- cfdm/data/creation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cfdm/data/creation.py b/cfdm/data/creation.py index dc8260f7a..265699714 100644 --- a/cfdm/data/creation.py +++ b/cfdm/data/creation.py @@ -1,6 +1,5 @@ """Functions used during the creation of `Data` objects.""" -# import dask.array as da import numpy as np # from dask.base import is_dask_collection From d50ec73ec28ef1cc3911f3abf17fb840069ac94c Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 4 Nov 2025 17:52:11 +0000 Subject: [PATCH 16/18] Remove dead code Co-authored-by: Sadie L. 
Bartholomew --- cfdm/data/creation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cfdm/data/creation.py b/cfdm/data/creation.py index 265699714..6fb9ca2e9 100644 --- a/cfdm/data/creation.py +++ b/cfdm/data/creation.py @@ -2,8 +2,6 @@ import numpy as np -# from dask.base import is_dask_collection - def to_dask(array, chunks, **from_array_options): """Create a `dask` array. From 6fbd05bd3ae87650320b81a0c085309969c18f2c Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 4 Nov 2025 17:52:39 +0000 Subject: [PATCH 17/18] Remove dead code Co-authored-by: Sadie L. Bartholomew --- cfdm/data/netcdfindexer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfdm/data/netcdfindexer.py b/cfdm/data/netcdfindexer.py index f03c2c82a..8a2d4cd0a 100644 --- a/cfdm/data/netcdfindexer.py +++ b/cfdm/data/netcdfindexer.py @@ -272,7 +272,7 @@ def __getitem__(self, index): elif data.dtype.kind in "OSU": kind = data.dtype.kind if kind == "S": - from netCDF4 import chartostring # , default_fillvals + from netCDF4 import chartostring data = chartostring(data) From a032396dd6f625e611ad30faaad885472d36a3a7 Mon Sep 17 00:00:00 2001 From: David Hassell Date: Tue, 4 Nov 2025 17:54:05 +0000 Subject: [PATCH 18/18] ConstantAccess.constants docstring Co-authored-by: Sadie L. Bartholomew --- cfdm/functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfdm/functions.py b/cfdm/functions.py index 29a39cc57..4c515f987 100644 --- a/cfdm/functions.py +++ b/cfdm/functions.py @@ -1561,7 +1561,7 @@ def __docstring_package_depth__(self): @classmethod def constants(cls, copy=True): - """TODO.""" + """See docstring to `ConstantAccess`.""" out = cls._constants if copy: out = out.copy()
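Editorial note on the `ConstantAccess` changes in patches 06, 14 and 18, not part of the series itself: the rewritten class docstring and the new `constants` class method describe a contract in which each subclass supplies `_name`, `_default`, a `_constants` dictionary and a `_parse` method, and calling the class stores the parsed new value while returning the old one. The sketch below is a simplified, self-contained imitation of that contract only: it returns the raw old value rather than a `Constant` object, uses plain classmethod signatures, and the `max_files` constant is invented for illustration::

    class ConstantAccessSketch:
        """Simplified imitation of the ConstantAccess contract."""

        # Registry of constant values.  A subclass that must not write
        # into its parent's registry re-defines this as a new dict.
        _constants = {}

        # The registry key and default value for this constant
        _name = None
        _default = None

        def __new__(cls, *arg):
            constants = cls.constants(copy=False)
            old = constants.setdefault(cls._name, cls._default)
            if arg:
                # Store the parsed new value, but return the old one
                constants[cls._name] = cls._parse(arg[0])

            return old

        @classmethod
        def constants(cls, copy=True):
            """Return the registry of constants for this class."""
            out = cls._constants
            if copy:
                out = out.copy()

            return out

        @classmethod
        def _parse(cls, arg):
            """Convert/validate a new value before it is stored."""
            raise NotImplementedError


    class max_files(ConstantAccessSketch):
        """An invented constant, used only for this illustration."""

        _constants = {}  # a private registry, not the parent's
        _name = "max_files"
        _default = 100

        @classmethod
        def _parse(cls, arg):
            return int(arg)


    print(max_files())            # 100  (the default)
    print(max_files(250))         # 100  (old value returned, 250 stored)
    print(max_files())            # 250
    print(max_files.constants())  # {'max_files': 250}

The comment introduced in patch 06 notes that a subclass should re-define `_constants` as an empty dictionary when it must not modify its parent's registry; the sketch's `max_files` does exactly that.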
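Editorial note on the dataset-chunking hunks in `nc_set_dataset_chunksizes`, `chunksize._parse` and `_chunking_parameters`, not part of the series itself: the deferral changes keep the existing logic, in which a human-readable size such as `'4 MiB'` is converted to bytes with `dask.utils.parse_bytes` and then steers `normalize_chunks('auto', ...)` through the `array.chunk-size` configuration. A small sketch of that interaction, with an arbitrary example shape and dtype::

    import numpy as np
    from dask import config as dask_config
    from dask.array.core import normalize_chunks
    from dask.utils import parse_bytes

    # A human-readable chunk size in bytes: "4 MiB" -> 4194304
    dataset_chunks = parse_bytes("4 MiB")

    # "auto" chunking reads the "array.chunk-size" setting, so each Dask
    # chunk of this float64 array is limited to roughly 4 MiB; the exact
    # chunk shape is chosen by Dask's auto-chunking algorithm.
    with dask_config.set({"array.chunk-size": dataset_chunks}):
        chunksizes = normalize_chunks(
            "auto", shape=(400, 300, 250), dtype=np.dtype("float64")
        )

    print(chunksizes)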
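Closing editorial note: the series removes the ad-hoc `time.time()` prints from `cfdm/mixin/__init__.py`, `cfdm/mixin/container.py`, `cfdm/mixin/netcdf.py`, `cfdm/mixin/propertiesdata.py` and `cfdm/units.py`, and the Changelog entry claims a reduced `import cfdm` time (issue 361). A non-invasive way to check such a claim is CPython's built-in import profiler, `-X importtime` (Python 3.7+); the report-sorting below is an invented convenience and assumes `cfdm` is importable in the target environment::

    import subprocess
    import sys

    # Run 'import cfdm' in a fresh interpreter with the import profiler
    # enabled.  Each imported module gets a line on stderr of the form
    # "import time: <self us> | <cumulative us> | <module>".
    result = subprocess.run(
        [sys.executable, "-X", "importtime", "-c", "import cfdm"],
        capture_output=True,
        text=True,
    )

    entries = []
    for line in result.stderr.splitlines():
        if not line.startswith("import time:"):
            continue
        parts = line.split("|")
        try:
            cumulative_us = int(parts[1])
        except ValueError:
            continue  # the header line has no numeric fields
        entries.append((cumulative_us, parts[2].strip()))

    # The ten imports with the largest cumulative cost
    for cumulative_us, module in sorted(entries, reverse=True)[:10]:
        print(f"{cumulative_us:>10} us  {module}")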