diff --git a/blimgui.zip b/blimgui.zip index f0c5aaa..87c5144 100644 Binary files a/blimgui.zip and b/blimgui.zip differ diff --git a/blimgui/__init__.py b/blimgui/__init__.py index c982978..587d346 100644 --- a/blimgui/__init__.py +++ b/blimgui/__init__.py @@ -1,6 +1,22 @@ +# ===== BLImGui bootstrap: force bundled deps (BL4 / Python 3.14) ===== +import sys +from pathlib import Path +import importlib + +base = Path(__file__).parent.absolute() +dist64 = str(base / "dist64") +dist32 = str(base / "dist32") + +# Remove any previous occurrences so we can control priority +for p in (dist64, dist32): + if p in sys.path: + sys.path.remove(p) + +# Do NOT import imgui/pydantic before this finishes +# =================================================== + import site from collections.abc import Callable -from pathlib import Path from typing import Any from mods_base import Game, Library, build_mod, options @@ -8,17 +24,102 @@ THREADED_RENDERING = False +# Select the correct dist folder and put it FIRST on sys.path match Game.get_tree(): case Game.Oak: - site.addsitedir(str(Path(__file__).parent.absolute() / "dist64")) + sys.path.insert(0, dist64) THREADED_RENDERING = True + case Game.Willow2: - site.addsitedir(str(Path(__file__).parent.absolute() / "dist32")) + sys.path.insert(0, dist32) + case Game.Willow1: - site.addsitedir(str(Path(__file__).parent.absolute() / "dist32")) + sys.path.insert(0, dist32) + + case Game.Oak2: # BL4 + sys.path.insert(0, dist64) + THREADED_RENDERING = False + case _: - raise RuntimeError("Unknown Game.") + raise RuntimeError(f"Unknown Game: {Game.get_tree()}") + +# 🔴 Force-load the REAL pydantic_core binary from our dist64 and override any shadow package +try: + import importlib.util + import importlib.machinery + from pathlib import Path + + pdc_path = Path(dist64) / "pydantic_core" + + # Find the .pyd file (e.g. 
_pydantic_core.cp314-win_amd64.pyd) + pyd_files = list(pdc_path.glob("_pydantic_core*.pyd")) + if not pyd_files: + raise RuntimeError("No _pydantic_core*.pyd found in dist64/pydantic_core") + + pyd_file = pyd_files[0] + + # Load the extension module directly + spec = importlib.util.spec_from_file_location("pydantic_core._pydantic_core", pyd_file) + if spec is None or spec.loader is None: + raise RuntimeError("Failed to create spec for pydantic_core binary") + + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + # Create a proper package object for pydantic_core + pkg = importlib.util.module_from_spec( + importlib.machinery.ModuleSpec("pydantic_core", loader=None, is_package=True) + ) + pkg.__path__ = [str(pdc_path)] + + # Expose symbols from the binary + pkg._pydantic_core = module + for name in ("PydanticCustomError", "PydanticKnownError", "__version__"): + if hasattr(module, name): + setattr(pkg, name, getattr(module, name)) + + # 🔧 Re-export common symbols expected by pydantic + for name in ("CoreSchema", "PydanticOmit", "to_jsonable_python"): + if hasattr(module, name): + setattr(pkg, name, getattr(module, name)) + + # 🔧 ALSO load the core_schema Python module and attach it + core_schema_spec = importlib.util.spec_from_file_location( + "pydantic_core.core_schema", pdc_path / "core_schema.py" + ) + if core_schema_spec is None or core_schema_spec.loader is None: + raise RuntimeError("Failed to load pydantic_core.core_schema") + + core_schema_mod = importlib.util.module_from_spec(core_schema_spec) + core_schema_spec.loader.exec_module(core_schema_mod) + + pkg.core_schema = core_schema_mod + + # 🔧 Expose MISSING sentinel expected by pydantic + try: + from pydantic._internal import _utils as _pyd_utils + + pkg.MISSING = _pyd_utils.MISSING + except Exception: + # Fallback: create a unique sentinel + class _MissingSentinel: + pass + + + pkg.MISSING = _MissingSentinel() + + # Register in sys.modules, overriding anything the game 
provided + sys.modules["pydantic_core"] = pkg + sys.modules["pydantic_core._pydantic_core"] = module + sys.modules["pydantic_core.core_schema"] = core_schema_mod + + print("BLImGui forced REAL pydantic_core from:", pyd_file) + +except Exception as e: + print("BLImGui failed to force-load REAL pydantic_core:", e) + +# ONLY NOW import imgui (and anything that pulls pydantic) from imgui_bundle import ( hello_imgui, # type: ignore imgui, diff --git a/blimgui/backends/hook_based.py b/blimgui/backends/hook_based.py index fe3b6d2..d22f6b8 100644 --- a/blimgui/backends/hook_based.py +++ b/blimgui/backends/hook_based.py @@ -12,6 +12,8 @@ HOOK_ADDRESSES = { Game.Willow1: "Engine.GameViewportClient:Tick", Game.Willow2: "WillowGame.WillowGameViewportClient:Tick", + # BL4: + Game.Oak2: "/Script/Engine.CameraModifier:BlueprintModifyCamera", } except AttributeError: # Fallback while the SDK's nightly is not released @@ -27,7 +29,7 @@ def initialize(self) -> None: add_hook( hook_addr, - Type.POST_UNCONDITIONAL, + Type.POST, # use POST like BL4 mods "blimgui_hooked_render", self.render, ) diff --git a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/WHEEL b/blimgui/dist64/imgui_bundle-1.6.2.dist-info/WHEEL deleted file mode 100644 index 5e77734..0000000 --- a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: scikit-build-core 0.11.2 -Root-Is-Purelib: false -Tag: cp313-cp313-win_amd64 - diff --git a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/direct_url.json b/blimgui/dist64/imgui_bundle-1.6.2.dist-info/direct_url.json deleted file mode 100644 index cc1b910..0000000 --- a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/direct_url.json +++ /dev/null @@ -1 +0,0 @@ -{"dir_info": {}, "url": "file:///C:/Users/kayli/imgui_bundle"} \ No newline at end of file diff --git a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/INSTALLER b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/INSTALLER similarity index 100% rename from 
blimgui/dist64/imgui_bundle-1.6.2.dist-info/INSTALLER rename to blimgui/dist64/imgui_bundle-1.92.5.dist-info/INSTALLER diff --git a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/METADATA b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/METADATA similarity index 70% rename from blimgui/dist64/imgui_bundle-1.6.2.dist-info/METADATA rename to blimgui/dist64/imgui_bundle-1.92.5.dist-info/METADATA index a6c740d..cd7d675 100644 --- a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/METADATA +++ b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/METADATA @@ -1,7 +1,7 @@ Metadata-Version: 2.2 Name: imgui-bundle -Version: 1.6.2 -Summary: Dear ImGui Bundle: easily create ImGui applications in Python and C++. Batteries included! +Version: 1.92.5 +Summary: Dear ImGui Bundle: From expressive code to powerful GUIs in no time. A fast, feature-rich, cross-platform toolkit for C++ and Python. Author-Email: Pascal Thomet Classifier: Development Status :: 5 - Production/Stable Classifier: License :: OSI Approved :: MIT License @@ -9,9 +9,6 @@ Classifier: Topic :: Scientific/Engineering Classifier: Typing :: Typed Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: 3.10 -Classifier: Programming Language :: Python :: 3.11 -Classifier: Programming Language :: Python :: 3.12 Project-URL: Homepage, https://pthom.github.io/imgui_bundle/ Project-URL: Documentation, https://pthom.github.io/imgui_bundle/ Project-URL: Repository, https://github.com/pthom/imgui_bundle @@ -19,11 +16,46 @@ Project-URL: Issues, https://github.com/pthom/imgui_bundle/issues Project-URL: Changelog, https://github.com/pthom/imgui_bundle/blob/main/CHANGELOG.md Requires-Python: >=3.10 Requires-Dist: numpy -Requires-Dist: munch -Requires-Dist: glfw -Requires-Dist: PyOpenGL -Requires-Dist: pillow -Requires-Dist: pydantic +Provides-Extra: matplotlib +Requires-Dist: matplotlib; extra == "matplotlib" +Provides-Extra: imgproc +Requires-Dist: 
pillow; extra == "imgproc" +Requires-Dist: opencv-python; extra == "imgproc" +Provides-Extra: pydantic +Requires-Dist: pydantic>=2.0.0; extra == "pydantic" +Provides-Extra: opengl +Requires-Dist: PyOpenGL; extra == "opengl" +Provides-Extra: glfw +Requires-Dist: glfw; extra == "glfw" +Provides-Extra: sdl2 +Requires-Dist: PySDL2; extra == "sdl2" +Provides-Extra: sdl3 +Requires-Dist: PySDL3; extra == "sdl3" +Provides-Extra: wgpu +Requires-Dist: wgpu; extra == "wgpu" +Requires-Dist: rendercanvas; extra == "wgpu" +Provides-Extra: pyglet +Requires-Dist: pyglet; extra == "pyglet" +Provides-Extra: all-backends +Requires-Dist: PyOpenGL; extra == "all-backends" +Requires-Dist: glfw; extra == "all-backends" +Requires-Dist: PySDL2; extra == "all-backends" +Requires-Dist: PySDL3; extra == "all-backends" +Requires-Dist: wgpu; extra == "all-backends" +Requires-Dist: pyglet; extra == "all-backends" +Provides-Extra: full +Requires-Dist: matplotlib; extra == "full" +Requires-Dist: pillow; extra == "full" +Requires-Dist: opencv-python; extra == "full" +Requires-Dist: PyOpenGL; extra == "full" +Requires-Dist: glfw; extra == "full" +Requires-Dist: PySDL2; extra == "full" +Requires-Dist: PySDL3; extra == "full" +Requires-Dist: wgpu; extra == "full" +Requires-Dist: rendercanvas; extra == "full" +Requires-Dist: pyglet; extra == "full" +Requires-Dist: pydantic>=2.0.0; extra == "full" +Requires-Dist: PyGLM; extra == "full" Provides-Extra: test Requires-Dist: pytest; extra == "test" Description-Content-Type: text/markdown @@ -50,42 +82,90 @@ the interactive manual!* **Key Features** -- **A lot of widgets and libraries:** All of Dear ImGui along with a - suite of additional libraries for plotting, node editing, markdown - rendering, and much more. +- **Python Bindings:** Using Dear ImGui Bundle in Python is extremely + easy. 
Here is a beginner-friendly introduction: [Immediate Mode GUI + with Python and Dear ImGui + Bundle](https://github.com/pthom/imgui_bundle/blob/main/docs/docs_md/imgui_python_intro.md) -- **Always up-to-date:** The libraries are always very close to the - latest version of Dear ImGui. This is also true for Python - developers, since the bindings are automatically generated. +- **Cross-platform in C++ and Python:** Works on Windows, Linux, macOS, + iOS, Android, and WebAssembly! -- **Interactive Demos and Documentation:** Quickly get started with - our interactive manual and demos that showcase the capabilities of - the pack. Read or copy-paste the source code (Python and C++) - directly from the interactive manual! +- **Easy to use, yet very powerful:** Start your first app in 3 lines. + The Immediate Mode GUI (IMGUI) paradigm is simple and powerful, + letting you focus on the creative aspects of your projects. -- **Cross-platform:** Works on Windows, Linux, macOS, iOS, Android, - and WebAssembly! +- **A lot of widgets and libraries:** All of Dear ImGui along with a + suite of additional libraries for plotting, node editing, markdown + rendering, and much more. -- **Easy to use, yet very powerful:** Start your first app in 3 lines. - The Immediate Mode GUI (IMGUI) paradigm is simple and powerful, - letting you focus on the creative aspects of your projects. +- **Web ready**: Develop full web applications, in C++ via Emscripten; + or in Python thanks to ImGui Bundle’s integration within *Pyodide* -- **Fast:** Rendering is done via OpenGL (or any other renderer you - choose), through native code. +- **Always up-to-date:** The libraries are always very close to the + latest version of Dear ImGui. This is also true for Python developers, + since the bindings are automatically generated. 
-- **Beautifully documented Python bindings and stubs:** The Python - bindings stubs reflect the C++ API and documentation, serving as a - reference and aiding autocompletion in your IDE. See for example the - [stubs for - imgui](https://github.com/pthom/imgui_bundle/blob/main/bindings/imgui_bundle/imgui/__init__.pyi), - and [for - hello\_imgui](https://github.com/pthom/imgui_bundle/blob/main/bindings/imgui_bundle/hello_imgui.pyi) - (which complete the [hello\_imgui - manual](https://pthom.github.io/hello_imgui/book/intro.html)). +- **Interactive Demos and Documentation:** Quickly get started with our + interactive manual and demos that showcase the capabilities of the + pack. Read or copy-paste the source code (Python and C++) directly + from the interactive manual! + +- **Fast:** Rendering is done via OpenGL (or any other renderer you + choose), through native code. + +- **Beautifully documented Python bindings and stubs:** The Python + bindings stubs reflect the C++ API and documentation, serving as a + reference and aiding autocompletion in your IDE. See for example the + [stubs for + imgui](https://github.com/pthom/imgui_bundle/blob/main/bindings/imgui_bundle/imgui/__init__.pyi), + and [for + hello\_imgui](https://github.com/pthom/imgui_bundle/blob/main/bindings/imgui_bundle/hello_imgui.pyi) + (which complete the [hello\_imgui + manual](https://pthom.github.io/hello_imgui/book/intro.html)). For a detailed look at each feature and more information, explore the sections listed in the Table of Contents. +**Interactive Manual** + +Click on the animated demonstration below to launch the fully +interactive manual. + +
+ +
Dear ImGui Bundle interactive manual (in C++, via +Emscripten)
+
+ +**Online playground in Pure Python (via Pyodide)** + +Since ImGui Bundle is available in Python and Pyodide, an [online +playground](https://traineq.org/imgui_bundle_online/projects/imgui_bundle_playground/) +will enable you to run and edit various ImGui applications in the +browser without any setup. + +
+ +
ImGui Bundle online playground (in Python, via +Pyodide)
+
+ +See [this +page](https://code-ballads.net/dear-imgui-bundle-build-real-time-python-web-applications-with-zero-fuss/) +for more information about availability of ImGui Bundle in Pyodide. + +**Full manual (PDF)** + +View or download the [full +pdf](https://raw.githubusercontent.com/pthom/imgui_related_docs/refs/heads/main/manuals/imgui_bundle_manual.pdf) +for this manual. + +You may feed it into a LLM such as ChatGPT, so that it can help you when +using ImGui bundle. + **Example code** *A hello world example with Dear ImGui Bundle* @@ -104,17 +184,6 @@ hello](https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_ #include "imgui.h" int main() { ImmApp::Run([] { ImGui::Text("Hello, world!"); }); } -**Interactive Manual** - -Click on the animated demonstration below to launch the fully -interactive manual. - -
- -
Dear ImGui Bundle interactive manual
-
- # What’s in the pack? Dear ImGui Bundle includes the following libraries, which are available @@ -126,7 +195,7 @@ in C++ *and* in Python: - +

Dear ImGui : Bloat-free Graphical User interface with minimal dependencies

@@ -134,7 +203,7 @@ Graphical User interface with minimal dependencies

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_imgui.jpg" alt="demo widgets imgui" />

- +

ImGui Test Engine: Dear ImGui Tests & Automation Engine

@@ -142,7 +211,7 @@ Engine: Dear ImGui Tests & Automation Engine

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_testengine.jpg" alt="demo testengine" />

- +

Hello ImGui: cross-platform Gui apps with the simplicity of a "Hello World" @@ -153,7 +222,7 @@ alt="demo docking" /> demo custom background

- +

ImPlot: Immediate Mode Plotting

@@ -161,7 +230,7 @@ Plotting

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/battery_implot.jpg" alt="battery implot" />

- +

ImPlot3D: Immediate Mode 3D Plotting

@@ -169,7 +238,7 @@ href="https://github.com/brenocq/implot3d">ImPlot3D: Immediate Mode src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/battery_implot3d.jpg" alt="battery implot3d" />

- +

ImGuizmo: Immediate mode 3D gizmo for scene editing and other controls based on @@ -178,7 +247,7 @@ Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_gizmo.jpg" alt="demo gizmo" />

- +

ImGuiColorTextEdit: Colorizing text editor for ImGui

@@ -186,7 +255,7 @@ Colorizing text editor for ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_editor.jpg" alt="demo widgets editor" />

- +

imgui-node-editor: Node Editor built using Dear ImGui

@@ -194,7 +263,7 @@ Node Editor built using Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_node_editor.jpg" alt="demo node editor" />

- +

imgui_md: Markdown renderer for Dear ImGui using MD4C parser

@@ -202,7 +271,7 @@ renderer for Dear ImGui using MD4C parser

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_md.jpg" alt="demo widgets md" />

- +

ImmVision: Immediate image debugger and insights

@@ -212,7 +281,7 @@ alt="demo immvision process 1" /> demo immvision process 2

- +

NanoVG: Antialiased 2D vector drawing library on top of OpenGL for UI and @@ -221,7 +290,7 @@ visualizations

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/nanovg_full_demo.jpg" alt="nanovg full demo" />

- +

imgui_tex_inspect: A texture inspector tool for Dear ImGui

@@ -229,7 +298,7 @@ A texture inspector tool for Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_imgui_tex_inspector.jpg" alt="demo imgui tex inspector" />

- +

ImFileDialog: A file dialog library for Dear ImGui

@@ -237,7 +306,7 @@ file dialog library for Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_imfiledialog.jpg" alt="demo widgets imfiledialog" />

- +

portable-file-dialogs OS native file dialogs library (C++11, single-header)

@@ -245,7 +314,7 @@ href="https://github.com/samhocevar/portable-file-dialogs">portable-file-dialogs src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_portablefiledialogs.jpg" alt="demo widgets portablefiledialogs" />

- +

imgui-knobs: Knobs widgets for ImGui

@@ -253,7 +322,7 @@ widgets for ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_knobs.jpg" alt="demo widgets knobs" />

- +

imspinner: Set of nice spinners for imgui

@@ -261,7 +330,7 @@ spinners for imgui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_spinners.jpg" alt="demo widgets spinners" />

- +

imgui_toggle: A toggle switch widget for Dear ImGui

@@ -269,7 +338,7 @@ switch widget for Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_toggle.jpg" alt="demo widgets toggle" />

- +

ImCoolBar: A Cool bar for Dear ImGui

@@ -277,7 +346,7 @@ for Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_coolbar.jpg" alt="demo widgets coolbar" />

- +

imgui-command-palette: A Sublime Text or VSCode style command palette in ImGui

@@ -294,43 +363,41 @@ A big thank you to their authors for their awesome work! ## Install from pypi + # Minimal install pip install imgui-bundle - pip install opencv-python - pip install pyGLM -- imgui\_bundle: Binary wheels are available for Windows, MacOS and - Linux. If a compilation from source is needed, the build process - might take up to 5 minutes, and will require an internet connection. + # or to get all optional features: + pip install "imgui-bundle[full]" -- OpenCV: in order to run the immvision module, install opencv-python. - The alternative OpenCV versions, such as opencv-python-headless - (headless) opencv-contrib-python (with extra modules) also work. - -- pyGLM: in order to run the demo, install pyGLM +Binary wheels are available for Windows, MacOS and Linux. If a +compilation from source is needed, the build process might take up to 5 +minutes, and will require an internet connection. **Platform notes** -- *Windows*: Under windows, you might need to install [msvc - redist](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170#visual-studio-2015-2017-2019-and-2022). +- *Windows*: Under windows, you might need to install [msvc + redist](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170#visual-studio-2015-2017-2019-and-2022). -- *macOS* : under macOS, if a binary wheel is not available (e.g. for - older macOS versions), pip will try to compile from source. This - might fail if you do not have XCode installed. In this case, install - imgui-bundle with the following command - `SYSTEM_VERSION_COMPAT=0 pip install --only-binary=:all: imgui_bundle` +- *macOS* : under macOS, if a binary wheel is not available (e.g. for + older macOS versions), pip will try to compile from source. This might + fail if you do not have XCode installed. 
In this case, install + imgui-bundle with the following command + `SYSTEM_VERSION_COMPAT=0 pip install --only-binary=:all: imgui_bundle` ## Install from source + # Clone the repository git clone https://github.com/pthom/imgui_bundle.git cd imgui_bundle - git submodule update --init --recursive + + # Build and install the package (minimal install) pip install -v . - pip install opencv-python - pip install pyGLM -- Since there are lots of submodules, this might take a few minutes + # or build and install the package with all optional features: + # pip install -v ".[full]" -- The build process might take up to 5 minutes +The build process might take up to 5 minutes, and will clone the +submodules if needed (an internet connection is required). ## Run the python demo @@ -352,12 +419,12 @@ time between an idea and a first GUI prototype down to almost zero. It is well adapted for -- developers and researchers who want to switch easily between and - research and development environment by facilitating the port of - research artifacts +- developers and researchers who want to switch easily between and + research and development environment by facilitating the port of + research artifacts -- beginners and developers who want to quickly develop an application - without learning a GUI framework +- beginners and developers who want to quickly develop an application + without learning a GUI framework ### Who is this project **not** for @@ -470,16 +537,16 @@ Contributions are welcome! Three of my past projects gave me the idea to develop this library. -- [ImGui - Manual](https://pthom.github.io/imgui_manual_online/manual/imgui_manual.html), - an interactive manual for Dear ImGui, which I developed in June 2020 +- [ImGui + Manual](https://pthom.github.io/imgui_manual_online/manual/imgui_manual.html), + an interactive manual for Dear ImGui, which I developed in June 2020 -- [implot demo](https://traineq.org/implot_demo/src/implot_demo.html) - which I developed in 2020. 
+- [implot demo](https://traineq.org/implot_demo/src/implot_demo.html) + which I developed in 2020. -- [imgui\_datascience](https://github.com/pthom/imgui_datascience), a - python package I developed in 2018 for image analysis and debugging. - Its successor is immvision. +- [imgui\_datascience](https://github.com/pthom/imgui_datascience), a + python package I developed in 2018 for image analysis and debugging. + Its successor is immvision. Developments for Dear ImGui Bundle and its related automatic binding generator began in january 2022. diff --git a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/RECORD b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/RECORD similarity index 66% rename from blimgui/dist64/imgui_bundle-1.6.2.dist-info/RECORD rename to blimgui/dist64/imgui_bundle-1.92.5.dist-info/RECORD index 8850e99..c06f513 100644 --- a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/RECORD +++ b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/RECORD @@ -1,468 +1,467 @@ -../../bin/demo_imgui_bundle.exe,sha256=aPQlRNKr8ZF40M-SYwVn4LHnuwgn2mn5x3ccDpWKr8I,108419 -../../bin/imgui_bundle_demo.exe,sha256=aPQlRNKr8ZF40M-SYwVn4LHnuwgn2mn5x3ccDpWKr8I,108419 -imgui_bundle-1.6.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -imgui_bundle-1.6.2.dist-info/METADATA,sha256=Qj5LN6EKUWdTtUzsHrxhxGUkpypWj1ivTLY0JSgIj4M,20196 -imgui_bundle-1.6.2.dist-info/RECORD,, -imgui_bundle-1.6.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle-1.6.2.dist-info/WHEEL,sha256=sy0UFHejwb8HKZF4TZKxRLz0uqFoYZA7Xu3tB1pL5yA,106 -imgui_bundle-1.6.2.dist-info/direct_url.json,sha256=pUM4dIrz_UDRfwAXHnbaXS-ULr0GtPjlLZwzlsrj94c,62 -imgui_bundle-1.6.2.dist-info/entry_points.txt,sha256=N8FuM8pqoMN40DBjFX_gzkV5PAwzUAZbXNkmCSBe8WE,157 -imgui_bundle-1.6.2.dist-info/licenses/LICENSE,sha256=8qrd403zx-jESpGcOUO5PV3-Pg1X4c8B2Teq0WbTwck,1106 -imgui_bundle/.gitignore,sha256=foxuxedhAe0NXgNJsVnWYh_loWRbUEm4yY9Bgm4eCNE,72 
-imgui_bundle/LICENSE,sha256=CFux1FcOxwVQyX7YfI8vw11XaGaqT5gVrbpvzCAAeV8,1101 -imgui_bundle/Readme_pypi.md,sha256=PT7zmwe9PpUMdd-6ex1Y3JIRgPQT17mzKvm0nMxzNW8,19414 -imgui_bundle/__init__.py,sha256=GrLEbgHSup8kCzm88018hVtzV7N1fMTOgxkGi6-mFMI,7015 -imgui_bundle/__init__.pyi,sha256=RAYXlO7XnUqXsKMxpFN_5nFeUEYYJ6qRH1hhIpefaS4,1574 -imgui_bundle/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/__pycache__/_glfw_set_search_path.cpython-313.pyc,, -imgui_bundle/__pycache__/_patch_runners_add_save_screenshot_param.cpython-313.pyc,, -imgui_bundle/__pycache__/glfw_utils.cpython-313.pyc,, -imgui_bundle/__pycache__/im_col32.cpython-313.pyc,, -imgui_bundle/__pycache__/imgui_ctx.cpython-313.pyc,, -imgui_bundle/__pycache__/imgui_fig.cpython-313.pyc,, -imgui_bundle/__pycache__/imgui_node_editor_ctx.cpython-313.pyc,, -imgui_bundle/__pycache__/imgui_pydantic.cpython-313.pyc,, -imgui_bundle/__pycache__/notebook_patch_runners.cpython-313.pyc,, -imgui_bundle/__pycache__/pyodide_patch_runners.cpython-313.pyc,, -imgui_bundle/_glfw_set_search_path.py,sha256=gVpZThS-c3JR7whQKqzpWFUyhCvgbfj10qmGmeQlLhQ,1817 -imgui_bundle/_imgui_bundle.cp313-win_amd64.pyd,sha256=UNQxIVRn-O4ud8r_BYHrgxHy2Odn7LWNUUc_lvep5jY,21831680 -imgui_bundle/_patch_runners_add_save_screenshot_param.py,sha256=8-0brfj8dpUQw7-Sx_CyrEtHkf7LPxLuie2oPM-4iXo,1805 -imgui_bundle/assets/app_settings/apple/Info.plist,sha256=0PEHZdHZhGefi9ryXpCsglj2A6PhVW5itYDGfixqdfc,1020 -imgui_bundle/assets/app_settings/icon.png,sha256=yNa_UjWjKyrIyGcrTMWeXyQgeCt8xqeRPDlJj4B7gkQ,53086 -imgui_bundle/assets/fonts/DroidSans.ttf,sha256=TiNxvA5M9pgzQuFQQS8UDaedZ0yb4LVkWEAfWBBy7NM,190044 -imgui_bundle/assets/fonts/Font_Awesome_6_Free-Solid-900.otf,sha256=4CuI-K1cfSlImkDpSsE3NbSXPJRpvafO_bt78GrtQWA,1023996 -imgui_bundle/assets/fonts/Inconsolata-Medium.ttf,sha256=grwtZwr4wjrJ7674wDRvG0RMeSUqN-O-OVqMnkUICoI,102176 -imgui_bundle/assets/fonts/Roboto/LICENSE.txt,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560 
-imgui_bundle/assets/fonts/Roboto/Roboto-Bold.ttf,sha256=7GhaRhBSlv5GyHRNpKEc-BGLpsEScZQXZvelRt9qp8c,167336 -imgui_bundle/assets/fonts/Roboto/Roboto-BoldItalic.ttf,sha256=Yd9Zf3PJHyOMvoj-PFNnAtEg0Ei3myX6D094GP0SOm8,171508 -imgui_bundle/assets/fonts/Roboto/Roboto-Regular.ttf,sha256=ThR6tkuf322J0B9rjDygs83cWdYIqOIhj5olBLXJjhQ,168260 -imgui_bundle/assets/fonts/Roboto/Roboto-RegularItalic.ttf,sha256=meSoUGETbpngUpKe0NheNjhPulw0t3MTmo9kM5xgmUM,170504 -imgui_bundle/assets/fonts/fontawesome-webfont.ttf,sha256=qljzPyOaD7AvXHpsRcBD16msmgkzNYBmlOzW1O3A1qg,165548 -imgui_bundle/assets/images/markdown_broken_image.png,sha256=AHNWv5WbiArzyRFyxelvVvZhGS2mEDBYTR3LMIwXhuI,2640 -imgui_bundle/assets/images/world.png,sha256=af0eoBzKHzFv691LJgBOZp23yNa0aPs5DVj6LlSaNdU,14112 -imgui_bundle/demos_assets/app_settings/apple/Info.plist,sha256=0PEHZdHZhGefi9ryXpCsglj2A6PhVW5itYDGfixqdfc,1020 -imgui_bundle/demos_assets/app_settings/icon.png,sha256=kkhYXNp4TMd_sakAzVUoeTcp7JnqBU6oufU2v8oXQNU,344507 -imgui_bundle/demos_assets/fonts/Akronim-Regular.ttf,sha256=BjusHSINvTfjuiwuJMIJD-Gufhz5wdgs9A4rQbxJYLQ,109540 -imgui_bundle/demos_assets/fonts/DroidSans.ttf,sha256=TiNxvA5M9pgzQuFQQS8UDaedZ0yb4LVkWEAfWBBy7NM,190044 -imgui_bundle/demos_assets/fonts/Font_Awesome_6_Free-Solid-900.otf,sha256=4CuI-K1cfSlImkDpSsE3NbSXPJRpvafO_bt78GrtQWA,1023996 -imgui_bundle/demos_assets/fonts/Inconsolata-Medium.ttf,sha256=grwtZwr4wjrJ7674wDRvG0RMeSUqN-O-OVqMnkUICoI,102176 -imgui_bundle/demos_assets/fonts/NotoEmoji-Regular.ttf,sha256=Zfwh9q2GrL5Dwp-J_8Ddd2IXCaUXpQ7dE3CqgCMMyPs,878928 -imgui_bundle/demos_assets/fonts/Playbox/Playbox-FREE.otf,sha256=SWjEWSzS_im4E4Ji-N5jvqHaI4iBa7v3b4DZNnh6rZ8,213620 -imgui_bundle/demos_assets/fonts/Playbox/Playbox-license.txt,sha256=_swGxy3KPtBcBjTkhdK7wQNNXSXOo95AJKLZ6Qz0teo,3955 -imgui_bundle/demos_assets/fonts/Roboto/LICENSE_.txt,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560 
-imgui_bundle/demos_assets/fonts/Roboto/Roboto-Bold.ttf,sha256=7GhaRhBSlv5GyHRNpKEc-BGLpsEScZQXZvelRt9qp8c,167336 -imgui_bundle/demos_assets/fonts/Roboto/Roboto-BoldItalic.ttf,sha256=Yd9Zf3PJHyOMvoj-PFNnAtEg0Ei3myX6D094GP0SOm8,171508 -imgui_bundle/demos_assets/fonts/Roboto/Roboto-Regular.ttf,sha256=ThR6tkuf322J0B9rjDygs83cWdYIqOIhj5olBLXJjhQ,168260 -imgui_bundle/demos_assets/fonts/Roboto/Roboto-RegularItalic.ttf,sha256=meSoUGETbpngUpKe0NheNjhPulw0t3MTmo9kM5xgmUM,170504 -imgui_bundle/demos_assets/fonts/entypo.ttf,sha256=OVs8BhEXFfLIA9bns7i2WCT8zVpuZ8WG1vTnzW8Auwo,35392 -imgui_bundle/demos_assets/fonts/fontawesome-webfont.ttf,sha256=qljzPyOaD7AvXHpsRcBD16msmgkzNYBmlOzW1O3A1qg,165548 -imgui_bundle/demos_assets/images/badge_interactive_manual.png,sha256=SypOZaI7DFJdPJnp3GVXI8POS6JjV9vpA-HcVuJ5278,4367 -imgui_bundle/demos_assets/images/badge_view_docs.png,sha256=dslfbTd1fU1jzLb4DKMkBjSS1aYtJmoicvYWJxYgZSU,3588 -imgui_bundle/demos_assets/images/badge_view_sources.png,sha256=8nxhINXLt_Ju3Nzi_rH0Xyt4kTxHzhmlMsym3l_F4ew,3798 -imgui_bundle/demos_assets/images/bear_transparent.png,sha256=jTX5C_CRhcvkyneCt2D5BROBxavGFv-Z1DjnMKumlmU,39050 -imgui_bundle/demos_assets/images/demo_tex_inspect.png,sha256=2V1tjK9moTo5WaDUD6p_olxgaw1V81WwcJ6oEvq3fVk,40026 -imgui_bundle/demos_assets/images/dmla.jpg,sha256=Mq-1zZAHsYJ0WxbbPxDmx8H4n7pOpSU--wXSh620ZK4,67037 -imgui_bundle/demos_assets/images/gizmo_screenshot.jpg,sha256=gMF7TEB-YBbuLkATU6hNUGqNdbRk4f3YjYDo6kSEzSY,163208 -imgui_bundle/demos_assets/images/haiku.png,sha256=6bzrldqKzHRcyWMJeM9vEmKQ7AtXE7HZfnztPoXj3jM,21737 -imgui_bundle/demos_assets/images/house.jpg,sha256=-Y-Lw7kACiSiG8iYAJU29YHEU2cbiM0ocxw7ARwXgBA,120412 -imgui_bundle/demos_assets/images/immapp_notebook_example.jpg,sha256=yaP9RHqoJkqkGydCVzQJ5Sb50exRLPmaYr1JlLDrR-0,139989 -imgui_bundle/demos_assets/images/logo_imgui_600.png,sha256=i8TkSOQDOOy70urgLy-oppb4LbwxD-CZcaRMe8PAhUQ,317613 
-imgui_bundle/demos_assets/images/logo_imgui_bundle_512.png,sha256=OEMxH6ZVVKrOWvXYH57GOMxchwcLuKOPzYYcPBsO4DI,283075 -imgui_bundle/demos_assets/images/markdown_broken_image.png,sha256=AHNWv5WbiArzyRFyxelvVvZhGS2mEDBYTR3LMIwXhuI,2640 -imgui_bundle/demos_assets/images/nanovg_demo_heart.jpg,sha256=r7MNWC21tSfy1TAr-pb5Zfiza3rDhwFAykZeGnA8ph0,20232 -imgui_bundle/demos_assets/images/nanovg_full_demo.jpg,sha256=mUv1uww5G2Ln8c_bNxVTNOmugChJad4IwyxJUfviRqo,32294 -imgui_bundle/demos_assets/images/node_editor_fiat.jpg,sha256=JEnCBufehp0ZvL0nFxJIUjhpv3rco8-HtFzL3wQth-s,134624 -imgui_bundle/demos_assets/images/node_editor_screenshot.jpg,sha256=UoaxS9dyb3SLvatURpUkefGu6w5yJBuKJJ7YiCCRNXw,70021 -imgui_bundle/demos_assets/images/tennis.jpg,sha256=OFGzn3rL_6B9UZeTcvQ2PnZdf2QmHKsD2KJeIDzDBlc,50311 -imgui_bundle/demos_assets/images/world.png,sha256=af0eoBzKHzFv691LJgBOZp23yNa0aPs5DVj6LlSaNdU,14112 -imgui_bundle/demos_assets/nanovg_demo_images/image1.jpg,sha256=HOieYayzDyOv21Hqes1JCbs96XjlHxx-cbRrk5M4WKE,25760 -imgui_bundle/demos_assets/nanovg_demo_images/image10.jpg,sha256=DTzBQHDy0O6ERHzD7IRfA609sQOW13ibZcmIZ7cd7Qc,3439 -imgui_bundle/demos_assets/nanovg_demo_images/image11.jpg,sha256=3YhRr6H8YKriF3QUCSadnCmvJx2zHzxJyRi15dMD3zc,3818 -imgui_bundle/demos_assets/nanovg_demo_images/image12.jpg,sha256=p8XLBZW76PRRPxnOoyQ3qv_1OJ-rrn8vX06mXUKJ2D0,5452 -imgui_bundle/demos_assets/nanovg_demo_images/image2.jpg,sha256=IQrpeus8XbMv2z4wGvmv_NEgFkygkIhquxK0Zy3-QPQ,24091 -imgui_bundle/demos_assets/nanovg_demo_images/image3.jpg,sha256=Sq2i1aVtZ3XZdWkspQVbmPTA-22XAx7asqom_39IL54,29282 -imgui_bundle/demos_assets/nanovg_demo_images/image4.jpg,sha256=h3OatOlnfFsLVdIu1L7OcXvBaguFm9Gv08Qndwp6J6Q,23830 -imgui_bundle/demos_assets/nanovg_demo_images/image5.jpg,sha256=OwcY53ndH02HyWKcXvoADiEctVI7VMTxdV5gPFx1SiA,27131 -imgui_bundle/demos_assets/nanovg_demo_images/image6.jpg,sha256=GwJz2cSFPLeZl48xyUIqTM3VhxmxeRfcqUrLEyqldcU,25116 
-imgui_bundle/demos_assets/nanovg_demo_images/image7.jpg,sha256=2z6U4e-UDylYbWTR1jWJRPhpI8VWqYAU3DHWnXDtin0,25590 -imgui_bundle/demos_assets/nanovg_demo_images/image8.jpg,sha256=uH2QM0sh-JH1SFOgZJyDXGrOfHWQLLIPrpiqPpEkNmc,24607 -imgui_bundle/demos_assets/nanovg_demo_images/image9.jpg,sha256=yBmR6tWkdthSqGm0P1bIrJb56pTcZnQlQm-uWSt-lyE,4035 -imgui_bundle/demos_cpp/CMakeLists.txt,sha256=kRQ6aDwSlkA7ewWReqjO5RHK2y4ASbCZOGBweOgBXKk,3629 -imgui_bundle/demos_cpp/_auto_main/_auto_main.cpp.in,sha256=hCL5YVUKLu2VyrYMH62YzZ4VA-z_o_Eo6vIT4__Y-6o,854 -imgui_bundle/demos_cpp/coi-serviceworker.js,sha256=T422Jh_xvLIw6ajBvqgQl4pSqiX0Npo-wF4x4tkNX7M,4933 -imgui_bundle/demos_cpp/demo_imgui_bundle.cpp,sha256=H170Xqpv5oKfo3M4P6Riuxg-97PE1dwGVfaBJ6u5yPE,5627 -imgui_bundle/demos_cpp/demo_imgui_bundle_intro.cpp,sha256=8X0Aw9LpLK5fXM18Lv-WwsCFmaoZMjaUdGxFXY0DWRY,7848 -imgui_bundle/demos_cpp/demo_imgui_md.cpp,sha256=7I0IHXRAkEHMN1ZUUqg7Y2LBpqWNuRDRKruTfu6AQhQ,2716 -imgui_bundle/demos_cpp/demo_imgui_show_demo_window.cpp,sha256=EI-6r4cZiINBwso0dpXN3TnqdgJv-fWD3B1NsN79fpI,1778 -imgui_bundle/demos_cpp/demo_imguizmo_launcher.cpp,sha256=9HQ1BsI-NJKPotXyOCnLzVkVVSKfItHbFzSayjBs_4E,1113 -imgui_bundle/demos_cpp/demo_immapp_launcher.cpp,sha256=kUL_M-sE61Atqcy2zlddSQVtm1GAgiMyGMo1q8YN8QE,4576 -imgui_bundle/demos_cpp/demo_immvision_launcher.cpp,sha256=c33hUASA4ILqwmddd-DtghQO0_CrS6GLbsq42n178yo,2051 -imgui_bundle/demos_cpp/demo_implot.cpp,sha256=0v_mG9XiWXS7wuLtYcelW_M12xIfyQuwtXm7J7XwEdQ,2078 -imgui_bundle/demos_cpp/demo_logger.cpp,sha256=CmggXf7FOTINnjeglpEMR2OrmsZE2WRFw7HMAC-Pvc8,2408 -imgui_bundle/demos_cpp/demo_nanovg_launcher.cpp,sha256=vAzYQ6nDD5LlblVQzwdlsyIc9_B8uxi56rTalo75SQc,3032 -imgui_bundle/demos_cpp/demo_node_editor_launcher.cpp,sha256=kfqhalJ-2eCIlVWJwgkDaBfvRLaz_5a2uTuFD6Olw0U,1713 -imgui_bundle/demos_cpp/demo_tex_inspect_launcher.cpp,sha256=afTL-elXTz9guJPMKBYhxANuoVQl98u0OTwHNhpuCWY,1737 
-imgui_bundle/demos_cpp/demo_text_edit.cpp,sha256=I3p5J_lF7A_qjqESuB8wjDybC0wq0q7yOAMi4l3SpOA,1716 -imgui_bundle/demos_cpp/demo_themes.cpp,sha256=ixXG9QBWmqqEmO9RjKSAF905Ovs5ROvfgOzyT3lH0CA,2229 -imgui_bundle/demos_cpp/demo_utils/CMakeLists.txt,sha256=q2-ARiF67Q29L4PFjlymJ3Kqv1e3kAA--e0brebRFrM,254 -imgui_bundle/demos_cpp/demo_utils/animate_logo.cpp,sha256=S_YLmsip3VwAlpFTxBrElF76nIF9R89GYUDWHi0eeSI,3338 -imgui_bundle/demos_cpp/demo_utils/animate_logo.h,sha256=se4ebC5Do1cq10JLjUow7y81kCF2TiydHmWczLuZkgk,291 -imgui_bundle/demos_cpp/demo_utils/api_demos.cpp,sha256=IkyEGL-m8mHUEfJ5QZlSyDxSuw3h-wbw-TMNlGjHwWM,8026 -imgui_bundle/demos_cpp/demo_utils/api_demos.h,sha256=0jkhDeZFj-w2wRuUks1fnRh7Nnr0S1IS4o8auzNPDNE,1022 -imgui_bundle/demos_cpp/demo_utils/demo_app_table.cpp,sha256=BYt2iwVBfBGBtkCiI75JFXK226SC4-eZd3EQnUCZdLg,5474 -imgui_bundle/demos_cpp/demo_utils/demo_app_table.h,sha256=nU_uKOKdLIJOGUTDcJduJFyVIXtnLf2uVdhMwU5n7l8,888 -imgui_bundle/demos_cpp/demo_utils/subprocess.h,sha256=qORyFmo7ELmxG_3s_ils5iEOG8uTB6IbQ3sO21QqKmQ,37583 -imgui_bundle/demos_cpp/demo_widgets.cpp,sha256=mQX3NucK8IDvgenJ1Db9elhflOhxxodXbG2pJaD8Vow,16567 -imgui_bundle/demos_cpp/demos_imguizmo/CMakeLists.txt,sha256=oBFskqvGdnEPUsIsx3fEs7FxLL5VUdfISEIY_084mRw,234 -imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo.cpp,sha256=iBBasqFERtC0arafg4DioUSv8PUZnwsBuJdsfGckQWY,15820 -imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo_pure.cpp,sha256=MjJRROTcdYoWN-D8XJpClATcY452uLEJcluOx5aTkGI,17344 -imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_curve_edit.cpp,sha256=9UgHHfCgRpCK3S6UAXuXqBur0bLBfJ91Pv5etC8yH9k,3887 -imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_gradient.cpp,sha256=Dk7K2EdTffoink8EZrlHujD8R3JTpNGN1A7Ex906nxI,2719 -imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_sequencer.draft_cpp,sha256=GOPLY9Qzn2YBoFhXkLlVww0rW4de_VA4qC-7eDUkHgA,10347 -imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_zoom_slider.cpp,sha256=aAjlDeH7bF1xke_vf6xoN4g-hjybkFIC5oEMxEXUyeE,5160 
-imgui_bundle/demos_cpp/demos_immapp/CMakeLists.txt,sha256=dkr5nUGmjhqnNrhRoJVwyJoPi6uDMC8koYhf1YrHvc8,1542 -imgui_bundle/demos_cpp/demos_immapp/coi-serviceworker.js,sha256=T422Jh_xvLIw6ajBvqgQl4pSqiX0Npo-wF4x4tkNX7M,4933 -imgui_bundle/demos_cpp/demos_immapp/demo_assets.cpp,sha256=XQCY_LB7me-b1bikb3sNUDZ4BxLgvzU0Wm_aRz09rmU,1249 -imgui_bundle/demos_cpp/demos_immapp/demo_assets_addons.cpp,sha256=2DeJGY_tGK7ObMW1PzQypALRIc1zA4GcQtRN-58xYO8,10023 -imgui_bundle/demos_cpp/demos_immapp/demo_command_palette.cpp,sha256=y20HFFZUA5hPFx-p6rP9JzannOIV4haiYgnkmFzFWIs,2580 -imgui_bundle/demos_cpp/demos_immapp/demo_custom_background.cpp,sha256=naLMvoUwxjCzVdzkzDBTTURiRxzo06A8crxueg7lYvM,21530 -imgui_bundle/demos_cpp/demos_immapp/demo_docking.cpp,sha256=QXluiiYs6IweSQXsbfZwfD58bBwDJPu8xb4lqdh4LXQ,39476 -imgui_bundle/demos_cpp/demos_immapp/demo_drag_and_drop.cpp,sha256=MlMFOk8UDMtaBN7sBaYe96oBcIa-rNMrLpZJVL0jtGY,2973 -imgui_bundle/demos_cpp/demos_immapp/demo_font_common_glyph_range.cpp,sha256=nwrc_0aUO4pUXOnKT5mGiSUsPoj18wixjZzF0PZSysI,2081 -imgui_bundle/demos_cpp/demos_immapp/demo_hello_world.cpp,sha256=qRuU-utPmbBw6PVwCTffYuUi2TuiV5tYI5oKmLB3T7Y,387 -imgui_bundle/demos_cpp/demos_immapp/demo_implot_markdown.cpp,sha256=Acbd80a1tiLF8CaPUj6mNoEgWUjwQXjOy-XGOve7lSc,1499 -imgui_bundle/demos_cpp/demos_immapp/demo_parametric_curve.cpp,sha256=xYq84wD44kBAq-ZfZF1CNrmM24oORN8ippjqbL5NG_0,2286 -imgui_bundle/demos_cpp/demos_immapp/demo_powersave.cpp,sha256=QHCQ7b74sORh-4_EG2JWDof5Hg24SHzDiVpgvvEmv9Q,1726 -imgui_bundle/demos_cpp/demos_immapp/demo_testengine.cpp,sha256=J5SDm20r7Xo6kVy_dj1wEHtLtYsl_VbMp1wBWlPLvMY,10835 -imgui_bundle/demos_cpp/demos_immapp/haiku_butterfly.cpp,sha256=0HcB2S-0yuyC7EI-5Q9SrDIpNDmuOkj8LnytnZSPbI4,4492 -imgui_bundle/demos_cpp/demos_immapp/haiku_implot_heart.cpp,sha256=ZOcjTkVnR2SdjvVXNu4UnCeAaYEYM6dDCR1ps1hHOq0,1913 -imgui_bundle/demos_cpp/demos_immapp/imgui_example_glfw_opengl3.cpp,sha256=_L_AZQt7aH9yEe8hh9h1Qh6Z_Vz6hpu2qEowUAWhMJk,11043 
-imgui_bundle/demos_cpp/demos_immapp/shell.emscripten.html,sha256=-7VrhE6xR-oaz58JeI6xz7rr349MNdgFAV1cCX4b89M,3725 -imgui_bundle/demos_cpp/demos_immvision/CMakeLists.txt,sha256=oBFskqvGdnEPUsIsx3fEs7FxLL5VUdfISEIY_084mRw,234 -imgui_bundle/demos_cpp/demos_immvision/demo_immvision_display.cpp,sha256=F3PXtV6puSox0hKxBjaq8LWB8QzQ7x2dscRL6c2iP68,1118 -imgui_bundle/demos_cpp/demos_immvision/demo_immvision_inspector.cpp,sha256=ZHC-RIEobKMij5qkE4EZo_BME2VlPt0GQI_z5hIIy44,960 -imgui_bundle/demos_cpp/demos_immvision/demo_immvision_link.cpp,sha256=XOG_K8JJbRvZ_n3eRlTyOvmwZlK7xKz__YOVIFTKA4A,1205 -imgui_bundle/demos_cpp/demos_immvision/demo_immvision_process.cpp,sha256=SwKCX7VFUR7_7w18RsOLr2PWUkY-P7_DTNq3yO8gW68,5050 -imgui_bundle/demos_cpp/demos_nanovg/CMakeLists.txt,sha256=kGf5pNCPD2Dyh4BJ5VOQtxbBm8coJjUkt37RtRpU9KQ,362 -imgui_bundle/demos_cpp/demos_nanovg/coi-serviceworker.js,sha256=T422Jh_xvLIw6ajBvqgQl4pSqiX0Npo-wF4x4tkNX7M,4933 -imgui_bundle/demos_cpp/demos_nanovg/demo_nanovg_full.cpp,sha256=bg10-6myMwoVcwYPyC_Rl0zcuIw4MMopcU5oKa1_hBM,4388 -imgui_bundle/demos_cpp/demos_nanovg/demo_nanovg_full/demo_nanovg_full_impl.cpp,sha256=_2N2MjXD9oO81G722Ikwtt-Bjhb7LcDg6qU47TRvoMY,34903 -imgui_bundle/demos_cpp/demos_nanovg/demo_nanovg_full/demo_nanovg_full_impl.h,sha256=Vy33bipaw5rp7JTAzrEoPakhAJeF56cmFUloTGe7C6g,629 -imgui_bundle/demos_cpp/demos_nanovg/demo_nanovg_heart.cpp,sha256=FqfZXYYfGhd7muKf3wixy5leXUWx3M6EbDpe577H8gs,5150 -imgui_bundle/demos_cpp/demos_nanovg/shell.emscripten.html,sha256=-7VrhE6xR-oaz58JeI6xz7rr349MNdgFAV1cCX4b89M,3725 -imgui_bundle/demos_cpp/demos_node_editor/CMakeLists.txt,sha256=oBFskqvGdnEPUsIsx3fEs7FxLL5VUdfISEIY_084mRw,234 -imgui_bundle/demos_cpp/demos_node_editor/demo_node_editor_basic.cpp,sha256=Y_JnUfExjjTzgZWMITrk8FIp3XVg_XDXEWYvdJjuBuE,8072 -imgui_bundle/demos_cpp/demos_node_editor/demo_romeo_and_juliet.cpp,sha256=qIJL--IQXHwhCJRqOCWfI451pO-BtVcKq6lL-eqXOnE,2986 
-imgui_bundle/demos_cpp/demos_tex_inspect/CMakeLists.txt,sha256=oBFskqvGdnEPUsIsx3fEs7FxLL5VUdfISEIY_084mRw,234 -imgui_bundle/demos_cpp/demos_tex_inspect/demo_tex_inspect_demo_window.cpp,sha256=8PS-C2heBGvPF0o1MYW6OpZns7e5usS7ejDqs6A_zDE,840 -imgui_bundle/demos_cpp/demos_tex_inspect/demo_tex_inspect_simple.cpp,sha256=-F_wxw16pdHPejesx8UqtP7aKONMrmKfYgBx-yqmSFk,1777 -imgui_bundle/demos_cpp/msvc_note.txt,sha256=a-eabTYaj6czbsNNBOnv2uerE2V-QCxI9rKV4_3jWYU,853 -imgui_bundle/demos_cpp/sandbox/CMakeLists.txt,sha256=N9_BwZKvQ72z7mJzN1jKiQQTkey7SZVFjyz_MLRlRYE,36 -imgui_bundle/demos_cpp/sandbox/bundle_cpp_sandbox.cpp,sha256=lS15gPzfxIPxekr7aEt1mWlIjH2UMfTMxYTy43df4T4,273 -imgui_bundle/demos_cpp/sandbox/sandbox_custom_opengl_version.cpp,sha256=u4eqbDiIiTNSGUW_6xhqtcZMPvYRVsNtbV6BWitrol0,786 -imgui_bundle/demos_cpp/sandbox/sandbox_immapp_manual_render.cpp,sha256=g_lPnXKGzSwC-lCvHbQOOIGXEEKrtNEbbz3LG3XlH5U,1973 -imgui_bundle/demos_cpp/sandbox/sandbox_node_image.cpp,sha256=vqIVrtfJDKvaQuCgFZ8EijORxTNOd8TUb4qBQJkgJ4U,1813 -imgui_bundle/demos_cpp/sandbox/sandbox_node_md_code.cpp,sha256=saSgp-ppmSUzUuJ55KO-IETAVRxYMK6sKwHBzBtls0g,1639 -imgui_bundle/demos_cpp/sandbox/sandbox_node_popup.cpp,sha256=7j-xlPFFs08f9TFVC8B74rkAjQG7i2jiIlSN4tAKt9w,6524 -imgui_bundle/demos_cpp/sandbox/sandbox_node_text_wrap.cpp,sha256=yfkaMdI3eItqEGj5XKgy_Fvrt6PSE6h1aEmsBYPJQGo,3826 -imgui_bundle/demos_cpp/sandbox/sandbox_plot_in_node.cpp,sha256=0zi0V0PqNcyO9BjUSh6Ipe9srt0Kyvf7rTuJKf6VVmk,1396 -imgui_bundle/demos_cpp/sandbox/sandbox_stacklayout.cpp,sha256=DBnrlakliDMPBzrEN_IOoUrTb7FooEmEHQFuU6nnOkI,1567 -imgui_bundle/demos_cpp/sandbox/sandbox_tmp.cpp,sha256=4Np-IFroVBgZqfs6UkP7_kIKOjCHkm_B2an4_XgTVYo,160 -imgui_bundle/demos_cpp/sandbox/sandbox_tstengine.cpp,sha256=5TOKSzi_40KQJ36LgJypVsNA6TCCe12nOg8vycaLUUs,2262 -imgui_bundle/demos_cpp/sandbox/sandnode_stack.cpp,sha256=HKssPDHsk5K6yCFWXteGxp-eal15nAju6Kh2u5BySAE,1402 
-imgui_bundle/demos_cpp/sandbox/sandnode_suspend.cpp,sha256=pIspshFfKve8c7mub49qYPIDb-IQQFf7DZg4Ch-e93E,2979 -imgui_bundle/demos_cpp/shell.emscripten.html,sha256=-7VrhE6xR-oaz58JeI6xz7rr349MNdgFAV1cCX4b89M,3725 -imgui_bundle/demos_python/.gitignore,sha256=nKxsXgtWy50PAko_ZXIsk9g5nONGt3pHsywgIkKCuHw,47 -imgui_bundle/demos_python/__init__.py,sha256=_kRM_95wlYHerwCfF0WusMCJlFN3FCTXpBOCuDkf64Y,291 -imgui_bundle/demos_python/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_imgui_bundle.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_imgui_bundle_intro.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_imgui_md.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_imgui_show_demo_window.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_imguizmo_launcher.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_immapp_launcher.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_immvision_launcher.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_implot.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_logger.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_nanovg_launcher.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_node_editor_launcher.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_tex_inspect_launcher.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_text_edit.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_themes.cpython-313.pyc,, -imgui_bundle/demos_python/__pycache__/demo_widgets.cpython-313.pyc,, -imgui_bundle/demos_python/demo_imgui_bundle.py,sha256=uSSphpuoiglqqyFLOWwEknz4-YuFmjD3oH2cuU0MSUw,6349 -imgui_bundle/demos_python/demo_imgui_bundle_intro.py,sha256=oGkZghT3QRyL9h4rXWYG75UyEZKIDTnzoy00MlSElwc,7700 -imgui_bundle/demos_python/demo_imgui_md.py,sha256=cIMyXFOGF_ELczgiyLCDLSM6Giwu74mRuJ4aBpGwUFQ,2671 
-imgui_bundle/demos_python/demo_imgui_show_demo_window.py,sha256=vc4XWlm4LU07VLhphkeYxFPfIIJXmBg3ILPpn1LMAWA,2374 -imgui_bundle/demos_python/demo_imguizmo_launcher.py,sha256=RTEqNTakW_DOS7Xdo0ZgY3lUUjJdMPUJsXMF-QBdet4,1433 -imgui_bundle/demos_python/demo_immapp_launcher.py,sha256=rw4vIWzp5XjxrK7f65oqb57YwryzYLlTHNbfl1CfwQs,5373 -imgui_bundle/demos_python/demo_immvision_launcher.py,sha256=YX6jHDVUCWTjb21ynLP8viLhokBwL6gOxPjdeS7NcSs,2224 -imgui_bundle/demos_python/demo_implot.py,sha256=Mv9zmfzWQ0AY_CGJiLI4M8IxuBQ9cVdplQFrmh7wGR0,1977 -imgui_bundle/demos_python/demo_logger.py,sha256=FaJjZlyW8PFySdGnorkf4mDg28F-9ephZE2Gjk4As9Q,2718 -imgui_bundle/demos_python/demo_nanovg_launcher.py,sha256=ZtdpwKDMgGRTH4BimuDZZxp_OJvy7oyAGKWQo9o_ZAs,3870 -imgui_bundle/demos_python/demo_node_editor_launcher.py,sha256=H6HQWB_YRvwQN5LlhDmKcIbjrKCVuSV8K0M4xUUdp1A,1934 -imgui_bundle/demos_python/demo_packaging/macos/.gitignore,sha256=XhR1GzzBHiiMzAT-ikVH4TC4htPd5KqMAFJ_5-QWeRA,15 -imgui_bundle/demos_python/demo_packaging/macos/Readme.md,sha256=zt8OMVqJXuM7jTDIo3yryG0XhyM8PCNHm5PuzO6MVgI,1381 -imgui_bundle/demos_python/demo_packaging/macos/__pycache__/bundle_macos_demo.cpython-313.pyc,, -imgui_bundle/demos_python/demo_packaging/macos/assets/images/world.png,sha256=XJ0vpgpIMKBRKCYA5mUC9H8uqPhLUdPn-wCHD8wx8WY,45007 -imgui_bundle/demos_python/demo_packaging/macos/bundle_macos_demo.py,sha256=DnMN20WhLC-g5E0nLesoyOmiQQ9o7GEhXqBG74R-clE,779 -imgui_bundle/demos_python/demo_packaging/macos/bundle_macos_demo.spec,sha256=pA-mTaD2zEQV0CCHOfN69BDMF7e_SsXPQiK-PQ6Ryso,1046 -imgui_bundle/demos_python/demo_tex_inspect_launcher.py,sha256=5v0KpNio_K8k-NE27cMYCiO3tTuYbxKr-VdwD60C4Os,1744 -imgui_bundle/demos_python/demo_text_edit.py,sha256=0AtCWh2t2B8iTY1A2ERuFdNl3D9DbqFir4BqNcyfJOk,1779 -imgui_bundle/demos_python/demo_themes.py,sha256=rIgQtCH5gwwz5fiZgCxD9ELPEtZ9sWynqHvFuNzaGLg,1753 -imgui_bundle/demos_python/demo_utils/__init__.py,sha256=MPYQWRr14I-Sn81e_AM5dgMi3jR5hIQknVhv030o4AE,617 
-imgui_bundle/demos_python/demo_utils/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/demo_utils/__pycache__/animate_logo.cpython-313.pyc,, -imgui_bundle/demos_python/demo_utils/__pycache__/api_demos.cpython-313.pyc,, -imgui_bundle/demos_python/demo_utils/__pycache__/demo_app_table.cpython-313.pyc,, -imgui_bundle/demos_python/demo_utils/__pycache__/functional_utils.cpython-313.pyc,, -imgui_bundle/demos_python/demo_utils/__pycache__/imread_pil.cpython-313.pyc,, -imgui_bundle/demos_python/demo_utils/animate_logo.py,sha256=eYqiQw_bgJXxSX95L-4HGpUika9bN01zj-RvI-tD9yA,3826 -imgui_bundle/demos_python/demo_utils/api_demos.py,sha256=EtqVNty0Bc9PCGWKdj2ZUiMTJbP6um30t6R5NvJ0Kok,3372 -imgui_bundle/demos_python/demo_utils/demo_app_table.py,sha256=Cxyo68sk1o92t0SzCI3x36KTS5UkygWKWi1teUOElqI,6000 -imgui_bundle/demos_python/demo_utils/functional_utils.py,sha256=7g38QtLJDqpiTI7Lslcfs5wOO9JPFgVPLMokSgxN4M0,290 -imgui_bundle/demos_python/demo_utils/imread_pil.py,sha256=-7YnJAx-L84pwg18sdeR6rsvWtV6278JqPn1beqX-d8,2698 -imgui_bundle/demos_python/demo_widgets.py,sha256=3w-ZqRKStSc__5lEJaTkZhPMHtQ0YNAh9tbt6YG3cZ0,15828 -imgui_bundle/demos_python/demos_imguizmo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/demos_python/demos_imguizmo/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/demos_imguizmo/__pycache__/demo_gizmo.cpython-313.pyc,, -imgui_bundle/demos_python/demos_imguizmo/demo_gizmo.py,sha256=U_QSyHxB0-Zabh0JqXxlrSTBsSiwd39R_58tAx_aR7M,15616 -imgui_bundle/demos_python/demos_immapp/.gitignore,sha256=gd3Ts_Xo4zDaOHkdQED2l6oyIA7vjfOooKhxO8q5Obw,7 -imgui_bundle/demos_python/demos_immapp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/demos_python/demos_immapp/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_assets.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_assets_addons.cpython-313.pyc,, 
-imgui_bundle/demos_python/demos_immapp/__pycache__/demo_command_palette.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_custom_background.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_docking.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_drag_and_drop.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_font_common_glyph_range.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_hello_world.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_implot_markdown.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_matplotlib.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_parametric_curve.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_powersave.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_pydantic.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_python_context_manager.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/demo_testengine.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/haiku_butterfly.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/haiku_implot_heart.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/imgui_demo.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/__pycache__/imgui_example_glfw_opengl3.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immapp/demo_assets.py,sha256=cojtTZtcFYiQz7awEEvz6l3vZSSPe2FYyiv4SPDHvNg,1365 -imgui_bundle/demos_python/demos_immapp/demo_assets_addons.py,sha256=ummdc2Q8idXVmQzNyziNIC1y5k6_745_Ey6dYvNciIc,9790 -imgui_bundle/demos_python/demos_immapp/demo_command_palette.py,sha256=Eddit6x729LGdF5mlHnn8UaOodeQnl5_d2icHRb-FZU,2492 -imgui_bundle/demos_python/demos_immapp/demo_custom_background.py,sha256=jEuJFUBgDGMvDbJWo0Xbv1qGij2LMEBzf7eJrRqjbV4,20823 
-imgui_bundle/demos_python/demos_immapp/demo_docking.py,sha256=EkVnEz_9u4_yG2TK97FOXulZylQOTYIyoah0dFHOiQw,38924 -imgui_bundle/demos_python/demos_immapp/demo_drag_and_drop.py,sha256=-44GBSX2Nux3Y1nrz5rqMPdDF1dkdH5XX7-KcG_Qf-o,3491 -imgui_bundle/demos_python/demos_immapp/demo_font_common_glyph_range.py,sha256=XXuFYJmeotCpYMJ8rUut2EIio0rH8-7wv9gCXqlT3gk,2047 -imgui_bundle/demos_python/demos_immapp/demo_hello_world.py,sha256=uEz_4zNrwODaAq9LLfvC80zss32JPqojzwNccI90Y74,420 -imgui_bundle/demos_python/demos_immapp/demo_implot_markdown.py,sha256=_dXnlVNX6fDHfWMv3USpDvTohYlfqvl163ESZNVgNbw,886 -imgui_bundle/demos_python/demos_immapp/demo_matplotlib.py,sha256=NcFKmcHejrUwzej9WwxOn4Q9ChnglNvyKAOZX5AM-rY,2645 -imgui_bundle/demos_python/demos_immapp/demo_parametric_curve.py,sha256=S7nlr56pmnYNBTHqDf2TeEpzCUqTFb0uL3Nb3Dirfv4,1466 -imgui_bundle/demos_python/demos_immapp/demo_powersave.py,sha256=5uoaA3RaBanmPqK9tDeX-8ROCWOvUlJ6HBeqy5K9IE0,1639 -imgui_bundle/demos_python/demos_immapp/demo_pydantic.py,sha256=Xky59lsvzDDA5M3cvGwJW2nT044_JRcZ3BRQp-T4uKw,1908 -imgui_bundle/demos_python/demos_immapp/demo_python_context_manager.py,sha256=DIIy8ruYNnDdvXHrya087mpDlKIiAkCem0Sh9zyRliQ,11373 -imgui_bundle/demos_python/demos_immapp/demo_testengine.py,sha256=QM8Ag8zY7l6MVvb4ve-r-Z-8WkzE2SCFJi45aSi0CTE,11246 -imgui_bundle/demos_python/demos_immapp/haiku_butterfly.py,sha256=HMBfZkta9pOLreYoLLF-xxZHLLcwK7am0410BVgyUXo,3889 -imgui_bundle/demos_python/demos_immapp/haiku_implot_heart.py,sha256=Fb3e6plszcfeHl-bEObvsCzQwjfpLrZD1_JUejgUyfo,1102 -imgui_bundle/demos_python/demos_immapp/imgui_demo.py,sha256=OMYhyUj9QYp50qGkd21N3MGCV9GbMjzbz3glJTJRhXk,187229 -imgui_bundle/demos_python/demos_immapp/imgui_example_glfw_opengl3.py,sha256=h57FtoDEJkHava13gPFVjYffPx4m1j7LpKRdc8sYfXY,11765 -imgui_bundle/demos_python/demos_immvision/__init__.py,sha256=Khtdyg9ss2_x1O3gnL0XZedDC-I_E9wcHp2RKGXdksQ,682 -imgui_bundle/demos_python/demos_immvision/__pycache__/__init__.cpython-313.pyc,, 
-imgui_bundle/demos_python/demos_immvision/__pycache__/demo_contours.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_display.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_inspector.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_inspector_mandelbrot.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_link.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_no_opencv.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_process.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immvision/__pycache__/demo_laplacian.cpython-313.pyc,, -imgui_bundle/demos_python/demos_immvision/demo_contours.py,sha256=18Qi2OI5osWHz7m2D6V5LkccNAkyYY3cYIY9O9tOGvc,2060 -imgui_bundle/demos_python/demos_immvision/demo_immvision_display.py,sha256=VTYOvVdTpdXDZA2MN-4x2KEkQLulphgDua_N0pTvcdE,1409 -imgui_bundle/demos_python/demos_immvision/demo_immvision_inspector.py,sha256=UK68gKQcFHSEhasoQeq6qL69y4Koaa4o2lLWJs5VKz0,1097 -imgui_bundle/demos_python/demos_immvision/demo_immvision_inspector_mandelbrot.py,sha256=eAeSizx7DXxD_MA8q22jTsUgP7m5BL4OsgMtVtwsfB0,3269 -imgui_bundle/demos_python/demos_immvision/demo_immvision_link.py,sha256=yJRipEGpY24YfRyaAcMjQEXNwQw5QOx2t6U4e91xAr4,1188 -imgui_bundle/demos_python/demos_immvision/demo_immvision_no_opencv.py,sha256=3qI8CB7dPxdNoqdNPzLiIe7GDx6BXg4raSCmgXyCdgI,1081 -imgui_bundle/demos_python/demos_immvision/demo_immvision_process.py,sha256=cI9rJzpS2SlIjc3BJkVAcUU-1GyxgoWikZBZaD9f8Dc,5332 -imgui_bundle/demos_python/demos_immvision/demo_laplacian.py,sha256=Z1cZbTFE01k-aQUE86Iqio_gRCEuXhqbAHvDh7f45zE,2311 -imgui_bundle/demos_python/demos_implot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/demos_python/demos_implot/__pycache__/__init__.cpython-313.pyc,, 
-imgui_bundle/demos_python/demos_implot/__pycache__/demo_implot_stock.cpython-313.pyc,, -imgui_bundle/demos_python/demos_implot/__pycache__/implot_demo.cpython-313.pyc,, -imgui_bundle/demos_python/demos_implot/demo_implot_stock.py,sha256=4x0RQdtnWyMDVdXI9HQUhRFEnuiPD9ycymEQGINzHH4,12167 -imgui_bundle/demos_python/demos_implot/implot_demo.py,sha256=Jdo3fATtW2ltXZs8u_ZTrPk30Kyefd7I2NPj60ZE4oM,96198 -imgui_bundle/demos_python/demos_implot3d/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/demos_python/demos_implot3d/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/demos_implot3d/__pycache__/implot3d_demo.cpython-313.pyc,, -imgui_bundle/demos_python/demos_implot3d/__pycache__/implot3d_meshes.cpython-313.pyc,, -imgui_bundle/demos_python/demos_implot3d/implot3d_demo.py,sha256=znjoeZBVlTOuaLUIp5MJ8OXcqSfRbmnY-3CJGc9fMyY,41846 -imgui_bundle/demos_python/demos_implot3d/implot3d_meshes.py,sha256=egqM_ssmDD8jmXtZ6623_8WBv5g5E3-_rIEnSRkpQvY,31908 -imgui_bundle/demos_python/demos_nanovg/__pycache__/demo_nanovg_full.cpython-313.pyc,, -imgui_bundle/demos_python/demos_nanovg/__pycache__/demo_nanovg_heart.cpython-313.pyc,, -imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full.py,sha256=UpSJ2k6f-eZ06eRPY57vWqbDpeq_aBqdb2bcLvPzhDE,5087 -imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full/__pycache__/demo_nanovg_full_impl.cpython-313.pyc,, -imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full/demo_nanovg_full_impl.py,sha256=IPU-YLtKSLaFr-emh8faQBnxzIz35HZU_A4y_XLVQ9M,34984 -imgui_bundle/demos_python/demos_nanovg/demo_nanovg_heart.py,sha256=MHdmO0LsMKAL-q4_ZC1HJR1rLcvwedMpQBarOoK3KQY,5038 -imgui_bundle/demos_python/demos_node_editor/__init__.py,sha256=_5ZmJH4ScNiu6Dvw2F2o7fTaQH8y1G8r47zctrunDFU,184 
-imgui_bundle/demos_python/demos_node_editor/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/demos_node_editor/__pycache__/demo_node_editor_basic.cpython-313.pyc,, -imgui_bundle/demos_python/demos_node_editor/__pycache__/demo_romeo_and_juliet.cpython-313.pyc,, -imgui_bundle/demos_python/demos_node_editor/demo_node_editor_basic.json,sha256=cLUk21uRqL1qW-ovgxK0m9616SV-S5ZZrY8vAgQ0ieo,334 -imgui_bundle/demos_python/demos_node_editor/demo_node_editor_basic.py,sha256=gf-awPZ6MCEAGAXq-EVEcxGkGD5NRmPjkxIYJCvUChA,8487 -imgui_bundle/demos_python/demos_node_editor/demo_romeo_and_juliet.py,sha256=0pOa9cAwokiQkUdwmWKCKqROAJm55rRLSyAQ7LT6cNU,2272 -imgui_bundle/demos_python/demos_node_editor/romeo_and_juliet.json,sha256=Izot8WkS2pM3bTy0HccR4gootK6BrW24FkNQCsst2M8,355 -imgui_bundle/demos_python/demos_tex_inspect/__init__.py,sha256=DsgZmUvDVm8XYN-wI9uBIVzNOJa7mTTMJLpCkmPb9Kg,200 -imgui_bundle/demos_python/demos_tex_inspect/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/demos_tex_inspect/__pycache__/demo_tex_inspect_demo_window.cpython-313.pyc,, -imgui_bundle/demos_python/demos_tex_inspect/__pycache__/demo_tex_inspect_simple.cpython-313.pyc,, -imgui_bundle/demos_python/demos_tex_inspect/demo_tex_inspect_demo_window.py,sha256=B3weap7DnRShHi9D8qyvWgtmZCuAm42ttiVhYJCyco0,657 -imgui_bundle/demos_python/demos_tex_inspect/demo_tex_inspect_simple.py,sha256=FAggQjzh1hKo9xIDvL0kFvY11qCLtesyhxETRoi7mGw,1195 -imgui_bundle/demos_python/haikus/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/demos_python/haikus/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/notebooks/.gitignore,sha256=GuUYDQtPg3HrDxUGVHJ75oOoGk-KtR_tfOFWnhzUOEY,23 -imgui_bundle/demos_python/notebooks/NodeEditor.json,sha256=K3Z3pupq1Gi8qNderYHAbLPMoXC7m27veIW7WO4JNkk,340 -imgui_bundle/demos_python/notebooks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-imgui_bundle/demos_python/notebooks/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/notebooks/demo_notebook.ipynb,sha256=EIGeBTHo9ru8N6DLvpQMRJwVOCjxc24IZ_Jw9FoauLc,529129 -imgui_bundle/demos_python/ruff.toml,sha256=5xzgJnq_0kYIwzrJ7oOCIF5eVpCbrAOqR4O0KclWm6Q,110 -imgui_bundle/demos_python/sandbox/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/demos_python/sandbox/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sand_node.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sand_plot.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sand_runnable_code_cell.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sand_subplots.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_app.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_custom_opengl_version.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_glfw_window_manip.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_immapp_manual_render.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_node_md_code.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_node_text_wrap.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_plot_in_node.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_plotmesh3d.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_stacklayout.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_tmp.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/__pycache__/sandbox_tstengine_keys.cpython-313.pyc,, -imgui_bundle/demos_python/sandbox/sand_node.py,sha256=DSnslxuREcy-S6szrD3Cpi-_XDqtHO6YwW9NrcJvS0g,582 -imgui_bundle/demos_python/sandbox/sand_plot.py,sha256=Gg-lTv4FfXySqSn8sQsqFa5SNEIexSU0wiLVO77py5Q,321 
-imgui_bundle/demos_python/sandbox/sand_runnable_code_cell.py,sha256=rBBCd8c-IGslHv5ZIk2TCLpwkN7OzKpkImEkGI-_tcU,1867 -imgui_bundle/demos_python/sandbox/sand_subplots.py,sha256=blj18rvOKhhUmZGA2-fUq5gNRzzs_UwWCTXSWw2CL68,909 -imgui_bundle/demos_python/sandbox/sandbox_app.py,sha256=yMtQCZJ_caUYnPT65ofxb6lKFp4OrubMb-wMRUfbUrw,823 -imgui_bundle/demos_python/sandbox/sandbox_custom_opengl_version.py,sha256=F6tB15GnzPrvOuBk6WoYvBZPeqIw16cTTMZjEy_tMIc,877 -imgui_bundle/demos_python/sandbox/sandbox_glfw_window_manip.py,sha256=kZIJPCmCx0k870uFP2lAFJFN3rwEH8kJXaOwGAxRDfo,560 -imgui_bundle/demos_python/sandbox/sandbox_immapp_manual_render.py,sha256=X5vu_59RbLFkL_Z6MddqpY2ow1tgGQrtHHUH1hSf_Wg,654 -imgui_bundle/demos_python/sandbox/sandbox_node_md_code.py,sha256=cbdp15xa_jKcRXX3hdFkHE9Pz0JIx5ZrTZtYKmXxDvc,1439 -imgui_bundle/demos_python/sandbox/sandbox_node_text_wrap.py,sha256=DEArDar35LPgoJ_-_nvprUDIl6EaqA3LDH4N98SeYeE,3507 -imgui_bundle/demos_python/sandbox/sandbox_plot_in_node.py,sha256=x5F6ldCqX_DEnbwT9vDWb9yhz5aWuHQ6IwvuA2RVNgQ,794 -imgui_bundle/demos_python/sandbox/sandbox_plotmesh3d.py,sha256=qMOFo487BpRSVE3k8iVTJhVqOAJisuLuHD9IktIAhjc,1042 -imgui_bundle/demos_python/sandbox/sandbox_stacklayout.py,sha256=dKQQ5uy2DSASik9TTk-X_lvvEMbIRh6XeAwHhTo7Qkw,610 -imgui_bundle/demos_python/sandbox/sandbox_tmp.py,sha256=e0s9VmUDGtgfHwws3q6JwAJ7mlLH2LC31mntXYhfvno,116 -imgui_bundle/demos_python/sandbox/sandbox_tstengine_keys.py,sha256=beeCGXeYBI2lviGB1C-wLIjgvZguS8HhUnSQH9KJom8,1501 -imgui_bundle/glfw3.dll,sha256=Gs0pbxiNPWeHoRCP2ut5gcOCmigDU2MIKhMjgVFO1PY,218624 -imgui_bundle/glfw3dll.lib,sha256=LfUbPSgI6RzCbUKxLxwTdDhG_eSk31NPbWENR7mZbf4,30306 -imgui_bundle/glfw_utils.py,sha256=xl054Y72FN-OcMbfZxeFFJZFBBam2TgazCbb68Dx4F8,785 -imgui_bundle/hello_imgui.pyi,sha256=R2eNyJzFRuSJM5wyNtmXG0s9D6vP5HwPkqb3Nwazz7s,176348 -imgui_bundle/im_col32.py,sha256=JCOcnwXgm1C_EAKfLKraAm__3GhDnCOyc4fF2pIV7_I,461 -imgui_bundle/im_cool_bar.pyi,sha256=PlKk-T5MSwvdqufAxfvLL4DNR6uVNe88sd_B_G4WoHA,3307 
-imgui_bundle/im_file_dialog.pyi,sha256=vxF0kVjbS2d4Iw_srj8e7-w83yhq4oAbgCkvSrn_2tg,2404 -imgui_bundle/imgui/__init__.pyi,sha256=rM66PofGZgKkLwxLh9OPTk3egaTt-JDoLljXiM4mPMo,697747 -imgui_bundle/imgui/__pycache__/test_engine_checks.cpython-313.pyc,, -imgui_bundle/imgui/backends.pyi,sha256=yw5IWjk10E1tToP-HKlL3eh-PCUsERPYEsPTibD7FjU,3519 -imgui_bundle/imgui/internal.pyi,sha256=C_I-vdKWyAqzd0Z4nDZkgfxFBijzGVLojFZLMLLTqoA,444457 -imgui_bundle/imgui/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/imgui/test_engine.pyi,sha256=s3QrGjUa1D3tAO6DYROjo_WVwz7PkwoNPYQJJlOfXt8,134333 -imgui_bundle/imgui/test_engine_checks.py,sha256=QcenN-QZhxa62kRaJ58M69EuOy98aafdXsTwDWM2xr4,1388 -imgui_bundle/imgui_bundle.pyi,sha256=4aU4KGF9x4FIaFBRq5gVdMr4uJ2m8XXH9CulY5NvXbI,436 -imgui_bundle/imgui_color_text_edit.pyi,sha256=rzp3XQ2doTJqjhdKW-ioUf21hmCWZqs_JUN5LmJLvOE,9076 -imgui_bundle/imgui_command_palette.pyi,sha256=QRv8-SVHJIMgagHLOR_22XC_2uAiivtp9GhxMHF-i0k,3366 -imgui_bundle/imgui_ctx.py,sha256=gPcWdRwS2R89UBYkyGCGaToxQWO4zBC_xafF1Go2wRM,27153 -imgui_bundle/imgui_fig.py,sha256=GNvUkTyQgLwtqJULMBCYwDxkMvqhSNSgRKy9V1aktf0,3874 -imgui_bundle/imgui_knobs.pyi,sha256=FEDz2JaKV2Sm55_svOn553iNJnDK_j05X_Dh5FIvJIM,2276 -imgui_bundle/imgui_md.pyi,sha256=1-4fRNDidZPQpRc6Ca7xHD7rZ3_0kXkOF5PfTKtdubc,5240 -imgui_bundle/imgui_node_editor.pyi,sha256=PBMir1567BZ8caLl1syWrIBluDBa1Fy1AAehDNylXmc,41364 -imgui_bundle/imgui_node_editor_ctx.py,sha256=4Qm3KC7IVTHJDQBceZj9gTKOYxROg6Qz4cLWWmfsiTk,5322 -imgui_bundle/imgui_pydantic.py,sha256=qJVLpeq7sc0BtX2jBiBt5Sv0RFgKCQEmVX5hMmn3uac,5586 -imgui_bundle/imgui_tex_inspect.pyi,sha256=6rvjlE9i7kBJDzLwwFa4SgK28tpldctUqRmaHdfCXUw,13563 -imgui_bundle/imgui_toggle.pyi,sha256=bMEhCN-CcWKJSOxuuzgZs2__BAHuQup3mZAujOxJgqA,21964 -imgui_bundle/imguizmo.pyi,sha256=RF3fY831rlRsAonSRy3iyUpqdDLMme2QFHsVXazMKlQ,18665 -imgui_bundle/immapp/__init__.py,sha256=HLzGWzdtHAQi0A__1_cFORe6jhHeJdD3u2W65fGd9ac,2687 
-imgui_bundle/immapp/__init__.pyi,sha256=z5ZylI40IJmE7y2R6LsE-md4q4N0tjSMZ7v8YS8HZtw,1093 -imgui_bundle/immapp/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/immapp/__pycache__/icons_fontawesome_4.cpython-313.pyc,, -imgui_bundle/immapp/__pycache__/icons_fontawesome_6.cpython-313.pyc,, -imgui_bundle/immapp/__pycache__/immapp_code_utils.cpython-313.pyc,, -imgui_bundle/immapp/__pycache__/immapp_notebook.cpython-313.pyc,, -imgui_bundle/immapp/__pycache__/immapp_utils.cpython-313.pyc,, -imgui_bundle/immapp/__pycache__/runnable_code_cell.cpython-313.pyc,, -imgui_bundle/immapp/icons_fontawesome_4.py,sha256=TFU7wqJL8KxzELCuOZtmU2vkGSIHG1FTZRjhLnqA9mc,52062 -imgui_bundle/immapp/icons_fontawesome_6.py,sha256=xIhHDDTskFhD53FhL64g175aEIyk_hCrCIUka7388yw,93641 -imgui_bundle/immapp/immapp_code_utils.py,sha256=0jhFBEy9nscArQWd4fnV4_SiVTCABCxVj_mEZHsfOC4,3262 -imgui_bundle/immapp/immapp_cpp.pyi,sha256=KcOnPip4HNgnWcOY7JP7IiQGRHtMZKeCsW9r3VDwTf8,19473 -imgui_bundle/immapp/immapp_notebook.py,sha256=XxANYCOJ9xGPWzfHY9Y7P4wRhea2hFYp35nayDNHb0s,4892 -imgui_bundle/immapp/immapp_utils.py,sha256=8GSCaRtH1Go73ZxnmWVsHsEMNzm5w4t6a8TYGr1I028,5945 -imgui_bundle/immapp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/immapp/runnable_code_cell.py,sha256=mAHMQ1n3MkRw4LhD4SQIql5mX6EfewPu_lZ7GkzWr9A,4706 -imgui_bundle/immvision.pyi,sha256=XMxpMpJ1QhM-8fHzyNczZEVny5JBu8dkrzYrSHix4tM,42980 -imgui_bundle/implot/__init__.pyi,sha256=WtLLIjS8Hg98J2Ei4Nb27V5Nq4FFhSoeWKLVgTJjMT4,129399 -imgui_bundle/implot/internal.pyi,sha256=ilfQPNvsXCkUUCXD20LBDH96wbm6UDoZyFl8T2XAfiE,106291 -imgui_bundle/implot/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/implot3d/__init__.pyi,sha256=yiiBshWQchjoIhHIqVubbTeErtVlCsCFspmjW268b04,72432 -imgui_bundle/implot3d/internal.pyi,sha256=IDmo8D1BhmArSsB7zt9FMSoVUVx14KcCScJB5UG6VcQ,41480 -imgui_bundle/implot3d/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-imgui_bundle/imspinner.pyi,sha256=kMz8auo9xYTKeaWonZgnNCqS5BNkiG7jsl19g9rLvpg,44202 -imgui_bundle/mypy.ini,sha256=vV63dcPbLQP4esuqwHeU5veI7v2AaZng69_CEQYh9Rs,479 -imgui_bundle/nanovg.pyi,sha256=WFH_1g6TWvViBh7adBzIATeEyGhwKxK2YstE6zK05fc,58881 -imgui_bundle/notebook_patch_runners.py,sha256=P9LfqIP2mJ5kmu40tfqD6BrynT89I0wA1vB7H3GlEeA,2518 -imgui_bundle/opencv_world4100.dll,sha256=5uMeVbmzd7d1eCxLJ6gLJGDf_j2-54mOA00lcp3KAZg,64653312 -imgui_bundle/portable_file_dialogs.pyi,sha256=IPFo3VmnTPpIuRXr0qdybs6w2kKpGHVzKWX9Fr4ywXg,4116 -imgui_bundle/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -imgui_bundle/pyodide_patch_runners.py,sha256=_q1i-Sx-5pyLVmJ6smqcqrRa04MCRoHN4q6GwovMT2s,6519 -imgui_bundle/python_backends/LICENSE_pyimgui.txt,sha256=KsiXoMwyAlq7GNigXaBPAcFJqf-y63tandKoHTU5VfY,1508 -imgui_bundle/python_backends/Readme.md,sha256=X35JCgOCdn0BaroVoKANOW_0wjRbB0QoGkxoycukouQ,1440 -imgui_bundle/python_backends/__init__.py,sha256=xYFWpQbUqcO7qPwBFy6C_wGjSvoHMyPA2024-TAneZ8,411 -imgui_bundle/python_backends/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/python_backends/__pycache__/base_backend.cpython-313.pyc,, -imgui_bundle/python_backends/__pycache__/glfw_backend.cpython-313.pyc,, -imgui_bundle/python_backends/__pycache__/opengl_backend.cpython-313.pyc,, -imgui_bundle/python_backends/__pycache__/pyglet_backend.cpython-313.pyc,, -imgui_bundle/python_backends/__pycache__/sdl2_backend.cpython-313.pyc,, -imgui_bundle/python_backends/__pycache__/sdl3_backend.cpython-313.pyc,, -imgui_bundle/python_backends/base_backend.py,sha256=tT7wlb4_0DxLh1eZjk2BV2Lo1if9i_yKzZIp6LQBZeU,884 -imgui_bundle/python_backends/examples/__pycache__/example_python_backend_glfw3.cpython-313.pyc,, -imgui_bundle/python_backends/examples/__pycache__/example_python_backend_pyglet.cpython-313.pyc,, -imgui_bundle/python_backends/examples/__pycache__/example_python_backend_sdl2.cpython-313.pyc,, 
-imgui_bundle/python_backends/examples/__pycache__/example_python_backend_sdl3.cpython-313.pyc,, -imgui_bundle/python_backends/examples/__pycache__/example_python_backend_wgpu.cpython-313.pyc,, -imgui_bundle/python_backends/examples/example_python_backend_glfw3.py,sha256=4BD3rxbTY0GHzO9aT63UiuLLFirERAgUg132LiH1dTA,5468 -imgui_bundle/python_backends/examples/example_python_backend_pyglet.py,sha256=3noY3efE5BEkfgEG_VupTjIvDZR7N5BgQdpMqymUJrc,2637 -imgui_bundle/python_backends/examples/example_python_backend_sdl2.py,sha256=nKv8CHYyTSG4EA2eQimwB5xLgMCKeSTEqbGXb8E_rH4,4734 -imgui_bundle/python_backends/examples/example_python_backend_sdl3.py,sha256=Gwn51c9i4YqK2WeWjm0U-fLVJb_jvB-6jY8FmUtcuwY,4738 -imgui_bundle/python_backends/examples/example_python_backend_wgpu.py,sha256=s88XszMNTXGNgETCPAFnUv8JV_G2niB-jesEq967B3g,2507 -imgui_bundle/python_backends/examples_disabled/Readme.md,sha256=OqyYuTupzwzBiFcabj7sk2k-gnNoN8aJ2q9U4-6cr7E,119 -imgui_bundle/python_backends/examples_disabled/__pycache__/example_python_backend_cocos2d.cpython-313.pyc,, -imgui_bundle/python_backends/examples_disabled/__pycache__/example_python_backend_pygame.cpython-313.pyc,, -imgui_bundle/python_backends/examples_disabled/example_python_backend_cocos2d.py,sha256=3lQi83RK1XHDTxAZvg4GPe1s8fJw4LqRnYzz4VuPqm4,1783 -imgui_bundle/python_backends/examples_disabled/example_python_backend_pygame.py,sha256=6RrOW42vDY2CsGW5wVcUgx8t68Lyb-73FYwNQr3LvtU,2537 -imgui_bundle/python_backends/glfw_backend.py,sha256=6NyD-k-od2Qq54K3pWAaFpE3g1vejSTLTzitEWj2p4s,9799 -imgui_bundle/python_backends/opengl_backend.py,sha256=lHJEI1iQ4qUA0-VkfGNeX590decXwZqlQabiL6q5Ca0,18524 -imgui_bundle/python_backends/pyglet_backend.py,sha256=qlEGrzHc7Bi8I-YlLtd_eMbDYD957Onxf0E97Iy-MEE,13110 -imgui_bundle/python_backends/python_backends_disabled/Readme.md,sha256=OqyYuTupzwzBiFcabj7sk2k-gnNoN8aJ2q9U4-6cr7E,119 -imgui_bundle/python_backends/python_backends_disabled/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-imgui_bundle/python_backends/python_backends_disabled/__pycache__/__init__.cpython-313.pyc,, -imgui_bundle/python_backends/python_backends_disabled/__pycache__/cocos2d_backend.cpython-313.pyc,, -imgui_bundle/python_backends/python_backends_disabled/__pycache__/glumpy_backend.cpython-313.pyc,, -imgui_bundle/python_backends/python_backends_disabled/__pycache__/pygame_backend.cpython-313.pyc,, -imgui_bundle/python_backends/python_backends_disabled/cocos2d_backend.py,sha256=3w6aFle_0Zuwy-g_Z7lASZ3NLZF_rHC2gxftpd0sc1w,895 -imgui_bundle/python_backends/python_backends_disabled/glumpy_backend.py,sha256=isjNATiHeDBnP7PQ8-CmDBvZu6bX7jpsb3rPFq6Mhno,13906 -imgui_bundle/python_backends/python_backends_disabled/pygame_backend.py,sha256=mkzAY02TV3YeUWSNOH04DrsymKD69LF1AouluS1zVHw,5241 -imgui_bundle/python_backends/sdl2_backend.py,sha256=oA315NGcPgfnJoWj1udnypdkewIIvA6rQttpYHk3RXI,12404 -imgui_bundle/python_backends/sdl3_backend.py,sha256=RhS6TGtEJt6mYpTmb2-k3qnI3c1iaVirXMak_vRVWX0,12376 -imgui_bundle/ruff.toml,sha256=5xzgJnq_0kYIwzrJ7oOCIF5eVpCbrAOqR4O0KclWm6Q,110 -include/plutovg/plutovg.h,sha256=jmjXAIy-3Dw36L-jGOCdb7cdMFWKKJ99a8sChnlRKyE,90114 -lib/cmake/plutovg/plutovgConfig.cmake,sha256=P03G7BvLAWr-Ohg0GRlbDNW67eDAW_PUGTQWfjcWq7s,966 -lib/cmake/plutovg/plutovgConfigVersion.cmake,sha256=QbjARqBeDK1vYuYcU3vfgu3uz9uTft6KbrkYKg4PMHY,2830 -lib/cmake/plutovg/plutovgTargets-release.cmake,sha256=Wn_619DpNiz4LYCt6S_MAx6TTMPk2HdlcPYsv4rCHS4,870 -lib/cmake/plutovg/plutovgTargets.cmake,sha256=OnzvrLKhvaVpEiw_U6L0QCTpRQcOdA7NJ4aHYeU9S4o,4295 -lib/plutovg.lib,sha256=yKi4cboWU8KLz15ieqoGUeH11K87k7eqFO8W1t3Yj4c,619746 +../../Scripts/demo_imgui_bundle.exe,sha256=-E-qzJYNnMiT8yrcAqCNc8ukRfFA9HEHmhVbWhLbGxA,108390 +../../Scripts/imgui_bundle_demo.exe,sha256=-E-qzJYNnMiT8yrcAqCNc8ukRfFA9HEHmhVbWhLbGxA,108390 +imgui_bundle-1.92.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 
+imgui_bundle-1.92.5.dist-info/METADATA,sha256=ksxHj08rVD3oPGDWMauGST19p20c0-dWEQfraGeSzgs,22464 +imgui_bundle-1.92.5.dist-info/RECORD,, +imgui_bundle-1.92.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle-1.92.5.dist-info/WHEEL,sha256=gWMs92Yhbl9pSGNRFWCXG1mfeuNl7HhxcJG5aLu4nQc,106 +imgui_bundle-1.92.5.dist-info/entry_points.txt,sha256=N8FuM8pqoMN40DBjFX_gzkV5PAwzUAZbXNkmCSBe8WE,157 +imgui_bundle-1.92.5.dist-info/licenses/LICENSE,sha256=8qrd403zx-jESpGcOUO5PV3-Pg1X4c8B2Teq0WbTwck,1106 +imgui_bundle/.gitignore,sha256=foxuxedhAe0NXgNJsVnWYh_loWRbUEm4yY9Bgm4eCNE,72 +imgui_bundle/LICENSE,sha256=CFux1FcOxwVQyX7YfI8vw11XaGaqT5gVrbpvzCAAeV8,1101 +imgui_bundle/Readme_pypi.md,sha256=SLMgeZlillzepDM4w_VVsXMNADUOTj3fKtJBDM78f8k,20437 +imgui_bundle/__init__.py,sha256=LuV8dr1rczLS-iWVbwXJ8re_8SYmvClckXMPgaMFwOo,7515 +imgui_bundle/__init__.pyi,sha256=Cjbb_cC1ead9_SiPYoOO4ZU5CrbQG6poXHzcZIkUCtE,1598 +imgui_bundle/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/__pycache__/_glfw_set_search_path.cpython-314.pyc,, +imgui_bundle/__pycache__/_patch_runners_add_save_screenshot_param.cpython-314.pyc,, +imgui_bundle/__pycache__/glfw_utils.cpython-314.pyc,, +imgui_bundle/__pycache__/im_col32.cpython-314.pyc,, +imgui_bundle/__pycache__/imgui_ctx.cpython-314.pyc,, +imgui_bundle/__pycache__/imgui_fig.cpython-314.pyc,, +imgui_bundle/__pycache__/imgui_node_editor_ctx.cpython-314.pyc,, +imgui_bundle/__pycache__/imgui_pydantic.cpython-314.pyc,, +imgui_bundle/__pycache__/notebook_patch_runners.cpython-314.pyc,, +imgui_bundle/__pycache__/pyodide_patch_runners.cpython-314.pyc,, +imgui_bundle/_glfw_set_search_path.py,sha256=6qXY5xNQ-gMRs8e67pUJjHIvA2UbYD2f9GkvkQds_io,1817 +imgui_bundle/_imgui_bundle.cp314-win_amd64.pyd,sha256=7JNl8vqbeEd9cFap6vOUHko_YlPwyhRAC-xlNB7vnqE,22778880 +imgui_bundle/_patch_runners_add_save_screenshot_param.py,sha256=8-0brfj8dpUQw7-Sx_CyrEtHkf7LPxLuie2oPM-4iXo,1805 
+imgui_bundle/assets/app_settings/apple/Info.plist,sha256=0PEHZdHZhGefi9ryXpCsglj2A6PhVW5itYDGfixqdfc,1020 +imgui_bundle/assets/app_settings/icon.png,sha256=yNa_UjWjKyrIyGcrTMWeXyQgeCt8xqeRPDlJj4B7gkQ,53086 +imgui_bundle/assets/fonts/DroidSans.ttf,sha256=TiNxvA5M9pgzQuFQQS8UDaedZ0yb4LVkWEAfWBBy7NM,190044 +imgui_bundle/assets/fonts/Font_Awesome_6_Free-Solid-900.otf,sha256=4CuI-K1cfSlImkDpSsE3NbSXPJRpvafO_bt78GrtQWA,1023996 +imgui_bundle/assets/fonts/Inconsolata-Medium.ttf,sha256=grwtZwr4wjrJ7674wDRvG0RMeSUqN-O-OVqMnkUICoI,102176 +imgui_bundle/assets/fonts/Roboto/LICENSE.txt,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560 +imgui_bundle/assets/fonts/Roboto/Roboto-Bold.ttf,sha256=7GhaRhBSlv5GyHRNpKEc-BGLpsEScZQXZvelRt9qp8c,167336 +imgui_bundle/assets/fonts/Roboto/Roboto-BoldItalic.ttf,sha256=Yd9Zf3PJHyOMvoj-PFNnAtEg0Ei3myX6D094GP0SOm8,171508 +imgui_bundle/assets/fonts/Roboto/Roboto-Regular.ttf,sha256=ThR6tkuf322J0B9rjDygs83cWdYIqOIhj5olBLXJjhQ,168260 +imgui_bundle/assets/fonts/Roboto/Roboto-RegularItalic.ttf,sha256=meSoUGETbpngUpKe0NheNjhPulw0t3MTmo9kM5xgmUM,170504 +imgui_bundle/assets/fonts/fontawesome-webfont.ttf,sha256=qljzPyOaD7AvXHpsRcBD16msmgkzNYBmlOzW1O3A1qg,165548 +imgui_bundle/assets/images/markdown_broken_image.png,sha256=AHNWv5WbiArzyRFyxelvVvZhGS2mEDBYTR3LMIwXhuI,2640 +imgui_bundle/assets/images/world.png,sha256=af0eoBzKHzFv691LJgBOZp23yNa0aPs5DVj6LlSaNdU,14112 +imgui_bundle/demos_assets/app_settings/apple/Info.plist,sha256=0PEHZdHZhGefi9ryXpCsglj2A6PhVW5itYDGfixqdfc,1020 +imgui_bundle/demos_assets/app_settings/icon.png,sha256=kkhYXNp4TMd_sakAzVUoeTcp7JnqBU6oufU2v8oXQNU,344507 +imgui_bundle/demos_assets/fonts/Akronim-Regular.ttf,sha256=BjusHSINvTfjuiwuJMIJD-Gufhz5wdgs9A4rQbxJYLQ,109540 +imgui_bundle/demos_assets/fonts/DroidSans.ttf,sha256=TiNxvA5M9pgzQuFQQS8UDaedZ0yb4LVkWEAfWBBy7NM,190044 +imgui_bundle/demos_assets/fonts/Font_Awesome_6_Free-Solid-900.otf,sha256=4CuI-K1cfSlImkDpSsE3NbSXPJRpvafO_bt78GrtQWA,1023996 
+imgui_bundle/demos_assets/fonts/Inconsolata-Medium.ttf,sha256=grwtZwr4wjrJ7674wDRvG0RMeSUqN-O-OVqMnkUICoI,102176 +imgui_bundle/demos_assets/fonts/NotoEmoji-Regular.ttf,sha256=Zfwh9q2GrL5Dwp-J_8Ddd2IXCaUXpQ7dE3CqgCMMyPs,878928 +imgui_bundle/demos_assets/fonts/Playbox/Playbox-FREE.otf,sha256=SWjEWSzS_im4E4Ji-N5jvqHaI4iBa7v3b4DZNnh6rZ8,213620 +imgui_bundle/demos_assets/fonts/Playbox/Playbox-license.txt,sha256=_swGxy3KPtBcBjTkhdK7wQNNXSXOo95AJKLZ6Qz0teo,3955 +imgui_bundle/demos_assets/fonts/Roboto/LICENSE_.txt,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560 +imgui_bundle/demos_assets/fonts/Roboto/Roboto-Bold.ttf,sha256=7GhaRhBSlv5GyHRNpKEc-BGLpsEScZQXZvelRt9qp8c,167336 +imgui_bundle/demos_assets/fonts/Roboto/Roboto-BoldItalic.ttf,sha256=Yd9Zf3PJHyOMvoj-PFNnAtEg0Ei3myX6D094GP0SOm8,171508 +imgui_bundle/demos_assets/fonts/Roboto/Roboto-Regular.ttf,sha256=ThR6tkuf322J0B9rjDygs83cWdYIqOIhj5olBLXJjhQ,168260 +imgui_bundle/demos_assets/fonts/Roboto/Roboto-RegularItalic.ttf,sha256=meSoUGETbpngUpKe0NheNjhPulw0t3MTmo9kM5xgmUM,170504 +imgui_bundle/demos_assets/fonts/entypo.ttf,sha256=OVs8BhEXFfLIA9bns7i2WCT8zVpuZ8WG1vTnzW8Auwo,35392 +imgui_bundle/demos_assets/fonts/fontawesome-webfont.ttf,sha256=qljzPyOaD7AvXHpsRcBD16msmgkzNYBmlOzW1O3A1qg,165548 +imgui_bundle/demos_assets/images/badge_interactive_manual.png,sha256=SypOZaI7DFJdPJnp3GVXI8POS6JjV9vpA-HcVuJ5278,4367 +imgui_bundle/demos_assets/images/badge_view_docs.png,sha256=dslfbTd1fU1jzLb4DKMkBjSS1aYtJmoicvYWJxYgZSU,3588 +imgui_bundle/demos_assets/images/badge_view_sources.png,sha256=8nxhINXLt_Ju3Nzi_rH0Xyt4kTxHzhmlMsym3l_F4ew,3798 +imgui_bundle/demos_assets/images/bear_transparent.png,sha256=jTX5C_CRhcvkyneCt2D5BROBxavGFv-Z1DjnMKumlmU,39050 +imgui_bundle/demos_assets/images/demo_tex_inspect.png,sha256=2V1tjK9moTo5WaDUD6p_olxgaw1V81WwcJ6oEvq3fVk,40026 +imgui_bundle/demos_assets/images/dmla.jpg,sha256=Mq-1zZAHsYJ0WxbbPxDmx8H4n7pOpSU--wXSh620ZK4,67037 
+imgui_bundle/demos_assets/images/gizmo_screenshot.jpg,sha256=gMF7TEB-YBbuLkATU6hNUGqNdbRk4f3YjYDo6kSEzSY,163208 +imgui_bundle/demos_assets/images/haiku.png,sha256=6bzrldqKzHRcyWMJeM9vEmKQ7AtXE7HZfnztPoXj3jM,21737 +imgui_bundle/demos_assets/images/house.jpg,sha256=-Y-Lw7kACiSiG8iYAJU29YHEU2cbiM0ocxw7ARwXgBA,120412 +imgui_bundle/demos_assets/images/immapp_notebook_example.jpg,sha256=yaP9RHqoJkqkGydCVzQJ5Sb50exRLPmaYr1JlLDrR-0,139989 +imgui_bundle/demos_assets/images/logo_imgui_600.png,sha256=i8TkSOQDOOy70urgLy-oppb4LbwxD-CZcaRMe8PAhUQ,317613 +imgui_bundle/demos_assets/images/logo_imgui_bundle_512.png,sha256=OEMxH6ZVVKrOWvXYH57GOMxchwcLuKOPzYYcPBsO4DI,283075 +imgui_bundle/demos_assets/images/markdown_broken_image.png,sha256=AHNWv5WbiArzyRFyxelvVvZhGS2mEDBYTR3LMIwXhuI,2640 +imgui_bundle/demos_assets/images/nanovg_demo_heart.jpg,sha256=r7MNWC21tSfy1TAr-pb5Zfiza3rDhwFAykZeGnA8ph0,20232 +imgui_bundle/demos_assets/images/nanovg_full_demo.jpg,sha256=mUv1uww5G2Ln8c_bNxVTNOmugChJad4IwyxJUfviRqo,32294 +imgui_bundle/demos_assets/images/node_editor_fiat.jpg,sha256=JEnCBufehp0ZvL0nFxJIUjhpv3rco8-HtFzL3wQth-s,134624 +imgui_bundle/demos_assets/images/node_editor_screenshot.jpg,sha256=UoaxS9dyb3SLvatURpUkefGu6w5yJBuKJJ7YiCCRNXw,70021 +imgui_bundle/demos_assets/images/tennis.jpg,sha256=OFGzn3rL_6B9UZeTcvQ2PnZdf2QmHKsD2KJeIDzDBlc,50311 +imgui_bundle/demos_assets/images/world.png,sha256=af0eoBzKHzFv691LJgBOZp23yNa0aPs5DVj6LlSaNdU,14112 +imgui_bundle/demos_assets/nanovg_demo_images/image1.jpg,sha256=HOieYayzDyOv21Hqes1JCbs96XjlHxx-cbRrk5M4WKE,25760 +imgui_bundle/demos_assets/nanovg_demo_images/image10.jpg,sha256=DTzBQHDy0O6ERHzD7IRfA609sQOW13ibZcmIZ7cd7Qc,3439 +imgui_bundle/demos_assets/nanovg_demo_images/image11.jpg,sha256=3YhRr6H8YKriF3QUCSadnCmvJx2zHzxJyRi15dMD3zc,3818 +imgui_bundle/demos_assets/nanovg_demo_images/image12.jpg,sha256=p8XLBZW76PRRPxnOoyQ3qv_1OJ-rrn8vX06mXUKJ2D0,5452 
+imgui_bundle/demos_assets/nanovg_demo_images/image2.jpg,sha256=IQrpeus8XbMv2z4wGvmv_NEgFkygkIhquxK0Zy3-QPQ,24091 +imgui_bundle/demos_assets/nanovg_demo_images/image3.jpg,sha256=Sq2i1aVtZ3XZdWkspQVbmPTA-22XAx7asqom_39IL54,29282 +imgui_bundle/demos_assets/nanovg_demo_images/image4.jpg,sha256=h3OatOlnfFsLVdIu1L7OcXvBaguFm9Gv08Qndwp6J6Q,23830 +imgui_bundle/demos_assets/nanovg_demo_images/image5.jpg,sha256=OwcY53ndH02HyWKcXvoADiEctVI7VMTxdV5gPFx1SiA,27131 +imgui_bundle/demos_assets/nanovg_demo_images/image6.jpg,sha256=GwJz2cSFPLeZl48xyUIqTM3VhxmxeRfcqUrLEyqldcU,25116 +imgui_bundle/demos_assets/nanovg_demo_images/image7.jpg,sha256=2z6U4e-UDylYbWTR1jWJRPhpI8VWqYAU3DHWnXDtin0,25590 +imgui_bundle/demos_assets/nanovg_demo_images/image8.jpg,sha256=uH2QM0sh-JH1SFOgZJyDXGrOfHWQLLIPrpiqPpEkNmc,24607 +imgui_bundle/demos_assets/nanovg_demo_images/image9.jpg,sha256=yBmR6tWkdthSqGm0P1bIrJb56pTcZnQlQm-uWSt-lyE,4035 +imgui_bundle/demos_cpp/CMakeLists.txt,sha256=kRQ6aDwSlkA7ewWReqjO5RHK2y4ASbCZOGBweOgBXKk,3629 +imgui_bundle/demos_cpp/_auto_main/_auto_main.cpp.in,sha256=hCL5YVUKLu2VyrYMH62YzZ4VA-z_o_Eo6vIT4__Y-6o,854 +imgui_bundle/demos_cpp/coi-serviceworker.js,sha256=T422Jh_xvLIw6ajBvqgQl4pSqiX0Npo-wF4x4tkNX7M,4933 +imgui_bundle/demos_cpp/demo_imgui_bundle.cpp,sha256=JsfqDwUsRhdY7msj2k4TuhD0RPu8o8o6UwoDfR7kYBc,5921 +imgui_bundle/demos_cpp/demo_imgui_bundle_intro.cpp,sha256=C3v9_jNfT4y_y51vS3EyaWcKHt-gVS6hNNTQe-RLbi4,7928 +imgui_bundle/demos_cpp/demo_imgui_md.cpp,sha256=dl0r5Cgji78RwmmirD0lAxc6ywOkUTqyJxzdqBf_hjY,2780 +imgui_bundle/demos_cpp/demo_imgui_show_demo_window.cpp,sha256=EI-6r4cZiINBwso0dpXN3TnqdgJv-fWD3B1NsN79fpI,1778 +imgui_bundle/demos_cpp/demo_imguizmo_launcher.cpp,sha256=9HQ1BsI-NJKPotXyOCnLzVkVVSKfItHbFzSayjBs_4E,1113 +imgui_bundle/demos_cpp/demo_immapp_launcher.cpp,sha256=WWk4PdYeFWFzpOhy5aXXPpQ2BjsEzRqZ5WM489ioSsU,4421 +imgui_bundle/demos_cpp/demo_immvision_launcher.cpp,sha256=c33hUASA4ILqwmddd-DtghQO0_CrS6GLbsq42n178yo,2051 
+imgui_bundle/demos_cpp/demo_implot.cpp,sha256=BaGMSewx5HIgAchK-XiukDKGh5e8E75cDGYHgG3iS20,2079 +imgui_bundle/demos_cpp/demo_logger.cpp,sha256=CmggXf7FOTINnjeglpEMR2OrmsZE2WRFw7HMAC-Pvc8,2408 +imgui_bundle/demos_cpp/demo_nanovg_launcher.cpp,sha256=vAzYQ6nDD5LlblVQzwdlsyIc9_B8uxi56rTalo75SQc,3032 +imgui_bundle/demos_cpp/demo_node_editor_launcher.cpp,sha256=kfqhalJ-2eCIlVWJwgkDaBfvRLaz_5a2uTuFD6Olw0U,1713 +imgui_bundle/demos_cpp/demo_tex_inspect_launcher.cpp,sha256=afTL-elXTz9guJPMKBYhxANuoVQl98u0OTwHNhpuCWY,1737 +imgui_bundle/demos_cpp/demo_text_edit.cpp,sha256=8GzmpStL5Dh-YfeZR6Kr63dGMM2t4lQYQM6Z-j48wOo,1766 +imgui_bundle/demos_cpp/demo_themes.cpp,sha256=3onPEfWKdFG2PhGUD2ZbOX2xdGuYqYdYSLCCiCuKLQI,1008 +imgui_bundle/demos_cpp/demo_utils/CMakeLists.txt,sha256=q2-ARiF67Q29L4PFjlymJ3Kqv1e3kAA--e0brebRFrM,254 +imgui_bundle/demos_cpp/demo_utils/animate_logo.cpp,sha256=S_YLmsip3VwAlpFTxBrElF76nIF9R89GYUDWHi0eeSI,3338 +imgui_bundle/demos_cpp/demo_utils/animate_logo.h,sha256=se4ebC5Do1cq10JLjUow7y81kCF2TiydHmWczLuZkgk,291 +imgui_bundle/demos_cpp/demo_utils/api_demos.cpp,sha256=IkyEGL-m8mHUEfJ5QZlSyDxSuw3h-wbw-TMNlGjHwWM,8026 +imgui_bundle/demos_cpp/demo_utils/api_demos.h,sha256=0jkhDeZFj-w2wRuUks1fnRh7Nnr0S1IS4o8auzNPDNE,1022 +imgui_bundle/demos_cpp/demo_utils/demo_app_table.cpp,sha256=BYt2iwVBfBGBtkCiI75JFXK226SC4-eZd3EQnUCZdLg,5474 +imgui_bundle/demos_cpp/demo_utils/demo_app_table.h,sha256=nU_uKOKdLIJOGUTDcJduJFyVIXtnLf2uVdhMwU5n7l8,888 +imgui_bundle/demos_cpp/demo_utils/subprocess.h,sha256=qORyFmo7ELmxG_3s_ils5iEOG8uTB6IbQ3sO21QqKmQ,37583 +imgui_bundle/demos_cpp/demo_widgets.cpp,sha256=mS6zI-C0UvuJyrcr3U5fQZXwvr6cVkHg6O9zQz_Jq3E,16752 +imgui_bundle/demos_cpp/demos_imguizmo/CMakeLists.txt,sha256=oBFskqvGdnEPUsIsx3fEs7FxLL5VUdfISEIY_084mRw,234 +imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo.cpp,sha256=a2B6S0_VP5e5BdYWy9q-fFoFZS6wh1iUikj0ewap0xM,15853 +imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo_pure.cpp,sha256=mT7HMq-MZxynx_BBjFU4SyVnZt2u6B1v6lJBK2HlBHI,17372 
+imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_curve_edit.cpp,sha256=9UgHHfCgRpCK3S6UAXuXqBur0bLBfJ91Pv5etC8yH9k,3887 +imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_gradient.cpp,sha256=ZyZhrF8xMVF1_KymNMpQNyF2kgY1ugeoTFpLJC8bwJA,2588 +imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_sequencer.draft_cpp,sha256=GOPLY9Qzn2YBoFhXkLlVww0rW4de_VA4qC-7eDUkHgA,10347 +imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_zoom_slider.cpp,sha256=aAjlDeH7bF1xke_vf6xoN4g-hjybkFIC5oEMxEXUyeE,5160 +imgui_bundle/demos_cpp/demos_immapp/CMakeLists.txt,sha256=dkr5nUGmjhqnNrhRoJVwyJoPi6uDMC8koYhf1YrHvc8,1542 +imgui_bundle/demos_cpp/demos_immapp/coi-serviceworker.js,sha256=T422Jh_xvLIw6ajBvqgQl4pSqiX0Npo-wF4x4tkNX7M,4933 +imgui_bundle/demos_cpp/demos_immapp/demo_assets.cpp,sha256=XQCY_LB7me-b1bikb3sNUDZ4BxLgvzU0Wm_aRz09rmU,1249 +imgui_bundle/demos_cpp/demos_immapp/demo_assets_addons.cpp,sha256=2DeJGY_tGK7ObMW1PzQypALRIc1zA4GcQtRN-58xYO8,10023 +imgui_bundle/demos_cpp/demos_immapp/demo_command_palette.cpp,sha256=y20HFFZUA5hPFx-p6rP9JzannOIV4haiYgnkmFzFWIs,2580 +imgui_bundle/demos_cpp/demos_immapp/demo_custom_background.cpp,sha256=naLMvoUwxjCzVdzkzDBTTURiRxzo06A8crxueg7lYvM,21530 +imgui_bundle/demos_cpp/demos_immapp/demo_docking.cpp,sha256=qMDVfI0kYPk-zwmJZxkvX4ATeothVF9WbfBFcKsRfrQ,39474 +imgui_bundle/demos_cpp/demos_immapp/demo_drag_and_drop.cpp,sha256=MlMFOk8UDMtaBN7sBaYe96oBcIa-rNMrLpZJVL0jtGY,2973 +imgui_bundle/demos_cpp/demos_immapp/demo_hello_world.cpp,sha256=qRuU-utPmbBw6PVwCTffYuUi2TuiV5tYI5oKmLB3T7Y,387 +imgui_bundle/demos_cpp/demos_immapp/demo_implot_markdown.cpp,sha256=Acbd80a1tiLF8CaPUj6mNoEgWUjwQXjOy-XGOve7lSc,1499 +imgui_bundle/demos_cpp/demos_immapp/demo_parametric_curve.cpp,sha256=xYq84wD44kBAq-ZfZF1CNrmM24oORN8ippjqbL5NG_0,2286 +imgui_bundle/demos_cpp/demos_immapp/demo_powersave.cpp,sha256=QHCQ7b74sORh-4_EG2JWDof5Hg24SHzDiVpgvvEmv9Q,1726 +imgui_bundle/demos_cpp/demos_immapp/demo_testengine.cpp,sha256=J5SDm20r7Xo6kVy_dj1wEHtLtYsl_VbMp1wBWlPLvMY,10835 
+imgui_bundle/demos_cpp/demos_immapp/haiku_butterfly.cpp,sha256=0HcB2S-0yuyC7EI-5Q9SrDIpNDmuOkj8LnytnZSPbI4,4492 +imgui_bundle/demos_cpp/demos_immapp/haiku_implot_heart.cpp,sha256=ZOcjTkVnR2SdjvVXNu4UnCeAaYEYM6dDCR1ps1hHOq0,1913 +imgui_bundle/demos_cpp/demos_immapp/imgui_example_glfw_opengl3.obsolete.cpp,sha256=ytVbzkVRe4PBoQlIU6u_26c5m1Zb-Q0OB_xsOZ6ebuk,11113 +imgui_bundle/demos_cpp/demos_immapp/shell.emscripten.html,sha256=-7VrhE6xR-oaz58JeI6xz7rr349MNdgFAV1cCX4b89M,3725 +imgui_bundle/demos_cpp/demos_immvision/CMakeLists.txt,sha256=oBFskqvGdnEPUsIsx3fEs7FxLL5VUdfISEIY_084mRw,234 +imgui_bundle/demos_cpp/demos_immvision/demo_immvision_display.cpp,sha256=FhbLO2uNsXWFgzvoHiQowjQqQ76BPwejcFocjZ6mM9s,1690 +imgui_bundle/demos_cpp/demos_immvision/demo_immvision_inspector.cpp,sha256=ZHC-RIEobKMij5qkE4EZo_BME2VlPt0GQI_z5hIIy44,960 +imgui_bundle/demos_cpp/demos_immvision/demo_immvision_link.cpp,sha256=XOG_K8JJbRvZ_n3eRlTyOvmwZlK7xKz__YOVIFTKA4A,1205 +imgui_bundle/demos_cpp/demos_immvision/demo_immvision_process.cpp,sha256=SwKCX7VFUR7_7w18RsOLr2PWUkY-P7_DTNq3yO8gW68,5050 +imgui_bundle/demos_cpp/demos_nanovg/CMakeLists.txt,sha256=kGf5pNCPD2Dyh4BJ5VOQtxbBm8coJjUkt37RtRpU9KQ,362 +imgui_bundle/demos_cpp/demos_nanovg/coi-serviceworker.js,sha256=T422Jh_xvLIw6ajBvqgQl4pSqiX0Npo-wF4x4tkNX7M,4933 +imgui_bundle/demos_cpp/demos_nanovg/demo_nanovg_full.cpp,sha256=bg10-6myMwoVcwYPyC_Rl0zcuIw4MMopcU5oKa1_hBM,4388 +imgui_bundle/demos_cpp/demos_nanovg/demo_nanovg_full/demo_nanovg_full_impl.cpp,sha256=_2N2MjXD9oO81G722Ikwtt-Bjhb7LcDg6qU47TRvoMY,34903 +imgui_bundle/demos_cpp/demos_nanovg/demo_nanovg_full/demo_nanovg_full_impl.h,sha256=Vy33bipaw5rp7JTAzrEoPakhAJeF56cmFUloTGe7C6g,629 +imgui_bundle/demos_cpp/demos_nanovg/demo_nanovg_heart.cpp,sha256=FqfZXYYfGhd7muKf3wixy5leXUWx3M6EbDpe577H8gs,5150 +imgui_bundle/demos_cpp/demos_nanovg/shell.emscripten.html,sha256=-7VrhE6xR-oaz58JeI6xz7rr349MNdgFAV1cCX4b89M,3725 
+imgui_bundle/demos_cpp/demos_node_editor/CMakeLists.txt,sha256=oBFskqvGdnEPUsIsx3fEs7FxLL5VUdfISEIY_084mRw,234 +imgui_bundle/demos_cpp/demos_node_editor/demo_node_editor_basic.cpp,sha256=Y_JnUfExjjTzgZWMITrk8FIp3XVg_XDXEWYvdJjuBuE,8072 +imgui_bundle/demos_cpp/demos_node_editor/demo_romeo_and_juliet.cpp,sha256=qIJL--IQXHwhCJRqOCWfI451pO-BtVcKq6lL-eqXOnE,2986 +imgui_bundle/demos_cpp/demos_tex_inspect/CMakeLists.txt,sha256=oBFskqvGdnEPUsIsx3fEs7FxLL5VUdfISEIY_084mRw,234 +imgui_bundle/demos_cpp/demos_tex_inspect/demo_tex_inspect_demo_window.cpp,sha256=8PS-C2heBGvPF0o1MYW6OpZns7e5usS7ejDqs6A_zDE,840 +imgui_bundle/demos_cpp/demos_tex_inspect/demo_tex_inspect_simple.cpp,sha256=-F_wxw16pdHPejesx8UqtP7aKONMrmKfYgBx-yqmSFk,1777 +imgui_bundle/demos_cpp/msvc_note.txt,sha256=a-eabTYaj6czbsNNBOnv2uerE2V-QCxI9rKV4_3jWYU,853 +imgui_bundle/demos_cpp/sandbox/CMakeLists.txt,sha256=N9_BwZKvQ72z7mJzN1jKiQQTkey7SZVFjyz_MLRlRYE,36 +imgui_bundle/demos_cpp/sandbox/bundle_cpp_sandbox.cpp,sha256=lS15gPzfxIPxekr7aEt1mWlIjH2UMfTMxYTy43df4T4,273 +imgui_bundle/demos_cpp/sandbox/sandbox_custom_opengl_version.cpp,sha256=u4eqbDiIiTNSGUW_6xhqtcZMPvYRVsNtbV6BWitrol0,786 +imgui_bundle/demos_cpp/sandbox/sandbox_immapp_manual_render.cpp,sha256=g_lPnXKGzSwC-lCvHbQOOIGXEEKrtNEbbz3LG3XlH5U,1973 +imgui_bundle/demos_cpp/sandbox/sandbox_node_clipping_issue.cpp,sha256=Ut_8XW901dp_LDrfsrAK1XT_DrkeyYi3zSb6oQpciVY,4054 +imgui_bundle/demos_cpp/sandbox/sandbox_node_image.cpp,sha256=vqIVrtfJDKvaQuCgFZ8EijORxTNOd8TUb4qBQJkgJ4U,1813 +imgui_bundle/demos_cpp/sandbox/sandbox_node_md_code.cpp,sha256=saSgp-ppmSUzUuJ55KO-IETAVRxYMK6sKwHBzBtls0g,1639 +imgui_bundle/demos_cpp/sandbox/sandbox_node_popup.cpp,sha256=7j-xlPFFs08f9TFVC8B74rkAjQG7i2jiIlSN4tAKt9w,6524 +imgui_bundle/demos_cpp/sandbox/sandbox_node_text_wrap.cpp,sha256=yfkaMdI3eItqEGj5XKgy_Fvrt6PSE6h1aEmsBYPJQGo,3826 +imgui_bundle/demos_cpp/sandbox/sandbox_plot_in_node.cpp,sha256=0zi0V0PqNcyO9BjUSh6Ipe9srt0Kyvf7rTuJKf6VVmk,1396 
+imgui_bundle/demos_cpp/sandbox/sandbox_stacklayout.cpp,sha256=DBnrlakliDMPBzrEN_IOoUrTb7FooEmEHQFuU6nnOkI,1567 +imgui_bundle/demos_cpp/sandbox/sandbox_tmp.cpp,sha256=4Np-IFroVBgZqfs6UkP7_kIKOjCHkm_B2an4_XgTVYo,160 +imgui_bundle/demos_cpp/sandbox/sandbox_tstengine.cpp,sha256=5TOKSzi_40KQJ36LgJypVsNA6TCCe12nOg8vycaLUUs,2262 +imgui_bundle/demos_cpp/sandbox/sandnode_stack.cpp,sha256=HKssPDHsk5K6yCFWXteGxp-eal15nAju6Kh2u5BySAE,1402 +imgui_bundle/demos_cpp/sandbox/sandnode_suspend.cpp,sha256=pIspshFfKve8c7mub49qYPIDb-IQQFf7DZg4Ch-e93E,2979 +imgui_bundle/demos_cpp/shell.emscripten.html,sha256=-7VrhE6xR-oaz58JeI6xz7rr349MNdgFAV1cCX4b89M,3725 +imgui_bundle/demos_python/.gitignore,sha256=nKxsXgtWy50PAko_ZXIsk9g5nONGt3pHsywgIkKCuHw,47 +imgui_bundle/demos_python/__init__.py,sha256=b-VpPYiOQnVZNRgmWw4-C1N5C357Udn1Vda3AIM3l2g,291 +imgui_bundle/demos_python/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_imgui_bundle.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_imgui_bundle_intro.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_imgui_md.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_imgui_show_demo_window.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_imguizmo_launcher.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_immapp_launcher.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_immvision_launcher.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_implot.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_logger.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_nanovg_launcher.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_node_editor_launcher.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_tex_inspect_launcher.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_text_edit.cpython-314.pyc,, +imgui_bundle/demos_python/__pycache__/demo_themes.cpython-314.pyc,, 
+imgui_bundle/demos_python/__pycache__/demo_widgets.cpython-314.pyc,, +imgui_bundle/demos_python/demo_imgui_bundle.py,sha256=zaXOiAtAC1bAh3x7YtBdi_6sn90iwCY-kKgJeSbvhIc,6675 +imgui_bundle/demos_python/demo_imgui_bundle_intro.py,sha256=6SgndR_dnVUZATUPD9NqHnPB9fU8Hn2wq85VN3C19zw,7768 +imgui_bundle/demos_python/demo_imgui_md.py,sha256=fWcAN4BHrASAxCbh6MqESAdJ9GuoLq7gofUBWsfwq28,2771 +imgui_bundle/demos_python/demo_imgui_show_demo_window.py,sha256=Ov47hH5h_XaVks-Keyx8gRnRQZQNZlZlcgR1W0t62Hc,2362 +imgui_bundle/demos_python/demo_imguizmo_launcher.py,sha256=QGq2mJoUoOk5-bkmt-bIFUfcZUdLRTDFIt-DwP7XMSM,1494 +imgui_bundle/demos_python/demo_immapp_launcher.py,sha256=wgyXwD_qRTsRnCqmKFTEDXctgUEgC8rJ7LgROZkaw6A,5279 +imgui_bundle/demos_python/demo_immvision_launcher.py,sha256=sI9hiHxjYbzm9ll3joRA_wvMiTIGskbZeDPcsiNSxRY,2911 +imgui_bundle/demos_python/demo_implot.py,sha256=RnIEryuTVK8LidLT79sukJ0iI4XlLZ2GPN7Y32aUmL8,1977 +imgui_bundle/demos_python/demo_logger.py,sha256=DfaaWFrDC-wHp16aspsT7zTwWjPXLOP6HGg4uc2kRsQ,2718 +imgui_bundle/demos_python/demo_nanovg_launcher.py,sha256=LwHBq--e-egbM_Qxb1PybVkLCCLpgJSjDJrV6GvrVQw,3976 +imgui_bundle/demos_python/demo_node_editor_launcher.py,sha256=NEKZkNMclOXcyR-OJRvO2Fon0A10Ua2VjQw2kQkxl1g,1934 +imgui_bundle/demos_python/demo_packaging/macos/.gitignore,sha256=XhR1GzzBHiiMzAT-ikVH4TC4htPd5KqMAFJ_5-QWeRA,15 +imgui_bundle/demos_python/demo_packaging/macos/Readme.md,sha256=zt8OMVqJXuM7jTDIo3yryG0XhyM8PCNHm5PuzO6MVgI,1381 +imgui_bundle/demos_python/demo_packaging/macos/__pycache__/bundle_macos_demo.cpython-314.pyc,, +imgui_bundle/demos_python/demo_packaging/macos/assets/images/world.png,sha256=XJ0vpgpIMKBRKCYA5mUC9H8uqPhLUdPn-wCHD8wx8WY,45007 +imgui_bundle/demos_python/demo_packaging/macos/bundle_macos_demo.py,sha256=DnMN20WhLC-g5E0nLesoyOmiQQ9o7GEhXqBG74R-clE,779 +imgui_bundle/demos_python/demo_packaging/macos/bundle_macos_demo.spec,sha256=pA-mTaD2zEQV0CCHOfN69BDMF7e_SsXPQiK-PQ6Ryso,1046 
+imgui_bundle/demos_python/demo_tex_inspect_launcher.py,sha256=iZh1MKjUELsFfXE8nVktEg0f8nS8L68-kPl6EJppfRI,1839 +imgui_bundle/demos_python/demo_text_edit.py,sha256=hv2BtHO-6JnWeT2OWg-WboVC_0r2ho-pMeS4uCRw1mQ,1826 +imgui_bundle/demos_python/demo_themes.py,sha256=6VyOndw_g33gOngv6S6ekIC6QBts0ixzZbuiKFw230Y,1048 +imgui_bundle/demos_python/demo_utils/__init__.py,sha256=5UIlUXqKyonjF2GdtZw0DSVDYp7takmW98ZrJwWH3Vs,716 +imgui_bundle/demos_python/demo_utils/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demo_utils/__pycache__/animate_logo.cpython-314.pyc,, +imgui_bundle/demos_python/demo_utils/__pycache__/api_demos.cpython-314.pyc,, +imgui_bundle/demos_python/demo_utils/__pycache__/demo_app_table.cpython-314.pyc,, +imgui_bundle/demos_python/demo_utils/__pycache__/functional_utils.cpython-314.pyc,, +imgui_bundle/demos_python/demo_utils/__pycache__/imread_demo.cpython-314.pyc,, +imgui_bundle/demos_python/demo_utils/animate_logo.py,sha256=F34v5z8cRmtxz3AmFF2F-sOQVgtWPMzYh38N7YiyHHM,3846 +imgui_bundle/demos_python/demo_utils/api_demos.py,sha256=Mc3sBR58cV-FTRK0ktnaYwIJmD727NCTQBpaSaqBnrM,3813 +imgui_bundle/demos_python/demo_utils/demo_app_table.py,sha256=npFiO8SEeh5u3OfmBP86tbmyXqI8CzVPWik9jhZud1g,5992 +imgui_bundle/demos_python/demo_utils/functional_utils.py,sha256=7g38QtLJDqpiTI7Lslcfs5wOO9JPFgVPLMokSgxN4M0,290 +imgui_bundle/demos_python/demo_utils/imread_demo.py,sha256=bI1TYcJouicQbyGY9Av22TPz2AsvRmQfbDUcaLDx0jo,2073 +imgui_bundle/demos_python/demo_widgets.py,sha256=BKEsrBbq2rJZd4QnLsgz0v3rV2BzS7KNUunokuRkTF8,16058 +imgui_bundle/demos_python/demos_imguizmo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/demos_python/demos_imguizmo/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demos_imguizmo/__pycache__/demo_gizmo.cpython-314.pyc,, +imgui_bundle/demos_python/demos_imguizmo/demo_gizmo.py,sha256=Hhx6kYfxaAg9BaKAS5i_YuOQQSYEHs63aUywFpnnxJg,15572 
+imgui_bundle/demos_python/demos_immapp/.gitignore,sha256=gd3Ts_Xo4zDaOHkdQED2l6oyIA7vjfOooKhxO8q5Obw,7 +imgui_bundle/demos_python/demos_immapp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/demos_python/demos_immapp/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_assets.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_assets_addons.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_command_palette.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_custom_background.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_docking.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_drag_and_drop.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_hello_world.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_implot_markdown.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_matplotlib.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_parametric_curve.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_powersave.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_pydantic.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_python_context_manager.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/demo_testengine.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/haiku_butterfly.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/haiku_implot_heart.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/imgui_demo.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/__pycache__/imgui_example_glfw_opengl3.obsolete.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immapp/demo_assets.py,sha256=cojtTZtcFYiQz7awEEvz6l3vZSSPe2FYyiv4SPDHvNg,1365 
+imgui_bundle/demos_python/demos_immapp/demo_assets_addons.py,sha256=eNq-VJqugqwEuUvRBVcLq3fgY1uYHBJSn93PcCEn_Eo,9778 +imgui_bundle/demos_python/demos_immapp/demo_command_palette.py,sha256=Eddit6x729LGdF5mlHnn8UaOodeQnl5_d2icHRb-FZU,2492 +imgui_bundle/demos_python/demos_immapp/demo_custom_background.py,sha256=MzRw7C8RMwCy67XTmEv_VBGzbMRLOJ56OW-QvUfuYr0,20819 +imgui_bundle/demos_python/demos_immapp/demo_docking.py,sha256=2a8RF8EQ5pYCZxFpd2LK0lsBxurTIi2faXcFWHXuDHo,38846 +imgui_bundle/demos_python/demos_immapp/demo_drag_and_drop.py,sha256=_e1UleyNJyNPTE1C-qkpcL8CsPQ2h39NqJBT9lI9PQY,3485 +imgui_bundle/demos_python/demos_immapp/demo_hello_world.py,sha256=uEz_4zNrwODaAq9LLfvC80zss32JPqojzwNccI90Y74,420 +imgui_bundle/demos_python/demos_immapp/demo_implot_markdown.py,sha256=0LRbkMH2HmOh-Yimj-tmoCnHK0diY1igc6GrzC6aCbQ,1686 +imgui_bundle/demos_python/demos_immapp/demo_matplotlib.py,sha256=NcFKmcHejrUwzej9WwxOn4Q9ChnglNvyKAOZX5AM-rY,2645 +imgui_bundle/demos_python/demos_immapp/demo_parametric_curve.py,sha256=S7nlr56pmnYNBTHqDf2TeEpzCUqTFb0uL3Nb3Dirfv4,1466 +imgui_bundle/demos_python/demos_immapp/demo_powersave.py,sha256=5uoaA3RaBanmPqK9tDeX-8ROCWOvUlJ6HBeqy5K9IE0,1639 +imgui_bundle/demos_python/demos_immapp/demo_pydantic.py,sha256=Xky59lsvzDDA5M3cvGwJW2nT044_JRcZ3BRQp-T4uKw,1908 +imgui_bundle/demos_python/demos_immapp/demo_python_context_manager.py,sha256=iFLCt73xYGeeLhX0r4fSbnGnUPyWFoMm24C3ryKSG0o,11331 +imgui_bundle/demos_python/demos_immapp/demo_testengine.py,sha256=_suygM6vBUDiEiL2T8yFVVOyFlosoOEWRD_2vGe_F98,11216 +imgui_bundle/demos_python/demos_immapp/haiku_butterfly.py,sha256=YekTUCaRr0vOp_WK0-bg95Y8yD36jXM5GP4ecDiczDE,3871 +imgui_bundle/demos_python/demos_immapp/haiku_implot_heart.py,sha256=Fb3e6plszcfeHl-bEObvsCzQwjfpLrZD1_JUejgUyfo,1102 +imgui_bundle/demos_python/demos_immapp/imgui_demo.py,sha256=QAFudhrxBulYpSj2AOR2B_0tZJdE_k1767tjkfF3x8Y,185928 
+imgui_bundle/demos_python/demos_immapp/imgui_example_glfw_opengl3.obsolete.py,sha256=_CggTPgvmyspFb6sDIqRvbIftsqp25udiSjqiK0pV3o,11604 +imgui_bundle/demos_python/demos_immvision/__init__.py,sha256=Khtdyg9ss2_x1O3gnL0XZedDC-I_E9wcHp2RKGXdksQ,682 +imgui_bundle/demos_python/demos_immvision/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/__pycache__/demo_contours.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_display.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_inspector.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_inspector_mandelbrot.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_link.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_no_opencv.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/__pycache__/demo_immvision_process.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/__pycache__/demo_laplacian.cpython-314.pyc,, +imgui_bundle/demos_python/demos_immvision/demo_contours.py,sha256=18Qi2OI5osWHz7m2D6V5LkccNAkyYY3cYIY9O9tOGvc,2060 +imgui_bundle/demos_python/demos_immvision/demo_immvision_display.py,sha256=2J_fT_3lMU-JHjZJ-U5ZgXyg8XX8duokERR033cEyh8,1926 +imgui_bundle/demos_python/demos_immvision/demo_immvision_inspector.py,sha256=qS8M7_PxCA-wUHlGZsPQa9ENnrAG9x5EgEUR8hbY3Bo,1098 +imgui_bundle/demos_python/demos_immvision/demo_immvision_inspector_mandelbrot.py,sha256=eAeSizx7DXxD_MA8q22jTsUgP7m5BL4OsgMtVtwsfB0,3269 +imgui_bundle/demos_python/demos_immvision/demo_immvision_link.py,sha256=0RFXFjd-GCq15uqqt7kOZ8wvg1etvK4q8k4XIAPrGkk,1189 +imgui_bundle/demos_python/demos_immvision/demo_immvision_no_opencv.py,sha256=3qI8CB7dPxdNoqdNPzLiIe7GDx6BXg4raSCmgXyCdgI,1081 +imgui_bundle/demos_python/demos_immvision/demo_immvision_process.py,sha256=ho2Q9e9MMuM0pBUP-oRjPG58AYwTigFg82FSIkKClxo,5333 
+imgui_bundle/demos_python/demos_immvision/demo_laplacian.py,sha256=Z1cZbTFE01k-aQUE86Iqio_gRCEuXhqbAHvDh7f45zE,2311 +imgui_bundle/demos_python/demos_implot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/demos_python/demos_implot/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demos_implot/__pycache__/demo_implot_stock.cpython-314.pyc,, +imgui_bundle/demos_python/demos_implot/__pycache__/implot_demo.cpython-314.pyc,, +imgui_bundle/demos_python/demos_implot/demo_implot_stock.py,sha256=fjekJRpjm3GMQCgDFxCHC9VXCCLDDWUIJrqYVY0dHF8,12149 +imgui_bundle/demos_python/demos_implot/implot_demo.py,sha256=DK_S-gXC24V5B3E1I_JXgpsqoMRugn4sy39asrrNngA,95372 +imgui_bundle/demos_python/demos_implot3d/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/demos_python/demos_implot3d/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demos_implot3d/__pycache__/implot3d_demo.cpython-314.pyc,, +imgui_bundle/demos_python/demos_implot3d/__pycache__/implot3d_meshes.cpython-314.pyc,, +imgui_bundle/demos_python/demos_implot3d/implot3d_demo.py,sha256=V1JZaXCNL2hR3cMHOaYMxTrMfjlupcxNug83z5dCmDE,40731 +imgui_bundle/demos_python/demos_implot3d/implot3d_meshes.py,sha256=cgT7zwe6S86RgB98CLMtHSZgq7yusseXZiLYB8fivY4,31972 +imgui_bundle/demos_python/demos_nanovg/__pycache__/demo_nanovg_full.cpython-314.pyc,, +imgui_bundle/demos_python/demos_nanovg/__pycache__/demo_nanovg_heart.cpython-314.pyc,, +imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full.py,sha256=0ZtChlCB3ACnUozU8P9PdTOEkacwtMqJnrjhPJahpqU,5113 +imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full/__pycache__/demo_nanovg_full_impl.cpython-314.pyc,, 
+imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full/demo_nanovg_full_impl.py,sha256=IPU-YLtKSLaFr-emh8faQBnxzIz35HZU_A4y_XLVQ9M,34984 +imgui_bundle/demos_python/demos_nanovg/demo_nanovg_heart.py,sha256=iOaMZPYt0LMYYd3qFlDcj8zvXkOYOIw4znv2Wzp-OHg,5085 +imgui_bundle/demos_python/demos_node_editor/__init__.py,sha256=_5ZmJH4ScNiu6Dvw2F2o7fTaQH8y1G8r47zctrunDFU,184 +imgui_bundle/demos_python/demos_node_editor/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demos_node_editor/__pycache__/demo_node_editor_basic.cpython-314.pyc,, +imgui_bundle/demos_python/demos_node_editor/__pycache__/demo_romeo_and_juliet.cpython-314.pyc,, +imgui_bundle/demos_python/demos_node_editor/demo_node_editor_basic.json,sha256=cLUk21uRqL1qW-ovgxK0m9616SV-S5ZZrY8vAgQ0ieo,334 +imgui_bundle/demos_python/demos_node_editor/demo_node_editor_basic.py,sha256=B1rwLgSMjfFMOXYLU_O5YFtBZk7wH6K50PNHfWzjJow,8599 +imgui_bundle/demos_python/demos_node_editor/demo_romeo_and_juliet.py,sha256=0pOa9cAwokiQkUdwmWKCKqROAJm55rRLSyAQ7LT6cNU,2272 +imgui_bundle/demos_python/demos_node_editor/romeo_and_juliet.json,sha256=Izot8WkS2pM3bTy0HccR4gootK6BrW24FkNQCsst2M8,355 +imgui_bundle/demos_python/demos_tex_inspect/__init__.py,sha256=DsgZmUvDVm8XYN-wI9uBIVzNOJa7mTTMJLpCkmPb9Kg,200 +imgui_bundle/demos_python/demos_tex_inspect/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/demos_tex_inspect/__pycache__/demo_tex_inspect_demo_window.cpython-314.pyc,, +imgui_bundle/demos_python/demos_tex_inspect/__pycache__/demo_tex_inspect_simple.cpython-314.pyc,, +imgui_bundle/demos_python/demos_tex_inspect/demo_tex_inspect_demo_window.py,sha256=B3weap7DnRShHi9D8qyvWgtmZCuAm42ttiVhYJCyco0,657 +imgui_bundle/demos_python/demos_tex_inspect/demo_tex_inspect_simple.py,sha256=FAggQjzh1hKo9xIDvL0kFvY11qCLtesyhxETRoi7mGw,1195 +imgui_bundle/demos_python/haikus/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/demos_python/haikus/__pycache__/__init__.cpython-314.pyc,, 
+imgui_bundle/demos_python/notebooks/.gitignore,sha256=GuUYDQtPg3HrDxUGVHJ75oOoGk-KtR_tfOFWnhzUOEY,23 +imgui_bundle/demos_python/notebooks/NodeEditor.json,sha256=K3Z3pupq1Gi8qNderYHAbLPMoXC7m27veIW7WO4JNkk,340 +imgui_bundle/demos_python/notebooks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/demos_python/notebooks/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/notebooks/demo_notebook.ipynb,sha256=EIGeBTHo9ru8N6DLvpQMRJwVOCjxc24IZ_Jw9FoauLc,529129 +imgui_bundle/demos_python/ruff.toml,sha256=5xzgJnq_0kYIwzrJ7oOCIF5eVpCbrAOqR4O0KclWm6Q,110 +imgui_bundle/demos_python/sandbox/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/demos_python/sandbox/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sand_node.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sand_plot.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sand_runnable_code_cell.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sand_subplots.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_app.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_custom_opengl_version.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_glfw_window_manip.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_immapp_manual_render.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_node_md_code.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_node_text_wrap.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_plot_in_node.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_plotmesh3d.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_stacklayout.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/__pycache__/sandbox_tmp.cpython-314.pyc,, 
+imgui_bundle/demos_python/sandbox/__pycache__/sandbox_tstengine_keys.cpython-314.pyc,, +imgui_bundle/demos_python/sandbox/sand_node.py,sha256=DSnslxuREcy-S6szrD3Cpi-_XDqtHO6YwW9NrcJvS0g,582 +imgui_bundle/demos_python/sandbox/sand_plot.py,sha256=Gg-lTv4FfXySqSn8sQsqFa5SNEIexSU0wiLVO77py5Q,321 +imgui_bundle/demos_python/sandbox/sand_runnable_code_cell.py,sha256=rBBCd8c-IGslHv5ZIk2TCLpwkN7OzKpkImEkGI-_tcU,1867 +imgui_bundle/demos_python/sandbox/sand_subplots.py,sha256=blj18rvOKhhUmZGA2-fUq5gNRzzs_UwWCTXSWw2CL68,909 +imgui_bundle/demos_python/sandbox/sandbox_app.py,sha256=yMtQCZJ_caUYnPT65ofxb6lKFp4OrubMb-wMRUfbUrw,823 +imgui_bundle/demos_python/sandbox/sandbox_custom_opengl_version.py,sha256=F6tB15GnzPrvOuBk6WoYvBZPeqIw16cTTMZjEy_tMIc,877 +imgui_bundle/demos_python/sandbox/sandbox_glfw_window_manip.py,sha256=BS7C3JrEMeeZFcKea2soGp9kXsTCmHwbxwO5EN3KrDY,580 +imgui_bundle/demos_python/sandbox/sandbox_immapp_manual_render.py,sha256=X5vu_59RbLFkL_Z6MddqpY2ow1tgGQrtHHUH1hSf_Wg,654 +imgui_bundle/demos_python/sandbox/sandbox_node_md_code.py,sha256=cbdp15xa_jKcRXX3hdFkHE9Pz0JIx5ZrTZtYKmXxDvc,1439 +imgui_bundle/demos_python/sandbox/sandbox_node_text_wrap.py,sha256=DEArDar35LPgoJ_-_nvprUDIl6EaqA3LDH4N98SeYeE,3507 +imgui_bundle/demos_python/sandbox/sandbox_plot_in_node.py,sha256=x5F6ldCqX_DEnbwT9vDWb9yhz5aWuHQ6IwvuA2RVNgQ,794 +imgui_bundle/demos_python/sandbox/sandbox_plotmesh3d.py,sha256=qMOFo487BpRSVE3k8iVTJhVqOAJisuLuHD9IktIAhjc,1042 +imgui_bundle/demos_python/sandbox/sandbox_stacklayout.py,sha256=dKQQ5uy2DSASik9TTk-X_lvvEMbIRh6XeAwHhTo7Qkw,610 +imgui_bundle/demos_python/sandbox/sandbox_tmp.py,sha256=e0s9VmUDGtgfHwws3q6JwAJ7mlLH2LC31mntXYhfvno,116 +imgui_bundle/demos_python/sandbox/sandbox_tstengine_keys.py,sha256=wunk6e9SWkFKYl-pMmYogBrnGslyjxAXWl_dGw9rujU,1477 +imgui_bundle/glfw3.dll,sha256=3MpOjdUL0Z6qDMqNZmV_r8Hzn-kEBRmadr4XnHzl3bE,218624 +imgui_bundle/glfw3dll.lib,sha256=a2soIHV4K5XJCIbm-m7IBFc9qjdBWdZo5GpFzL3ChTs,30306 
+imgui_bundle/glfw_utils.py,sha256=Zq2hUYxbCEPVuSKfkT2Tz9Zzcu7iCtYKjEKqSeQRLdI,1017 +imgui_bundle/hello_imgui.pyi,sha256=wy6igf4KeiTlj24qH2SgJ54-o8A0xzxEcCu_ARxa5T4,168468 +imgui_bundle/im_col32.py,sha256=OEAOm_QdU1GSU07WYgaDVdzcrgwAjVmlizexfFK3tY4,461 +imgui_bundle/im_cool_bar.pyi,sha256=pin_xT7nH77RKkrmzFR7nM7BVyp00eFdZldQYt33Qww,3407 +imgui_bundle/im_file_dialog.pyi,sha256=vxF0kVjbS2d4Iw_srj8e7-w83yhq4oAbgCkvSrn_2tg,2404 +imgui_bundle/imgui/__init__.pyi,sha256=fN12fdolUre78SRqsAwrBc4SfH1MGbR7VDrZgt3oivk,740141 +imgui_bundle/imgui/__pycache__/test_engine_checks.cpython-314.pyc,, +imgui_bundle/imgui/backends.pyi,sha256=8SBSVvyD9nHFhZbn0p_ZMw8T35pZZSoMS8gqCL4axMU,3522 +imgui_bundle/imgui/internal.pyi,sha256=Kc6jPlAQbvAkw6OlkCZ_KETRHLsMDixbXIBDqgKoT_g,481362 +imgui_bundle/imgui/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/imgui/test_engine.pyi,sha256=I66UGm6qcAub2mks0ouRfuCeD0ggUWH5geIyUhUJl28,137229 +imgui_bundle/imgui/test_engine_checks.py,sha256=QcenN-QZhxa62kRaJ58M69EuOy98aafdXsTwDWM2xr4,1388 +imgui_bundle/imgui_bundle.pyi,sha256=4aU4KGF9x4FIaFBRq5gVdMr4uJ2m8XXH9CulY5NvXbI,436 +imgui_bundle/imgui_color_text_edit.pyi,sha256=6kY7rgsdW5H6Z--8OWV--AqJdfNi7QuS35oSuruGxBA,9026 +imgui_bundle/imgui_command_palette.pyi,sha256=iLexoZxdyk2be8Yv7lH1AAgFBJhrgPIiY1MH4kV6N4w,3372 +imgui_bundle/imgui_ctx.py,sha256=VGGbBHpK_drcRfaCQBtsuA6RGopjIty4yDVXaudYvKk,28981 +imgui_bundle/imgui_fig.py,sha256=GNvUkTyQgLwtqJULMBCYwDxkMvqhSNSgRKy9V1aktf0,3874 +imgui_bundle/imgui_knobs.pyi,sha256=O4DYLR7OUUfzPNvFAXtUj6ap5zvRk8xM52FlUyn3jVk,2513 +imgui_bundle/imgui_md.pyi,sha256=xJ6RX60StqadAXooP3Lx85kn4swpdtE0QQgL1iRvzfs,5850 +imgui_bundle/imgui_node_editor.pyi,sha256=7mVv5k_vvWApjMxT2LzyN6J8njzj8fcHdVzuTE2nCQs,41456 +imgui_bundle/imgui_node_editor_ctx.py,sha256=0lgOn2TvGVmQOrNb3LF4HEScCdc6ZLvpuEYNf8_enXI,6215 +imgui_bundle/imgui_pydantic.py,sha256=qJVLpeq7sc0BtX2jBiBt5Sv0RFgKCQEmVX5hMmn3uac,5586 
+imgui_bundle/imgui_tex_inspect.pyi,sha256=tCNaX1UJbXpf58O3Fsuu8MkfwEdhtqRNxqPo_KVKQRE,13575 +imgui_bundle/imgui_toggle.pyi,sha256=T7mM9X0bhYfqlRr_dRwrrWgG-r1NXmpx1bEIhd1yJcY,21970 +imgui_bundle/imguizmo.pyi,sha256=NoDscmV36dyeasJdNB0l30QhZJkg5gO9YIkn18sD2Qg,19863 +imgui_bundle/immapp/__init__.py,sha256=ijp0NZzX5WWF_xOAT7EsXKGMQIQMDWb1khauvmxuQ1g,2712 +imgui_bundle/immapp/__init__.pyi,sha256=z5ZylI40IJmE7y2R6LsE-md4q4N0tjSMZ7v8YS8HZtw,1093 +imgui_bundle/immapp/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/immapp/__pycache__/icons_fontawesome_4.cpython-314.pyc,, +imgui_bundle/immapp/__pycache__/icons_fontawesome_6.cpython-314.pyc,, +imgui_bundle/immapp/__pycache__/immapp_code_utils.cpython-314.pyc,, +imgui_bundle/immapp/__pycache__/immapp_notebook.cpython-314.pyc,, +imgui_bundle/immapp/__pycache__/immapp_utils.cpython-314.pyc,, +imgui_bundle/immapp/__pycache__/runnable_code_cell.cpython-314.pyc,, +imgui_bundle/immapp/icons_fontawesome_4.py,sha256=TFU7wqJL8KxzELCuOZtmU2vkGSIHG1FTZRjhLnqA9mc,52062 +imgui_bundle/immapp/icons_fontawesome_6.py,sha256=xIhHDDTskFhD53FhL64g175aEIyk_hCrCIUka7388yw,93641 +imgui_bundle/immapp/immapp_code_utils.py,sha256=0jhFBEy9nscArQWd4fnV4_SiVTCABCxVj_mEZHsfOC4,3262 +imgui_bundle/immapp/immapp_cpp.pyi,sha256=Y6BqAOcaElThe-mQDgXjg814IUUMPhSG_6uxKravSVo,19613 +imgui_bundle/immapp/immapp_notebook.py,sha256=b2KXfcQpix2giuRzuuKI-DzrL_H0si3x2rAtvcM-WzA,4892 +imgui_bundle/immapp/immapp_utils.py,sha256=1XVp-YaH8SmtWHBrYeY8HoA_NGi7drggoz5VsIGklbc,2106 +imgui_bundle/immapp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/immapp/runnable_code_cell.py,sha256=mAHMQ1n3MkRw4LhD4SQIql5mX6EfewPu_lZ7GkzWr9A,4706 +imgui_bundle/immvision.pyi,sha256=l1jNNbin9Vt4l7SWmly5W4W3RJ1x_BISvh4lPeyi6ns,42962 +imgui_bundle/implot/__init__.pyi,sha256=LtV3ckkj9o5Oq1iQE2byXdEBeTznRpAq41xiu86JUJc,129544 +imgui_bundle/implot/internal.pyi,sha256=BfMjHRTWt9WO4ZXD87VUNf1fNIpl5YSCeaPksnS5K0o,105855 
+imgui_bundle/implot/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/implot3d/__init__.pyi,sha256=_MtyvT8Lc71N0t8uloH2nsbbVPcUbWDLfaF_C0Dn_uA,76452 +imgui_bundle/implot3d/internal.pyi,sha256=5-sfSKdjj-n-8SCvovo9kES6L9KXunNUGT44aWtqGtA,48372 +imgui_bundle/implot3d/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/imspinner.pyi,sha256=Tbb_oOQsLGQ6t8X2uIr38eaOBXtOAv9mcOTMbPPpb4I,47401 +imgui_bundle/mypy.ini,sha256=vV63dcPbLQP4esuqwHeU5veI7v2AaZng69_CEQYh9Rs,479 +imgui_bundle/nanovg.pyi,sha256=aNl5CcHAvHIVOzYr5go6T7-JGvkTv9lRUiwOFd5nbTc,58909 +imgui_bundle/notebook_patch_runners.py,sha256=P9LfqIP2mJ5kmu40tfqD6BrynT89I0wA1vB7H3GlEeA,2518 +imgui_bundle/opencv_world4100.dll,sha256=5uMeVbmzd7d1eCxLJ6gLJGDf_j2-54mOA00lcp3KAZg,64653312 +imgui_bundle/portable_file_dialogs.pyi,sha256=DQYzcuOMe2PiQS20Qi5_F9Fdrs47AP0ydzhUVS2qw9U,4128 +imgui_bundle/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/pyodide_patch_runners.py,sha256=DUJx0UHq5nDaSUaRCvWWjaljBXtGjTurq6F4L-SaLuk,6613 +imgui_bundle/python_backends/LICENSE_pyimgui.txt,sha256=KsiXoMwyAlq7GNigXaBPAcFJqf-y63tandKoHTU5VfY,1508 +imgui_bundle/python_backends/Readme.md,sha256=X35JCgOCdn0BaroVoKANOW_0wjRbB0QoGkxoycukouQ,1440 +imgui_bundle/python_backends/__init__.py,sha256=xYFWpQbUqcO7qPwBFy6C_wGjSvoHMyPA2024-TAneZ8,411 +imgui_bundle/python_backends/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/python_backends/__pycache__/glfw_backend.cpython-314.pyc,, +imgui_bundle/python_backends/__pycache__/opengl_backend_programmable.cpython-314.pyc,, +imgui_bundle/python_backends/__pycache__/opengl_base_backend.cpython-314.pyc,, +imgui_bundle/python_backends/__pycache__/pygame_backend.cpython-314.pyc,, +imgui_bundle/python_backends/__pycache__/pyglet_backend.cpython-314.pyc,, +imgui_bundle/python_backends/__pycache__/sdl2_backend.cpython-314.pyc,, +imgui_bundle/python_backends/__pycache__/sdl3_backend.cpython-314.pyc,, 
+imgui_bundle/python_backends/examples/__pycache__/example_python_backend_glfw3.cpython-314.pyc,, +imgui_bundle/python_backends/examples/__pycache__/example_python_backend_pygame.cpython-314.pyc,, +imgui_bundle/python_backends/examples/__pycache__/example_python_backend_pyglet.cpython-314.pyc,, +imgui_bundle/python_backends/examples/__pycache__/example_python_backend_sdl2.cpython-314.pyc,, +imgui_bundle/python_backends/examples/__pycache__/example_python_backend_sdl3.cpython-314.pyc,, +imgui_bundle/python_backends/examples/__pycache__/example_python_backend_wgpu.cpython-314.pyc,, +imgui_bundle/python_backends/examples/example_python_backend_glfw3.py,sha256=XeMGZkZawFYcwKDxldMkxHf0YEOcik9B-gkgC8dO_6U,4680 +imgui_bundle/python_backends/examples/example_python_backend_pygame.py,sha256=BRRN0YtvePS2pP26zIOlPeblJ87c7zVVQT0opAYeeaM,2977 +imgui_bundle/python_backends/examples/example_python_backend_pyglet.py,sha256=3noY3efE5BEkfgEG_VupTjIvDZR7N5BgQdpMqymUJrc,2637 +imgui_bundle/python_backends/examples/example_python_backend_sdl2.py,sha256=sihJ2mDXsQ2f6A0iBWbcNW2D4dTWWriTEKmy39TIVG4,4742 +imgui_bundle/python_backends/examples/example_python_backend_sdl3.py,sha256=IZ0DPBxmAMqM-0xP3Uiza1n9LuU8eMWZCEt3ChpKnM8,4746 +imgui_bundle/python_backends/examples/example_python_backend_wgpu.py,sha256=gCJsHWY-Z9oOaDWaX3F57Z9H6X4pj7xlaGRJ1kfUPOg,2460 +imgui_bundle/python_backends/examples_disabled/Readme.md,sha256=OqyYuTupzwzBiFcabj7sk2k-gnNoN8aJ2q9U4-6cr7E,119 +imgui_bundle/python_backends/examples_disabled/__pycache__/example_python_backend_cocos2d.cpython-314.pyc,, +imgui_bundle/python_backends/examples_disabled/example_python_backend_cocos2d.py,sha256=3lQi83RK1XHDTxAZvg4GPe1s8fJw4LqRnYzz4VuPqm4,1783 +imgui_bundle/python_backends/glfw_backend.py,sha256=rZZ2syGLmt2JWUPzFM4pkW_9C9dGRaS6ekt-VPDnRsc,9844 +imgui_bundle/python_backends/opengl_backend_programmable.py,sha256=vOuDw99ZesPz-_Uz87D5glZ6KCYLua26F0XrRB1KWJw,9077 
+imgui_bundle/python_backends/opengl_base_backend.py,sha256=GTU-FVpyaFxdkPXjZ2qDeiLxOCwLGsdzNw8iuZmOr0U,10034 +imgui_bundle/python_backends/pygame_backend.py,sha256=WNWlHL-1yK262TJy99XZC8KchkcadOQowuvXXthj9fA,4940 +imgui_bundle/python_backends/pyglet_backend.py,sha256=Qrsj8e3DqRgpUDR2xT5WmkUHcg0Rvy55oQfb7e7eiFM,12002 +imgui_bundle/python_backends/python_backends_disabled/Readme.md,sha256=OqyYuTupzwzBiFcabj7sk2k-gnNoN8aJ2q9U4-6cr7E,119 +imgui_bundle/python_backends/python_backends_disabled/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imgui_bundle/python_backends/python_backends_disabled/__pycache__/__init__.cpython-314.pyc,, +imgui_bundle/python_backends/python_backends_disabled/__pycache__/cocos2d_backend.cpython-314.pyc,, +imgui_bundle/python_backends/python_backends_disabled/__pycache__/glumpy_backend.cpython-314.pyc,, +imgui_bundle/python_backends/python_backends_disabled/__pycache__/opengl_backend_fixed.cpython-314.pyc,, +imgui_bundle/python_backends/python_backends_disabled/cocos2d_backend.py,sha256=AK70oIoMCONPxfAYTfVmOj0CNFnmtWwgD09LbeSPe1I,901 +imgui_bundle/python_backends/python_backends_disabled/glumpy_backend.py,sha256=2WnugNcZyh8mijmte0gQ3n5gdz6kfegXDAiOIwNODuw,13941 +imgui_bundle/python_backends/python_backends_disabled/opengl_backend_fixed.py,sha256=bo91DNWx29vGh1LnGZmRZt1Ufa7TlHIOa_1s6hlkIWA,4225 +imgui_bundle/python_backends/sdl2_backend.py,sha256=_P3Dy07ffcJSDan3s_GAcveiR0oxuUoHXdz-PyhAuFM,12417 +imgui_bundle/python_backends/sdl3_backend.py,sha256=WNkOdX2vLfZqrMV74dcsO8Nakhvyo-S8UdRV-qb3K8w,12389 +imgui_bundle/ruff.toml,sha256=5xzgJnq_0kYIwzrJ7oOCIF5eVpCbrAOqR4O0KclWm6Q,110 +include/plutovg/plutovg.h,sha256=aiBAWg8y5ODA9741BhCPlwrP7M9Wxd4RoEWEZTsl4m8,93180 +lib/cmake/plutovg/plutovgConfig.cmake,sha256=P03G7BvLAWr-Ohg0GRlbDNW67eDAW_PUGTQWfjcWq7s,966 +lib/cmake/plutovg/plutovgConfigVersion.cmake,sha256=wrWvwXUoWsHZLhxkVa5C6bH6n4gMev3yLcWv7f5jNBw,2827 
+lib/cmake/plutovg/plutovgTargets-release.cmake,sha256=Wn_619DpNiz4LYCt6S_MAx6TTMPk2HdlcPYsv4rCHS4,870 +lib/cmake/plutovg/plutovgTargets.cmake,sha256=L7AZDe1CuY2wYEVb7gYjBBPo0dth6ekUWCLwgwaD18U,4295 +lib/plutovg.lib,sha256=TJmYOf7KdkQ3ptLxMCesS0CTtEp30EBxGh_UhzbThek,646386 diff --git a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/REQUESTED b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/REQUESTED similarity index 100% rename from blimgui/dist64/imgui_bundle-1.6.2.dist-info/REQUESTED rename to blimgui/dist64/imgui_bundle-1.92.5.dist-info/REQUESTED diff --git a/blimgui/dist64/imgui_bundle-1.92.5.dist-info/WHEEL b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/WHEEL new file mode 100644 index 0000000..672ea5f --- /dev/null +++ b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: scikit-build-core 0.11.6 +Root-Is-Purelib: false +Tag: cp314-cp314-win_amd64 + diff --git a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/entry_points.txt b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/entry_points.txt similarity index 100% rename from blimgui/dist64/imgui_bundle-1.6.2.dist-info/entry_points.txt rename to blimgui/dist64/imgui_bundle-1.92.5.dist-info/entry_points.txt diff --git a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/licenses/LICENSE b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/licenses/LICENSE similarity index 98% rename from blimgui/dist64/imgui_bundle-1.6.2.dist-info/licenses/LICENSE rename to blimgui/dist64/imgui_bundle-1.92.5.dist-info/licenses/LICENSE index b0376d4..42e99fb 100644 --- a/blimgui/dist64/imgui_bundle-1.6.2.dist-info/licenses/LICENSE +++ b/blimgui/dist64/imgui_bundle-1.92.5.dist-info/licenses/LICENSE @@ -1,21 +1,21 @@ -The MIT License (MIT) - -Copyright (c) 2021-2023 Pascal Thomet - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the 
rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +The MIT License (MIT) + +Copyright (c) 2021-2023 Pascal Thomet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/blimgui/dist64/imgui_bundle/Readme_pypi.md b/blimgui/dist64/imgui_bundle/Readme_pypi.md index eba81df..165fd77 100644 --- a/blimgui/dist64/imgui_bundle/Readme_pypi.md +++ b/blimgui/dist64/imgui_bundle/Readme_pypi.md @@ -20,42 +20,90 @@ the interactive manual!* **Key Features** -- **A lot of widgets and libraries:** All of Dear ImGui along with a - suite of additional libraries for plotting, node editing, markdown - rendering, and much more. - -- **Always up-to-date:** The libraries are always very close to the - latest version of Dear ImGui. This is also true for Python - developers, since the bindings are automatically generated. - -- **Interactive Demos and Documentation:** Quickly get started with - our interactive manual and demos that showcase the capabilities of - the pack. Read or copy-paste the source code (Python and C++) - directly from the interactive manual! - -- **Cross-platform:** Works on Windows, Linux, macOS, iOS, Android, - and WebAssembly! - -- **Easy to use, yet very powerful:** Start your first app in 3 lines. - The Immediate Mode GUI (IMGUI) paradigm is simple and powerful, - letting you focus on the creative aspects of your projects. - -- **Fast:** Rendering is done via OpenGL (or any other renderer you - choose), through native code. - -- **Beautifully documented Python bindings and stubs:** The Python - bindings stubs reflect the C++ API and documentation, serving as a - reference and aiding autocompletion in your IDE. See for example the - [stubs for - imgui](https://github.com/pthom/imgui_bundle/blob/main/bindings/imgui_bundle/imgui/__init__.pyi), - and [for - hello\_imgui](https://github.com/pthom/imgui_bundle/blob/main/bindings/imgui_bundle/hello_imgui.pyi) - (which complete the [hello\_imgui - manual](https://pthom.github.io/hello_imgui/book/intro.html)). +- **Python Bindings:** Using Dear ImGui Bundle in Python is extremely + easy. 
Here is a beginner-friendly introduction: [Immediate Mode GUI + with Python and Dear ImGui + Bundle](https://github.com/pthom/imgui_bundle/blob/main/docs/docs_md/imgui_python_intro.md) + +- **Cross-platform in C++ and Python:** Works on Windows, Linux, macOS, + iOS, Android, and WebAssembly! + +- **Easy to use, yet very powerful:** Start your first app in 3 lines. + The Immediate Mode GUI (IMGUI) paradigm is simple and powerful, + letting you focus on the creative aspects of your projects. + +- **A lot of widgets and libraries:** All of Dear ImGui along with a + suite of additional libraries for plotting, node editing, markdown + rendering, and much more. + +- **Web ready**: Develop full web applications, in C++ via Emscripten; + or in Python thanks to ImGui Bundle’s integration within *Pyodide* + +- **Always up-to-date:** The libraries are always very close to the + latest version of Dear ImGui. This is also true for Python developers, + since the bindings are automatically generated. + +- **Interactive Demos and Documentation:** Quickly get started with our + interactive manual and demos that showcase the capabilities of the + pack. Read or copy-paste the source code (Python and C++) directly + from the interactive manual! + +- **Fast:** Rendering is done via OpenGL (or any other renderer you + choose), through native code. + +- **Beautifully documented Python bindings and stubs:** The Python + bindings stubs reflect the C++ API and documentation, serving as a + reference and aiding autocompletion in your IDE. See for example the + [stubs for + imgui](https://github.com/pthom/imgui_bundle/blob/main/bindings/imgui_bundle/imgui/__init__.pyi), + and [for + hello\_imgui](https://github.com/pthom/imgui_bundle/blob/main/bindings/imgui_bundle/hello_imgui.pyi) + (which complete the [hello\_imgui + manual](https://pthom.github.io/hello_imgui/book/intro.html)). For a detailed look at each feature and more information, explore the sections listed in the Table of Contents. 
+**Interactive Manual** + +Click on the animated demonstration below to launch the fully +interactive manual. + +
+ +
Dear ImGui Bundle interactive manual (in C++, via +Emscripten)
+
+ +**Online playground in Pure Python (via Pyodide)** + +Since ImGui Bundle is available in Python and Pyodide, an [online +playground](https://traineq.org/imgui_bundle_online/projects/imgui_bundle_playground/) +will enable you to run and edit various ImGui applications in the +browser without any setup. + +
+ +
ImGui Bundle online playground (in Python, via +Pyodide)
+
+ +See [this +page](https://code-ballads.net/dear-imgui-bundle-build-real-time-python-web-applications-with-zero-fuss/) +for more information about availability of ImGui Bundle in Pyodide. + +**Full manual (PDF)** + +View or download the [full +pdf](https://raw.githubusercontent.com/pthom/imgui_related_docs/refs/heads/main/manuals/imgui_bundle_manual.pdf) +for this manual. + +You may feed it into a LLM such as ChatGPT, so that it can help you when +using ImGui bundle. + **Example code** *A hello world example with Dear ImGui Bundle* @@ -74,17 +122,6 @@ hello](https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_ #include "imgui.h" int main() { ImmApp::Run([] { ImGui::Text("Hello, world!"); }); } -**Interactive Manual** - -Click on the animated demonstration below to launch the fully -interactive manual. - -
- -
Dear ImGui Bundle interactive manual
-
- # What’s in the pack? Dear ImGui Bundle includes the following libraries, which are available @@ -96,7 +133,7 @@ in C++ *and* in Python: - +

Dear ImGui : Bloat-free Graphical User interface with minimal dependencies

@@ -104,7 +141,7 @@ Graphical User interface with minimal dependencies

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_imgui.jpg" alt="demo widgets imgui" />

- +

ImGui Test Engine: Dear ImGui Tests & Automation Engine

@@ -112,7 +149,7 @@ Engine: Dear ImGui Tests & Automation Engine

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_testengine.jpg" alt="demo testengine" />

- +

Hello ImGui: cross-platform Gui apps with the simplicity of a "Hello World" @@ -123,7 +160,7 @@ alt="demo docking" /> demo custom background

- +

ImPlot: Immediate Mode Plotting

@@ -131,7 +168,7 @@ Plotting

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/battery_implot.jpg" alt="battery implot" />

- +

ImPlot3D: Immediate Mode 3D Plotting

@@ -139,7 +176,7 @@ href="https://github.com/brenocq/implot3d">ImPlot3D: Immediate Mode src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/battery_implot3d.jpg" alt="battery implot3d" />

- +

ImGuizmo: Immediate mode 3D gizmo for scene editing and other controls based on @@ -148,7 +185,7 @@ Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_gizmo.jpg" alt="demo gizmo" />

- +

ImGuiColorTextEdit: Colorizing text editor for ImGui

@@ -156,7 +193,7 @@ Colorizing text editor for ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_editor.jpg" alt="demo widgets editor" />

- +

imgui-node-editor: Node Editor built using Dear ImGui

@@ -164,7 +201,7 @@ Node Editor built using Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_node_editor.jpg" alt="demo node editor" />

- +

imgui_md: Markdown renderer for Dear ImGui using MD4C parser

@@ -172,7 +209,7 @@ renderer for Dear ImGui using MD4C parser

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_md.jpg" alt="demo widgets md" />

- +

ImmVision: Immediate image debugger and insights

@@ -182,7 +219,7 @@ alt="demo immvision process 1" /> demo immvision process 2

- +

NanoVG: Antialiased 2D vector drawing library on top of OpenGL for UI and @@ -191,7 +228,7 @@ visualizations

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/nanovg_full_demo.jpg" alt="nanovg full demo" />

- +

imgui_tex_inspect: A texture inspector tool for Dear ImGui

@@ -199,7 +236,7 @@ A texture inspector tool for Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_imgui_tex_inspector.jpg" alt="demo imgui tex inspector" />

- +

ImFileDialog: A file dialog library for Dear ImGui

@@ -207,7 +244,7 @@ file dialog library for Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_imfiledialog.jpg" alt="demo widgets imfiledialog" />

- +

portable-file-dialogs OS native file dialogs library (C++11, single-header)

@@ -215,7 +252,7 @@ href="https://github.com/samhocevar/portable-file-dialogs">portable-file-dialogs src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_portablefiledialogs.jpg" alt="demo widgets portablefiledialogs" />

- +

imgui-knobs: Knobs widgets for ImGui

@@ -223,7 +260,7 @@ widgets for ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_knobs.jpg" alt="demo widgets knobs" />

- +

imspinner: Set of nice spinners for imgui

@@ -231,7 +268,7 @@ spinners for imgui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_spinners.jpg" alt="demo widgets spinners" />

- +

imgui_toggle: A toggle switch widget for Dear ImGui

@@ -239,7 +276,7 @@ switch widget for Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_toggle.jpg" alt="demo widgets toggle" />

- +

ImCoolBar: A Cool bar for Dear ImGui

@@ -247,7 +284,7 @@ for Dear ImGui

src="https://raw.githubusercontent.com/pthom/imgui_bundle/main/bindings/imgui_bundle/doc/doc_images/demo_widgets_coolbar.jpg" alt="demo widgets coolbar" />

- +

imgui-command-palette: A Sublime Text or VSCode style command palette in ImGui

@@ -264,43 +301,41 @@ A big thank you to their authors for their awesome work! ## Install from pypi + # Minimal install pip install imgui-bundle - pip install opencv-python - pip install pyGLM - -- imgui\_bundle: Binary wheels are available for Windows, MacOS and - Linux. If a compilation from source is needed, the build process - might take up to 5 minutes, and will require an internet connection. -- OpenCV: in order to run the immvision module, install opencv-python. - The alternative OpenCV versions, such as opencv-python-headless - (headless) opencv-contrib-python (with extra modules) also work. + # or to get all optional features: + pip install "imgui-bundle[full]" -- pyGLM: in order to run the demo, install pyGLM +Binary wheels are available for Windows, MacOS and Linux. If a +compilation from source is needed, the build process might take up to 5 +minutes, and will require an internet connection. **Platform notes** -- *Windows*: Under windows, you might need to install [msvc - redist](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170#visual-studio-2015-2017-2019-and-2022). +- *Windows*: Under windows, you might need to install [msvc + redist](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170#visual-studio-2015-2017-2019-and-2022). -- *macOS* : under macOS, if a binary wheel is not available (e.g. for - older macOS versions), pip will try to compile from source. This - might fail if you do not have XCode installed. In this case, install - imgui-bundle with the following command - `SYSTEM_VERSION_COMPAT=0 pip install --only-binary=:all: imgui_bundle` +- *macOS* : under macOS, if a binary wheel is not available (e.g. for + older macOS versions), pip will try to compile from source. This might + fail if you do not have XCode installed. 
In this case, install + imgui-bundle with the following command + `SYSTEM_VERSION_COMPAT=0 pip install --only-binary=:all: imgui_bundle` ## Install from source + # Clone the repository git clone https://github.com/pthom/imgui_bundle.git cd imgui_bundle - git submodule update --init --recursive + + # Build and install the package (minimal install) pip install -v . - pip install opencv-python - pip install pyGLM -- Since there are lots of submodules, this might take a few minutes + # or build and install the package with all optional features: + # pip install -v ".[full]" -- The build process might take up to 5 minutes +The build process might take up to 5 minutes, and will clone the +submodules if needed (an internet connection is required). ## Run the python demo @@ -322,12 +357,12 @@ time between an idea and a first GUI prototype down to almost zero. It is well adapted for -- developers and researchers who want to switch easily between and - research and development environment by facilitating the port of - research artifacts +- developers and researchers who want to switch easily between and + research and development environment by facilitating the port of + research artifacts -- beginners and developers who want to quickly develop an application - without learning a GUI framework +- beginners and developers who want to quickly develop an application + without learning a GUI framework ### Who is this project **not** for @@ -440,16 +475,16 @@ Contributions are welcome! Three of my past projects gave me the idea to develop this library. -- [ImGui - Manual](https://pthom.github.io/imgui_manual_online/manual/imgui_manual.html), - an interactive manual for Dear ImGui, which I developed in June 2020 +- [ImGui + Manual](https://pthom.github.io/imgui_manual_online/manual/imgui_manual.html), + an interactive manual for Dear ImGui, which I developed in June 2020 -- [implot demo](https://traineq.org/implot_demo/src/implot_demo.html) - which I developed in 2020. 
+- [implot demo](https://traineq.org/implot_demo/src/implot_demo.html) + which I developed in 2020. -- [imgui\_datascience](https://github.com/pthom/imgui_datascience), a - python package I developed in 2018 for image analysis and debugging. - Its successor is immvision. +- [imgui\_datascience](https://github.com/pthom/imgui_datascience), a + python package I developed in 2018 for image analysis and debugging. + Its successor is immvision. Developments for Dear ImGui Bundle and its related automatic binding generator began in january 2022. diff --git a/blimgui/dist64/imgui_bundle/__init__.py b/blimgui/dist64/imgui_bundle/__init__.py index 492b11b..5f7e393 100644 --- a/blimgui/dist64/imgui_bundle/__init__.py +++ b/blimgui/dist64/imgui_bundle/__init__.py @@ -1,14 +1,23 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle import os from imgui_bundle._imgui_bundle import __bundle_submodules__, __bundle_pyodide__ # type: ignore from imgui_bundle._imgui_bundle import __version__, compilation_time from typing import Union, Tuple, List - def has_submodule(submodule_name): return submodule_name in __bundle_submodules__ +def _is_pydantic_v2_available() -> bool: + from importlib import metadata + try: + version_str: str = metadata.version("pydantic") + except metadata.PackageNotFoundError: + return False + major: int = int(version_str.split(".")[0]) + return major >= 2 + + __all__ = ["__version__", "compilation_time"] @@ -18,12 +27,6 @@ def has_submodule(submodule_name): if has_submodule("imgui"): from imgui_bundle._imgui_bundle import imgui as imgui from imgui_bundle._imgui_bundle.imgui import ImVec2, ImVec4, ImColor, FLT_MIN, FLT_MAX # noqa: F401 - from imgui_bundle import imgui_pydantic as imgui_pydantic - from imgui_bundle.imgui_pydantic import ( - ImVec4_Pydantic as ImVec4_Pydantic, - 
ImVec2_Pydantic as ImVec2_Pydantic, - ImColor_Pydantic as ImColor_Pydantic, - ) from imgui_bundle.im_col32 import IM_COL32 # noqa: F401, E402 from imgui_bundle import imgui_ctx as imgui_ctx # noqa: E402 @@ -44,20 +47,29 @@ def has_submodule(submodule_name): "FLT_MAX", "IM_COL32", "imgui_ctx", - # Pydantic types - "imgui_pydantic", - "ImVec4_Pydantic", - "ImVec2_Pydantic", - "ImColor_Pydantic", ]) - # Patch after imgui v1.90.9, where # the enum ImGuiDir_ was renamed to ImGuiDir and ImGuiSortDirection_ was renamed to ImGuiSortDirection # this enables old python to continue to work imgui.Dir_ = imgui.Dir imgui.SortDirection_ = imgui.SortDirection + # If pydantic v2 is available, import the pydantic-serializable types + if _is_pydantic_v2_available(): + from imgui_bundle import imgui_pydantic as imgui_pydantic # noqa: E402 + from imgui_bundle.imgui_pydantic import ( # noqa: E402 + ImVec4_Pydantic as ImVec4_Pydantic, + ImVec2_Pydantic as ImVec2_Pydantic, + ImColor_Pydantic as ImColor_Pydantic, + ) + + __all__.extend([ + "imgui_pydantic", + "ImVec4_Pydantic", + "ImVec2_Pydantic", + "ImColor_Pydantic", + ]) if has_submodule("hello_imgui"): from imgui_bundle._imgui_bundle import hello_imgui as hello_imgui diff --git a/blimgui/dist64/imgui_bundle/__init__.pyi b/blimgui/dist64/imgui_bundle/__init__.pyi index 6bda51e..63095f7 100644 --- a/blimgui/dist64/imgui_bundle/__init__.pyi +++ b/blimgui/dist64/imgui_bundle/__init__.pyi @@ -30,6 +30,10 @@ from .imgui import ImVec2 as ImVec2, ImVec4 as ImVec4, ImColor as ImColor, ImVec from .imgui_pydantic import ImVec2_Pydantic as ImVec2_Pydantic, ImVec4_Pydantic as ImVec4_Pydantic, ImColor_Pydantic as ImColor_Pydantic from .im_col32 import IM_COL32 as IM_COL32 + +__version__: str + + def compilation_time() -> str: """Return date and time when imgui_bundle was compiled""" pass diff --git a/blimgui/dist64/imgui_bundle/_glfw_set_search_path.py b/blimgui/dist64/imgui_bundle/_glfw_set_search_path.py index 85b3b46..4604d66 100644 --- 
a/blimgui/dist64/imgui_bundle/_glfw_set_search_path.py +++ b/blimgui/dist64/imgui_bundle/_glfw_set_search_path.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle def _glfw_set_search_path() -> None: diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_bundle.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_bundle.cpp index 60a3621..e6bc2a6 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_bundle.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_bundle.cpp @@ -92,9 +92,9 @@ int main(int, char **) #define DEMO_DETAILS(label, function_name) DemoDetails{ label, #function_name, function_name } std::vector demos { - DEMO_DETAILS("Dear ImGui Bundle", demo_imgui_bundle_intro), + DEMO_DETAILS("Intro", demo_imgui_bundle_intro), DEMO_DETAILS("Dear ImGui", demo_imgui_show_demo_window), - DEMO_DETAILS("Immediate Apps", demo_immapp_launcher), + DEMO_DETAILS("Demo Apps", demo_immapp_launcher), DEMO_DETAILS("Implot [3D]", demo_implot), DEMO_DETAILS("Node Editor", demo_node_editor_launcher), DEMO_DETAILS("Markdown", demo_imgui_md), @@ -128,6 +128,13 @@ int main(int, char **) nbFrames += 1; }; + auto showEditFontScaleInStatusBar = [&runnerParams]() + { + ImGui::SetNextItemWidth(ImGui::GetContentRegionAvail().x / 10.f); + ImGui::SliderFloat("Font scale", & ImGui::GetStyle().FontScaleMain, 0.5f, 5.f); + }; + runnerParams.callbacks.ShowStatus = showEditFontScaleInStatusBar; + runnerParams.callbacks.ShowGui = showGui; runnerParams.useImGuiTestEngine = true; diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_bundle_intro.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_bundle_intro.cpp index 86e589b..cf5caa1 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_bundle_intro.cpp +++ 
b/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_bundle_intro.cpp @@ -16,14 +16,14 @@ ImGuiTest* AutomationShowMeCode() ImGuiTestEngine *engine = HelloImGui::GetImGuiTestEngine(); ImGuiTest* automation = IM_REGISTER_TEST(engine, "Automation", "ShowMeCode"); - auto testOpenPopupFunc = [](ImGuiTestContext *ctx) { - ctx->SetRef("Dear ImGui Bundle"); + auto testFunc = [](ImGuiTestContext *ctx) { + ctx->SetRef("Intro"); ctx->ItemOpen("Code for this demo"); ctx->Sleep(2.5); ctx->ItemClose("Code for this demo"); const char* tabLoggerName = "//**/Logger"; - const char* tabIntroName = "//**/Dear ImGui Bundle"; + const char* tabIntroName = "//**/Intro"; ctx->MouseMove(tabLoggerName); ctx->MouseClick(0); @@ -33,7 +33,7 @@ ImGuiTest* AutomationShowMeCode() ctx->MouseMove(tabIntroName); ctx->MouseClick(0); }; - automation->TestFunc = testOpenPopupFunc; + automation->TestFunc = testFunc; return automation; } @@ -43,9 +43,9 @@ ImGuiTest* AutomationShowMeImmediateApps() ImGuiTestEngine *engine = HelloImGui::GetImGuiTestEngine(); ImGuiTest* automation = IM_REGISTER_TEST(engine, "Automation", "ShowMeImmediateApps"); - auto testOpenPopupFunc = [](ImGuiTestContext *ctx) { - const char* tabImmAppsName = "//**/Immediate Apps"; - const char* tabIntroName = "//**/Dear ImGui Bundle"; + auto testFunc = [](ImGuiTestContext *ctx) { + const char* tabImmAppsName = "//**/Demo Apps"; + const char* tabIntroName = "//**/Intro"; ctx->MouseMove(tabImmAppsName); ctx->MouseClick(0); @@ -56,7 +56,7 @@ ImGuiTest* AutomationShowMeImmediateApps() ctx->MouseMove(tabIntroName); ctx->MouseClick(0); }; - automation->TestFunc = testOpenPopupFunc; + automation->TestFunc = testFunc; return automation; } @@ -66,9 +66,9 @@ ImGuiTest* AutomationShowMeImGuiTestEngine() ImGuiTestEngine *engine = HelloImGui::GetImGuiTestEngine(); ImGuiTest* automation = IM_REGISTER_TEST(engine, "Automation", "ShowMeImGuiTestEngine"); - auto testOpenPopupFunc = [](ImGuiTestContext *ctx) { - const char* tabImmAppsName = "//**/Immediate 
Apps"; - const char* tabIntroName = "//**/Dear ImGui Bundle"; + auto testFunc = [](ImGuiTestContext *ctx) { + const char* tabImmAppsName = "//**/Demo Apps"; + const char* tabIntroName = "//**/Intro"; ctx->MouseMove(tabImmAppsName); ctx->MouseClick(0); @@ -78,7 +78,7 @@ ImGuiTest* AutomationShowMeImGuiTestEngine() ctx->MouseMove(tabIntroName); ctx->MouseClick(0); }; - automation->TestFunc = testOpenPopupFunc; + automation->TestFunc = testFunc; return automation; } #endif // #ifdef HELLOIMGUI_WITH_TEST_ENGINE @@ -119,13 +119,14 @@ void demo_imgui_bundle_intro() Welcome to the interactive manual for *Dear ImGui Bundle*! This manual present lots of examples, together with their code (in C++ and Python). Advices: - * This interactive manual works best when viewed together with "Dear ImGui Bundle docs" + * For Python users, read this introduction to Immediate Mode GUI with Python and Dear ImGui Bundle )"); ImGui::SetCursorPosX(ImGui::GetCursorPosX() + HelloImGui::EmSize(1.f)); - if (ImGui::Button("Open Dear ImGui Bundle docs")) - ImmApp::BrowseToUrl("https://pthom.github.io/imgui_bundle/"); + if (ImGui::Button("Immediate Mode GUI with Python")) + ImmApp::BrowseToUrl("https://github.com/pthom/imgui_bundle/blob/main/docs/docs_md/imgui_python_intro.md"); ImGuiMd::RenderUnindented(R"( + * This interactive manual works best when viewed together with ["Dear ImGui Bundle docs"](https://pthom.github.io/imgui_bundle/) * Browse through demos in the different tabs: at the top of each tab, there is a collapsible header named "Code for this demo". Click on it to show the source code for the current demo. )"); #ifdef HELLOIMGUI_WITH_TEST_ENGINE @@ -137,7 +138,7 @@ void demo_imgui_bundle_intro() } ImGuiMd::RenderUnindented(R"( - * The "Immediate Apps" tab is especially interesting, as it provide sample starter apps from which you can take inspiration. Click on the "View Code" button to view the apps code, and click on "Run" to run them. 
+ * The "Demo Apps" tab is especially interesting, as it provide sample starter apps from which you can take inspiration. Click on the "View Code" button to view the apps code, and click on "Run" to run them. )"); if (HelloImGui::GetRunnerParams()->useImGuiTestEngine) { diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_md.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_md.cpp index 59d7dd8..2f3c32b 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_md.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demo_imgui_md.cpp @@ -8,17 +8,19 @@ std::string exampleMarkdownString() { return R"( -# Markdown example +# Markdown example (H1) Hello World!
![World](images/world.png) -## Acknowledgments +## Acknowledgments (H2) This markdown renderer is based on [imgui_md](https://github.com/mekhontsev/imgui_md), by Dmitry Mekhontsev. -### Supported features +### Supported features (H3) -imgui_md currently supports the following markdown functionality: +imgui_md currently supports the following markdown functionality. + +#### Text formatting (H4) * Wrapped text * Headers @@ -46,6 +48,8 @@ int answer() ---- +#### Tables (H4) + *Warning about tables layout*: the first row will impose the columns widths. Use nbsp\; to increase the columns sizes on the first row if required. diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demo_immapp_launcher.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demo_immapp_launcher.cpp index 305861d..250f26d 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demo_immapp_launcher.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demo_immapp_launcher.cpp @@ -46,15 +46,11 @@ std::function makeGui() "demo_pydantic", "Python: How to use ImVec2 and ImVec4 with Pydantic", }, - DemoApp{ - "demo_font_common_glyph_range", - "How to load fonts with specific glyph ranges (e.g., Chinese, Japanese, Korean)", - }, - DemoApp{ - "imgui_example_glfw_opengl3", - "Python: translation of the [GLFW+OpenGL3 example](https://github.com/ocornut/imgui/blob/master/examples/example_glfw_opengl3/main.cpp) from Dear ImGui. " - "Demonstrates how to port from C++ to Python (here, *backend rendering is implemented in C++*)", - }, + // DemoApp{ + // "imgui_example_glfw_opengl3", + // "Python: translation of the [GLFW+OpenGL3 example](https://github.com/ocornut/imgui/blob/master/examples/example_glfw_opengl3/main.cpp) from Dear ImGui. 
" + // "Demonstrates how to port from C++ to Python (here, *backend rendering is implemented in C++*)", + // }, DemoApp{ "example_python_backend_glfw3", "Python: how to use ImGui with GLFW3 using a *full python* backend", diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demo_implot.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demo_implot.cpp index 908cf66..972d849 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demo_implot.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demo_implot.cpp @@ -41,5 +41,5 @@ void demo_implot() #else // defined(IMGUI_BUNDLE_WITH_IMPLOT) && defined(IMGUI_BUNDLE_WITH_IMPLOT3D) #include "imgui.h" -void demo_implot() { ImGui::Text("Dear ImGui Bundle was compiled without support for both ImPlot and ImPlot3D); } +void demo_implot() { ImGui::Text("Dear ImGui Bundle was compiled without support for both ImPlot and ImPlot3D"); } #endif diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demo_text_edit.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demo_text_edit.cpp index dd63d44..b2df198 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demo_text_edit.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demo_text_edit.cpp @@ -24,7 +24,7 @@ void demo_text_edit() static TextEditor editor = _PrepareTextEditor(); ImGuiMd::Render(R"( -# ImGuiColorTextEdit: +# ImGuiColorTextEdit [ImGuiColorTextEdit](https://github.com/BalazsJako/ImGuiColorTextEdit) is a colorizing text editor for ImGui, able to colorize C, C++, hlsl, Sql, angel_script and lua code )"); @@ -44,7 +44,8 @@ void demo_text_edit() }; ShowPaletteButtons(); - ImGui::PushFont(ImGuiMd::GetCodeFont()); + auto codeFont = ImGuiMd::GetCodeFont(); + ImGui::PushFont(codeFont.font, codeFont.size); editor.Render("Code"); ImGui::PopFont(); } diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demo_themes.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demo_themes.cpp index 45a9496..663b740 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demo_themes.cpp +++ 
b/blimgui/dist64/imgui_bundle/demos_cpp/demo_themes.cpp @@ -1,48 +1,13 @@ +// Demo for ImGui theming using ImGui Bundle. +// +// In order to apply a theme, you can use: +// ======================================= + +// ImGuiTheme::ApplyTheme(ImGuiTheme::ImGuiTheme_Cherry) + // Part of ImGui Bundle - MIT License - Copyright (c) 2022-2024 Pascal Thomet - https://github.com/pthom/imgui_bundle #include "imgui_md_wrapper/imgui_md_wrapper.h" #include "hello_imgui/hello_imgui.h" -#include - - -const std::vector ALL_THEMES = { - ImGuiTheme::ImGuiTheme_ImGuiColorsClassic, - ImGuiTheme::ImGuiTheme_ImGuiColorsDark, - ImGuiTheme::ImGuiTheme_ImGuiColorsLight, - ImGuiTheme::ImGuiTheme_MaterialFlat, - ImGuiTheme::ImGuiTheme_PhotoshopStyle, - ImGuiTheme::ImGuiTheme_GrayVariations, - ImGuiTheme::ImGuiTheme_GrayVariations_Darker, - ImGuiTheme::ImGuiTheme_MicrosoftStyle, - ImGuiTheme::ImGuiTheme_Cherry, - ImGuiTheme::ImGuiTheme_Darcula, - ImGuiTheme::ImGuiTheme_DarculaDarker, - ImGuiTheme::ImGuiTheme_LightRounded, - ImGuiTheme::ImGuiTheme_SoDark_AccentBlue, - ImGuiTheme::ImGuiTheme_SoDark_AccentYellow, - ImGuiTheme::ImGuiTheme_SoDark_AccentRed, - ImGuiTheme::ImGuiTheme_BlackIsBlack, - ImGuiTheme::ImGuiTheme_WhiteIsWhite, -}; - -const std::vector ALL_THEMES_NAMES = { - "ImGuiColorsClassic", - "ImGuiColorsDark", - "ImGuiColorsLight", - "MaterialFlat", - "PhotoshopStyle", - "GrayVariations", - "GrayVariations_Darker", - "MicrosoftStyle", - "Cherry", - "Darcula", - "DarculaDarker", - "LightRounded", - "SoDark_AccentBlue", - "SoDark_AccentYellow", - "SoDark_AccentRed", - "BlackIsBlack", - "WhiteIsWhite" -}; void demo_themes() @@ -54,15 +19,11 @@ void demo_themes() Select the menu View/Theme/Theme tweak window to explore all the themes and their customization. 
)"); + ImGui::NewLine(); + auto & tweakedTheme = HelloImGui::GetRunnerParams()->imGuiWindowParams.tweakedTheme; + bool themeChanged = ImGuiTheme::ShowThemeTweakGui(&tweakedTheme); + if (themeChanged) + ImGuiTheme::ApplyTweakedTheme(tweakedTheme); - ImGui::Text("Theme"); - - std::vector all_themes_names_antic; - for (const auto &v: ALL_THEMES_NAMES) - all_themes_names_antic.push_back(v.c_str()); - - bool changed = ImGui::ListBox( - "##Theme", ¤t_theme_idx, all_themes_names_antic.data(), all_themes_names_antic.size(), all_themes_names_antic.size()); - if (changed) - ImGuiTheme::ApplyTheme(ALL_THEMES[current_theme_idx]); + ImGui::ShowDemoWindow(); } diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demo_widgets.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demo_widgets.cpp index ab741d9..62df065 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demo_widgets.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demo_widgets.cpp @@ -15,7 +15,7 @@ #include "ImFileDialog/ImFileDialog.h" #endif #include "imgui_md_wrapper.h" -#include "ImCoolBar/ImCoolbar.h" +#include "ImCoolBar/ImCoolBar.h" #include "demo_utils/api_demos.h" #include @@ -130,6 +130,12 @@ void DemoSpinner() ImGui::Text("spinner_ang_triple"); ImGui::SameLine(); ImSpinner::SpinnerAngTriple("spinner_ang_triple", radius1, radius1 * 1.5f, radius1 * 2.0f, 2.5f, color, color, color); + + static bool show_full_demo = false; + ImGui::SameLine(); + ImGui::Checkbox("Show full spinners demo", &show_full_demo); + if (show_full_demo) + ImSpinner::demoSpinners(); } @@ -451,7 +457,7 @@ void DemoCoolBar() std::vector buttonLabels {"A", "B", "C", "D", "E", "F"}; ImGuiMd::RenderUnindented(R"( - # ImCoolBar: + # ImCoolBar ImCoolBar provides a dock-like Cool bar for Dear ImGui )"); @@ -477,10 +483,12 @@ void DemoCoolBar() void demo_widgets() { DemoCoolBar(); - DemoPortableFileDialogs(); ImGui::NewLine(); - DemoImFileDialog(); ImGui::NewLine(); - DemoKnobs(); - DemoToggle(); ImGui::NewLine(); + DemoToggle(); DemoSpinner(); + 
DemoKnobs(); DemoCommandPalette(); + ImGui::NewLine(); + DemoPortableFileDialogs(); + ImGui::NewLine(); + DemoImFileDialog(); } diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo.cpp index 2dfbe90..16afdb4 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo.cpp @@ -405,13 +405,14 @@ GuiFunction make_closure_demo_guizmo() ImGui::Separator(); for (int matId = 0; matId < gizmoCount; matId++) { - ImGuizmo::SetID(matId); + ImGuizmo::PushID(matId); EditTransform(cameraView, cameraProjection, gObjectMatrix[matId], lastUsing == matId); if (ImGuizmo::IsUsing()) { lastUsing = matId; } + ImGuizmo::PopID(); } ImGui::End(); diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo_pure.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo_pure.cpp index 93eb50f..cb83cbd 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo_pure.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_gizmo_pure.cpp @@ -342,7 +342,7 @@ void EditTransform( mCurrentGizmoOperation, mCurrentGizmoMode, objectMatrix, - std::nullopt, + nullptr, ifFlag(useSnap, snap), ifFlag(boundSizing, bounds), ifFlag(boundSizingSnap, boundsSnap) @@ -453,13 +453,14 @@ GuiFunction make_closure_demo_guizmo_pure() ImGui::Separator(); for (int matId = 0; matId < gizmoCount; matId++) { - ImGuizmo::SetID(matId); + ImGuizmo::PushID(matId); EditTransform(cameraView, cameraProjection, gObjectMatrix[matId], lastUsing == matId); if (ImGuizmo::IsUsing()) { lastUsing = matId; } + ImGuizmo::PopID(); } ImGui::End(); diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_gradient.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_gradient.cpp index b0dda96..7a38b59 100644 --- 
a/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_gradient.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demos_imguizmo/demo_guizmo_gradient.cpp @@ -13,10 +13,6 @@ #include -static ImVec4 operator*(const ImVec4& lhs, float rhs) -{ return ImVec4(lhs.x * rhs, lhs.y * rhs, lhs.z * rhs, lhs.w * rhs); } - - struct MyGradient: public ImGradient::DelegateStl { MyGradient() diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/demo_docking.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/demo_docking.cpp index 53ea8b5..11a9a60 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/demo_docking.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/demo_docking.cpp @@ -85,18 +85,14 @@ void LoadFonts(AppState& appState) // This is called by runnerParams.callbacks.L HelloImGui::ImGuiDefaultSettings::LoadDefaultFont_WithFontAwesomeIcons(); // Load the title font. Also manually merge FontAwesome icons to it - HelloImGui::FontLoadingParams fontLoadingParamsTitleIcons; - fontLoadingParamsTitleIcons.mergeFontAwesome = true; - appState.TitleFont = HelloImGui::LoadFont("fonts/Roboto/Roboto-BoldItalic.ttf", 18.f, fontLoadingParamsTitleIcons); + appState.TitleFont = HelloImGui::LoadFontTTF_WithFontAwesomeIcons("fonts/Roboto/Roboto-BoldItalic.ttf", 18.f); // Load an Emoji font HelloImGui::FontLoadingParams fontLoadingParamsEmoji; - fontLoadingParamsEmoji.useFullGlyphRange = true; appState.EmojiFont = HelloImGui::LoadFont("fonts/NotoEmoji-Regular.ttf", 24.f, fontLoadingParamsEmoji); // Load a large icon font HelloImGui::FontLoadingParams fontLoadingParamsLargeIcon; - fontLoadingParamsLargeIcon.useFullGlyphRange = true; appState.LargeIconFont = HelloImGui::LoadFont("fonts/Font_Awesome_6_Free-Solid-900.otf", 24.f, fontLoadingParamsLargeIcon); #ifdef IMGUI_ENABLE_FREETYPE @@ -108,6 +104,12 @@ void LoadFonts(AppState& appState) // This is called by runnerParams.callbacks.L } +void PushFontWithDefaultSize(ImFont* font) +{ + 
ImGui::PushFont(font, font->LegacySize); +} + + ////////////////////////////////////////////////////////////////////////// // Save additional settings in the ini file ////////////////////////////////////////////////////////////////////////// @@ -161,7 +163,7 @@ void SaveMyAppSettings(const AppState& appState) // Display a button that will hide the application window void DemoHideWindow(AppState& appState) { - ImGui::PushFont(appState.TitleFont); ImGui::Text("Hide app window"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Hide app window"); ImGui::PopFont(); static double lastHideTime = -1.; if (ImGui::Button("Hide")) { @@ -189,7 +191,7 @@ void DemoShowAdditionalWindow(AppState& appState) // Note: you should not modify manually the content of runnerParams.dockingParams.dockableWindows // (since HelloImGui is constantly looping on it) - ImGui::PushFont(appState.TitleFont); ImGui::Text("Dynamically add window"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Dynamically add window"); ImGui::PopFont(); auto currentWindow = ImGui::GetCurrentWindow(); @@ -217,7 +219,7 @@ void DemoShowAdditionalWindow(AppState& appState) void DemoLogs(AppState& appState) { - ImGui::PushFont(appState.TitleFont); ImGui::Text("Log Demo"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Log Demo"); ImGui::PopFont(); ImGui::BeginGroup(); // Edit a float using a slider from 0.0f to 1.0f @@ -241,7 +243,7 @@ void DemoLogs(AppState& appState) void DemoUserSettings(AppState& appState) { - ImGui::PushFont(appState.TitleFont); ImGui::Text("User settings"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("User settings"); ImGui::PopFont(); ImGui::BeginGroup(); ImGui::SetNextItemWidth(HelloImGui::EmSize(7.f)); ImGui::SliderInt("Value", &appState.myAppSettings.value, 0, 100); @@ -254,7 +256,7 @@ void DemoUserSettings(AppState& appState) void DemoRocket(AppState& appState) { - 
ImGui::PushFont(appState.TitleFont); ImGui::Text("Status Bar Demo"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Status Bar Demo"); ImGui::PopFont(); ImGui::BeginGroup(); if (appState.rocket_state == AppState::RocketState::Init) { @@ -291,7 +293,7 @@ void DemoRocket(AppState& appState) void DemoDockingFlags(AppState& appState) { - ImGui::PushFont(appState.TitleFont); ImGui::Text("Main dock space node flags"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Main dock space node flags"); ImGui::PopFont(); ImGui::TextWrapped(R"( This will edit the ImGuiDockNodeFlags for "MainDockSpace". Most flags are inherited by children dock spaces. @@ -302,12 +304,12 @@ Most flags are inherited by children dock spaces. std::string tip; }; std::vector all_flags = { - {ImGuiDockNodeFlags_NoSplit, "NoSplit", "prevent Dock Nodes from being split"}, + {ImGuiDockNodeFlags_NoDockingSplit, "NoSplit", "prevent Dock Nodes from being split"}, {ImGuiDockNodeFlags_NoResize, "NoResize", "prevent Dock Nodes from being resized"}, {ImGuiDockNodeFlags_AutoHideTabBar, "AutoHideTabBar", "show tab bar only if multiple windows\n" "You will need to restore the layout after changing (Menu \"View/Restore Layout\")"}, - {ImGuiDockNodeFlags_NoDockingInCentralNode, "NoDockingInCentralNode", + {ImGuiDockNodeFlags_NoDockingOverCentralNode, "NoDockingInCentralNode", "prevent docking in central node\n" "(only works with the main dock space)"}, // {ImGuiDockNodeFlags_PassthruCentralNode, "PassthruCentralNode", "advanced"}, @@ -323,12 +325,12 @@ Most flags are inherited by children dock spaces. 
void GuiWindowLayoutCustomization(AppState& appState) { - ImGui::PushFont(appState.TitleFont); ImGui::Text("Switch between layouts"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Switch between layouts"); ImGui::PopFont(); ImGui::Text("with the menu \"View/Layouts\""); if (ImGui::IsItemHovered()) ImGui::SetTooltip("Each layout remembers separately the modifications applied by the user, \nand the selected layout is restored at startup"); ImGui::Separator(); - ImGui::PushFont(appState.TitleFont); ImGui::Text("Change the theme"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Change the theme"); ImGui::PopFont(); ImGui::Text("with the menu \"View/Theme\""); if (ImGui::IsItemHovered()) ImGui::SetTooltip("The selected theme is remembered and restored at startup"); @@ -357,7 +359,7 @@ void GuiWindowAlternativeTheme(AppState& appState) if (windowOpened) { // Display some widgets - ImGui::PushFont(appState.TitleFont); ImGui::Text("Alternative Theme"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Alternative Theme"); ImGui::PopFont(); ImGui::Text("This window uses a different theme"); ImGui::SetItemTooltip(" ImGuiTheme::ImGuiTweakedTheme tweakedTheme;\n" " tweakedTheme.Theme = ImGuiTheme::ImGuiTheme_WhiteIsWhite;\n" @@ -446,7 +448,7 @@ Handling Japanese font is of course possible within ImGui / Hello ImGui! 
void DemoAssets(AppState& appState) { - ImGui::PushFont(appState.TitleFont); ImGui::Text("Image From Asset"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Image From Asset"); ImGui::PopFont(); HelloImGui::BeginGroupColumn(); ImGui::Dummy(HelloImGui::EmToVec2(0.f, 0.45f)); ImGui::Text("Hello"); @@ -456,7 +458,7 @@ void DemoAssets(AppState& appState) void DemoFonts(AppState& appState) { - ImGui::PushFont(appState.TitleFont); ImGui::Text("Fonts - " ICON_FA_ROCKET); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Fonts - " ICON_FA_ROCKET); ImGui::PopFont(); ImGui::TextWrapped("Mix icons " ICON_FA_FACE_SMILE " and text " ICON_FA_ROCKET ""); if (ImGui::IsItemHovered()) @@ -466,7 +468,7 @@ void DemoFonts(AppState& appState) ImGui::BeginGroup(); { - ImGui::PushFont(appState.EmojiFont); + PushFontWithDefaultSize(appState.EmojiFont); // ✌️ (Victory Hand Emoji) ImGui::Text(U8_TO_CHAR(u8"\U0000270C\U0000FE0F")); ImGui::SameLine(); @@ -493,7 +495,7 @@ void DemoFonts(AppState& appState) #ifdef IMGUI_ENABLE_FREETYPE ImGui::Text("Colored Fonts"); - ImGui::PushFont(appState.ColorFont); + PushFontWithDefaultSize(appState.ColorFont); ImGui::Text("COLOR!"); ImGui::PopFont(); if (ImGui::IsItemHovered()) @@ -503,7 +505,7 @@ void DemoFonts(AppState& appState) void DemoThemes(AppState& appState) { - ImGui::PushFont(appState.TitleFont); ImGui::Text("Themes"); ImGui::PopFont(); + PushFontWithDefaultSize(appState.TitleFont); ImGui::Text("Themes"); ImGui::PopFont(); auto& tweakedTheme = HelloImGui::GetRunnerParams()->imGuiWindowParams.tweakedTheme; ImGui::BeginGroup(); @@ -583,7 +585,7 @@ void ShowAppMenuItems() void ShowTopToolbar(AppState& appState) { - ImGui::PushFont(appState.LargeIconFont); + PushFontWithDefaultSize(appState.LargeIconFont); if (ImGui::Button(ICON_FA_POWER_OFF)) HelloImGui::GetRunnerParams()->appShallExit = true; @@ -604,7 +606,7 @@ void ShowTopToolbar(AppState& appState) void ShowRightToolbar(AppState& 
appState) { - ImGui::PushFont(appState.LargeIconFont); + PushFontWithDefaultSize(appState.LargeIconFont); if (ImGui::Button(ICON_FA_CIRCLE_ARROW_LEFT)) HelloImGui::Log(HelloImGui::LogLevel::Info, "Clicked on Circle left in the right toolbar"); diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/demo_font_common_glyph_range.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/demo_font_common_glyph_range.cpp deleted file mode 100644 index 09d891f..0000000 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/demo_font_common_glyph_range.cpp +++ /dev/null @@ -1,61 +0,0 @@ -//Demonstrates how to load a font with Chinese characters and display them in the GUI, -//using the common glyph ranges defined in by ImGui. - -#include "imgui.h" -#include "hello_imgui/hello_imgui.h" -#include "demo_utils/api_demos.h" - - -ImFont* font_cn = nullptr; - - -void LoadFont() -{ - // Note: this font is not provided with the ImGui bundle (too large). - // You will need to provide it yourself, or use another font. - const char *font_filename = "fonts/NotoSerifSC-VariableFont_wght.ttf"; - if (! HelloImGui::AssetExists(font_filename)) - return; - - // The range of Chinese characters is defined by ImGui as a single list of characters (List[ImWchar]), with a terminating 0. - // (each range is a pair of successive characters in this list, with the second character being the last one in the range) - const ImWchar * cn_glyph_ranges_imgui = ImGui::GetIO().Fonts->GetGlyphRangesChineseSimplifiedCommon(); - // We need to convert this list into a list of pairs (List[ImWcharPair]), where each pair is a range of characters. 
- auto cn_glyph_ranges_pair = HelloImGui::TranslateCommonGlyphRanges(cn_glyph_ranges_imgui); - - HelloImGui::FontLoadingParams font_loading_params; - font_loading_params.glyphRanges = cn_glyph_ranges_pair; - font_cn = HelloImGui::LoadFont(font_filename, 40.0f, font_loading_params); -} - - -void Gui() -{ - if (font_cn != nullptr) - { - ImGui::PushFont(font_cn); - ImGui::Text("Hello world"); - ImGui::Text("你好,世界"); - ImGui::PopFont(); - } - else - { - ImGui::Text("Font file not found"); - ImGui::TextWrapped(R"( - This font is not provided with the ImGui bundle (too large). - You will need to provide it yourself, or use another font. - )"); - } -} - - - -int main(int, char **) -{ - ChdirBesideAssetsFolder(); - - HelloImGui::RunnerParams runner_params; - runner_params.callbacks.LoadAdditionalFonts = LoadFont; - runner_params.callbacks.ShowGui = Gui; - HelloImGui::Run(runner_params); -} diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/imgui_example_glfw_opengl3.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/imgui_example_glfw_opengl3.obsolete.cpp similarity index 97% rename from blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/imgui_example_glfw_opengl3.cpp rename to blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/imgui_example_glfw_opengl3.obsolete.cpp index 5ff21d4..b2c70fe 100644 --- a/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/imgui_example_glfw_opengl3.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demos_immapp/imgui_example_glfw_opengl3.obsolete.cpp @@ -1,228 +1,229 @@ -#if defined(HELLOIMGUI_HAS_OPENGL3) && defined(HELLOIMGUI_USE_GLFW3) - -#ifndef __EMSCRIPTEN__ // to keep the build process simple, this demo is currently disabled with emscripten (although ImGui and Imgui Bundle are perfectly compatible with emscripten) - -// Dear ImGui: standalone example application for GLFW + OpenGL 3, using programmable pipeline -// (GLFW is a cross-platform general purpose library for handling windows, inputs, OpenGL/Vulkan/Metal 
graphics context creation, etc.) -// If you are new to Dear ImGui, read documentation from the docs/ folder + read the top of imgui.cpp. -// Read online: https://github.com/ocornut/imgui/tree/master/docs -#include "imgui.h" -#include "imgui_impl_glfw.h" -#include "imgui_impl_opengl3.h" -#include -#include -#define GL_SILENCE_DEPRECATION -#if defined(IMGUI_IMPL_OPENGL_ES2) -#include -#endif - -// [Win32] Our example includes a copy of glfw3.lib pre-compiled with VS2010 to maximize ease of testing and compatibility with old VS compilers. -// To link with VS2010-era libraries, VS2015+ requires linking with legacy_stdio_definitions.lib, which we do using this pragma. -// Your own project should not be affected, as you are likely to link with a newer binary of GLFW that is adequate for your version of Visual Studio. -#if defined(_MSC_VER) && (_MSC_VER >= 1900) && !defined(IMGUI_DISABLE_WIN32_FUNCTIONS) -#pragma comment(lib, "legacy_stdio_definitions") -#endif - -// This example can also compile and run with Emscripten! See 'Makefile.emscripten' for details. 
-#ifdef __EMSCRIPTEN__ -#include "imgui/examples/libs/emscripten/emscripten_mainloop_stub.h" -#endif - -static void glfw_error_callback(int error, const char* description) -{ - fprintf(stderr, "GLFW Error %d: %s\n", error, description); -} - - -// Main code -int main(int, char**) -{ - glfwSetErrorCallback(glfw_error_callback); - if (!glfwInit()) - return 1; - - - // Decide GL+GLSL versions -#if defined(IMGUI_IMPL_OPENGL_ES2) - // GL ES 2.0 + GLSL 100 - const char* glsl_version = "#version 100"; - glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2); - glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0); - glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API); -#elif defined(__APPLE__) - // GL 3.2 + GLSL 150 - const char* glsl_version = "#version 150"; - glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); - glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2); - glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // 3.2+ only - glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // Required on Mac -#else - // GL 3.0 + GLSL 130 - const char* glsl_version = "#version 130"; - glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); - glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0); - //glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // 3.2+ only - //glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // 3.0+ only -#endif - - - // Create window with graphics context - GLFWwindow* window = glfwCreateWindow(1280, 720, "Dear ImGui GLFW+OpenGL3 example", nullptr, nullptr); - if (window == nullptr) - return 1; - glfwMakeContextCurrent(window); - glfwSwapInterval(1); // Enable vsync - - - // Setup Dear ImGui context - IMGUI_CHECKVERSION(); - ImGui::CreateContext(); - ImGuiIO& io = ImGui::GetIO(); (void)io; - io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard; // Enable Keyboard Controls - io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad; // Enable Gamepad Controls - io.ConfigFlags |= ImGuiConfigFlags_DockingEnable; // Enable Docking - io.ConfigFlags |= 
ImGuiConfigFlags_ViewportsEnable; // Enable Multi-Viewport / Platform Windows - //io.ConfigViewportsNoAutoMerge = true; - //io.ConfigViewportsNoTaskBarIcon = true; - - // Setup Dear ImGui style - ImGui::StyleColorsDark(); - //ImGui::StyleColorsLight(); - - // When viewports are enabled we tweak WindowRounding/WindowBg so platform windows can look identical to regular ones. - ImGuiStyle& style = ImGui::GetStyle(); - if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) - { - style.WindowRounding = 0.0f; - style.Colors[ImGuiCol_WindowBg].w = 1.0f; - } - - // Setup Platform/Renderer backends - ImGui_ImplGlfw_InitForOpenGL(window, true); - ImGui_ImplOpenGL3_Init(glsl_version); - - // Load Fonts - // - If no fonts are loaded, dear imgui will use the default font. You can also load multiple fonts and use ImGui::PushFont()/PopFont() to select them. - // - AddFontFromFileTTF() will return the ImFont* so you can store it if you need to select the font among multiple. - // - If the file cannot be loaded, the function will return a nullptr. Please handle those errors in your application (e.g. use an assertion, or display an error and quit). - // - The fonts will be rasterized at a given size (w/ oversampling) and stored into a texture when calling ImFontAtlas::Build()/GetTexDataAsXXXX(), which ImGui_ImplXXXX_NewFrame below will call. - // - Use '#define IMGUI_ENABLE_FREETYPE' in your imconfig file to use Freetype for higher quality font rendering. - // - Read 'docs/FONTS.md' for more instructions and details. - // - Remember that in C/C++ if you want to include a backslash \ in a string literal you need to write a double backslash \\ ! - // - Our Emscripten build process allows embedding fonts to be accessible at runtime from the "fonts/" folder. See Makefile.emscripten for details. 
- //io.Fonts->AddFontDefault(); - //io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\segoeui.ttf", 18.0f); - //io.Fonts->AddFontFromFileTTF("../../misc/fonts/DroidSans.ttf", 16.0f); - //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Roboto-Medium.ttf", 16.0f); - //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Cousine-Regular.ttf", 15.0f); - //ImFont* font = io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\ArialUni.ttf", 18.0f, nullptr, io.Fonts->GetGlyphRangesJapanese()); - //IM_ASSERT(font != nullptr); - - // Our state - bool show_demo_window = true; - bool show_another_window = false; - ImVec4 clear_color = ImVec4(0.45f, 0.55f, 0.60f, 1.00f); - - // Main loop -#ifdef __EMSCRIPTEN__ - // For an Emscripten build we are disabling file-system access, so let's not attempt to do a fopen() of the imgui.ini file. - // You may manually call LoadIniSettingsFromMemory() to load settings from your own storage. - io.IniFilename = nullptr; - EMSCRIPTEN_MAINLOOP_BEGIN -#else - while (!glfwWindowShouldClose(window)) -#endif - { - // Poll and handle events (inputs, window resize, etc.) - // You can read the io.WantCaptureMouse, io.WantCaptureKeyboard flags to tell if dear imgui wants to use your inputs. - // - When io.WantCaptureMouse is true, do not dispatch mouse input data to your main application, or clear/overwrite your copy of the mouse data. - // - When io.WantCaptureKeyboard is true, do not dispatch keyboard input data to your main application, or clear/overwrite your copy of the keyboard data. - // Generally you may always pass all inputs to dear imgui, and hide them from your application based on those two flags. - glfwPollEvents(); - - // Start the Dear ImGui frame - ImGui_ImplOpenGL3_NewFrame(); - ImGui_ImplGlfw_NewFrame(); - ImGui::NewFrame(); - - // 1. Show the big demo window (Most of the sample code is in ImGui::ShowDemoWindow()! You can browse its code to learn more about Dear ImGui!). 
- if (show_demo_window) - ImGui::ShowDemoWindow(&show_demo_window); - - // 2. Show a simple window that we create ourselves. We use a Begin/End pair to create a named window. - { - static float f = 0.0f; - static int counter = 0; - - ImGui::Begin("Hello, world!"); // Create a window called "Hello, world!" and append into it. - - ImGui::Text("This is some useful text."); // Display some text (you can use a format strings too) - ImGui::Checkbox("Demo Window", &show_demo_window); // Edit bools storing our window open/close state - ImGui::Checkbox("Another Window", &show_another_window); - - ImGui::SliderFloat("float", &f, 0.0f, 1.0f); // Edit 1 float using a slider from 0.0f to 1.0f - ImGui::ColorEdit3("clear color", (float*)&clear_color); // Edit 3 floats representing a color - - if (ImGui::Button("Button")) // Buttons return true when clicked (most widgets return true when edited/activated) - counter++; - ImGui::SameLine(); - ImGui::Text("counter = %d", counter); - - ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / io.Framerate, io.Framerate); - ImGui::End(); - } - - // 3. Show another simple window. 
- if (show_another_window) - { - ImGui::Begin("Another Window", &show_another_window); // Pass a pointer to our bool variable (the window will have a closing button that will clear the bool when clicked) - ImGui::Text("Hello from another window!"); - if (ImGui::Button("Close Me")) - show_another_window = false; - ImGui::End(); - } - - // Rendering - ImGui::Render(); - int display_w, display_h; - glfwGetFramebufferSize(window, &display_w, &display_h); - glViewport(0, 0, display_w, display_h); - glClearColor(clear_color.x * clear_color.w, clear_color.y * clear_color.w, clear_color.z * clear_color.w, clear_color.w); - glClear(GL_COLOR_BUFFER_BIT); - ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData()); - - // Update and Render additional Platform Windows - // (Platform functions may change the current OpenGL context, so we save/restore it to make it easier to paste this code elsewhere. - // For this specific demo app we could also call glfwMakeContextCurrent(window) directly) - if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) - { - GLFWwindow* backup_current_context = glfwGetCurrentContext(); - ImGui::UpdatePlatformWindows(); - ImGui::RenderPlatformWindowsDefault(); - glfwMakeContextCurrent(backup_current_context); - } - - glfwSwapBuffers(window); - } -#ifdef __EMSCRIPTEN__ - EMSCRIPTEN_MAINLOOP_END; -#endif - - // Cleanup - ImGui_ImplOpenGL3_Shutdown(); - ImGui_ImplGlfw_Shutdown(); - ImGui::DestroyContext(); - - glfwDestroyWindow(window); - glfwTerminate(); - - return 0; -} -#else // #ifndef __EMSCRIPTEN__ -int main(int, char**) {} -#endif - -#else // #if defined(HELLOIMGUI_HAS_OPENGL3) && defined(HELLOIMGUI_USE_GLFW3) -#include -int main() { printf("Glfw not found!"); } +// Obsoleted since v1.92 in Python: use pure python backends instead +#if defined(HELLOIMGUI_HAS_OPENGL3) && defined(HELLOIMGUI_USE_GLFW3) + +#ifndef __EMSCRIPTEN__ // to keep the build process simple, this demo is currently disabled with emscripten (although ImGui and Imgui Bundle are 
perfectly compatible with emscripten) + +// Dear ImGui: standalone example application for GLFW + OpenGL 3, using programmable pipeline +// (GLFW is a cross-platform general purpose library for handling windows, inputs, OpenGL/Vulkan/Metal graphics context creation, etc.) +// If you are new to Dear ImGui, read documentation from the docs/ folder + read the top of imgui.cpp. +// Read online: https://github.com/ocornut/imgui/tree/master/docs +#include "imgui.h" +#include "imgui_impl_glfw.h" +#include "imgui_impl_opengl3.h" +#include +#include +#define GL_SILENCE_DEPRECATION +#if defined(IMGUI_IMPL_OPENGL_ES2) +#include +#endif + +// [Win32] Our example includes a copy of glfw3.lib pre-compiled with VS2010 to maximize ease of testing and compatibility with old VS compilers. +// To link with VS2010-era libraries, VS2015+ requires linking with legacy_stdio_definitions.lib, which we do using this pragma. +// Your own project should not be affected, as you are likely to link with a newer binary of GLFW that is adequate for your version of Visual Studio. +#if defined(_MSC_VER) && (_MSC_VER >= 1900) && !defined(IMGUI_DISABLE_WIN32_FUNCTIONS) +#pragma comment(lib, "legacy_stdio_definitions") +#endif + +// This example can also compile and run with Emscripten! See 'Makefile.emscripten' for details. 
+#ifdef __EMSCRIPTEN__ +#include "imgui/examples/libs/emscripten/emscripten_mainloop_stub.h" +#endif + +static void glfw_error_callback(int error, const char* description) +{ + fprintf(stderr, "GLFW Error %d: %s\n", error, description); +} + + +// Main code +int main(int, char**) +{ + glfwSetErrorCallback(glfw_error_callback); + if (!glfwInit()) + return 1; + + + // Decide GL+GLSL versions +#if defined(IMGUI_IMPL_OPENGL_ES2) + // GL ES 2.0 + GLSL 100 + const char* glsl_version = "#version 100"; + glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2); + glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0); + glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API); +#elif defined(__APPLE__) + // GL 3.2 + GLSL 150 + const char* glsl_version = "#version 150"; + glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); + glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2); + glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // 3.2+ only + glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // Required on Mac +#else + // GL 3.0 + GLSL 130 + const char* glsl_version = "#version 130"; + glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); + glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0); + //glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // 3.2+ only + //glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // 3.0+ only +#endif + + + // Create window with graphics context + GLFWwindow* window = glfwCreateWindow(1280, 720, "Dear ImGui GLFW+OpenGL3 example", nullptr, nullptr); + if (window == nullptr) + return 1; + glfwMakeContextCurrent(window); + glfwSwapInterval(1); // Enable vsync + + + // Setup Dear ImGui context + IMGUI_CHECKVERSION(); + ImGui::CreateContext(); + ImGuiIO& io = ImGui::GetIO(); (void)io; + io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard; // Enable Keyboard Controls + io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad; // Enable Gamepad Controls + io.ConfigFlags |= ImGuiConfigFlags_DockingEnable; // Enable Docking + io.ConfigFlags |= 
ImGuiConfigFlags_ViewportsEnable; // Enable Multi-Viewport / Platform Windows + //io.ConfigViewportsNoAutoMerge = true; + //io.ConfigViewportsNoTaskBarIcon = true; + + // Setup Dear ImGui style + ImGui::StyleColorsDark(); + //ImGui::StyleColorsLight(); + + // When viewports are enabled we tweak WindowRounding/WindowBg so platform windows can look identical to regular ones. + ImGuiStyle& style = ImGui::GetStyle(); + if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) + { + style.WindowRounding = 0.0f; + style.Colors[ImGuiCol_WindowBg].w = 1.0f; + } + + // Setup Platform/Renderer backends + ImGui_ImplGlfw_InitForOpenGL(window, true); + ImGui_ImplOpenGL3_Init(glsl_version); + + // Load Fonts + // - If no fonts are loaded, dear imgui will use the default font. You can also load multiple fonts and use ImGui::PushFont()/PopFont() to select them. + // - AddFontFromFileTTF() will return the ImFont* so you can store it if you need to select the font among multiple. + // - If the file cannot be loaded, the function will return a nullptr. Please handle those errors in your application (e.g. use an assertion, or display an error and quit). + // - The fonts will be rasterized at a given size (w/ oversampling) and stored into a texture when calling ImFontAtlas::Build()/GetTexDataAsXXXX(), which ImGui_ImplXXXX_NewFrame below will call. + // - Use '#define IMGUI_ENABLE_FREETYPE' in your imconfig file to use Freetype for higher quality font rendering. + // - Read 'docs/FONTS.md' for more instructions and details. + // - Remember that in C/C++ if you want to include a backslash \ in a string literal you need to write a double backslash \\ ! + // - Our Emscripten build process allows embedding fonts to be accessible at runtime from the "fonts/" folder. See Makefile.emscripten for details. 
+ //io.Fonts->AddFontDefault(); + //io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\segoeui.ttf", 18.0f); + //io.Fonts->AddFontFromFileTTF("../../misc/fonts/DroidSans.ttf", 16.0f); + //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Roboto-Medium.ttf", 16.0f); + //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Cousine-Regular.ttf", 15.0f); + //ImFont* font = io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\ArialUni.ttf", 18.0f, nullptr, io.Fonts->GetGlyphRangesJapanese()); + //IM_ASSERT(font != nullptr); + + // Our state + bool show_demo_window = true; + bool show_another_window = false; + ImVec4 clear_color = ImVec4(0.45f, 0.55f, 0.60f, 1.00f); + + // Main loop +#ifdef __EMSCRIPTEN__ + // For an Emscripten build we are disabling file-system access, so let's not attempt to do a fopen() of the imgui.ini file. + // You may manually call LoadIniSettingsFromMemory() to load settings from your own storage. + io.IniFilename = nullptr; + EMSCRIPTEN_MAINLOOP_BEGIN +#else + while (!glfwWindowShouldClose(window)) +#endif + { + // Poll and handle events (inputs, window resize, etc.) + // You can read the io.WantCaptureMouse, io.WantCaptureKeyboard flags to tell if dear imgui wants to use your inputs. + // - When io.WantCaptureMouse is true, do not dispatch mouse input data to your main application, or clear/overwrite your copy of the mouse data. + // - When io.WantCaptureKeyboard is true, do not dispatch keyboard input data to your main application, or clear/overwrite your copy of the keyboard data. + // Generally you may always pass all inputs to dear imgui, and hide them from your application based on those two flags. + glfwPollEvents(); + + // Start the Dear ImGui frame + ImGui_ImplOpenGL3_NewFrame(); + ImGui_ImplGlfw_NewFrame(); + ImGui::NewFrame(); + + // 1. Show the big demo window (Most of the sample code is in ImGui::ShowDemoWindow()! You can browse its code to learn more about Dear ImGui!). 
+ if (show_demo_window) + ImGui::ShowDemoWindow(&show_demo_window); + + // 2. Show a simple window that we create ourselves. We use a Begin/End pair to create a named window. + { + static float f = 0.0f; + static int counter = 0; + + ImGui::Begin("Hello, world!"); // Create a window called "Hello, world!" and append into it. + + ImGui::Text("This is some useful text."); // Display some text (you can use a format strings too) + ImGui::Checkbox("Demo Window", &show_demo_window); // Edit bools storing our window open/close state + ImGui::Checkbox("Another Window", &show_another_window); + + ImGui::SliderFloat("float", &f, 0.0f, 1.0f); // Edit 1 float using a slider from 0.0f to 1.0f + ImGui::ColorEdit3("clear color", (float*)&clear_color); // Edit 3 floats representing a color + + if (ImGui::Button("Button")) // Buttons return true when clicked (most widgets return true when edited/activated) + counter++; + ImGui::SameLine(); + ImGui::Text("counter = %d", counter); + + ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / io.Framerate, io.Framerate); + ImGui::End(); + } + + // 3. Show another simple window. 
+ if (show_another_window) + { + ImGui::Begin("Another Window", &show_another_window); // Pass a pointer to our bool variable (the window will have a closing button that will clear the bool when clicked) + ImGui::Text("Hello from another window!"); + if (ImGui::Button("Close Me")) + show_another_window = false; + ImGui::End(); + } + + // Rendering + ImGui::Render(); + int display_w, display_h; + glfwGetFramebufferSize(window, &display_w, &display_h); + glViewport(0, 0, display_w, display_h); + glClearColor(clear_color.x * clear_color.w, clear_color.y * clear_color.w, clear_color.z * clear_color.w, clear_color.w); + glClear(GL_COLOR_BUFFER_BIT); + ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData()); + + // Update and Render additional Platform Windows + // (Platform functions may change the current OpenGL context, so we save/restore it to make it easier to paste this code elsewhere. + // For this specific demo app we could also call glfwMakeContextCurrent(window) directly) + if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable) + { + GLFWwindow* backup_current_context = glfwGetCurrentContext(); + ImGui::UpdatePlatformWindows(); + ImGui::RenderPlatformWindowsDefault(); + glfwMakeContextCurrent(backup_current_context); + } + + glfwSwapBuffers(window); + } +#ifdef __EMSCRIPTEN__ + EMSCRIPTEN_MAINLOOP_END; +#endif + + // Cleanup + ImGui_ImplOpenGL3_Shutdown(); + ImGui_ImplGlfw_Shutdown(); + ImGui::DestroyContext(); + + glfwDestroyWindow(window); + glfwTerminate(); + + return 0; +} +#else // #ifndef __EMSCRIPTEN__ +int main(int, char**) {} +#endif + +#else // #if defined(HELLOIMGUI_HAS_OPENGL3) && defined(HELLOIMGUI_USE_GLFW3) +#include +int main() { printf("Glfw not found!"); } #endif \ No newline at end of file diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/demos_immvision/demo_immvision_display.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/demos_immvision/demo_immvision_display.cpp index ba3ff9c..75086be 100644 --- 
a/blimgui/dist64/imgui_bundle/demos_cpp/demos_immvision/demo_immvision_display.cpp +++ b/blimgui/dist64/imgui_bundle/demos_cpp/demos_immvision/demo_immvision_display.cpp @@ -1,8 +1,9 @@ #include "immapp/immapp.h" #include "immvision/immvision.h" #include "demo_utils/api_demos.h" +#include "hello_imgui/hello_imgui.h" #include - +#include "imgui.h" void demo_immvision_display() { @@ -18,14 +19,30 @@ void demo_immvision_display() std::string assetsDir = DemosAssetsFolder() + "/images/"; bear = cv::imread(assetsDir + "bear_transparent.png", cv::IMREAD_UNCHANGED); tennis = cv::imread(assetsDir + "tennis.jpg"); + + int bearDisplaySize = int(HelloImGui::EmSize(15.f)); + params.ImageDisplaySize = cv::Size(bearDisplaySize, bearDisplaySize); + inited = true; } - ImGuiMd::RenderUnindented("ImmVision::ImageDisplay() will simply display an image"); + ImGui::BeginGroup(); + ImGuiMd::RenderUnindented("# ImmVision::ImageDisplay()"); + ImGuiMd::RenderUnindented("Displays an image (possibly resizable)"); ImmVision::ImageDisplayResizable("Tennis", tennis, &imageDisplaySize); + ImGui::EndGroup(); - ImGuiMd::RenderUnindented(R"( - immvision.image() will display an image, while providing lots of visualization options.
- Open the options panel by clicking on the settings button at the bottom right corner of the image)"); + ImGui::SameLine(); + + ImGui::BeginGroup(); + ImGuiMd::RenderUnindented("# ImmVision::Image()"); + ImGuiMd::RenderUnindented("Displays an image, while providing lots of visualization options."); ImmVision::Image("Bear", bear, ¶ms); + ImGuiMd::RenderUnindented(R"( + * Zoom in/out using the mouse wheel. + * Pixel values are displayed at high zoom levels. + * Pan the image by dragging it with the left mouse button + * Open settings via button (bottom right corner of the image) + )"); + ImGui::EndGroup(); } diff --git a/blimgui/dist64/imgui_bundle/demos_cpp/sandbox/sandbox_node_clipping_issue.cpp b/blimgui/dist64/imgui_bundle/demos_cpp/sandbox/sandbox_node_clipping_issue.cpp new file mode 100644 index 0000000..bde1c79 --- /dev/null +++ b/blimgui/dist64/imgui_bundle/demos_cpp/sandbox/sandbox_node_clipping_issue.cpp @@ -0,0 +1,117 @@ +// This sandbox was used to investigate a clipping issue +// when using imgui-node-editor inside a docked window with popups +// See related investigation: https://chatgpt.com/c/68d83a07-826c-8325-a084-00f9c87d6a3d +// +// All in all, when inside a node-editor's canvas: +// - popups now works fine with imgui-node-editor +// => as a consequence, tooltips, ColorEdit, Combo, ImGui::BeginPopup, etc. 
also work fine +// - standard Windows may not work (may cause clipping issues) +// => as a consequence, avoid calling ImGui::Begin from inside a node-editor's canvas +// - child windows (ImGui::BeginChild) will not work +// => as a consequence, avoid calling ImGui::BeginChild from inside a node-editor's canvas +#define IMGUI_DEFINE_MATH_OPERATORS +#include "immapp/immapp.h" +#include "imgui.h" +#include "imgui_internal.h" +#include "imgui-node-editor/imgui_node_editor.h" + +namespace ed = ax::NodeEditor; + +ImVec4 gColor(0.1, 0.2, 0.8, 1); + +static void DebugWidgetInfo(const char* label) +{ + ImVec2 min = ImGui::GetItemRectMin(); + ImVec2 max = ImGui::GetItemRectMax(); + ImVec2 mouse = ImGui::GetIO().MousePos; + if (mouse.x > -151.f && mouse.x < -150.f) + printf("Break\n"); + bool hover = ImGui::IsItemHovered(); + + ImGui::Text("=== %s ===", label); + ImGui::Text(" Rect: (%.1f, %.1f) – (%.1f, %.1f)", min.x, min.y, max.x, max.y); + ImGui::Text(" Mouse: (%.1f, %.1f)", mouse.x, mouse.y); + ImGui::Text(" Hovered: %s", hover ? 
"YES" : "NO"); +} + + +void Gui() +{ + ed::Begin("My Node Editor"); + + ed::BeginNode(1); + + ImGui::BeginVertical("BV"); + + ImGui::Dummy(ImVec2(500, 0)); + + if (ImGui::Button("TestButton")) + ImGui::Text("Button clicked!"); + DebugWidgetInfo("TestButton"); + ImGuiWindow* win = ImGui::GetCurrentWindow(); + ImGui::Text("CurrentWindow: %s", win->Name); + ImGui::Text("ClipRect: (%.1f, %.1f) – (%.1f, %.1f)", + win->ClipRect.Min.x, win->ClipRect.Min.y, + win->ClipRect.Max.x, win->ClipRect.Max.y); + + ImGui::SeparatorText("Test DrawList"); + + ImDrawList* dl = ImGui::GetWindowDrawList(); + ImVec2 p = ImGui::GetCursorScreenPos(); + dl->AddRectFilled(p + ImVec2(-50, -50), p + ImVec2(-20, -20), IM_COL32(255, 0, 0, 200)); // Red rect + + ImGui::Text("My Node Editor"); + ImGui::SetNextItemWidth(200.f); + ImGui::ColorEdit4("Color", &gColor.x); + + p = ImGui::GetCursorScreenPos(); + dl->AddRectFilled(p + ImVec2(20, 20), p + ImVec2(50, 50), IM_COL32(0, 255, 0, 200)); // Green rect + + + if (ImGui::Button("Test Popup")) + ImGui::OpenPopup("TestPopup"); + if (ImGui::BeginPopup("TestPopup")) + { + ImGui::Text("Hello from popup!"); + DebugWidgetInfo("TestPopup"); + ImGui::ColorEdit4("Color2", &gColor.x); + ImGui::EndPopup(); + } + + + ImGui::EndVertical(); + + // Standard ImGui windows do not work well with imgui-node-editor + // ImGuiWindowFlags flags = 0; + // ImGui::Begin("Inner Window", nullptr, flags); + // ImGui::Text("Inner Window"); + // ImGui::SetNextItemWidth(200.f); + // ImGui::ColorEdit4("Color2", &gColor.x); + // ImGui::End(); + + ed::EndNode(); + + ed::End(); +} + + +int main(int, char**) +{ + HelloImGui::RunnerParams runnerParams; + ImmApp::AddOnsParams addOnsParams; + + runnerParams.imGuiWindowParams.defaultImGuiWindowType = HelloImGui::DefaultImGuiWindowType::ProvideFullScreenDockSpace; + + // runnerParams.callbacks.ShowGui = Gui; + HelloImGui::DockableWindow dockwindow("Node Editor Demo"); + dockwindow.GuiFunction = Gui; + dockwindow.dockSpaceName = 
"MainDockSpace"; + runnerParams.dockingParams.dockableWindows = {dockwindow}; + + runnerParams.imGuiWindowParams.enableViewports = true; + addOnsParams.withNodeEditor = true; + addOnsParams.withNodeEditorConfig = ed::Config(); + addOnsParams.withNodeEditorConfig->ForceWindowContentWidthToNodeWidth = true; + ImmApp::Run(runnerParams, addOnsParams); + return 0; +} diff --git a/blimgui/dist64/imgui_bundle/demos_python/__init__.py b/blimgui/dist64/imgui_bundle/demos_python/__init__.py index 22bf127..bb1ccdf 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/__init__.py +++ b/blimgui/dist64/imgui_bundle/demos_python/__init__.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle import os from imgui_bundle import hello_imgui diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_bundle.py b/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_bundle.py index e1fdfd4..4c9b5f2 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_bundle.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_bundle.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from typing import List, Callable from types import ModuleType from dataclasses import dataclass @@ -88,9 +88,9 @@ class DemoDetails: demo_module: ModuleType demos = [ - DemoDetails("Dear ImGui Bundle", demo_imgui_bundle_intro), + DemoDetails("Intro", demo_imgui_bundle_intro), DemoDetails("Dear ImGui", demo_imgui_show_demo_window), - DemoDetails("Immediate Apps", demo_immapp_launcher), + DemoDetails("Demo Apps", demo_immapp_launcher), DemoDetails("Implot [3D]", demo_implot), DemoDetails("Node Editor", 
demo_node_editor_launcher), DemoDetails("Markdown", demo_imgui_md), @@ -118,6 +118,13 @@ def show_gui(): runner_params.docking_params.focus_dockable_window("Dear ImGui Bundle") show_gui.nb_frames += 1 + def show_edit_font_scale_in_status_bar(): + imgui.set_next_item_width(imgui.get_content_region_avail().x / 10) + _, imgui.get_style().font_scale_main = imgui.slider_float( + "Font scale", imgui.get_style().font_scale_main, 0.5, 5) + + runner_params.callbacks.show_status = show_edit_font_scale_in_status_bar + runner_params.callbacks.show_gui = show_gui if "test_engine" in dir(imgui): # only enable test engine if available (i.e. if imgui bundle was compiled with it) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_bundle_intro.py b/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_bundle_intro.py index a16a336..634fabc 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_bundle_intro.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_bundle_intro.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle import webbrowser from imgui_bundle import imgui, imgui_md, hello_imgui @@ -10,14 +10,14 @@ def automation_show_me_code(): engine = hello_imgui.get_imgui_test_engine() automation = imgui.test_engine.register_test(engine, "Automation", "ShowMeCode") - def test_open_popup_func(ctx): - ctx.set_ref("Dear ImGui Bundle") + def test_func(ctx): + ctx.set_ref("Intro") ctx.item_open("Code for this demo") ctx.sleep(2.5) ctx.item_close("Code for this demo") tab_logger_name = "//**/Logger" - tab_intro_name = "//**/Dear ImGui Bundle" + tab_intro_name = "//**/Intro" ctx.mouse_move(tab_logger_name) ctx.mouse_click(0) @@ -27,7 +27,7 @@ def test_open_popup_func(ctx): ctx.mouse_move(tab_intro_name) ctx.mouse_click(0) - automation.test_func = 
test_open_popup_func + automation.test_func = test_func return automation @@ -37,9 +37,9 @@ def automation_show_me_immediate_apps(): engine, "Automation", "ShowMeImmediateApps" ) - def test_open_popup_func(ctx): - tab_imm_apps_name = "//**/Immediate Apps" - tab_intro_name = "//**/Dear ImGui Bundle" + def test_func(ctx): + tab_imm_apps_name = "//**/Demo Apps" + tab_intro_name = "//**/Intro" ctx.mouse_move(tab_imm_apps_name) ctx.mouse_click(0) @@ -50,7 +50,7 @@ def test_open_popup_func(ctx): ctx.mouse_move(tab_intro_name) ctx.mouse_click(0) - automation.test_func = test_open_popup_func + automation.test_func = test_func return automation @@ -60,9 +60,9 @@ def automation_show_me_imgui_test_engine(): engine, "Automation", "ShowMeImGuiTestEngine" ) - def test_open_popup_func(ctx): - tab_imm_apps_name = "//**/Immediate Apps" - tab_intro_name = "//**/Dear ImGui Bundle" + def test_func(ctx): + tab_imm_apps_name = "//**/Demo Apps" + tab_intro_name = "//**/Intro" ctx.mouse_move(tab_imm_apps_name) ctx.mouse_click(0) @@ -72,7 +72,7 @@ def test_open_popup_func(ctx): ctx.mouse_move(tab_intro_name) ctx.mouse_click(0) - automation.test_func = test_open_popup_func + automation.test_func = test_func return automation @@ -113,15 +113,16 @@ def demo_gui(): Welcome to the interactive manual for *Dear ImGui Bundle*! This manual present lots of examples, together with their code (in C++ and Python). 
Advices: - * This interactive manual works best when viewed together with "Dear ImGui Bundle docs" + * For Python users, read this introduction to Immediate Mode GUI with Python and Dear ImGui Bundle """ ) imgui.set_cursor_pos_x(imgui.get_cursor_pos_x() + hello_imgui.em_size(1.0)) - if imgui.button("Open Dear ImGui Bundle docs"): - webbrowser.open("https://pthom.github.io/imgui_bundle/") + if imgui.button("Immediate Mode GUI with Python"): + webbrowser.open("https://github.com/pthom/imgui_bundle/blob/main/docs/docs_md/imgui_python_intro.md") imgui_md.render_unindented( """ + * This interactive manual works best when viewed together with ["Dear ImGui Bundle docs"](https://pthom.github.io/imgui_bundle/) * Browse through demos in the different tabs: at the top of each tab, there is a collapsible header named "Code for this demo". Click on it to show the source code for the current demo. """ ) @@ -134,7 +135,7 @@ def demo_gui(): imgui_md.render_unindented( """ - * The "Immediate Apps" tab is especially interesting, as it provides sample starter apps from which you can take inspiration. Click on the "View Code" button to view the app's code, and click on "Run" to run them. + * The "Demo Apps" tab is especially interesting, as it provides sample starter apps from which you can take inspiration. Click on the "View Code" button to view the app's code, and click on "Run" to run them. 
""" ) if hello_imgui.get_runner_params().use_imgui_test_engine: diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_md.py b/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_md.py index 6ee19c9..227537d 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_md.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_md.py @@ -1,19 +1,23 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from imgui_bundle import imgui_md, immapp def example_markdown_string() -> str: markdown = r""" -# Markdown with imgui_md -[imgui_md](https://github.com/mekhontsev/imgui_md) is a markdown renderer for Dear ImGui using MD4C parser. +# Markdown example (H1) -### Supported features +Hello World!
+![World](images/world.png) -imgui_md currently supports the following markdown functionality: +## Acknowledgments (H2) +This markdown renderer is based on [imgui_md](https://github.com/mekhontsev/imgui_md), by Dmitry Mekhontsev. -* Images +### Supported features (H3) + +imgui_md currently supports the following markdown functionality. + +#### Text formatting (H4) -![World](images/world.png) * Wrapped text * Headers * *Emphasis* (\*Emphasis\*) @@ -29,7 +33,7 @@ def example_markdown_string() -> str: * Backslash Escapes * Inline `code element` (using \`code element\`) * Tables -* Block code (using \`\`\`) +* Block code like this (using \`\`\`) ``` int answer() { @@ -40,6 +44,8 @@ def example_markdown_string() -> str: ---- +#### Tables (H4) + *Warning about tables layout*: the first row will impose the columns widths. Use nbsp\; to increase the columns sizes on the first row if required. diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_show_demo_window.py b/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_show_demo_window.py index d2185f3..afa5894 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_show_demo_window.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_imgui_show_demo_window.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from imgui_bundle import imgui, imgui_md, immapp, ImVec2 from imgui_bundle.demos_python import demo_utils # this will set the assets folder import webbrowser @@ -33,14 +33,14 @@ def demo_gui(): # SetImGuiDemoCodeWindowPos(ImVec2(ImGui::GetCursorScreenPos().x + windowSize.x, ImGui::GetCursorScreenPos().y) , # windowSize, ImGuiCond_Appearing); window_size = ImVec2(imgui.get_content_region_avail().x / 2, imgui.get_content_region_avail().y) - 
imgui.set_imgui_demo_window_pos(imgui.get_cursor_screen_pos(), window_size, imgui.Cond_.appearing.value) + imgui.set_imgui_demo_window_pos(imgui.get_cursor_screen_pos(), window_size, imgui.Cond_.appearing) imgui.set_imgui_demo_code_window_pos( ImVec2( imgui.get_cursor_screen_pos().x + window_size.x, imgui.get_cursor_screen_pos().y, ), window_size, - imgui.Cond_.appearing.value, + imgui.Cond_.appearing, ) imgui.show_demo_window() diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_imguizmo_launcher.py b/blimgui/dist64/imgui_bundle/demos_python/demo_imguizmo_launcher.py index bd0ca21..3dfdd30 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_imguizmo_launcher.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_imguizmo_launcher.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle import os.path import subprocess import sys @@ -21,11 +21,12 @@ def demo_gui(): imgui.text( "Click the button below to launch the demo (below the button is a screenshot of the app that will be launched)" ) - if imgui.button("Run gizmo demo"): - this_dir = os.path.dirname(__file__) - subprocess.Popen( - [sys.executable, this_dir + "/demos_imguizmo/demo_gizmo.py"] - ) + if demo_utils.can_run_subprocess(): + if imgui.button("Run gizmo demo"): + this_dir = os.path.dirname(__file__) + subprocess.Popen( + [sys.executable, this_dir + "/demos_imguizmo/demo_gizmo.py"] + ) hello_imgui.image_from_asset( "images/gizmo_screenshot.jpg", size=ImVec2(0, immapp.em_size(15.0)) ) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_immapp_launcher.py b/blimgui/dist64/imgui_bundle/demos_python/demo_immapp_launcher.py index 64ff92d..5bc3040 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_immapp_launcher.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_immapp_launcher.py @@ 
-1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle import os from imgui_bundle import imgui_md @@ -66,15 +66,12 @@ def make_gui() -> GuiFunction: "demo_pydantic", "Python: How to use ImVec2 and ImVec4 with Pydantic", ), - DemoApp( - "demo_font_common_glyph_range", - "How to load fonts with specific glyph ranges (e.g., Chinese, Japanese, Korean)", - ), - DemoApp( - "imgui_example_glfw_opengl3", - "Python: translation of the [GLFW+OpenGL3 example](https://github.com/ocornut/imgui/blob/master/examples/example_glfw_opengl3/main.cpp) from Dear ImGui. " - "Demonstrates how to port from C++ to Python (here, *backend rendering is implemented in C++*)", - ), + # Disabled since v1.92 (use pure Python backend instead) + # DemoApp( + # "imgui_example_glfw_opengl3", + # "Python: translation of the [GLFW+OpenGL3 example](https://github.com/ocornut/imgui/blob/master/examples/example_glfw_opengl3/main.cpp) from Dear ImGui. 
" + # "Demonstrates how to port from C++ to Python (here, *backend rendering is implemented in C++*)", + # ), DemoApp( "example_python_backend_glfw3", "Python: how to use ImGui with GLFW3 using a *full python* backend", diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_immvision_launcher.py b/blimgui/dist64/imgui_bundle/demos_python/demo_immvision_launcher.py index 98ee9e1..4dffbae 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_immvision_launcher.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_immvision_launcher.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from imgui_bundle import imgui, immapp, imgui_md, has_submodule HAS_IMMVISION = has_submodule("immvision") if HAS_IMMVISION: @@ -8,6 +8,7 @@ HAS_OPENCV = importlib.util.find_spec("cv2") is not None +HAS_PILLOW = importlib.util.find_spec("PIL") is not None if HAS_IMMVISION: @@ -31,6 +32,19 @@ def demo_gui(): """ ) + if not HAS_PILLOW: + imgui.new_line() + imgui_md.render_unindented(""" + ## Missing dependency: Pillow + This demo requires the Python package *Pillow* to load images. + Please install it with: + ``` + pip install Pillow + ``` + (This demo will use dummy images until Pillow is installed.) 
+ """) + imgui.new_line() + if imgui.collapsing_header("Display images"): demos_immvision.demo_immvision_display.demo_gui() demo_utils.show_python_vs_cpp_file("demos_immvision/demo_immvision_display") @@ -40,12 +54,19 @@ def demo_gui(): if imgui.collapsing_header("Image inspector"): demos_immvision.demo_immvision_inspector.demo_gui() demo_utils.show_python_vs_cpp_file("demos_immvision/demo_immvision_inspector") - if HAS_OPENCV: - if imgui.collapsing_header("Example with image processing"): + if imgui.collapsing_header("Example with image processing"): + if HAS_OPENCV: demos_immvision.demo_immvision_process.demo_gui() demo_utils.show_python_vs_cpp_file( "demos_immvision/demo_immvision_process", nb_lines=40 ) + else: + imgui_md.render_unindented(""" + This demo requires OpenCv. Please install OpenCv to run it, with: + ``` + pip install opencv-python + ``` + """) def main(): diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_implot.py b/blimgui/dist64/imgui_bundle/demos_python/demo_implot.py index 3fd4cdf..b7dd403 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_implot.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_implot.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle import webbrowser from imgui_bundle import imgui, imgui_md, immapp from imgui_bundle.demos_python.demos_implot3d import implot3d_demo diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_logger.py b/blimgui/dist64/imgui_bundle/demos_python/demo_logger.py index cb0ca93..f048c9e 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_logger.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_logger.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT 
License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle import random from imgui_bundle import imgui, hello_imgui, imgui_md, immapp from imgui_bundle.demos_python.demo_utils import api_demos diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_nanovg_launcher.py b/blimgui/dist64/imgui_bundle/demos_python/demo_nanovg_launcher.py index ed709bd..24e1d76 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_nanovg_launcher.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_nanovg_launcher.py @@ -42,8 +42,9 @@ def demo_gui(): "Click the button below to launch the demo" ) imgui.new_line() - if imgui.button("Run full demo"): - _do_spawn_demo("demo_nanovg_full") + if demo_utils.can_run_subprocess(): + if imgui.button("Run full demo"): + _do_spawn_demo("demo_nanovg_full") imgui.end_group() imgui.same_line(imgui.get_window_width() - hello_imgui.em_size(14.0)) @@ -72,8 +73,9 @@ def demo_gui(): "Click the button below to launch the demo" ) imgui.new_line() - if imgui.button("Run simple demo"): - _do_spawn_demo("demo_nanovg_heart") + if demo_utils.can_run_subprocess(): + if imgui.button("Run simple demo"): + _do_spawn_demo("demo_nanovg_heart") imgui.end_group() imgui.same_line(imgui.get_window_width() - hello_imgui.em_size(14.0)) if hello_imgui.image_button_from_asset("images/nanovg_demo_heart.jpg", ImVec2(hello_imgui.em_size(11.0), 0.0)): diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_node_editor_launcher.py b/blimgui/dist64/imgui_bundle/demos_python/demo_node_editor_launcher.py index 0984a31..507834b 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_node_editor_launcher.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_node_editor_launcher.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from 
imgui_bundle import imgui, hello_imgui, immapp, imgui_md from imgui_bundle.demos_python import demo_utils # this will set the assets folder from imgui_bundle.demos_python import demos_node_editor diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_tex_inspect_launcher.py b/blimgui/dist64/imgui_bundle/demos_python/demo_tex_inspect_launcher.py index 6e25e95..05552cc 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_tex_inspect_launcher.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_tex_inspect_launcher.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle import os.path import subprocess import sys @@ -22,19 +22,21 @@ def demo_gui(): if imgui.collapsing_header("Simple Demo"): demos_tex_inspect.demo_tex_inspect_simple.demo_gui() demo_utils.show_python_vs_cpp_file("demos_tex_inspect/demo_tex_inspect_simple") - if imgui.collapsing_header("Full Demo"): - imgui.text("Click the button below to launch the demo") - if imgui.button("Run demo"): - this_dir = os.path.dirname(__file__) - subprocess.Popen( - [ - sys.executable, - this_dir + "/demos_tex_inspect/demo_tex_inspect_demo_window.py", - ] + + if demo_utils.can_run_subprocess(): + if imgui.collapsing_header("Full Demo"): + imgui.text("Click the button below to launch the demo") + if imgui.button("Run demo"): + this_dir = os.path.dirname(__file__) + subprocess.Popen( + [ + sys.executable, + this_dir + "/demos_tex_inspect/demo_tex_inspect_demo_window.py", + ] + ) + demo_utils.show_python_vs_cpp_file( + "demos_tex_inspect/demo_tex_inspect_demo_window" ) - demo_utils.show_python_vs_cpp_file( - "demos_tex_inspect/demo_tex_inspect_demo_window" - ) def main(): diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_text_edit.py b/blimgui/dist64/imgui_bundle/demos_python/demo_text_edit.py index 
7975ac2..15c94fe 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_text_edit.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_text_edit.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from imgui_bundle import imgui, imgui_color_text_edit as ed, imgui_md from imgui_bundle.immapp import static @@ -22,7 +22,7 @@ def demo_gui(): imgui_md.render( """ -# ImGuiColorTextEdit: +# ImGuiColorTextEdit [ImGuiColorTextEdit](https://github.com/BalazsJako/ImGuiColorTextEdit) is a colorizing text editor for ImGui, able to colorize C, C++, hlsl, Sql, angel_script and lua code """ ) @@ -41,7 +41,8 @@ def show_palette_buttons(): editor.set_palette(TextEditor.PaletteId.mariana) show_palette_buttons() - imgui.push_font(imgui_md.get_code_font()) + code_font = imgui_md.get_code_font() + imgui.push_font(code_font.font, code_font.size) editor.render("Code") imgui.pop_font() diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_themes.py b/blimgui/dist64/imgui_bundle/demos_python/demo_themes.py index 1792ca0..9dd29db 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_themes.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_themes.py @@ -1,48 +1,30 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +"""Demo for ImGui theming using ImGui Bundle. 
+ +In order to apply a theme, you can use: +======================================= + hello_imgui.apply_theme(hello_imgui.ImGuiTheme_.cherry) + +""" +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from imgui_bundle import immapp, hello_imgui, imgui, imgui_md -ALL_THEMES = [ - hello_imgui.ImGuiTheme_.darcula_darker, - hello_imgui.ImGuiTheme_.darcula, - hello_imgui.ImGuiTheme_.imgui_colors_classic, - hello_imgui.ImGuiTheme_.imgui_colors_dark, - hello_imgui.ImGuiTheme_.imgui_colors_light, - hello_imgui.ImGuiTheme_.material_flat, - hello_imgui.ImGuiTheme_.photoshop_style, - hello_imgui.ImGuiTheme_.gray_variations, - hello_imgui.ImGuiTheme_.gray_variations_darker, - hello_imgui.ImGuiTheme_.microsoft_style, - hello_imgui.ImGuiTheme_.cherry, - hello_imgui.ImGuiTheme_.light_rounded, - hello_imgui.ImGuiTheme_.so_dark_accent_blue, - hello_imgui.ImGuiTheme_.so_dark_accent_yellow, - hello_imgui.ImGuiTheme_.so_dark_accent_red, - hello_imgui.ImGuiTheme_.black_is_black, - hello_imgui.ImGuiTheme_.white_is_white, -] - -ALL_THEMES_NAMES = [theme.name for theme in ALL_THEMES] - - -@immapp.static(current_theme_idx=0) def demo_gui(): - static = demo_gui imgui_md.render_unindented( """ # Theming HelloImGui adds support for advanced theming to ImGui. Select the menu View/Theme/Theme tweak window to explore all the themes and their customization. 
- """ + """ ) + imgui.new_line() + tweaked_theme = hello_imgui.get_runner_params().imgui_window_params.tweaked_theme + theme_changed = hello_imgui.show_theme_tweak_gui(tweaked_theme) + if theme_changed: + hello_imgui.apply_tweaked_theme(tweaked_theme) - imgui.text("Theme") - changed, static.current_theme_idx = imgui.list_box( - "##Theme", static.current_theme_idx, ALL_THEMES_NAMES, len(ALL_THEMES_NAMES) - ) - if changed: - hello_imgui.apply_theme(ALL_THEMES[static.current_theme_idx]) + imgui.show_demo_window() if __name__ == "__main__": diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/__init__.py b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/__init__.py index 1d62c5e..88a903b 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/__init__.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/__init__.py @@ -4,9 +4,11 @@ show_python_vs_cpp_and_run, show_python_vs_cpp_file, set_hello_imgui_demo_assets_folder, + spawn_demo_file, + can_run_subprocess ) from imgui_bundle.demos_python.demo_utils.animate_logo import animate_logo -from imgui_bundle.demos_python.demo_utils.imread_pil import imread_pil +from imgui_bundle.demos_python.demo_utils.imread_demo import imread_demo set_hello_imgui_demo_assets_folder() @@ -17,5 +19,7 @@ "show_python_vs_cpp_file", "set_hello_imgui_demo_assets_folder", "animate_logo", - "imread_pil", + "imread_demo", + "spawn_demo_file", + "can_run_subprocess" ] diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/animate_logo.py b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/animate_logo.py index 359d5f7..fc859c2 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/animate_logo.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/animate_logo.py @@ -9,7 +9,7 @@ def draw_transparent_image( ): alpha_color = imgui.get_color_u32(ImVec4(1.0, 1.0, 1.0, alpha)) imgui.get_foreground_draw_list().add_image_quad( - texture, + imgui.ImTextureRef(texture), ImVec2(rect.min.x, 
rect.min.y), ImVec2(rect.max.x, rect.min.y), ImVec2(rect.max.x, rect.max.y), diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/api_demos.py b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/api_demos.py index aae3822..5277016 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/api_demos.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/api_demos.py @@ -108,3 +108,19 @@ def read_markdown_code(doc_filename: str) -> str: doc_file = markdown_doc_folder() + "/" + doc_filename + ".adoc.md" r: str = read_code(doc_file) # type: ignore return r + + +def can_run_subprocess() -> bool: + from imgui_bundle._imgui_bundle import __bundle_pyodide__ # type: ignore + return not __bundle_pyodide__ + + +def spawn_demo_file(demo_file_path: str) -> None: + if can_run_subprocess(): + import subprocess + import sys + subprocess.Popen( + [sys.executable, demo_file_path] + ) + else: + print("Cannot run subprocess in this environment.") diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/demo_app_table.py b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/demo_app_table.py index c6de565..af12111 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/demo_app_table.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/demo_app_table.py @@ -1,10 +1,9 @@ from dataclasses import dataclass from typing import List -import subprocess -import sys import os from imgui_bundle import imgui, imgui_md, immapp, hello_imgui, ImVec2 +from imgui_bundle.demos_python import demo_utils from typing import Callable @@ -115,10 +114,10 @@ def gui(self): def fn_table_gui(): table_flags = ( - imgui.TableFlags_.row_bg.value - | imgui.TableFlags_.borders.value - | imgui.TableFlags_.resizable.value - | imgui.TableFlags_.sizing_stretch_same.value + imgui.TableFlags_.row_bg + | imgui.TableFlags_.borders + | imgui.TableFlags_.resizable + | imgui.TableFlags_.sizing_stretch_same ) nb_columns = 3 if imgui.begin_table("Apps", nb_columns, table_flags): 
@@ -144,10 +143,9 @@ def fn_table_gui(): imgui.same_line() - if imgui.button("Run"): - subprocess.Popen( - [sys.executable, self._demo_python_file_path(demo_app)] - ) + if demo_utils.can_run_subprocess(): + if imgui.button("Run"): + demo_utils.spawn_demo_file(self._demo_python_file_path(demo_app)) imgui.pop_id() imgui.end_table() diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/imread_pil.py b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/imread_demo.py similarity index 62% rename from blimgui/dist64/imgui_bundle/demos_python/demo_utils/imread_pil.py rename to blimgui/dist64/imgui_bundle/demos_python/demo_utils/imread_demo.py index a7083cc..cd2639c 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demo_utils/imread_pil.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_utils/imread_demo.py @@ -1,84 +1,62 @@ -import numpy as np -from numpy.typing import NDArray - - -_HAS_PIL = False -try: - from PIL import Image - _HAS_PIL = True -except ImportError: - import logging - logging.error(""" - pillow is required to read images for Dear ImGui Bundle demo (using demo pattern instead). Install it with: - pip install pillow # or conda install pillow - """) - - -def _dummy_image(with_alpha: bool) -> NDArray[np.uint8]: - """ - Generates a 400x400 RGBA image with a visually appealing sine wave interference pattern - and a transparent background. 
- """ - width, height = 400, 400 - - # Create a grid of x and y coordinates - x = np.linspace(-1 * np.pi, 1 * np.pi, width) - y = np.linspace(-1 * np.pi, 1 * np.pi, height) - X, Y = np.meshgrid(x, y) - - # Calculate sine wave interference pattern - pattern = np.sin(X**2 + Y**2) + np.sin(3 * X + 2.5 * Y) - - # Normalize the pattern to range [0, 1] - normalized_pattern = (pattern - pattern.min()) / (pattern.max() - pattern.min()) - - # Map the pattern to RGB colors - R = (np.sin(2 * np.pi * normalized_pattern) * 127 + 128).astype(np.uint8) - G = (np.cos(3 * np.pi * normalized_pattern + np.pi / 2) * 127 + 128).astype(np.uint8) - B = (np.sin(2 * np.pi * normalized_pattern + np.pi) * 127 + 128).astype(np.uint8) - - # Combine into an RGB image - rgb_image = np.dstack((R, G, B)) - - if not with_alpha: - return rgb_image - - # Create an alpha channel: fully opaque for non-zero patterns - alpha = (normalized_pattern > 0.15).astype(np.uint8) * 255 - - # Combine RGB and alpha channels into RGBA - rgba_image = np.dstack((rgb_image, alpha)) - - return rgba_image - - - - -def imread_pil(image_file: str, convert_to_bgr: bool = False, load_alpha: bool = False) -> NDArray[np.uint]: - """Read an image from a file using PIL, returns a numpy array.""" - if not _HAS_PIL: - return _dummy_image(load_alpha) - - image_pil = Image.open(image_file) - - def rgb_to_bgr(image: NDArray[np.uint]) -> NDArray[np.uint]: - assert len(image.shape) == 3 - if image.shape[2] == 3: - return np.ascontiguousarray(image[:, :, ::-1]) - elif image.shape[2] == 4: - bgr = image[:, :, :3][:, :, ::-1] - a = image[:, :, 3] - bgra = np.dstack((bgr, a)) - return np.ascontiguousarray(bgra) - else: - raise ValueError("Invalid shape") - - if load_alpha: - image = np.array(image_pil.convert("RGBA")) - else: - image = np.array(image_pil.convert("RGB")) - - if convert_to_bgr: - image = rgb_to_bgr(image) - - return image +import numpy as np +from numpy.typing import NDArray + + +_HAS_PIL = False +try: + from PIL import 
Image + _HAS_PIL = True +except ImportError: + import logging + logging.error(""" + pillow is required to read images for Dear ImGui Bundle demo (using demo pattern instead). Install it with: + pip install pillow # or conda install pillow + """) + + +def _dummy_image(with_alpha: bool) -> NDArray[np.uint8]: + """ + Generates a 400x400 RGBA image with a visually appealing sine wave interference pattern + and a transparent background. + """ + width, height = 400, 400 + + # Create a grid of x and y coordinates + x = np.linspace(-1 * np.pi, 1 * np.pi, width) + y = np.linspace(-1 * np.pi, 1 * np.pi, height) + X, Y = np.meshgrid(x, y) + + # Calculate sine wave interference pattern + pattern = np.sin(X**2 + Y**2) + np.sin(3 * X + 2.5 * Y) + + # Normalize the pattern to range [0, 1] + normalized_pattern = (pattern - pattern.min()) / (pattern.max() - pattern.min()) + + # Map the pattern to RGB colors + R = (np.sin(2 * np.pi * normalized_pattern) * 127 + 128).astype(np.uint8) + G = (np.cos(3 * np.pi * normalized_pattern + np.pi / 2) * 127 + 128).astype(np.uint8) + B = (np.sin(2 * np.pi * normalized_pattern + np.pi) * 127 + 128).astype(np.uint8) + + # Combine into an RGB image + rgb_image = np.dstack((R, G, B)) + + if not with_alpha: + return rgb_image + + # Create an alpha channel: fully opaque for non-zero patterns + alpha = (normalized_pattern > 0.15).astype(np.uint8) * 255 + + # Combine RGB and alpha channels into RGBA + rgba_image = np.dstack((rgb_image, alpha)) + + return rgba_image + + +def imread_demo(image_file: str, load_alpha: bool = False) -> NDArray[np.uint8]: + """Read an image using Pillow or ImageIO, fallback to dummy pattern.""" + if _HAS_PIL: + img = Image.open(image_file) + mode = "RGBA" if load_alpha else "RGB" + return np.array(img.convert(mode)) + + return _dummy_image(load_alpha) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demo_widgets.py b/blimgui/dist64/imgui_bundle/demos_python/demo_widgets.py index b6d68df..f205a12 100644 --- 
a/blimgui/dist64/imgui_bundle/demos_python/demo_widgets.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demo_widgets.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from typing import List from imgui_bundle import ( imgui, @@ -88,7 +88,9 @@ def show_int_knobs(knob_size: float): imgui.end_group() +@immapp.static(show_full_demo=False) def demo_spinner(): + static = demo_spinner from imgui_bundle import imspinner imgui_md.render( @@ -123,6 +125,11 @@ def demo_spinner(): color, ) + imgui.same_line() + _, static.show_full_demo = imgui.checkbox("Show full spinners demo", static.show_full_demo) + if static.show_full_demo: + imspinner.demo_spinners() + @immapp.static(flag=True) def demo_toggle(): @@ -425,7 +432,7 @@ def show_cool_bar_button(label): button_labels = ["A", "B", "C", "D", "E", "F"] imgui_md.render_unindented( """ - # ImCoolBar: + # ImCoolBar ImCoolBar provides a dock-like Cool bar for Dear ImGui """ ) @@ -449,15 +456,14 @@ def show_cool_bar_button(label): def demo_gui(): demo_cool_bar() - demo_portable_file_dialogs() - imgui.new_line() - demo_imfile_dialog() - imgui.new_line() - demo_knobs() demo_toggle() - imgui.new_line() demo_spinner() + demo_knobs() demo_command_palette() + imgui.new_line() + demo_portable_file_dialogs() + imgui.new_line() + demo_imfile_dialog() if __name__ == "__main__": diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_imguizmo/demo_gizmo.py b/blimgui/dist64/imgui_bundle/demos_python/demos_imguizmo/demo_gizmo.py index e1626a4..6efaa63 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_imguizmo/demo_gizmo.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_imguizmo/demo_gizmo.py @@ -15,7 +15,6 @@ from typing import List, Tuple import math -import munch # type: ignore from imgui_bundle import imgui, imguizmo, 
hello_imgui, ImVec2, immapp from imgui_bundle.demos_python.demo_utils.api_demos import GuiFunction @@ -119,9 +118,9 @@ def EditTransform( ) -> None: statics = EditTransform.statics global mCurrentGizmoOperation - if statics is None: - EditTransform.statics = munch.Munch() - statics = EditTransform.statics + + statics = EditTransform + if not hasattr(statics, "initialized"): statics.mCurrentGizmoMode = gizmo.MODE.local statics.useSnap = False statics.snap = Matrix3([1.0, 1.0, 1.0]) @@ -130,6 +129,7 @@ def EditTransform( statics.boundSizing = False statics.boundSizingSnap = False statics.gizmoWindowFlags = 0 + statics.initialized = True if editTransformDecomposition: if imgui.is_key_pressed(imgui.Key.t): @@ -211,10 +211,10 @@ def EditTransform( viewManipulateTop = 0.0 if useWindow: - imgui.set_next_window_size(ImVec2(800, 400), imgui.Cond_.appearing.value) - imgui.set_next_window_pos(ImVec2(400, 20), imgui.Cond_.appearing.value) + imgui.set_next_window_size(ImVec2(800, 400), imgui.Cond_.appearing) + imgui.set_next_window_pos(ImVec2(400, 20), imgui.Cond_.appearing) imgui.push_style_color( - imgui.Col_.window_bg.value, imgui.ImColor(0.35, 0.3, 0.3).value + imgui.Col_.window_bg, imgui.ImColor(0.35, 0.3, 0.3).value ) imgui.begin("Gizmo", None, statics.gizmoWindowFlags) gizmo.set_drawlist() @@ -309,12 +309,12 @@ def gui(): gizmo.set_orthographic(not isPerspective) gizmo.begin_frame() - imgui.set_next_window_pos(ImVec2(1024, 100), imgui.Cond_.appearing.value) - imgui.set_next_window_size(ImVec2(256, 256), imgui.Cond_.appearing.value) + imgui.set_next_window_pos(ImVec2(1024, 100), imgui.Cond_.appearing) + imgui.set_next_window_size(ImVec2(256, 256), imgui.Cond_.appearing) # create a window and insert the inspector - imgui.set_next_window_pos(ImVec2(10, 10), imgui.Cond_.appearing.value) - imgui.set_next_window_size(ImVec2(320, 340), imgui.Cond_.appearing.value) + imgui.set_next_window_pos(ImVec2(10, 10), imgui.Cond_.appearing) + imgui.set_next_window_size(ImVec2(320, 340), 
imgui.Cond_.appearing) imgui.begin("Editor") if imgui.radio_button("Full view", not useWindow): useWindow = False @@ -376,8 +376,9 @@ def gui(): imgui.separator() for matId in range(gizmoCount): - gizmo.set_id(matId) + gizmo.push_id(matId) EditTransform(cameraView, cameraProjection, gObjectMatrix[matId], lastUsing == matId) + gizmo.pop_id() imgui.end() diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_assets_addons.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_assets_addons.py index ff44b8d..d2787f5 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_assets_addons.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_assets_addons.py @@ -115,8 +115,8 @@ def demo_plot(app_state: AppState): implot.setup_axes( "", "", - implot.AxisFlags_.no_decorations.value, - implot.AxisFlags_.no_decorations.value, + implot.AxisFlags_.no_decorations, + implot.AxisFlags_.no_decorations, ) implot.plot_pie_chart( data_labels, np.array(app_state.plot_data), 0.5, 0.5, 0.35, "%.2f", 90 diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_custom_background.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_custom_background.py index 7ca28ac..a1b021b 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_custom_background.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_custom_background.py @@ -8,7 +8,7 @@ from imgui_bundle import hello_imgui, imgui, immapp, ImVec2, ImVec4, imgui_md from imgui_bundle.demos_python import demo_utils -import OpenGL.GL as GL # type: ignore +import OpenGL.GL as GL # pip install PyOpenGL from dataclasses import dataclass import sys @@ -538,8 +538,8 @@ def custom_background(app_state: AppState): def gui(app_state: AppState): """GUI for modifying shader parameters.""" - imgui.set_next_window_pos(hello_imgui.em_to_vec2(0.0, 0.0), imgui.Cond_.appearing.value) - imgui.set_next_window_size(hello_imgui.em_to_vec2(31.0, 14.0), 
imgui.Cond_.appearing.value) + imgui.set_next_window_pos(hello_imgui.em_to_vec2(0.0, 0.0), imgui.Cond_.appearing) + imgui.set_next_window_size(hello_imgui.em_to_vec2(31.0, 14.0), imgui.Cond_.appearing) imgui.begin("Shader parameters") imgui_md.render_unindented(""" diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_docking.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_docking.py index 645fe07..5524517 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_docking.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_docking.py @@ -72,19 +72,14 @@ def load_fonts(app_state: AppState): # This is called by runnerParams.callbacks hello_imgui.imgui_default_settings.load_default_font_with_font_awesome_icons() # Load the title font - # app_state.title_font = hello_imgui.load_font("fonts/DroidSans.ttf", 18.0) - font_loading_params_title_icons = hello_imgui.FontLoadingParams() - font_loading_params_title_icons.merge_font_awesome = True - app_state.title_font = hello_imgui.load_font("fonts/Roboto/Roboto-BoldItalic.ttf", 18, font_loading_params_title_icons) + app_state.title_font = hello_imgui.load_font_ttf_with_font_awesome_icons("fonts/Roboto/Roboto-BoldItalic.ttf", 18) # Load the emoji font font_loading_params_emoji = hello_imgui.FontLoadingParams() - font_loading_params_emoji.use_full_glyph_range = True app_state.emoji_font = hello_imgui.load_font("fonts/NotoEmoji-Regular.ttf", 24., font_loading_params_emoji) # Load a large icon font font_loading_params_large_icon = hello_imgui.FontLoadingParams() - font_loading_params_large_icon.use_full_glyph_range = True app_state.large_icon_font = hello_imgui.load_font("fonts/fontawesome-webfont.ttf", 24., font_loading_params_large_icon) # Load a colored font @@ -93,6 +88,10 @@ def load_fonts(app_state: AppState): # This is called by runnerParams.callbacks app_state.color_font = hello_imgui.load_font("fonts/Playbox/Playbox-FREE.otf", 24., font_loading_params_color) 
+def push_font_with_default_size(font: imgui.ImFont): + imgui.push_font(font, font.legacy_size) + + ########################################################################## # Save additional settings in the ini file @@ -143,7 +142,7 @@ def save_my_app_settings(app_state: AppState): @immapp.static(last_hide_time=1) def demo_hide_window(app_state: AppState): # Display a button that will hide the application window - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Hide app window") imgui.pop_font() @@ -166,7 +165,7 @@ def demo_show_additional_window(app_state: AppState): # Note: you should not modify manually the content of runnerParams.docking_params.dockable_windows # (since HelloImGui is constantly looping on it) - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Dynamically add window") imgui.pop_font() @@ -191,7 +190,7 @@ def demo_show_additional_window(app_state: AppState): def demo_basic_widgets(app_state: AppState): - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Basic widgets demo") imgui.pop_font() @@ -216,7 +215,7 @@ def demo_basic_widgets(app_state: AppState): def demo_user_settings(app_state: AppState): - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("User settings") imgui.pop_font() @@ -236,7 +235,7 @@ def demo_user_settings(app_state: AppState): def demo_rocket(app_state: AppState): - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Rocket demo") imgui.pop_font() @@ -263,7 +262,7 @@ def demo_rocket(app_state: AppState): def demo_docking_flags(app_state: AppState): - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Main dock space node flags") imgui.pop_font() imgui.text_wrapped( @@ -320,7 +319,7 @@ def __init__(self, flag, label, 
tip): def gui_window_layout_customization(app_state: AppState): - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Switch between layouts") imgui.pop_font() imgui.text('with the menu "View/Layouts"') @@ -332,7 +331,7 @@ def gui_window_layout_customization(app_state: AppState): imgui.separator() - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Change the theme") imgui.pop_font() imgui.text('with the menu "View/Theme"') @@ -366,19 +365,19 @@ def gui_window_alternative_theme(app_state: AppState): window_opened = imgui.begin("Alternative Theme") if window_opened: # Display some widgets - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Alternative Theme") imgui.pop_font() imgui.text("This window uses a different theme") imgui.set_item_tooltip(""" tweaked_theme = hello_imgui.ImGuiTheme.ImGuiTweakedTheme() - tweaked_theme.theme = hello_imgui.ImGuiTheme_.white_is_white.value + tweaked_theme.theme = hello_imgui.ImGuiTheme_.white_is_white tweaked_theme.tweaks.rounding = 0.0 hello_imgui.apply_tweaked_theme(tweaked_theme) """ ) - if imgui.collapsing_header("Basic Widgets", imgui.TreeNodeFlags_.default_open.value): + if imgui.collapsing_header("Basic Widgets", imgui.TreeNodeFlags_.default_open): if not hasattr(statics, "checked"): statics.checked = True _, statics.checked = imgui.checkbox("Checkbox", statics.checked) @@ -437,7 +436,7 @@ def gui_window_alternative_theme(app_state: AppState): # a popup with a modal window if imgui.button("Open Modal"): imgui.open_popup("MyModal") - popup_opened, _ = imgui.begin_popup_modal("MyModal", None, imgui.WindowFlags_.always_auto_resize.value) + popup_opened, _ = imgui.begin_popup_modal("MyModal", None, imgui.WindowFlags_.always_auto_resize) if popup_opened: imgui.text("This is a modal window") if imgui.button("Close"): @@ -463,7 +462,7 @@ def 
gui_window_alternative_theme(app_state: AppState): def demo_assets(app_state: AppState): - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Image From Assets") imgui.pop_font() hello_imgui.begin_group_column() @@ -474,7 +473,7 @@ def demo_assets(app_state: AppState): def demo_fonts(app_state: AppState): - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Fonts - " + icons_fontawesome_6.ICON_FA_ROCKET) imgui.pop_font() @@ -485,7 +484,7 @@ def demo_fonts(app_state: AppState): imgui.text("Emojis") with imgui_ctx.begin_group(): - imgui.push_font(app_state.emoji_font) + push_font_with_default_size(app_state.emoji_font) imgui.text("✌❤🌴🚀") imgui.pop_font() @@ -493,7 +492,7 @@ def demo_fonts(app_state: AppState): imgui.set_tooltip("Example with NotoEmoji font") imgui.text("Colored Fonts") - imgui.push_font(app_state.color_font) + push_font_with_default_size(app_state.color_font) imgui.text("COLOR!") imgui.pop_font() if imgui.is_item_hovered(): @@ -501,7 +500,7 @@ def demo_fonts(app_state: AppState): def demo_themes(app_state: AppState): - imgui.push_font(app_state.title_font) + push_font_with_default_size(app_state.title_font) imgui.text("Themes") imgui.pop_font() @@ -566,7 +565,7 @@ def show_app_menu_items(): def show_top_toolbar(app_state: AppState): - imgui.push_font(app_state.large_icon_font) + push_font_with_default_size(app_state.large_icon_font) if imgui.button(icons_fontawesome_6.ICON_FA_POWER_OFF): hello_imgui.get_runner_params().app_shall_exit = True @@ -586,7 +585,7 @@ def show_top_toolbar(app_state: AppState): def show_right_toolbar(app_state: AppState): - imgui.push_font(app_state.large_icon_font) + push_font_with_default_size(app_state.large_icon_font) if imgui.button(icons_fontawesome_6.ICON_FA_CIRCLE_ARROW_LEFT): hello_imgui.log(hello_imgui.LogLevel.info, "Clicked on Circle left in the right toolbar") if 
imgui.button(icons_fontawesome_6.ICON_FA_CIRCLE_ARROW_RIGHT): @@ -711,7 +710,7 @@ def create_dockable_windows(app_state: AppState) -> List[hello_imgui.DockableWin dear_imgui_demo_window = hello_imgui.DockableWindow() dear_imgui_demo_window.label = "Dear ImGui Demo" dear_imgui_demo_window.dock_space_name = "MainDockSpace" - dear_imgui_demo_window.imgui_window_flags = imgui.WindowFlags_.menu_bar.value + dear_imgui_demo_window.imgui_window_flags = imgui.WindowFlags_.menu_bar dear_imgui_demo_window.gui_function = imgui.show_demo_window # type: ignore # alternativeThemeWindow @@ -782,7 +781,7 @@ def setup_my_theme(): hello_imgui.apply_tweaked_theme(tweaked_theme) # Note: you can also push/pop the theme in order to apply it only to a specific part of the Gui: hello_imgui.push_tweaked_theme(tweaked_theme) / hello_imgui.pop_tweaked_theme() # Then apply further modifications to ImGui style imgui.get_style().item_spacing = ImVec2(6, 4) # Reduce spacing between items ((8, 4) by default) - imgui.get_style().set_color_(imgui.Col_.text.value, (0.8, 0.8, 0.85, 1.0)) # Change text color + imgui.get_style().set_color_(imgui.Col_.text, (0.8, 0.8, 0.85, 1.0)) # Change text color ########################################################################## diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_drag_and_drop.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_drag_and_drop.py index e672cad..6775167 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_drag_and_drop.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_drag_and_drop.py @@ -54,7 +54,7 @@ def gui_drag_and_drop(state: DemoState) -> None: imgui.button(state.names[n], immapp.em_to_vec2(5.0, 5.0)) # Our buttons are both drag sources and drag targets here! 
- if imgui.begin_drag_drop_source(imgui.DragDropFlags_.none.value): + if imgui.begin_drag_drop_source(imgui.DragDropFlags_.none): # Set payload to carry the index of our item (in python, the payload is an int) drag_data_id = n imgui.set_drag_drop_payload_py_id("DND_DEMO_CELL", drag_data_id) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_font_common_glyph_range.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_font_common_glyph_range.py deleted file mode 100644 index 035f51a..0000000 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_font_common_glyph_range.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Demonstrates how to load a font with Chinese characters and display them in the GUI, -using the common glyph ranges defined in by ImGui. -""" -from imgui_bundle import imgui, hello_imgui, imgui_ctx -from imgui_bundle.demos_python import demo_utils - - -demo_utils.set_hello_imgui_demo_assets_folder() - - -font_cn: imgui.ImFont | None = None - - -def load_font(): - global font_cn - if not hello_imgui.asset_exists("fonts/NotoSerifSC-VariableFont_wght.ttf"): - return - - # Note: this font is not provided with the ImGui bundle (too large). - # You will need to provide it yourself, or use another font. - font_filename = "fonts/NotoSerifSC-VariableFont_wght.ttf" - - # The range of Chinese characters is defined by ImGui as a single list of characters (List[ImWchar]), with a terminating 0. - # (each range is a pair of successive characters in this list, with the second character being the last one in the range) - cn_glyph_ranges_imgui = imgui.get_io().fonts.get_glyph_ranges_chinese_simplified_common() - # We need to convert this list into a list of pairs (List[ImWcharPair]), where each pair is a range of characters. 
- cn_glyph_ranges_pair = hello_imgui.translate_common_glyph_ranges(cn_glyph_ranges_imgui) - - font_loading_params = hello_imgui.FontLoadingParams() - font_loading_params.glyph_ranges = cn_glyph_ranges_pair - font_cn = hello_imgui.load_font(font_filename, 40.0, font_loading_params) - - -def gui(): - if font_cn is not None: - with imgui_ctx.push_font(font_cn): - imgui.text("Hello world") - imgui.text("你好,世界") - else: - imgui.text("Font file not found") - imgui.text_wrapped(""" - This font is not provided with the ImGui bundle (too large). - You will need to provide it yourself, or use another font. - """) - - -runner_params = hello_imgui.RunnerParams() -runner_params.callbacks.load_additional_fonts = load_font -runner_params.callbacks.show_gui = gui -hello_imgui.run(runner_params) - diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_implot_markdown.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_implot_markdown.py index a76e881..761a4d7 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_implot_markdown.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_implot_markdown.py @@ -1,25 +1,44 @@ import numpy as np -from imgui_bundle import implot, imgui_md, immapp -from imgui_bundle.demos_python import demo_utils +# imgui_bundle is a package that provides several imgui-related submodules +from imgui_bundle import (imgui, # first we import ImGui + implot, # ImPlot provides advanced real-time plotting + imgui_md, # imgui_md: markdown rendering for imgui + hello_imgui, # hello_imgui: starter pack for imgui apps + immapp, # helper to activate addons (like implot, markdown, etc.) + ) +def gui(): + # Render some markdown text + imgui_md.render_unindented(""" + # Render an animated plot with ImPlot + This example shows how to use `ImPlot` to render an animated plot, + and how to use `imgui_md` to render markdown text (*this text!*). 
+ """) -def main(): - # This call is specific to the ImGui Bundle interactive manual. In a standard application, you could write: - # hello_imgui.set_assets_folder("my_assets"); # (By default, HelloImGui will search inside "assets") - demo_utils.set_hello_imgui_demo_assets_folder() + # Render an animated plot + if implot.begin_plot( + title_id="Plot", + # size in em units (1em = height of a character) + size=hello_imgui.em_to_vec2(40, 20)): + x = np.arange(0, np.pi * 4, 0.01) + y = np.cos(x + imgui.get_time()) + implot.plot_line("y1", x, y) + implot.end_plot() + + if imgui.button("Exit"): + hello_imgui.get_runner_params().app_shall_exit = True - x = np.arange(0, np.pi * 4, 0.01) - y1 = np.cos(x) - y2 = np.sin(x) - def gui(): - imgui_md.render("# This is the plot of _cosinus_ and *sinus*") # Markdown - if implot.begin_plot("Plot"): - implot.plot_line("y1", x, y1) - implot.plot_line("y2", x, y2) - implot.end_plot() +def main(): + # This call is specific to the ImGui Bundle interactive manual. 
+ from imgui_bundle.demos_python import demo_utils + demo_utils.set_hello_imgui_demo_assets_folder() - immapp.run(gui, with_implot=True, with_markdown=True, window_size=(600, 400)) + # Run the app with ImPlot and markdown support + immapp.run(gui, + with_implot=True, + with_markdown=True, + window_size=(700, 500)) if __name__ == "__main__": diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_python_context_manager.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_python_context_manager.py index 1a6ccd6..e1b8b08 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_python_context_manager.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_python_context_manager.py @@ -36,7 +36,7 @@ def demo_begin(): _, statics.opened_window_expandable = imgui.checkbox("Open Window - Expandable", statics.opened_window_expandable) _, statics.opened_closable_window = imgui.checkbox("Open Closable Window", statics.opened_closable_window) - imgui.set_next_window_size(ImVec2(200, 200), imgui.Cond_.appearing.value) + imgui.set_next_window_size(ImVec2(200, 200), imgui.Cond_.appearing) # Shortest use of imgui_ctx.begin() if statics.opened_window_shortest: @@ -87,12 +87,12 @@ def demo_push_pop(): # Example of push_style_color imgui.separator_text("Push Style Color") - with imgui_ctx.push_style_color(imgui.Col_.text.value, ImVec4(1.0, 0.0, 0.0, 1.0)): + with imgui_ctx.push_style_color(imgui.Col_.text, ImVec4(1.0, 0.0, 0.0, 1.0)): imgui.text("This is red!") # Example of push_style_var imgui.separator_text("Push Style Var") - with imgui_ctx.push_style_var(imgui.StyleVar_.alpha.value, 0.5): + with imgui_ctx.push_style_var(imgui.StyleVar_.alpha, 0.5): imgui.text("This is half transparent!") # Example of push_item_width @@ -163,9 +163,9 @@ def demo_begin_tab_bar(): def demo_begin_table(): - table_flags = (imgui.TableFlags_.borders_h.value - | imgui.TableFlags_.borders_v.value - | imgui.TableFlags_.resizable.value) + table_flags = 
(imgui.TableFlags_.borders_h + | imgui.TableFlags_.borders_v + | imgui.TableFlags_.resizable) with imgui_ctx.begin_table("Table", 3, table_flags) as table: if table: imgui.table_setup_column("Column 1") @@ -229,7 +229,7 @@ def demo_menu_bar(): "Open Window with menu bar", statics.opened_window_with_menu_bar) if statics.opened_window_with_menu_bar: - with imgui_ctx.begin("Window with menu bar", None, imgui.WindowFlags_.menu_bar.value): + with imgui_ctx.begin("Window with menu bar", None, imgui.WindowFlags_.menu_bar): with imgui_ctx.begin_menu_bar(): with imgui_ctx.begin_menu("Enabled Menu", True) as menu: if menu.visible: diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_testengine.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_testengine.py index d468d76..582aaae 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_testengine.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/demo_testengine.py @@ -76,7 +76,7 @@ def test_custom_gui_func(ctx: imgui.test_engine.TestContext) -> None: # Custom GUI for this test: it can edit our custom variable imgui.set_next_window_size(hello_imgui.em_to_vec2(40, 8)) imgui.begin( - "Custom Gui Test Window", None, imgui.WindowFlags_.no_saved_settings.value + "Custom Gui Test Window", None, imgui.WindowFlags_.no_saved_settings ) _, test_var2.my_int = imgui.slider_int("Slider", test_var2.my_int, 0, 1000) imgui.end() @@ -113,10 +113,10 @@ def test_write_func(ctx: imgui.test_engine.TestContext) -> None: # Demo 5: Press Alt+A test_alt_a = imgui.test_engine.register_test(engine, "Demo Tests", "Test key combination (Alt-A)") def test_alt_a_func(ctx: imgui.test_engine.TestContext) -> None: - ctx.key_down(imgui.Key.left_alt.value) - ctx.key_down(imgui.Key.a.value) - ctx.key_up(imgui.Key.a.value) - ctx.key_up(imgui.Key.left_alt.value) + ctx.key_down(imgui.Key.left_alt) + ctx.key_down(imgui.Key.a) + ctx.key_up(imgui.Key.a) + ctx.key_up(imgui.Key.left_alt) test_alt_a.test_func = 
test_alt_a_func diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/haiku_butterfly.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/haiku_butterfly.py index 7fd4edd..668843f 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/haiku_butterfly.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/haiku_butterfly.py @@ -79,7 +79,7 @@ def gui_params(self): def gui_plot(self): if implot3d.begin_plot("Lorenz Attractor", hello_imgui.em_to_vec2(40, 40)): implot3d.setup_axes("X", "Y", "Z", - implot3d.AxisFlags_.auto_fit.value, implot3d.AxisFlags_.auto_fit.value, implot3d.AxisFlags_.auto_fit.value) + implot3d.AxisFlags_.auto_fit, implot3d.AxisFlags_.auto_fit, implot3d.AxisFlags_.auto_fit) implot3d.plot_line("Trajectory", self.traj1.xs, self.traj1.ys, self.traj1.zs) implot3d.plot_line("Trajectory2", self.traj2.xs, self.traj2.ys, self.traj2.zs) implot3d.end_plot() diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_demo.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_demo.py index 0da239d..dfd310b 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_demo.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_demo.py @@ -200,7 +200,7 @@ def show_docking_disabled_message(): imgui.text("Set io.config_flags |= imgui.ConfigFlags.DockingEnable in your code, or ") imgui.same_line(0.0, 0.0) if imgui.small_button("click here"): - io.config_flags |= imgui.ConfigFlags_.docking_enable.value + io.config_flags |= imgui.ConfigFlags_.docking_enable # Helper to wire demo markers located in code to an interactive browser @@ -310,25 +310,25 @@ def show_demo_window(p_open: Optional[bool]) -> Optional[bool]: static.unsaved_document = False window_flags = 0 - if static.no_titlebar: window_flags |= imgui.WindowFlags_.no_title_bar.value - if static.no_scrollbar: window_flags |= imgui.WindowFlags_.no_scrollbar.value - if not static.no_menu: window_flags |= 
imgui.WindowFlags_.menu_bar.value - if static.no_move: window_flags |= imgui.WindowFlags_.no_move.value - if static.no_resize: window_flags |= imgui.WindowFlags_.no_resize.value - if static.no_collapse: window_flags |= imgui.WindowFlags_.no_collapse.value - if static.no_nav: window_flags |= imgui.WindowFlags_.no_nav.value - if static.no_background: window_flags |= imgui.WindowFlags_.no_background.value - if static.no_bring_to_front: window_flags |= imgui.WindowFlags_.no_bring_to_front_on_focus.value - if static.no_docking: window_flags |= imgui.WindowFlags_.no_docking.value - if static.unsaved_document: window_flags |= imgui.WindowFlags_.unsaved_document.value + if static.no_titlebar: window_flags |= imgui.WindowFlags_.no_title_bar + if static.no_scrollbar: window_flags |= imgui.WindowFlags_.no_scrollbar + if not static.no_menu: window_flags |= imgui.WindowFlags_.menu_bar + if static.no_move: window_flags |= imgui.WindowFlags_.no_move + if static.no_resize: window_flags |= imgui.WindowFlags_.no_resize + if static.no_collapse: window_flags |= imgui.WindowFlags_.no_collapse + if static.no_nav: window_flags |= imgui.WindowFlags_.no_nav + if static.no_background: window_flags |= imgui.WindowFlags_.no_background + if static.no_bring_to_front: window_flags |= imgui.WindowFlags_.no_bring_to_front_on_focus + if static.no_docking: window_flags |= imgui.WindowFlags_.no_docking + if static.unsaved_document: window_flags |= imgui.WindowFlags_.unsaved_document if static.no_close: p_open = None # Don't pass our bool* to Begin # We specify a default position/size in case there's no data in the .ini file. # We only do it to make the demo applications a little more welcoming, but typically this isn't required. 
main_viewport = imgui.get_main_viewport() - imgui.set_next_window_pos(ImVec2(main_viewport.work_pos.x + 650, main_viewport.work_pos.y + 20), imgui.Cond_.first_use_ever.value) - imgui.set_next_window_size(ImVec2(550, 680), imgui.Cond_.first_use_ever.value) + imgui.set_next_window_pos(ImVec2(main_viewport.work_pos.x + 650, main_viewport.work_pos.y + 20), imgui.Cond_.first_use_ever) + imgui.set_next_window_size(ImVec2(550, 680), imgui.Cond_.first_use_ever) # Main body of the Demo window starts here. if not imgui.begin("Dear ImGui Demo", p_open, window_flags): @@ -417,28 +417,28 @@ def show_demo_window(p_open: Optional[bool]) -> Optional[bool]: if imgui.tree_node("Configuration##2"): imgui.separator_text("General") - _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: NavEnableKeyboard", io.config_flags, imgui.ConfigFlags_.nav_enable_keyboard.value) + _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: NavEnableKeyboard", io.config_flags, imgui.ConfigFlags_.nav_enable_keyboard) imgui.same_line(); help_marker("Enable keyboard controls.") - _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: NavEnableGamepad", io.config_flags, imgui.ConfigFlags_.nav_enable_gamepad.value) + _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: NavEnableGamepad", io.config_flags, imgui.ConfigFlags_.nav_enable_gamepad) imgui.same_line(); help_marker("Enable gamepad controls. Require backend to set io.BackendFlags |= ImGuiBackendFlags_HasGamepad.\n\nRead instructions in imgui.cpp for details.") - _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: NoMouse", io.config_flags, imgui.ConfigFlags_.no_mouse.value) - if io.config_flags & imgui.ConfigFlags_.no_mouse.value: + _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: NoMouse", io.config_flags, imgui.ConfigFlags_.no_mouse) + if io.config_flags & imgui.ConfigFlags_.no_mouse: # The "NoMouse" option can get us stuck with a disabled mouse! 
Let's provide an alternative way to fix it: if math.fmod(time.time(), 0.40) < 0.20: imgui.same_line() imgui.text("<>") if imgui.is_key_pressed(imgui.Key.space): - io.config_flags &= ~imgui.ConfigFlags_.no_mouse.value - _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: NoMouseCursorChange", io.config_flags, imgui.ConfigFlags_.no_mouse_cursor_change.value) + io.config_flags &= ~imgui.ConfigFlags_.no_mouse + _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: NoMouseCursorChange", io.config_flags, imgui.ConfigFlags_.no_mouse_cursor_change) imgui.same_line(); help_marker("Instruct backend to not alter mouse cursor shape and visibility.") - _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: DockingEnable", io.config_flags, imgui.ConfigFlags_.docking_enable.value) + _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: DockingEnable", io.config_flags, imgui.ConfigFlags_.docking_enable) imgui.same_line() if io.config_docking_with_shift: help_marker("Drag from window title bar or their tab to dock/undock. Hold SHIFT to enable docking.\n\nDrag from window menu button (upper-left button) to undock an entire node (all windows).") else: help_marker("Drag from window title bar or their tab to dock/undock. Hold SHIFT to disable docking.\n\nDrag from window menu button (upper-left button) to undock an entire node (all windows).") - if io.config_flags & imgui.ConfigFlags_.docking_enable.value: + if io.config_flags & imgui.ConfigFlags_.docking_enable: imgui.indent() _, io.config_docking_no_split = imgui.checkbox("io.ConfigDockingNoSplit", io.config_docking_no_split) imgui.same_line(); help_marker("Simplified docking mode: disable window splitting, so docking is limited to merging multiple windows together into tab-bars.") @@ -450,9 +450,9 @@ def show_demo_window(p_open: Optional[bool]) -> Optional[bool]: imgui.same_line(); help_marker("Make window or viewport transparent when docking and only display docking boxes on the target viewport. 
Useful if rendering of multiple viewport cannot be synced. Best used with ConfigViewportsNoAutoMerge.") imgui.unindent() - _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: ViewportsEnable", io.config_flags, imgui.ConfigFlags_.viewports_enable.value) + _, io.config_flags = imgui.checkbox_flags("io.ConfigFlags: ViewportsEnable", io.config_flags, imgui.ConfigFlags_.viewports_enable) imgui.same_line(); help_marker("[beta] Enable beta multi-viewports support. See ImGuiPlatformIO for details.") - if io.config_flags & imgui.ConfigFlags_.viewports_enable.value: + if io.config_flags & imgui.ConfigFlags_.viewports_enable: imgui.indent() _, io.config_viewports_no_auto_merge = imgui.checkbox("io.ConfigViewportsNoAutoMerge", io.config_viewports_no_auto_merge) imgui.same_line(); help_marker("Set to make all floating imgui windows always create their own viewport. Otherwise, they are merged into the main host viewports when overlapping it.") @@ -505,13 +505,13 @@ def show_demo_window(p_open: Optional[bool]) -> Optional[bool]: # Make a local copy to avoid modifying actual backend flags. 
imgui.begin_disabled() - _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: HasGamepad", io.backend_flags, imgui.BackendFlags_.has_gamepad.value) - _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: HasMouseCursors", io.backend_flags, imgui.BackendFlags_.has_mouse_cursors.value) - _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: HasSetMousePos", io.backend_flags, imgui.BackendFlags_.has_set_mouse_pos.value) - _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: PlatformHasViewports", io.backend_flags, imgui.BackendFlags_.platform_has_viewports.value) - _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: HasMouseHoveredViewport", io.backend_flags, imgui.BackendFlags_.has_mouse_hovered_viewport.value) - _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: RendererHasVtxOffset", io.backend_flags, imgui.BackendFlags_.renderer_has_vtx_offset.value) - _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: RendererHasViewports", io.backend_flags, imgui.BackendFlags_.renderer_has_viewports.value) + _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: HasGamepad", io.backend_flags, imgui.BackendFlags_.has_gamepad) + _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: HasMouseCursors", io.backend_flags, imgui.BackendFlags_.has_mouse_cursors) + _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: HasSetMousePos", io.backend_flags, imgui.BackendFlags_.has_set_mouse_pos) + _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: PlatformHasViewports", io.backend_flags, imgui.BackendFlags_.platform_has_viewports) + _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: HasMouseHoveredViewport", io.backend_flags, imgui.BackendFlags_.has_mouse_hovered_viewport) + _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: RendererHasVtxOffset", io.backend_flags, imgui.BackendFlags_.renderer_has_vtx_offset) + _, io.backend_flags = imgui.checkbox_flags("io.BackendFlags: 
RendererHasViewports", io.backend_flags, imgui.BackendFlags_.renderer_has_viewports) imgui.end_disabled() imgui.tree_pop() imgui.spacing() @@ -617,9 +617,9 @@ def show_demo_window_widgets(): imgui.same_line() imgui.push_id(i) hue = i / 7.0 - imgui.push_style_color(imgui.Col_.button.value, imgui.ImColor.hsv(hue, 0.6, 0.6).value) - imgui.push_style_color(imgui.Col_.button_hovered.value, imgui.ImColor.hsv(hue, 0.7, 0.7).value) - imgui.push_style_color(imgui.Col_.button_active.value, imgui.ImColor.hsv(hue, 0.8, 0.8).value) + imgui.push_style_color(imgui.Col_.button, imgui.ImColor.hsv(hue, 0.6, 0.6).value) + imgui.push_style_color(imgui.Col_.button_hovered, imgui.ImColor.hsv(hue, 0.7, 0.7).value) + imgui.push_style_color(imgui.Col_.button_active, imgui.ImColor.hsv(hue, 0.8, 0.8).value) imgui.button("Click") imgui.pop_style_color(3) imgui.pop_id() @@ -632,7 +632,7 @@ def show_demo_window_widgets(): IMGUI_DEMO_MARKER("Widgets/Basic/Buttons (Repeating)") if not hasattr(static, "counter"): static.counter = 0 spacing = imgui.get_style().item_inner_spacing.x - imgui.push_item_flag(imgui.ItemFlags_.button_repeat.value, True) + imgui.push_item_flag(imgui.ItemFlags_.button_repeat, True) if imgui.arrow_button("##left", imgui.Dir.left): static.counter -= 1 imgui.same_line(0.0, spacing) @@ -703,7 +703,7 @@ def show_demo_window_widgets(): "Hold SHIFT/ALT for faster/slower edit.\n" "Double-click or CTRL+click to input value.") - changed, static.i2 = imgui.drag_int("drag int 0..100", static.i2, 1, 0, 100, "%d%%", imgui.SliderFlags_.always_clamp.value) + changed, static.i2 = imgui.drag_int("drag int 0..100", static.i2, 1, 0, 100, "%d%%", imgui.SliderFlags_.always_clamp) if not hasattr(static, 'ff1'): static.ff1 = 1.00 if not hasattr(static, 'ff2'): static.ff2 = 0.0067 @@ -722,7 +722,7 @@ def show_demo_window_widgets(): if not hasattr(static, 'fff1'): static.fff1 = 0.123 if not hasattr(static, 'fff2'): static.fff2 = 0.0 changed, static.fff1 = imgui.slider_float("slider float", 
static.fff1, 0.0, 1.0, "ratio = %.3f") - changed, static.fff2 = imgui.slider_float("slider float (log)", static.fff2, -10.0, 10.0, "%.4f", imgui.SliderFlags_.logarithmic.value) + changed, static.fff2 = imgui.slider_float("slider float (log)", static.fff2, -10.0, 10.0, "%.4f", imgui.SliderFlags_.logarithmic) IMGUI_DEMO_MARKER("Widgets/Basic/SliderAngle") if not hasattr(static, 'angle'): static.angle = 0.0 @@ -837,32 +837,32 @@ def show_demo_window_widgets(): # Manual tooltip emission example imgui.button("Manual") - if imgui.is_item_hovered(imgui.HoveredFlags_.for_tooltip.value): + if imgui.is_item_hovered(imgui.HoveredFlags_.for_tooltip): imgui.set_tooltip("I am a manually emitted tooltip.") # No delay tooltip example imgui.button("DelayNone") - if imgui.is_item_hovered(imgui.HoveredFlags_.delay_none.value): + if imgui.is_item_hovered(imgui.HoveredFlags_.delay_none): imgui.set_tooltip("I am a tooltip with no delay.") # Short delay tooltip example imgui.button("DelayShort") - if imgui.is_item_hovered(imgui.HoveredFlags_.delay_short.value | imgui.HoveredFlags_.no_shared_delay.value): + if imgui.is_item_hovered(imgui.HoveredFlags_.delay_short | imgui.HoveredFlags_.no_shared_delay): imgui.set_tooltip(f"I am a tooltip with a short delay ({imgui.get_style().hover_delay_short:.2f} sec).") imgui.button("DelayLong") - if imgui.is_item_hovered(imgui.HoveredFlags_.delay_normal.value | imgui.HoveredFlags_.no_shared_delay.value): + if imgui.is_item_hovered(imgui.HoveredFlags_.delay_normal | imgui.HoveredFlags_.no_shared_delay): imgui.set_tooltip(f"I am a tooltip with a long delay ({imgui.get_style().hover_delay_normal:.2f} sec).") imgui.button("Stationary") - if imgui.is_item_hovered(imgui.HoveredFlags_.stationary.value): + if imgui.is_item_hovered(imgui.HoveredFlags_.stationary): imgui.set_tooltip("I am a tooltip requiring mouse to be stationary before activating.") # Tooltips can also be shown for disabled items imgui.begin_disabled() imgui.button("Disabled item") 
imgui.end_disabled() - if imgui.is_item_hovered(imgui.HoveredFlags_.for_tooltip.value): + if imgui.is_item_hovered(imgui.HoveredFlags_.for_tooltip): imgui.set_tooltip("I am a a tooltip for a disabled item.") # Close the tree node for "Tooltips" @@ -882,7 +882,7 @@ def show_demo_window_widgets(): # Use SetNextItemOpen() to set the default state of a node to be open. We could # also use TreeNodeEx() with the ImGuiTreeNodeFlags_DefaultOpen flag to achieve the same thing! if i == 0: - imgui.set_next_item_open(True, imgui.Cond_.once.value) + imgui.set_next_item_open(True, imgui.Cond_.once) if imgui.tree_node(str(i), f"Child {i}"): imgui.text("blah blah") @@ -899,14 +899,14 @@ def show_demo_window_widgets(): "This is a more typical looking tree with selectable nodes.\n" "Click to select, CTRL+Click to toggle, click on arrows or double-click to open.") - if not hasattr(static, 'base_flags'): static.base_flags = imgui.TreeNodeFlags_.open_on_arrow.value | imgui.TreeNodeFlags_.open_on_double_click.value | imgui.TreeNodeFlags_.span_avail_width.value + if not hasattr(static, 'base_flags'): static.base_flags = imgui.TreeNodeFlags_.open_on_arrow | imgui.TreeNodeFlags_.open_on_double_click | imgui.TreeNodeFlags_.span_avail_width if not hasattr(static, 'align_label_with_current_x_position'): static.align_label_with_current_x_position = False if not hasattr(static, 'test_drag_and_drop'): static.test_drag_and_drop = False - _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_OpenOnArrow", static.base_flags, imgui.TreeNodeFlags_.open_on_arrow.value) - _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_OpenOnDoubleClick", static.base_flags, imgui.TreeNodeFlags_.open_on_double_click.value) - _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_SpanAvailWidth", static.base_flags, imgui.TreeNodeFlags_.span_avail_width.value); imgui.same_line(); help_marker("Extend hit area to all available width instead of allowing more items to be laid out after the 
node.") - _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_SpanFullWidth", static.base_flags, imgui.TreeNodeFlags_.span_full_width.value) - _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_SpanAllColumns", static.base_flags, imgui.TreeNodeFlags_.span_all_columns.value); imgui.same_line(); help_marker("For use in Tables only.") + _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_OpenOnArrow", static.base_flags, imgui.TreeNodeFlags_.open_on_arrow) + _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_OpenOnDoubleClick", static.base_flags, imgui.TreeNodeFlags_.open_on_double_click) + _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_SpanAvailWidth", static.base_flags, imgui.TreeNodeFlags_.span_avail_width); imgui.same_line(); help_marker("Extend hit area to all available width instead of allowing more items to be laid out after the node.") + _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_SpanFullWidth", static.base_flags, imgui.TreeNodeFlags_.span_full_width) + _, static.base_flags = imgui.checkbox_flags("ImGuiTreeNodeFlags_SpanAllColumns", static.base_flags, imgui.TreeNodeFlags_.span_all_columns); imgui.same_line(); help_marker("For use in Tables only.") _, static.align_label_with_current_x_position = imgui.checkbox("Align label with current X position", static.align_label_with_current_x_position) _, static.test_drag_and_drop = imgui.checkbox("Test tree node as drag source", static.test_drag_and_drop) imgui.text("Hello!") @@ -922,7 +922,7 @@ def show_demo_window_widgets(): node_flags = static.base_flags is_selected = (static.selection_mask & (1 << i)) != 0 if is_selected: - node_flags |= imgui.TreeNodeFlags_.selected.value + node_flags |= imgui.TreeNodeFlags_.selected if i < 3: # Items 0..2 are Tree Nodes. node_open = imgui.tree_node_ex(str(i), node_flags, f"Selectable Node {i}") @@ -938,7 +938,7 @@ def show_demo_window_widgets(): else: # Items 3..5 are Tree Leaves. 
- node_flags |= imgui.TreeNodeFlags_.leaf.value | imgui.TreeNodeFlags_.no_tree_push_on_open.value + node_flags |= imgui.TreeNodeFlags_.leaf | imgui.TreeNodeFlags_.no_tree_push_on_open imgui.tree_node_ex(str(i), node_flags, f"Selectable Leaf {i}") if imgui.is_item_clicked() and not imgui.is_item_toggled_open(): node_clicked = i @@ -972,7 +972,7 @@ def show_demo_window_widgets(): _, static.closable_group = imgui.checkbox("Show 2nd header", static.closable_group) # First collapsible header - if imgui.collapsing_header("Header", imgui.TreeNodeFlags_.none.value): + if imgui.collapsing_header("Header", imgui.TreeNodeFlags_.none): # Display hover state and some content within the header imgui.text(f"IsItemHovered: {int(imgui.is_item_hovered())}") for i in range(5): @@ -1113,8 +1113,8 @@ def show_demo_window_widgets(): pos = imgui.get_cursor_screen_pos() uv_min = ImVec2(0.0, 0.0) # Top-left uv_max = ImVec2(1.0, 1.0) # Lower-right - tint_col = imgui.get_style_color_vec4(imgui.Col_.text.value) if static.use_text_color_for_tint else (1.0, 1.0, 1.0, 1.0) - border_col = imgui.get_style_color_vec4(imgui.Col_.border.value) + tint_col = imgui.get_style_color_vec4(imgui.Col_.text) if static.use_text_color_for_tint else (1.0, 1.0, 1.0, 1.0) + border_col = imgui.get_style_color_vec4(imgui.Col_.border) imgui.image(my_tex_id, ImVec2(my_tex_w, my_tex_h), uv_min, uv_max, tint_col, border_col) # type: ignore if imgui.begin_item_tooltip(): @@ -1137,7 +1137,7 @@ def show_demo_window_widgets(): for i in range(8): imgui.push_id(i) if i > 0: - imgui.push_style_var(imgui.StyleVar_.frame_padding.value, ImVec2(i - 1.0, i - 1.0)) + imgui.push_style_var(imgui.StyleVar_.frame_padding, ImVec2(i - 1.0, i - 1.0)) size = ImVec2(32.0, 32.0) uv0 = ImVec2(0.0, 0.0) uv1 = ImVec2(32.0 / my_tex_w, 32.0 / my_tex_h) @@ -1157,28 +1157,28 @@ def show_demo_window_widgets(): if imgui.tree_node("Combo"): # Expose flags as checkboxes for the demo if not hasattr(static, 'flags'): static.flags = 0 - _, static.flags = 
imgui.checkbox_flags("ImGuiComboFlags_PopupAlignLeft", static.flags, imgui.ComboFlags_.popup_align_left.value) + _, static.flags = imgui.checkbox_flags("ImGuiComboFlags_PopupAlignLeft", static.flags, imgui.ComboFlags_.popup_align_left) imgui.same_line(); help_marker("Only makes a difference if the popup is larger than the combo") - changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_NoArrowButton", static.flags, imgui.ComboFlags_.no_arrow_button.value) + changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_NoArrowButton", static.flags, imgui.ComboFlags_.no_arrow_button) if changed: - static.flags &= ~imgui.ComboFlags_.no_preview.value # Clear the other flag, as we cannot combine both - changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_NoPreview", static.flags, imgui.ComboFlags_.no_preview.value) + static.flags &= ~imgui.ComboFlags_.no_preview # Clear the other flag, as we cannot combine both + changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_NoPreview", static.flags, imgui.ComboFlags_.no_preview) if changed: - static.flags &= ~(imgui.ComboFlags_.no_arrow_button.value | imgui.ComboFlags_.width_fit_preview.value) # Clear the other flag, as we cannot combine both - changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_WidthFitPreview", static.flags, imgui.ComboFlags_.width_fit_preview.value) + static.flags &= ~(imgui.ComboFlags_.no_arrow_button | imgui.ComboFlags_.width_fit_preview) # Clear the other flag, as we cannot combine both + changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_WidthFitPreview", static.flags, imgui.ComboFlags_.width_fit_preview) if changed: - static.flags &= ~imgui.ComboFlags_.no_preview.value + static.flags &= ~imgui.ComboFlags_.no_preview # Override default popup height - changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_HeightSmall", static.flags, imgui.ComboFlags_.height_small.value) + changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_HeightSmall", 
static.flags, imgui.ComboFlags_.height_small) if changed: - static.flags &= ~(imgui.ComboFlags_.height_mask_.value & ~imgui.ComboFlags_.height_small.value) - changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_HeightRegular", static.flags, imgui.ComboFlags_.height_regular.value) + static.flags &= ~(imgui.ComboFlags_.height_mask_ & ~imgui.ComboFlags_.height_small) + changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_HeightRegular", static.flags, imgui.ComboFlags_.height_regular) if changed: - static.flags &= ~(imgui.ComboFlags_.height_mask_.value & ~imgui.ComboFlags_.height_regular.value) - changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_HeightLargest", static.flags, imgui.ComboFlags_.height_largest.value) + static.flags &= ~(imgui.ComboFlags_.height_mask_ & ~imgui.ComboFlags_.height_regular) + changed, static.flags = imgui.checkbox_flags("ImGuiComboFlags_HeightLargest", static.flags, imgui.ComboFlags_.height_largest) if changed: - static.flags &= ~(imgui.ComboFlags_.height_mask_.value & ~imgui.ComboFlags_.height_largest.value) + static.flags &= ~(imgui.ComboFlags_.height_mask_ & ~imgui.ComboFlags_.height_largest) # Generic BeginCombo() API, displaying items with selectable behavior items = ["AAAA", "BBBB", "CCCC", "DDDD", "EEEE", "FFFF", "GGGG", "HHHH", "IIII", "JJJJ", "KKKK", "LLLLLLL", "MMMM", "OOOOOOO"] @@ -1260,7 +1260,7 @@ def show_demo_window_widgets(): _, static.selection[0] = imgui.selectable("1. I am selectable", static.selection[0]) _, static.selection[1] = imgui.selectable("2. I am selectable", static.selection[1]) _, static.selection[2] = imgui.selectable("3. I am selectable", static.selection[2]) - if imgui.selectable("4. I am double clickable", static.selection[3], imgui.SelectableFlags_.allow_double_click.value): + if imgui.selectable("4. 
I am double clickable", static.selection[3], imgui.SelectableFlags_.allow_double_click): if imgui.is_mouse_double_clicked(0): static.selection[3] = not static.selection[3] imgui.tree_pop() @@ -1315,7 +1315,7 @@ def show_demo_window_widgets(): if imgui.tree_node("In columns"): if not hasattr(static, 'selected_in_columns'): static.selected_in_columns = [False] * 10 - if imgui.begin_table("split1", 3, imgui.TableFlags_.resizable.value | imgui.TableFlags_.no_saved_settings.value | imgui.TableFlags_.borders.value): + if imgui.begin_table("split1", 3, imgui.TableFlags_.resizable | imgui.TableFlags_.no_saved_settings | imgui.TableFlags_.borders): for i in range(10): label = f"Item {i}" imgui.table_next_column() @@ -1323,12 +1323,12 @@ def show_demo_window_widgets(): imgui.end_table() imgui.spacing() - if imgui.begin_table("split2", 3, imgui.TableFlags_.resizable.value | imgui.TableFlags_.no_saved_settings.value | imgui.TableFlags_.borders.value): + if imgui.begin_table("split2", 3, imgui.TableFlags_.resizable | imgui.TableFlags_.no_saved_settings | imgui.TableFlags_.borders): for i in range(10): label = f"Item {i}" imgui.table_next_row() imgui.table_next_column() - _, static.selected_in_columns[i] = imgui.selectable(label, static.selected_in_columns[i], imgui.SelectableFlags_.span_all_columns.value) + _, static.selected_in_columns[i] = imgui.selectable(label, static.selected_in_columns[i], imgui.SelectableFlags_.span_all_columns) imgui.table_next_column() imgui.text("Some other contents") imgui.table_next_column() @@ -1344,7 +1344,7 @@ def show_demo_window_widgets(): time = imgui.get_time() winning_state = all(val == 1 for row in static.grid_selected for val in row) # Check if all cells are selected if winning_state: - imgui.push_style_var(imgui.StyleVar_.selectable_text_align.value, ImVec2(0.5 + 0.5 * math.cos(time * 2), 0.5 + 0.5 * math.sin(time * 3))) + imgui.push_style_var(imgui.StyleVar_.selectable_text_align, ImVec2(0.5 + 0.5 * math.cos(time * 2), 0.5 + 0.5 * 
math.sin(time * 3))) for y in range(4): for x in range(4): @@ -1374,8 +1374,8 @@ def show_demo_window_widgets(): alignment = ImVec2(float(x) / 2.0, float(y) / 2.0) name = f"({alignment[0]:.1f},{alignment[1]:.1f})" if x > 0: imgui.same_line() - imgui.push_style_var(imgui.StyleVar_.selectable_text_align.value, alignment) - _, static.selectable_alignment[3 * y + x] = imgui.selectable(name, static.selectable_alignment[3 * y + x], imgui.SelectableFlags_.none.value, ImVec2(80, 80)) + imgui.push_style_var(imgui.StyleVar_.selectable_text_align, alignment) + _, static.selectable_alignment[3 * y + x] = imgui.selectable(name, static.selectable_alignment[3 * y + x], imgui.SelectableFlags_.none, ImVec2(80, 80)) imgui.pop_style_var() imgui.tree_pop() imgui.tree_pop() @@ -1406,10 +1406,10 @@ def show_demo_window_widgets(): # The flags are used to configure the behavior of the InputText widget. # In Python, we can directly use the flags provided by the imgui library. if not hasattr(static, 'text_input_flags'): - static.text_input_flags = imgui.InputTextFlags_.allow_tab_input.value - _, static.text_input_flags = imgui.checkbox_flags("ImGuiInputTextFlags_ReadOnly", static.text_input_flags, imgui.InputTextFlags_.read_only.value) - _, static.text_input_flags = imgui.checkbox_flags("ImGuiInputTextFlags_AllowTabInput", static.text_input_flags, imgui.InputTextFlags_.allow_tab_input.value) - _, static.text_input_flags = imgui.checkbox_flags("ImGuiInputTextFlags_CtrlEnterForNewLine", static.text_input_flags, imgui.InputTextFlags_.ctrl_enter_for_new_line.value) + static.text_input_flags = imgui.InputTextFlags_.allow_tab_input + _, static.text_input_flags = imgui.checkbox_flags("ImGuiInputTextFlags_ReadOnly", static.text_input_flags, imgui.InputTextFlags_.read_only) + _, static.text_input_flags = imgui.checkbox_flags("ImGuiInputTextFlags_AllowTabInput", static.text_input_flags, imgui.InputTextFlags_.allow_tab_input) + _, static.text_input_flags = 
imgui.checkbox_flags("ImGuiInputTextFlags_CtrlEnterForNewLine", static.text_input_flags, imgui.InputTextFlags_.ctrl_enter_for_new_line) # Use InputTextMultiline for a multi-line resizable input box. changed, static.text_input_text = imgui.input_text_multiline("##source", static.text_input_text, ImVec2(-1, imgui.get_text_line_height() * 16), static.text_input_flags) @@ -1438,27 +1438,27 @@ def show_demo_window_widgets(): if not hasattr(static, "filtered_text_input_buf2"): static.filtered_text_input_buf2 = "" - _, static.filtered_text_input_buf2 = imgui.input_text("decimal", static.filtered_text_input_buf2, flags=imgui.InputTextFlags_.chars_decimal.value) + _, static.filtered_text_input_buf2 = imgui.input_text("decimal", static.filtered_text_input_buf2, flags=imgui.InputTextFlags_.chars_decimal) if not hasattr(static, "filtered_text_input_buf3"): static.filtered_text_input_buf3 = "" - _, static.filtered_text_input_buf3 = imgui.input_text("hexadecimal", static.filtered_text_input_buf3, flags=imgui.InputTextFlags_.chars_hexadecimal.value | imgui.InputTextFlags_.chars_uppercase.value) + _, static.filtered_text_input_buf3 = imgui.input_text("hexadecimal", static.filtered_text_input_buf3, flags=imgui.InputTextFlags_.chars_hexadecimal | imgui.InputTextFlags_.chars_uppercase) if not hasattr(static, "filtered_text_input_buf4"): static.filtered_text_input_buf4 = "" - _, static.filtered_text_input_buf4 = imgui.input_text("uppercase", static.filtered_text_input_buf4, flags=imgui.InputTextFlags_.chars_uppercase.value) + _, static.filtered_text_input_buf4 = imgui.input_text("uppercase", static.filtered_text_input_buf4, flags=imgui.InputTextFlags_.chars_uppercase) if not hasattr(static, "filtered_text_input_buf5"): static.filtered_text_input_buf5 = "" - _, static.filtered_text_input_buf5 = imgui.input_text("no blank", static.filtered_text_input_buf5, flags=imgui.InputTextFlags_.chars_no_blank.value) + _, static.filtered_text_input_buf5 = imgui.input_text("no blank", 
static.filtered_text_input_buf5, flags=imgui.InputTextFlags_.chars_no_blank) # if "filtered_text_input_buf6" not in static: # static.filtered_text_input_buf6 = "" - # _, static.filtered_text_input_buf6 = imgui.input_text("casing swap", static.filtered_text_input_buf6, flags=imgui.InputTextFlags_.callback_char_filter.value, callback=TextFilters.filter_casing_swap) + # _, static.filtered_text_input_buf6 = imgui.input_text("casing swap", static.filtered_text_input_buf6, flags=imgui.InputTextFlags_.callback_char_filter, callback=TextFilters.filter_casing_swap) # # if "filtered_text_input_buf7" not in static: # static.filtered_text_input_buf7 = "" - # _, static.filtered_text_input_buf7 = imgui.input_text("\"imgui\"", static.filtered_text_input_buf7, flags=imgui.InputTextFlags_.callback_char_filter.value, callback=TextFilters.filter_imgui_letters) + # _, static.filtered_text_input_buf7 = imgui.input_text("\"imgui\"", static.filtered_text_input_buf7, flags=imgui.InputTextFlags_.callback_char_filter, callback=TextFilters.filter_imgui_letters) imgui.tree_pop() @@ -1467,19 +1467,19 @@ def show_demo_window_widgets(): if not hasattr(static, "password"): static.password = "password123" - _, static.password = imgui.input_text("password", static.password, imgui.InputTextFlags_.password.value) + _, static.password = imgui.input_text("password", static.password, imgui.InputTextFlags_.password) imgui.same_line(); help_marker("Display all characters as '*'.\nDisable clipboard cut and copy.\nDisable logging.") - _, static.password = imgui.input_text_with_hint("password (w/ hint)", "", static.password, imgui.InputTextFlags_.password.value) + _, static.password = imgui.input_text_with_hint("password (w/ hint)", "", static.password, imgui.InputTextFlags_.password) _, static.password = imgui.input_text("password (clear)", static.password) imgui.tree_pop() IMGUI_DEMO_MARKER("Widgets/Text Input/Miscellaneous") if imgui.tree_node("Miscellaneous"): if not hasattr(static, "misc_buf1"): 
static.misc_buf1 = "" - if not hasattr(static, "misc_flags"): static.misc_flags = imgui.InputTextFlags_.escape_clears_all.value - changed, static.misc_flags = imgui.checkbox_flags("ImGuiInputTextFlags_EscapeClearsAll", static.misc_flags, imgui.InputTextFlags_.escape_clears_all.value) - changed, static.misc_flags = imgui.checkbox_flags("ImGuiInputTextFlags_ReadOnly", static.misc_flags, imgui.InputTextFlags_.read_only.value) - changed, static.misc_flags = imgui.checkbox_flags("ImGuiInputTextFlags_NoUndoRedo", static.misc_flags, imgui.InputTextFlags_.no_undo_redo.value) + if not hasattr(static, "misc_flags"): static.misc_flags = imgui.InputTextFlags_.escape_clears_all + changed, static.misc_flags = imgui.checkbox_flags("ImGuiInputTextFlags_EscapeClearsAll", static.misc_flags, imgui.InputTextFlags_.escape_clears_all) + changed, static.misc_flags = imgui.checkbox_flags("ImGuiInputTextFlags_ReadOnly", static.misc_flags, imgui.InputTextFlags_.read_only) + changed, static.misc_flags = imgui.checkbox_flags("ImGuiInputTextFlags_NoUndoRedo", static.misc_flags, imgui.InputTextFlags_.no_undo_redo) _, static.misc_buf1 = imgui.input_text("Hello", static.misc_buf1, flags=static.misc_flags) imgui.tree_pop() @@ -1500,7 +1500,7 @@ def show_demo_window_widgets(): if imgui.tree_node("Tabs"): IMGUI_DEMO_MARKER("Widgets/Tabs/Basic") if imgui.tree_node("Basic"): - tab_bar_flags = imgui.TabBarFlags_.none.value + tab_bar_flags = imgui.TabBarFlags_.none if imgui.begin_tab_bar("MyTabBar", tab_bar_flags): if imgui.begin_tab_item("Avocado")[0]: imgui.text("This is the Avocado tab!\nblah blah blah blah blah") @@ -1519,17 +1519,17 @@ def show_demo_window_widgets(): if imgui.tree_node("Advanced & Close Button"): # Expose a couple of the available flags. In most cases, you may just call begin_tab_bar() with no flags (0). 
if not hasattr(static, "adv_tab_bar_flags"): - static.adv_tab_bar_flags = imgui.TabBarFlags_.reorderable.value - _, static.adv_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_Reorderable", static.adv_tab_bar_flags, imgui.TabBarFlags_.reorderable.value) - _, static.adv_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_AutoSelectNewTabs", static.adv_tab_bar_flags, imgui.TabBarFlags_.auto_select_new_tabs.value) - _, static.adv_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_TabListPopupButton", static.adv_tab_bar_flags, imgui.TabBarFlags_.tab_list_popup_button.value) - _, static.adv_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_NoCloseWithMiddleMouseButton", static.adv_tab_bar_flags, imgui.TabBarFlags_.no_close_with_middle_mouse_button.value) - if (static.adv_tab_bar_flags & imgui.TabBarFlags_.fitting_policy_mask_.value) == 0: - static.adv_tab_bar_flags |= imgui.TabBarFlags_.fitting_policy_default_.value - if imgui.checkbox_flags("ImGuiTabBarFlags_FittingPolicyResizeDown", static.adv_tab_bar_flags, imgui.TabBarFlags_.fitting_policy_resize_down.value): - static.adv_tab_bar_flags &= ~(imgui.TabBarFlags_.fitting_policy_mask_.value ^ imgui.TabBarFlags_.fitting_policy_resize_down.value) - if imgui.checkbox_flags("ImGuiTabBarFlags_FittingPolicyScroll", static.adv_tab_bar_flags, imgui.TabBarFlags_.fitting_policy_scroll.value): - static.adv_tab_bar_flags &= ~(imgui.TabBarFlags_.fitting_policy_mask_.value ^ imgui.TabBarFlags_.fitting_policy_scroll.value) + static.adv_tab_bar_flags = imgui.TabBarFlags_.reorderable + _, static.adv_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_Reorderable", static.adv_tab_bar_flags, imgui.TabBarFlags_.reorderable) + _, static.adv_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_AutoSelectNewTabs", static.adv_tab_bar_flags, imgui.TabBarFlags_.auto_select_new_tabs) + _, static.adv_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_TabListPopupButton", static.adv_tab_bar_flags, 
imgui.TabBarFlags_.tab_list_popup_button) + _, static.adv_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_NoCloseWithMiddleMouseButton", static.adv_tab_bar_flags, imgui.TabBarFlags_.no_close_with_middle_mouse_button) + if (static.adv_tab_bar_flags & imgui.TabBarFlags_.fitting_policy_mask_) == 0: + static.adv_tab_bar_flags |= imgui.TabBarFlags_.fitting_policy_default_ + if imgui.checkbox_flags("ImGuiTabBarFlags_FittingPolicyResizeDown", static.adv_tab_bar_flags, imgui.TabBarFlags_.fitting_policy_resize_down): + static.adv_tab_bar_flags &= ~(imgui.TabBarFlags_.fitting_policy_mask_ ^ imgui.TabBarFlags_.fitting_policy_resize_down) + if imgui.checkbox_flags("ImGuiTabBarFlags_FittingPolicyScroll", static.adv_tab_bar_flags, imgui.TabBarFlags_.fitting_policy_scroll): + static.adv_tab_bar_flags &= ~(imgui.TabBarFlags_.fitting_policy_mask_ ^ imgui.TabBarFlags_.fitting_policy_scroll) # Tab Bar names = ["Artichoke", "Beetroot", "Celery", "Daikon"] @@ -1544,7 +1544,7 @@ def show_demo_window_widgets(): # the underlying bool will be set to False when the tab is closed. if imgui.begin_tab_bar("MyTabBar", static.adv_tab_bar_flags): for n in range(len(static.opened)): - if static.opened[n] and imgui.begin_tab_item(names[n], static.opened[n], imgui.TabItemFlags_.none.value)[0]: + if static.opened[n] and imgui.begin_tab_item(names[n], static.opened[n], imgui.TabItemFlags_.none)[0]: imgui.text("This is the %s tab!" 
% names[n]) if n & 1: imgui.text("I am an odd tab.") @@ -1577,22 +1577,22 @@ def show_demo_window_widgets(): # Expose some other flags which are useful to showcase how they interact with Leading/Trailing tabs if not hasattr(static, "lead_trail_tab_bar_flags"): static.lead_trail_tab_bar_flags = ( - imgui.TabBarFlags_.auto_select_new_tabs.value - | imgui.TabBarFlags_.reorderable.value - | imgui.TabBarFlags_.fitting_policy_resize_down.value + imgui.TabBarFlags_.auto_select_new_tabs + | imgui.TabBarFlags_.reorderable + | imgui.TabBarFlags_.fitting_policy_resize_down ) - _, static.lead_trail_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_TabListPopupButton", static.lead_trail_tab_bar_flags, imgui.TabBarFlags_.tab_list_popup_button.value) - changed, static.lead_trail_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_FittingPolicyResizeDown", static.lead_trail_tab_bar_flags, imgui.TabBarFlags_.fitting_policy_resize_down.value) + _, static.lead_trail_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_TabListPopupButton", static.lead_trail_tab_bar_flags, imgui.TabBarFlags_.tab_list_popup_button) + changed, static.lead_trail_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_FittingPolicyResizeDown", static.lead_trail_tab_bar_flags, imgui.TabBarFlags_.fitting_policy_resize_down) if changed: - static.lead_trail_tab_bar_flags &= ~(imgui.TabBarFlags_.fitting_policy_mask_.value ^ imgui.TabBarFlags_.fitting_policy_resize_down.value) - changed, static.lead_trail_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_FittingPolicyScroll", static.lead_trail_tab_bar_flags, imgui.TabBarFlags_.fitting_policy_scroll.value) + static.lead_trail_tab_bar_flags &= ~(imgui.TabBarFlags_.fitting_policy_mask_ ^ imgui.TabBarFlags_.fitting_policy_resize_down) + changed, static.lead_trail_tab_bar_flags = imgui.checkbox_flags("ImGuiTabBarFlags_FittingPolicyScroll", static.lead_trail_tab_bar_flags, imgui.TabBarFlags_.fitting_policy_scroll) if changed: - 
static.lead_trail_tab_bar_flags &= ~(imgui.TabBarFlags_.fitting_policy_mask_.value ^ imgui.TabBarFlags_.fitting_policy_scroll) # type: ignore + static.lead_trail_tab_bar_flags &= ~(imgui.TabBarFlags_.fitting_policy_mask_ ^ imgui.TabBarFlags_.fitting_policy_scroll) # type: ignore if imgui.begin_tab_bar("MyTabBar", static.lead_trail_tab_bar_flags): # Demo a Leading TabItemButton(): click the "?" button to open a menu if static.show_leading_button: - if imgui.tab_item_button("?", imgui.TabItemFlags_.leading.value | imgui.TabItemFlags_.no_tooltip.value): + if imgui.tab_item_button("?", imgui.TabItemFlags_.leading | imgui.TabItemFlags_.no_tooltip): imgui.open_popup("MyHelpMenu") if imgui.begin_popup("MyHelpMenu"): imgui.selectable("Hello!", False) @@ -1601,7 +1601,7 @@ def show_demo_window_widgets(): # Demo Trailing Tabs: click the "+" button to add a new tab (in your app you may want to use a font icon instead of the "+") # Note that we submit it before the regular tabs, but because of the ImGuiTabItemFlags_Trailing flag it will always appear at the end. if static.show_trailing_button: - if imgui.tab_item_button("+", imgui.TabItemFlags_.trailing.value | imgui.TabItemFlags_.no_tooltip.value): + if imgui.tab_item_button("+", imgui.TabItemFlags_.trailing | imgui.TabItemFlags_.no_tooltip): static.active_tabs.append(static.next_tab_id) # Add new tab static.next_tab_id += 1 @@ -1610,7 +1610,7 @@ def show_demo_window_widgets(): while n < len(static.active_tabs): open = True name = "%04d" % static.active_tabs[n] - if imgui.begin_tab_item(name, open, imgui.TabItemFlags_.none.value)[0]: + if imgui.begin_tab_item(name, open, imgui.TabItemFlags_.none)[0]: imgui.text("This is the %s tab!" 
% name) imgui.end_tab_item() @@ -1652,9 +1652,9 @@ def show_demo_window_widgets(): _, static.hdr = imgui.checkbox("With HDR", static.hdr) imgui.same_line() help_marker("Currently all this does is to lift the 0..1 limits on dragging widgets.") - misc_flags = (imgui.ColorEditFlags_.hdr.value if static.hdr else 0) | (0 if static.drag_and_drop else imgui.ColorEditFlags_.no_drag_drop.value) | ( - imgui.ColorEditFlags_.alpha_preview_half.value if static.alpha_half_preview else (imgui.ColorEditFlags_.alpha_preview.value if static.alpha_preview else 0)) | ( - 0 if static.options_menu else imgui.ColorEditFlags_.no_options.value) + misc_flags = (imgui.ColorEditFlags_.hdr if static.hdr else 0) | (0 if static.drag_and_drop else imgui.ColorEditFlags_.no_drag_drop) | ( + imgui.ColorEditFlags_.alpha_preview_half if static.alpha_half_preview else (imgui.ColorEditFlags_.alpha_preview if static.alpha_preview else 0)) | ( + 0 if static.options_menu else imgui.ColorEditFlags_.no_options) IMGUI_DEMO_MARKER("Widgets/Color/ColorEdit") imgui.separator_text("Inline color editor") @@ -1667,11 +1667,11 @@ def show_demo_window_widgets(): IMGUI_DEMO_MARKER("Widgets/Color/ColorEdit (HSV, with Alpha)") imgui.text("Color widget HSV with Alpha:") - _, static.color = imgui.color_edit4("MyColor##2", static.color, flags=imgui.ColorEditFlags_.display_hsv.value | misc_flags) # type: ignore + _, static.color = imgui.color_edit4("MyColor##2", static.color, flags=imgui.ColorEditFlags_.display_hsv | misc_flags) # type: ignore IMGUI_DEMO_MARKER("Widgets/Color/ColorEdit (float display)") imgui.text("Color widget with Float Display:") - _, static.color = imgui.color_edit4("MyColor##2f", static.color, flags=imgui.ColorEditFlags_.float.value | misc_flags) + _, static.color = imgui.color_edit4("MyColor##2f", static.color, flags=imgui.ColorEditFlags_.float | misc_flags) IMGUI_DEMO_MARKER("Widgets/Color/ColorButton (with Picker)") imgui.text("Color button with Picker:") @@ -1680,13 +1680,13 @@ def 
show_demo_window_widgets(): "With the ImGuiColorEditFlags_NoInputs flag you can hide all the slider/text inputs.\n" "With the ImGuiColorEditFlags_NoLabel flag you can pass a non-empty label which will only " "be used for the tooltip and picker popup.") - _, static.color = imgui.color_edit4("MyColor##3", static.color, flags=imgui.ColorEditFlags_.no_inputs.value | imgui.ColorEditFlags_.no_label.value | misc_flags) + _, static.color = imgui.color_edit4("MyColor##3", static.color, flags=imgui.ColorEditFlags_.no_inputs | imgui.ColorEditFlags_.no_label | misc_flags) IMGUI_DEMO_MARKER("Widgets/Color/ColorButton (simple)") imgui.text("Color button only:") if not hasattr(static, "no_border"): static.no_border = False _, static.no_border = imgui.checkbox("ImGuiColorEditFlags_NoBorder", static.no_border) - imgui.color_button("MyColor##3c", static.color, misc_flags | (imgui.ColorEditFlags_.no_border.value if static.no_border else 0), (80, 80)) # type: ignore + imgui.color_button("MyColor##3c", static.color, misc_flags | (imgui.ColorEditFlags_.no_border if static.no_border else 0), (80, 80)) # type: ignore # HSV encoded support (to avoid RGB<>HSV round trips and singularities when S==0 or V==0) if not hasattr(static, "color_hsv"): static.color_hsv = ImVec4(0.23, 1.0, 1.0, 1.0) # Stored as HSV! @@ -1699,8 +1699,8 @@ def show_demo_window_widgets(): "added benefit that you can manipulate hue values with the picker even when saturation or value are zero." 
) imgui.text("Color widget with InputHSV:") - _, static.color_hsv = imgui.color_edit4("HSV shown as RGB##1", static.color_hsv, imgui.ColorEditFlags_.display_rgb.value | imgui.ColorEditFlags_.input_hsv.value | imgui.ColorEditFlags_.float.value) # type: ignore - _, static.color_hsv = imgui.color_edit4("HSV shown as HSV##1", static.color_hsv, imgui.ColorEditFlags_.display_hsv.value | imgui.ColorEditFlags_.input_hsv.value | imgui.ColorEditFlags_.float.value) # type: ignore + _, static.color_hsv = imgui.color_edit4("HSV shown as RGB##1", static.color_hsv, imgui.ColorEditFlags_.display_rgb | imgui.ColorEditFlags_.input_hsv | imgui.ColorEditFlags_.float) # type: ignore + _, static.color_hsv = imgui.color_edit4("HSV shown as HSV##1", static.color_hsv, imgui.ColorEditFlags_.display_hsv | imgui.ColorEditFlags_.input_hsv | imgui.ColorEditFlags_.float) # type: ignore # imgui.drag_float4("Raw HSV values", static.color_hsv, 0.01, 0.0, 1.0) imgui.tree_pop() @@ -1709,14 +1709,14 @@ def show_demo_window_widgets(): IMGUI_DEMO_MARKER("Widgets/Drag and Slider Flags") if imgui.tree_node("Drag/Slider Flags"): # Demonstrate using advanced flags for DragXXX and SliderXXX functions. Note that the flags are the same! 
- if not hasattr(static, "drag_slider_flags"): static.drag_slider_flags = imgui.SliderFlags_.none.value - changed, static.drag_slider_flags = imgui.checkbox_flags("ImGuiSliderFlags_AlwaysClamp", static.drag_slider_flags, imgui.SliderFlags_.always_clamp.value) + if not hasattr(static, "drag_slider_flags"): static.drag_slider_flags = imgui.SliderFlags_.none + changed, static.drag_slider_flags = imgui.checkbox_flags("ImGuiSliderFlags_AlwaysClamp", static.drag_slider_flags, imgui.SliderFlags_.always_clamp) imgui.same_line(); help_marker("Always clamp value to min/max bounds (if any) when input manually with CTRL+Click.") - changed, static.drag_slider_flags = imgui.checkbox_flags("ImGuiSliderFlags_Logarithmic", static.drag_slider_flags, imgui.SliderFlags_.logarithmic.value) + changed, static.drag_slider_flags = imgui.checkbox_flags("ImGuiSliderFlags_Logarithmic", static.drag_slider_flags, imgui.SliderFlags_.logarithmic) imgui.same_line(); help_marker("Enable logarithmic editing (more precision for small values).") - changed, static.drag_slider_flags = imgui.checkbox_flags("ImGuiSliderFlags_NoRoundToFormat", static.drag_slider_flags, imgui.SliderFlags_.no_round_to_format.value) + changed, static.drag_slider_flags = imgui.checkbox_flags("ImGuiSliderFlags_NoRoundToFormat", static.drag_slider_flags, imgui.SliderFlags_.no_round_to_format) imgui.same_line(); help_marker("Disable rounding underlying value to match precision of the format string (e.g. 
%.3f values are rounded to those 3 digits).") - changed, static.drag_slider_flags = imgui.checkbox_flags("ImGuiSliderFlags_NoInput", static.drag_slider_flags, imgui.SliderFlags_.no_input.value) + changed, static.drag_slider_flags = imgui.checkbox_flags("ImGuiSliderFlags_NoInput", static.drag_slider_flags, imgui.SliderFlags_.no_input) imgui.same_line(); help_marker("Disable CTRL+Click or Enter key allowing to input text directly into the widget.") # Drags @@ -1791,7 +1791,7 @@ def show_demo_window_widgets(): IMGUI_DEMO_MARKER("Widgets/Vertical Sliders") if imgui.tree_node("Vertical Sliders"): spacing = 4 - imgui.push_style_var(imgui.StyleVar_.item_spacing.value, ImVec2(spacing, spacing)) + imgui.push_style_var(imgui.StyleVar_.item_spacing, ImVec2(spacing, spacing)) if not hasattr(static, "sliderv_int_value"): static.sliderv_int_value = 0 imgui.v_slider_int("##int", ImVec2(18, 160), static.sliderv_int_value, 0, 5) @@ -1803,10 +1803,10 @@ def show_demo_window_widgets(): if i > 0: imgui.same_line() imgui.push_id(i) - imgui.push_style_color(imgui.Col_.frame_bg.value, imgui.ImColor.hsv(i / 7.0, 0.5, 0.5).value) - imgui.push_style_color(imgui.Col_.frame_bg_hovered.value, imgui.ImColor.hsv(i / 7.0, 0.6, 0.5).value) - imgui.push_style_color(imgui.Col_.frame_bg_active.value, imgui.ImColor.hsv(i / 7.0, 0.7, 0.5).value) - imgui.push_style_color(imgui.Col_.slider_grab.value, imgui.ImColor.hsv(i / 7.0, 0.9, 0.9).value) + imgui.push_style_color(imgui.Col_.frame_bg, imgui.ImColor.hsv(i / 7.0, 0.5, 0.5).value) + imgui.push_style_color(imgui.Col_.frame_bg_hovered, imgui.ImColor.hsv(i / 7.0, 0.6, 0.5).value) + imgui.push_style_color(imgui.Col_.frame_bg_active, imgui.ImColor.hsv(i / 7.0, 0.7, 0.5).value) + imgui.push_style_color(imgui.Col_.slider_grab, imgui.ImColor.hsv(i / 7.0, 0.9, 0.9).value) _, static.sliderv_values[i] = imgui.v_slider_float("##v", ImVec2(18, 160), static.sliderv_values[i], 0.0, 1.0, "") if imgui.is_item_active() or imgui.is_item_hovered(): 
imgui.set_tooltip("%.3f" % static.sliderv_values[i]) @@ -1838,7 +1838,7 @@ def show_demo_window_widgets(): if i > 0: imgui.same_line() imgui.push_id(i) - imgui.push_style_var(imgui.StyleVar_.grab_min_size.value, 40) + imgui.push_style_var(imgui.StyleVar_.grab_min_size, 40) _, static.sliderv_values[i] = imgui.v_slider_float("##v", ImVec2(40, 160), static.sliderv_values[i], 0.0, 1.0, "%.2f\nsec") imgui.pop_style_var() imgui.pop_id() @@ -1886,9 +1886,9 @@ def show_demo_window_layout(): _, static.disable_menu = imgui.checkbox("Disable Menu", static.disable_menu) # Child 1: no border, enable horizontal scrollbar - window_flags = imgui.WindowFlags_.horizontal_scrollbar.value + window_flags = imgui.WindowFlags_.horizontal_scrollbar if static.disable_mouse_wheel: - window_flags |= imgui.WindowFlags_.no_scroll_with_mouse.value + window_flags |= imgui.WindowFlags_.no_scroll_with_mouse imgui.begin_child("ChildL", ImVec2(imgui.get_content_region_avail().x * 0.5, 260), False, window_flags) for i in range(100): imgui.text("%04d: scrollable region" % i) @@ -1899,19 +1899,19 @@ def show_demo_window_layout(): # Child 2: rounded border if hasattr(static, "disable_mouse_wheel"): static.disable_mouse_wheel = False if hasattr(static, "disable_menu"): static.disable_menu = False - window_flags = imgui.WindowFlags_.none.value + window_flags = imgui.WindowFlags_.none if static.disable_mouse_wheel: - window_flags |= imgui.WindowFlags_.no_scroll_with_mouse.value + window_flags |= imgui.WindowFlags_.no_scroll_with_mouse if not static.disable_menu: - window_flags |= imgui.WindowFlags_.menu_bar.value - imgui.push_style_var(imgui.StyleVar_.child_rounding.value, 5.0) + window_flags |= imgui.WindowFlags_.menu_bar + imgui.push_style_var(imgui.StyleVar_.child_rounding, 5.0) if imgui.begin_child("ChildR", ImVec2(0, 260), True, window_flags): if not static.disable_menu and imgui.begin_menu_bar(): if imgui.begin_menu("Menu"): show_example_menu_file() imgui.end_menu() imgui.end_menu_bar() - if 
imgui.begin_table("split", 2, flags=(imgui.TableFlags_.resizable.value | imgui.TableFlags_.no_saved_settings.value)): + if imgui.begin_table("split", 2, flags=(imgui.TableFlags_.resizable | imgui.TableFlags_.no_saved_settings)): for i in range(100): buf = f"{i:03d}" imgui.table_next_column() @@ -2127,7 +2127,7 @@ def show_demo_window_layout(): names = ["Top", "25%", "Center", "75%", "Bottom"] imgui.text_unformatted(names[i]) - child_flags = imgui.WindowFlags_.menu_bar.value if static.enable_extra_decorations else 0 + child_flags = imgui.WindowFlags_.menu_bar if static.enable_extra_decorations else 0 child_id = imgui.get_id(f"{i}") child_is_visible = imgui.begin_child(child_id, ImVec2(child_w, 200.0), True, child_flags) if imgui.begin_menu_bar(): @@ -2162,7 +2162,7 @@ def show_demo_window_layout(): imgui.push_id("##HorizontalScrolling") for i in range(5): child_height = imgui.get_text_line_height() + style.scrollbar_size + style.window_padding.y * 2.0 - child_flags = imgui.WindowFlags_.horizontal_scrollbar.value | (imgui.WindowFlags_.always_vertical_scrollbar.value if static.enable_extra_decorations else 0) + child_flags = imgui.WindowFlags_.horizontal_scrollbar | (imgui.WindowFlags_.always_vertical_scrollbar if static.enable_extra_decorations else 0) child_id = imgui.get_id(str(i)) child_is_visible = imgui.begin_child(child_id, ImVec2(-100, child_height), True, child_flags) if scroll_to_off: @@ -2304,16 +2304,16 @@ def show_demo_window_popups(): # Always center this window when appearing center = imgui.get_main_viewport().get_center() - imgui.set_next_window_pos(center, imgui.Cond_.appearing.value, ImVec2(0.5, 0.5)) + imgui.set_next_window_pos(center, imgui.Cond_.appearing, ImVec2(0.5, 0.5)) if not hasattr(static, "dont_ask_me_next_time"): static.dont_ask_me_next_time = False # Equivalent to static bool dont_ask_me_next_time = false; - if imgui.begin_popup_modal("Delete?", None, imgui.WindowFlags_.always_auto_resize.value)[0]: + if 
imgui.begin_popup_modal("Delete?", None, imgui.WindowFlags_.always_auto_resize)[0]: imgui.text("All those beautiful files will be deleted.\nThis operation cannot be undone!") imgui.separator() - imgui.push_style_var(imgui.StyleVar_.frame_padding.value, ImVec2(0, 0)) + imgui.push_style_var(imgui.StyleVar_.frame_padding, ImVec2(0, 0)) _, static.dont_ask_me_next_time = imgui.checkbox("Don't ask me next time", static.dont_ask_me_next_time) imgui.pop_style_var() @@ -2327,7 +2327,7 @@ def show_demo_window_popups(): if imgui.button("Stacked modals.."): imgui.open_popup("Stacked 1") - if imgui.begin_popup_modal("Stacked 1", None, imgui.WindowFlags_.menu_bar.value)[0]: + if imgui.begin_popup_modal("Stacked 1", None, imgui.WindowFlags_.menu_bar)[0]: if imgui.begin_menu_bar(): if imgui.begin_menu("File"): if imgui.menu_item_simple("Some menu item"): @@ -2375,7 +2375,7 @@ def show_demo_window_inputs(): # Display inputs submitted to ImGuiIO IMGUI_DEMO_MARKER("Inputs & Focus/Inputs") - imgui.set_next_item_open(True, imgui.Cond_.once.value) + imgui.set_next_item_open(True, imgui.Cond_.once) if imgui.tree_node("Inputs"): help_marker( "This is a simplified view. 
See more detailed input state:\n" @@ -2396,7 +2396,7 @@ def show_demo_window_inputs(): # Display ImGuiIO output flags IMGUI_DEMO_MARKER("Inputs & Focus/Outputs") - imgui.set_next_item_open(True, imgui.Cond_.once.value) + imgui.set_next_item_open(True, imgui.Cond_.once) if imgui.tree_node("Outputs"): help_marker( "The value of io.WantCaptureMouse and io.WantCaptureKeyboard are normally set by Dear ImGui " @@ -2421,11 +2421,11 @@ def show_demo_window_inputs(): capture_override_keyboard = -1 capture_override_desc = ["None", "Set to false", "Set to true"] imgui.set_next_item_width(imgui.get_font_size() * 15) - imgui.slider_int("SetNextFrameWantCaptureMouse() on hover", capture_override_mouse, -1, +1, capture_override_desc[capture_override_mouse + 1], imgui.SliderFlags_.always_clamp.value) + imgui.slider_int("SetNextFrameWantCaptureMouse() on hover", capture_override_mouse, -1, +1, capture_override_desc[capture_override_mouse + 1], imgui.SliderFlags_.always_clamp) imgui.set_next_item_width(imgui.get_font_size() * 15) - imgui.slider_int("SetNextFrameWantCaptureKeyboard() on hover", capture_override_keyboard, -1, +1, capture_override_desc[capture_override_keyboard + 1], imgui.SliderFlags_.always_clamp.value) + imgui.slider_int("SetNextFrameWantCaptureKeyboard() on hover", capture_override_keyboard, -1, +1, capture_override_desc[capture_override_keyboard + 1], imgui.SliderFlags_.always_clamp) - imgui.color_button("##panel", ImVec4(0.7, 0.1, 0.7, 1.0), imgui.ColorEditFlags_.no_tooltip.value | imgui.ColorEditFlags_.no_drag_drop.value, ImVec2(128.0, 96.0)) # Dummy item + imgui.color_button("##panel", ImVec4(0.7, 0.1, 0.7, 1.0), imgui.ColorEditFlags_.no_tooltip | imgui.ColorEditFlags_.no_drag_drop, ImVec2(128.0, 96.0)) # Dummy item if imgui.is_item_hovered() and capture_override_mouse != -1: imgui.set_next_frame_want_capture_mouse(capture_override_mouse == 1) if imgui.is_item_hovered() and capture_override_keyboard != -1: @@ -2516,7 +2516,7 @@ def show_demo_window_inputs(): 
imgui.button("Drag Me") cur_pos = imgui.get_cursor_screen_pos() if imgui.is_item_active(): - imgui.get_foreground_draw_list().add_line(cur_pos, io.mouse_pos, imgui.get_color_u32(imgui.Col_.button.value), 4.0) # Draw a line between the button and the mouse cursor + imgui.get_foreground_draw_list().add_line(cur_pos, io.mouse_pos, imgui.get_color_u32(imgui.Col_.button), 4.0) # Draw a line between the button and the mouse cursor # Drag operations gets "unlocked" when the mouse has moved past a certain threshold # (the default threshold is stored in io.MouseDragThreshold). You can request a lower or higher @@ -2534,8 +2534,8 @@ def show_demo_window_inputs(): # Make the UI compact because there are so many fields def push_style_compact(): style = imgui.get_style() - imgui.push_style_var(imgui.StyleVar_.frame_padding.value, ImVec2(style.frame_padding.x, style.frame_padding.y * 0.60)) - imgui.push_style_var(imgui.StyleVar_.item_spacing.value, ImVec2(style.item_spacing.x, style.item_spacing.y * 0.60)) + imgui.push_style_var(imgui.StyleVar_.frame_padding, ImVec2(style.frame_padding.x, style.frame_padding.y * 0.60)) + imgui.push_style_var(imgui.StyleVar_.item_spacing, ImVec2(style.item_spacing.x, style.item_spacing.y * 0.60)) def pop_style_compact(): @@ -2570,7 +2570,7 @@ def show_demo_window_tables(): help_marker("Disable the indenting of tree nodes so demo tables can use the full window width.") imgui.separator() if static.disable_indent: - imgui.push_style_var(imgui.StyleVar_.indent_spacing.value, 0.0) + imgui.push_style_var(imgui.StyleVar_.indent_spacing, 0.0) # About Styling of tables # Most settings are configured on a per-table basis via the flags passed to BeginTable() and TableSetupColumns APIs. 
@@ -2639,31 +2639,31 @@ class ContentsType: CT_FillButton = 1 if not hasattr(static, "bb_flags"): - static.bb_flags = imgui.TableFlags_.borders.value | imgui.TableFlags_.row_bg.value + static.bb_flags = imgui.TableFlags_.borders | imgui.TableFlags_.row_bg static.display_headers = False static.contents_type = ContentsType.CT_Text push_style_compact() - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_RowBg", static.bb_flags, imgui.TableFlags_.row_bg.value) - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_Borders", static.bb_flags, imgui.TableFlags_.borders.value) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_RowBg", static.bb_flags, imgui.TableFlags_.row_bg) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_Borders", static.bb_flags, imgui.TableFlags_.borders) imgui.same_line() help_marker("ImGuiTableFlags_Borders\n = ImGuiTableFlags_BordersInnerV\n | ImGuiTableFlags_BordersOuterV\n | ImGuiTableFlags_BordersInnerV\n | ImGuiTableFlags_BordersOuterH") imgui.indent() - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersH", static.bb_flags, imgui.TableFlags_.borders_h.value) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersH", static.bb_flags, imgui.TableFlags_.borders_h) imgui.indent() - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersOuterH", static.bb_flags, imgui.TableFlags_.borders_outer_h.value) - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersInnerH", static.bb_flags, imgui.TableFlags_.borders_inner_h.value) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersOuterH", static.bb_flags, imgui.TableFlags_.borders_outer_h) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersInnerH", static.bb_flags, imgui.TableFlags_.borders_inner_h) imgui.unindent() - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersV", static.bb_flags, imgui.TableFlags_.borders_v.value) + _, static.bb_flags = 
imgui.checkbox_flags("ImGuiTableFlags_BordersV", static.bb_flags, imgui.TableFlags_.borders_v) imgui.indent() - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersOuterV", static.bb_flags, imgui.TableFlags_.borders_outer_v.value) - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersInnerV", static.bb_flags, imgui.TableFlags_.borders_inner_v.value) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersOuterV", static.bb_flags, imgui.TableFlags_.borders_outer_v) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersInnerV", static.bb_flags, imgui.TableFlags_.borders_inner_v) imgui.unindent() - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersOuter", static.bb_flags, imgui.TableFlags_.borders_outer.value) - _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersInner", static.bb_flags, imgui.TableFlags_.borders_inner.value) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersOuter", static.bb_flags, imgui.TableFlags_.borders_outer) + _, static.bb_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersInner", static.bb_flags, imgui.TableFlags_.borders_inner) imgui.unindent() imgui.align_text_to_frame_padding() @@ -2673,7 +2673,7 @@ class ContentsType: imgui.same_line() _, static.contents_type = imgui.radio_button("FillButton", static.contents_type, ContentsType.CT_FillButton) _, static.display_headers = imgui.checkbox("Display headers", static.display_headers) - _, static.flags = imgui.checkbox_flags("ImGuiTableFlags_NoBordersInBody", static.bb_flags, imgui.TableFlags_.no_borders_in_body.value); imgui.same_line(); help_marker("Disable vertical borders in columns Body (borders will always appear in Headers)") + _, static.flags = imgui.checkbox_flags("ImGuiTableFlags_NoBordersInBody", static.bb_flags, imgui.TableFlags_.no_borders_in_body); imgui.same_line(); help_marker("Disable vertical borders in columns Body (borders will always appear in Headers)") pop_style_compact() 
if imgui.begin_table("table1", 3, static.bb_flags): @@ -2704,14 +2704,14 @@ class ContentsType: # By default, if we don't enable ScrollX, the sizing policy for each column is "Stretch". # All columns maintain a sizing weight, and they will occupy all available width. if not hasattr(static, "rs_flags"): - static.rs_flags = (imgui.TableFlags_.sizing_stretch_same.value | - imgui.TableFlags_.resizable.value | - imgui.TableFlags_.borders_outer.value | - imgui.TableFlags_.borders_v.value | - imgui.TableFlags_.context_menu_in_body.value) + static.rs_flags = (imgui.TableFlags_.sizing_stretch_same | + imgui.TableFlags_.resizable | + imgui.TableFlags_.borders_outer | + imgui.TableFlags_.borders_v | + imgui.TableFlags_.context_menu_in_body) push_style_compact() - _, static.rs_flags = imgui.checkbox_flags("ImGuiTableFlags_Resizable", static.rs_flags, imgui.TableFlags_.resizable.value) - _, static.rs_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersV", static.rs_flags, imgui.TableFlags_.borders_v.value) + _, static.rs_flags = imgui.checkbox_flags("ImGuiTableFlags_Resizable", static.rs_flags, imgui.TableFlags_.resizable) + _, static.rs_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersV", static.rs_flags, imgui.TableFlags_.borders_v) imgui.same_line() help_marker("Using the _Resizable flag automatically enables the _BordersInnerV flag as well, this is why the resize borders are still showing when unchecking this.") pop_style_compact() @@ -2739,12 +2739,12 @@ class ContentsType: "Double-click a column border to auto-fit the column to its contents.") push_style_compact() if not hasattr(static, "rf_flags"): - static.rf_flags = (imgui.TableFlags_.sizing_fixed_fit.value | - imgui.TableFlags_.resizable.value | - imgui.TableFlags_.borders_outer.value | - imgui.TableFlags_.borders_v.value | - imgui.TableFlags_.context_menu_in_body.value) - _, static.rf_flags = imgui.checkbox_flags("ImGuiTableFlags_NoHostExtendX", static.rf_flags, imgui.TableFlags_.no_host_extend_x.value) + 
static.rf_flags = (imgui.TableFlags_.sizing_fixed_fit | + imgui.TableFlags_.resizable | + imgui.TableFlags_.borders_outer | + imgui.TableFlags_.borders_v | + imgui.TableFlags_.context_menu_in_body) + _, static.rf_flags = imgui.checkbox_flags("ImGuiTableFlags_NoHostExtendX", static.rf_flags, imgui.TableFlags_.no_host_extend_x) pop_style_compact() if imgui.begin_table("table1", 3, static.rf_flags): @@ -2764,17 +2764,17 @@ class ContentsType: "Using TableSetupColumn() to alter resizing policy on a per-column basis.\n\n" "When combining Fixed and Stretch columns, generally you only want one, maybe two trailing columns to use _WidthStretch.") if not hasattr(static, "rm_flags"): - static.rm_flags = (imgui.TableFlags_.sizing_fixed_fit.value | - imgui.TableFlags_.row_bg.value | - imgui.TableFlags_.borders.value | - imgui.TableFlags_.resizable.value | - imgui.TableFlags_.reorderable.value | - imgui.TableFlags_.hideable.value) + static.rm_flags = (imgui.TableFlags_.sizing_fixed_fit | + imgui.TableFlags_.row_bg | + imgui.TableFlags_.borders | + imgui.TableFlags_.resizable | + imgui.TableFlags_.reorderable | + imgui.TableFlags_.hideable) if imgui.begin_table("table1", 3, static.rm_flags): - imgui.table_setup_column("AAA", imgui.TableColumnFlags_.width_fixed.value) - imgui.table_setup_column("BBB", imgui.TableColumnFlags_.width_fixed.value) - imgui.table_setup_column("CCC", imgui.TableColumnFlags_.width_stretch.value) + imgui.table_setup_column("AAA", imgui.TableColumnFlags_.width_fixed) + imgui.table_setup_column("BBB", imgui.TableColumnFlags_.width_fixed) + imgui.table_setup_column("CCC", imgui.TableColumnFlags_.width_stretch) imgui.table_headers_row() for row in range(5): imgui.table_next_row() @@ -2784,12 +2784,12 @@ class ContentsType: imgui.end_table() if imgui.begin_table("table2", 6, static.rm_flags): - imgui.table_setup_column("AAA", imgui.TableColumnFlags_.width_fixed.value) - imgui.table_setup_column("BBB", imgui.TableColumnFlags_.width_fixed.value) - 
imgui.table_setup_column("CCC", imgui.TableColumnFlags_.width_fixed.value | imgui.TableColumnFlags_.default_hide.value) - imgui.table_setup_column("DDD", imgui.TableColumnFlags_.width_stretch.value) - imgui.table_setup_column("EEE", imgui.TableColumnFlags_.width_stretch.value) - imgui.table_setup_column("FFF", imgui.TableColumnFlags_.width_stretch.value | imgui.TableColumnFlags_.default_hide.value) + imgui.table_setup_column("AAA", imgui.TableColumnFlags_.width_fixed) + imgui.table_setup_column("BBB", imgui.TableColumnFlags_.width_fixed) + imgui.table_setup_column("CCC", imgui.TableColumnFlags_.width_fixed | imgui.TableColumnFlags_.default_hide) + imgui.table_setup_column("DDD", imgui.TableColumnFlags_.width_stretch) + imgui.table_setup_column("EEE", imgui.TableColumnFlags_.width_stretch) + imgui.table_setup_column("FFF", imgui.TableColumnFlags_.width_stretch | imgui.TableColumnFlags_.default_hide) imgui.table_headers_row() for row in range(5): imgui.table_next_row() @@ -2807,21 +2807,21 @@ class ContentsType: "Click and drag column headers to reorder columns.\n\n" "Right-click on a header to open a context menu.") if not hasattr(static, "rh_flags"): - static.rh_flags = (imgui.TableFlags_.resizable.value | - imgui.TableFlags_.reorderable.value | - imgui.TableFlags_.hideable.value | - imgui.TableFlags_.borders_outer.value | - imgui.TableFlags_.borders_v.value) + static.rh_flags = (imgui.TableFlags_.resizable | + imgui.TableFlags_.reorderable | + imgui.TableFlags_.hideable | + imgui.TableFlags_.borders_outer | + imgui.TableFlags_.borders_v) push_style_compact() - _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_Resizable", static.rh_flags, imgui.TableFlags_.resizable.value) - _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_Reorderable", static.rh_flags, imgui.TableFlags_.reorderable.value) - _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_Hideable", static.rh_flags, imgui.TableFlags_.hideable.value) - _, static.rh_flags = 
imgui.checkbox_flags("ImGuiTableFlags_NoBordersInBody", static.rh_flags, imgui.TableFlags_.no_borders_in_body.value) - _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_NoBordersInBodyUntilResize", static.rh_flags, imgui.TableFlags_.no_borders_in_body_until_resize.value) + _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_Resizable", static.rh_flags, imgui.TableFlags_.resizable) + _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_Reorderable", static.rh_flags, imgui.TableFlags_.reorderable) + _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_Hideable", static.rh_flags, imgui.TableFlags_.hideable) + _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_NoBordersInBody", static.rh_flags, imgui.TableFlags_.no_borders_in_body) + _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_NoBordersInBodyUntilResize", static.rh_flags, imgui.TableFlags_.no_borders_in_body_until_resize) imgui.same_line() help_marker("Disable vertical borders in columns Body until hovered for resize (borders will always appear in Headers)") - _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_HighlightHoveredColumn", static.rh_flags, imgui.TableFlags_.highlight_hovered_column.value) + _, static.rh_flags = imgui.checkbox_flags("ImGuiTableFlags_HighlightHoveredColumn", static.rh_flags, imgui.TableFlags_.highlight_hovered_column) pop_style_compact() if imgui.begin_table("table1", 3, static.rh_flags): @@ -2839,7 +2839,7 @@ class ContentsType: imgui.end_table() # Use outer_size.x == 0.0f instead of default to make the table as tight as possible (only valid when no scrolling and no stretch column) - if imgui.begin_table("table2", 3, static.rh_flags | imgui.TableFlags_.sizing_fixed_fit.value, ImVec2(0.0, 0.0)): + if imgui.begin_table("table2", 3, static.rh_flags | imgui.TableFlags_.sizing_fixed_fit, ImVec2(0.0, 0.0)): imgui.table_setup_column("One") imgui.table_setup_column("Two") imgui.table_setup_column("Three") @@ -2868,21 +2868,21 @@ class 
ContentsType: "In this demo we don't show horizontal borders to emphasize how they don't affect default horizontal padding.") if not hasattr(static, "padding_flags"): - static.padding_flags = (imgui.TableFlags_.borders_v.value) + static.padding_flags = (imgui.TableFlags_.borders_v) static.show_headers = False push_style_compact() - _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_PadOuterX", static.padding_flags, imgui.TableFlags_.pad_outer_x.value) + _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_PadOuterX", static.padding_flags, imgui.TableFlags_.pad_outer_x) imgui.same_line() help_marker("Enable outer-most padding (default if ImGuiTableFlags_BordersOuterV is set)") - _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_NoPadOuterX", static.padding_flags, imgui.TableFlags_.no_pad_outer_x.value) + _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_NoPadOuterX", static.padding_flags, imgui.TableFlags_.no_pad_outer_x) imgui.same_line() help_marker("Disable outer-most padding (default if ImGuiTableFlags_BordersOuterV is not set)") - _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_NoPadInnerX", static.padding_flags, imgui.TableFlags_.no_pad_inner_x.value) + _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_NoPadInnerX", static.padding_flags, imgui.TableFlags_.no_pad_inner_x) imgui.same_line() help_marker("Disable inner padding between columns (double inner padding if BordersOuterV is on, single inner padding if BordersOuterV is off)") - _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersOuterV", static.padding_flags, imgui.TableFlags_.borders_outer_v.value) - _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersInnerV", static.padding_flags, imgui.TableFlags_.borders_inner_v.value) + _, static.padding_flags = imgui.checkbox_flags("ImGuiTableFlags_BordersOuterV", static.padding_flags, imgui.TableFlags_.borders_outer_v) + _, static.padding_flags = 
imgui.checkbox_flags("ImGuiTableFlags_BordersInnerV", static.padding_flags, imgui.TableFlags_.borders_inner_v) _, static.show_headers = imgui.checkbox("show_headers", static.show_headers) pop_style_compact() @@ -2910,29 +2910,29 @@ class ContentsType: # FIXME-TABLE: Vertical border effectively not displayed the same way as horizontal one... help_marker("Setting style.CellPadding to (0,0) or a custom value.") if not hasattr(static, "padding_flags2"): - static.padding_flags2 = (imgui.TableFlags_.borders.value | imgui.TableFlags_.row_bg.value) + static.padding_flags2 = (imgui.TableFlags_.borders | imgui.TableFlags_.row_bg) static.cell_padding = [0.0, 0.0] static.show_widget_frame_bg = True push_style_compact() - _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_Borders", static.padding_flags2, imgui.TableFlags_.borders.value) - _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_BordersH", static.padding_flags2, imgui.TableFlags_.borders_h.value) - _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_BordersV", static.padding_flags2, imgui.TableFlags_.borders_v.value) - _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_BordersInner", static.padding_flags2, imgui.TableFlags_.borders_inner.value) - _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_BordersOuter", static.padding_flags2, imgui.TableFlags_.borders_outer.value) - _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_RowBg", static.padding_flags2, imgui.TableFlags_.row_bg.value) - _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_Resizable", static.padding_flags2, imgui.TableFlags_.resizable.value) + _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_Borders", static.padding_flags2, imgui.TableFlags_.borders) + _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_BordersH", static.padding_flags2, imgui.TableFlags_.borders_h) + _, static.padding_flags2 = 
imgui.checkbox_flags("ImGuiTableFlags_BordersV", static.padding_flags2, imgui.TableFlags_.borders_v) + _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_BordersInner", static.padding_flags2, imgui.TableFlags_.borders_inner) + _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_BordersOuter", static.padding_flags2, imgui.TableFlags_.borders_outer) + _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_RowBg", static.padding_flags2, imgui.TableFlags_.row_bg) + _, static.padding_flags2 = imgui.checkbox_flags("ImGuiTableFlags_Resizable", static.padding_flags2, imgui.TableFlags_.resizable) _, static.show_widget_frame_bg = imgui.checkbox("show_widget_frame_bg", static.show_widget_frame_bg) _, static.cell_padding = imgui.slider_float2("CellPadding", static.cell_padding, 0.0, 10.0, "%.0f") pop_style_compact() - imgui.push_style_var(imgui.StyleVar_.cell_padding.value, static.cell_padding) # type: ignore + imgui.push_style_var(imgui.StyleVar_.cell_padding, static.cell_padding) # type: ignore if imgui.begin_table("table_padding_2", 3, static.padding_flags2): if not hasattr(static, "text_bufs"): static.text_bufs = ["" for _ in range(3 * 5)] # Mini text storage for 3x5 cells static.init = True if not static.show_widget_frame_bg: - imgui.push_style_color(imgui.Col_.frame_bg.value, 0) + imgui.push_style_color(imgui.Col_.frame_bg, 0) for cell in range(3 * 5): imgui.table_next_column() if static.init: @@ -2956,22 +2956,22 @@ class ContentsType: rows_count = 12 if not hasattr(static, "table_flags_ah"): - static.table_flags_ah = (imgui.TableFlags_.sizing_fixed_fit.value | imgui.TableFlags_.scroll_x.value | imgui.TableFlags_.scroll_y.value | - imgui.TableFlags_.borders_outer.value | imgui.TableFlags_.borders_inner_h.value | - imgui.TableFlags_.hideable.value | imgui.TableFlags_.resizable.value | - imgui.TableFlags_.reorderable.value | imgui.TableFlags_.highlight_hovered_column.value) + static.table_flags_ah = (imgui.TableFlags_.sizing_fixed_fit 
| imgui.TableFlags_.scroll_x | imgui.TableFlags_.scroll_y | + imgui.TableFlags_.borders_outer | imgui.TableFlags_.borders_inner_h | + imgui.TableFlags_.hideable | imgui.TableFlags_.resizable | + imgui.TableFlags_.reorderable | imgui.TableFlags_.highlight_hovered_column) - static.column_flags_ah = imgui.TableColumnFlags_.angled_header.value | imgui.TableColumnFlags_.width_fixed.value + static.column_flags_ah = imgui.TableColumnFlags_.angled_header | imgui.TableColumnFlags_.width_fixed static.bools_ah = [False] * (columns_count * rows_count) # Dummy selection storage static.frozen_cols_ah = 1 static.frozen_rows_ah = 2 - _, static.table_flags_ah = imgui.checkbox_flags("_ScrollX", static.table_flags_ah, imgui.TableFlags_.scroll_x.value) - _, static.table_flags_ah = imgui.checkbox_flags("_ScrollY", static.table_flags_ah, imgui.TableFlags_.scroll_y.value) - _, static.table_flags_ah = imgui.checkbox_flags("_Resizable", static.table_flags_ah, imgui.TableFlags_.resizable.value) - _, static.table_flags_ah = imgui.checkbox_flags("_Sortable", static.table_flags_ah, imgui.TableFlags_.sortable.value) - _, static.table_flags_ah = imgui.checkbox_flags("_NoBordersInBody", static.table_flags_ah, imgui.TableFlags_.no_borders_in_body.value) - _, static.table_flags_ah = imgui.checkbox_flags("_HighlightHoveredColumn", static.table_flags_ah, imgui.TableFlags_.highlight_hovered_column.value) + _, static.table_flags_ah = imgui.checkbox_flags("_ScrollX", static.table_flags_ah, imgui.TableFlags_.scroll_x) + _, static.table_flags_ah = imgui.checkbox_flags("_ScrollY", static.table_flags_ah, imgui.TableFlags_.scroll_y) + _, static.table_flags_ah = imgui.checkbox_flags("_Resizable", static.table_flags_ah, imgui.TableFlags_.resizable) + _, static.table_flags_ah = imgui.checkbox_flags("_Sortable", static.table_flags_ah, imgui.TableFlags_.sortable) + _, static.table_flags_ah = imgui.checkbox_flags("_NoBordersInBody", static.table_flags_ah, imgui.TableFlags_.no_borders_in_body) + _, 
static.table_flags_ah = imgui.checkbox_flags("_HighlightHoveredColumn", static.table_flags_ah, imgui.TableFlags_.highlight_hovered_column) imgui.set_next_item_width(imgui.get_font_size() * 8) _, static.frozen_cols_ah = imgui.slider_int("Frozen columns", static.frozen_cols_ah, 0, 2) @@ -2980,7 +2980,7 @@ class ContentsType: _, static.frozen_rows_ah = imgui.slider_int("Frozen rows", static.frozen_rows_ah, 0, 2) _, static.column_flags_ah = imgui.checkbox_flags("Disable header contributing to column width", - static.column_flags_ah, imgui.TableColumnFlags_.no_header_width.value) + static.column_flags_ah, imgui.TableColumnFlags_.no_header_width) if imgui.tree_node("Style settings"): imgui.same_line() @@ -2989,15 +2989,19 @@ class ContentsType: "style.TableAngledHeadersAngle", imgui.get_style().table_angled_headers_angle, -50.0, +50.0) imgui.set_next_item_width(imgui.get_font_size() * 8) - _, imgui.get_style().table_angled_headers_text_align = imgui.slider_float2( + table_angled_headers_text_align_list = [imgui.get_style().table_angled_headers_text_align.x, imgui.get_style().table_angled_headers_text_align.y] + changed_it, table_angled_headers_text_align_list = imgui.slider_float2( "style.TableAngledHeadersTextAlign", - imgui.get_style().table_angled_headers_text_align, 0.0, 1.0, "%.2f") + table_angled_headers_text_align_list, 0.0, 1.0, "%.2f") + if changed_it: + imgui.get_style().table_angled_headers_text_align.x = table_angled_headers_text_align_list[0] + imgui.get_style().table_angled_headers_text_align.y = table_angled_headers_text_align_list[1] imgui.tree_pop() text_base_height = imgui.get_text_line_height_with_spacing() if imgui.begin_table("table_angled_headers", columns_count, static.table_flags_ah, (0.0, text_base_height * 12)): - imgui.table_setup_column(column_names[0], imgui.TableColumnFlags_.no_hide.value | imgui.TableColumnFlags_.no_reorder.value) + imgui.table_setup_column(column_names[0], imgui.TableColumnFlags_.no_hide | 
imgui.TableColumnFlags_.no_reorder) for n in range(1, columns_count): imgui.table_setup_column(column_names[n], static.column_flags_ah) @@ -3087,7 +3091,7 @@ def show_example_menu_file(): IMGUI_DEMO_MARKER("Examples/Menu/Colors") if imgui.begin_menu("Colors"): sz = imgui.get_text_line_height() - for i in range(imgui.Col_.count.value): + for i in range(imgui.Col_.count): name = imgui.get_style_color_name(i) p = imgui.get_cursor_screen_pos() imgui.get_window_draw_list().add_rect_filled(p, ImVec2(p.x + sz, p.y + sz), imgui.get_color_u32(i)) @@ -3238,9 +3242,9 @@ def _store_zone_boundings(self, line_number): # Check if the mouse is hovering over the zone_boundings def _is_mouse_hovering_zone_boundings(self, zone_boundings): if not imgui.is_window_hovered( - imgui.HoveredFlags_.allow_when_blocked_by_active_item.value | - imgui.HoveredFlags_.root_and_child_windows.value | - imgui.HoveredFlags_.no_popup_hierarchy.value): + imgui.HoveredFlags_.allow_when_blocked_by_active_item | + imgui.HoveredFlags_.root_and_child_windows | + imgui.HoveredFlags_.no_popup_hierarchy): return False y_mouse = imgui.get_mouse_pos().y x_mouse = imgui.get_mouse_pos().x @@ -3320,7 +3324,7 @@ def gui_demo(): show_demo_window(True) def gui_code(): - with imgui_ctx.push_font(imgui_md.get_code_font()): + with imgui_ctx.push_font(imgui_md.get_code_font().font): code_editor.render("Code") def navigate_to_marker(marker): diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_example_glfw_opengl3.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_example_glfw_opengl3.obsolete.py similarity index 91% rename from blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_example_glfw_opengl3.py rename to blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_example_glfw_opengl3.obsolete.py index e8873bf..0c2d73c 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_example_glfw_opengl3.py +++ 
b/blimgui/dist64/imgui_bundle/demos_python/demos_immapp/imgui_example_glfw_opengl3.obsolete.py @@ -1,266 +1,264 @@ -# An example of using Dear ImGui with Glfw in python -# Here, the backend rendering is implemented in C++: see calls to C++ native functions: -# imgui.backends.glfw_xxxx() -# - -# imgui_bundle can be used to run imgui with an almost line by line translation from C++ to python -# -# This file a direct adaptation of an imgui example (imgui/examples/example_glfw_opengl3/main.cpp) -# (see https://github.com/ocornut/imgui/blob/master/examples/example_glfw_opengl3/main.cpp) - - -import os.path -import sys -import platform -import OpenGL.GL as GL # type: ignore -from imgui_bundle import imgui - -# Always import glfw *after* imgui_bundle -# (since imgui_bundle will set the correct path where to look for the correct version of the glfw dynamic library) -import glfw # type: ignore - - -def glfw_error_callback(error: int, description: str) -> None: - sys.stderr.write(f"Glfw Error {error}: {description}\n") - - -def main() -> None: - # Setup window - glfw.set_error_callback(glfw_error_callback) - if not glfw.init(): - sys.exit(1) - - # Decide GL+GLSL versions - # #if defined(IMGUI_IMPL_OPENGL_ES2) - # // GL ES 2.0 + GLSL 100 - # const char* glsl_version = "#version 100"; - # glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2); - # glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0); - # glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API); - if platform.system() == "Darwin": - glsl_version = "#version 150" - glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3) - glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 2) - glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE) # // 3.2+ only - glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE) - else: - # GL 3.0 + GLSL 130 - glsl_version = "#version 130" - glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3) - glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 0) - # glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE) # // 
3.2+ only - # glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE) - - # Create window with graphics context - window = glfw.create_window( - 1280, 720, "Dear ImGui GLFW+OpenGL3 example", None, None - ) - if window is None: - sys.exit(1) - glfw.make_context_current(window) - glfw.swap_interval(1) # // Enable vsync - - # Setup Dear ImGui context - # IMGUI_CHECKVERSION(); - imgui.create_context() - io = imgui.get_io() - io.config_flags |= ( - imgui.ConfigFlags_.nav_enable_keyboard.value - ) # Enable Keyboard Controls - # io.config_flags |= imgui.ConfigFlags_.nav_enable_gamepad # Enable Gamepad Controls - io.config_flags |= imgui.ConfigFlags_.docking_enable.value # Enable docking - # io.config_flags |= imgui.ConfigFlags_.viewports_enable # Enable Multi-Viewport / Platform Windows - # io.config_viewports_no_auto_merge = True - # io.config_viewports_no_task_bar_icon = True - - # Setup Dear ImGui style - imgui.style_colors_dark() - # imgui.style_colors_classic() - - # When viewports are enabled we tweak WindowRounding/WindowBg so platform windows can look identical to regular ones. - style = imgui.get_style() - if io.config_flags & imgui.ConfigFlags_.viewports_enable.value: - style.window_rounding = 0.0 - window_bg_color = style.color_(imgui.Col_.window_bg.value) - window_bg_color.w = 1.0 - style.set_color_(imgui.Col_.window_bg.value, window_bg_color) - - # Setup Platform/Renderer backends - import ctypes - - # You need to transfer the window address to imgui.backends.glfw_init_for_opengl - # proceed as shown below to get it. - window_address = ctypes.cast(window, ctypes.c_void_p).value - assert window_address is not None - imgui.backends.glfw_init_for_opengl(window_address, True) - - imgui.backends.opengl3_init(glsl_version) - - # // Load Fonts - # // - If no fonts are loaded, dear imgui will use the default font. You can also load multiple fonts and use imgui.PushFont()/PopFont() to select them. 
- # // - AddFontFromFileTTF() will return the ImFont* so you can store it if you need to select the font among multiple. - # // - If the file cannot be loaded, the function will return NULL. Please handle those errors in your application (e.g. use an assertion, or display an error and quit). - # // - The fonts will be rasterized at a given size (w/ oversampling) and stored into a texture when calling ImFontAtlas::Build()/GetTexDataAsXXXX(), which ImGui_ImplXXXX_NewFrame below will call. - # // - Read 'docs/FONTS.md' for more instructions and details. - # // - Remember that in C/C++ if you want to include a backslash \ in a string literal you need to write a double backslash \\ ! - # //io.Fonts->AddFontDefault(); - # //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Roboto-Medium.ttf", 16.0f); - # //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Cousine-Regular.ttf", 15.0f); - # //io.Fonts->AddFontFromFileTTF("../../misc/fonts/DroidSans.ttf", 16.0f); - # //io.Fonts->AddFontFromFileTTF("../../misc/fonts/ProggyTiny.ttf", 10.0f); - # //ImFont* font = io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\ArialUni.ttf", 18.0f, NULL, io.Fonts->GetGlyphRangesJapanese()); - # //IM_ASSERT(font != NULL); - - # Load font example, with a merged font for icons - # ------------------------------------------------ - # i. Load default font - font_atlas = imgui.get_io().fonts - font_atlas.add_font_default() - this_dir = os.path.dirname(__file__) - font_size_pixel = 48.0 - # i. Load another font... - font_filename = this_dir + "/../../demos_assets/fonts/Akronim-Regular.ttf" - font_atlas = imgui.get_io().fonts - glyph_range = font_atlas.get_glyph_ranges_default() - custom_font = font_atlas.add_font_from_file_ttf( - filename=font_filename, - size_pixels=font_size_pixel, - glyph_ranges_as_int_list=glyph_range, - ) - # ii. ... 
And merge icons into the previous font - from imgui_bundle import icons_fontawesome - - font_filename = this_dir + "/../../demos_assets/fonts/fontawesome-webfont.ttf" - font_config = imgui.ImFontConfig() - font_config.merge_mode = True - icons_range = [icons_fontawesome.ICON_MIN_FA, icons_fontawesome.ICON_MAX_FA, 0] - custom_font = font_atlas.add_font_from_file_ttf( - filename=font_filename, - size_pixels=font_size_pixel, - glyph_ranges_as_int_list=icons_range, - font_cfg=font_config, - ) - - # Our state - show_demo_window: bool | None = True - show_another_window = False - clear_color = [0.45, 0.55, 0.60, 1.00] - f = 0.0 - counter = 0 - - # Main loop - while not glfw.window_should_close(window): - - # // Poll and handle events (inputs, window resize, etc.) - # // You can read the io.WantCaptureMouse, io.WantCaptureKeyboard flags to tell if dear imgui wants to use your inputs. - # // - When io.WantCaptureMouse is true, do not dispatch mouse input data to your main application, or clear/overwrite your copy of the mouse data. - # // - When io.WantCaptureKeyboard is true, do not dispatch keyboard input data to your main application, or clear/overwrite your copy of the keyboard data. - # // Generally you may always pass all inputs to dear imgui, and hide them from your application based on those two flags. - glfw.poll_events() - - # Start the Dear ImGui frame - imgui.backends.opengl3_new_frame() - imgui.backends.glfw_new_frame() - imgui.new_frame() - - # 1. Show the big demo window (Most of the sample code is in imgui.ShowDemoWindow()! You can browse its code to learn more about Dear ImGui!). - if show_demo_window: - _show_demo_window = imgui.show_demo_window(show_demo_window) - - # 2. Show a simple window that we create ourselves. We use a Begin/End pair to created a named window. 
- def show_simple_window() -> None: - nonlocal show_demo_window, show_another_window, clear_color, counter, f - # static float f = 0.0f; - # static int counter = 0; - imgui.begin( - "Hello, world!" - ) # Create a window called "Hello, world!" and append into it. - - # Demo custom font - _id = id(custom_font) - imgui.push_font(custom_font) - imgui.text("Hello " + icons_fontawesome.ICON_FA_SMILE) - imgui.pop_font() - - imgui.text( - "This is some useful text." - ) # Display some text (you can use a format strings too) - assert show_demo_window is not None - _, show_demo_window = imgui.checkbox( - "Demo Window", show_demo_window - ) # Edit bools storing our window open/close state - _, show_another_window = imgui.checkbox( - "Another Window", show_another_window - ) - - _, f = imgui.slider_float( - "float", f, 0.0, 1.0 - ) # Edit 1 float using a slider from 0.0f to 1.0f - _, clear_color = imgui.color_edit4( - "clear color", clear_color - ) # Edit 4 floats representing a color - - if imgui.button( - "Button" - ): # Buttons return true when clicked (most widgets return true when edited/activated) - counter += 1 - - imgui.same_line() - imgui.text(f"counter = {counter}") - - imgui.text( - f"Application average {1000.0 / imgui.get_io().framerate} ms/frame ({imgui.get_io().framerate:.1f} FPS)" - ) - imgui.end() - - show_simple_window() - - # 3. Show another simple window. 
- def gui_another_window() -> None: - nonlocal show_another_window - if show_another_window: - imgui.begin( - "Another Window", show_another_window - ) # Pass a pointer to our bool variable (the window will have a closing button that will clear the bool when clicked) - imgui.text("Hello from another window!") - if imgui.button("Close Me"): - show_another_window = False - imgui.end() - - gui_another_window() - - # Rendering - imgui.render() - display_w, display_h = glfw.get_framebuffer_size(window) - GL.glViewport(0, 0, display_w, display_h) - GL.glClearColor( - clear_color[0] * clear_color[3], - clear_color[1] * clear_color[3], - clear_color[2] * clear_color[3], - clear_color[3], - ) - GL.glClear(GL.GL_COLOR_BUFFER_BIT) - imgui.backends.opengl3_render_draw_data(imgui.get_draw_data()) - - # Update and Render additional Platform Windows - # (Platform functions may change the current OpenGL context, so we save/restore it to make it easier to paste this code elsewhere. - # For this specific demo app we could also call glfwMakeContextCurrent(window) directly) - if io.config_flags & imgui.ConfigFlags_.viewports_enable.value > 0: - backup_current_context = glfw.get_current_context() - imgui.update_platform_windows() - imgui.render_platform_windows_default() - glfw.make_context_current(backup_current_context) - - glfw.swap_buffers(window) - - # Cleanup - imgui.backends.opengl3_shutdown() - imgui.backends.glfw_shutdown() - imgui.destroy_context() - - glfw.destroy_window(window) - glfw.terminate() - - -if __name__ == "__main__": - main() +# Obsoleted since v1.92 in Python: use pure python backends instead +# +# An example of using Dear ImGui with Glfw in python +# Here, the backend rendering is implemented in C++: see calls to C++ native functions: +# imgui.backends.glfw_xxxx() +# + +# imgui_bundle can be used to run imgui with an almost line by line translation from C++ to python +# +# This file a direct adaptation of an imgui example 
(imgui/examples/example_glfw_opengl3/main.cpp) +# (see https://github.com/ocornut/imgui/blob/master/examples/example_glfw_opengl3/main.cpp) + + +import os.path +import sys +import platform +import OpenGL.GL as GL # pip install PyOpenGL +from imgui_bundle import imgui + +# Always import glfw *after* imgui_bundle +# (since imgui_bundle will set the correct path where to look for the correct version of the glfw dynamic library) +import glfw # pip install glfw + + +def glfw_error_callback(error: int, description: str) -> None: + sys.stderr.write(f"Glfw Error {error}: {description}\n") + + +def main() -> None: + # Setup window + glfw.set_error_callback(glfw_error_callback) + if not glfw.init(): + sys.exit(1) + + # Decide GL+GLSL versions + # #if defined(IMGUI_IMPL_OPENGL_ES2) + # // GL ES 2.0 + GLSL 100 + # const char* glsl_version = "#version 100"; + # glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2); + # glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0); + # glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API); + if platform.system() == "Darwin": + glsl_version = "#version 150" + glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3) + glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 2) + glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE) # // 3.2+ only + glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE) + else: + # GL 3.0 + GLSL 130 + glsl_version = "#version 130" + glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3) + glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 0) + # glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE) # // 3.2+ only + # glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE) + + # Create window with graphics context + window = glfw.create_window( + 1280, 720, "Dear ImGui GLFW+OpenGL3 example", None, None + ) + if window is None: + sys.exit(1) + glfw.make_context_current(window) + glfw.swap_interval(1) # // Enable vsync + + # Setup Dear ImGui context + # IMGUI_CHECKVERSION(); + imgui.create_context() + io = imgui.get_io() + io.config_flags 
|= ( + imgui.ConfigFlags_.nav_enable_keyboard + ) # Enable Keyboard Controls + # io.config_flags |= imgui.ConfigFlags_.nav_enable_gamepad # Enable Gamepad Controls + io.config_flags |= imgui.ConfigFlags_.docking_enable # Enable docking + # io.config_flags |= imgui.ConfigFlags_.viewports_enable # Enable Multi-Viewport / Platform Windows + # io.config_viewports_no_auto_merge = True + # io.config_viewports_no_task_bar_icon = True + + # Setup Dear ImGui style + imgui.style_colors_dark() + # imgui.style_colors_classic() + + # When viewports are enabled we tweak WindowRounding/WindowBg so platform windows can look identical to regular ones. + style = imgui.get_style() + if io.config_flags & imgui.ConfigFlags_.viewports_enable: + style.window_rounding = 0.0 + window_bg_color = style.color_(imgui.Col_.window_bg) + window_bg_color.w = 1.0 + style.set_color_(imgui.Col_.window_bg, window_bg_color) + + # Setup Platform/Renderer backends + import ctypes + + # You need to transfer the window address to imgui.backends.glfw_init_for_opengl + # proceed as shown below to get it. + window_address = ctypes.cast(window, ctypes.c_void_p).value + assert window_address is not None + imgui.backends.glfw_init_for_opengl(window_address, True) + + imgui.backends.opengl3_init(glsl_version) + + # // Load Fonts + # // - If no fonts are loaded, dear imgui will use the default font. You can also load multiple fonts and use imgui.PushFont()/PopFont() to select them. + # // - AddFontFromFileTTF() will return the ImFont* so you can store it if you need to select the font among multiple. + # // - If the file cannot be loaded, the function will return NULL. Please handle those errors in your application (e.g. use an assertion, or display an error and quit). + # // - The fonts will be rasterized at a given size (w/ oversampling) and stored into a texture when calling ImFontAtlas::Build()/GetTexDataAsXXXX(), which ImGui_ImplXXXX_NewFrame below will call. 
+ # // - Read 'docs/FONTS.md' for more instructions and details. + # // - Remember that in C/C++ if you want to include a backslash \ in a string literal you need to write a double backslash \\ ! + # //io.Fonts->AddFontDefault(); + # //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Roboto-Medium.ttf", 16.0f); + # //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Cousine-Regular.ttf", 15.0f); + # //io.Fonts->AddFontFromFileTTF("../../misc/fonts/DroidSans.ttf", 16.0f); + # //io.Fonts->AddFontFromFileTTF("../../misc/fonts/ProggyTiny.ttf", 10.0f); + # //ImFont* font = io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\ArialUni.ttf", 18.0f, NULL, io.Fonts->GetGlyphRangesJapanese()); + # //IM_ASSERT(font != NULL); + + # Load font example, with a merged font for icons + # ------------------------------------------------ + # i. Load default font + font_atlas = imgui.get_io().fonts + font_atlas.add_font_default() + this_dir = os.path.dirname(__file__) + font_size_pixel = 48.0 + # i. Load another font... + font_filename = this_dir + "/../../demos_assets/fonts/Akronim-Regular.ttf" + font_atlas = imgui.get_io().fonts + custom_font = font_atlas.add_font_from_file_ttf( + filename=font_filename, + size_pixels=font_size_pixel, + ) + # ii. ... And merge icons into the previous font + from imgui_bundle import icons_fontawesome + + font_filename = this_dir + "/../../demos_assets/fonts/fontawesome-webfont.ttf" + font_config = imgui.ImFontConfig() + font_config.merge_mode = True + custom_font = font_atlas.add_font_from_file_ttf( + filename=font_filename, + size_pixels=font_size_pixel, + font_cfg=font_config, + ) + + # Our state + show_demo_window: bool | None = True + show_another_window = False + clear_color = [0.45, 0.55, 0.60, 1.00] + f = 0.0 + counter = 0 + + # Main loop + while not glfw.window_should_close(window): + + # // Poll and handle events (inputs, window resize, etc.) 
+ # // You can read the io.WantCaptureMouse, io.WantCaptureKeyboard flags to tell if dear imgui wants to use your inputs. + # // - When io.WantCaptureMouse is true, do not dispatch mouse input data to your main application, or clear/overwrite your copy of the mouse data. + # // - When io.WantCaptureKeyboard is true, do not dispatch keyboard input data to your main application, or clear/overwrite your copy of the keyboard data. + # // Generally you may always pass all inputs to dear imgui, and hide them from your application based on those two flags. + glfw.poll_events() + + # Start the Dear ImGui frame + imgui.backends.opengl3_new_frame() + imgui.backends.glfw_new_frame() + imgui.new_frame() + + # 1. Show the big demo window (Most of the sample code is in imgui.ShowDemoWindow()! You can browse its code to learn more about Dear ImGui!). + if show_demo_window: + _show_demo_window = imgui.show_demo_window(show_demo_window) + + # 2. Show a simple window that we create ourselves. We use a Begin/End pair to created a named window. + def show_simple_window() -> None: + nonlocal show_demo_window, show_another_window, clear_color, counter, f + # static float f = 0.0f; + # static int counter = 0; + imgui.begin( + "Hello, world!" + ) # Create a window called "Hello, world!" and append into it. + + # # Demo custom font + _id = id(custom_font) + imgui.push_font(custom_font, custom_font.legacy_size) + imgui.text("Hello " + icons_fontawesome.ICON_FA_SMILE) + imgui.pop_font() + + imgui.text( + "This is some useful text." 
+ ) # Display some text (you can use a format strings too) + assert show_demo_window is not None + _, show_demo_window = imgui.checkbox( + "Demo Window", show_demo_window + ) # Edit bools storing our window open/close state + _, show_another_window = imgui.checkbox( + "Another Window", show_another_window + ) + + _, f = imgui.slider_float( + "float", f, 0.0, 1.0 + ) # Edit 1 float using a slider from 0.0f to 1.0f + _, clear_color = imgui.color_edit4( + "clear color", clear_color + ) # Edit 4 floats representing a color + + if imgui.button( + "Button" + ): # Buttons return true when clicked (most widgets return true when edited/activated) + counter += 1 + + imgui.same_line() + imgui.text(f"counter = {counter}") + + imgui.text( + f"Application average {1000.0 / imgui.get_io().framerate} ms/frame ({imgui.get_io().framerate:.1f} FPS)" + ) + imgui.end() + + show_simple_window() + + # 3. Show another simple window. + def gui_another_window() -> None: + nonlocal show_another_window + if show_another_window: + imgui.begin( + "Another Window", show_another_window + ) # Pass a pointer to our bool variable (the window will have a closing button that will clear the bool when clicked) + imgui.text("Hello from another window!") + if imgui.button("Close Me"): + show_another_window = False + imgui.end() + + gui_another_window() + + # Rendering + imgui.render() + display_w, display_h = glfw.get_framebuffer_size(window) + GL.glViewport(0, 0, display_w, display_h) + GL.glClearColor( + clear_color[0] * clear_color[3], + clear_color[1] * clear_color[3], + clear_color[2] * clear_color[3], + clear_color[3], + ) + GL.glClear(GL.GL_COLOR_BUFFER_BIT) + imgui.backends.opengl3_render_draw_data(imgui.get_draw_data()) + + # Update and Render additional Platform Windows + # (Platform functions may change the current OpenGL context, so we save/restore it to make it easier to paste this code elsewhere. 
+ # For this specific demo app we could also call glfwMakeContextCurrent(window) directly) + if io.config_flags & imgui.ConfigFlags_.viewports_enable > 0: + backup_current_context = glfw.get_current_context() + imgui.update_platform_windows() + imgui.render_platform_windows_default() + glfw.make_context_current(backup_current_context) + + glfw.swap_buffers(window) + + # Cleanup + imgui.backends.opengl3_shutdown() + imgui.backends.glfw_shutdown() + imgui.destroy_context() + + glfw.destroy_window(window) + glfw.terminate() + + +if __name__ == "__main__": + main() diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_display.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_display.py index 8eefe8c..83a0cd0 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_display.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_display.py @@ -1,4 +1,4 @@ -from imgui_bundle import immvision, immapp, imgui_md, ImVec2 +from imgui_bundle import immvision, immapp, imgui_md, ImVec2, imgui, hello_imgui from imgui_bundle.demos_python import demo_utils immvision.use_rgb_color_order() @@ -12,23 +12,35 @@ def demo_gui() -> None: assets_dir = demo_utils.demos_assets_folder() + "/images/" # Load images as numpy arrays - statics.bear = demo_utils.imread_pil(assets_dir + "bear_transparent.png", load_alpha=True) - statics.tennis = demo_utils.imread_pil(assets_dir + "tennis.jpg") + statics.bear = demo_utils.imread_demo(assets_dir + "bear_transparent.png", load_alpha=True) + statics.tennis = demo_utils.imread_demo(assets_dir + "tennis.jpg") statics.params = immvision.ImageParams() + bear_display_size = int(hello_imgui.em_size(15)) + statics.params.image_display_size = (bear_display_size, bear_display_size) statics.inited = True - imgui_md.render_unindented("immvision.image_display() will simply display an image") + imgui.begin_group() + imgui_md.render_unindented("# 
immvision.image_display()") + imgui_md.render_unindented("Displays an image (possibly resizable)") immvision.image_display_resizable( "Tennis", statics.tennis, size=statics.image_display_size ) + imgui.end_group() - imgui_md.render_unindented( - """ - immvision.image() will display an image, while providing lots of visualization options.
- Open the options panel by clicking on the settings button at the bottom right corner of the image""" - ) + imgui.same_line() + + imgui.begin_group() + imgui_md.render_unindented("# immvision.image()") + imgui_md.render_unindented("Displays an image, while providing lots of visualization options.") immvision.image("Bear", statics.bear, statics.params) + imgui_md.render_unindented(""" + * Zoom in/out using the mouse wheel. + * Pixel values are displayed at high zoom levels. + * Pan the image by dragging it with the left mouse button + * Open settings via button (bottom right corner of the image) + """) + imgui.end_group() def main(): diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_inspector.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_inspector.py index 44bbde6..397ccca 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_inspector.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_inspector.py @@ -9,7 +9,7 @@ def fill_inspector(): os.path.dirname(__file__) image_files = ["dmla.jpg", "house.jpg", "tennis.jpg", "world.png"] for image_file in image_files: - img = demo_utils.imread_pil(f"{demo_utils.demos_assets_folder()}/images/{image_file}") + img = demo_utils.imread_demo(f"{demo_utils.demos_assets_folder()}/images/{image_file}") immvision.inspector_add_image(img, legend=image_file) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_link.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_link.py index 7b460a8..740224a 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_link.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_link.py @@ -8,7 +8,7 @@ immvision.use_rgb_color_order() this_dir = os.path.dirname(__file__) -image = demo_utils.imread_pil(demo_utils.demos_assets_folder() + "/images/tennis.jpg") +image = 
demo_utils.imread_demo(demo_utils.demos_assets_folder() + "/images/tennis.jpg") channels = [np.ascontiguousarray(image[:, :, i]) for i in range(image.shape[2])] params_rgb = immvision.ImageParams() diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_process.py b/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_process.py index 2121a98..9d85b3a 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_process.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_immvision/demo_immvision_process.py @@ -107,7 +107,7 @@ class AppState: immvision_params_sobel: immvision.ImageParams def __init__(self, image_file: str): - self.image = demo_utils.imread_pil(image_file) + self.image = demo_utils.imread_demo(image_file) self.sobel_params = SobelParams() self.image_sobel = compute_sobel(self.image, self.sobel_params) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_implot/demo_implot_stock.py b/blimgui/dist64/imgui_bundle/demos_python/demos_implot/demo_implot_stock.py index 2dbdfce..e7e14cc 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_implot/demo_implot_stock.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_implot/demo_implot_stock.py @@ -1,14 +1,15 @@ # pip install yfinance -import yfinance as yf +import yfinance as yf # type: ignore import numpy as np +import numpy.typing as npt from dataclasses import dataclass from imgui_bundle import implot, ImVec4, ImVec2, imgui, imgui_ctx, IM_COL32, immapp -from typing import Optional +from typing import Optional, TypeAlias from functools import cached_property # ArrayFloat: 1D array of float64 -ArrayFloat = np.ndarray[(int,), np.float64] +ArrayFloat: TypeAlias = npt.NDArray[np.float64] # shape (N,) TICKER_IDS = ["AAPL", "MSFT", "GOOGL", "AMZN", "TSLA", "META", "NFLX", "NVDA", "AMD", "INTC"] PERIOD_RANGES = ["1mo", "3mo", "6mo", "12mo", "24mo", "60mo"] @@ -65,10 +66,10 @@ def plot_candlestick( dt = 
datetime.fromtimestamp(xs[idx]) date_str = dt.strftime("%Y-%m-%d") imgui.text(f"Day: {date_str}") - imgui.text(f"Open: ${opens[idx]:.2f}") - imgui.text(f"Close: ${closes[idx]:.2f}") - imgui.text(f"Low: ${lows[idx]:.2f}") - imgui.text(f"High: ${highs[idx]:.2f}") + imgui.text(f"Open: $ {opens[idx]:.2f}") + imgui.text(f"Close: $ {closes[idx]:.2f}") + imgui.text(f"Low: $ {lows[idx]:.2f}") + imgui.text(f"High: $ {highs[idx]:.2f}") # begin plot item if implot.internal.begin_item(label_id): @@ -219,21 +220,21 @@ def gui(self): self._gui_fetch() if self.fetch_error: - imgui.text_colored(f"Error: {self.fetch_error}", 1.0, 0.4, 0.4) + imgui.text_colored(ImVec4(1.0, 0.4, 0.4, 1.0), f"Error: {self.fetch_error}") if self.stock_data: implot.get_style().use_local_time = False if implot.begin_subplots("##Candlestick + Volume", 3, 1, ImVec2(-1, -1), - implot.SubplotFlags_.link_all_x.value): + implot.SubplotFlags_.link_all_x): # === Candlestick plot === if implot.begin_plot("Price", ImVec2(-1, 0)): - implot.setup_axis_scale(implot.ImAxis_.x1.value, implot.Scale_.time.value) - implot.setup_axis_format(implot.ImAxis_.y1.value, "$%.0f") - x_axis_flags = implot.AxisFlags_.auto_fit.value if self.needs_refresh_x_extent else 0 - implot.setup_axis(implot.ImAxis_.x1.value, "##Date", x_axis_flags) - y_axis_flags = implot.AxisFlags_.auto_fit.value if self.needs_refresh_x_extent else 0 - implot.setup_axis(implot.ImAxis_.y1.value, "Price", y_axis_flags) + implot.setup_axis_scale(implot.ImAxis_.x1, implot.Scale_.time) + implot.setup_axis_format(implot.ImAxis_.y1, "$%.0f") + x_axis_flags = implot.AxisFlags_.auto_fit if self.needs_refresh_x_extent else 0 + implot.setup_axis(implot.ImAxis_.x1, "##Date", x_axis_flags) + y_axis_flags = implot.AxisFlags_.auto_fit if self.needs_refresh_x_extent else 0 + implot.setup_axis(implot.ImAxis_.y1, "Price", y_axis_flags) plot_candlestick( self.loaded_ticker, self.stock_data.timestamps, @@ -249,10 +250,10 @@ def gui(self): # === Volume subplot === if 
implot.begin_plot("Volume", ImVec2(-1, 0)): - implot.setup_axis_scale(implot.ImAxis_.x1.value, implot.Scale_.time.value) - implot.setup_axis_format(implot.ImAxis_.y1.value, "%.0f") - y_axis_flags = implot.AxisFlags_.auto_fit.value if self.needs_refresh_x_extent else 0 - implot.setup_axis(implot.ImAxis_.y1.value, "Volume", y_axis_flags) + implot.setup_axis_scale(implot.ImAxis_.x1, implot.Scale_.time) + implot.setup_axis_format(implot.ImAxis_.y1, "%.0f") + y_axis_flags = implot.AxisFlags_.auto_fit if self.needs_refresh_x_extent else 0 + implot.setup_axis(implot.ImAxis_.y1, "Volume", y_axis_flags) implot.plot_bars(f"{self.loaded_ticker} Vol", self.stock_data.timestamps, self.stock_data.volumes, 60 * 60 * 24 * 0.8) implot.plot_line(f"{self.loaded_ticker}-Vol EMA 20", self.stock_data.timestamps, self.stock_data.volume_ema_20) @@ -261,10 +262,10 @@ def gui(self): # === RSI subplot === if implot.begin_plot("RSI", ImVec2(-1, 0)): - implot.setup_axis_scale(implot.ImAxis_.x1.value, implot.Scale_.time.value) - implot.setup_axis_format(implot.ImAxis_.y1.value, "%.0f") - implot.setup_axis_limits(implot.ImAxis_.y1.value, 0, 100) # RSI range - implot.setup_axis(implot.ImAxis_.y1.value, "RSI") + implot.setup_axis_scale(implot.ImAxis_.x1, implot.Scale_.time) + implot.setup_axis_format(implot.ImAxis_.y1, "%.0f") + implot.setup_axis_limits(implot.ImAxis_.y1, 0, 100) # RSI range + implot.setup_axis(implot.ImAxis_.y1, "RSI") implot.plot_line(f"{self.loaded_ticker} RSI 14", self.stock_data.timestamps, self.stock_data.rsi_14) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_implot/implot_demo.py b/blimgui/dist64/imgui_bundle/demos_python/demos_implot/implot_demo.py index 5a29d06..59caa3a 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_implot/implot_demo.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_implot/implot_demo.py @@ -1,18 +1,35 @@ -# This file is an almost line by line transcription of implot_demo.cpp -# ( 
https://github.com/epezent/implot/blob/master/implot_demo.cpp ) +# This file is a line-by-line transcription of implot_demo.cpp +# (https://github.com/epezent/implot/blob/master/implot_demo.cpp) import time from imgui_bundle import imgui, immapp, implot, imgui_ctx, ImVec4, ImVec2, IM_COL32 import numpy as np from numpy.typing import NDArray import datetime +from typing import TypeAlias, Any + +FloatArray1D: TypeAlias = NDArray[np.floating[Any]] + + +def make_checkerboard_texture(size: int = 256, tile_size: int = 32) -> NDArray[np.uint8]: + """Create a checkerboard RGBA texture as a numpy array.""" + img = np.zeros((size, size, 4), dtype=np.uint8) + for y in range(size): + for x in range(size): + if ((x // tile_size) + (y // tile_size)) % 2 == 0: + color = (255, 255, 255, 255) + else: + color = (64, 64, 64, 255) + img[y, x] = color + return img + def plot_candlestick( label_id: str, - xs: NDArray, # x-axis values (timestamps as float) - opens: NDArray, - closes: NDArray, - lows: NDArray, - highs: NDArray, + xs: FloatArray1D, # x-axis values (timestamps as float) + opens: FloatArray1D, + closes: FloatArray1D, + lows: FloatArray1D, + highs: FloatArray1D, tooltip: bool, width_percent: float, positive_color: ImVec4 | None = None, @@ -54,7 +71,7 @@ def plot_candlestick( idx = np.searchsorted(xs, mouse.x) if 0 < idx < count: with imgui_ctx.begin_tooltip(): - dt = datetime.fromtimestamp(xs[idx]) + dt = datetime.fromtimestamp(float(xs[idx])) date_str = dt.strftime("%Y-%m-%d") imgui.text(f"Day: {date_str}") imgui.text(f"Open: ${opens[idx]:.2f}") @@ -70,14 +87,14 @@ def plot_candlestick( # fit data if requested if implot.internal.fit_this_frame(): for i in range(count): - implot.internal.fit_point(implot.Point(xs[i], lows[i])) - implot.internal.fit_point(implot.Point(xs[i], highs[i])) + implot.internal.fit_point(implot.Point(float(xs[i]), float(lows[i]))) + implot.internal.fit_point(implot.Point(float(xs[i]), float(highs[i]))) # render data for i in range(count): - open_pos = 
implot.plot_to_pixels(xs[i] - half_width, opens[i]) - close_pos = implot.plot_to_pixels(xs[i] + half_width, closes[i]) - low_pos = implot.plot_to_pixels(xs[i], lows[i]) - high_pos = implot.plot_to_pixels(xs[i], highs[i]) + open_pos = implot.plot_to_pixels(float(xs[i]) - half_width, float(opens[i])) + close_pos = implot.plot_to_pixels(float(xs[i]) + half_width, float(closes[i])) + low_pos = implot.plot_to_pixels(float(xs[i]), float(lows[i])) + high_pos = implot.plot_to_pixels(float(xs[i]), float(highs[i])) color = positive_color if opens[i] < closes[i] else negative_color color_u32 = imgui.get_color_u32(color) draw_list.add_line(low_pos, high_pos, color_u32) @@ -93,8 +110,8 @@ def demo_custom_plotters_and_tooltips(): if not hasattr(static, "initialized"): static.nb_days = 218 static.first_day = datetime.datetime(2019, 1, 1, 0, 0, 0).timestamp() - static.last_day = static.first_day + static.nb_days * 86400 - static.dates = np.array([static.first_day + i * 86400 for i in range(0, static.nb_days)]) + static.last_day = static.first_day + static.nb_days * 86400.0 + static.dates = np.array([static.first_day + i * 86400.0 for i in range(0, static.nb_days)]) static.opens = 
np.array([1284.7,1319.9,1318.7,1328,1317.6,1321.6,1314.3,1325,1319.3,1323.1,1324.7,1321.3,1323.5,1322,1281.3,1281.95,1311.1,1315,1314,1313.1,1331.9,1334.2,1341.3,1350.6,1349.8,1346.4,1343.4,1344.9,1335.6,1337.9,1342.5,1337,1338.6,1337,1340.4,1324.65,1324.35,1349.5,1371.3,1367.9,1351.3,1357.8,1356.1,1356,1347.6,1339.1,1320.6,1311.8,1314,1312.4,1312.3,1323.5,1319.1,1327.2,1332.1,1320.3,1323.1,1328,1330.9,1338,1333,1335.3,1345.2,1341.1,1332.5,1314,1314.4,1310.7,1314,1313.1,1315,1313.7,1320,1326.5,1329.2,1314.2,1312.3,1309.5,1297.4,1293.7,1277.9,1295.8,1295.2,1290.3,1294.2,1298,1306.4,1299.8,1302.3,1297,1289.6,1302,1300.7,1303.5,1300.5,1303.2,1306,1318.7,1315,1314.5,1304.1,1294.7,1293.7,1291.2,1290.2,1300.4,1284.2,1284.25,1301.8,1295.9,1296.2,1304.4,1323.1,1340.9,1341,1348,1351.4,1351.4,1343.5,1342.3,1349,1357.6,1357.1,1354.7,1361.4,1375.2,1403.5,1414.7,1433.2,1438,1423.6,1424.4,1418,1399.5,1435.5,1421.25,1434.1,1412.4,1409.8,1412.2,1433.4,1418.4,1429,1428.8,1420.6,1441,1460.4,1441.7,1438.4,1431,1439.3,1427.4,1431.9,1439.5,1443.7,1425.6,1457.5,1451.2,1481.1,1486.7,1512.1,1515.9,1509.2,1522.3,1513,1526.6,1533.9,1523,1506.3,1518.4,1512.4,1508.8,1545.4,1537.3,1551.8,1549.4,1536.9,1535.25,1537.95,1535.2,1556,1561.4,1525.6,1516.4,1507,1493.9,1504.9,1506.5,1513.1,1506.5,1509.7,1502,1506.8,1521.5,1529.8,1539.8,1510.9,1511.8,1501.7,1478,1485.4,1505.6,1511.6,1518.6,1498.7,1510.9,1510.8,1498.3,1492,1497.7,1484.8,1494.2,1495.6,1495.6,1487.5,1491.1,1495.1,1506.4]) static.highs = 
np.array([1284.75,1320.6,1327,1330.8,1326.8,1321.6,1326,1328,1325.8,1327.1,1326,1326,1323.5,1322.1,1282.7,1282.95,1315.8,1316.3,1314,1333.2,1334.7,1341.7,1353.2,1354.6,1352.2,1346.4,1345.7,1344.9,1340.7,1344.2,1342.7,1342.1,1345.2,1342,1350,1324.95,1330.75,1369.6,1374.3,1368.4,1359.8,1359,1357,1356,1353.4,1340.6,1322.3,1314.1,1316.1,1312.9,1325.7,1323.5,1326.3,1336,1332.1,1330.1,1330.4,1334.7,1341.1,1344.2,1338.8,1348.4,1345.6,1342.8,1334.7,1322.3,1319.3,1314.7,1316.6,1316.4,1315,1325.4,1328.3,1332.2,1329.2,1316.9,1312.3,1309.5,1299.6,1296.9,1277.9,1299.5,1296.2,1298.4,1302.5,1308.7,1306.4,1305.9,1307,1297.2,1301.7,1305,1305.3,1310.2,1307,1308,1319.8,1321.7,1318.7,1316.2,1305.9,1295.8,1293.8,1293.7,1304.2,1302,1285.15,1286.85,1304,1302,1305.2,1323,1344.1,1345.2,1360.1,1355.3,1363.8,1353,1344.7,1353.6,1358,1373.6,1358.2,1369.6,1377.6,1408.9,1425.5,1435.9,1453.7,1438,1426,1439.1,1418,1435,1452.6,1426.65,1437.5,1421.5,1414.1,1433.3,1441.3,1431.4,1433.9,1432.4,1440.8,1462.3,1467,1443.5,1444,1442.9,1447,1437.6,1440.8,1445.7,1447.8,1458.2,1461.9,1481.8,1486.8,1522.7,1521.3,1521.1,1531.5,1546.1,1534.9,1537.7,1538.6,1523.6,1518.8,1518.4,1514.6,1540.3,1565,1554.5,1556.6,1559.8,1541.9,1542.9,1540.05,1558.9,1566.2,1561.9,1536.2,1523.8,1509.1,1506.2,1532.2,1516.6,1519.7,1515,1519.5,1512.1,1524.5,1534.4,1543.3,1543.3,1542.8,1519.5,1507.2,1493.5,1511.4,1525.8,1522.2,1518.8,1515.3,1518,1522.3,1508,1501.5,1503,1495.5,1501.1,1497.9,1498.7,1492.1,1499.4,1506.9,1520.9]) static.lows = 
np.array([1282.85,1315,1318.7,1309.6,1317.6,1312.9,1312.4,1319.1,1319,1321,1318.1,1321.3,1319.9,1312,1280.5,1276.15,1308,1309.9,1308.5,1312.3,1329.3,1333.1,1340.2,1347,1345.9,1338,1340.8,1335,1332,1337.9,1333,1336.8,1333.2,1329.9,1340.4,1323.85,1324.05,1349,1366.3,1351.2,1349.1,1352.4,1350.7,1344.3,1338.9,1316.3,1308.4,1306.9,1309.6,1306.7,1312.3,1315.4,1319,1327.2,1317.2,1320,1323,1328,1323,1327.8,1331.7,1335.3,1336.6,1331.8,1311.4,1310,1309.5,1308,1310.6,1302.8,1306.6,1313.7,1320,1322.8,1311,1312.1,1303.6,1293.9,1293.5,1291,1277.9,1294.1,1286,1289.1,1293.5,1296.9,1298,1299.6,1292.9,1285.1,1288.5,1296.3,1297.2,1298.4,1298.6,1302,1300.3,1312,1310.8,1301.9,1292,1291.1,1286.3,1289.2,1289.9,1297.4,1283.65,1283.25,1292.9,1295.9,1290.8,1304.2,1322.7,1336.1,1341,1343.5,1345.8,1340.3,1335.1,1341.5,1347.6,1352.8,1348.2,1353.7,1356.5,1373.3,1398,1414.7,1427,1416.4,1412.7,1420.1,1396.4,1398.8,1426.6,1412.85,1400.7,1406,1399.8,1404.4,1415.5,1417.2,1421.9,1415,1413.7,1428.1,1434,1435.7,1427.5,1429.4,1423.9,1425.6,1427.5,1434.8,1422.3,1412.1,1442.5,1448.8,1468.2,1484.3,1501.6,1506.2,1498.6,1488.9,1504.5,1518.3,1513.9,1503.3,1503,1506.5,1502.1,1503,1534.8,1535.3,1541.4,1528.6,1525.6,1535.25,1528.15,1528,1542.6,1514.3,1510.7,1505.5,1492.1,1492.9,1496.8,1493.1,1503.4,1500.9,1490.7,1496.3,1505.3,1505.3,1517.9,1507.4,1507.1,1493.3,1470.5,1465,1480.5,1501.7,1501.4,1493.3,1492.1,1505.1,1495.7,1478,1487.1,1480.8,1480.6,1487,1488.3,1484.8,1484,1490.7,1490.4,1503.1]) @@ -107,19 +124,19 @@ def demo_custom_plotters_and_tooltips(): _, static.tooltip = imgui.checkbox("Show Tooltips", static.tooltip) imgui.same_line() - _, static.positive_color = imgui.color_edit4("Positive Color", static.positive_color, imgui.ColorEditFlags_.no_inputs.value) + _, static.positive_color = imgui.color_edit4("Positive Color", static.positive_color, imgui.ColorEditFlags_.no_inputs) imgui.same_line() - _, static.negative_color = imgui.color_edit4("Negative Color", static.negative_color, 
imgui.ColorEditFlags_.no_inputs.value) + _, static.negative_color = imgui.color_edit4("Negative Color", static.negative_color, imgui.ColorEditFlags_.no_inputs) implot.get_style().use_local_time = False if implot.begin_plot("Candlestick Chart", ImVec2(-1, 0)): - implot.setup_axes("", "", 0, implot.AxisFlags_.auto_fit.value | implot.AxisFlags_.range_fit.value) + implot.setup_axes("", "", 0, implot.AxisFlags_.auto_fit | implot.AxisFlags_.range_fit) implot.setup_axes_limits(static.first_day, static.last_day, 1250, 1600) - implot.setup_axis_scale(implot.ImAxis_.x1.value, implot.Scale_.time.value) - implot.setup_axis_limits_constraints(implot.ImAxis_.x1.value, static.first_day, static.last_day) - implot.setup_axis_zoom_constraints(implot.ImAxis_.x1.value, 60 * 60 * 24 * 14, static.last_day - static.first_day) - implot.setup_axis_format(implot.ImAxis_.y1.value, "$%.0f") + implot.setup_axis_scale(implot.ImAxis_.x1, implot.Scale_.time) + implot.setup_axis_limits_constraints(implot.ImAxis_.x1, static.first_day, static.last_day) + implot.setup_axis_zoom_constraints(implot.ImAxis_.x1, 60 * 60 * 24 * 14, static.last_day - static.first_day) + implot.setup_axis_format(implot.ImAxis_.y1, "$%.0f") plot_candlestick("GOOGL", static.dates, static.opens, static.closes, static.lows, static.highs, @@ -178,7 +195,7 @@ def demo_help(): imgui.indent() io = imgui.get_io() - backend_flag = "True" if io.backend_flags & imgui.BackendFlags_.renderer_has_vtx_offset.value else "False" + backend_flag = "True" if io.backend_flags & imgui.BackendFlags_.renderer_has_vtx_offset else "False" imgui.bullet_text(f"ImGuiBackendFlags_RendererHasVtxOffset: {backend_flag}") imgui.unindent() @@ -214,14 +231,14 @@ def demo_config(): imgui.separator() if implot.begin_plot("Preview"): - implot.setup_axis_scale(implot.ImAxis_.x1.value, implot.Scale_.time.value) - implot.setup_axis_limits(implot.ImAxis_.x1.value, static.now, static.now + 24 * 3600) + implot.setup_axis_scale(implot.ImAxis_.x1, implot.Scale_.time) + 
implot.setup_axis_limits(implot.ImAxis_.x1, static.now, static.now + 24 * 3600) for i in range(10): x = np.array([static.now, static.now + 24 * 3600], np.float32) y = np.array([0, i / 9.0], np.float32) - with imgui_ctx.push_id(i): + with imgui_ctx.push_id(str(i)): implot.plot_line("##Line", x, y) implot.end_plot() @@ -247,8 +264,8 @@ def demo_line_plots(): if implot.begin_plot("Line Plots"): implot.setup_axes("x", "y") implot.plot_line("f(x)", static.xs1, static.ys1) - implot.set_next_marker_style(implot.Marker_.circle.value) - implot.plot_line("g(x)", static.xs2, static.ys2, flags=implot.LineFlags_.segments.value) + implot.set_next_marker_style(implot.Marker_.circle) + implot.plot_line("g(x)", static.xs2, static.ys2, flags=implot.LineFlags_.segments) implot.end_plot() @@ -300,7 +317,7 @@ def demo_filled_line_plots(): implot.setup_axes_limits(0, 100, 0, 500) if static.show_fills: - implot.push_style_var(implot.StyleVar_.fill_alpha.value, 0.25) + implot.push_style_var(implot.StyleVar_.fill_alpha, 0.25) ref_value = -np.inf if static.shade_mode == 0 else np.inf if static.shade_mode == 1 else static.fill_ref implot.plot_shaded("Stock 1", static.xs1, static.ys1, ref_value, static.flags) implot.plot_shaded("Stock 2", static.xs1, static.ys2, ref_value, static.flags) @@ -335,7 +352,7 @@ def demo_shaded_plots(): _, static.alpha = imgui.drag_float("Alpha", static.alpha, 0.01, 0, 1) if implot.begin_plot("Shaded Plots"): - implot.push_style_var(implot.StyleVar_.fill_alpha.value, static.alpha) + implot.push_style_var(implot.StyleVar_.fill_alpha, static.alpha) implot.plot_shaded("Uncertain Data", static.xs, static.ys1, static.ys2) implot.plot_line("Uncertain Data", static.xs, static.ys) implot.plot_shaded("Overlapping", static.xs, static.ys3, static.ys4) @@ -361,9 +378,9 @@ def demo_scatter_plots(): if implot.begin_plot("Scatter Plot"): implot.plot_scatter("Data 1", static.xs1, static.ys1) - implot.push_style_var(implot.StyleVar_.fill_alpha.value, 0.25) + 
implot.push_style_var(implot.StyleVar_.fill_alpha, 0.25) implot.set_next_marker_style( - implot.Marker_.square.value, + implot.Marker_.square, size=6, fill=implot.get_colormap_color(1), weight=implot.AUTO, @@ -386,24 +403,24 @@ def demo_stairstep_plots(): if not hasattr(static, "flags"): static.flags = 0 - _, static.flags = imgui.checkbox_flags("Shaded", static.flags, implot.StairsFlags_.shaded.value) + _, static.flags = imgui.checkbox_flags("Shaded", static.flags, implot.StairsFlags_.shaded) if implot.begin_plot("Stairstep Plot"): implot.setup_axes("x", "f(x)") implot.setup_axes_limits(0, 1, 0, 1) - implot.push_style_color(implot.Col_.line.value, [0.5, 0.5, 0.5, 1.0]) + implot.push_style_color(implot.Col_.line, [0.5, 0.5, 0.5, 1.0]) implot.plot_line("##1", static.ys1, xscale=0.05) implot.plot_line("##2", static.ys2, xscale=0.05) implot.pop_style_color() - implot.set_next_marker_style(implot.Marker_.circle.value) + implot.set_next_marker_style(implot.Marker_.circle) implot.set_next_fill_style(implot.AUTO_COL, 0.25) implot.plot_stairs("Post Step (default)", static.ys1, xscale=0.05, flags=static.flags) - implot.set_next_marker_style(implot.Marker_.circle.value) + implot.set_next_marker_style(implot.Marker_.circle) implot.set_next_fill_style(implot.AUTO_COL, 0.25) - implot.plot_stairs("Pre Step", static.ys2, xscale=0.05, flags=static.flags | implot.StairsFlags_.pre_step.value) + implot.plot_stairs("Pre Step", static.ys2, xscale=0.05, flags=static.flags | implot.StairsFlags_.pre_step) implot.end_plot() @@ -416,7 +433,7 @@ def demo_bar_plots(): if implot.begin_plot("Bar Plot"): implot.plot_bars("Vertical", static.data, bar_size=0.7, shift=1) - implot.plot_bars("Horizontal", static.data, bar_size=0.4, shift=1, flags=implot.BarsFlags_.horizontal.value) + implot.plot_bars("Horizontal", static.data, bar_size=0.4, shift=1, flags=implot.BarsFlags_.horizontal) implot.end_plot() @@ -442,7 +459,7 @@ def demo_bar_groups(): static.flags = 0 static.horz = False - _, static.flags = 
imgui.checkbox_flags("Stacked", static.flags, implot.BarGroupsFlags_.stacked.value) + _, static.flags = imgui.checkbox_flags("Stacked", static.flags, implot.BarGroupsFlags_.stacked) imgui.same_line() _, static.horz = imgui.checkbox("Horizontal", static.horz) @@ -450,12 +467,12 @@ def demo_bar_groups(): _, static.size = imgui.slider_float("Size", static.size, 0, 1) if implot.begin_plot("Bar Group"): - implot.setup_legend(implot.Location_.east.value, implot.LegendFlags_.outside.value) + implot.setup_legend(implot.Location_.east, implot.LegendFlags_.outside) if static.horz: - implot.setup_axes("Score", "Student", implot.AxisFlags_.auto_fit.value, implot.AxisFlags_.auto_fit.value) + implot.setup_axes("Score", "Student", implot.AxisFlags_.auto_fit, implot.AxisFlags_.auto_fit) implot.setup_axis_ticks( - axis=implot.ImAxis_.y1.value, + axis=implot.ImAxis_.y1, values=static.positions, labels=static.glabels, keep_default=False @@ -465,11 +482,11 @@ def demo_bar_groups(): values=static.data, group_size=static.groups, shift=0, - flags=static.flags | implot.BarGroupsFlags_.horizontal.value) + flags=static.flags | implot.BarGroupsFlags_.horizontal) else: - implot.setup_axes("Student", "Score", implot.AxisFlags_.auto_fit.value, implot.AxisFlags_.auto_fit.value) + implot.setup_axes("Student", "Score", implot.AxisFlags_.auto_fit, implot.AxisFlags_.auto_fit) implot.setup_axis_ticks( - axis=implot.ImAxis_.x1.value, + axis=implot.ImAxis_.x1, values=static.positions, labels=static.glabels, keep_default=False @@ -493,8 +510,8 @@ def demo_bar_stacks(): static.liars = -1 if static.liars == -1: - Liars_Data = np.array([4282515870, 4282609140, 4287357182, 4294630301, 4294945280, 4294921472], np.uint32) - static.liars = implot.add_colormap("Liars", Liars_Data) + liars_data = np.array([4282515870, 4282609140, 4287357182, 4294630301, 4294945280, 4294921472], np.uint32) + static.liars = implot.add_colormap("Liars", liars_data) if not hasattr(static, "diverging"): static.diverging = True @@ 
-525,10 +542,10 @@ def demo_bar_stacks(): labels_div = ["Pants on Fire","False","Mostly False","Mostly False","False","Pants on Fire","Half True","Mostly True","True"] implot.push_colormap(static.liars) - if implot.begin_plot("PolitiFact: Who Lies More?", ImVec2(-1, 400), implot.Flags_.no_mouse_text.value): - implot.setup_legend(implot.Location_.south.value, implot.LegendFlags_.outside.value | implot.LegendFlags_.horizontal.value) - implot.setup_axes("", "", implot.AxisFlags_.auto_fit.value | implot.AxisFlags_.no_decorations.value, implot.AxisFlags_.auto_fit.value | implot.AxisFlags_.invert.value) - implot.setup_axis_ticks(implot.ImAxis_.y1.value, + if implot.begin_plot("PolitiFact: Who Lies More?", ImVec2(-1, 400), implot.Flags_.no_mouse_text): + implot.setup_legend(implot.Location_.south, implot.LegendFlags_.outside | implot.LegendFlags_.horizontal) + implot.setup_axes("", "", implot.AxisFlags_.auto_fit | implot.AxisFlags_.no_decorations, implot.AxisFlags_.auto_fit | implot.AxisFlags_.invert) + implot.setup_axis_ticks(implot.ImAxis_.y1, v_min=0, v_max=19, n_ticks=20, labels=politicians, keep_default=False) if static.diverging: @@ -537,14 +554,14 @@ def demo_bar_stacks(): values=data_div, group_size=0.75, shift=0, - flags=implot.BarGroupsFlags_.stacked.value | implot.BarGroupsFlags_.horizontal.value) + flags=implot.BarGroupsFlags_.stacked | implot.BarGroupsFlags_.horizontal) else: implot.plot_bar_groups( label_ids=labels_reg, values=data_reg, group_size=0.75, shift=0, - flags=implot.BarGroupsFlags_.stacked.value | implot.BarGroupsFlags_.horizontal.value) + flags=implot.BarGroupsFlags_.stacked | implot.BarGroupsFlags_.horizontal) implot.end_plot() implot.pop_colormap() @@ -572,13 +589,13 @@ def demo_error_bars(): implot.set_next_error_bar_style(implot.get_colormap_color(1), 0) implot.plot_error_bars("Line", static.xs, static.lin1, static.err1, static.err2) - implot.set_next_marker_style(implot.Marker_.square.value) + 
implot.set_next_marker_style(implot.Marker_.square) implot.plot_line("Line", static.xs, static.lin1) - implot.push_style_color(implot.Col_.error_bar.value, implot.get_colormap_color(2)) + implot.push_style_color(implot.Col_.error_bar, implot.get_colormap_color(2)) implot.plot_error_bars("Scatter", static.xs, static.lin2, static.err2) implot.plot_error_bars("Scatter", static.xs, static.lin2, static.err3, static.err4, - flags=implot.ErrorBarsFlags_.horizontal.value) + flags=implot.ErrorBarsFlags_.horizontal) implot.pop_style_color() implot.plot_scatter("Scatter", static.xs, static.lin2) @@ -595,10 +612,10 @@ def demo_stem_plots(): static.ys2 = 0.5 + 0.25 * np.sin(10 * static.xs) * np.sin(static.xs) if implot.begin_plot("Stem Plots"): - implot.setup_axis_limits(implot.ImAxis_.x1.value, 0, 1.0) - implot.setup_axis_limits(implot.ImAxis_.y1.value, 0, 1.6) + implot.setup_axis_limits(implot.ImAxis_.x1, 0, 1.0) + implot.setup_axis_limits(implot.ImAxis_.y1, 0, 1.6) implot.plot_stems("Stems 1", static.xs, static.ys1) - implot.set_next_marker_style(implot.Marker_.circle.value) + implot.set_next_marker_style(implot.Marker_.circle) implot.plot_stems("Stems 2", static.xs, static.ys2) implot.end_plot() @@ -613,9 +630,9 @@ def demo_infinite_lines(): static.vals = np.array([0.25, 0.5, 0.75], dtype=np.float64) if implot.begin_plot("##Infinite"): - implot.setup_axes("", "", implot.AxisFlags_.no_initial_fit.value, implot.AxisFlags_.no_initial_fit.value) + implot.setup_axes("", "", implot.AxisFlags_.no_initial_fit, implot.AxisFlags_.no_initial_fit) implot.plot_inf_lines("Vertical", static.vals) - implot.plot_inf_lines("Horizontal", static.vals, flags=implot.InfLinesFlags_.horizontal.value) + implot.plot_inf_lines("Horizontal", static.vals, flags=implot.InfLinesFlags_.horizontal) implot.end_plot() @@ -630,13 +647,13 @@ def demo_pie_charts(): imgui.set_next_item_width(250) _, static.data1 = imgui.drag_float4("Values", static.data1, 0.01, 0, 1) - _, static.flags = 
imgui.checkbox_flags("Normalize", static.flags, implot.PieChartFlags_.normalize.value) - _, static.flags = imgui.checkbox_flags("Ignore Hidden", static.flags, implot.PieChartFlags_.ignore_hidden.value) + _, static.flags = imgui.checkbox_flags("Normalize", static.flags, implot.PieChartFlags_.normalize) + _, static.flags = imgui.checkbox_flags("Ignore Hidden", static.flags, implot.PieChartFlags_.ignore_hidden) - if implot.begin_plot("##Pie1", size=(250, 250), flags=implot.Flags_.equal.value | implot.Flags_.no_mouse_text.value): - implot.setup_axes("", "", implot.AxisFlags_.no_decorations.value, implot.AxisFlags_.no_decorations.value) + if implot.begin_plot("##Pie1", size=(250, 250), flags=implot.Flags_.equal | implot.Flags_.no_mouse_text): + implot.setup_axes("", "", implot.AxisFlags_.no_decorations, implot.AxisFlags_.no_decorations) implot.setup_axes_limits(0, 1, 0, 1) - implot.plot_pie_chart(static.labels1, np.array(static.data1), x=0.5, y=0.5, radius=0.4, label_fmt="%.2f", angle0=90, flags=static.flags) + implot.plot_pie_chart(static.labels1, np.array(static.data1), x=0.5, y=0.5, radius=0.4, label_fmt="%.2f", angle0=90, flags=static.flags) # type: ignore implot.end_plot() imgui.same_line() @@ -645,9 +662,9 @@ def demo_pie_charts(): static.labels2 = ["A", "B", "C", "D", "E"] static.data2 = np.array([1, 1, 2, 3, 5], dtype=np.int32) - implot.push_colormap(implot.Colormap_.pastel.value) - if implot.begin_plot("##Pie2", size=(250, 250), flags=implot.Flags_.equal.value | implot.Flags_.no_mouse_text.value): - implot.setup_axes("", "", implot.AxisFlags_.no_decorations.value, implot.AxisFlags_.no_decorations.value) + implot.push_colormap(implot.Colormap_.pastel) + if implot.begin_plot("##Pie2", size=(250, 250), flags=implot.Flags_.equal | implot.Flags_.no_mouse_text): + implot.setup_axes("", "", implot.AxisFlags_.no_decorations, implot.AxisFlags_.no_decorations) implot.setup_axes_limits(0, 1, 0, 1) implot.plot_pie_chart(static.labels2, static.data2, x=0.5, y=0.5, 
radius=0.4, label_fmt="%.0f", angle0=180, flags=static.flags) implot.end_plot() @@ -673,7 +690,7 @@ def demo_heatmaps(): static.ylabels = ["R1", "R2", "R3", "R4", "R5", "R6", "R7"] if not hasattr(static, "map"): - static.map = implot.Colormap_.viridis.value + static.map = implot.Colormap_.viridis if implot.colormap_button(implot.get_colormap_name(static.map), size=(225, 0), cmap=static.map): static.map = (static.map + 1) % implot.get_colormap_count() @@ -687,16 +704,16 @@ def demo_heatmaps(): if not hasattr(static, "hm_flags"): static.hm_flags = 0 - _, static.hm_flags = imgui.checkbox_flags("Column Major", static.hm_flags, implot.HeatmapFlags_.col_major.value) + _, static.hm_flags = imgui.checkbox_flags("Column Major", static.hm_flags, implot.HeatmapFlags_.col_major) - axes_flags = implot.AxisFlags_.lock.value | implot.AxisFlags_.no_grid_lines.value | implot.AxisFlags_.no_tick_marks.value + axes_flags = implot.AxisFlags_.lock | implot.AxisFlags_.no_grid_lines | implot.AxisFlags_.no_tick_marks implot.push_colormap(static.map) - if implot.begin_plot("##Heatmap1", size=(225, 225), flags=implot.Flags_.no_legend.value | implot.Flags_.no_mouse_text.value): + if implot.begin_plot("##Heatmap1", size=(225, 225), flags=implot.Flags_.no_legend | implot.Flags_.no_mouse_text): implot.setup_axes("", "", axes_flags, axes_flags) - implot.setup_axis_ticks(implot.ImAxis_.x1.value, v_min=0 + 1.0 / 14.0, v_max=1 - 1.0 / 14.0, n_ticks=7, labels=static.xlabels, keep_default=False) - implot.setup_axis_ticks(implot.ImAxis_.y1.value, v_min=1 - 1.0 / 14.0, v_max=0 + 1.0 / 14.0, n_ticks=7, labels=static.ylabels, keep_default=False) + implot.setup_axis_ticks(implot.ImAxis_.x1, v_min=0 + 1.0 / 14.0, v_max=1 - 1.0 / 14.0, n_ticks=7, labels=static.xlabels, keep_default=False) + implot.setup_axis_ticks(implot.ImAxis_.y1, v_min=1 - 1.0 / 14.0, v_max=0 + 1.0 / 14.0, n_ticks=7, labels=static.ylabels, keep_default=False) implot.plot_heatmap("heat", static.values1, scale_min=static.scale_min, 
scale_max=static.scale_max, label_fmt="%g", bounds_min=implot.Point(0, 0), bounds_max=implot.Point(1, 1), flags=static.hm_flags) implot.end_plot() @@ -712,10 +729,10 @@ def demo_heatmaps(): static.values2 = np.random.uniform(0.0, 1.0, size=(size, size)) if implot.begin_plot("##Heatmap2", size=(225, 225)): - implot.setup_axes("", "", implot.AxisFlags_.no_decorations.value, implot.AxisFlags_.no_decorations.value) + implot.setup_axes("", "", implot.AxisFlags_.no_decorations, implot.AxisFlags_.no_decorations) implot.setup_axes_limits(-1, 1, -1, 1) implot.plot_heatmap("heat1", static.values2, scale_min=0, scale_max=1) - implot.plot_heatmap("heat2", static.values2, scale_min=0, scale_max=1, bounds_min=(-1, -1), bounds_max=(0, 0)) + implot.plot_heatmap("heat2", static.values2, scale_min=0, scale_max=1, bounds_min=implot.Point(-1, -1), bounds_max=implot.Point(0, 0)) implot.end_plot() implot.pop_colormap() @@ -727,7 +744,7 @@ def demo_histogram(): static = demo_histogram if not hasattr(static, "hist_flags"): - static.hist_flags = implot.HistogramFlags_.density.value + static.hist_flags = implot.HistogramFlags_.density static.bins = 50 static.mu = 5.0 static.sigma = 2.0 @@ -737,17 +754,17 @@ def demo_histogram(): static.data = np.random.normal(static.mu, static.sigma, 10000) imgui.set_next_item_width(200) - if imgui.radio_button("Sqrt", static.bins == implot.Bin_.sqrt.value): - static.bins = implot.Bin_.sqrt.value + if imgui.radio_button("Sqrt", static.bins == implot.Bin_.sqrt): + static.bins = implot.Bin_.sqrt imgui.same_line() - if imgui.radio_button("Sturges", static.bins == implot.Bin_.sturges.value): - static.bins = implot.Bin_.sturges.value + if imgui.radio_button("Sturges", static.bins == implot.Bin_.sturges): + static.bins = implot.Bin_.sturges imgui.same_line() - if imgui.radio_button("Rice", static.bins == implot.Bin_.rice.value): - static.bins = implot.Bin_.rice.value + if imgui.radio_button("Rice", static.bins == implot.Bin_.rice): + static.bins = 
implot.Bin_.rice imgui.same_line() - if imgui.radio_button("Scott", static.bins == implot.Bin_.scott.value): - static.bins = implot.Bin_.scott.value + if imgui.radio_button("Scott", static.bins == implot.Bin_.scott): + static.bins = implot.Bin_.scott imgui.same_line() if imgui.radio_button("N Bins", static.bins >= 0): static.bins = 50 @@ -757,11 +774,11 @@ def demo_histogram(): imgui.set_next_item_width(200) _, static.bins = imgui.slider_int("##Bins", static.bins, 1, 100) - _, static.hist_flags = imgui.checkbox_flags("Horizontal", static.hist_flags, implot.HistogramFlags_.horizontal.value) + _, static.hist_flags = imgui.checkbox_flags("Horizontal", static.hist_flags, implot.HistogramFlags_.horizontal) imgui.same_line() - _, static.hist_flags = imgui.checkbox_flags("Density", static.hist_flags, implot.HistogramFlags_.density.value) + _, static.hist_flags = imgui.checkbox_flags("Density", static.hist_flags, implot.HistogramFlags_.density) imgui.same_line() - _, static.hist_flags = imgui.checkbox_flags("Cumulative", static.hist_flags, implot.HistogramFlags_.cumulative.value) + _, static.hist_flags = imgui.checkbox_flags("Cumulative", static.hist_flags, implot.HistogramFlags_.cumulative) _, static.range = imgui.checkbox("Range", static.range) @@ -770,23 +787,23 @@ def demo_histogram(): imgui.set_next_item_width(200) _, static.rmin, static.rmax = imgui.drag_float_range2("##Range", static.rmin, static.rmax, 0.1, -3, 13) imgui.same_line() - _, static.hist_flags = imgui.checkbox_flags("Exclude Outliers", static.hist_flags, implot.HistogramFlags_.no_outliers.value) + _, static.hist_flags = imgui.checkbox_flags("Exclude Outliers", static.hist_flags, implot.HistogramFlags_.no_outliers) x = np.linspace(-3, 13, 100) y = np.exp(-((x - static.mu) ** 2) / (2 * static.sigma ** 2)) / (static.sigma * np.sqrt(2 * np.pi)) - if static.hist_flags & implot.HistogramFlags_.cumulative.value: + if static.hist_flags & implot.HistogramFlags_.cumulative: y = np.cumsum(y) y /= y[-1] if 
implot.begin_plot("##Histograms"): - implot.setup_axes("", "", implot.AxisFlags_.auto_fit.value, implot.AxisFlags_.auto_fit.value) + implot.setup_axes("", "", implot.AxisFlags_.auto_fit, implot.AxisFlags_.auto_fit) implot.set_next_fill_style(implot.AUTO_COL, 0.5) implot.plot_histogram("Empirical", static.data, bins=static.bins, bar_scale=1.0, range=implot.Range(static.rmin, static.rmax) if static.range else implot.Range(), flags=static.hist_flags) - if (static.hist_flags & implot.HistogramFlags_.density.value) and not (static.hist_flags & implot.HistogramFlags_.no_outliers.value): - if static.hist_flags & implot.HistogramFlags_.horizontal.value: + if (static.hist_flags & implot.HistogramFlags_.density) and not (static.hist_flags & implot.HistogramFlags_.no_outliers): + if static.hist_flags & implot.HistogramFlags_.horizontal: implot.plot_line("Theoretical", y, x) else: implot.plot_line("Theoretical", x, y) @@ -809,10 +826,10 @@ def demo_histogram2d(): _, static.count = imgui.slider_int("Count", static.count, 100, 100000) _, static.xybins = imgui.slider_int2("Bins", static.xybins, 1, 500) imgui.same_line() - _, static.hist_flags = imgui.checkbox_flags("Density", static.hist_flags, implot.HistogramFlags_.density.value) + _, static.hist_flags = imgui.checkbox_flags("Density", static.hist_flags, implot.HistogramFlags_.density) max_count = 0 - flags = implot.AxisFlags_.auto_fit.value | implot.AxisFlags_.foreground.value + flags = implot.AxisFlags_.auto_fit | implot.AxisFlags_.foreground implot.push_colormap("Hot") if implot.begin_plot("##Hist2D", size=(imgui.get_content_region_avail().x - 100 - imgui.get_style().item_spacing.x, 0)): @@ -823,11 +840,11 @@ def demo_histogram2d(): xs=static.dist1, ys=static.dist2, x_bins=static.xybins[0], y_bins=static.xybins[1], range=implot.Rect(-6, 6, -6, 6), - flags=static.hist_flags) + flags=static.hist_flags) # type: ignore implot.end_plot() imgui.same_line() - implot.colormap_scale("Density" if static.hist_flags & 
implot.HistogramFlags_.density.value else "Count", 0, max_count, size=(100, 0)) + implot.colormap_scale("Density" if static.hist_flags & implot.HistogramFlags_.density else "Count", 0, max_count, size=(100, 0)) implot.pop_colormap() @@ -867,9 +884,9 @@ def demo_digital_plots(): static.data_analog[1].add_point(static.t, np.cos(2 * static.t)) if implot.begin_plot("##Digital"): - implot.setup_axis_limits(implot.ImAxis_.x1.value, static.t - 10.0, static.t, - implot.Cond_.once.value if static.paused else implot.Cond_.always.value) - implot.setup_axis_limits(implot.ImAxis_.y1.value, -1, 1) + implot.setup_axis_limits(implot.ImAxis_.x1, static.t - 10.0, static.t, + implot.Cond_.once if static.paused else implot.Cond_.always) + implot.setup_axis_limits(implot.ImAxis_.y1, -1, 1) for i in range(2): if static.show_digital[i] and static.data_digital[i].size > 0: @@ -894,6 +911,15 @@ def demo_images(): static.uv1 = [1.0, 1.0] static.tint =[1.0, 1.0, 1.0, 1.0] + # Create textures + # Step 1: create them as numpy arrays + checker_img = make_checkerboard_texture() + # Step 2: convert them to OpenGL textures (using imgui_bundle's immvision) + from imgui_bundle import immvision + static.tex_checker = immvision.GlTexture(checker_img) + # Step 3: create ImTextureRef from the OpenGL texture id + static.tex_id_checker = imgui.ImTextureRef(static.tex_checker.texture_id) + imgui.bullet_text("Below we are displaying the font texture, which is the only texture we have\naccess to in this demo.") imgui.bullet_text("Use the 'ImTextureID' type as storage to pass pointers or identifiers to your\nown texture data.") imgui.bullet_text("See ImGui Wiki page 'Image Loading and Displaying Examples'.") @@ -905,7 +931,7 @@ def demo_images(): _, static.tint = imgui.color_edit4("Tint", static.tint) if implot.begin_plot("##image"): - implot.plot_image("my image", imgui.get_io().fonts.tex_id, + implot.plot_image("my image", static.tex_id_checker, bounds_min=implot.Point(static.bmin[0], static.bmin[1]), 
bounds_max=implot.Point(static.bmax[0], static.bmax[1]), uv0=ImVec2(static.uv0[0], static.uv0[1]), @@ -943,16 +969,16 @@ def demo_markers_and_text(): _, static.mk_size = imgui.drag_float("Marker Size", static.mk_size, 0.1, 2.0, 10.0, "%.2f px") _, static.mk_weight = imgui.drag_float("Marker Weight", static.mk_weight, 0.05, 0.5, 3.0, "%.2f px") - if implot.begin_plot("##MarkerStyles", size=(-1, 0), flags=implot.Flags_.canvas_only.value): - implot.setup_axes("", "", implot.AxisFlags_.no_decorations.value, implot.AxisFlags_.no_decorations.value) + if implot.begin_plot("##MarkerStyles", size=(-1, 0), flags=implot.Flags_.canvas_only): + implot.setup_axes("", "", implot.AxisFlags_.no_decorations, implot.AxisFlags_.no_decorations) implot.setup_axes_limits(0, 10, 0, 12) xs = [1, 4] ys = [10, 11] # Filled markers - for m in range(implot.Marker_.count.value): - with imgui_ctx.push_id(m): + for m in range(implot.Marker_.count): + with imgui_ctx.push_id(str(m)): implot.set_next_marker_style(m, static.mk_size, implot.AUTO_COL, static.mk_weight) implot.plot_line("##Filled", np.array(xs), np.array(ys)) ys = [ys[0] - 1, ys[1] - 1] @@ -961,8 +987,8 @@ def demo_markers_and_text(): ys = [10, 11] # Open markers - for m in range(implot.Marker_.count.value): - with imgui_ctx.push_id(m): + for m in range(implot.Marker_.count): + with imgui_ctx.push_id(str(m)): implot.set_next_marker_style(m, static.mk_size, [0, 0, 0, 0], static.mk_weight) implot.plot_line("##Open", np.array(xs), np.array(ys)) ys = [ys[0] - 1, ys[1] - 1] @@ -970,8 +996,8 @@ def demo_markers_and_text(): implot.plot_text("Filled Markers", 2.5, 6.0) implot.plot_text("Open Markers", 7.5, 6.0) - implot.push_style_color(implot.Col_.inlay_text.value, [1, 0, 1, 1]) - implot.plot_text("Vertical Text", 5.0, 6.0, pix_offset=(0, 0), flags=implot.TextFlags_.vertical.value) + implot.push_style_color(implot.Col_.inlay_text, [1, 0, 1, 1]) + implot.plot_text("Vertical Text", 5.0, 6.0, pix_offset=(0, 0), flags=implot.TextFlags_.vertical) 
implot.pop_style_color() implot.end_plot() @@ -994,10 +1020,10 @@ def demo_nan_values(): _, static.include_nan = imgui.checkbox("Include NaN", static.include_nan) imgui.same_line() - _, static.flags = imgui.checkbox_flags("Skip NaN", static.flags, implot.LineFlags_.skip_nan.value) + _, static.flags = imgui.checkbox_flags("Skip NaN", static.flags, implot.LineFlags_.skip_nan) if implot.begin_plot("##NaNValues"): - implot.set_next_marker_style(implot.Marker_.square.value) + implot.set_next_marker_style(implot.Marker_.square) implot.plot_line("line", data1, data2, flags=static.flags) implot.plot_bars("bars", data1) implot.end_plot() @@ -1015,7 +1041,7 @@ def demo_log_scale(): static.ys3 = np.power(10, static.xs[:21]) if implot.begin_plot("Log Plot", size=(-1, 0)): - implot.setup_axis_scale(implot.ImAxis_.x1.value, implot.Scale_.log10.value) + implot.setup_axis_scale(implot.ImAxis_.x1, implot.Scale_.log10) implot.setup_axes_limits(0.1, 100, 0, 10) implot.plot_line("f(x) = x", static.xs, static.xs) @@ -1037,7 +1063,7 @@ def demo_symmetric_log_scale(): static.ys2 = static.xs * 0.002 - 1 if implot.begin_plot("SymLog Plot", size=(-1, 0)): - implot.setup_axis_scale(implot.ImAxis_.x1.value, implot.Scale_.sym_log.value) + implot.setup_axis_scale(implot.ImAxis_.x1, implot.Scale_.sym_log) implot.plot_line("f(x) = a*x+b", static.xs, static.ys2) implot.plot_line("f(x) = sin(x)", static.xs, static.ys1) @@ -1085,7 +1111,7 @@ def demo_time_scale(): static.data = HugeTimeData(static.t_min) if implot.begin_plot("##Time", size=(-1, 0)): - implot.setup_axis_scale(implot.ImAxis_.x1.value, implot.Scale_.time.value) + implot.setup_axis_scale(implot.ImAxis_.x1, implot.Scale_.time) implot.setup_axes_limits(static.t_min, static.t_max, 0, 1) if static.data is not None: @@ -1154,29 +1180,29 @@ def demo_multiple_axes(): implot.setup_axes_limits(0, 100, 0, 10) if static.x2_axis: - implot.setup_axis(implot.ImAxis_.x2.value, "X-Axis 2", implot.AxisFlags_.aux_default.value) - 
implot.setup_axis_limits(implot.ImAxis_.x2.value, 0, 100) + implot.setup_axis(implot.ImAxis_.x2, "X-Axis 2", implot.AxisFlags_.aux_default) + implot.setup_axis_limits(implot.ImAxis_.x2, 0, 100) if static.y2_axis: - implot.setup_axis(implot.ImAxis_.y2.value, "Y-Axis 2", implot.AxisFlags_.aux_default.value) - implot.setup_axis_limits(implot.ImAxis_.y2.value, 0, 1) + implot.setup_axis(implot.ImAxis_.y2, "Y-Axis 2", implot.AxisFlags_.aux_default) + implot.setup_axis_limits(implot.ImAxis_.y2, 0, 1) if static.y3_axis: - implot.setup_axis(implot.ImAxis_.y3.value, "Y-Axis 3", implot.AxisFlags_.aux_default.value) - implot.setup_axis_limits(implot.ImAxis_.y3.value, 0, 300) + implot.setup_axis(implot.ImAxis_.y3, "Y-Axis 3", implot.AxisFlags_.aux_default) + implot.setup_axis_limits(implot.ImAxis_.y3, 0, 300) implot.plot_line("f(x) = x", static.xs, static.xs) if static.x2_axis: - implot.set_axes(implot.ImAxis_.x2.value, implot.ImAxis_.y1.value) + implot.set_axes(implot.ImAxis_.x2, implot.ImAxis_.y1) implot.plot_line("f(x) = sin(x)*3+1", static.xs2, static.ys1) if static.y2_axis: - implot.set_axes(implot.ImAxis_.x1.value, implot.ImAxis_.y2.value) + implot.set_axes(implot.ImAxis_.x1, implot.ImAxis_.y2) implot.plot_line("f(x) = cos(x)*.2+.5", static.xs, static.ys2) if static.x2_axis and static.y3_axis: - implot.set_axes(implot.ImAxis_.x2.value, implot.ImAxis_.y3.value) + implot.set_axes(implot.ImAxis_.x2, implot.ImAxis_.y3) implot.plot_line("f(x) = sin(x+.5)*100+200", static.xs2, static.ys3) implot.end_plot() @@ -1189,10 +1215,10 @@ def demo_linked_axes(): static = demo_linked_axes if not hasattr(static, "x_min"): - static.x_min = 0.0 - static.x_max = 1.0 - static.y_min = 0.0 - static.y_max = 1.0 + static.x_min = implot.BoxedValue(0.0) + static.x_max = implot.BoxedValue(1.0) + static.y_min = implot.BoxedValue(0.0) + static.y_max = implot.BoxedValue(1.0) static.linkx = True static.linky = True @@ -1205,17 +1231,17 @@ def demo_linked_axes(): if 
implot.begin_aligned_plots("AlignedGroup"): if implot.begin_plot("Plot A"): if static.linkx: - implot.setup_axis_links(implot.ImAxis_.x1.value, static.x_min, static.x_max) + implot.setup_axis_links(implot.ImAxis_.x1, static.x_min, static.x_max) if static.linky: - implot.setup_axis_links(implot.ImAxis_.y1.value, static.y_min, static.y_max) + implot.setup_axis_links(implot.ImAxis_.y1, static.y_min, static.y_max) implot.plot_line("Line", data) implot.end_plot() if implot.begin_plot("Plot B"): if static.linkx: - implot.setup_axis_links(implot.ImAxis_.x1.value, static.x_min, static.x_max) + implot.setup_axis_links(implot.ImAxis_.x1, static.x_min, static.x_max) if static.linky: - implot.setup_axis_links(implot.ImAxis_.y1.value, static.y_min, static.y_max) + implot.setup_axis_links(implot.ImAxis_.y1, static.y_min, static.y_max) implot.plot_line("Line", data) implot.end_plot() @@ -1231,16 +1257,16 @@ def demo_axis_constraints(): _, static.constraints[:2] = imgui.drag_float2("Limits Constraints", static.constraints[:2], 0.01) _, static.constraints[2:] = imgui.drag_float2("Zoom Constraints", static.constraints[2:], 0.01) - _, static.flags = imgui.checkbox_flags("Pan Stretch", static.flags, implot.AxisFlags_.pan_stretch.value) + _, static.flags = imgui.checkbox_flags("Pan Stretch", static.flags, implot.AxisFlags_.pan_stretch) if implot.begin_plot("##AxisConstraints", size=(-1, 0)): implot.setup_axes("X", "Y", static.flags, static.flags) implot.setup_axes_limits(-1, 1, -1, 1) - implot.setup_axis_limits_constraints(implot.ImAxis_.x1.value, static.constraints[0], static.constraints[1]) - implot.setup_axis_zoom_constraints(implot.ImAxis_.x1.value, static.constraints[2], static.constraints[3]) - implot.setup_axis_limits_constraints(implot.ImAxis_.y1.value, static.constraints[0], static.constraints[1]) - implot.setup_axis_zoom_constraints(implot.ImAxis_.y1.value, static.constraints[2], static.constraints[3]) + implot.setup_axis_limits_constraints(implot.ImAxis_.x1, 
static.constraints[0], static.constraints[1]) + implot.setup_axis_zoom_constraints(implot.ImAxis_.x1, static.constraints[2], static.constraints[3]) + implot.setup_axis_limits_constraints(implot.ImAxis_.y1, static.constraints[0], static.constraints[1]) + implot.setup_axis_zoom_constraints(implot.ImAxis_.y1, static.constraints[2], static.constraints[3]) implot.end_plot() @@ -1260,13 +1286,13 @@ def demo_equal_axes(): imgui.bullet_text("Equal constraint applies to axis pairs (e.g. X1/Y1, X2/Y2)") - if implot.begin_plot("##EqualAxes", size=(-1, 0), flags=implot.Flags_.equal.value): - implot.setup_axis(implot.ImAxis_.x2.value, None, implot.AxisFlags_.aux_default.value) - implot.setup_axis(implot.ImAxis_.y2.value, None, implot.AxisFlags_.aux_default.value) + if implot.begin_plot("##EqualAxes", size=(-1, 0), flags=implot.Flags_.equal): + implot.setup_axis(implot.ImAxis_.x2, None, implot.AxisFlags_.aux_default) + implot.setup_axis(implot.ImAxis_.y2, None, implot.AxisFlags_.aux_default) implot.plot_line("Circle", static.xs1, static.ys1) - implot.set_axes(implot.ImAxis_.x2.value, implot.ImAxis_.y2.value) + implot.set_axes(implot.ImAxis_.x2, implot.ImAxis_.y2) implot.plot_line("Diamond", static.xs2, static.ys2) implot.end_plot() @@ -1278,8 +1304,8 @@ def demo_auto_fitting_data(): static = demo_auto_fitting_data if not hasattr(static, "xflags"): - static.xflags = implot.AxisFlags_.none.value - static.yflags = implot.AxisFlags_.auto_fit.value | implot.AxisFlags_.range_fit.value + static.xflags = implot.AxisFlags_.none + static.yflags = implot.AxisFlags_.auto_fit | implot.AxisFlags_.range_fit static.data = 1 + np.sin(np.arange(101) / 10.0) imgui.bullet_text("The Y-axis has been configured to auto-fit to only the data visible in X-axis range.") @@ -1288,15 +1314,15 @@ def demo_auto_fitting_data(): imgui.text_unformatted("X: ") imgui.same_line() - _, static.xflags = imgui.checkbox_flags("ImPlotAxisFlags_AutoFit##X", static.xflags, implot.AxisFlags_.auto_fit.value) + _, 
static.xflags = imgui.checkbox_flags("ImPlotAxisFlags_AutoFit##X", static.xflags, implot.AxisFlags_.auto_fit) imgui.same_line() - _, static.xflags = imgui.checkbox_flags("ImPlotAxisFlags_RangeFit##X", static.xflags, implot.AxisFlags_.range_fit.value) + _, static.xflags = imgui.checkbox_flags("ImPlotAxisFlags_RangeFit##X", static.xflags, implot.AxisFlags_.range_fit) imgui.text_unformatted("Y: ") imgui.same_line() - _, static.yflags = imgui.checkbox_flags("ImPlotAxisFlags_AutoFit##Y", static.yflags, implot.AxisFlags_.auto_fit.value) + _, static.yflags = imgui.checkbox_flags("ImPlotAxisFlags_AutoFit##Y", static.yflags, implot.AxisFlags_.auto_fit) imgui.same_line() - _, static.yflags = imgui.checkbox_flags("ImPlotAxisFlags_RangeFit##Y", static.yflags, implot.AxisFlags_.range_fit.value) + _, static.yflags = imgui.checkbox_flags("ImPlotAxisFlags_RangeFit##Y", static.yflags, implot.AxisFlags_.range_fit) if implot.begin_plot("##DataFitting"): implot.setup_axes("X", "Y", static.xflags, static.yflags) @@ -1309,10 +1335,10 @@ def demo_subplots_sizing(): static = demo_subplots_sizing if not hasattr(static, "flags"): - static.flags = implot.SubplotFlags_.share_items.value | implot.SubplotFlags_.no_legend.value + static.flags = implot.SubplotFlags_.share_items | implot.SubplotFlags_.no_legend - _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_NoResize", static.flags, implot.SubplotFlags_.no_resize.value) - _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_NoTitle", static.flags, implot.SubplotFlags_.no_title.value) + _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_NoResize", static.flags, implot.SubplotFlags_.no_resize) + _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_NoTitle", static.flags, implot.SubplotFlags_.no_title) if not hasattr(static, "rows"): static.rows = 3 @@ -1322,7 +1348,7 @@ def demo_subplots_sizing(): _, static.cols = imgui.slider_int("Cols", static.cols, 1, 5) if static.rows < 1 or static.cols < 1: - 
imgui.text_colored("Nice try, but the number of rows and columns must be greater than 0!", (1, 0, 0, 1)) + imgui.text_colored(ImVec4(1, 0, 0, 1), "Nice try, but the number of rows and columns must be greater than 0!") return if not hasattr(static, "rratios"): @@ -1341,8 +1367,8 @@ def demo_subplots_sizing(): row_col_ratios=row_col_ratios): id_counter = 0 for i in range(static.rows * static.cols): - if implot.begin_plot("", size=(0, 0), flags=implot.Flags_.no_legend.value): - implot.setup_axes("", "", implot.AxisFlags_.no_decorations.value, implot.AxisFlags_.no_decorations.value) + if implot.begin_plot("", size=(0, 0), flags=implot.Flags_.no_legend): + implot.setup_axes("", "", implot.AxisFlags_.no_decorations, implot.AxisFlags_.no_decorations) # Compute sinewave data fi = 0.01 * (i + 1) @@ -1351,7 +1377,7 @@ def demo_subplots_sizing(): # Apply colormap based on subplot index if static.rows * static.cols > 1: - col = implot.sample_colormap(i / float(static.rows * static.cols - 1), implot.Colormap_.jet.value) + col = implot.sample_colormap(i / float(static.rows * static.cols - 1), implot.Colormap_.jet) implot.set_next_line_style(col) # Label and plot line @@ -1367,10 +1393,10 @@ def demo_subplot_item_sharing(): static = demo_subplot_item_sharing if not hasattr(static, "flags"): - static.flags = implot.SubplotFlags_.share_items.value + static.flags = implot.SubplotFlags_.share_items - _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_ShareItems", static.flags, implot.SubplotFlags_.share_items.value) - _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_ColMajor", static.flags, implot.SubplotFlags_.col_major.value) + _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_ShareItems", static.flags, implot.SubplotFlags_.share_items) + _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_ColMajor", static.flags, implot.SubplotFlags_.col_major) imgui.bullet_text("Drag and drop items from the legend onto plots (except for 'common')") @@ -1381,7 
+1407,7 @@ def demo_subplot_item_sharing(): static.curj = -1 # Current dataset being dragged if implot.begin_subplots("##ItemSharing", static.rows, static.cols, size=(-1, 400), flags=static.flags): - implot.setup_legend(implot.Location_.south.value, implot.LegendFlags_.sort.value | implot.LegendFlags_.horizontal.value) + implot.setup_legend(implot.Location_.south, implot.LegendFlags_.sort | implot.LegendFlags_.horizontal) for i in range(static.rows * static.cols): if implot.begin_plot(""): @@ -1420,12 +1446,12 @@ def demo_subplot_axis_linking(): static = demo_subplot_axis_linking if not hasattr(static, "flags"): - static.flags = implot.SubplotFlags_.link_rows.value | implot.SubplotFlags_.link_cols.value + static.flags = implot.SubplotFlags_.link_rows | implot.SubplotFlags_.link_cols - _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_LinkRows", static.flags, implot.SubplotFlags_.link_rows.value) - _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_LinkCols", static.flags, implot.SubplotFlags_.link_cols.value) - _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_LinkAllX", static.flags, implot.SubplotFlags_.link_all_x.value) - _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_LinkAllY", static.flags, implot.SubplotFlags_.link_all_y.value) + _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_LinkRows", static.flags, implot.SubplotFlags_.link_rows) + _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_LinkCols", static.flags, implot.SubplotFlags_.link_cols) + _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_LinkAllX", static.flags, implot.SubplotFlags_.link_all_x) + _, static.flags = imgui.checkbox_flags("ImPlotSubplotFlags_LinkAllY", static.flags, implot.SubplotFlags_.link_all_y) if not hasattr(static, "rows"): static.rows = 2 @@ -1452,18 +1478,18 @@ def demo_drag_points(): imgui.bullet_text("Click and drag each point.") if not hasattr(static, "flags"): - static.flags = implot.DragToolFlags_.none.value + 
static.flags = implot.DragToolFlags_.none - _, static.flags = imgui.checkbox_flags("No Cursors", static.flags, implot.DragToolFlags_.no_cursors.value) + _, static.flags = imgui.checkbox_flags("No Cursors", static.flags, implot.DragToolFlags_.no_cursors) imgui.same_line() - _, static.flags = imgui.checkbox_flags("No Fit", static.flags, implot.DragToolFlags_.no_fit.value) + _, static.flags = imgui.checkbox_flags("No Fit", static.flags, implot.DragToolFlags_.no_fit) imgui.same_line() - _, static.flags = imgui.checkbox_flags("No Input", static.flags, implot.DragToolFlags_.no_inputs.value) + _, static.flags = imgui.checkbox_flags("No Input", static.flags, implot.DragToolFlags_.no_inputs) - ax_flags = implot.AxisFlags_.no_tick_labels.value | implot.AxisFlags_.no_tick_marks.value + ax_flags = implot.AxisFlags_.no_tick_labels | implot.AxisFlags_.no_tick_marks clicked, hovered, held = [False] * 4, [False] * 4, [False] * 4 - if implot.begin_plot("##Bezier", size=(-1, 0), flags=implot.Flags_.canvas_only.value): + if implot.begin_plot("##Bezier", size=(-1, 0), flags=implot.Flags_.canvas_only): implot.setup_axes("", "", ax_flags, ax_flags) implot.setup_axes_limits(0, 1, 0, 1) @@ -1472,17 +1498,17 @@ def demo_drag_points(): colors = [[0, 0.9, 0, 1], [1, 0.5, 1, 1], [0, 0.5, 1, 1], [0, 0.9, 0, 1]] for i in range(4): - _, static.P[i].x, static.P[i].y, clicked[i], hovered[i], held[i] = implot.drag_point( + _, static.P[i].x, static.P[i].y, clicked[i], hovered[i], held[i] = implot.drag_point( # type: ignore id_=i, x=static.P[i].x, y=static.P[i].y, col=colors[i], flags=static.flags, held=held[i]) # Compute Bézier curve t_vals = np.linspace(0, 1, 100) u = 1 - t_vals w1, w2, w3, w4 = u**3, 3 * u**2 * t_vals, 3 * u * t_vals**2, t_vals**3 - B = np.dot(np.column_stack((w1, w2, w3, w4)), [[p.x, p.y] for p in static.P]) + b = np.dot(np.column_stack((w1, w2, w3, w4)), [[p.x, p.y] for p in static.P]) # Ensure 1D contiguous arrays using `.ravel()` - implot.plot_line("##bez", B[:, 0].ravel(), 
B[:, 1].ravel()) + implot.plot_line("##bez", b[:, 0].ravel(), b[:, 1].ravel()) implot.end_plot() @@ -1498,13 +1524,13 @@ def demo_drag_lines(): static.y1 = 0.25 static.y2 = 0.75 static.f = 0.1 - static.flags = implot.DragToolFlags_.none.value + static.flags = implot.DragToolFlags_.none - _, static.flags = imgui.checkbox_flags("No Cursors", static.flags, implot.DragToolFlags_.no_cursors.value) + _, static.flags = imgui.checkbox_flags("No Cursors", static.flags, implot.DragToolFlags_.no_cursors) imgui.same_line() - _, static.flags = imgui.checkbox_flags("No Fit", static.flags, implot.DragToolFlags_.no_fit.value) + _, static.flags = imgui.checkbox_flags("No Fit", static.flags, implot.DragToolFlags_.no_fit) imgui.same_line() - _, static.flags = imgui.checkbox_flags("No Input", static.flags, implot.DragToolFlags_.no_inputs.value) + _, static.flags = imgui.checkbox_flags("No Input", static.flags, implot.DragToolFlags_.no_inputs) if implot.begin_plot("##lines", size=(-1, 0)): @@ -1522,7 +1548,6 @@ def demo_drag_lines(): ys = (static.y1 + static.y2) / 2 + np.abs(static.y2 - static.y1) / 2 * np.sin(static.f * np.arange(1000) / 10) # Drag frequency line - clicked, hovered, held = False, False, False _, static.f, clicked, hovered, held = implot.drag_line_y(120482, static.f, col=[1, 0.5, 1, 1], thickness=1, flags=static.flags) implot.set_next_line_style(implot.AUTO_COL, 2.0 if hovered or held else 1.0) @@ -1552,7 +1577,7 @@ def __init__(self): self.y_data2 = self.y_data1 - 0.6 + np.sin(arg * 2) * 0.4 self.y_data3 = self.y_data2 - 0.6 + np.sin(arg * 3) * 0.4 self.rect = implot.Rect(0.0025, 0.0075, -2.7, 1.1) # type: ignore - self.flags = implot.DragToolFlags_.none.value + self.flags = implot.DragToolFlags_.none def demo_drag_rects(): @@ -1563,15 +1588,15 @@ def demo_drag_rects(): imgui.bullet_text("Click and drag the edges, corners, and center of the rect.") _, state.flags = imgui.checkbox_flags( - "NoCursors", state.flags, implot.DragToolFlags_.no_cursors.value + "NoCursors", 
state.flags, implot.DragToolFlags_.no_cursors ) imgui.same_line() _, state.flags = imgui.checkbox_flags( - "NoFit", state.flags, implot.DragToolFlags_.no_fit.value + "NoFit", state.flags, implot.DragToolFlags_.no_fit ) imgui.same_line() _, state.flags = imgui.checkbox_flags( - "NoInput", state.flags, implot.DragToolFlags_.no_inputs.value + "NoInput", state.flags, implot.DragToolFlags_.no_inputs ) plot_height = immapp.em_size() * 15 @@ -1605,14 +1630,14 @@ def demo_drag_rects(): implot.internal.fit_point(implot.Point(0, 1.5)) implot.end_plot() - if implot.begin_plot("##rect", ImVec2(-1, plot_height), implot.Flags_.canvas_only.value): + if implot.begin_plot("##rect", ImVec2(-1, plot_height), implot.Flags_.canvas_only): # implot.setup_axes("", "", implot.ImPlotAxisFlags_.no_decorations, implot.ImPlotAxisFlags_.no_decorations) implot.setup_axes_limits( state.rect.x.min, state.rect.x.max, state.rect.y.min, state.rect.y.max, - imgui.Cond_.always.value, + imgui.Cond_.always, ) implot.plot_line("Signal 1", state.x_data, state.y_data1) implot.plot_line("Signal 2", state.x_data, state.y_data2) @@ -1667,31 +1692,31 @@ def demo_tags(): _, static.show = imgui.checkbox("Show Tags", static.show) if implot.begin_plot("##Tags"): - implot.setup_axis(implot.ImAxis_.x2.value) - implot.setup_axis(implot.ImAxis_.y2.value) + implot.setup_axis(implot.ImAxis_.x2) + implot.setup_axis(implot.ImAxis_.y2) if static.show: implot.tag_x(0.25, ImVec4(1, 1, 0, 1)) implot.tag_y(0.75, ImVec4(1, 1, 0, 1)) _, static.drag_tag, _, _, _ = implot.drag_line_y( - id_=0, y=static.drag_tag, col=ImVec4(1, 0, 0, 1), thickness=1, flags=implot.DragToolFlags_.no_fit.value + id_=0, y=static.drag_tag, col=ImVec4(1, 0, 0, 1), thickness=1, flags=implot.DragToolFlags_.no_fit ) implot.tag_y(static.drag_tag, ImVec4(1, 0, 0, 1), "Drag") - implot.set_axes(implot.ImAxis_.x2.value, implot.ImAxis_.y2.value) + implot.set_axes(implot.ImAxis_.x2, implot.ImAxis_.y2) implot.tag_x(0.5, ImVec4(0, 1, 1, 1), "MyTag") implot.tag_y(0.5, 
ImVec4(0, 1, 1, 1), "Tag: %d" % 42) implot.end_plot() -def sparkline(id, values, y_min, y_max, offset, color, size): - implot.push_style_var(implot.StyleVar_.plot_padding.value, ImVec2(0, 0)) - if implot.begin_plot(id, size, implot.Flags_.canvas_only.value): - implot.setup_axes("", "", implot.AxisFlags_.no_decorations.value, implot.AxisFlags_.no_decorations.value) - implot.setup_axes_limits(0, len(values) - 1, y_min, y_max, imgui.Cond_.always.value) +def sparkline(label_id, values, y_min, y_max, offset, color, size): + implot.push_style_var(implot.StyleVar_.plot_padding, ImVec2(0, 0)) + if implot.begin_plot(label_id, size, implot.Flags_.canvas_only): + implot.setup_axes("", "", implot.AxisFlags_.no_decorations, implot.AxisFlags_.no_decorations) + implot.setup_axes_limits(0, len(values) - 1, y_min, y_max, imgui.Cond_.always) implot.set_next_line_style(color) implot.plot_line("line", values, offset=offset) implot.end_plot() @@ -1705,9 +1730,9 @@ def demo_tables(): static.anim = True static.offset = 0 static.flags = ( - imgui.TableFlags_.borders_outer.value | imgui.TableFlags_.borders_v.value | - imgui.TableFlags_.row_bg.value | imgui.TableFlags_.resizable.value | - imgui.TableFlags_.reorderable.value + imgui.TableFlags_.borders_outer | imgui.TableFlags_.borders_v | + imgui.TableFlags_.row_bg | imgui.TableFlags_.resizable | + imgui.TableFlags_.reorderable ) imgui.bullet_text("Plots can be used inside of ImGui tables as another means of creating subplots.") @@ -1717,12 +1742,12 @@ def demo_tables(): static.offset = (static.offset + 1) % 100 if imgui.begin_table("##table", 3, static.flags, (-1, 0)): - imgui.table_setup_column("Electrode", imgui.TableColumnFlags_.width_fixed.value, 75.0) - imgui.table_setup_column("Voltage", imgui.TableColumnFlags_.width_fixed.value, 75.0) + imgui.table_setup_column("Electrode", imgui.TableColumnFlags_.width_fixed, 75.0) + imgui.table_setup_column("Voltage", imgui.TableColumnFlags_.width_fixed, 75.0) imgui.table_setup_column("EMG 
Signal") imgui.table_headers_row() - implot.push_colormap(implot.Colormap_.cool.value) + implot.push_colormap(implot.Colormap_.cool) for row in range(10): imgui.table_next_row() @@ -1747,29 +1772,29 @@ def demo_tables(): def style_seaborn(): style = implot.get_style() - style.set_color_(implot.Col_.line.value, implot.AUTO_COL) - style.set_color_(implot.Col_.fill.value, implot.AUTO_COL) - style.set_color_(implot.Col_.marker_outline.value, implot.AUTO_COL) - style.set_color_(implot.Col_.marker_fill.value, implot.AUTO_COL) - - style.set_color_(implot.Col_.error_bar.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color_(implot.Col_.frame_bg.value, ImVec4(1.00, 1.00, 1.00, 1.00)) - style.set_color_(implot.Col_.plot_bg.value, ImVec4(0.92, 0.92, 0.95, 1.00)) - style.set_color_(implot.Col_.plot_border.value, ImVec4(0.00, 0.00, 0.00, 0.00)) - style.set_color_(implot.Col_.legend_bg.value, ImVec4(0.92, 0.92, 0.95, 1.00)) - style.set_color_(implot.Col_.legend_border.value, ImVec4(0.80, 0.81, 0.85, 1.00)) - style.set_color_(implot.Col_.legend_text.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color_(implot.Col_.title_text.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color_(implot.Col_.inlay_text.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color_(implot.Col_.axis_text.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color_(implot.Col_.axis_grid.value, ImVec4(1.00, 1.00, 1.00, 1.00)) - style.set_color_(implot.Col_.axis_bg_hovered.value, ImVec4(0.92, 0.92, 0.95, 1.00)) - style.set_color_(implot.Col_.axis_bg_active.value, ImVec4(0.92, 0.92, 0.95, 0.75)) - style.set_color_(implot.Col_.selection.value, ImVec4(1.00, 0.65, 0.00, 1.00)) - style.set_color_(implot.Col_.crosshairs.value, ImVec4(0.23, 0.10, 0.64, 0.50)) + style.set_color_(implot.Col_.line, implot.AUTO_COL) + style.set_color_(implot.Col_.fill, implot.AUTO_COL) + style.set_color_(implot.Col_.marker_outline, implot.AUTO_COL) + style.set_color_(implot.Col_.marker_fill, implot.AUTO_COL) + + 
style.set_color_(implot.Col_.error_bar, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color_(implot.Col_.frame_bg, ImVec4(1.00, 1.00, 1.00, 1.00)) + style.set_color_(implot.Col_.plot_bg, ImVec4(0.92, 0.92, 0.95, 1.00)) + style.set_color_(implot.Col_.plot_border, ImVec4(0.00, 0.00, 0.00, 0.00)) + style.set_color_(implot.Col_.legend_bg, ImVec4(0.92, 0.92, 0.95, 1.00)) + style.set_color_(implot.Col_.legend_border, ImVec4(0.80, 0.81, 0.85, 1.00)) + style.set_color_(implot.Col_.legend_text, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color_(implot.Col_.title_text, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color_(implot.Col_.inlay_text, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color_(implot.Col_.axis_text, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color_(implot.Col_.axis_grid, ImVec4(1.00, 1.00, 1.00, 1.00)) + style.set_color_(implot.Col_.axis_bg_hovered, ImVec4(0.92, 0.92, 0.95, 1.00)) + style.set_color_(implot.Col_.axis_bg_active, ImVec4(0.92, 0.92, 0.95, 0.75)) + style.set_color_(implot.Col_.selection, ImVec4(1.00, 0.65, 0.00, 1.00)) + style.set_color_(implot.Col_.crosshairs, ImVec4(0.23, 0.10, 0.64, 0.50)) style.line_weight = 1.5 - style.marker = implot.Marker_.none.value + style.marker = implot.Marker_.none style.marker_size = 4 style.marker_weight = 1 style.fill_alpha = 1.0 @@ -1795,7 +1820,7 @@ def style_seaborn(): def demo_custom_styles(): # Apply Seaborn style import copy - implot.push_colormap(implot.Colormap_.deep.value) + implot.push_colormap(implot.Colormap_.deep) backup_style = copy.copy(implot.get_style()) style_seaborn() if implot.begin_plot("Seaborn Style"): @@ -1853,14 +1878,14 @@ def demo_legend_popups(): if implot.begin_plot("Right Click the Legend"): implot.setup_axes_limits(0, 100, -1, 1) - implot.push_style_var(implot.StyleVar_.fill_alpha.value, static.alpha) + implot.push_style_var(implot.StyleVar_.fill_alpha, static.alpha) if not static.line: implot.set_next_fill_style(ImVec4(static.color[0], static.color[1], static.color[2], 1.0)) 
implot.plot_bars("Right Click Me", vals) else: if static.markers: - implot.set_next_marker_style(implot.Marker_.square.value) + implot.set_next_marker_style(implot.Marker_.square) implot.set_next_line_style(ImVec4(static.color[0], static.color[1], static.color[2], 1.0), static.thickness) implot.plot_line("Right Click Me", vals) if static.shaded: @@ -1889,7 +1914,7 @@ def demo_colormap_widgets(): static = demo_colormap_widgets if not hasattr(static, "cmap"): - static.cmap = implot.Colormap_.viridis.value + static.cmap = implot.Colormap_.viridis static.t = 0.5 static.col = [1.0, 1.0, 1.0, 1.0] # Placeholder color static.scale = [0, 100] @@ -1900,7 +1925,7 @@ def demo_colormap_widgets(): static.cmap = (static.cmap + 1) % implot.get_colormap_count() # Colormap Slider - imgui.color_button("##Display", static.col, imgui.ColorEditFlags_.no_inputs.value) + imgui.color_button("##Display", static.col, imgui.ColorEditFlags_.no_inputs) imgui.same_line() # _, static.t, static.col = implot.colormap_slider( # label="Slider", @@ -1921,9 +1946,9 @@ def demo_colormap_widgets(): _, static.scale = imgui.input_float2("Scale", static.scale) # Checkbox Flags for Scale Behavior - _, static.flags = imgui.checkbox_flags("No Label", static.flags, implot.ColormapScaleFlags_.no_label.value) - _, static.flags = imgui.checkbox_flags("Opposite", static.flags, implot.ColormapScaleFlags_.opposite.value) - _, static.flags = imgui.checkbox_flags("Invert", static.flags, implot.ColormapScaleFlags_.invert.value) + _, static.flags = imgui.checkbox_flags("No Label", static.flags, implot.ColormapScaleFlags_.no_label) + _, static.flags = imgui.checkbox_flags("Opposite", static.flags, implot.ColormapScaleFlags_.opposite) + _, static.flags = imgui.checkbox_flags("Invert", static.flags, implot.ColormapScaleFlags_.invert) #----------------------------------------------------------------------------- @@ -1953,20 +1978,7 @@ def demo_header(label, demo_function): def show_all_demos(): """Main function to display 
all ImPlot demos with categorized tabs.""" - static = show_all_demos - imgui.text(f"ImPlot says hello. ({implot.version})") - - # Show warning for potential rendering issues - if not hasattr(static, "show_warning"): - static.show_warning = (imgui.get_io().backend_flags & imgui.BackendFlags_.renderer_has_vtx_offset.value) == 0 and imgui.draw_idx_size() == 2 - - if static.show_warning: - imgui.push_style_color(imgui.Col_.text.value, [1, 1, 0, 1]) - imgui.text_wrapped("WARNING: ImDrawIdx is 16-bit and ImGuiBackendFlags_RendererHasVtxOffset is false. " - "Expect visual glitches and artifacts! See README for more information.") - imgui.pop_style_color() - imgui.spacing() if imgui.begin_tab_bar("ImPlotDemoTabs"): @@ -2062,7 +2074,7 @@ def show_demo_window(): # Show ImPlot Style Editor if static.show_implot_style_editor: - imgui.set_next_window_size((415, 762), imgui.Cond_.appearing.value) + imgui.set_next_window_size((415, 762), imgui.Cond_.appearing) imgui.begin("Style Editor (ImPlot)", True) implot.show_style_editor() imgui.end() @@ -2082,9 +2094,9 @@ def show_demo_window(): imgui.show_demo_window() # Main Demo Window - imgui.set_next_window_pos((50, 50), imgui.Cond_.first_use_ever.value) - imgui.set_next_window_size((600, 750), imgui.Cond_.first_use_ever.value) - imgui.begin("ImPlot Demo", True, imgui.WindowFlags_.menu_bar.value) + imgui.set_next_window_pos((50, 50), imgui.Cond_.first_use_ever) + imgui.set_next_window_size((600, 750), imgui.Cond_.first_use_ever) + imgui.begin("ImPlot Demo", True, imgui.WindowFlags_.menu_bar) if imgui.begin_menu_bar(): if imgui.begin_menu("Tools"): diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_implot3d/implot3d_demo.py b/blimgui/dist64/imgui_bundle/demos_python/demos_implot3d/implot3d_demo.py index a8ac0eb..eb3dc2a 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_implot3d/implot3d_demo.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_implot3d/implot3d_demo.py @@ -6,6 +6,7 @@ from 
imgui_bundle.demos_python.demo_utils.api_demos import set_hello_imgui_demo_assets_folder import numpy as np +from numpy.typing import NDArray set_hello_imgui_demo_assets_folder() @@ -13,41 +14,8 @@ # [SECTION] Demo Textures #----------------------------------------------------------------------------- -def rgba_image_to_texture(image: np.ndarray) -> int: - """Upload an RGBA image to the GPU as a texture, returns the OpenGL texture ID.""" - from OpenGL import GL - assert image.dtype == np.uint8 and image.ndim == 3 and image.shape[2] == 4 - - height, width = image.shape[:2] - - # Generate a texture ID - texture_id = GL.glGenTextures(1) - GL.glBindTexture(GL.GL_TEXTURE_2D, texture_id) - - # Set texture parameters (you may want to adjust this) - GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR) - GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR) - GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE) - GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE) - - # Upload the image - GL.glTexImage2D( - GL.GL_TEXTURE_2D, - 0, # level - GL.GL_RGBA, # internal format - width, - height, - 0, # border - GL.GL_RGBA, # input format - GL.GL_UNSIGNED_BYTE, # input type - image - ) - - GL.glBindTexture(GL.GL_TEXTURE_2D, 0) - return texture_id - - -def make_checkerboard_texture(size: int = 256, tile_size: int = 32) -> np.ndarray: + +def make_checkerboard_texture(size: int = 256, tile_size: int = 32) -> NDArray[np.uint8]: """Create a checkerboard RGBA texture as a numpy array.""" img = np.zeros((size, size, 4), dtype=np.uint8) for y in range(size): @@ -60,7 +28,7 @@ def make_checkerboard_texture(size: int = 256, tile_size: int = 32) -> np.ndarra return img -def make_gradient_circle_texture(size: int = 256) -> np.ndarray: +def make_gradient_circle_texture(size: int = 256) -> NDArray[np.uint8]: """Create a circular gradient texture with transparency and a color tint.""" img = 
np.zeros((size, size, 4), dtype=np.uint8) center = size / 2 @@ -123,8 +91,8 @@ def demo_line_plots(): if implot3d.begin_plot("Line Plots"): implot3d.setup_axes("x", "y", "z") implot3d.plot_line("f(x)", static.xs1, static.ys1, static.zs1) - implot3d.set_next_marker_style(implot3d.Marker_.circle.value) - implot3d.plot_line("g(x)", static.xs2, static.ys2, static.zs2, flags=implot3d.LineFlags_.segments.value) + implot3d.set_next_marker_style(implot3d.Marker_.circle) + implot3d.plot_line("g(x)", static.xs2, static.ys2, static.zs2, flags=implot3d.LineFlags_.segments) implot3d.end_plot() @@ -145,9 +113,9 @@ def demo_scatter_plots(): if implot3d.begin_plot("Scatter Plots"): implot3d.plot_scatter("Data 1", static.xs1, static.ys1, static.zs1) - implot3d.push_style_var(implot3d.StyleVar_.fill_alpha.value, 0.25) + implot3d.push_style_var(implot3d.StyleVar_.fill_alpha, 0.25) implot3d.set_next_marker_style( - implot3d.Marker_.square.value, 6, + implot3d.Marker_.square, 6, implot3d.get_colormap_color(1), # Marker outline color implot3d.AUTO, # Default weight implot3d.get_colormap_color(1) # Marker fill color @@ -199,7 +167,7 @@ def add_vertex(x, y, z): implot3d.set_next_fill_style(implot3d.get_colormap_color(0)) implot3d.set_next_line_style(implot3d.get_colormap_color(1), 2) implot3d.set_next_marker_style( - implot3d.Marker_.square.value, 3, + implot3d.Marker_.square, 3, implot3d.get_colormap_color(2), implot3d.AUTO, implot3d.get_colormap_color(2) ) @@ -272,21 +240,21 @@ def demo_quad_plots(): color_x = colors["X"] implot3d.set_next_fill_style(color_x) implot3d.set_next_line_style(color_x, 2) - implot3d.set_next_marker_style(implot3d.Marker_.square.value, 3, color_x, implot3d.AUTO, color_x) + implot3d.set_next_marker_style(implot3d.Marker_.square, 3, color_x, implot3d.AUTO, color_x) implot3d.plot_quad("X", static.xs[0:8], static.ys[0:8], static.zs[0:8]) # Render +y and -y faces color_y = colors["Y"] implot3d.set_next_fill_style(color_y) implot3d.set_next_line_style(color_y, 2) - 
implot3d.set_next_marker_style(implot3d.Marker_.square.value, 3, color_y, implot3d.AUTO, color_y) + implot3d.set_next_marker_style(implot3d.Marker_.square, 3, color_y, implot3d.AUTO, color_y) implot3d.plot_quad("Y", static.xs[8:16], static.ys[8:16], static.zs[8:16]) # Render +z and -z faces color_z = colors["Z"] implot3d.set_next_fill_style(color_z) implot3d.set_next_line_style(color_z, 2) - implot3d.set_next_marker_style(implot3d.Marker_.square.value, 3, color_z, implot3d.AUTO, color_z) + implot3d.set_next_marker_style(implot3d.Marker_.square, 3, color_z, implot3d.AUTO, color_z) implot3d.plot_quad("Z", static.xs[16:24], static.ys[16:24], static.zs[16:24]) implot3d.end_plot() @@ -361,9 +329,9 @@ def demo_surface_plots(): if static.selected_fill == 1: implot3d.push_colormap(static.colormaps[static.sel_colormap]) - if implot3d.begin_plot("Surface Plots", size=(-1, 400), flags=implot3d.Flags_.no_clip.value): + if implot3d.begin_plot("Surface Plots", size=(-1, 400), flags=implot3d.Flags_.no_clip): implot3d.setup_axes_limits(-1, 1, -1, 1, -1.5, 1.5) - implot3d.push_style_var(implot3d.StyleVar_.fill_alpha.value, 0.8) + implot3d.push_style_var(implot3d.StyleVar_.fill_alpha, 0.8) if static.selected_fill == 0: implot3d.set_next_fill_style(static.solid_color) @@ -438,7 +406,7 @@ def demo_mesh_plots(): if static.set_marker_color: implot3d.set_next_marker_style( - implot3d.Marker_.square.value, 3, static.marker_color, implot3d.AUTO, static.marker_color + implot3d.Marker_.square, 3, static.marker_color, implot3d.AUTO, static.marker_color ) # Plot the selected mesh @@ -456,7 +424,7 @@ def slider_implot3d_point( label: str, v: implot3d.Point, v_min: float, v_max: float, format: str = "%.3", flags: int = 0 -) -> [bool, implot3d.Point]: +) -> tuple[bool, implot3d.Point]: as_floats = [v.x, v.y, v.z] changed, as_floats = imgui.slider_float3(label, as_floats, v_min, v_max, format, flags) return changed, implot3d.Point(as_floats[0], as_floats[1], as_floats[2]) @@ -489,11 +457,17 @@ def 
demo_image_plots(): static.uv2 = ImVec2(1.0, 1.0) static.uv3 = ImVec2(0.0, 1.0) + # Create textures + # Step 1: create them as numpy arrays checker_img = make_checkerboard_texture() circle_img = make_gradient_circle_texture() - - static.tex_id_checker = rgba_image_to_texture(checker_img) - static.tex_id_circle = rgba_image_to_texture(circle_img) + # Step 2: convert them to OpenGL textures (using imgui_bundle's immvision) + from imgui_bundle import immvision + static.tex_checker = immvision.GlTexture(checker_img) + static.tex_circle = immvision.GlTexture(circle_img) + # Step 3: create ImTextureRef from the OpenGL texture id + static.tex_id_checker = imgui.ImTextureRef(static.tex_checker.texture_id) + static.tex_id_circle = imgui.ImTextureRef(static.tex_circle.texture_id) static.initialized = True @@ -526,7 +500,7 @@ def demo_image_plots(): # tex_id = imgui.ImTextureRef(imgui.get_io().fonts.python_get_texture_id()) - if implot3d.begin_plot("Image Plot", size=(-1, 0), flags=implot3d.Flags_.no_clip.value): + if implot3d.begin_plot("Image Plot", size=(-1, 0), flags=implot3d.Flags_.no_clip): implot3d.plot_image("Image 1", static.tex_id_checker, center=static.center1, axis_u=static.axis_u1, @@ -573,13 +547,13 @@ def demo_realtime_plots(): static.data_z.add_point(mouse_y - plot_center_y) implot3d.setup_axes("Time", "Mouse X", "Mouse Y", - implot3d.AxisFlags_.no_tick_labels.value, - implot3d.AxisFlags_.no_tick_labels.value, - implot3d.AxisFlags_.no_tick_labels.value) + implot3d.AxisFlags_.no_tick_labels, + implot3d.AxisFlags_.no_tick_labels, + implot3d.AxisFlags_.no_tick_labels) - implot3d.setup_axis_limits(implot3d.ImAxis3D_.x.value, static.t - 10.0, static.t, implot3d.Cond_.always.value) - implot3d.setup_axis_limits(implot3d.ImAxis3D_.y.value, -400, 400, implot3d.Cond_.once.value) - implot3d.setup_axis_limits(implot3d.ImAxis3D_.z.value, -400, 400, implot3d.Cond_.once.value) + implot3d.setup_axis_limits(implot3d.ImAxis3D_.x, static.t - 10.0, static.t, 
implot3d.Cond_.always) + implot3d.setup_axis_limits(implot3d.ImAxis3D_.y, -400, 400, implot3d.Cond_.once) + implot3d.setup_axis_limits(implot3d.ImAxis3D_.z, -400, 400, implot3d.Cond_.once) # Get the valid data to plot x_data = static.data_x.get_data() @@ -604,21 +578,21 @@ def demo_markers_and_text(): _, static.mk_size = imgui.drag_float("Marker Size", static.mk_size, 0.1, 2.0, 10.0, "%.2f px") _, static.mk_weight = imgui.drag_float("Marker Weight", static.mk_weight, 0.05, 0.5, 3.0, "%.2f px") - if implot3d.begin_plot("##MarkerStyles", size=(-1, 0), flags=implot3d.Flags_.canvas_only.value): + if implot3d.begin_plot("##MarkerStyles", size=(-1, 0), flags=implot3d.Flags_.canvas_only): implot3d.setup_axes("", "", "", - implot3d.AxisFlags_.no_decorations.value, - implot3d.AxisFlags_.no_decorations.value, - implot3d.AxisFlags_.no_decorations.value) + implot3d.AxisFlags_.no_decorations, + implot3d.AxisFlags_.no_decorations, + implot3d.AxisFlags_.no_decorations) - implot3d.setup_axes_limits(-0.5, 1.5, -0.5, 1.5, 0, implot3d.Marker_.count.value + 1) + implot3d.setup_axes_limits(-0.5, 1.5, -0.5, 1.5, 0, implot3d.Marker_.count + 1) xs = np.zeros(2, dtype=np.float32) ys = np.zeros(2, dtype=np.float32) - zs = np.array([implot3d.Marker_.count.value, implot3d.Marker_.count.value + 1], dtype=np.float32) + zs = np.array([implot3d.Marker_.count, implot3d.Marker_.count + 1], dtype=np.float32) # Filled markers - for m in range(implot3d.Marker_.count.value): - angle = (zs[0] / float(implot3d.Marker_.count.value)) * 2 * np.pi + for m in range(implot3d.Marker_.count): + angle = (zs[0] / float(implot3d.Marker_.count)) * 2 * np.pi xs[1] = xs[0] + np.cos(angle) * 0.5 ys[1] = ys[0] + np.sin(angle) * 0.5 @@ -629,11 +603,11 @@ def demo_markers_and_text(): zs -= 1 # Move markers down in Z axis xs[0], ys[0] = 1, 1 - zs[:] = [implot3d.Marker_.count.value, implot3d.Marker_.count.value + 1] + zs[:] = [implot3d.Marker_.count, implot3d.Marker_.count + 1] # Open markers - for m in 
range(implot3d.Marker_.count.value): - angle = (zs[0] / float(implot3d.Marker_.count.value)) * 2 * np.pi + for m in range(implot3d.Marker_.count): + angle = (zs[0] / float(implot3d.Marker_.count)) * 2 * np.pi xs[1] = xs[0] + np.cos(angle) * 0.5 ys[1] = ys[0] - np.sin(angle) * 0.5 @@ -648,7 +622,7 @@ def demo_markers_and_text(): implot3d.plot_text("Open Markers", 1.0, 1.0, 6.0) # Inlay text with color - implot3d.push_style_color(implot3d.Col_.inlay_text.value, [1, 0, 1, 1]) + implot3d.push_style_color(implot3d.Col_.inlay_text, [1, 0, 1, 1]) implot3d.plot_text("Rotated Text", 0.5, 0.5, 6.0, np.pi / 4, (0, 0)) implot3d.pop_style_color() @@ -674,11 +648,11 @@ def demo_nan_values(): # UI: Controls for NaN handling _, static.include_nan = imgui.checkbox("Include NaN", static.include_nan) imgui.same_line() - _, static.flags = imgui.checkbox_flags("Skip NaN", static.flags, implot3d.LineFlags_.skip_nan.value) + _, static.flags = imgui.checkbox_flags("Skip NaN", static.flags, implot3d.LineFlags_.skip_nan) # Begin plot if implot3d.begin_plot("##NaNValues"): - implot3d.set_next_marker_style(implot3d.Marker_.square.value) + implot3d.set_next_marker_style(implot3d.Marker_.square) implot3d.plot_line("Line", data1, data2, data3, flags=static.flags) implot3d.end_plot() @@ -742,14 +716,14 @@ def demo_box_rotation(): _, static.init_azimuth = imgui.slider_float("Initial Azimuth", static.init_azimuth, -180.0, 180.0, "%.1f degrees") if implot3d.begin_plot("##BoxRotation"): - implot3d.setup_axes_limits(-1, 1, -1, 1, -1, 1, implot3d.Cond_.always.value) + implot3d.setup_axes_limits(-1, 1, -1, 1, -1, 1, implot3d.Cond_.always) # Set initial rotation implot3d.setup_box_initial_rotation(static.init_elevation, static.init_azimuth) # Set the rotation using the specified elevation and azimuth if changed: - implot3d.setup_box_rotation(static.elevation, static.azimuth, static.animate, implot3d.Cond_.always.value) + implot3d.setup_box_rotation(static.elevation, static.azimuth, static.animate, 
implot3d.Cond_.always) # Define axes lines origin = np.array([0.0, 0.0], dtype=np.float32) @@ -787,16 +761,16 @@ def demo_tick_labels(): if implot3d.begin_plot("##Ticks"): implot3d.setup_axes_limits(2, 5, 0, 1, 0, 1) if static.custom_ticks: - implot3d.setup_axis_ticks(axis = implot3d.ImAxis3D_.x.value, values=pi_list, labels = pi_str_list if static.custom_labels else [], keep_default=True) - implot3d.setup_axis_ticks(axis = implot3d.ImAxis3D_.y.value, values=letters_ticks, labels = letters_labels if static.custom_labels else [], keep_default=False) - implot3d.setup_axis_ticks(axis = implot3d.ImAxis3D_.z.value, v_min=0, v_max=1, n_ticks=6, labels = letters_labels if static.custom_labels else [], keep_default=False) + implot3d.setup_axis_ticks(axis = implot3d.ImAxis3D_.x, values=pi_list, labels = pi_str_list if static.custom_labels else [], keep_default=True) + implot3d.setup_axis_ticks(axis = implot3d.ImAxis3D_.y, values=letters_ticks, labels = letters_labels if static.custom_labels else [], keep_default=False) + implot3d.setup_axis_ticks(axis = implot3d.ImAxis3D_.z, v_min=0, v_max=1, n_ticks=6, labels = letters_labels if static.custom_labels else [], keep_default=False) implot3d.end_plot() def demo_custom_styles(): # Apply Seaborn style import copy - implot3d.push_colormap(implot3d.Colormap_.deep.value) + implot3d.push_colormap(implot3d.Colormap_.deep) backup_style = copy.copy(implot3d.get_style()) style_seaborn() @@ -858,23 +832,23 @@ def demo_custom_rendering(): def style_seaborn(): style = implot3d.get_style() - style.set_color(implot3d.Col_.line.value, implot3d.AUTO_COL) - style.set_color(implot3d.Col_.fill.value, implot3d.AUTO_COL) - style.set_color(implot3d.Col_.marker_outline.value, implot3d.AUTO_COL) - style.set_color(implot3d.Col_.marker_fill.value, implot3d.AUTO_COL) - style.set_color(implot3d.Col_.frame_bg.value, ImVec4(1.00, 1.00, 1.00, 1.00)) - style.set_color(implot3d.Col_.plot_bg.value, ImVec4(0.92, 0.92, 0.95, 1.00)) - 
style.set_color(implot3d.Col_.plot_border.value, ImVec4(0.00, 0.00, 0.00, 0.00)) - style.set_color(implot3d.Col_.legend_bg.value, ImVec4(0.92, 0.92, 0.95, 1.00)) - style.set_color(implot3d.Col_.legend_border.value, ImVec4(0.80, 0.81, 0.85, 1.00)) - style.set_color(implot3d.Col_.legend_text.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color(implot3d.Col_.title_text.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color(implot3d.Col_.inlay_text.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color(implot3d.Col_.axis_text.value, ImVec4(0.00, 0.00, 0.00, 1.00)) - style.set_color(implot3d.Col_.axis_grid.value, ImVec4(1.00, 1.00, 1.00, 1.00)) + style.set_color(implot3d.Col_.line, implot3d.AUTO_COL) + style.set_color(implot3d.Col_.fill, implot3d.AUTO_COL) + style.set_color(implot3d.Col_.marker_outline, implot3d.AUTO_COL) + style.set_color(implot3d.Col_.marker_fill, implot3d.AUTO_COL) + style.set_color(implot3d.Col_.frame_bg, ImVec4(1.00, 1.00, 1.00, 1.00)) + style.set_color(implot3d.Col_.plot_bg, ImVec4(0.92, 0.92, 0.95, 1.00)) + style.set_color(implot3d.Col_.plot_border, ImVec4(0.00, 0.00, 0.00, 0.00)) + style.set_color(implot3d.Col_.legend_bg, ImVec4(0.92, 0.92, 0.95, 1.00)) + style.set_color(implot3d.Col_.legend_border, ImVec4(0.80, 0.81, 0.85, 1.00)) + style.set_color(implot3d.Col_.legend_text, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color(implot3d.Col_.title_text, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color(implot3d.Col_.inlay_text, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color(implot3d.Col_.axis_text, ImVec4(0.00, 0.00, 0.00, 1.00)) + style.set_color(implot3d.Col_.axis_grid, ImVec4(1.00, 1.00, 1.00, 1.00)) style.line_weight = 1.5 - style.marker = implot3d.Marker_.none.value + style.marker = implot3d.Marker_.none style.marker_size = 4 style.marker_weight = 1 style.fill_alpha = 1.0 @@ -1029,9 +1003,9 @@ def show_demo_window(): imgui.show_demo_window() # Set window properties - imgui.set_next_window_pos((100, 100), 
imgui.Cond_.first_use_ever.value) - imgui.set_next_window_size((600, 750), imgui.Cond_.first_use_ever.value) - imgui.begin("ImPlot3D Demo", None, imgui.WindowFlags_.menu_bar.value) + imgui.set_next_window_pos((100, 100), imgui.Cond_.first_use_ever) + imgui.set_next_window_size((600, 750), imgui.Cond_.first_use_ever) + imgui.begin("ImPlot3D Demo", None, imgui.WindowFlags_.menu_bar) if imgui.begin_menu_bar(): if imgui.begin_menu("Tools"): _, static.show_implot3d_style_editor = imgui.menu_item("Style Editor", "", static.show_implot3d_style_editor) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_implot3d/implot3d_meshes.py b/blimgui/dist64/imgui_bundle/demos_python/demos_implot3d/implot3d_meshes.py index 184a405..724bfa7 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_implot3d/implot3d_meshes.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_implot3d/implot3d_meshes.py @@ -201,7 +201,7 @@ def make_sphere_mesh() -> implot3d.Mesh: ] for i in range(len(points)): - points[i] = implot3d.Point(points[i][0], points[i][1], points[i][2]) + points[i] = implot3d.Point(points[i][0], points[i][1], points[i][2]) # type: ignore # Define sphere indices (faces) indices = [ @@ -527,7 +527,7 @@ def make_sphere_mesh() -> implot3d.Mesh: 160, 161, 159 ] - return implot3d.Mesh(points=points, idx=indices) + return implot3d.Mesh(points=points, idx=indices) # type: ignore def make_duck_mesh() -> implot3d.Mesh: @@ -788,10 +788,10 @@ def make_duck_mesh() -> implot3d.Mesh: [0.055104, -0.898766, 0.325273] ] for i in range(len(points)): - points[i] = implot3d.Point(points[i][0], points[i][1], points[i][2]) + points[i] = implot3d.Point(points[i][0], points[i][1], points[i][2]) # type: ignore idx = [2, 73, 74, 2, 74, 21, 30, 0, 20, 30, 20, 31, 43, 42, 1, 43, 1, 33, 41, 40, 13, 41, 13, 3, 40, 71, 70, 40, 70, 13, 48, 9, 32, 48, 32, 47, 45, 88, 10, 45, 10, 49, 39, 38, 35, 39, 35, 34, 66, 37, 16, 66, 16, 67, 6, 30, 31, 6, 31, 19, 46, 47, 32, 46, 32, 8, 44, 43, 33, 44, 33, 7, 
70, 69, 5, 70, 5, 13, 35, 27, 24, 35, 24, 34, 16, 18, 68, 16, 68, 67, 36, 18, 16, 36, 16, 37, 64, 17, 15, 64, 15, 65, 26, 29, 28, 26, 28, 25, 50, 11, 12, 50, 12, 45, 72, 75, 74, 72, 74, 73, 17, 36, 37, 17, 37, 15, 65, 15, 37, 65, 37, 66, 25, 28, 38, 25, 38, 39, 40, 88, 12, 40, 12, 71, 10, 88, 40, 10, 40, 41, 32, 9, 42, 32, 42, 43, 8, 32, 43, 8, 43, 44, 85, 84, 22, 85, 22, 23, 20, 78, 77, 20, 77, 31, 19, 31, 77, 19, 77, 76, 48, 58, 51, 48, 51, 9, 10, 52, 56, 10, 56, 49, 41, 53, 52, 41, 52, 10, 9, 51, 54, 9, 54, 42, 81, 80, 55, 81, 55, 21, 20, 57, 79, 20, 79, 78, 90, 89, 59, 90, 59, 60, 61, 91, 90, 61, 90, 60, 62, 92, 91, 62, 91, 61, 63, 93, 92, 63, 92, 62, 29, 64, 65, 29, 65, 28, 28, 65, 66, 28, 66, 38, 38, 66, 67, 38, 67, 35, 68, 27, 35, 68, 35, 67, 60, 59, 69, 60, 69, 70, 71, 61, 60, 71, 60, 70, 12, 62, 61, 12, 61, 71, 11, 63, 62, 11, 62, 12, 14, 4, 72, 14, 72, 73, 2, 14, 73, 22, 81, 21, 22, 21, 74, 75, 23, 22, 75, 22, 74, 83, 82, 87, 83, 87, 86, 76, 77, 47, 76, 47, 46, 78, 48, 47, 78, 47, 77, 79, 58, 48, 79, 48, 78, 49, 56, 82, 49, 82, 83, 84, 45, 49, 84, 49, 83, 85, 50, 45, 85, 45, 84, 87, 80, 81, 87, 81, 86, 22, 84, 83, 22, 83, 86, 45, 12, 88, 34, 24, 89, 34, 89, 90, 91, 39, 34, 91, 34, 90, 92, 25, 39, 92, 39, 91, 93, 26, 25, 93, 25, 92, 0, 98, 100, 0, 100, 20, 1, 96, 95, 1, 95, 33, 30, 99, 98, 30, 98, 0, 97, 96, 1, 97, 1, 42, 96, 3, 13, 96, 13, 95, 41, 3, 96, 41, 96, 97, 98, 2, 21, 98, 21, 100, 99, 14, 2, 99, 2, 98, 102, 103, 51, 102, 51, 58, 56, 52, 103, 56, 103, 102, 101, 4, 14, 101, 14, 99, 6, 101, 99, 6, 99, 30, 95, 94, 7, 95, 7, 33, 13, 5, 94, 13, 94, 95, 52, 53, 104, 52, 104, 103, 103, 104, 54, 103, 54, 51, 105, 106, 79, 105, 79, 57, 97, 104, 53, 97, 53, 41, 42, 54, 104, 42, 104, 97, 21, 55, 105, 21, 105, 100, 100, 105, 57, 100, 57, 20, 106, 105, 87, 106, 87, 82, 106, 102, 58, 106, 58, 79, 82, 56, 102, 82, 102, 106, 22, 86, 81, 80, 87, 105, 80, 105, 55, 109, 119, 157, 109, 157, 156, 123, 124, 118, 123, 118, 107, 135, 126, 108, 135, 108, 134, 133, 110, 
114, 133, 114, 132, 132, 114, 154, 132, 154, 155, 138, 137, 125, 138, 125, 111, 136, 139, 112, 136, 112, 168, 131, 127, 128, 131, 128, 130, 152, 153, 117, 152, 117, 129, 6, 19, 124, 6, 124, 123, 46, 8, 125, 46, 125, 137, 44, 7, 126, 44, 126, 135, 154, 114, 5, 154, 5, 69, 128, 127, 24, 128, 24, 27, 117, 153, 68, 117, 68, 18, 36, 129, 117, 36, 117, 18, 64, 151, 116, 64, 116, 17, 26, 121, 122, 26, 122, 29, 50, 136, 113, 50, 113, 11, 72, 156, 157, 72, 157, 75, 17, 116, 129, 17, 129, 36, 151, 152, 129, 151, 129, 116, 121, 131, 130, 121, 130, 122, 132, 155, 113, 132, 113, 168, 112, 133, 132, 112, 132, 168, 125, 135, 134, 125, 134, 111, 8, 44, 135, 8, 135, 125, 85, 23, 120, 85, 120, 165, 118, 124, 158, 118, 158, 159, 19, 76, 158, 19, 158, 124, 138, 111, 140, 138, 140, 147, 112, 139, 145, 112, 145, 141, 133, 112, 141, 133, 141, 142, 111, 134, 143, 111, 143, 140, 162, 119, 144, 162, 144, 161, 118, 159, 160, 118, 160, 146, 169, 148, 59, 169, 59, 89, 149, 148, 169, 149, 169, 170, 150, 149, 170, 150, 170, 171, 63, 150, 171, 63, 171, 93, 29, 122, 151, 29, 151, 64, 122, 130, 152, 122, 152, 151, 130, 128, 153, 130, 153, 152, 68, 153, 128, 68, 128, 27, 148, 154, 69, 148, 69, 59, 155, 154, 148, 155, 148, 149, 113, 155, 149, 113, 149, 150, 11, 113, 150, 11, 150, 63, 115, 156, 72, 115, 72, 4, 109, 156, 115, 120, 157, 119, 120, 119, 162, 75, 157, 120, 75, 120, 23, 164, 166, 167, 164, 167, 163, 76, 46, 137, 76, 137, 158, 159, 158, 137, 159, 137, 138, 160, 159, 138, 160, 138, 147, 139, 164, 163, 139, 163, 145, 165, 164, 139, 165, 139, 136, 85, 165, 136, 85, 136, 50, 167, 166, 162, 167, 162, 161, 120, 166, 164, 120, 164, 165, 136, 168, 113, 127, 169, 89, 127, 89, 24, 170, 169, 127, 170, 127, 131, 171, 170, 131, 171, 131, 121, 93, 171, 121, 93, 121, 26, 107, 118, 177, 107, 177, 175, 108, 126, 172, 108, 172, 173, 123, 107, 175, 123, 175, 176, 174, 134, 108, 174, 108, 173, 173, 172, 114, 173, 114, 110, 133, 174, 173, 133, 173, 110, 175, 177, 119, 175, 119, 109, 176, 175, 109, 176, 109, 115, 
178, 147, 140, 178, 140, 179, 145, 178, 179, 145, 179, 141, 101, 176, 115, 101, 115, 4, 6, 123, 176, 6, 176, 101, 172, 126, 7, 172, 7, 94, 114, 172, 94, 114, 94, 5, 141, 179, 180, 141, 180, 142, 179, 140, 143, 179, 143, 180, 181, 146, 160, 181, 160, 182, 174, 133, 142, 174, 142, 180, 134, 174, 180, 134, 180, 143, 119, 177, 181, 119, 181, 144, 177, 118, 146, 177, 146, 181, 182, 163, 167, 182, 167, 181, 182, 160, 147, 182, 147, 178, 163, 182, 178, 163, 178, 145, 120, 162, 166, 161, 144, 181, 161, 181, 167, 189, 185, 190, 189, 190, 199, 190, 183, 188, 190, 188, 199, 197, 187, 196, 197, 196, 200, 188, 183, 193, 188, 193, 201, 192, 186, 191, 192, 191, 198, 191, 184, 194, 191, 194, 198, 188, 184, 191, 188, 191, 199, 191, 186, 189, 191, 189, 199, 194, 184, 188, 194, 188, 201, 196, 187, 195, 196, 195, 202, 195, 185, 189, 195, 189, 202, 189, 186, 196, 189, 196, 202, 196, 186, 192, 196, 192, 200, 209, 214, 210, 209, 210, 205, 210, 214, 208, 210, 208, 203, 197, 200, 213, 197, 213, 207, 208, 201, 193, 208, 193, 203, 192, 198, 211, 192, 211, 206, 211, 198, 194, 211, 194, 204, 208, 214, 211, 208, 211, 204, 211, 214, 209, 211, 209, 206, 194, 201, 208, 194, 208, 204, 213, 215, 212, 213, 212, 207, 212, 215, 209, 212, 209, 205, 209, 215, 213, 209, 213, 206, 213, 200, 192, 213, 192, 206, 230, 225, 231, 230, 231, 236, 231, 223, 229, 231, 229, 236, 219, 227, 234, 229, 223, 217, 229, 217, 221, 216, 226, 232, 216, 232, 220, 232, 224, 218, 232, 218, 220, 229, 224, 232, 229, 232, 236, 232, 226, 230, 232, 230, 236, 234, 238, 228, 234, 228, 219, 218, 224, 229, 218, 229, 221, 234, 227, 233, 234, 233, 237, 233, 225, 230, 233, 230, 237, 230, 226, 235, 230, 235, 237, 235, 238, 234, 235, 234, 237, 228, 238, 235, 228, 235, 222, 235, 226, 216, 235, 216, 222, 245, 251, 246, 245, 246, 241, 246, 251, 244, 246, 244, 239, 219, 249, 243, 244, 221, 217, 244, 217, 239, 216, 220, 247, 216, 247, 242, 247, 220, 218, 247, 218, 240, 244, 251, 247, 244, 247, 240, 247, 251, 245, 247, 245, 242, 249, 219, 228, 249, 
228, 253, 218, 221, 244, 218, 244, 240, 249, 252, 248, 249, 248, 243, 248, 252, 245, 248, 245, 241, 245, 252, 250, 245, 250, 242, 250, 252, 249, 250, 249, 253, 228, 222, 250, 228, 250, 253, 250, 222, 216, 250, 216, 242] - mesh = implot3d.Mesh(points, idx) + mesh = implot3d.Mesh(points, idx) # type: ignore return mesh diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full.py b/blimgui/dist64/imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full.py index 0659c15..f4b1b76 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_nanovg/demo_nanovg_full.py @@ -81,13 +81,14 @@ def custom_background(): runner_params.callbacks.custom_background = custom_background def gui(): - imgui.set_next_window_pos(ImVec2(0, 0), imgui.Cond_.appearing.value) - imgui.begin("My Window!", None, imgui.WindowFlags_.always_auto_resize.value) + imgui.set_next_window_pos(ImVec2(0, 0), imgui.Cond_.appearing) + imgui.begin("My Window!", None, imgui.WindowFlags_.always_auto_resize) if app_state.display_in_frame_buffer: clear_color_vec4 = ImVec4(*app_state.clear_color) nvg_imgui.render_nvg_to_frame_buffer(app_state.vg, app_state.myFrameBuffer, nvg_drawing_function, clear_color_vec4) - imgui.image(app_state.myFrameBuffer.texture_id, ImVec2(1000, 600)) + imgui.image( + imgui.ImTextureRef(app_state.myFrameBuffer.texture_id), ImVec2(1000, 600)) imgui.button("?##Note") if imgui.is_item_hovered(): diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_nanovg/demo_nanovg_heart.py b/blimgui/dist64/imgui_bundle/demos_python/demos_nanovg/demo_nanovg_heart.py index 9a0a12b..bde94cd 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_nanovg/demo_nanovg_heart.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_nanovg/demo_nanovg_heart.py @@ -135,7 +135,9 @@ def gui(): assert app_state.nvg_framebuffer is not None nvg_imgui.render_nvg_to_frame_buffer(app_state.vg, 
app_state.nvg_framebuffer, draw_scene) assert app_state.nvg_framebuffer is not None - imgui.image(app_state.nvg_framebuffer.texture_id, hello_imgui.em_to_vec2(50, 30)) + imgui.image( + imgui.ImTextureRef(app_state.nvg_framebuffer.texture_id), + hello_imgui.em_to_vec2(50, 30)) _, gDrawingState.heart_color = imgui.color_edit4("Heart color", gDrawingState.heart_color) diff --git a/blimgui/dist64/imgui_bundle/demos_python/demos_node_editor/demo_node_editor_basic.py b/blimgui/dist64/imgui_bundle/demos_python/demos_node_editor/demo_node_editor_basic.py index 49e5c61..baf802a 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/demos_node_editor/demo_node_editor_basic.py +++ b/blimgui/dist64/imgui_bundle/demos_python/demos_node_editor/demo_node_editor_basic.py @@ -9,6 +9,12 @@ ) from imgui_bundle.immapp import static, run_anon_block +import os + +# Fallback for __file__ in Pyodide +if '__file__' not in globals(): + __file__ = os.getcwd() + '/script.py' + class IdProvider: """A simple utility to obtain unique ids, and to be able to restore them at each frame""" @@ -223,7 +229,6 @@ def demo_gui(): def main(): - import os this_dir = os.path.dirname(__file__) config = ed.Config() diff --git a/blimgui/dist64/imgui_bundle/demos_python/sandbox/sandbox_glfw_window_manip.py b/blimgui/dist64/imgui_bundle/demos_python/sandbox/sandbox_glfw_window_manip.py index 411b402..b43e040 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/sandbox/sandbox_glfw_window_manip.py +++ b/blimgui/dist64/imgui_bundle/demos_python/sandbox/sandbox_glfw_window_manip.py @@ -4,7 +4,7 @@ """ from imgui_bundle import hello_imgui, imgui, glfw_utils -import glfw +import glfw # pip install glfw def gui(): diff --git a/blimgui/dist64/imgui_bundle/demos_python/sandbox/sandbox_tstengine_keys.py b/blimgui/dist64/imgui_bundle/demos_python/sandbox/sandbox_tstengine_keys.py index 8b01947..f46484b 100644 --- a/blimgui/dist64/imgui_bundle/demos_python/sandbox/sandbox_tstengine_keys.py +++ 
b/blimgui/dist64/imgui_bundle/demos_python/sandbox/sandbox_tstengine_keys.py @@ -10,10 +10,10 @@ def test_inc_func(ctx: imgui.test_engine.TestContext) -> None: test_press = imgui.test_engine.register_test(engine, "Demo Tests", "Press keys") def test_press_func(ctx: imgui.test_engine.TestContext) -> None: - ctx.key_down(imgui.Key.left_alt.value) - ctx.key_down(imgui.Key.a.value) - ctx.key_up(imgui.Key.a.value) - ctx.key_up(imgui.Key.left_alt.value) + ctx.key_down(imgui.Key.left_alt) + ctx.key_down(imgui.Key.a) + ctx.key_up(imgui.Key.a) + ctx.key_up(imgui.Key.left_alt) test_press.test_func = test_press_func diff --git a/blimgui/dist64/imgui_bundle/glfw_utils.py b/blimgui/dist64/imgui_bundle/glfw_utils.py index 5df356c..0ee3e79 100644 --- a/blimgui/dist64/imgui_bundle/glfw_utils.py +++ b/blimgui/dist64/imgui_bundle/glfw_utils.py @@ -1,8 +1,9 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from typing import cast try: - import glfw # type: ignore + import glfw # pip install glfw + def glfw_window_hello_imgui() -> glfw._GLFWwindow: """Return the main glfw window used by HelloImGui (when the backend is GLFW) @@ -15,6 +16,16 @@ def glfw_window_hello_imgui() -> glfw._GLFWwindow: window_pointer = ctypes.cast(window_address, ctypes.POINTER(glfw._GLFWwindow)) return cast(glfw._GLFWwindow, window_pointer) -except ImportError: + +except (ImportError, ModuleNotFoundError): + + + def glfw_window_hello_imgui() -> None: + import sys + print("""Please install glfw, so that glfw_window_hello_imgui works: + pip install glfw""") + sys.exit(1) + + pass # print("Warning: could not import glfw") diff --git a/blimgui/dist64/imgui_bundle/hello_imgui.pyi b/blimgui/dist64/imgui_bundle/hello_imgui.pyi index d83a716..d7c2846 100644 --- a/blimgui/dist64/imgui_bundle/hello_imgui.pyi +++ 
b/blimgui/dist64/imgui_bundle/hello_imgui.pyi @@ -65,6 +65,20 @@ def EmptyVoidFunction() -> VoidFunction: def EmptyEventCallback() -> AnyEventCallback: pass +def set_load_asset_file_data_function(fn: Callable[[str], bytes]) -> None: + """Register a Python callback that receives an asset and returns the file contents as a **bytes** object. + + Example + ------- + ```python + def my_loader(filename: str) -> bytes: + from pathlib import Path + return Path(filename).read_bytes() + + hello_imgui.set_load_file_bytes_func(my_loader) + """ + pass + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! AUTOGENERATED CODE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # // Autogenerated code below! Do not edit! #################### #################### @@ -80,26 +94,14 @@ class DpiAwareParams: """ Hello ImGui will try its best to automatically handle DPI scaling for you. - Parameters to change the scaling behavior: + Parameter to change the scaling behavior: ------------------------------------------ - `dpiWindowSizeFactor`: factor by which window size should be multiplied + By default, Hello ImGui will compute it automatically, when it is set to 0. - - `fontRenderingScale`: - factor by which fonts glyphs should be scaled at rendering time - (typically 1 on windows, and 0.5 on macOS retina screens) - - By default, Hello ImGui will compute them automatically, - when dpiWindowSizeFactor and fontRenderingScale are set to 0. - - Parameters to improve font rendering quality: - --------------------------------------------- - - `fontOversampleH` and `fontOversampleV` : Font oversampling parameters - Rasterize at higher quality for sub-pixel positioning. Probably unused if freeType is used. - If not zero, these values will be used to set the oversampling factor when loading fonts. - - How to set those values manually: + How to set manually: --------------------------------- If it fails (i.e. 
your window and/or fonts are too big or too small), you may set them manually: @@ -131,57 +133,18 @@ class DpiAwareParams: # and the resulting value will be stored in `dpiWindowSizeFactor`. dpi_window_size_factor: float = 0.0 - # float fontRenderingScale = 0.0f; /* original C++ signature */ - # `fontRenderingScale` - # factor (that is either 1 or < 1.) by which fonts glyphs should be scaled at rendering time. - # On macOS retina screens, it will be 0.5, since macOS APIs hide the real resolution of the screen. - # Changing this value will *not* change the visible font size on the screen, however it will - # affect the size of the loaded glyphs. - # For example, if fontRenderingScale=0.5 (which is the default on a macOS retina screen), - # a font size of 16 will be loaded as if it was 32, and will be rendered at half size. - # This leads to a better rendering quality on some platforms. - # (This parameter will be used to set ImGui::GetIO().FontGlobalScale at startup) - font_rendering_scale: float = 0.0 - - # bool onlyUseFontDpiResponsive = false; /* original C++ signature */ - # `onlyUseFontDpiResponsive` - # If True, guarantees that only HelloImGui::LoadDpiResponsiveFont will be used to load fonts. - # (also for the default font) - only_use_font_dpi_responsive: bool = False - - # `fontOversampleH` and `fontOversampleV` : Font oversampling parameters - # Rasterize at higher quality for sub-pixel positioning. Probably unused if freeType is used. - # If not zero, these values will be used to set the oversampling factor when loading fonts. - # (i.e. they will be set in ImFontConfig::OversampleH and ImFontConfig::OversampleV) - # OversampleH: The difference between 2 and 3 for OversampleH is minimal. - # You can reduce this to 1 for large glyphs save memory. - # OversampleV: This is not really useful as we don't use sub-pixel positions on the Y axis. - # Read https://github.com/nothings/stb/blob/master/tests/oversample/README.md for details. 
- # int fontOversampleH = 0; /* original C++ signature */ - font_oversample_h: int = 0 # Default is 2 in ImFontConfig - # int fontOversampleV = 0; /* original C++ signature */ - font_oversample_v: int = 0 # Default is 1 in ImFontConfig - # float DpiFontLoadingFactor() const { /* original C++ signature */ - # float r = dpiWindowSizeFactor / fontRenderingScale; - # return r; + # return dpiWindowSizeFactor; # } def dpi_font_loading_factor(self) -> float: - """`dpiFontLoadingFactor` - factor by which font size should be multiplied at loading time to get a similar - visible size on different OSes. + """`DpiFontLoadingFactor` + factor by which font size should be multiplied at loading time to get a similar visible size on different OSes. + This is equal to dpiWindowSizeFactor The size will be equivalent to a size given for a 96 PPI screen """ pass - # DpiAwareParams(float dpiWindowSizeFactor = 0.0f, float fontRenderingScale = 0.0f, bool onlyUseFontDpiResponsive = false, int fontOversampleH = 0, int fontOversampleV = 0); /* original C++ signature */ - def __init__( - self, - dpi_window_size_factor: float = 0.0, - font_rendering_scale: float = 0.0, - only_use_font_dpi_responsive: bool = False, - font_oversample_h: int = 0, - font_oversample_v: int = 0, - ) -> None: + # DpiAwareParams(float dpiWindowSizeFactor = 0.0f); /* original C++ signature */ + def __init__(self, dpi_window_size_factor: float = 0.0) -> None: """Auto-generated default constructor with named params""" pass @@ -199,8 +162,8 @@ class DpiAwareParams: # application to be used on high DPI screens! # Otherwise, widgets might be misplaced or too small on different screens and/or OS. # -# Instead you should use scale your widgets and windows relatively to the font size, -# as is done with the [em CSS Unit](https://lyty.dev/css/css-unit.html). +# Instead, you should use scale your widgets and windows relatively to the font size, +# as is done with the [em CSS Unit](https://www.w3schools.com/cssref/css_units.php). 
# # @@md # * @@ -252,6 +215,9 @@ def get_dpi_aware_params() -> DpiAwareParams: # ---------------------------------------------------------------------------- +# +# Legacy API, you should use RunnerParams.dpiAwareParams instead +# # float DpiFontLoadingFactor(); /* original C++ signature */ def dpi_font_loading_factor() -> float: """Multiply font sizes by this factor when loading fonts manually with ImGui::GetIO().Fonts->AddFont... @@ -262,18 +228,10 @@ def dpi_font_loading_factor() -> float: # float DpiWindowSizeFactor(); /* original C++ signature */ def dpi_window_size_factor() -> float: """DpiWindowSizeFactor() is the factor by which window size should be multiplied to get a similar visible size on different OSes. - It returns ApplicationScreenPixelPerInch / 96 under windows and linux. Under macOS, it will return 1. + It returns ApplicationScreenPixelPerInch / 96 under windows and linux. Under macOS, it will return 1. """ pass -# float ImGuiDefaultFontGlobalScale(); /* original C++ signature */ -# } -def imgui_default_font_global_scale() -> float: - """returns the default value that should be stored inside `ImGui::GetIO().FontGlobalScale`""" - pass - -# namespace HelloImGui - # ---------------------------------------------------------------------------- # Handling screens with high DPI # ---------------------------------------------------------------------------- @@ -332,31 +290,16 @@ def imgui_default_font_global_scale() -> float: # - You cannot change DisplayFramebufferScale manually, it will be reset at each new frame, by asking the platform backend. # # -### FontGlobalScale -# -# `ImGui::GetIO().FontGlobalScale` is a factor by which fonts glyphs should be scaled at rendering time. -# It is typically 1 on windows, and 0.5 on macOS retina screens. 
-# -# ### How to load fonts with the correct size # #### Using HelloImGui (recommended) # -# [`HelloImGui::LoadFont()` and `HelloImGui::LoadFontDpiResponsive`](https://pthom.github.io/hello_imgui/book/doc_api.html#load-fonts) will load fonts +# [`HelloImGui::LoadFont()`](https://pthom.github.io/hello_imgui/book/doc_api.html#load-fonts) will load fonts # with the correct size, taking into account the DPI scaling. # #### Using Dear ImGui # `ImGui::GetIO().Fonts->AddFontFromFileTTF()` loads a font with a given size, in *physical pixels*. -# -# If for example, DisplayFramebufferScale is (2,2), and you load a font with a size of 16, it will by default be rendered -# with size of 16 *virtual screen coordinate pixels* (i.e. 32 physical pixels). This will lead to blurry text. -# To solve this, you should load your font with a size of 16 *virtual screen coordinate pixels* (i.e. 32 physical pixels), -# and set `ImGui::GetIO().FontGlobalScale` to 0.5. -# -# Helpers if using `ImGui::GetIO().Fonts->AddFontFromFileTTF()`: -# - `HelloImGui::ImGuiDefaultFontGlobalScale()` returns the default value that should be stored inside `ImGui::GetIO().FontGlobalScale`. -# - `HelloImGui::DpiFontLoadingFactor()` returns a factor by which you shall multiply your font sizes when loading them. -# +# KKDYNFONT: TBC... 
# ### Reproducible physical window sizes (in mm or inches) # @@ -379,6 +322,10 @@ def imgui_default_font_global_scale() -> float: # @@md # +# //////////////////////////////////////////////////////////////////////////////////////////////////////////////// +# hello_imgui/hello_imgui_assets.h included by hello_imgui.h // +# ////////////////////////////////////////////////////////////////////////////////////////////////////////////// + # * # @@md#AssetsStructure # @@ -419,6 +366,8 @@ def load_asset_file_data(asset_path: str) -> AssetFileData: You *have* to call FreeAssetFileData to free the memory, except if you use ImGui::GetIO().Fonts->AddFontFromMemoryTTF, which will take ownership of the data and free it for you. + This function can be redirected with setLoadAssetFileDataFunction. If not redirected, + it calls DefaultLoadAssetFileData. """ pass @@ -433,6 +382,13 @@ def free_asset_file_data(asset_file_data: AssetFileData) -> None: # @@md +# AssetFileData DefaultLoadAssetFileData(const char *assetPath); /* original C++ signature */ +def default_load_asset_file_data(asset_path: str) -> AssetFileData: + """This function actually performs the asset load, as described in + LoadAssetFileData + """ + pass + # @@md#assetFileFullPath # std::string AssetFileFullPath(const std::string& assetRelativeFilename, /* original C++ signature */ @@ -487,8 +443,6 @@ def override_assets_folder(folder: str) -> None: """synonym of SetAssetsFolder""" pass -# namespace HelloImGui - # //////////////////////////////////////////////////////////////////////////////////////////////////////////////// # hello_imgui/hello_imgui_error.h included by hello_imgui.h // # ////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -496,7 +450,7 @@ def override_assets_folder(folder: str) -> None: # //////////////////////////////////////////////////////////////////////////////////////////////////////////////// # hello_imgui/hello_imgui_logger.h included 
by hello_imgui.h // # ////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class LogLevel(enum.Enum): +class LogLevel(enum.IntEnum): # Debug, /* original C++ signature */ debug = enum.auto() # (= 0) # Info, /* original C++ signature */ @@ -548,19 +502,39 @@ def log_gui(size: Optional[ImVec2Like] = None) -> None: # ``` # void ImageFromAsset(const char *assetPath, const ImVec2& size = ImVec2(0, 0), /* original C++ signature */ -# const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1,1), -# const ImVec4& tint_col = ImVec4(1,1,1,1), -# const ImVec4& border_col = ImVec4(0,0,0,0)); +# const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1,1)); def image_from_asset( asset_path: str, size: Optional[ImVec2Like] = None, uv0: Optional[ImVec2Like] = None, uv1: Optional[ImVec2Like] = None, +) -> None: + """`HelloImGui::ImageFromAsset(const char *assetPath, size, ...)`: + will display a static image from the assets. + + + Python bindings defaults: + If any of the params below is None, then its default value below will be used: + * size: ImVec2(0, 0) + * uv0: ImVec2(0, 0) + * uv1: ImVec2(1,1) + """ + pass + +# void ImageFromAssetWithBg(const char *assetPath, const ImVec2& size = ImVec2(0, 0), /* original C++ signature */ +# const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1,1), +# const ImVec4& tint_col = ImVec4(1,1,1,1), +# const ImVec4& border_col = ImVec4(0,0,0,0)); +def image_from_asset_with_bg( + asset_path: str, + size: Optional[ImVec2Like] = None, + uv0: Optional[ImVec2Like] = None, + uv1: Optional[ImVec2Like] = None, tint_col: Optional[ImVec4Like] = None, border_col: Optional[ImVec4Like] = None, ) -> None: """`HelloImGui::ImageFromAsset(const char *assetPath, size, ...)`: - will display a static image from the assets. + will display a static image from the assets, with a colored background and a border. 
Python bindings defaults: @@ -670,7 +644,7 @@ def image_proportional_size(asked_size: ImVec2Like, image_size: ImVec2Like) -> I # Some themes were adapted by themes posted by ImGui users at https://github.com/ocornut/imgui/issues/707 # -class ImGuiTheme_(enum.Enum): +class ImGuiTheme_(enum.IntEnum): # ImGuiTheme_ImGuiColorsClassic = 0, /* original C++ signature */ imgui_colors_classic = enum.auto() # (= 0) # ImGuiTheme_ImGuiColorsDark, /* original C++ signature */ @@ -859,17 +833,6 @@ def darcula( def show_theme_tweak_gui_window(p_open: Optional[bool] = None) -> Optional[bool]: pass -# //////////////////////////////////////////////////////////////////////////////////////////////////////////////// -# hello_imgui/hello_imgui_font.h included by hello_imgui.h // -# ////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -# std::vector translate_common_glyph_ranges(const std::vector & glyphRanges); /* original C++ signature */ -def translate_common_glyph_ranges(glyph_ranges: List[ImWchar]) -> List[ImWcharPair]: - """Utility to translate DearImGui common Unicode ranges to ImWcharPair (Python) - (get_glyph_ranges_chinese_simplified_common, get_glyph_ranges_japanese, ...) 
- """ - pass - # @@md#Fonts # When loading fonts, use @@ -888,17 +851,9 @@ class FontLoadingParams: # bool adjustSizeToDpi = true; /* original C++ signature */ # if True, the font size will be adjusted automatically to account for HighDPI + # adjust_size_to_dpi: bool = True - # bool useFullGlyphRange = false; /* original C++ signature */ - # if True, the font will be loaded with the full glyph range - use_full_glyph_range: bool = False - # bool reduceMemoryUsageIfFullGlyphRange = true; /* original C++ signature */ - # if set, fontConfig.GlyphRanges, and - # fontConfig.OversampleH / fontConfig.OversampleV will be set to 1 - # when useFullGlyphRange is True (this is useful to save memory) - reduce_memory_usage_if_full_glyph_range: bool = True - # bool mergeToLastFont = false; /* original C++ signature */ # if True, the font will be merged to the last font merge_to_last_font: bool = False @@ -913,78 +868,23 @@ class FontLoadingParams: # Otherwise, it will be loaded from the filesystem inside_assets: bool = True - # std::vector glyphRanges = {}; /* original C++ signature */ - # the ranges of glyphs to load, as a list of pairs of ImWchar - # - if empty, the default glyph range will be used - # - you can specify several ranges - # - intervals bounds are inclusive - # Note: in order to use common ranges defined by ImGui (GetGlyphRangesJapanese, GetGlyphRangesChinese, ...) 
- # use TranslateCommonGlyphRanges (or translate_common_glyph_ranges in Python) - glyph_ranges: List[ImWcharPair] = List[ImWcharPair]() - # ImFontConfig fontConfig = ImFontConfig(); /* original C++ signature */ # ImGui native font config to use font_config: ImFontConfig = ImFontConfig() - - # if True, the font will be loaded and then FontAwesome icons will be merged to it - # (deprecated, use mergeToLastFont instead, and load in two steps) - # This will use an old version of FontAwesome (FontAwesome 4) - # bool mergeFontAwesome = false; /* original C++ signature */ - merge_font_awesome: bool = False - # ImFontConfig fontConfigFontAwesome = ImFontConfig(); /* original C++ signature */ - font_config_font_awesome: ImFontConfig = ImFontConfig() - # void blah(bool adjustSizeToDpi = true, bool useFullGlyphRange = false, bool reduceMemoryUsageIfFullGlyphRange = true, bool mergeToLastFont = false, bool loadColor = false, bool insideAssets = true, std::vector glyphRanges = __srcmlcpp_brace_init__(), ImFontConfig fontConfig = ImFontConfig(), bool mergeFontAwesome = false, ImFontConfig fontConfigFontAwesome = ImFontConfig()); /* original C++ signature */ + # FontLoadingParams(bool adjustSizeToDpi = true, bool mergeToLastFont = false, bool loadColor = false, bool insideAssets = true, ImFontConfig fontConfig = ImFontConfig()); /* original C++ signature */ def __init__( self, adjust_size_to_dpi: bool = True, - use_full_glyph_range: bool = False, - reduce_memory_usage_if_full_glyph_range: bool = True, merge_to_last_font: bool = False, load_color: bool = False, inside_assets: bool = True, - glyph_ranges: Optional[List[ImWcharPair]] = None, font_config: Optional[ImFontConfig] = None, - merge_font_awesome: bool = False, - font_config_font_awesome: Optional[ImFontConfig] = None, - ) -> None: - """Auto-generated default constructor with named params - - - Python bindings defaults: - If any of the params below is None, then its default value below will be used: - * glyphRanges: 
initialized with default value - * fontConfig: ImFontConfig() - * fontConfigFontAwesome: ImFontConfig() - """ - pass - -class FontDpiResponsive: - """A font that will be automatically resized to account for changes in DPI - Use LoadAdaptiveFont instead of LoadFont to get this behavior. - Fonts loaded with LoadAdaptiveFont will be reloaded during execution - if ImGui::GetIO().FontGlobalScale is changed. - """ - - # ImFont* font = nullptr; /* original C++ signature */ - font: ImFont = None - # std::string fontFilename; /* original C++ signature */ - font_filename: str - # float fontSize = 0.f; /* original C++ signature */ - font_size: float = 0.0 - # FontLoadingParams fontLoadingParams; /* original C++ signature */ - font_loading_params: FontLoadingParams - # FontDpiResponsive(std::string fontFilename = std::string(), float fontSize = 0.f, FontLoadingParams fontLoadingParams = FontLoadingParams()); /* original C++ signature */ - def __init__( - self, - font_filename: str = "", - font_size: float = 0.0, - font_loading_params: Optional[FontLoadingParams] = None, ) -> None: """Auto-generated default constructor with named params Python bindings defaults: - If fontLoadingParams is None, then its default value will be: FontLoadingParams() + If fontConfig is None, then its default value will be: ImFontConfig() """ pass @@ -999,33 +899,13 @@ def load_font( """ pass -# FontDpiResponsive* LoadFontDpiResponsive( /* original C++ signature */ -# const std::string & fontFilename, float fontSize, -# const FontLoadingParams & params = __srcmlcpp_brace_init__()); -def load_font_dpi_responsive( - font_filename: str, font_size: float, params: Optional[FontLoadingParams] = None -) -> FontDpiResponsive: - """Python bindings defaults: - If params is None, then its default value will be: initialized with default value - """ - pass - -# @@md - -# -# Deprecated API below, kept for compatibility (uses LoadFont internally) -# # ImFont* LoadFontTTF( /* original C++ signature */ # const 
std::string & fontFilename, # float fontSize, -# bool useFullGlyphRange = false, # ImFontConfig config = ImFontConfig() -# ); +# ); def load_font_ttf( - font_filename: str, - font_size: float, - use_full_glyph_range: bool = False, - config: Optional[ImFontConfig] = None, + font_filename: str, font_size: float, config: Optional[ImFontConfig] = None ) -> ImFont: """Python bindings defaults: If config is None, then its default value will be: ImFontConfig() @@ -1035,40 +915,17 @@ def load_font_ttf( # ImFont* LoadFontTTF_WithFontAwesomeIcons( /* original C++ signature */ # const std::string & fontFilename, # float fontSize, -# bool useFullGlyphRange = false, -# ImFontConfig configFont = ImFontConfig(), -# ImFontConfig configIcons = ImFontConfig() -# ); +# ImFontConfig configFont = ImFontConfig() +# ); def load_font_ttf_with_font_awesome_icons( - font_filename: str, - font_size: float, - use_full_glyph_range: bool = False, - config_font: Optional[ImFontConfig] = None, - config_icons: Optional[ImFontConfig] = None, -) -> ImFont: - """Python bindings defaults: - If any of the params below is None, then its default value below will be used: - * configFont: ImFontConfig() - * configIcons: ImFontConfig() - """ - pass - -# ImFont* MergeFontAwesomeToLastFont(float fontSize, ImFontConfig config = ImFontConfig()); /* original C++ signature */ -def merge_font_awesome_to_last_font( - font_size: float, config: Optional[ImFontConfig] = None + font_filename: str, font_size: float, config_font: Optional[ImFontConfig] = None ) -> ImFont: """Python bindings defaults: - If config is None, then its default value will be: ImFontConfig() + If configFont is None, then its default value will be: ImFontConfig() """ pass -# bool DidCallHelloImGuiLoadFontTTF(); /* original C++ signature */ -# } -def did_call_hello_imgui_load_font_ttf() -> bool: - """indicates that fonts were loaded using HelloImGui::LoadFont. 
In that case, fonts may have been resized to - account for HighDPI (on macOS and emscripten) - """ - pass +# @@md # //////////////////////////////////////////////////////////////////////////////////////////////////////////////// # hello_imgui/runner_params.h included by hello_imgui.h // @@ -1136,13 +993,11 @@ class ScreenBounds: """ pass -# namespace BackendApi - # //////////////////////////////////////////////////////////////////////////////////////////////////////////////// # hello_imgui/app_window_params.h continued // # ////////////////////////////////////////////////////////////////////////////////////////////////////////////// -class FullScreenMode(enum.Enum): +class FullScreenMode(enum.IntEnum): # NoFullScreen, /* original C++ signature */ no_full_screen = enum.auto() # (= 0) # FullScreen, /* original C++ signature */ @@ -1156,7 +1011,7 @@ class FullScreenMode(enum.Enum): enum.auto() ) # (= 3) # Fake full screen, maximized window on the selected monitor -class WindowSizeState(enum.Enum): +class WindowSizeState(enum.IntEnum): # Standard, /* original C++ signature */ standard = enum.auto() # (= 0) # Minimized, /* original C++ signature */ @@ -1165,7 +1020,7 @@ class WindowSizeState(enum.Enum): # } maximized = enum.auto() # (= 2) -class WindowPositionMode(enum.Enum): +class WindowPositionMode(enum.IntEnum): # OsDefault, /* original C++ signature */ os_default = enum.auto() # (= 0) # MonitorCenter, /* original C++ signature */ @@ -1174,7 +1029,7 @@ class WindowPositionMode(enum.Enum): # } from_coords = enum.auto() # (= 2) -class EmscriptenKeyboardElement(enum.Enum): +class EmscriptenKeyboardElement(enum.IntEnum): # Window, /* original C++ signature */ window = enum.auto() # (= 0) # Document, /* original C++ signature */ @@ -1187,7 +1042,7 @@ class EmscriptenKeyboardElement(enum.Enum): # } default = enum.auto() # (= 4) -class WindowSizeMeasureMode(enum.Enum): +class WindowSizeMeasureMode(enum.IntEnum): # ScreenCoords, /* original C++ signature */ # ScreenCoords: 
measure window size in screen coords. # Note: screen coordinates *might* differ from real pixel on high dpi screens; but this depends on the OS. @@ -1471,9 +1326,13 @@ class AppWindowParams: # @@md +# //////////////////////////////////////////////////////////////////////////////////////////////////////////////// +# hello_imgui/imgui_window_params.h included by hello_imgui/runner_params.h // +# ////////////////////////////////////////////////////////////////////////////////////////////////////////////// + # @@md#DefaultImGuiWindowType -class DefaultImGuiWindowType(enum.Enum): +class DefaultImGuiWindowType(enum.IntEnum): """`DefaultImGuiWindowType` is an enum class that defines whether a full screen background window is provided or not """ @@ -1654,8 +1513,6 @@ class ImGuiWindowParams: # hello_imgui/imgui_default_settings.h included by hello_imgui/runner_callbacks.h // # ////////////////////////////////////////////////////////////////////////////////////////////////////////////// -# namespace HelloImGui - # //////////////////////////////////////////////////////////////////////////////////////////////////////////////// # hello_imgui/runner_callbacks.h continued // # ////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -1739,7 +1596,7 @@ class MobileCallbacks: # @@md#EdgeToolbar -class EdgeToolbarType(enum.Enum): +class EdgeToolbarType(enum.IntEnum): """EdgeToolbarType: location of an Edge Toolbar""" # Top, /* original C++ signature */ @@ -1821,7 +1678,7 @@ def edge_toolbar_type_name(e: EdgeToolbarType) -> str: # @@md#DefaultIconFont -class DefaultIconFont(enum.Enum): +class DefaultIconFont(enum.IntEnum): """HelloImGui can optionally merge an icon font (FontAwesome 4 or 6) to the default font Breaking change in v1.5.0: - the default icon font is now FontAwesome 6, which includes many more icons. 
@@ -2077,8 +1934,6 @@ def append_callback( """AppendCallback: legacy synonym for SequenceFunctions""" pass -# namespace HelloImGui - # //////////////////////////////////////////////////////////////////////////////////////////////////////////////// # hello_imgui/docking_params.h included by hello_imgui/runner_params.h // # ////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -2405,7 +2260,7 @@ class DockableWindow: # @@md -class DockingLayoutCondition(enum.Enum): +class DockingLayoutCondition(enum.IntEnum): # FirstUseEver, /* original C++ signature */ first_use_ever = enum.auto() # (= 0) # ApplicationStart, /* original C++ signature */ @@ -2793,7 +2648,7 @@ class OpenGlOptionsFilled_: # You can select the platform backend type (SDL, GLFW) and the rendering backend type # via RunnerParams.platformBackendType and RunnerParams.renderingBackendType. -class PlatformBackendType(enum.Enum): +class PlatformBackendType(enum.IntEnum): """Platform backend type (SDL, GLFW) They are listed in the order of preference when FirstAvailable is selected. """ @@ -2808,7 +2663,7 @@ class PlatformBackendType(enum.Enum): # } null = enum.auto() # (= 3) -class RendererBackendType(enum.Enum): +class RendererBackendType(enum.IntEnum): """Rendering backend type (OpenGL3, Metal, Vulkan, DirectX11, DirectX12) They are listed in the order of preference when FirstAvailable is selected. """ @@ -2835,7 +2690,7 @@ class RendererBackendType(enum.Enum): # @@md#IniFolderType -class IniFolderType(enum.Enum): +class IniFolderType(enum.IntEnum): """IniFolderType is an enum which describes where is the base path to store the ini file for the application settings. @@ -2903,7 +2758,7 @@ def ini_folder_location(ini_folder_type: IniFolderType) -> str: # @@md#FpsIdling -class FpsIdlingMode(enum.Enum): +class FpsIdlingMode(enum.IntEnum): """FpsIdlingMode is an enum that describes the different modes of idling when rendering the GUI. 
- Sleep: the application will sleep when idling to reduce CPU usage. - EarlyReturn: rendering will return immediately when idling. @@ -3630,7 +3485,7 @@ def show_app_menu(runner_params: RunnerParams) -> None: # class imgui_default_settings: # Proxy class that introduces typings for the *submodule* imgui_default_settings - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) """ namespace ImGuiDefaultSettings""" # void LoadDefaultFont_WithFontAwesomeIcons(); /* original C++ signature */ @staticmethod @@ -3651,7 +3506,7 @@ class imgui_default_settings: # Proxy class that introduces typings for the *su # class manual_render: # Proxy class that introduces typings for the *submodule* manual_render - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) """ namespace ManualRender""" # HelloImGui::ManualRender is a namespace that groups functions, allowing fine-grained control over the rendering process: # - It is customizable like HelloImGui::Run: initialize it with `RunnerParams` or `SimpleRunnerParams` diff --git a/blimgui/dist64/imgui_bundle/im_col32.py b/blimgui/dist64/imgui_bundle/im_col32.py index 834605f..2911bbe 100644 --- a/blimgui/dist64/imgui_bundle/im_col32.py +++ b/blimgui/dist64/imgui_bundle/im_col32.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle ImU32 = int IM_COL32_R_SHIFT = 0 diff --git a/blimgui/dist64/imgui_bundle/im_cool_bar.pyi b/blimgui/dist64/imgui_bundle/im_cool_bar.pyi index 9c5ede6..0b29a52 100644 --- a/blimgui/dist64/imgui_bundle/im_cool_bar.pyi +++ b/blimgui/dist64/imgui_bundle/im_cool_bar.pyi @@ -16,7 +16,7 @@ ImGuiWindowFlags_None = WindowFlags_.none # #MIT License 
# -#Copyright (c) 2023 Stephane Cuillerdier (aka Aiekick) +#Copyright (c) 2024 Stephane Cuillerdier (aka Aiekick) # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal @@ -39,7 +39,9 @@ ImGuiWindowFlags_None = WindowFlags_.none -class ImCoolBarFlags_(enum.Enum): +##define ENABLE_IMCOOLBAR_DEBUG + +class ImCoolBarFlags_(enum.IntEnum): # none = enum.auto() # (= 0) vertical = enum.auto() # (= (1 << 0)) @@ -87,6 +89,8 @@ def get_cool_bar_item_width() -> float: pass def get_cool_bar_item_scale() -> float: pass +def show_cool_bar_metrics(v_opened: bool) -> None: + pass #################### #################### diff --git a/blimgui/dist64/imgui_bundle/imgui/__init__.pyi b/blimgui/dist64/imgui_bundle/imgui/__init__.pyi index ebba3c1..c169640 100644 --- a/blimgui/dist64/imgui_bundle/imgui/__init__.pyi +++ b/blimgui/dist64/imgui_bundle/imgui/__init__.pyi @@ -12,7 +12,6 @@ from typing import ( Callable, Union, Protocol, - TypeVar, ) import numpy as np import enum @@ -28,7 +27,6 @@ from .internal import TableHeaderData from .internal import ( Context, ImDrawListSharedData, - ImFontBuilderIO, ImRect, ColorMod, GroupData, @@ -55,25 +53,21 @@ from .internal import ( TreeNodeStackData, MultiSelectTempData, ItemFlags, + ImFontLoader, + ImDrawTextFlags, ) VoidPtr = Any FLT_MIN: float # value defined by this module as the minimum acceptable C(++) float FLT_MAX: float # value defined by this module as the maximum acceptable C(++) float +IM_COL32_WHITE: int Window = internal.Window uint = int uchar = int char = int - -def font_atlas_get_tex_data_as_rgba32( - font_atlas: ImFontAtlas, -) -> np.ndarray: - """Manual binding for ImFontAtlas::GetTexDataAsRGBA32 - This is also available as a method of ImFont: ImFontAtlas.get_tex_data_as_rgba32() - """ - pass +ImFontAtlasRectId = int # ----------------------------------------------------------------------------- # [SECTION] Forward 
declarations and basic types @@ -89,7 +83,6 @@ struct ImDrawListSplitter; // Helper to split a draw list into differen struct ImDrawVert; // A single vertex (pos + uv + col = 20 bytes by default. Override layout with IMGUI_OVERRIDE_DRAWVERT_STRUCT_LAYOUT) struct ImFont; // Runtime data for a single font within a parent ImFontAtlas struct ImFontAtlas; // Runtime data for multiple fonts, bake multiple fonts into a single texture, TTF/OTF font loader -struct ImFontBuilderIO; // Opaque interface to a font builder (stb_truetype or FreeType). struct ImFontConfig; // Configuration data when adding a font or merging fonts struct ImFontGlyph; // A single font glyph (code point + coordinates within in ImFontAtlas + offset) struct ImFontGlyphRangesBuilder; // Helper to build glyph ranges from text/string data @@ -193,6 +186,8 @@ ToggleFlags = int # -> enum ToggleFlags_ ChildFlags = int # -> enum ChildFlags_ InputFlags = int # -> enum ImGuiInputFlags_ // Flags: for Shortcut(), SetNextItemShortcut() KeyChord = ModFlags # == int. We generally use ImGuiKeyChord to mean "a ImGuiKey or-ed with any number of ImGuiMod_XXX value", but you may store only mods in there. +ImFontFlags = int # -> enum ImFontFlags_ // Flags: for ImFont +ListClipperFlags = int """ // ImTexture: user data for renderer backend to identify a texture [Compile-time configurable type] @@ -324,51 +319,62 @@ VERTEX_BUFFER_UV_OFFSET: int VERTEX_BUFFER_COL_OFFSET: int INDEX_SIZE: int -# VecProtocol: add __add__, __sub__, __mul__, __truediv__, __neg__ to ImVec2 and ImVec4 -TVec = TypeVar("TVec", bound="VecProtocol") - -class VecProtocol(Protocol[TVec]): - def __add__(self: TVec, other: TVec) -> TVec: ... - def __sub__(self: TVec, other: TVec) -> TVec: ... - def __mul__(self: TVec, other: Union[TVec, float]) -> TVec: ... - def __truediv__(self: TVec, other: Union[TVec, float]) -> TVec: ... - def __neg__(self: TVec) -> TVec: ... 
- ImVec2Like = Union[ImVec2, Tuple[int | float, int | float], List[int | float]] ImVec4Like = Union[ImVec4, Tuple[int | float, int | float, int | float, int | float], List[int | float]] +# Vec2Protocol: add __add__, __sub__, __mul__, __truediv__, __neg__ to ImVec2 +class Vec2Protocol(Protocol): + def __iter__(self) -> Iterator[float]: ... + def __add__(self, other: ImVec2Like) -> ImVec2: ... + def __sub__(self, other: ImVec2Like) -> ImVec2: ... + def __mul__(self, other: Union[ImVec2Like, float]) -> ImVec2: ... + def __truediv__(self, other: Union[ImVec2Like, float]) -> ImVec2: ... + def __neg__(self) -> ImVec2: ... + +# Vec4Protocol: add __add__, __sub__, __mul__, __truediv__, __neg__ to ImVec4 +class Vec4Protocol(Protocol): + def __iter__(self) -> Iterator[float]: ... + def __add__(self, other: ImVec4Like) -> ImVec4: ... + def __sub__(self, other: ImVec4Like) -> ImVec4: ... + def __mul__(self, other: Union[ImVec4Like, float]) -> ImVec4: ... + def __truediv__(self, other: Union[ImVec4Like, float]) -> ImVec4: ... + def __neg__(self) -> ImVec4: ... + +NpBuffer = np.ndarray # used to transfer texture data as a 1D numpy array of bytes + ################################################## # AUTO GENERATED CODE BELOW ################################################## # // Autogenerated code below! Do not edit! #################### #################### -# dear imgui, v1.91.9b +# dear imgui, v1.92.5 # (headers) # Help: -# - See links below. # - Call and read ImGui::ShowDemoWindow() in imgui_demo.cpp. All applications in examples/ are doing that. # - Read top of imgui.cpp for more details, links and comments. -# - Add '#define IMGUI_DEFINE_MATH_OPERATORS' before including this file (or in imconfig.h) to access courtesy maths operators for ImVec2 and ImVec4. +# - Add '#define IMGUI_DEFINE_MATH_OPERATORS' before including imgui.h (or in imconfig.h) to access courtesy maths operators for ImVec2 and ImVec4. # Resources: # - FAQ ........................ 
https://dearimgui.com/faq (in repository as docs/FAQ.md) # - Homepage ................... https://github.com/ocornut/imgui -# - Releases & changelog ....... https://github.com/ocornut/imgui/releases +# - Releases & Changelog ....... https://github.com/ocornut/imgui/releases # - Gallery .................... https://github.com/ocornut/imgui/issues?q=label%3Agallery (please post your screenshots/video there!) # - Wiki ....................... https://github.com/ocornut/imgui/wiki (lots of good stuff there) # - Getting Started https://github.com/ocornut/imgui/wiki/Getting-Started (how to integrate in an existing app by adding ~25 lines of code) # - Third-party Extensions https://github.com/ocornut/imgui/wiki/Useful-Extensions (ImPlot & many more) -# - Bindings/Backends https://github.com/ocornut/imgui/wiki/Bindings (language bindings, backends for various tech/engines) -# - Glossary https://github.com/ocornut/imgui/wiki/Glossary +# - Bindings/Backends https://github.com/ocornut/imgui/wiki/Bindings (language bindings + backends for various tech/engines) # - Debug Tools https://github.com/ocornut/imgui/wiki/Debug-Tools +# - Glossary https://github.com/ocornut/imgui/wiki/Glossary # - Software using Dear ImGui https://github.com/ocornut/imgui/wiki/Software-using-dear-imgui # - Issues & support ........... https://github.com/ocornut/imgui/issues # - Test Engine & Automation ... https://github.com/ocornut/imgui_test_engine (test suite, test engine to automate your apps) +# - Web version of the Demo .... https://pthom.github.io/imgui_manual_online/manual/imgui_manual.html (w/ source code browser) -# For first-time users having issues compiling/linking/running/loading fonts: +# For FIRST-TIME users having issues compiling/linking/running: # please post in https://github.com/ocornut/imgui/discussions if you cannot find a solution in resources above. -# Everything else should be asked in 'Issues'! We are building a database of cross-linked knowledge there. 
+# EVERYTHING ELSE should be asked in 'Issues'! We are building a database of cross-linked knowledge there. +# Since 1.92, we encourage font loading questions to also be posted in 'Issues'. # Library Version # (Integer encoded as XYYZZ for use in #if preprocessor conditionals, e.g. '#if IMGUI_VERSION_NUM >= 12345') @@ -379,6 +385,7 @@ ImVec4Like = Union[ImVec4, Tuple[int | float, int | float, int | float, int | fl # [ADAPT_IMGUI_BUNDLE] # #ifdef IMGUI_BUNDLE_PYTHON_API # + # #endif # @@ -399,7 +406,7 @@ ImVec4Like = Union[ImVec4, Tuple[int | float, int | float, int | float, int | fl # Index of this file: # // [SECTION] Header mess # // [SECTION] Forward declarations and basic types -# // [SECTION] Texture identifier (ImTextureID) +# // [SECTION] Texture identifiers (ImTextureID, ImTextureRef) # // [SECTION] Dear ImGui end-user API functions # // [SECTION] Flags & Enumerations # // [SECTION] Tables API flags and structures (ImGuiTableFlags, ImGuiTableColumnFlags, ImGuiTableRowFlags, ImGuiTableBgTarget, ImGuiTableSortSpecs, ImGuiTableColumnSortSpecs) @@ -410,7 +417,8 @@ ImVec4Like = Union[ImVec4, Tuple[int | float, int | float, int | float, int | fl # // [SECTION] Helpers (ImGuiOnceUponAFrame, ImGuiTextFilter, ImGuiTextBuffer, ImGuiStorage, ImGuiListClipper, Math Operators, ImColor) # // [SECTION] Multi-Select API flags and structures (ImGuiMultiSelectFlags, ImGuiMultiSelectIO, ImGuiSelectionRequest, ImGuiSelectionBasicStorage, ImGuiSelectionExternalStorage) # // [SECTION] Drawing API (ImDrawCallback, ImDrawCmd, ImDrawIdx, ImDrawVert, ImDrawChannel, ImDrawListSplitter, ImDrawFlags, ImDrawListFlags, ImDrawList, ImDrawData) -# // [SECTION] Font API (ImFontConfig, ImFontGlyph, ImFontGlyphRangesBuilder, ImFontAtlasFlags, ImFontAtlas, ImFont) +# // [SECTION] Texture API (ImTextureFormat, ImTextureStatus, ImTextureRect, ImTextureData) +# // [SECTION] Font API (ImFontConfig, ImFontGlyph, ImFontGlyphRangesBuilder, ImFontAtlasFlags, ImFontAtlas, ImFontBaked, ImFont) # // 
[SECTION] Viewports (ImGuiViewportFlags, ImGuiViewport) # // [SECTION] ImGuiPlatformIO + other Platform Dependent Interfaces (ImGuiPlatformMonitor, ImGuiPlatformImeData) # // [SECTION] Obsolete functions and types @@ -434,10 +442,13 @@ ImVec4Like = Union[ImVec4, Tuple[int | float, int | float, int | float, int | fl # Using dear imgui via a shared library is not recommended: we don't guarantee backward nor forward ABI compatibility + this is a call-heavy library and function call overhead adds up. # Helper Macros +# (note: compiling with NDEBUG will usually strip out assert() to nothing, which is NOT recommended because we use asserts to notify of programmer mistakes.) # Helper Macros - IM_FMTARGS, IM_FMTLIST: Apply printf-style warnings to our formatting functions. -# (MSVC provides an equivalent mechanism via SAL Annotations but it would require the macros in a different -# location. e.g. #include + None myprintf(_Printf_format_string_ const char* format, ...)) +# (MSVC provides an equivalent mechanism via SAL Annotations but it requires the macros in a different +# location. e.g. #include + None myprintf(_Printf_format_string_ const char* format, ...), +# and only works when using Code Analysis, rather than just normal compiling). +# (see https://github.com/ocornut/imgui/issues/8871 for a patch to enable this for MSVC's Code Analysis) # Disable some of MSVC most aggressive Debug runtime checks in function header/footer (used in some simple/low-level functions) @@ -456,30 +467,28 @@ ImVec4Like = Union[ImVec4, Tuple[int | float, int | float, int | float, int | fl # Enumerations # - We don't use strongly typed enums much because they add constraints (can't extend in private code, can't store typed in bit fields, extra casting on iteration) # - Tip: Use your programming IDE navigation facilities on the names in the _central column_ below to find the actual flags/enum lists! 
-# - In Visual Studio: CTRL+comma ("Edit.GoToAll") can follow symbols inside comments, whereas CTRL+F12 ("Edit.GoToImplementation") cannot. -# - In Visual Studio w/ Visual Assist installed: ALT+G ("VAssistX.GoToImplementation") can also follow symbols inside comments. -# - In VS Code, CLion, etc.: CTRL+click can follow symbols inside comments. +# - In Visual Studio: Ctrl+Comma ("Edit.GoToAll") can follow symbols inside comments, whereas Ctrl+F12 ("Edit.GoToImplementation") cannot. +# - In Visual Studio w/ Visual Assist installed: Alt+G ("VAssistX.GoToImplementation") can also follow symbols inside comments. +# - In VS Code, CLion, etc.: Ctrl+Click can follow symbols inside comments. # Flags (declared as int to allow using as flags without overhead, and to not pollute the top of this file) # - Tip: Use your programming IDE navigation facilities on the names in the _central column_ below to find the actual flags/enum lists! -# - In Visual Studio: CTRL+comma ("Edit.GoToAll") can follow symbols inside comments, whereas CTRL+F12 ("Edit.GoToImplementation") cannot. -# - In Visual Studio w/ Visual Assist installed: ALT+G ("VAssistX.GoToImplementation") can also follow symbols inside comments. -# - In VS Code, CLion, etc.: CTRL+click can follow symbols inside comments. +# - In Visual Studio: Ctrl+Comma ("Edit.GoToAll") can follow symbols inside comments, whereas Ctrl+F12 ("Edit.GoToImplementation") cannot. +# - In Visual Studio w/ Visual Assist installed: Alt+G ("VAssistX.GoToImplementation") can also follow symbols inside comments. +# - In VS Code, CLion, etc.: Ctrl+Click can follow symbols inside comments. # Character types # (we generally use UTF-8 encoded string in the API. This is storage specifically for a decoded character used for keyboard input and display) -# A single decoded U16 character/code point. We encode them as multi bytes UTF-8 when used in strings. 
# Callback and functions types # #ifdef IMGUI_BUNDLE_PYTHON_API # -# Callback function for ImGui::SetNextWindowSizeConstraints() # #else # # #endif # -class ImVec2(VecProtocol["ImVec2"]): +class ImVec2(Vec2Protocol): # float x, /* original C++ signature */ x: float # y; /* original C++ signature */ @@ -519,7 +528,7 @@ class ImVec2(VecProtocol["ImVec2"]): # #endif # -class ImVec4(VecProtocol["ImVec4"]): +class ImVec4(Vec4Protocol): """ImVec4: 4D vector used to store clipping rectangles, colors etc. [Compile-time configurable type]""" # float x, /* original C++ signature */ @@ -553,14 +562,49 @@ class ImVec4(VecProtocol["ImVec4"]): # # ----------------------------------------------------------------------------- -# [SECTION] Texture identifier (ImTextureID) +# [SECTION] Texture identifiers (ImTextureID, ImTextureRef) # ----------------------------------------------------------------------------- -# ImTexture: user data for renderer backend to identify a texture [Compile-time configurable type] -# - To use something else than an opaque None* pointer: override with e.g. '#define ImTextureID MyTextureType*' in your imconfig.h file. -# - This can be whatever to you want it to be! read the FAQ about ImTextureID for details. -# - You can make this a structure with various constructors if you need. You will have to implement ==/!= operators. -# - (note: before v1.91.4 (2024/10/08) the default type for ImTextureID was None*. Use intermediary intptr_t cast and read FAQ if you have casting warnings) +# ImTextureID = backend specific, low-level identifier for a texture uploaded in GPU/graphics system. +# [Compile-time configurable type] +# - When a Rendered Backend creates a texture, it store its native identifier into a ImTextureID value. +# (e.g. Used by DX11 backend to a `ID3D11ShaderResourceView*`; Used by OpenGL backends to store `GLuint`; +# Used by SDLGPU backend to store a `SDL_GPUTextureSamplerBinding*`, etc.). +# - User may submit their own textures to e.g. 
ImGui::Image() function by passing this value. +# - During the rendering loop, the Renderer Backend retrieve the ImTextureID, which stored inside a +# ImTextureRef, which is stored inside a ImDrawCmd. +# - Compile-time type configuration: +# - To use something other than a 64-bit value: add '#define ImTextureID MyTextureType*' in your imconfig.h file. +# - This can be whatever to you want it to be! read the FAQ entry about textures for details. +# - You may decide to store a higher-level structure containing texture, sampler, shader etc. with various +# constructors if you like. You will need to implement ==/!= operators. +# History: +# - In v1.91.4 (2024/10/08): the default type for ImTextureID was changed from 'None*' to 'ImU64'. This allowed backends requiring 64-bit worth of data to build on 32-bit architectures. Use intermediary intptr_t cast and read FAQ if you have casting warnings. +# - In v1.92.0 (2025/06/11): added ImTextureRef which carry either a ImTextureID either a pointer to internal texture atlas. All user facing functions taking ImTextureID changed to ImTextureRef + +# Define this if you need 0 to be a valid ImTextureID for your backend. + +class ImTextureRef: + # ImTextureRef() { _TexData = NULL; _TexID = ImTextureID_Invalid; } /* original C++ signature */ + @overload + def __init__(self) -> None: + pass + # ImTextureRef(ImTextureID tex_id) { _TexData = NULL; _TexID = tex_id; } /* original C++ signature */ + @overload + def __init__(self, tex_id: ImTextureID) -> None: + pass + # inline ImTextureID GetTexID() const; /* original C++ signature */ + def get_tex_id(self) -> ImTextureID: + """(private API) + + == (_TexData ? _TexData->TexID : _TexID) // Implemented below in the file. + """ + pass + # Members (either are set, never both!) + # ImTextureData* _TexData; /* original C++ signature */ + _tex_data: ImTextureData # A texture, generally owned by a ImFontAtlas. Will convert to ImTextureID during render loop, after texture has been uploaded. 
+ # ImTextureID _TexID; /* original C++ signature */ + _tex_id: ImTextureID # _OR_ Low-level backend texture identifier, if already uploaded or created by user/app. Generally provided to e.g. ImGui::Image() calls. # ----------------------------------------------------------------------------- # [SECTION] Dear ImGui end-user API functions @@ -621,7 +665,7 @@ def render() -> None: # IMGUI_API ImDrawData* GetDrawData(); /* original C++ signature */ def get_draw_data() -> ImDrawData: - """valid after Render() and until the next call to NewFrame(). this is what you have to render.""" + """valid after Render() and until the next call to NewFrame(). Call ImGui_ImplXXXX_RenderDrawData() function in your Renderer Backend to render.""" pass # Demo, Debug, Information @@ -888,11 +932,6 @@ def set_window_focus() -> None: """(not recommended) set current window to be focused / top-most. prefer using SetNextWindowFocus().""" pass -# IMGUI_API void SetWindowFontScale(float scale); /* original C++ signature */ -def set_window_font_scale(scale: float) -> None: - """[OBSOLETE] set font scale. Adjust IO.FontGlobalScale if you want to scale all windows. This is an old API! For correct scaling, prefer to reload font + rebuild ImFontAtlas + call style.ScaleAllSizes().""" - pass - # IMGUI_API void SetWindowPos(const char* name, const ImVec2& pos, ImGuiCond cond = 0); /* original C++ signature */ @overload def set_window_pos(name: str, pos: ImVec2Like, cond: Cond = 0) -> None: @@ -970,16 +1009,52 @@ def set_scroll_from_pos_y(local_y: float, center_y_ratio: float = 0.5) -> None: """adjust scrolling amount to make given position visible. 
Generally GetCursorStartPos() + offset to compute a valid position.""" pass -# Parameters stacks (shared) -# IMGUI_API void PushFont(ImFont* font); /* original C++ signature */ -def push_font(font: ImFont) -> None: - """use None as a shortcut to push default font""" +# Parameters stacks (font) +# - PushFont(font, 0.0) // Change font and keep current size +# - PushFont(None, 20.0) // Keep font and change current size +# - PushFont(font, 20.0) // Change font and set size to 20.0 +# - PushFont(font, style.FontSizeBase * 2.0) // Change font and set size to be twice bigger than current size. +# - PushFont(font, font->LegacySize) // Change font and set size to size passed to AddFontXXX() function. Same as pre-1.92 behavior. +# *IMPORTANT* before 1.92, fonts had a single size. They can now be dynamically be adjusted. +# - In 1.92 we have REMOVED the single parameter version of PushFont() because it seems like the easiest way to provide an error-proof transition. +# - PushFont(font) before 1.92 = PushFont(font, font->LegacySize) after 1.92 // Use default font size as passed to AddFontXXX() function. +# *IMPORTANT* global scale factors are applied over the provided size. +# - Global scale factors are: 'style.FontScaleMain', 'style.FontScaleDpi' and maybe more. +# - If you want to apply a factor to the _current_ font size: +# - CORRECT: PushFont(None, style.FontSizeBase) // use current unscaled size == does nothing +# - CORRECT: PushFont(None, style.FontSizeBase * 2.0) // use current unscaled size x2 == make text twice bigger +# - INCORRECT: PushFont(None, GetFontSize()) // INCORRECT! using size after global factors already applied == GLOBAL SCALING FACTORS WILL APPLY TWICE! +# - INCORRECT: PushFont(None, GetFontSize() * 2.0) // INCORRECT! using size after global factors already applied == GLOBAL SCALING FACTORS WILL APPLY TWICE! 
+# #ifdef IMGUI_BUNDLE_PYTHON_API +# +# IMGUI_API void PushFont(std::optional font, float font_size_base_unscaled); /* original C++ signature */ +def push_font(font: Optional[ImFont], font_size_base_unscaled: float) -> None: + """Use None as a shortcut to keep current font. Use 0.0 to keep current size.""" pass +# #endif +# + # IMGUI_API void PopFont(); /* original C++ signature */ def pop_font() -> None: pass +# IMGUI_API ImFont* GetFont(); /* original C++ signature */ +def get_font() -> ImFont: + """get current font""" + pass + +# IMGUI_API float GetFontSize(); /* original C++ signature */ +def get_font_size() -> float: + """get current scaled font size (= height in pixels). AFTER global scale factors applied. *IMPORTANT* DO NOT PASS THIS VALUE TO PushFont()! Use ImGui::GetStyle().FontSizeBase to get value before global scale factors.""" + pass + +# IMGUI_API ImFontBaked* GetFontBaked(); /* original C++ signature */ +def get_font_baked() -> ImFontBaked: + """get current font bound at current size // == GetFont()->GetFontBaked(GetFontSize())""" + pass + +# Parameters stacks (shared) # IMGUI_API void PushStyleColor(ImGuiCol idx, ImU32 col); /* original C++ signature */ @overload def push_style_color(idx: Col, col: ImU32) -> None: @@ -1061,16 +1136,6 @@ def pop_text_wrap_pos() -> None: # Style read access # - Use the ShowStyleEditor() function to interactively see/edit the colors. 
-# IMGUI_API ImFont* GetFont(); /* original C++ signature */ -def get_font() -> ImFont: - """get current font""" - pass - -# IMGUI_API float GetFontSize(); /* original C++ signature */ -def get_font_size() -> float: - """get current font size (= height in pixels) of current font with current scale applied""" - pass - # IMGUI_API ImVec2 GetFontTexUvWhitePixel(); /* original C++ signature */ def get_font_tex_uv_white_pixel() -> ImVec2: """get UV coordinate for a white pixel, useful to draw custom shapes via the ImDrawList API""" @@ -1402,22 +1467,20 @@ def text_link(label: str) -> bool: """hyperlink text button, return True when clicked""" pass -# IMGUI_API void TextLinkOpenURL(const char* label, const char* url = NULL); /* original C++ signature */ -def text_link_open_url(label: str, url: Optional[str] = None) -> None: +# IMGUI_API bool TextLinkOpenURL(const char* label, const char* url = NULL); /* original C++ signature */ +def text_link_open_url(label: str, url: Optional[str] = None) -> bool: """hyperlink text button, automatically open file/url when clicked""" pass # Widgets: Images -# - Read about ImTextureID here: https://github.com/ocornut/imgui/wiki/Image-Loading-and-Displaying-Examples +# - Read about ImTextureID/ImTextureRef here: https://github.com/ocornut/imgui/wiki/Image-Loading-and-Displaying-Examples # - 'uv0' and 'uv1' are texture coordinates. Read about them from the same link above. # - Image() pads adds style.ImageBorderSize on each side, ImageButton() adds style.FramePadding on each side. # - ImageButton() draws a background based on regular Button() color + optionally an inner background if specified. -# IMGUI_API void Image(ImTextureID user_texture_id, const ImVec2& image_size, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 1)); /* original C++ signature */ +# - An obsolete version of Image(), before 1.91.9 (March 2025), had a 'tint_col' parameter which is now supported by the ImageWithBg() function. 
+# IMGUI_API void Image(ImTextureRef tex_ref, const ImVec2& image_size, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 1)); /* original C++ signature */ def image( - user_texture_id: ImTextureID, - image_size: ImVec2Like, - uv0: Optional[ImVec2Like] = None, - uv1: Optional[ImVec2Like] = None, + tex_ref: ImTextureRef, image_size: ImVec2Like, uv0: Optional[ImVec2Like] = None, uv1: Optional[ImVec2Like] = None ) -> None: """Python bindings defaults: If any of the params below is None, then its default value below will be used: @@ -1426,9 +1489,9 @@ def image( """ pass -# IMGUI_API void ImageWithBg(ImTextureID user_texture_id, const ImVec2& image_size, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 1), const ImVec4& bg_col = ImVec4(0, 0, 0, 0), const ImVec4& tint_col = ImVec4(1, 1, 1, 1)); /* original C++ signature */ +# IMGUI_API void ImageWithBg(ImTextureRef tex_ref, const ImVec2& image_size, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 1), const ImVec4& bg_col = ImVec4(0, 0, 0, 0), const ImVec4& tint_col = ImVec4(1, 1, 1, 1)); /* original C++ signature */ def image_with_bg( - user_texture_id: ImTextureID, + tex_ref: ImTextureRef, image_size: ImVec2Like, uv0: Optional[ImVec2Like] = None, uv1: Optional[ImVec2Like] = None, @@ -1444,10 +1507,10 @@ def image_with_bg( """ pass -# IMGUI_API bool ImageButton(const char* str_id, ImTextureID user_texture_id, const ImVec2& image_size, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 1), const ImVec4& bg_col = ImVec4(0, 0, 0, 0), const ImVec4& tint_col = ImVec4(1, 1, 1, 1)); /* original C++ signature */ +# IMGUI_API bool ImageButton(const char* str_id, ImTextureRef tex_ref, const ImVec2& image_size, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 1), const ImVec4& bg_col = ImVec4(0, 0, 0, 0), const ImVec4& tint_col = ImVec4(1, 1, 1, 1)); /* original C++ signature */ def image_button( str_id: str, - user_texture_id: ImTextureID, + tex_ref: 
ImTextureRef, image_size: ImVec2Like, uv0: Optional[ImVec2Like] = None, uv1: Optional[ImVec2Like] = None, @@ -1489,13 +1552,13 @@ def combo( pass # Widgets: Drag Sliders -# - CTRL+Click on any drag box to turn them into an input box. Manually input values aren't clamped by default and can go off-bounds. Use ImGuiSliderFlags_AlwaysClamp to always clamp. +# - Ctrl+Click on any drag box to turn them into an input box. Manually input values aren't clamped by default and can go off-bounds. Use ImGuiSliderFlags_AlwaysClamp to always clamp. # - For all the Float2/Float3/Float4/Int2/Int3/Int4 versions of every function, note that a 'float v[X]' function argument is the same as 'float* v', # the array syntax is just a way to document the number of elements that are expected to be accessible. You can pass address of your first element out of a contiguous set, e.g. &myvector.x # - Adjust format string to decorate the value with a prefix, a suffix, or adapt the editing and display precision e.g. "%.3" -> 1.234; "%5.2 secs" -> 01.23 secs; "Biscuit: %.0" -> Biscuit: 1; etc. # - Format string may also be set to None or use the default format ("%f" or "%d"). # - Speed are per-pixel of mouse movement (v_speed=0.2: mouse needs to move by 5 pixels to increase value by 1). For keyboard/gamepad navigation, minimum speed is Max(v_speed, minimum_step_at_given_precision). -# - Use v_min < v_max to clamp edits to given limits. Note that CTRL+Click manual input can override those limits if ImGuiSliderFlags_AlwaysClamp is not used. +# - Use v_min < v_max to clamp edits to given limits. Note that Ctrl+Click manual input can override those limits if ImGuiSliderFlags_AlwaysClamp is not used. # - Use v_max = FLT_MAX / INT_MAX etc to avoid clamping to a maximum, same with v_min = -FLT_MAX / INT_MIN to avoid clamping to a minimum. # - We use the same sets of flags for DragXXX() and SliderXXX() functions as the features are the same and it makes it easier to swap them. 
# - Legacy: Pre-1.78 there are DragXXX() function signatures that take a final `float power=1.0' argument instead of the `ImGuiSliderFlags flags=0' argument. @@ -1648,7 +1711,7 @@ def drag_scalar_n( pass # Widgets: Regular Sliders -# - CTRL+Click on any slider to turn them into an input box. Manually input values aren't clamped by default and can go off-bounds. Use ImGuiSliderFlags_AlwaysClamp to always clamp. +# - Ctrl+Click on any slider to turn them into an input box. Manually input values aren't clamped by default and can go off-bounds. Use ImGuiSliderFlags_AlwaysClamp to always clamp. # - Adjust format string to decorate the value with a prefix, a suffix, or adapt the editing and display precision e.g. "%.3" -> 1.234; "%5.2 secs" -> 01.23 secs; "Biscuit: %.0" -> Biscuit: 1; etc. # - Format string may also be set to None or use the default format ("%f" or "%d"). # - Legacy: Pre-1.78 there are SliderXXX() function signatures that take a final `float power=1.0' argument instead of the `ImGuiSliderFlags flags=0' argument. @@ -1764,7 +1827,7 @@ def v_slider_scalar( pass # Widgets: Input with Keyboard -# - If you want to use InputText() with std::string or any custom dynamic string type, see misc/cpp/imgui_stdlib.h and comments in imgui_demo.cpp. +# - If you want to use InputText() with std::string or any custom dynamic string type, use the wrapper in misc/cpp/imgui_stdlib.h/.cpp! # - Most of the ImGuiInputTextFlags flags are only useful for InputText() and not for InputFloatX, InputIntX, InputDouble etc. # IMGUI_API bool InputFloat(const char* label, float* v, float step = 0.0f, float step_fast = 0.0f, const char* format = "%.3f", ImGuiInputTextFlags flags = 0); /* original C++ signature */ def input_float( @@ -1887,7 +1950,7 @@ def tree_node(label: str) -> bool: # IMGUI_API bool TreeNode(const char* str_id, const char* fmt, ...) 
; /* original C++ signature */ @overload def tree_node(str_id: str, fmt: str) -> bool: - """helper variation to easily decorelate the id from the displayed string. Read the FAQ about why and how to use ID. to align arbitrary text at the same level as a TreeNode() you can use Bullet().""" + """helper variation to easily decorrelate the id from the displayed string. Read the FAQ about why and how to use ID. to align arbitrary text at the same level as a TreeNode() you can use Bullet().""" pass # IMGUI_API bool TreeNode(const void* ptr_id, const char* fmt, ...) ; /* original C++ signature */ @@ -1952,10 +2015,9 @@ def set_next_item_open(is_open: bool, cond: Cond = 0) -> None: # IMGUI_API void SetNextItemStorageID(ImGuiID storage_id); /* original C++ signature */ def set_next_item_storage_id(storage_id: ID) -> None: + """set id to use for open/close storage (default to same as item id).""" pass -# set id to use for open/close storage (default to same as item id). - # [ADAPT_IMGUI_BUNDLE] # Widgets: Selectables # - A selectable highlights when hovered, and can display another color when selected. @@ -1974,7 +2036,7 @@ def selectable( # [/ADAPT_IMGUI_BUNDLE] # Multi-selection system for Selectable(), Checkbox(), TreeNode() functions [BETA] -# - This enables standard multi-selection/range-selection idioms (CTRL+Mouse/Keyboard, SHIFT+Mouse/Keyboard, etc.) in a way that also allow a clipper to be used. +# - This enables standard multi-selection/range-selection idioms (Ctrl+Mouse/Keyboard, Shift+Mouse/Keyboard, etc.) in a way that also allow a clipper to be used. # - ImGuiSelectionUserData is often used to store your item index within the current view (but may store something else). # - Read comments near ImGuiMultiSelectIO for instructions/details and see 'Demo->Widgets->Selection State & Multi-Select' for demo. # - TreeNode() is technically supported but... using this correctly is more complicated. 
You need some sort of linear/random access to your tree, @@ -2001,7 +2063,7 @@ def is_item_toggled_selection() -> bool: # - This is essentially a thin wrapper to using BeginChild/EndChild with the ImGuiChildFlags_FrameStyle flag for stylistic changes + displaying a label. # - If you don't need a label you can probably simply use BeginChild() with the ImGuiChildFlags_FrameStyle flag for the same result. # - You can submit contents and manage your selection state however you want it, by creating e.g. Selectable() or any other items. -# - The simplified/old ListBox() api are helpers over BeginListBox()/EndListBox() which are kept available for convenience purpose. This is analoguous to how Combos are created. +# - The simplified/old ListBox() api are helpers over BeginListBox()/EndListBox() which are kept available for convenience purpose. This is analogous to how Combos are created. # - Choose frame width: size.x > 0.0: custom / size.x < 0.0 or -FLT_MIN: right-align / size.x = 0.0 (default): use current ItemWidth # - Choose frame height: size.y > 0.0: custom / size.y < 0.0 or -FLT_MIN: bottom-align / size.y = 0.0 (default): arbitrary default height which can fit ~7 items # IMGUI_API bool BeginListBox(const char* label, const ImVec2& size = ImVec2(0, 0)); /* original C++ signature */ @@ -2355,7 +2417,7 @@ def table_get_column_index() -> int: # IMGUI_API int TableGetRowIndex(); /* original C++ signature */ def table_get_row_index() -> int: - """return current row index.""" + """return current row index (header rows are accounted for)""" pass # IMGUI_API const char* TableGetColumnName(int column_n = -1); /* original C++ signature */ @@ -2437,16 +2499,16 @@ def end_tab_bar() -> None: # IMGUI_API bool BeginTabItem(const char* label, bool* p_open = NULL, ImGuiTabItemFlags flags = 0); /* original C++ signature */ def begin_tab_item(label: str, p_open: Optional[bool] = None, flags: TabItemFlags = 0) -> Tuple[bool, Optional[bool]]: + """create a Tab. 
Returns True if the Tab is selected.""" pass -# create a Tab. Returns True if the Tab is selected. # #ifdef IMGUI_BUNDLE_PYTHON_API # # IMGUI_API bool BeginTabItemSimple(const char* label, ImGuiTabItemFlags flags = 0); /* original C++ signature */ def begin_tab_item_simple(label: str, flags: TabItemFlags = 0) -> bool: + """create a Tab (non-closable). Returns True if the Tab is selected.""" pass -# create a Tab (non-closable). Returns True if the Tab is selected. # #endif # # IMGUI_API void EndTabItem(); /* original C++ signature */ @@ -2465,18 +2527,24 @@ def set_tab_item_closed(tab_or_docked_window_label: str) -> None: pass # Docking -# [BETA API] Enable with io.ConfigFlags |= ImGuiConfigFlags_DockingEnable. -# Note: You can use most Docking facilities without calling any API. You DO NOT need to call DockSpace() to use Docking! -# - Drag from window title bar or their tab to dock/undock. Hold SHIFT to disable docking. -# - Drag from window menu button (upper-left button) to undock an entire node (all windows). -# - When io.ConfigDockingWithShift == True, you instead need to hold SHIFT to enable docking. -# About dockspaces: -# - Use DockSpaceOverViewport() to create a window covering the screen or a specific viewport + a dockspace inside it. -# This is often used with ImGuiDockNodeFlags_PassthruCentralNode to make it transparent. -# - Use DockSpace() to create an explicit dock node _within_ an existing window. See Docking demo for details. -# - Important: Dockspaces need to be submitted _before_ any window they can host. Submit it early in your frame! -# - Important: Dockspaces need to be kept alive if hidden, otherwise windows docked into it will be undocked. -# e.g. if you have multiple tabs with a dockspace inside each tab: submit the non-visible dockspaces with ImGuiDockNodeFlags_KeepAliveOnly. +# - Read https://github.com/ocornut/imgui/wiki/Docking for details. +# - Enable with io.ConfigFlags |= ImGuiConfigFlags_DockingEnable. 
+# - You can use most Docking facilities without calling any API. You don't necessarily need to call a DockSpaceXXX function to use Docking! +# - Drag from window title bar or their tab to dock/undock. Hold SHIFT to disable docking. +# - Drag from window menu button (upper-left button) to undock an entire node (all windows). +# - When io.ConfigDockingWithShift == True, you instead need to hold SHIFT to enable docking. +# - Dockspaces: +# - If you want to dock windows into the edge of your screen, most application can simply call DockSpaceOverViewport(): +# e.g. ImGui::NewFrame(); then ImGui::DockSpaceOverViewport(); // Create a dockspace in main viewport. +# or: ImGui::NewFrame(); then ImGui::DockSpaceOverViewport(0, None, ImGuiDockNodeFlags_PassthruCentralNode); // Create a dockspace in main viewport, where central node is transparent. +# - A dockspace is an explicit dock node within an existing window. +# - DockSpaceOverViewport() basically creates an invisible window covering a viewport, and submit a DockSpace() into it. +# - IMPORTANT: Dockspaces need to be submitted _before_ any window they can host. Submit them early in your frame! +# - IMPORTANT: Dockspaces need to be kept alive if hidden, otherwise windows docked into it will be undocked. +# If you have e.g. multiple tabs with a dockspace inside each tab: submit the non-visible dockspaces with ImGuiDockNodeFlags_KeepAliveOnly. +# - Programmatic docking: +# - There is no public API yet other than the very limited SetNextWindowDockID() function. Sorry for that! +# - Read https://github.com/ocornut/imgui/wiki/Docking for examples of how to use current internal API. 
# IMGUI_API ImGuiID DockSpace(ImGuiID dockspace_id, const ImVec2& size = ImVec2(0, 0), ImGuiDockNodeFlags flags = 0, const ImGuiWindowClass* window_class = NULL); /* original C++ signature */ def dock_space( dockspace_id: ID, @@ -2582,7 +2650,7 @@ def end_drag_drop_target() -> None: # Disabling [BETA API] # - Disable all user interactions and dim items visuals (applying style.DisabledAlpha over current colors) # - Those can be nested but it cannot be used to enable an already disabled section (a single BeginDisabled(True) in the stack is enough to keep everything disabled) -# - Tooltips windows by exception are opted out of disabling. +# - Tooltips windows are automatically opted out of disabling. Note that IsItemHovered() by default returns False on disabled items, unless using ImGuiHoveredFlags_AllowWhenDisabled. # - BeginDisabled(False)/EndDisabled() essentially does nothing but is provided to facilitate use of boolean expressions (as a micro-optimization: if you have tens of thousands of BeginDisabled(False)/EndDisabled() pairs, you might want to reformulate your code to avoid making those calls) # IMGUI_API void BeginDisabled(bool disabled = true); /* original C++ signature */ def begin_disabled(disabled: bool = True) -> None: @@ -2846,8 +2914,8 @@ def set_next_frame_want_capture_keyboard(want_capture_keyboard: bool) -> None: # Inputs Utilities: Shortcut Testing & Routing [BETA] # - ImGuiKeyChord = a ImGuiKey + optional ImGuiMod_Alt/ImGuiMod_Ctrl/ImGuiMod_Shift/ImGuiMod_Super. -# ImGuiKey_C // Accepted by functions taking ImGuiKey or ImGuiKeyChord arguments) -# ImGuiMod_Ctrl | ImGuiKey_C // Accepted by functions taking ImGuiKeyChord arguments) +# ImGuiKey_C // Accepted by functions taking ImGuiKey or ImGuiKeyChord arguments +# ImGuiMod_Ctrl | ImGuiKey_C // Accepted by functions taking ImGuiKeyChord arguments # only ImGuiMod_XXX values are legal to combine with an ImGuiKey. You CANNOT combine two ImGuiKey values. 
# - The general idea is that several callers may register interest in a shortcut, and only one owner gets it. # Parent -> call Shortcut(Ctrl+S) // When Parent is focused, Parent gets the shortcut. @@ -2963,7 +3031,7 @@ def set_mouse_cursor(cursor_type: MouseCursor) -> None: # IMGUI_API void SetNextFrameWantCaptureMouse(bool want_capture_mouse); /* original C++ signature */ def set_next_frame_want_capture_mouse(want_capture_mouse: bool) -> None: - """Override io.WantCaptureMouse flag next frame (said flag is left for your application to handle, typical when True it instucts your app to ignore inputs). This is equivalent to setting "io.WantCaptureMouse = want_capture_mouse;" after the next NewFrame() call.""" + """Override io.WantCaptureMouse flag next frame (said flag is left for your application to handle, typical when True it instructs your app to ignore inputs). This is equivalent to setting "io.WantCaptureMouse = want_capture_mouse;" after the next NewFrame() call.""" pass # Clipboard Utilities @@ -3018,10 +3086,9 @@ def debug_start_item_picker() -> None: def debug_check_version_and_data_layout( version_str: str, sz_io: int, sz_style: int, sz_vec2: int, sz_vec4: int, sz_drawvert: int, sz_drawidx: int ) -> bool: + """This is called by IMGUI_CHECKVERSION() macro.""" pass -# This is called by IMGUI_CHECKVERSION() macro. - # Memory Allocators # - Those functions are not reliant on the current context. # - DLL users: heaps and globals are not shared across DLL boundaries! You will need to call SetCurrentContext() + SetAllocatorFunctions() @@ -3047,8 +3114,8 @@ def destroy_platform_windows() -> None: """call DestroyWindow platform functions for all viewports. call from backend Shutdown() if you need to close platform windows before imgui shutdown. 
otherwise will be called by DestroyContext().""" pass -# IMGUI_API ImGuiViewport* FindViewportByID(ImGuiID id); /* original C++ signature */ -def find_viewport_by_id(id_: ID) -> Viewport: +# IMGUI_API ImGuiViewport* FindViewportByID(ImGuiID viewport_id); /* original C++ signature */ +def find_viewport_by_id(viewport_id: ID) -> Viewport: """this is a helper for backends.""" pass @@ -3061,7 +3128,7 @@ def find_viewport_by_platform_handle(platform_handle: Any) -> Viewport: # [SECTION] Flags & Enumerations # ----------------------------------------------------------------------------- -class WindowFlags_(enum.Enum): +class WindowFlags_(enum.IntFlag): """Flags for ImGui::Begin() (Those are per-window flags. There are shared flags in ImGuiIO: io.ConfigWindowsResizeFromEdges and io.ConfigWindowsMoveFromTitleBarOnly) """ @@ -3123,7 +3190,7 @@ class WindowFlags_(enum.Enum): # ImGuiWindowFlags_NoNavFocus = 1 << 17, /* original C++ signature */ no_nav_focus = ( enum.auto() - ) # (= 1 << 17) # No focusing toward this window with keyboard/gamepad navigation (e.g. skipped by CTRL+TAB) + ) # (= 1 << 17) # No focusing toward this window with keyboard/gamepad navigation (e.g. skipped by Ctrl+Tab) # ImGuiWindowFlags_UnsavedDocument = 1 << 18, /* original C++ signature */ unsaved_document = ( enum.auto() @@ -3155,9 +3222,9 @@ class WindowFlags_(enum.Enum): # Obsolete names -class ChildFlags_(enum.Enum): +class ChildFlags_(enum.IntFlag): """Flags for ImGui::BeginChild() - (Legacy: bit 0 must always correspond to ImGuiChildFlags_Borders to be backward compatible with old API using 'bool border = False'. + (Legacy: bit 0 must always correspond to ImGuiChildFlags_Borders to be backward compatible with old API using 'bool border = False'.) About using AutoResizeX/AutoResizeY flags: - May be combined with SetNextWindowSizeConstraints() to set a min/max size for each axis (see "Demo->Child->Auto-resize with Constraints"). 
- Size measurement for a given axis is only performed when the child window is within visible boundaries, or is just appearing. @@ -3206,7 +3273,7 @@ class ChildFlags_(enum.Enum): # Obsolete names -class ItemFlags_(enum.Enum): +class ItemFlags_(enum.IntFlag): """Flags for ImGui::PushItemFlag() (Those are shared by all items) """ @@ -3238,7 +3305,7 @@ class ItemFlags_(enum.Enum): enum.auto() ) # (= 1 << 5) # False // Allow submitting an item with the same identifier as an item already submitted this frame without triggering a warning tooltip if io.ConfigDebugHighlightIdConflicts is set. -class InputTextFlags_(enum.Enum): +class InputTextFlags_(enum.IntFlag): """Flags for ImGui::InputText() (Those are per-item flags. There are shared flags in ImGuiIO: io.ConfigInputTextCursorBlink and io.ConfigInputTextEnterKeepActive) """ @@ -3325,10 +3392,20 @@ class InputTextFlags_(enum.Enum): enum.auto() ) # (= 1 << 23) # Callback on any edit. Note that InputText() already returns True on edit + you can always use IsItemEdited(). The callback is useful to manipulate the underlying buffer while focus is active. + # Multi-line Word-Wrapping [BETA] + # - Not well tested yet. Please report any incorrect cursor movement, selection behavior etc. bug to https://github.com/ocornut/imgui/issues/3237. + # - Wrapping style is not ideal. Wrapping of long words/sections (e.g. words larger than total available width) may be particularly unpleasing. + # - Wrapping width needs to always account for the possibility of a vertical scrollbar. + # - It is much slower than regular text fields. + # Ballpark estimate of cost on my 2019 desktop PC: for a 100 KB text buffer: +~0.3 ms (Optimized) / +~1.0 ms (Debug build). + # The CPU cost is very roughly proportional to text length, so a 10 KB buffer should cost about ten times less. + # ImGuiInputTextFlags_WordWrap = 1 << 24, /* original C++ signature */ + word_wrap = enum.auto() # (= 1 << 24) # InputTextMultiline(): word-wrap lines that are too long. 
+ # Obsolete names # ImGuiInputTextFlags_AlwaysInsertMode = ImGuiInputTextFlags_AlwaysOverwrite // [renamed in 1.82] name was not matching behavior -class TreeNodeFlags_(enum.Enum): +class TreeNodeFlags_(enum.IntFlag): """Flags for ImGui::TreeNodeEx(), ImGui::CollapsingHeader*()""" # ImGuiTreeNodeFlags_None = 0, /* original C++ signature */ @@ -3386,16 +3463,29 @@ class TreeNodeFlags_(enum.Enum): # ImGuiTreeNodeFlags_LabelSpanAllColumns = 1 << 15, /* original C++ signature */ label_span_all_columns = enum.auto() # (= 1 << 15) # Label will span all columns of its container table # ImGuiTreeNodeFlags_NoScrollOnOpen = 1 << 16, // FIXME: TODO: Disable automatic scroll on TreePop() if node got just open and contents is not visible - # ImGuiTreeNodeFlags_NavLeftJumpsBackHere = 1 << 17, /* original C++ signature */ - nav_left_jumps_back_here = ( + # ImGuiTreeNodeFlags_NavLeftJumpsToParent = 1 << 17, /* original C++ signature */ + nav_left_jumps_to_parent = ( enum.auto() - ) # (= 1 << 17) # (WIP) Nav: left direction may move to this TreeNode() from any of its child (items submitted between TreeNode and TreePop) + ) # (= 1 << 17) # Nav: left arrow moves back to parent. This is processed in TreePop() when there's an unfulfilled Left nav request remaining. # ImGuiTreeNodeFlags_CollapsingHeader = ImGuiTreeNodeFlags_Framed | ImGuiTreeNodeFlags_NoTreePushOnOpen | ImGuiTreeNodeFlags_NoAutoOpenOnLog, /* original C++ signature */ collapsing_header = ( enum.auto() ) # (= TreeNodeFlags_Framed | TreeNodeFlags_NoTreePushOnOpen | TreeNodeFlags_NoAutoOpenOnLog) -class PopupFlags_(enum.Enum): + # [EXPERIMENTAL] Draw lines connecting TreeNode hierarchy. Discuss in GitHub issue #2920. + # Default value is pulled from style.TreeLinesFlags. May be overridden in TreeNode calls. 
+ # ImGuiTreeNodeFlags_DrawLinesNone = 1 << 18, /* original C++ signature */ + draw_lines_none = enum.auto() # (= 1 << 18) # No lines drawn + # ImGuiTreeNodeFlags_DrawLinesFull = 1 << 19, /* original C++ signature */ + draw_lines_full = ( + enum.auto() + ) # (= 1 << 19) # Horizontal lines to child nodes. Vertical line drawn down to TreePop() position: cover full contents. Faster (for large trees). + # ImGuiTreeNodeFlags_DrawLinesToNodes = 1 << 20, /* original C++ signature */ + draw_lines_to_nodes = ( + enum.auto() + ) # (= 1 << 20) # Horizontal lines to child nodes. Vertical line drawn down to bottom-most child node. Slower (for large trees). + +class PopupFlags_(enum.IntFlag): """Flags for OpenPopup*(), BeginPopupContext*(), IsPopupOpen() functions. - To be backward compatible with older API which took an 'int mouse_button = 1' argument instead of 'ImGuiPopupFlags flags', we need to treat small flags values as a mouse button index, so we encode the mouse button in the first few bits of the flags. @@ -3447,7 +3537,7 @@ class PopupFlags_(enum.Enum): # } any_popup = enum.auto() # (= PopupFlags_AnyPopupId | PopupFlags_AnyPopupLevel) -class SelectableFlags_(enum.Enum): +class SelectableFlags_(enum.IntFlag): """Flags for ImGui::Selectable()""" # ImGuiSelectableFlags_None = 0, /* original C++ signature */ @@ -3468,8 +3558,12 @@ class SelectableFlags_(enum.Enum): allow_overlap = enum.auto() # (= 1 << 4) # (WIP) Hit testing to allow subsequent widgets to overlap this one # ImGuiSelectableFlags_Highlight = 1 << 5, /* original C++ signature */ highlight = enum.auto() # (= 1 << 5) # Make the item be displayed as if it is hovered + # ImGuiSelectableFlags_SelectOnNav = 1 << 6, /* original C++ signature */ + select_on_nav = ( + enum.auto() + ) # (= 1 << 6) # Auto-select when moved into, unless Ctrl is held. Automatic when in a BeginMultiSelect() block. 
-class ComboFlags_(enum.Enum): +class ComboFlags_(enum.IntFlag): """Flags for ImGui::BeginCombo()""" # ImGuiComboFlags_None = 0, /* original C++ signature */ @@ -3498,7 +3592,7 @@ class ComboFlags_(enum.Enum): enum.auto() ) # (= ComboFlags_HeightSmall | ComboFlags_HeightRegular | ComboFlags_HeightLarge | ComboFlags_HeightLargest) -class TabBarFlags_(enum.Enum): +class TabBarFlags_(enum.IntFlag): """Flags for ImGui::BeginTabBar()""" # ImGuiTabBarFlags_None = 0, /* original C++ signature */ @@ -3523,17 +3617,24 @@ class TabBarFlags_(enum.Enum): no_tooltip = enum.auto() # (= 1 << 5) # Disable tooltips when hovering a tab # ImGuiTabBarFlags_DrawSelectedOverline = 1 << 6, /* original C++ signature */ draw_selected_overline = enum.auto() # (= 1 << 6) # Draw selected overline markers over selected tab - # ImGuiTabBarFlags_FittingPolicyResizeDown = 1 << 7, /* original C++ signature */ - fitting_policy_resize_down = enum.auto() # (= 1 << 7) # Resize tabs when they don't fit - # ImGuiTabBarFlags_FittingPolicyScroll = 1 << 8, /* original C++ signature */ - fitting_policy_scroll = enum.auto() # (= 1 << 8) # Add scroll buttons when tabs don't fit - # ImGuiTabBarFlags_FittingPolicyMask_ = ImGuiTabBarFlags_FittingPolicyResizeDown | ImGuiTabBarFlags_FittingPolicyScroll, /* original C++ signature */ - fitting_policy_mask_ = enum.auto() # (= TabBarFlags_FittingPolicyResizeDown | TabBarFlags_FittingPolicyScroll) - # ImGuiTabBarFlags_FittingPolicyDefault_ = ImGuiTabBarFlags_FittingPolicyResizeDown, /* original C++ signature */ - # } - fitting_policy_default_ = enum.auto() # (= TabBarFlags_FittingPolicyResizeDown) -class TabItemFlags_(enum.Enum): + # Fitting/Resize policy + # ImGuiTabBarFlags_FittingPolicyMixed = 1 << 7, /* original C++ signature */ + fitting_policy_mixed = ( + enum.auto() + ) # (= 1 << 7) # Shrink down tabs when they don't fit, until width is style.TabMinWidthShrink, then enable scrolling buttons. 
+ # ImGuiTabBarFlags_FittingPolicyShrink = 1 << 8, /* original C++ signature */ + fitting_policy_shrink = enum.auto() # (= 1 << 8) # Shrink down tabs when they don't fit + # ImGuiTabBarFlags_FittingPolicyScroll = 1 << 9, /* original C++ signature */ + fitting_policy_scroll = enum.auto() # (= 1 << 9) # Enable scrolling buttons when tabs don't fit + # ImGuiTabBarFlags_FittingPolicyMask_ = ImGuiTabBarFlags_FittingPolicyMixed | ImGuiTabBarFlags_FittingPolicyShrink | ImGuiTabBarFlags_FittingPolicyScroll, /* original C++ signature */ + fitting_policy_mask_ = ( + enum.auto() + ) # (= TabBarFlags_FittingPolicyMixed | TabBarFlags_FittingPolicyShrink | TabBarFlags_FittingPolicyScroll) + # ImGuiTabBarFlags_FittingPolicyDefault_ = ImGuiTabBarFlags_FittingPolicyMixed, /* original C++ signature */ + fitting_policy_default_ = enum.auto() # (= TabBarFlags_FittingPolicyMixed) + +class TabItemFlags_(enum.IntFlag): """Flags for ImGui::BeginTabItem()""" # ImGuiTabItemFlags_None = 0, /* original C++ signature */ @@ -3569,7 +3670,7 @@ class TabItemFlags_(enum.Enum): enum.auto() ) # (= 1 << 8) # Tab is selected when trying to close + closure is not immediately assumed (will wait for user to stop submitting the tab). Otherwise closure is assumed when pressing the X, so if you keep submitting the tab may reappear at end of tab bar. -class FocusedFlags_(enum.Enum): +class FocusedFlags_(enum.IntFlag): """Flags for ImGui::IsWindowFocused()""" # ImGuiFocusedFlags_None = 0, /* original C++ signature */ @@ -3594,7 +3695,7 @@ class FocusedFlags_(enum.Enum): # } root_and_child_windows = enum.auto() # (= FocusedFlags_RootWindow | FocusedFlags_ChildWindows) -class HoveredFlags_(enum.Enum): +class HoveredFlags_(enum.IntFlag): """Flags for ImGui::IsItemHovered(), ImGui::IsWindowHovered() Note: if you are trying to check whether your mouse should be dispatched to Dear ImGui or to your app, you should use 'io.WantCaptureMouse' instead! Please read the FAQ! 
Note: windows with the ImGuiWindowFlags_NoInputs flag are ignored by IsWindowHovered() calls. @@ -3659,7 +3760,7 @@ class HoveredFlags_(enum.Enum): # Tooltips mode # - typically used in IsItemHovered() + SetTooltip() sequence. # - this is a shortcut to pull flags from 'style.HoverFlagsForTooltipMouse' or 'style.HoverFlagsForTooltipNav' where you can reconfigure desired behavior. - # e.g. 'TooltipHoveredFlagsForMouse' defaults to 'ImGuiHoveredFlags_Stationary | ImGuiHoveredFlags_DelayShort'. + # e.g. 'HoverFlagsForTooltipMouse' defaults to 'ImGuiHoveredFlags_Stationary | ImGuiHoveredFlags_DelayShort | ImGuiHoveredFlags_AllowWhenDisabled'. # - for frequently actioned or hovered items providing a tooltip, you want may to use ImGuiHoveredFlags_ForTooltip (stationary + delay) so the tooltip doesn't show too often. # - for items which main purpose is to be hovered, or items with low affordance, or in less consistent apps, prefer no delay or shorter delay. # ImGuiHoveredFlags_ForTooltip = 1 << 12, /* original C++ signature */ @@ -3691,7 +3792,7 @@ class HoveredFlags_(enum.Enum): enum.auto() ) # (= 1 << 17) # IsItemHovered() only: Disable shared delay system where moving from one item to the next keeps the previous timer for a short time (standard for tooltips with long delays) -class DockNodeFlags_(enum.Enum): +class DockNodeFlags_(enum.IntFlag): """Flags for ImGui::DockSpace(), shared/inherited by child nodes. (Some flags can be applied to individual nodes directly) FIXME-DOCK: Also see ImGuiDockNodeFlagsPrivate_ which may involve using the WIP and internal DockBuilder api. @@ -3725,7 +3826,7 @@ class DockNodeFlags_(enum.Enum): # ImGuiDockNodeFlags_NoUndocking = 1 << 7, /* original C++ signature */ no_undocking = enum.auto() # (= 1 << 7) # // Disable undocking this node. 
-class DragDropFlags_(enum.Enum): +class DragDropFlags_(enum.IntFlag): """Flags for ImGui::BeginDragDropSource(), ImGui::AcceptDragDropPayload()""" # ImGuiDragDropFlags_None = 0, /* original C++ signature */ @@ -3776,6 +3877,10 @@ class DragDropFlags_(enum.Enum): accept_no_preview_tooltip = ( enum.auto() ) # (= 1 << 12) # Request hiding the BeginDragDropSource tooltip from the BeginDragDropTarget site. + # ImGuiDragDropFlags_AcceptDrawAsHovered = 1 << 13, /* original C++ signature */ + accept_draw_as_hovered = ( + enum.auto() + ) # (= 1 << 13) # Accepting item will render as if hovered. Useful for e.g. a Button() used as a drop target. # ImGuiDragDropFlags_AcceptPeekOnly = ImGuiDragDropFlags_AcceptBeforeDelivery | ImGuiDragDropFlags_AcceptNoDrawDefaultRect, /* original C++ signature */ accept_peek_only = ( enum.auto() @@ -3783,7 +3888,7 @@ class DragDropFlags_(enum.Enum): # Standard Drag and Drop payload types. You can define you own payload types using short strings. Types starting with '_' are defined by Dear ImGui. -class DataType_(enum.Enum): +class DataType_(enum.IntFlag): """A primary data type""" # ImGuiDataType_S8, /* original C++ signature */ @@ -3814,7 +3919,7 @@ class DataType_(enum.Enum): # } count = enum.auto() # (= 12) -class Dir(enum.Enum): +class Dir(enum.IntFlag): """A cardinal direction""" # ImGuiDir_None = -1, /* original C++ signature */ @@ -3831,7 +3936,7 @@ class Dir(enum.Enum): # } count = enum.auto() # (= 4) -class SortDirection(enum.Enum): +class SortDirection(enum.IntFlag): """A sorting direction""" # ImGuiSortDirection_None = 0, /* original C++ signature */ @@ -3841,7 +3946,7 @@ class SortDirection(enum.Enum): # ImGuiSortDirection_Descending = 2 /* original C++ signature */ descending = enum.auto() # (= 2) # Descending = 9->0, Z->A etc. -class Key(enum.Enum): +class Key(enum.IntFlag): """A key identifier (ImGuiKey_XXX or ImGuiMod_XXX value): can represent Keyboard, Mouse and Gamepad values. All our named keys are >= 512. 
Keys value 0 to 511 are left unused and were legacy native/opaque key values (< 1.87). Support for legacy keys was completely removed in 1.91.5. @@ -3892,8 +3997,8 @@ class Key(enum.Enum): left_shift = enum.auto() # (= 528) # ImGuiKey_LeftAlt, /* original C++ signature */ left_alt = enum.auto() # (= 529) - # ImGuiKey_LeftSuper, /* original C++ signature */ - left_super = enum.auto() # (= 530) + # ImGuiKey_LeftSuper, /* original C++ signature */ + left_super = enum.auto() # (= 530) # Also see ImGuiMod_Ctrl, ImGuiMod_Shift, ImGuiMod_Alt, ImGuiMod_Super below! # ImGuiKey_RightCtrl, /* original C++ signature */ right_ctrl = enum.auto() # (= 531) # ImGuiKey_RightShift, /* original C++ signature */ @@ -4097,84 +4202,80 @@ class Key(enum.Enum): # ImGuiKey_Oem102, /* original C++ signature */ oem102 = enum.auto() # (= 631) # Non-US backslash. - # Gamepad (some of those are analog values, 0.0 to 1.0) // NAVIGATION ACTION + # Gamepad + # (analog values are 0.0 to 1.0) # (download controller mapping PNG/PSD at http://dearimgui.com/controls_sheets) + # // XBOX | SWITCH | PLAYSTA. | -> ACTION # ImGuiKey_GamepadStart, /* original C++ signature */ - gamepad_start = enum.auto() # (= 632) # Menu (Xbox) + (Switch) Start/Options (PS) + gamepad_start = enum.auto() # (= 632) # Menu | + | Options | # ImGuiKey_GamepadBack, /* original C++ signature */ - gamepad_back = enum.auto() # (= 633) # View (Xbox) - (Switch) Share (PS) + gamepad_back = enum.auto() # (= 633) # View | - | Share | # ImGuiKey_GamepadFaceLeft, /* original C++ signature */ gamepad_face_left = ( enum.auto() - ) # (= 634) # X (Xbox) Y (Switch) Square (PS) // Tap: Toggle Menu. Hold: Windowing mode (Focus/Move/Resize windows) + ) # (= 634) # X | Y | Square | Tap: Toggle Menu. 
Hold: Windowing mode (Focus/Move/Resize windows) # ImGuiKey_GamepadFaceRight, /* original C++ signature */ - gamepad_face_right = ( - enum.auto() - ) # (= 635) # B (Xbox) A (Switch) Circle (PS) // Cancel / Close / Exit + gamepad_face_right = enum.auto() # (= 635) # B | A | Circle | Cancel / Close / Exit # ImGuiKey_GamepadFaceUp, /* original C++ signature */ - gamepad_face_up = ( - enum.auto() - ) # (= 636) # Y (Xbox) X (Switch) Triangle (PS) // Text Input / On-screen Keyboard + gamepad_face_up = enum.auto() # (= 636) # Y | X | Triangle | Text Input / On-screen Keyboard # ImGuiKey_GamepadFaceDown, /* original C++ signature */ - gamepad_face_down = ( - enum.auto() - ) # (= 637) # A (Xbox) B (Switch) Cross (PS) // Activate / Open / Toggle / Tweak + gamepad_face_down = enum.auto() # (= 637) # A | B | Cross | Activate / Open / Toggle / Tweak # ImGuiKey_GamepadDpadLeft, /* original C++ signature */ gamepad_dpad_left = ( enum.auto() - ) # (= 638) # D-pad Left // Move / Tweak / Resize Window (in Windowing mode) + ) # (= 638) # D-pad Left | " | " | Move / Tweak / Resize Window (in Windowing mode) # ImGuiKey_GamepadDpadRight, /* original C++ signature */ gamepad_dpad_right = ( enum.auto() - ) # (= 639) # D-pad Right // Move / Tweak / Resize Window (in Windowing mode) + ) # (= 639) # D-pad Right | " | " | Move / Tweak / Resize Window (in Windowing mode) # ImGuiKey_GamepadDpadUp, /* original C++ signature */ gamepad_dpad_up = ( enum.auto() - ) # (= 640) # D-pad Up // Move / Tweak / Resize Window (in Windowing mode) + ) # (= 640) # D-pad Up | " | " | Move / Tweak / Resize Window (in Windowing mode) # ImGuiKey_GamepadDpadDown, /* original C++ signature */ gamepad_dpad_down = ( enum.auto() - ) # (= 641) # D-pad Down // Move / Tweak / Resize Window (in Windowing mode) + ) # (= 641) # D-pad Down | " | " | Move / Tweak / Resize Window (in Windowing mode) # ImGuiKey_GamepadL1, /* original C++ signature */ gamepad_l1 = ( enum.auto() - ) # (= 642) # L Bumper (Xbox) L (Switch) L1 (PS) 
// Tweak Slower / Focus Previous (in Windowing mode) + ) # (= 642) # L Bumper | L | L1 | Tweak Slower / Focus Previous (in Windowing mode) # ImGuiKey_GamepadR1, /* original C++ signature */ gamepad_r1 = ( enum.auto() - ) # (= 643) # R Bumper (Xbox) R (Switch) R1 (PS) // Tweak Faster / Focus Next (in Windowing mode) + ) # (= 643) # R Bumper | R | R1 | Tweak Faster / Focus Next (in Windowing mode) # ImGuiKey_GamepadL2, /* original C++ signature */ - gamepad_l2 = enum.auto() # (= 644) # L Trig. (Xbox) ZL (Switch) L2 (PS) [Analog] + gamepad_l2 = enum.auto() # (= 644) # L Trigger | ZL | L2 | [Analog] # ImGuiKey_GamepadR2, /* original C++ signature */ - gamepad_r2 = enum.auto() # (= 645) # R Trig. (Xbox) ZR (Switch) R2 (PS) [Analog] + gamepad_r2 = enum.auto() # (= 645) # R Trigger | ZR | R2 | [Analog] # ImGuiKey_GamepadL3, /* original C++ signature */ - gamepad_l3 = enum.auto() # (= 646) # L Stick (Xbox) L3 (Switch) L3 (PS) + gamepad_l3 = enum.auto() # (= 646) # L Stick | L3 | L3 | # ImGuiKey_GamepadR3, /* original C++ signature */ - gamepad_r3 = enum.auto() # (= 647) # R Stick (Xbox) R3 (Switch) R3 (PS) + gamepad_r3 = enum.auto() # (= 647) # R Stick | R3 | R3 | # ImGuiKey_GamepadLStickLeft, /* original C++ signature */ gamepad_l_stick_left = ( enum.auto() - ) # (= 648) # [Analog] // Move Window (in Windowing mode) + ) # (= 648) # | | | [Analog] Move Window (in Windowing mode) # ImGuiKey_GamepadLStickRight, /* original C++ signature */ gamepad_l_stick_right = ( enum.auto() - ) # (= 649) # [Analog] // Move Window (in Windowing mode) + ) # (= 649) # | | | [Analog] Move Window (in Windowing mode) # ImGuiKey_GamepadLStickUp, /* original C++ signature */ gamepad_l_stick_up = ( enum.auto() - ) # (= 650) # [Analog] // Move Window (in Windowing mode) + ) # (= 650) # | | | [Analog] Move Window (in Windowing mode) # ImGuiKey_GamepadLStickDown, /* original C++ signature */ gamepad_l_stick_down = ( enum.auto() - ) # (= 651) # [Analog] // Move Window (in Windowing mode) + ) # (= 651) 
# | | | [Analog] Move Window (in Windowing mode) # ImGuiKey_GamepadRStickLeft, /* original C++ signature */ - gamepad_r_stick_left = enum.auto() # (= 652) # [Analog] + gamepad_r_stick_left = enum.auto() # (= 652) # | | | [Analog] # ImGuiKey_GamepadRStickRight, /* original C++ signature */ - gamepad_r_stick_right = enum.auto() # (= 653) # [Analog] + gamepad_r_stick_right = enum.auto() # (= 653) # | | | [Analog] # ImGuiKey_GamepadRStickUp, /* original C++ signature */ - gamepad_r_stick_up = enum.auto() # (= 654) # [Analog] + gamepad_r_stick_up = enum.auto() # (= 654) # | | | [Analog] # ImGuiKey_GamepadRStickDown, /* original C++ signature */ - gamepad_r_stick_down = enum.auto() # (= 655) # [Analog] + gamepad_r_stick_down = enum.auto() # (= 655) # | | | [Analog] # ImGuiKey_MouseLeft, /* original C++ signature */ # Aliases: Mouse Buttons (auto-submitted from AddMouseButtonEvent() calls) @@ -4202,12 +4303,17 @@ class Key(enum.Enum): reserved_for_mod_alt = enum.auto() # (= 665) # ImGuiKey_ReservedForModSuper, /* original C++ signature */ reserved_for_mod_super = enum.auto() # (= 666) + + # [Internal] If you need to iterate all keys (for e.g. an input mapper) you may use ImGuiKey_NamedKey_BEGIN..ImGuiKey_NamedKey_END. # ImGuiKey_NamedKey_END, /* original C++ signature */ named_key_end = enum.auto() # (= 667) + # ImGuiKey_NamedKey_COUNT = ImGuiKey_NamedKey_END - ImGuiKey_NamedKey_BEGIN, /* original C++ signature */ + named_key_count = enum.auto() # (= Key_NamedKey_END - Key_NamedKey_BEGIN) # Keyboard Modifiers (explicitly submitted by backend via AddKeyEvent() calls) - # - This is mirroring the data also written to io.KeyCtrl, io.KeyShift, io.KeyAlt, io.KeySuper, in a format allowing - # them to be accessed via standard key API, allowing calls such as IsKeyPressed(), IsKeyReleased(), querying duration etc. + # - Any functions taking a ImGuiKeyChord parameter can binary-or those with regular keys, e.g. Shortcut(ImGuiMod_Ctrl | ImGuiKey_S). 
+ # - Those are written back into io.KeyCtrl, io.KeyShift, io.KeyAlt, io.KeySuper for convenience, + # but may be accessed via standard key API such as IsKeyPressed(), IsKeyReleased(), querying duration etc. # - Code polling every key (e.g. an interface to detect a key press for input mapping) might want to ignore those # and prefer using the real keys (e.g. ImGuiKey_LeftCtrl, ImGuiKey_RightCtrl instead of ImGuiMod_Ctrl). # - In theory the value of keyboard modifiers should be roughly equivalent to a logical or of the equivalent left/right keys. @@ -4227,13 +4333,7 @@ class Key(enum.Enum): # ImGuiMod_Mask_ = 0xF000, /* original C++ signature */ mod_mask_ = enum.auto() # (= 0xF000) # 4-bits - # ImGuiKey_NamedKey_COUNT = ImGuiKey_NamedKey_END - ImGuiKey_NamedKey_BEGIN, /* original C++ signature */ - # [Internal] If you need to iterate all keys (for e.g. an input mapper) you may use ImGuiKey_NamedKey_BEGIN..ImGuiKey_NamedKey_END. - named_key_count = enum.auto() # (= Key_NamedKey_END - Key_NamedKey_BEGIN) - # ImGuiKey_KeysData_SIZE = ImGuiKey_NamedKey_COUNT, // Size of KeysData[]: only hold named keys - # ImGuiKey_KeysData_OFFSET = ImGuiKey_NamedKey_BEGIN, // Accesses to io.KeysData[] must use (key - ImGuiKey_NamedKey_BEGIN) index. - -class InputFlags_(enum.Enum): +class InputFlags_(enum.IntFlag): """Flags for Shortcut(), SetNextItemShortcut(), (and for upcoming extended versions of IsKeyPressed(), IsMouseClicked(), Shortcut(), SetKeyOwner(), SetItemKeyOwner() that are still in imgui_internal.h) Don't mistake with ImGuiInputTextFlags! (which is for ImGui::InputText() function) @@ -4269,7 +4369,7 @@ class InputFlags_(enum.Enum): # ImGuiInputFlags_RouteOverActive = 1 << 15, /* original C++ signature */ route_over_active = ( enum.auto() - ) # (= 1 << 15) # Option: global route: higher priority than active item. Unlikely you need to use that: will interfere with every active items, e.g. CTRL+A registered by InputText will be overridden by this. 
May not be fully honored as user/internal code is likely to always assume they can access keys when active. + ) # (= 1 << 15) # Option: global route: higher priority than active item. Unlikely you need to use that: will interfere with every active items, e.g. Ctrl+A registered by InputText will be overridden by this. May not be fully honored as user/internal code is likely to always assume they can access keys when active. # ImGuiInputFlags_RouteUnlessBgFocused = 1 << 16, /* original C++ signature */ route_unless_bg_focused = ( enum.auto() @@ -4285,7 +4385,7 @@ class InputFlags_(enum.Enum): enum.auto() ) # (= 1 << 18) # Automatically display a tooltip when hovering item [BETA] Unsure of right api (opt-in/opt-out) -class ConfigFlags_(enum.Enum): +class ConfigFlags_(enum.IntFlag): """Configuration flags stored in io.ConfigFlags. Set by user/application.""" # ImGuiConfigFlags_None = 0, /* original C++ signature */ @@ -4319,14 +4419,6 @@ class ConfigFlags_(enum.Enum): viewports_enable = ( enum.auto() ) # (= 1 << 10) # Viewport enable flags (require both ImGuiBackendFlags_PlatformHasViewports + ImGuiBackendFlags_RendererHasViewports set by the respective backends) - # ImGuiConfigFlags_DpiEnableScaleViewports= 1 << 14, /* original C++ signature */ - dpi_enable_scale_viewports = ( - enum.auto() - ) # (= 1 << 14) # [BETA: Don't use] FIXME-DPI: Reposition and resize imgui windows when the DpiScale of a viewport changed (mostly useful for the main viewport hosting other window). Note that resizing the main window itself is up to your application. - # ImGuiConfigFlags_DpiEnableScaleFonts = 1 << 15, /* original C++ signature */ - dpi_enable_scale_fonts = ( - enum.auto() - ) # (= 1 << 15) # [BETA: Don't use] FIXME-DPI: Request bitmap-scaled fonts to match DpiScale. This is a very low-quality workaround. The correct way to handle DPI is _currently_ to replace the atlas and/or fonts in the Platform_OnChangedViewport callback, but this is all early work in progress. 
# User storage (to allow your backend/engine to communicate to code that may be shared between multiple projects. Those flags are NOT used by core Dear ImGui) # ImGuiConfigFlags_IsSRGB = 1 << 20, /* original C++ signature */ @@ -4334,7 +4426,7 @@ class ConfigFlags_(enum.Enum): # ImGuiConfigFlags_IsTouchScreen = 1 << 21, /* original C++ signature */ is_touch_screen = enum.auto() # (= 1 << 21) # Application is using a touch screen instead of a mouse. -class BackendFlags_(enum.Enum): +class BackendFlags_(enum.IntFlag): """Backend capabilities flags stored in io.BackendFlags. Set by imgui_impl_xxx or custom backend.""" # ImGuiBackendFlags_None = 0, /* original C++ signature */ @@ -4353,18 +4445,26 @@ class BackendFlags_(enum.Enum): renderer_has_vtx_offset = ( enum.auto() ) # (= 1 << 3) # Backend Renderer supports ImDrawCmd::VtxOffset. This enables output of large meshes (64K+ vertices) while still using 16-bit indices. + # ImGuiBackendFlags_RendererHasTextures = 1 << 4, /* original C++ signature */ + renderer_has_textures = ( + enum.auto() + ) # (= 1 << 4) # Backend Renderer supports ImTextureData requests to create/update/destroy textures. This enables incremental texture updates and texture reloads. See https://github.com/ocornut/imgui/blob/master/docs/BACKENDS.md for instructions on how to upgrade your custom backend. - # [BETA] Viewports - # ImGuiBackendFlags_PlatformHasViewports = 1 << 10, /* original C++ signature */ - platform_has_viewports = enum.auto() # (= 1 << 10) # Backend Platform supports multiple viewports. - # ImGuiBackendFlags_HasMouseHoveredViewport=1 << 11, /* original C++ signature */ + # [BETA] Multi-Viewports + # ImGuiBackendFlags_RendererHasViewports = 1 << 10, /* original C++ signature */ + renderer_has_viewports = enum.auto() # (= 1 << 10) # Backend Renderer supports multiple viewports. 
+ # ImGuiBackendFlags_PlatformHasViewports = 1 << 11, /* original C++ signature */ + platform_has_viewports = enum.auto() # (= 1 << 11) # Backend Platform supports multiple viewports. + # ImGuiBackendFlags_HasMouseHoveredViewport=1 << 12, /* original C++ signature */ has_mouse_hovered_viewport = ( enum.auto() - ) # (= 1 << 11) # Backend Platform supports calling io.AddMouseViewportEvent() with the viewport under the mouse. IF POSSIBLE, ignore viewports with the ImGuiViewportFlags_NoInputs flag (Win32 backend, GLFW 3.30+ backend can do this, SDL backend cannot). If this cannot be done, Dear ImGui needs to use a flawed heuristic to find the viewport under. - # ImGuiBackendFlags_RendererHasViewports = 1 << 12, /* original C++ signature */ - renderer_has_viewports = enum.auto() # (= 1 << 12) # Backend Renderer supports multiple viewports. + ) # (= 1 << 12) # Backend Platform supports calling io.AddMouseViewportEvent() with the viewport under the mouse. IF POSSIBLE, ignore viewports with the ImGuiViewportFlags_NoInputs flag (Win32 backend, GLFW 3.30+ backend can do this, SDL backend cannot). If this cannot be done, Dear ImGui needs to use a flawed heuristic to find the viewport under. + # ImGuiBackendFlags_HasParentViewport = 1 << 13, /* original C++ signature */ + has_parent_viewport = ( + enum.auto() + ) # (= 1 << 13) # Backend Platform supports honoring viewport->ParentViewport/ParentViewportId value, by applying the corresponding parent/child relation at the Platform level. 
-class Col_(enum.Enum): +class Col_(enum.IntFlag): """Enumeration for PushStyleColor() / PopStyleColor()""" # ImGuiCol_Text, /* original C++ signature */ @@ -4433,75 +4533,83 @@ class Col_(enum.Enum): resize_grip_hovered = enum.auto() # (= 31) # ImGuiCol_ResizeGripActive, /* original C++ signature */ resize_grip_active = enum.auto() # (= 32) + # ImGuiCol_InputTextCursor, /* original C++ signature */ + input_text_cursor = enum.auto() # (= 33) # InputText cursor/caret # ImGuiCol_TabHovered, /* original C++ signature */ - tab_hovered = enum.auto() # (= 33) # Tab background, when hovered + tab_hovered = enum.auto() # (= 34) # Tab background, when hovered # ImGuiCol_Tab, /* original C++ signature */ - tab = enum.auto() # (= 34) # Tab background, when tab-bar is focused & tab is unselected + tab = enum.auto() # (= 35) # Tab background, when tab-bar is focused & tab is unselected # ImGuiCol_TabSelected, /* original C++ signature */ - tab_selected = enum.auto() # (= 35) # Tab background, when tab-bar is focused & tab is selected + tab_selected = enum.auto() # (= 36) # Tab background, when tab-bar is focused & tab is selected # ImGuiCol_TabSelectedOverline, /* original C++ signature */ - tab_selected_overline = enum.auto() # (= 36) # Tab horizontal overline, when tab-bar is focused & tab is selected + tab_selected_overline = enum.auto() # (= 37) # Tab horizontal overline, when tab-bar is focused & tab is selected # ImGuiCol_TabDimmed, /* original C++ signature */ - tab_dimmed = enum.auto() # (= 37) # Tab background, when tab-bar is unfocused & tab is unselected + tab_dimmed = enum.auto() # (= 38) # Tab background, when tab-bar is unfocused & tab is unselected # ImGuiCol_TabDimmedSelected, /* original C++ signature */ - tab_dimmed_selected = enum.auto() # (= 38) # Tab background, when tab-bar is unfocused & tab is selected + tab_dimmed_selected = enum.auto() # (= 39) # Tab background, when tab-bar is unfocused & tab is selected # ImGuiCol_TabDimmedSelectedOverline, /* 
original C++ signature */ tab_dimmed_selected_overline = ( enum.auto() - ) # (= 39) #..horizontal overline, when tab-bar is unfocused & tab is selected + ) # (= 40) #..horizontal overline, when tab-bar is unfocused & tab is selected # ImGuiCol_DockingPreview, /* original C++ signature */ - docking_preview = enum.auto() # (= 40) # Preview overlay color when about to docking something + docking_preview = enum.auto() # (= 41) # Preview overlay color when about to docking something # ImGuiCol_DockingEmptyBg, /* original C++ signature */ docking_empty_bg = ( enum.auto() - ) # (= 41) # Background color for empty node (e.g. CentralNode with no window docked into it) + ) # (= 42) # Background color for empty node (e.g. CentralNode with no window docked into it) # ImGuiCol_PlotLines, /* original C++ signature */ - plot_lines = enum.auto() # (= 42) + plot_lines = enum.auto() # (= 43) # ImGuiCol_PlotLinesHovered, /* original C++ signature */ - plot_lines_hovered = enum.auto() # (= 43) + plot_lines_hovered = enum.auto() # (= 44) # ImGuiCol_PlotHistogram, /* original C++ signature */ - plot_histogram = enum.auto() # (= 44) + plot_histogram = enum.auto() # (= 45) # ImGuiCol_PlotHistogramHovered, /* original C++ signature */ - plot_histogram_hovered = enum.auto() # (= 45) + plot_histogram_hovered = enum.auto() # (= 46) # ImGuiCol_TableHeaderBg, /* original C++ signature */ - table_header_bg = enum.auto() # (= 46) # Table header background + table_header_bg = enum.auto() # (= 47) # Table header background # ImGuiCol_TableBorderStrong, /* original C++ signature */ - table_border_strong = enum.auto() # (= 47) # Table outer and header borders (prefer using Alpha=1.0 here) + table_border_strong = enum.auto() # (= 48) # Table outer and header borders (prefer using Alpha=1.0 here) # ImGuiCol_TableBorderLight, /* original C++ signature */ - table_border_light = enum.auto() # (= 48) # Table inner borders (prefer using Alpha=1.0 here) + table_border_light = enum.auto() # (= 49) # Table 
inner borders (prefer using Alpha=1.0 here) # ImGuiCol_TableRowBg, /* original C++ signature */ - table_row_bg = enum.auto() # (= 49) # Table row background (even rows) + table_row_bg = enum.auto() # (= 50) # Table row background (even rows) # ImGuiCol_TableRowBgAlt, /* original C++ signature */ - table_row_bg_alt = enum.auto() # (= 50) # Table row background (odd rows) + table_row_bg_alt = enum.auto() # (= 51) # Table row background (odd rows) # ImGuiCol_TextLink, /* original C++ signature */ - text_link = enum.auto() # (= 51) # Hyperlink color - # ImGuiCol_TextSelectedBg, /* original C++ signature */ - text_selected_bg = enum.auto() # (= 52) + text_link = enum.auto() # (= 52) # Hyperlink color + # ImGuiCol_TextSelectedBg, /* original C++ signature */ + text_selected_bg = enum.auto() # (= 53) # Selected text inside an InputText + # ImGuiCol_TreeLines, /* original C++ signature */ + tree_lines = enum.auto() # (= 54) # Tree node hierarchy outlines when using ImGuiTreeNodeFlags_DrawLines # ImGuiCol_DragDropTarget, /* original C++ signature */ - drag_drop_target = enum.auto() # (= 53) # Rectangle highlighting a drop target + drag_drop_target = enum.auto() # (= 55) # Rectangle border highlighting a drop target + # ImGuiCol_DragDropTargetBg, /* original C++ signature */ + drag_drop_target_bg = enum.auto() # (= 56) # Rectangle background highlighting a drop target + # ImGuiCol_UnsavedMarker, /* original C++ signature */ + unsaved_marker = enum.auto() # (= 57) # Unsaved Document marker (in window title and tabs) # ImGuiCol_NavCursor, /* original C++ signature */ - nav_cursor = enum.auto() # (= 54) # Color of keyboard/gamepad navigation cursor/rectangle, when visible + nav_cursor = enum.auto() # (= 58) # Color of keyboard/gamepad navigation cursor/rectangle, when visible # ImGuiCol_NavWindowingHighlight, /* original C++ signature */ - nav_windowing_highlight = enum.auto() # (= 55) # Highlight window when using CTRL+TAB + nav_windowing_highlight = enum.auto() # (= 59) # 
Highlight window when using Ctrl+Tab # ImGuiCol_NavWindowingDimBg, /* original C++ signature */ nav_windowing_dim_bg = ( enum.auto() - ) # (= 56) # Darken/colorize entire screen behind the CTRL+TAB window list, when active + ) # (= 60) # Darken/colorize entire screen behind the Ctrl+Tab window list, when active # ImGuiCol_ModalWindowDimBg, /* original C++ signature */ modal_window_dim_bg = ( enum.auto() - ) # (= 57) # Darken/colorize entire screen behind a modal window, when one is active + ) # (= 61) # Darken/colorize entire screen behind a modal window, when one is active # ImGuiCol_COUNT, /* original C++ signature */ - count = enum.auto() # (= 58) + count = enum.auto() # (= 62) -class StyleVar_(enum.Enum): +class StyleVar_(enum.IntFlag): """Enumeration for PushStyleVar() / PopStyleVar() to temporarily modify the ImGuiStyle structure. - The enum only refers to fields of ImGuiStyle which makes sense to be pushed/popped inside UI code. During initialization or between frames, feel free to just poke into ImGuiStyle directly. - Tip: Use your programming IDE navigation facilities on the names in the _second column_ below to find the actual members and their description. - - In Visual Studio: CTRL+comma ("Edit.GoToAll") can follow symbols inside comments, whereas CTRL+F12 ("Edit.GoToImplementation") cannot. - - In Visual Studio w/ Visual Assist installed: ALT+G ("VAssistX.GoToImplementation") can also follow symbols inside comments. - - In VS Code, CLion, etc.: CTRL+click can follow symbols inside comments. + - In Visual Studio: Ctrl+Comma ("Edit.GoToAll") can follow symbols inside comments, whereas Ctrl+F12 ("Edit.GoToImplementation") cannot. + - In Visual Studio w/ Visual Assist installed: Alt+G ("VAssistX.GoToImplementation") can also follow symbols inside comments. + - In VS Code, CLion, etc.: Ctrl+Click can follow symbols inside comments. - When changing this enum, you need to update the associated internal table GStyleVarInfo[] accordingly. 
This is where we link enum values to members offset/type. """ @@ -4546,43 +4654,53 @@ class StyleVar_(enum.Enum): scrollbar_size = enum.auto() # (= 18) # float ScrollbarSize # ImGuiStyleVar_ScrollbarRounding, /* original C++ signature */ scrollbar_rounding = enum.auto() # (= 19) # float ScrollbarRounding + # ImGuiStyleVar_ScrollbarPadding, /* original C++ signature */ + scrollbar_padding = enum.auto() # (= 20) # float ScrollbarPadding # ImGuiStyleVar_GrabMinSize, /* original C++ signature */ - grab_min_size = enum.auto() # (= 20) # float GrabMinSize + grab_min_size = enum.auto() # (= 21) # float GrabMinSize # ImGuiStyleVar_GrabRounding, /* original C++ signature */ - grab_rounding = enum.auto() # (= 21) # float GrabRounding + grab_rounding = enum.auto() # (= 22) # float GrabRounding # ImGuiStyleVar_ImageBorderSize, /* original C++ signature */ - image_border_size = enum.auto() # (= 22) # float ImageBorderSize + image_border_size = enum.auto() # (= 23) # float ImageBorderSize # ImGuiStyleVar_LayoutAlign, /* original C++ signature */ - layout_align = enum.auto() # (= 23) # float LayoutAlign + layout_align = enum.auto() # (= 24) # float LayoutAlign # ImGuiStyleVar_TabRounding, /* original C++ signature */ - tab_rounding = enum.auto() # (= 24) # float TabRounding + tab_rounding = enum.auto() # (= 25) # float TabRounding # ImGuiStyleVar_TabBorderSize, /* original C++ signature */ - tab_border_size = enum.auto() # (= 25) # float TabBorderSize + tab_border_size = enum.auto() # (= 26) # float TabBorderSize + # ImGuiStyleVar_TabMinWidthBase, /* original C++ signature */ + tab_min_width_base = enum.auto() # (= 27) # float TabMinWidthBase + # ImGuiStyleVar_TabMinWidthShrink, /* original C++ signature */ + tab_min_width_shrink = enum.auto() # (= 28) # float TabMinWidthShrink # ImGuiStyleVar_TabBarBorderSize, /* original C++ signature */ - tab_bar_border_size = enum.auto() # (= 26) # float TabBarBorderSize + tab_bar_border_size = enum.auto() # (= 29) # float TabBarBorderSize # 
ImGuiStyleVar_TabBarOverlineSize, /* original C++ signature */ - tab_bar_overline_size = enum.auto() # (= 27) # float TabBarOverlineSize + tab_bar_overline_size = enum.auto() # (= 30) # float TabBarOverlineSize # ImGuiStyleVar_TableAngledHeadersAngle, /* original C++ signature */ - table_angled_headers_angle = enum.auto() # (= 28) # float TableAngledHeadersAngle + table_angled_headers_angle = enum.auto() # (= 31) # float TableAngledHeadersAngle # ImGuiStyleVar_TableAngledHeadersTextAlign, /* original C++ signature */ - table_angled_headers_text_align = enum.auto() # (= 29) # ImVec2 TableAngledHeadersTextAlign + table_angled_headers_text_align = enum.auto() # (= 32) # ImVec2 TableAngledHeadersTextAlign + # ImGuiStyleVar_TreeLinesSize, /* original C++ signature */ + tree_lines_size = enum.auto() # (= 33) # float TreeLinesSize + # ImGuiStyleVar_TreeLinesRounding, /* original C++ signature */ + tree_lines_rounding = enum.auto() # (= 34) # float TreeLinesRounding # ImGuiStyleVar_ButtonTextAlign, /* original C++ signature */ - button_text_align = enum.auto() # (= 30) # ImVec2 ButtonTextAlign + button_text_align = enum.auto() # (= 35) # ImVec2 ButtonTextAlign # ImGuiStyleVar_SelectableTextAlign, /* original C++ signature */ - selectable_text_align = enum.auto() # (= 31) # ImVec2 SelectableTextAlign + selectable_text_align = enum.auto() # (= 36) # ImVec2 SelectableTextAlign # ImGuiStyleVar_SeparatorTextBorderSize, /* original C++ signature */ - separator_text_border_size = enum.auto() # (= 32) # float SeparatorTextBorderSize + separator_text_border_size = enum.auto() # (= 37) # float SeparatorTextBorderSize # ImGuiStyleVar_SeparatorTextAlign, /* original C++ signature */ - separator_text_align = enum.auto() # (= 33) # ImVec2 SeparatorTextAlign + separator_text_align = enum.auto() # (= 38) # ImVec2 SeparatorTextAlign # ImGuiStyleVar_SeparatorTextPadding, /* original C++ signature */ - separator_text_padding = enum.auto() # (= 34) # ImVec2 SeparatorTextPadding + 
separator_text_padding = enum.auto() # (= 39) # ImVec2 SeparatorTextPadding # ImGuiStyleVar_DockingSeparatorSize, /* original C++ signature */ - docking_separator_size = enum.auto() # (= 35) # float DockingSeparatorSize + docking_separator_size = enum.auto() # (= 40) # float DockingSeparatorSize # ImGuiStyleVar_COUNT /* original C++ signature */ # } - count = enum.auto() # (= 36) + count = enum.auto() # (= 41) -class ButtonFlags_(enum.Enum): +class ButtonFlags_(enum.IntFlag): """Flags for InvisibleButton() [extended in imgui_internal.h]""" # ImGuiButtonFlags_None = 0, /* original C++ signature */ @@ -4602,7 +4720,7 @@ class ButtonFlags_(enum.Enum): enum.auto() ) # (= 1 << 3) # InvisibleButton(): do not disable navigation/tabbing. Otherwise disabled by default. -class ColorEditFlags_(enum.Enum): +class ColorEditFlags_(enum.IntFlag): """Flags for ColorEdit3() / ColorEdit4() / ColorPicker3() / ColorPicker4() / ColorButton()""" # ImGuiColorEditFlags_None = 0, /* original C++ signature */ @@ -4723,7 +4841,7 @@ class ColorEditFlags_(enum.Enum): # Obsolete names # ImGuiColorEditFlags_RGB = ImGuiColorEditFlags_DisplayRGB, ImGuiColorEditFlags_HSV = ImGuiColorEditFlags_DisplayHSV, ImGuiColorEditFlags_HEX = ImGuiColorEditFlags_DisplayHex // [renamed in 1.69] -class SliderFlags_(enum.Enum): +class SliderFlags_(enum.IntFlag): """Flags for DragFloat(), DragInt(), SliderFloat(), SliderInt() etc. We use the same sets of flags for DragXXX() and SliderXXX() functions as the features are the same and it makes it easier to swap them. (Those are per-item flags. There is shared behavior flag too: ImGuiIO: io.ConfigDragClickToInputText) @@ -4742,7 +4860,7 @@ class SliderFlags_(enum.Enum): # ImGuiSliderFlags_NoInput = 1 << 7, /* original C++ signature */ no_input = ( enum.auto() - ) # (= 1 << 7) # Disable CTRL+Click or Enter key allowing to input text directly into the widget. + ) # (= 1 << 7) # Disable Ctrl+Click or Enter key allowing to input text directly into the widget. 
# ImGuiSliderFlags_WrapAround = 1 << 8, /* original C++ signature */ wrap_around = ( enum.auto() @@ -4750,7 +4868,7 @@ class SliderFlags_(enum.Enum): # ImGuiSliderFlags_ClampOnInput = 1 << 9, /* original C++ signature */ clamp_on_input = ( enum.auto() - ) # (= 1 << 9) # Clamp value to min/max bounds when input manually with CTRL+Click. By default CTRL+Click allows going out of bounds. + ) # (= 1 << 9) # Clamp value to min/max bounds when input manually with Ctrl+Click. By default Ctrl+Click allows going out of bounds. # ImGuiSliderFlags_ClampZeroRange = 1 << 10, /* original C++ signature */ clamp_zero_range = ( enum.auto() @@ -4766,7 +4884,7 @@ class SliderFlags_(enum.Enum): enum.auto() ) # (= 0x7000000F) # [Internal] We treat using those bits as being potentially a 'float power' argument from the previous API that has got miscast to this enum, and will trigger an assert if needed. -class MouseButton_(enum.Enum): +class MouseButton_(enum.IntFlag): """Identify a mouse button. Those values are guaranteed to be stable and we frequently use 0/1 directly. Named enums provided for convenience. """ @@ -4781,7 +4899,7 @@ class MouseButton_(enum.Enum): # } count = enum.auto() # (= 5) -class MouseCursor_(enum.Enum): +class MouseCursor_(enum.IntFlag): """Enumeration for GetMouseCursor() User code may request backend to display given cursor by calling SetMouseCursor(), which is why we have some cursors that are marked unused here """ @@ -4816,7 +4934,7 @@ class MouseCursor_(enum.Enum): # } count = enum.auto() # (= 11) -class MouseSource(enum.Enum): +class MouseSource(enum.IntFlag): """Enumeration for AddMouseSourceEvent() actual source of Mouse Input data. Historically we use "Mouse" terminology everywhere to indicate pointer data, e.g. MousePos, IsMousePressed(), io.AddMousePosEvent() But that "Mouse" data can come from different source which occasionally may be useful for application to know about. 
@@ -4837,7 +4955,7 @@ class MouseSource(enum.Enum): # } count = enum.auto() # (= 3) -class Cond_(enum.Enum): +class Cond_(enum.IntFlag): """Enumeration for ImGui::SetNextWindow***(), SetWindow***(), SetNextItem***() functions Represent a condition. Important: Treat as a regular enum! Do NOT combine multiple values using binary operators! All the functions above treat 0 as a shortcut to ImGuiCond_Always. @@ -4862,7 +4980,7 @@ class Cond_(enum.Enum): # [SECTION] Tables API flags and structures (ImGuiTableFlags, ImGuiTableColumnFlags, ImGuiTableRowFlags, ImGuiTableBgTarget, ImGuiTableSortSpecs, ImGuiTableColumnSortSpecs) # ----------------------------------------------------------------------------- -class TableFlags_(enum.Enum): +class TableFlags_(enum.IntFlag): """Flags for ImGui::BeginTable() - Important! Sizing policies have complex and subtle side effects, much more so than you would expect. Read comments/demos carefully + experiment with live demos to get acquainted with them. @@ -5022,7 +5140,7 @@ class TableFlags_(enum.Enum): enum.auto() ) # (= TableFlags_SizingFixedFit | TableFlags_SizingFixedSame | TableFlags_SizingStretchProp | TableFlags_SizingStretchSame) -class TableColumnFlags_(enum.Enum): +class TableColumnFlags_(enum.IntFlag): """Flags for ImGui::TableSetupColumn()""" # Input configuration flags @@ -5115,7 +5233,7 @@ class TableColumnFlags_(enum.Enum): enum.auto() ) # (= 1 << 30) # [Internal] Disable user resizing this column directly (it may however we resized indirectly from its left edge) -class TableRowFlags_(enum.Enum): +class TableRowFlags_(enum.IntFlag): """Flags for ImGui::TableNextRow()""" # ImGuiTableRowFlags_None = 0, /* original C++ signature */ @@ -5125,7 +5243,7 @@ class TableRowFlags_(enum.Enum): enum.auto() ) # (= 1 << 0) # Identify header row (set default background color + width of its contents accounted differently for auto column width) -class TableBgTarget_(enum.Enum): +class TableBgTarget_(enum.IntFlag): """Enum for 
ImGui::TableSetBgColor() Background colors are rendering in 3 layers: - Layer 0: draw with RowBg0 color if set, otherwise draw with ColumnBg0 if set. @@ -5225,8 +5343,6 @@ class ImNewWrapper: """Auto-generated default constructor""" pass -# This is only required so we can use the symmetrical new() - # ----------------------------------------------------------------------------- # ImVector<> # Lightweight std::vector<>-like class to avoid dragging dependencies (also, some implementations of STL with debug enabled are absurdly slow, we bypass it so our code runs fast in debug). @@ -6328,7 +6444,7 @@ class ImVector_Window_ptr: # Python specialization for ImVector def __len__(self) -> int: pass -class ImVector_ImFontAtlasCustomRect: # Python specialization for ImVector +class ImVector_ImFontConfig: # Python specialization for ImVector # #ifdef IMGUI_BUNDLE_PYTHON_API # # size_t DataAddress() { return (size_t)(Data); } /* original C++ signature */ @@ -6344,7 +6460,7 @@ class ImVector_ImFontAtlasCustomRect: # Python specialization for ImVector& src) { Size = Capacity = 0; Data = NULL; operator=(src); } /* original C++ signature */ @overload - def __init__(self, src: ImVector_ImFontAtlasCustomRect) -> None: + def __init__(self, src: ImVector_ImFontConfig) -> None: pass # inline void clear() { if (Data) { Size = Capacity = 0; IM_FREE(Data); Data = NULL; } } /* original C++ signature */ def clear(self) -> None: @@ -6368,17 +6484,17 @@ class ImVector_ImFontAtlasCustomRect: # Python specialization for ImVector= 0 && i < Size); return Data[i]; } /* original C++ signature */ @overload - def __getitem__(self, i: int) -> ImFontAtlasCustomRect: + def __getitem__(self, i: int) -> ImFontConfig: """(private API)""" pass # inline T& operator[](int i) { IM_ASSERT(i >= 0 && i < Size); return Data[i]; } /* original C++ signature */ @overload - def __getitem__(self, i: int) -> ImFontAtlasCustomRect: + def __getitem__(self, i: int) -> ImFontConfig: """(private API)""" pass # NB: It is 
illegal to call push_back/push_front/insert with a reference pointing inside the ImVector data itself! e.g. v.push_back(v[10]) is forbidden. # inline void push_back(const T& v) { if (Size == Capacity) reserve(_grow_capacity(Size + 1)); memcpy(&Data[Size], &v, sizeof(v)); Size++; } /* original C++ signature */ - def push_back(self, v: ImFontAtlasCustomRect) -> None: + def push_back(self, v: ImFontConfig) -> None: """(private API)""" pass # inline void pop_back() { IM_ASSERT(Size > 0); Size--; } /* original C++ signature */ @@ -6386,17 +6502,17 @@ class ImVector_ImFontAtlasCustomRect: # Python specialization for ImVector None: + def push_front(self, v: ImFontConfig) -> None: """(private API)""" pass - def __iter__(self) -> Iterator[ImFontAtlasCustomRect]: + def __iter__(self) -> Iterator[ImFontConfig]: pass def __len__(self) -> int: pass -class ImVector_ImFontConfig: # Python specialization for ImVector +class ImVector_ImFontConfig_ptr: # Python specialization for ImVector # #ifdef IMGUI_BUNDLE_PYTHON_API # # size_t DataAddress() { return (size_t)(Data); } /* original C++ signature */ @@ -6412,7 +6528,7 @@ class ImVector_ImFontConfig: # Python specialization for ImVector pass # inline ImVector(const ImVector& src) { Size = Capacity = 0; Data = NULL; operator=(src); } /* original C++ signature */ @overload - def __init__(self, src: ImVector_ImFontConfig) -> None: + def __init__(self, src: ImVector_ImFontConfig_ptr) -> None: pass # inline void clear() { if (Data) { Size = Capacity = 0; IM_FREE(Data); Data = NULL; } } /* original C++ signature */ def clear(self) -> None: @@ -8232,6 +8348,210 @@ class ImVector_MultiSelectTempData: # Python specialization for ImVector int: pass +class ImVector_ImTextureData_ptr: # Python specialization for ImVector + # #ifdef IMGUI_BUNDLE_PYTHON_API + # + # size_t DataAddress() { return (size_t)(Data); } /* original C++ signature */ + def data_address(self) -> int: + """(private API)""" + pass + # #endif + # + + # inline ImVector() { Size 
= Capacity = 0; Data = NULL; } /* original C++ signature */ + @overload + def __init__(self) -> None: + pass + # inline ImVector(const ImVector& src) { Size = Capacity = 0; Data = NULL; operator=(src); } /* original C++ signature */ + @overload + def __init__(self, src: ImVector_ImTextureData_ptr) -> None: + pass + # inline void clear() { if (Data) { Size = Capacity = 0; IM_FREE(Data); Data = NULL; } } /* original C++ signature */ + def clear(self) -> None: + """Important: does not destruct anything + (private API) + """ + pass + # inline void clear_destruct() { for (int n = 0; n < Size; n++) Data[n].~T(); clear(); } /* original C++ signature */ + def clear_destruct(self) -> None: + """Important: never called automatically! always explicit. + (private API) + """ + pass + # inline bool empty() const { return Size == 0; } /* original C++ signature */ + def empty(self) -> bool: + """(private API)""" + pass + # inline int size() const { return Size; } /* original C++ signature */ + def size(self) -> int: + """(private API)""" + pass + # inline const T& operator[](int i) const { IM_ASSERT(i >= 0 && i < Size); return Data[i]; } /* original C++ signature */ + @overload + def __getitem__(self, i: int) -> ImTextureData: + """(private API)""" + pass + # inline T& operator[](int i) { IM_ASSERT(i >= 0 && i < Size); return Data[i]; } /* original C++ signature */ + @overload + def __getitem__(self, i: int) -> ImTextureData: + """(private API)""" + pass + # NB: It is illegal to call push_back/push_front/insert with a reference pointing inside the ImVector data itself! e.g. v.push_back(v[10]) is forbidden. 
+ # inline void push_back(const T& v) { if (Size == Capacity) reserve(_grow_capacity(Size + 1)); memcpy(&Data[Size], &v, sizeof(v)); Size++; } /* original C++ signature */ + def push_back(self, v: ImTextureData) -> None: + """(private API)""" + pass + # inline void pop_back() { IM_ASSERT(Size > 0); Size--; } /* original C++ signature */ + def pop_back(self) -> None: + """(private API)""" + pass + # inline void push_front(const T& v) { if (Size == 0) push_back(v); else insert(Data, v); } /* original C++ signature */ + def push_front(self, v: ImTextureData) -> None: + """(private API)""" + pass + + def __iter__(self) -> Iterator[ImTextureData]: + pass + + def __len__(self) -> int: + pass + +class ImVector_ImTextureRef: # Python specialization for ImVector + # #ifdef IMGUI_BUNDLE_PYTHON_API + # + # size_t DataAddress() { return (size_t)(Data); } /* original C++ signature */ + def data_address(self) -> int: + """(private API)""" + pass + # #endif + # + + # inline ImVector() { Size = Capacity = 0; Data = NULL; } /* original C++ signature */ + @overload + def __init__(self) -> None: + pass + # inline ImVector(const ImVector& src) { Size = Capacity = 0; Data = NULL; operator=(src); } /* original C++ signature */ + @overload + def __init__(self, src: ImVector_ImTextureRef) -> None: + pass + # inline void clear() { if (Data) { Size = Capacity = 0; IM_FREE(Data); Data = NULL; } } /* original C++ signature */ + def clear(self) -> None: + """Important: does not destruct anything + (private API) + """ + pass + # inline void clear_destruct() { for (int n = 0; n < Size; n++) Data[n].~T(); clear(); } /* original C++ signature */ + def clear_destruct(self) -> None: + """Important: never called automatically! always explicit. 
+ (private API) + """ + pass + # inline bool empty() const { return Size == 0; } /* original C++ signature */ + def empty(self) -> bool: + """(private API)""" + pass + # inline int size() const { return Size; } /* original C++ signature */ + def size(self) -> int: + """(private API)""" + pass + # inline const T& operator[](int i) const { IM_ASSERT(i >= 0 && i < Size); return Data[i]; } /* original C++ signature */ + @overload + def __getitem__(self, i: int) -> ImTextureRef: + """(private API)""" + pass + # inline T& operator[](int i) { IM_ASSERT(i >= 0 && i < Size); return Data[i]; } /* original C++ signature */ + @overload + def __getitem__(self, i: int) -> ImTextureRef: + """(private API)""" + pass + # NB: It is illegal to call push_back/push_front/insert with a reference pointing inside the ImVector data itself! e.g. v.push_back(v[10]) is forbidden. + # inline void push_back(const T& v) { if (Size == Capacity) reserve(_grow_capacity(Size + 1)); memcpy(&Data[Size], &v, sizeof(v)); Size++; } /* original C++ signature */ + def push_back(self, v: ImTextureRef) -> None: + """(private API)""" + pass + # inline void pop_back() { IM_ASSERT(Size > 0); Size--; } /* original C++ signature */ + def pop_back(self) -> None: + """(private API)""" + pass + # inline void push_front(const T& v) { if (Size == 0) push_back(v); else insert(Data, v); } /* original C++ signature */ + def push_front(self, v: ImTextureRef) -> None: + """(private API)""" + pass + + def __iter__(self) -> Iterator[ImTextureRef]: + pass + + def __len__(self) -> int: + pass + +class ImVector_ImTextureRect: # Python specialization for ImVector + # #ifdef IMGUI_BUNDLE_PYTHON_API + # + # size_t DataAddress() { return (size_t)(Data); } /* original C++ signature */ + def data_address(self) -> int: + """(private API)""" + pass + # #endif + # + + # inline ImVector() { Size = Capacity = 0; Data = NULL; } /* original C++ signature */ + @overload + def __init__(self) -> None: + pass + # inline ImVector(const ImVector& 
src) { Size = Capacity = 0; Data = NULL; operator=(src); } /* original C++ signature */ + @overload + def __init__(self, src: ImVector_ImTextureRect) -> None: + pass + # inline void clear() { if (Data) { Size = Capacity = 0; IM_FREE(Data); Data = NULL; } } /* original C++ signature */ + def clear(self) -> None: + """Important: does not destruct anything + (private API) + """ + pass + # inline void clear_destruct() { for (int n = 0; n < Size; n++) Data[n].~T(); clear(); } /* original C++ signature */ + def clear_destruct(self) -> None: + """Important: never called automatically! always explicit. + (private API) + """ + pass + # inline bool empty() const { return Size == 0; } /* original C++ signature */ + def empty(self) -> bool: + """(private API)""" + pass + # inline int size() const { return Size; } /* original C++ signature */ + def size(self) -> int: + """(private API)""" + pass + # inline const T& operator[](int i) const { IM_ASSERT(i >= 0 && i < Size); return Data[i]; } /* original C++ signature */ + @overload + def __getitem__(self, i: int) -> ImTextureRect: + """(private API)""" + pass + # inline T& operator[](int i) { IM_ASSERT(i >= 0 && i < Size); return Data[i]; } /* original C++ signature */ + @overload + def __getitem__(self, i: int) -> ImTextureRect: + """(private API)""" + pass + # NB: It is illegal to call push_back/push_front/insert with a reference pointing inside the ImVector data itself! e.g. v.push_back(v[10]) is forbidden. 
+ # inline void push_back(const T& v) { if (Size == Capacity) reserve(_grow_capacity(Size + 1)); memcpy(&Data[Size], &v, sizeof(v)); Size++; } /* original C++ signature */ + def push_back(self, v: ImTextureRect) -> None: + """(private API)""" + pass + # inline void pop_back() { IM_ASSERT(Size > 0); Size--; } /* original C++ signature */ + def pop_back(self) -> None: + """(private API)""" + pass + # inline void push_front(const T& v) { if (Size == 0) push_back(v); else insert(Data, v); } /* original C++ signature */ + def push_front(self, v: ImTextureRect) -> None: + """(private API)""" + pass + + def __iter__(self) -> Iterator[ImTextureRect]: + pass + + def __len__(self) -> int: + pass + ImVector_ImTextureID = ImVector_int ImVector_ImDrawIdx = ImVector_uint @@ -8240,6 +8560,8 @@ ImVector_ID = ImVector_uint ImVector_ImU32 = ImVector_uint +ImVector_ImU16 = ImVector_uint + ImVector_ImWchar32 = ImVector_uint ImVector_ImWchar = ImVector_ImWchar32 @@ -8260,6 +8582,15 @@ ImVector_ImU8 = ImVector_uchar # ----------------------------------------------------------------------------- class Style: + # Font scaling + # - recap: ImGui::GetFontSize() == FontSizeBase * (FontScaleMain * FontScaleDpi * other_scaling_factors) + # float FontSizeBase; /* original C++ signature */ + font_size_base: float # Current base font size before external global factors are applied. Use PushFont(None, size) to modify. Use ImGui::GetFontSize() to obtain scaled value. + # float FontScaleMain; /* original C++ signature */ + font_scale_main: float # Main global scale factor. May be set by application once, or exposed to end-user. + # float FontScaleDpi; /* original C++ signature */ + font_scale_dpi: float # Additional global scale factor from viewport/monitor contents scale. When io.ConfigDpiScaleFonts is enabled, this is automatically overwritten when changing monitor DPI. + # float Alpha; /* original C++ signature */ alpha: float # Global alpha applies to everything in Dear ImGui. 
# float DisabledAlpha; /* original C++ signature */ @@ -8318,6 +8649,8 @@ class Style: scrollbar_size: float # Width of the vertical scrollbar, Height of the horizontal scrollbar. # float ScrollbarRounding; /* original C++ signature */ scrollbar_rounding: float # Radius of grab corners for scrollbar. + # float ScrollbarPadding; /* original C++ signature */ + scrollbar_padding: float # Padding of scrollbar grab within its frame (same for both axes). # float GrabMinSize; /* original C++ signature */ grab_min_size: float # Minimum width/height of a grab box for slider/scrollbar. # float GrabRounding; /* original C++ signature */ @@ -8334,6 +8667,14 @@ class Style: tab_rounding: float # Radius of upper corners of a tab. Set to 0.0 to have rectangular tabs. # float TabBorderSize; /* original C++ signature */ tab_border_size: float # Thickness of border around tabs. + # float TabMinWidthBase; /* original C++ signature */ + tab_min_width_base: ( + float # Minimum tab width, to make tabs larger than their contents. TabBar buttons are not affected. + ) + # float TabMinWidthShrink; /* original C++ signature */ + tab_min_width_shrink: ( + float # Minimum tab width after shrinking, when using ImGuiTabBarFlags_FittingPolicyMixed policy. + ) # float TabCloseButtonMinWidthSelected; /* original C++ signature */ tab_close_button_min_width_selected: ( float # -1: always visible. 0.0: visible when hovered. >0.0: visible when hovered if minimum width. @@ -8350,6 +8691,18 @@ class Style: ) # ImVec2 TableAngledHeadersTextAlign; /* original C++ signature */ table_angled_headers_text_align: ImVec2 # Alignment of angled headers within the cell + # ImGuiTreeNodeFlags TreeLinesFlags; /* original C++ signature */ + tree_lines_flags: TreeNodeFlags # Default way to draw lines connecting TreeNode hierarchy. ImGuiTreeNodeFlags_DrawLinesNone or ImGuiTreeNodeFlags_DrawLinesFull or ImGuiTreeNodeFlags_DrawLinesToNodes. 
+ # float TreeLinesSize; /* original C++ signature */ + tree_lines_size: float # Thickness of outlines when using ImGuiTreeNodeFlags_DrawLines. + # float TreeLinesRounding; /* original C++ signature */ + tree_lines_rounding: float # Radius of lines connecting child nodes to the vertical line. + # float DragDropTargetRounding; /* original C++ signature */ + drag_drop_target_rounding: float # Radius of the drag and drop target frame. + # float DragDropTargetBorderSize; /* original C++ signature */ + drag_drop_target_border_size: float # Thickness of the drag and drop target border. + # float DragDropTargetPadding; /* original C++ signature */ + drag_drop_target_padding: float # Size to expand the drag and drop target from actual target item size. # ImGuiDir ColorButtonPosition; /* original C++ signature */ color_button_position: ( Dir # Side of the color button in the ColorEdit4 widget (left/right). Defaults to ImGuiDir_Right. @@ -8372,6 +8725,8 @@ class Style: display_window_padding: ImVec2 # Apply to regular windows: amount which we enforce to keep visible when moving near edges of your screen. # ImVec2 DisplaySafeAreaPadding; /* original C++ signature */ display_safe_area_padding: ImVec2 # Apply to every windows, menus, popups, tooltips: amount where we avoid displaying contents. Adjust if you cannot see the edges of your screen (e.g. on a TV where scaling has not been configured). + # bool DockingNodeHasCloseButton; /* original C++ signature */ + docking_node_has_close_button: bool # Docking node has their own CloseButton() to close all docked windows. 
# float DockingSeparatorSize; /* original C++ signature */ docking_separator_size: float # Thickness of resizing border between docked windows # float MouseCursorScale; /* original C++ signature */ @@ -8402,8 +8757,13 @@ class Style: # ImGuiHoveredFlags HoverFlagsForTooltipMouse; /* original C++ signature */ hover_flags_for_tooltip_mouse: HoveredFlags # Default flags when using IsItemHovered(ImGuiHoveredFlags_ForTooltip) or BeginItemTooltip()/SetItemTooltip() while using mouse. # ImGuiHoveredFlags HoverFlagsForTooltipNav; /* original C++ signature */ - hover_flags_for_tooltip_nav: HoveredFlags - # Default flags when using IsItemHovered(ImGuiHoveredFlags_ForTooltip) or BeginItemTooltip()/SetItemTooltip() while using keyboard/gamepad. + hover_flags_for_tooltip_nav: HoveredFlags # Default flags when using IsItemHovered(ImGuiHoveredFlags_ForTooltip) or BeginItemTooltip()/SetItemTooltip() while using keyboard/gamepad. + + # [Internal] + # float _MainScale; /* original C++ signature */ + _main_scale: float # FIXME-WIP: Reference scale, as applied by ScaleAllSizes(). + # float _NextFrameFontSizeBase; /* original C++ signature */ + _next_frame_font_size_base: float # FIXME: Temporary hack until we finish remaining work. # [ADAPT_IMGUI_BUNDLE] # #ifdef IMGUI_BUNDLE_PYTHON_API @@ -8423,7 +8783,7 @@ class Style: # IMGUI_API ImGuiStyle(); /* original C++ signature */ def __init__(self) -> None: pass - # IMGUI_API void ScaleAllSizes(float scale_factor); /* original C++ signature */ + # IMGUI_API void ScaleAllSizes(float scale_factor); /* original C++ signature */ def scale_all_sizes(self, scale_factor: float) -> None: pass # Obsolete names @@ -8474,12 +8834,15 @@ class IO: # ImGuiBackendFlags BackendFlags; /* original C++ signature */ backend_flags: BackendFlags # = 0 // See ImGuiBackendFlags_ enum. Set by backend (imgui_impl_xxx files or custom backend) to communicate features supported by the backend. 
# ImVec2 DisplaySize; /* original C++ signature */ - display_size: ImVec2 # // Main display size, in pixels (generally == GetMainViewport()->Size). May change every frame. + display_size: ( + ImVec2 # // Main display size, in pixels (== GetMainViewport()->Size). May change every frame. + ) + # ImVec2 DisplayFramebufferScale; /* original C++ signature */ + display_framebuffer_scale: ImVec2 # = (1, 1) // Main display density. For retina display where window coordinates are different from framebuffer coordinates. This will affect font density + will end up in ImDrawData::FramebufferScale. # float DeltaTime; /* original C++ signature */ delta_time: float # = 1.0/60.0 // Time elapsed since last frame, in seconds. May change every frame. # float IniSavingRate; /* original C++ signature */ - ini_saving_rate: float - # = 5.0 // Minimum time between saving positions/sizes to .ini file, in seconds. + ini_saving_rate: float # = 5.0 // Minimum time between saving positions/sizes to .ini file, in seconds. # void* UserData; /* original C++ signature */ user_data: Any # = None // Store your own data. @@ -8488,16 +8851,10 @@ class IO: fonts: ( ImFontAtlas # // Font atlas: load, rasterize and pack one or more fonts into a single texture. ) - # float FontGlobalScale; /* original C++ signature */ - font_global_scale: float # = 1.0 // Global scale all fonts - # bool FontAllowUserScaling; /* original C++ signature */ - font_allow_user_scaling: ( - bool # = False // [OBSOLETE] Allow user scaling text of individual window with CTRL+Wheel. - ) # ImFont* FontDefault; /* original C++ signature */ font_default: ImFont # = None // Font to use on NewFrame(). Use None to uses Fonts->Fonts[0]. - # ImVec2 DisplayFramebufferScale; /* original C++ signature */ - display_framebuffer_scale: ImVec2 # = (1, 1) // For retina display or other situations where window coordinates are different from framebuffer coordinates. This generally ends up in ImDrawData::FramebufferScale. 
+ # bool FontAllowUserScaling; /* original C++ signature */ + font_allow_user_scaling: bool # = False // Allow user scaling text of individual window with Ctrl+Wheel. # Keyboard/Gamepad Navigation options # bool ConfigNavSwapGamepadButtons; /* original C++ signature */ @@ -8518,6 +8875,8 @@ class IO: # Docking options (when ImGuiConfigFlags_DockingEnable is set) # bool ConfigDockingNoSplit; /* original C++ signature */ config_docking_no_split: bool # = False // Simplified docking mode: disable window splitting, so docking is limited to merging multiple windows together into tab-bars. + # bool ConfigDockingNoDockingOver; /* original C++ signature */ + config_docking_no_docking_over: bool # = False // Simplified docking mode: disable window merging into a same tab-bar, so docking is limited to splitting windows. # bool ConfigDockingWithShift; /* original C++ signature */ config_docking_with_shift: bool # = False // Enable docking with holding Shift key (reduce visual noise, allows dropping in wider space) # bool ConfigDockingAlwaysTabBar; /* original C++ signature */ @@ -8533,7 +8892,18 @@ class IO: # bool ConfigViewportsNoDecoration; /* original C++ signature */ config_viewports_no_decoration: bool # = True // Disable default OS window decoration flag for secondary viewports. When a viewport doesn't want window decorations, ImGuiViewportFlags_NoDecoration will be set on it. Enabling decoration can create subsequent issues at OS levels (e.g. minimum window size). # bool ConfigViewportsNoDefaultParent; /* original C++ signature */ - config_viewports_no_default_parent: bool # = False // Disable default OS parenting to main viewport for secondary viewports. By default, viewports are marked with ParentViewportId = , expecting the platform backend to setup a parent/child relationship between the OS windows (some backend may ignore this). Set to True if you want the default to be 0, then all viewports will be top-level OS windows. 
+ config_viewports_no_default_parent: bool # = True // When False: set secondary viewports' ParentViewportId to main viewport ID by default. Expects the platform backend to setup a parent/child relationship between the OS windows based on this value. Some backend may ignore this. Set to True if you want viewports to automatically be parent of main viewport, otherwise all viewports will be top-level OS windows. + # bool ConfigViewportsPlatformFocusSetsImGuiFocus; /* original C++ signature */ + config_viewports_platform_focus_sets_focus: bool # = True // When a platform window is focused (e.g. using Alt+Tab, clicking Platform Title Bar), apply corresponding focus on imgui windows (may clear focus/active id from imgui windows location in other platform windows). In principle this is better enabled but we provide an opt-out, because some Linux window managers tend to eagerly focus windows (e.g. on mouse hover, or even a simple window pos/size change). + + # DPI/Scaling options + # This may keep evolving during 1.92.x releases. Expect some turbulence. + # bool ConfigDpiScaleFonts; /* original C++ signature */ + config_dpi_scale_fonts: bool # = False // [EXPERIMENTAL] Automatically overwrite style.FontScaleDpi when Monitor DPI changes. This will scale fonts but _NOT_ scale sizes/padding for now. + # bool ConfigDpiScaleViewports; /* original C++ signature */ + config_dpi_scale_viewports: ( + bool # = False // [EXPERIMENTAL] Scale Dear ImGui and Platform Windows when Monitor DPI changes. + ) # Miscellaneous options # (you can visualize and interact with all options in 'Demo->Configuration') @@ -8558,7 +8928,7 @@ class IO: # bool ConfigWindowsMoveFromTitleBarOnly; /* original C++ signature */ config_windows_move_from_title_bar_only: bool # = False // Enable allowing to move windows only when clicking on their title bar. Does not apply to windows without a title bar. 
# bool ConfigWindowsCopyContentsWithCtrlC; /* original C++ signature */ - config_windows_copy_contents_with_ctrl_c: bool # = False // [EXPERIMENTAL] CTRL+C copy the contents of focused window into the clipboard. Experimental because: (1) has known issues with nested Begin/End pairs (2) text output quality varies (3) text output is in submission order rather than spatial order. + config_windows_copy_contents_with_ctrl_c: bool # = False // [EXPERIMENTAL] Ctrl+C copy the contents of focused window into the clipboard. Experimental because: (1) has known issues with nested Begin/End pairs (2) text output quality varies (3) text output is in submission order rather than spatial order. # bool ConfigScrollbarScrollByPage; /* original C++ signature */ config_scrollbar_scroll_by_page: bool # = True // Enable scrolling page by page when clicking outside the scrollbar grab. When disabled, always scroll to clicked location. When enabled, Shift+Click scrolls to clicked location. # float ConfigMemoryCompactTimer; /* original C++ signature */ @@ -8609,7 +8979,7 @@ class IO: # Option to enable various debug tools showing buttons that will call the IM_DEBUG_BREAK() macro. # - The Item Picker tool will be available regardless of this being enabled, in order to maximize its discoverability. # - Requires a debugger being attached, otherwise IM_DEBUG_BREAK() options will appear to crash your application. - # e.g. io.ConfigDebugIsDebuggerPresent = ::IsDebuggerPresent() on Win32, or refer to ImOsIsDebuggerPresent() imgui_test_engine/imgui_te_utils.cpp for a Unix compatible version). + # e.g. io.ConfigDebugIsDebuggerPresent = ::IsDebuggerPresent() on Win32, or refer to ImOsIsDebuggerPresent() imgui_test_engine/imgui_te_utils.cpp for a Unix compatible version. # bool ConfigDebugIsDebuggerPresent; /* original C++ signature */ config_debug_is_debugger_present: bool # = False // Enable various tools calling IM_DEBUG_BREAK(). 
@@ -8730,9 +9100,8 @@ class IO: pass # IMGUI_API void ClearInputMouse(); /* original C++ signature */ def clear_input_mouse(self) -> None: + """Clear current mouse state.""" pass - # Clear current mouse state. - # ------------------------------------------------------------------ # Output - Updated by NewFrame() or EndFrame()/Render() # (when reading from the io.WantCaptureMouse, io.WantCaptureKeyboard flags to dispatch your inputs, it is @@ -8785,7 +9154,7 @@ class IO: np.ndarray ) # ndarray[type=bool, size=5] # Mouse buttons: 0=left, 1=right, 2=middle + extras (ImGuiMouseButton_COUNT == 5). Dear ImGui mostly uses left and right buttons. Other buttons allow us to track if the mouse is being used by your application + available to user as a convenience via IsMouse** API. # float MouseWheel; /* original C++ signature */ - mouse_wheel: float # Mouse wheel Vertical: 1 unit scrolls about 5 lines text. >0 scrolls Up, <0 scrolls Down. Hold SHIFT to turn vertical scroll into horizontal scroll. + mouse_wheel: float # Mouse wheel Vertical: 1 unit scrolls about 5 lines text. >0 scrolls Up, <0 scrolls Down. Hold Shift to turn vertical scroll into horizontal scroll. # float MouseWheelH; /* original C++ signature */ mouse_wheel_h: float # Mouse wheel Horizontal. >0 scrolls Left, <0 scrolls Right. Most users don't have a mouse with a horizontal wheel, may not be filled by all backends. # ImGuiMouseSource MouseSource; /* original C++ signature */ @@ -8793,17 +9162,17 @@ class IO: # ImGuiID MouseHoveredViewport; /* original C++ signature */ mouse_hovered_viewport: ID # (Optional) Modify using io.AddMouseViewportEvent(). With multi-viewports: viewport the OS mouse is hovering. If possible _IGNORING_ viewports with the ImGuiViewportFlags_NoInputs flag is much better (few backends can handle that). Set io.BackendFlags |= ImGuiBackendFlags_HasMouseHoveredViewport if you can provide this info. 
If you don't imgui will infer the value using the rectangles and last focused time of the viewports it knows about (ignoring other OS windows). # bool KeyCtrl; /* original C++ signature */ - key_ctrl: bool # Keyboard modifier down: Control + key_ctrl: bool # Keyboard modifier down: Ctrl (non-macOS), Cmd (macOS) # bool KeyShift; /* original C++ signature */ key_shift: bool # Keyboard modifier down: Shift # bool KeyAlt; /* original C++ signature */ key_alt: bool # Keyboard modifier down: Alt # bool KeySuper; /* original C++ signature */ - key_super: bool # Keyboard modifier down: Cmd/Super/Windows + key_super: bool # Keyboard modifier down: Windows/Super (non-macOS), Ctrl (macOS) # Other state maintained from data above + IO function calls # ImGuiKeyChord KeyMods; /* original C++ signature */ - key_mods: KeyChord # Key mods flags (any of ImGuiMod_Ctrl/ImGuiMod_Shift/ImGuiMod_Alt/ImGuiMod_Super flags, same as io.KeyCtrl/KeyShift/KeyAlt/KeySuper but merged into flags. Read-only, updated by NewFrame() + key_mods: KeyChord # Key mods flags (any of ImGuiMod_Ctrl/ImGuiMod_Shift/ImGuiMod_Alt/ImGuiMod_Super flags, same as io.KeyCtrl/KeyShift/KeyAlt/KeySuper but merged into flags). Read-only, updated by NewFrame() # bool WantCaptureMouseUnlessPopupClose; /* original C++ signature */ want_capture_mouse_unless_popup_close: bool # Alternative to WantCaptureMouse: (WantCaptureMouse == True && WantCaptureMouseUnlessPopupClose == False) when a click over None is expected to close a popup. # ImVec2 MousePosPrev; /* original C++ signature */ @@ -8843,10 +9212,10 @@ class IO: np.ndarray ) # ndarray[type=bool, size=5] # Track if button was clicked inside a dear imgui window. # bool MouseWheelRequestAxisSwap; /* original C++ signature */ - mouse_wheel_request_axis_swap: bool # On a non-Mac system, holding SHIFT requests WheelY to perform the equivalent of a WheelX event. On a Mac system this is already enforced by the system. 
+ mouse_wheel_request_axis_swap: bool # On a non-Mac system, holding Shift requests WheelY to perform the equivalent of a WheelX event. On a Mac system this is already enforced by the system. # bool MouseCtrlLeftAsRightClick; /* original C++ signature */ mouse_ctrl_left_as_right_click: ( - bool # (OSX) Set to True when the current click was a Ctrl+click that spawned a simulated right click + bool # (OSX) Set to True when the current click was a Ctrl+Click that spawned a simulated right click ) # float MouseDownDuration[5]; /* original C++ signature */ mouse_down_duration: ( @@ -8867,8 +9236,7 @@ class IO: # ImWchar16 InputQueueSurrogate; /* original C++ signature */ input_queue_surrogate: ImWchar16 # For AddInputCharacterUTF16() # ImVector InputQueueCharacters; /* original C++ signature */ - input_queue_characters: ImVector_ImWchar - # Queue of _characters_ input (obtained by platform backend). Fill using AddInputCharacter() helper. + input_queue_characters: ImVector_ImWchar # Queue of _characters_ input (obtained by platform backend). Fill using AddInputCharacter() helper. # Legacy: before 1.87, we required backend to fill io.KeyMap[] (imgui->native map) during initialization and io.KeysDown[] (native indices) every frame. # This is still temporarily supported as a legacy feature. However the new preferred scheme is for backend to call io.AddKeyEvent(). @@ -8880,9 +9248,6 @@ class IO: # float NavInputs[ImGuiNavInput_COUNT]; // [LEGACY] Since 1.88, NavInputs[] was removed. Backends from 1.60 to 1.86 won't build. Feed gamepad inputs via io.AddKeyEvent() and ImGuiKey_GamepadXXX enums. # None* ImeWindowHandle; // [Obsoleted in 1.87] Set ImGuiViewport::PlatformHandleRaw instead. Set this to your HWND to get automatic IME cursor positioning. - # Legacy: before 1.91.1, clipboard functions were stored in ImGuiIO instead of ImGuiPlatformIO. - # As this is will affect all users of custom engines/backends, we are providing proper legacy redirection (will obsolete). 
- # IMGUI_API ImGuiIO(); /* original C++ signature */ def __init__(self) -> None: pass @@ -8890,16 +9255,21 @@ class IO: # #ifdef IMGUI_BUNDLE_PYTHON_API # - # IMGUI_API void SetIniFilename(const char* filename); /* original C++ signature */ - def set_ini_filename(self, filename: str) -> None: + + # IMGUI_API void SetIniFilename(std::optional filename); /* original C++ signature */ + def set_ini_filename(self, filename: Optional[str]) -> None: + """- The disk functions are automatically called if IniFilename != None + - Set IniFilename to None to load/save manually. Read io.WantSaveIniSettings description about handling .ini saving manually. + - Important: default value "imgui.ini" is relative to current working dir! Most apps will want to lock this to an absolute path (e.g. same path as executables). + """ + pass + # IMGUI_API std::string GetIniFilename() const; /* original C++ signature */ + def get_ini_filename(self) -> str: pass # IMGUI_API void SetLogFilename(const char* filename); /* original C++ signature */ def set_log_filename(self, filename: str) -> None: pass - # IMGUI_API std::string GetIniFilename(); /* original C++ signature */ - def get_ini_filename(self) -> str: - pass - # IMGUI_API std::string GetLogFilename(); /* original C++ signature */ + # IMGUI_API std::string GetLogFilename() const; /* original C++ signature */ def get_log_filename(self) -> str: pass # #endif @@ -8945,13 +9315,13 @@ class InputTextCallbackData: # int BufTextLen; /* original C++ signature */ buf_text_len: int # Text length (in bytes) // Read-write // [Resize,Completion,History,Always] Exclude zero-terminator storage. In C land: == strlen(some_text), in C++ land: string.length() # int BufSize; /* original C++ signature */ - buf_size: int # Buffer size (in bytes) = capacity+1 // Read-only // [Resize,Completion,History,Always] Include zero-terminator storage. 
In C land == ARRAYSIZE(my_char_array), in C++ land: string.capacity()+1 + buf_size: int # Buffer size (in bytes) = capacity+1 // Read-only // [Resize,Completion,History,Always] Include zero-terminator storage. In C land: == ARRAYSIZE(my_char_array), in C++ land: string.capacity()+1 # bool BufDirty; /* original C++ signature */ buf_dirty: bool # Set if you modify Buf/BufTextLen! // Write // [Completion,History,Always] # int CursorPos; /* original C++ signature */ cursor_pos: int # // Read-write // [Completion,History,Always] # int SelectionStart; /* original C++ signature */ - selection_start: int # // Read-write // [Completion,History,Always] == to SelectionEnd when no selection) + selection_start: int # // Read-write // [Completion,History,Always] == to SelectionEnd when no selection # int SelectionEnd; /* original C++ signature */ selection_end: int # // Read-write // [Completion,History,Always] @@ -9305,6 +9675,16 @@ class Storage: """Auto-generated default constructor""" pass +class ListClipperFlags_(enum.IntFlag): + """Flags for ImGuiListClipper (currently not fully exposed in function calls: a future refactor will likely add this to ImGuiListClipper::Begin function equivalent)""" + + # ImGuiListClipperFlags_None = 0, /* original C++ signature */ + none = enum.auto() # (= 0) + # ImGuiListClipperFlags_NoSetTableRowCounters = 1 << 0, /* original C++ signature */ + no_set_table_row_counters = ( + enum.auto() + ) # (= 1 << 0) # [Internal] Disabled modifying table row counters. Avoid assumption that 1 clipper item == 1 table row. + class ListClipper: """Helper: Manually clip large list of items. 
If you have lots evenly spaced items and you have random access to the list, you can perform coarse @@ -9338,7 +9718,7 @@ class ListClipper: items_count: int # [Internal] Number of items # float ItemsHeight; /* original C++ signature */ items_height: float # [Internal] Height of item after a first step and item submission can calculate it - # float StartPosY; /* original C++ signature */ + # double StartPosY; /* original C++ signature */ start_pos_y: float # [Internal] Cursor position at the time of Begin() or after table frozen rows are all processed # double StartSeekOffsetY; /* original C++ signature */ start_seek_offset_y: ( @@ -9346,6 +9726,8 @@ class ListClipper: ) # void* TempData; /* original C++ signature */ temp_data: Any # [Internal] Internal data + # ImGuiListClipperFlags Flags; /* original C++ signature */ + flags: ListClipperFlags # [Internal] Flags, currently not yet well exposed. # IMGUI_API ImGuiListClipper(); /* original C++ signature */ def __init__(self) -> None: @@ -9454,7 +9836,7 @@ class ImColor: # Multi-selection system # Documentation at: https://github.com/ocornut/imgui/wiki/Multi-Select # - Refer to 'Demo->Widgets->Selection State & Multi-Select' for demos using this. -# - This system implements standard multi-selection idioms (CTRL+Mouse/Keyboard, SHIFT+Mouse/Keyboard, etc) +# - This system implements standard multi-selection idioms (Ctrl+Mouse/Keyboard, Shift+Mouse/Keyboard, etc) # with support for clipper (skipping non-visible items), box-select and many other details. # - Selectable(), Checkbox() are supported but custom widgets may use it as well. # - TreeNode() is technically supported but... using this correctly is more complicated: you need some sort of linear/random access to your tree, @@ -9487,7 +9869,7 @@ class ImColor: # - As most users will want to store an index, for convenience and to reduce confusion we use ImS64 instead of None*, # being syntactically easier to downcast. 
Feel free to reinterpret_cast and store a pointer inside. -class MultiSelectFlags_(enum.Enum): +class MultiSelectFlags_(enum.IntFlag): """Flags for BeginMultiSelect()""" # ImGuiMultiSelectFlags_None = 0, /* original C++ signature */ @@ -9497,7 +9879,7 @@ class MultiSelectFlags_(enum.Enum): enum.auto() ) # (= 1 << 0) # Disable selecting more than one item. This is available to allow single-selection code to share same code/logic if desired. It essentially disables the main purpose of BeginMultiSelect() tho! # ImGuiMultiSelectFlags_NoSelectAll = 1 << 1, /* original C++ signature */ - no_select_all = enum.auto() # (= 1 << 1) # Disable CTRL+A shortcut to select all. + no_select_all = enum.auto() # (= 1 << 1) # Disable Ctrl+A shortcut to select all. # ImGuiMultiSelectFlags_NoRangeSelect = 1 << 2, /* original C++ signature */ no_range_select = ( enum.auto() @@ -9549,6 +9931,10 @@ class MultiSelectFlags_(enum.Enum): nav_wrap_x = ( enum.auto() ) # (= 1 << 16) # [Temporary] Enable navigation wrapping on X axis. Provided as a convenience because we don't have a design for the general Nav API for this yet. When the more general feature be public we may obsolete this flag in favor of new one. + # ImGuiMultiSelectFlags_NoSelectOnRightClick = 1 << 17, /* original C++ signature */ + no_select_on_right_click = ( + enum.auto() + ) # (= 1 << 17) # Disable default right-click processing, which selects item on mouse down, and is designed for context-menus. class MultiSelectIO: """Main IO structure returned by BeginMultiSelect()/EndMultiSelect(). 
@@ -9594,7 +9980,7 @@ class MultiSelectIO: """ pass -class SelectionRequestType(enum.Enum): +class SelectionRequestType(enum.IntFlag): """Selection request type""" # ImGuiSelectionRequestType_None = 0, /* original C++ signature */ @@ -9748,13 +10134,13 @@ class ImDrawCmd: - VtxOffset: When 'io.BackendFlags & ImGuiBackendFlags_RendererHasVtxOffset' is enabled, this fields allow us to render meshes larger than 64K vertices while keeping 16-bit indices. Backends made for <1.71. will typically ignore the VtxOffset fields. - - The ClipRect/TextureId/VtxOffset fields must be contiguous as we memcmp() them together (this is asserted for). + - The ClipRect/TexRef/VtxOffset fields must be contiguous as we memcmp() them together (this is asserted for). """ # ImVec4 ClipRect; /* original C++ signature */ clip_rect: ImVec4 # 4*4 // Clipping rectangle (x1, y1, x2, y2). Subtract ImDrawData->DisplayPos to get clipping rectangle in "viewport" coordinates - # ImTextureID TextureId; /* original C++ signature */ - texture_id: ImTextureID # 4-8 // User-provided texture ID. Set by user in ImfontAtlas::SetTexID() for fonts or passed to Image*() functions. Ignore if never using images or multiple fonts atlas. + # ImTextureRef TexRef; /* original C++ signature */ + tex_ref: ImTextureRef # 16 // Reference to a font/texture atlas (where backend called ImTextureData::SetTexID()) or to a user-provided texture ID (via e.g. ImGui::Image() calls). Both will lead to a ImTextureID value. # unsigned int VtxOffset; /* original C++ signature */ vtx_offset: int # 4 // Start offset in vertex buffer. ImGuiBackendFlags_RendererHasVtxOffset: always 0, otherwise may be >0 to support meshes larger than 64K vertices with 16-bit indices. 
# unsigned int IdxOffset; /* original C++ signature */ @@ -9772,10 +10158,13 @@ class ImDrawCmd: def __init__(self) -> None: """Also ensure our padding fields are zeroed""" pass - # inline ImTextureID GetTexID() const { return TextureId; } /* original C++ signature */ + # Since 1.83: returns ImTextureID associated with this draw call. Warning: DO NOT assume this is always same as 'TextureId' (we will change this function for an upcoming feature) + # Since 1.92: removed ImDrawCmd::TextureId field, the getter function must be used! + # inline ImTextureID GetTexID() const; /* original C++ signature */ def get_tex_id(self) -> ImTextureID: - """Since 1.83: returns ImTextureID associated with this draw call. Warning: DO NOT assume this is always same as 'TextureId' (we will change this function for an upcoming feature) - (private API) + """(private API) + + == (TexRef._TexData ? TexRef._TexData->TexID : TexRef._TexID) """ pass @@ -9811,19 +10200,19 @@ class ImDrawCmdHeader: # ImVec4 ClipRect; /* original C++ signature */ clip_rect: ImVec4 - # ImTextureID TextureId; /* original C++ signature */ - texture_id: ImTextureID + # ImTextureRef TexRef; /* original C++ signature */ + tex_ref: ImTextureRef # unsigned int VtxOffset; /* original C++ signature */ vtx_offset: int - # ImDrawCmdHeader(ImVec4 ClipRect = ImVec4(), ImTextureID TextureId = ImTextureID()); /* original C++ signature */ - def __init__(self, clip_rect: Optional[ImVec4Like] = None, texture_id: Optional[ImTextureID] = None) -> None: + # ImDrawCmdHeader(ImVec4 ClipRect = ImVec4(), ImTextureRef TexRef = ImTextureRef()); /* original C++ signature */ + def __init__(self, clip_rect: Optional[ImVec4Like] = None, tex_ref: Optional[ImTextureRef] = None) -> None: """Auto-generated default constructor with named params Python bindings defaults: If any of the params below is None, then its default value below will be used: * ClipRect: ImVec4() - * TextureId: ImTextureID() + * TexRef: ImTextureRef() """ pass @@ -9883,7 
+10272,7 @@ class ImDrawListSplitter: def set_current_channel(self, draw_list: ImDrawList, channel_idx: int) -> None: pass -class ImDrawFlags_(enum.Enum): +class ImDrawFlags_(enum.IntFlag): """Flags for ImDrawList functions (Legacy: bit 0 must always correspond to ImDrawFlags_Closed to be backward compatible with old API using a bool. Bits 1..3 must be unused) """ @@ -9938,7 +10327,7 @@ class ImDrawFlags_(enum.Enum): # } round_corners_mask_ = enum.auto() # (= ImDrawFlags_.round_corners_all | ImDrawFlags_.round_corners_none) -class ImDrawListFlags_(enum.Enum): +class ImDrawListFlags_(enum.IntFlag): """Flags for ImDrawList instance. Those are set automatically by ImGui:: functions from ImGuiIO settings, and generally not manipulated directly. It is however possible to temporarily alter flags between calls to ImDrawList:: functions. """ @@ -10005,8 +10394,8 @@ class ImDrawList: _splitter: ImDrawListSplitter # [Internal] for channels api (note: prefer using your own persistent instance of ImDrawListSplitter!) 
# ImVector _ClipRectStack; /* original C++ signature */ _clip_rect_stack: ImVector_ImVec4 # [Internal] - # ImVector _TextureIdStack; /* original C++ signature */ - _texture_id_stack: ImVector_ImTextureID # [Internal] + # ImVector _TextureStack; /* original C++ signature */ + _texture_stack: ImVector_ImTextureRef # [Internal] # ImVector _CallbacksDataBuf; /* original C++ signature */ _callbacks_data_buf: ImVector_ImU8 # [Internal] # float _FringeScale; /* original C++ signature */ @@ -10032,11 +10421,11 @@ class ImDrawList: # IMGUI_API void PopClipRect(); /* original C++ signature */ def pop_clip_rect(self) -> None: pass - # IMGUI_API void PushTextureID(ImTextureID texture_id); /* original C++ signature */ - def push_texture_id(self, texture_id: ImTextureID) -> None: + # IMGUI_API void PushTexture(ImTextureRef tex_ref); /* original C++ signature */ + def push_texture(self, tex_ref: ImTextureRef) -> None: pass - # IMGUI_API void PopTextureID(); /* original C++ signature */ - def pop_texture_id(self) -> None: + # IMGUI_API void PopTexture(); /* original C++ signature */ + def pop_texture(self) -> None: pass # inline ImVec2 GetClipRectMin() const { const ImVec4& cr = _ClipRectStack.back(); return ImVec2(cr.x, cr.y); } /* original C++ signature */ def get_clip_rect_min(self) -> ImVec2: @@ -10166,10 +10555,9 @@ class ImDrawList: def add_bezier_quadratic( self, p1: ImVec2Like, p2: ImVec2Like, p3: ImVec2Like, col: ImU32, thickness: float, num_segments: int = 0 ) -> None: + """Quadratic Bezier (3 control points)""" pass - # Quadratic Bezier (3 control points) - - # #ifdef IMGUI_BUNDLE_PYTHON_API + # #ifdef IMGUI_BUNDLE_PYTHON_API # # - Only simple polygons are supported by filling functions (no self-intersections, no holes). # - Concave polygon fill is more expensive than convex one: it has O(N^2) complexity. Provided as a convenience fo user but not used by main library. 
@@ -10182,17 +10570,17 @@ class ImDrawList: # IMGUI_API void AddConcavePolyFilled(const std::vector& points, ImU32 col); /* original C++ signature */ def add_concave_poly_filled(self, points: List[ImVec2Like], col: ImU32) -> None: pass - # #endif + # #endif # # Image primitives - # - Read FAQ to understand what ImTextureID is. + # - Read FAQ to understand what ImTextureID/ImTextureRef are. # - "p_min" and "p_max" represent the upper-left and lower-right corners of the rectangle. # - "uv_min" and "uv_max" represent the normalized texture coordinates to use for those corners. Using (0,0)->(1,1) texture coordinates will generally display the entire texture. - # IMGUI_API void AddImage(ImTextureID user_texture_id, const ImVec2& p_min, const ImVec2& p_max, const ImVec2& uv_min = ImVec2(0, 0), const ImVec2& uv_max = ImVec2(1, 1), ImU32 col = IM_COL32_WHITE); /* original C++ signature */ + # IMGUI_API void AddImage(ImTextureRef tex_ref, const ImVec2& p_min, const ImVec2& p_max, const ImVec2& uv_min = ImVec2(0, 0), const ImVec2& uv_max = ImVec2(1, 1), ImU32 col = IM_COL32_WHITE); /* original C++ signature */ def add_image( self, - user_texture_id: ImTextureID, + tex_ref: ImTextureRef, p_min: ImVec2Like, p_max: ImVec2Like, uv_min: Optional[ImVec2Like] = None, @@ -10205,10 +10593,10 @@ class ImDrawList: * uv_max: ImVec2(1, 1) """ pass - # IMGUI_API void AddImageQuad(ImTextureID user_texture_id, const ImVec2& p1, const ImVec2& p2, const ImVec2& p3, const ImVec2& p4, const ImVec2& uv1 = ImVec2(0, 0), const ImVec2& uv2 = ImVec2(1, 0), const ImVec2& uv3 = ImVec2(1, 1), const ImVec2& uv4 = ImVec2(0, 1), ImU32 col = IM_COL32_WHITE); /* original C++ signature */ + # IMGUI_API void AddImageQuad(ImTextureRef tex_ref, const ImVec2& p1, const ImVec2& p2, const ImVec2& p3, const ImVec2& p4, const ImVec2& uv1 = ImVec2(0, 0), const ImVec2& uv2 = ImVec2(1, 0), const ImVec2& uv3 = ImVec2(1, 1), const ImVec2& uv4 = ImVec2(0, 1), ImU32 col = IM_COL32_WHITE); /* original C++ signature */ def 
add_image_quad( self, - user_texture_id: ImTextureID, + tex_ref: ImTextureRef, p1: ImVec2Like, p2: ImVec2Like, p3: ImVec2Like, @@ -10227,10 +10615,10 @@ class ImDrawList: * uv4: ImVec2(0, 1) """ pass - # IMGUI_API void AddImageRounded(ImTextureID user_texture_id, const ImVec2& p_min, const ImVec2& p_max, const ImVec2& uv_min, const ImVec2& uv_max, ImU32 col, float rounding, ImDrawFlags flags = 0); /* original C++ signature */ + # IMGUI_API void AddImageRounded(ImTextureRef tex_ref, const ImVec2& p_min, const ImVec2& p_max, const ImVec2& uv_min, const ImVec2& uv_max, ImU32 col, float rounding, ImDrawFlags flags = 0); /* original C++ signature */ def add_image_rounded( self, - user_texture_id: ImTextureID, + tex_ref: ImTextureRef, p_min: ImVec2Like, p_max: ImVec2Like, uv_min: ImVec2Like, @@ -10300,7 +10688,7 @@ class ImDrawList: pass # IMGUI_API ImDrawList* CloneOutput() const; /* original C++ signature */ def clone_output(self) -> ImDrawList: - """Create a clone of the CmdBuffer/IdxBuffer/VtxBuffer.""" + """Create a clone of the CmdBuffer/IdxBuffer/VtxBuffer. For multi-threaded rendering, consider using `imgui_threaded_rendering` from https://github.com/ocornut/imgui_club instead.""" pass # Advanced: Channels # - Use to split render into layers. By switching channels to can render out-of-order (e.g. 
submit FG primitives before BG primitives) @@ -10373,6 +10761,9 @@ class ImDrawList: # inline None PathBezierCurveTo(const ImVec2& p2, const ImVec2& p3, const ImVec2& p4, int num_segments = 0) { PathBezierCubicCurveTo(p2, p3, p4, num_segments); } // OBSOLETED in 1.80 (Jan 2021) # [Internal helpers] + # IMGUI_API void _SetDrawListSharedData(ImDrawListSharedData* data); /* original C++ signature */ + def _set_draw_list_shared_data(self, data: ImDrawListSharedData) -> None: + pass # IMGUI_API void _ResetForNewFrame(); /* original C++ signature */ def _reset_for_new_frame(self) -> None: pass @@ -10388,14 +10779,14 @@ class ImDrawList: # IMGUI_API void _OnChangedClipRect(); /* original C++ signature */ def _on_changed_clip_rect(self) -> None: pass - # IMGUI_API void _OnChangedTextureID(); /* original C++ signature */ - def _on_changed_texture_id(self) -> None: + # IMGUI_API void _OnChangedTexture(); /* original C++ signature */ + def _on_changed_texture(self) -> None: pass # IMGUI_API void _OnChangedVtxOffset(); /* original C++ signature */ def _on_changed_vtx_offset(self) -> None: pass - # IMGUI_API void _SetTextureID(ImTextureID texture_id); /* original C++ signature */ - def _set_texture_id(self, texture_id: ImTextureID) -> None: + # IMGUI_API void _SetTexture(ImTextureRef tex_ref); /* original C++ signature */ + def _set_texture(self, tex_ref: ImTextureRef) -> None: pass # IMGUI_API int _CalcCircleAutoSegmentCount(float radius) const; /* original C++ signature */ def _calc_circle_auto_segment_count(self, radius: float) -> int: @@ -10418,7 +10809,7 @@ class ImDrawData: # bool Valid; /* original C++ signature */ valid: bool # Only valid after Render() is called and before the next NewFrame() is called. # int CmdListsCount; /* original C++ signature */ - cmd_lists_count: int # Number of ImDrawList* to render + cmd_lists_count: int # == CmdLists.Size. (OBSOLETE: exists for legacy reasons). Number of ImDrawList* to render. 
# int TotalIdxCount; /* original C++ signature */ total_idx_count: int # For convenience, sum of all ImDrawList's IdxBuffer.Size # int TotalVtxCount; /* original C++ signature */ @@ -10430,11 +10821,13 @@ class ImDrawData: # ImVec2 DisplaySize; /* original C++ signature */ display_size: ImVec2 # Size of the viewport to render (== GetMainViewport()->Size for the main viewport, == io.DisplaySize in most single-viewport applications) # ImVec2 FramebufferScale; /* original C++ signature */ - framebuffer_scale: ImVec2 # Amount of pixels for each unit of DisplaySize. Based on io.DisplayFramebufferScale. Generally (1,1) on normal display, (2,2) on OSX with Retina display. + framebuffer_scale: ImVec2 # Amount of pixels for each unit of DisplaySize. Copied from viewport->FramebufferScale (== io.DisplayFramebufferScale for main viewport). Generally (1,1) on normal display, (2,2) on OSX with Retina display. # ImGuiViewport* OwnerViewport; /* original C++ signature */ owner_viewport: ( Viewport # Viewport carrying the ImDrawData instance, might be of use to the renderer (generally not). ) + # ImVector* Textures; /* original C++ signature */ + textures: ImVector_ImTextureData_ptr # List of textures to update. Most of the times the list is shared by all ImDrawData, has only 1 texture and it doesn't need any update. This almost always points to ImGui::GetPlatformIO().Textures[]. May be overridden or set to None if you want to manually update textures. # ImDrawData() { Clear(); } /* original C++ signature */ def __init__(self) -> None: @@ -10456,6 +10849,161 @@ class ImDrawData: """Helper to scale the ClipRect field of each ImDrawCmd. 
Use if your final output buffer is at a different scale than Dear ImGui expects, or if there is a difference between your window resolution and framebuffer resolution.""" pass +# ----------------------------------------------------------------------------- +# [SECTION] Texture API (ImTextureFormat, ImTextureStatus, ImTextureRect, ImTextureData) +# ----------------------------------------------------------------------------- +# In principle, the only data types that user/application code should care about are 'ImTextureRef' and 'ImTextureID'. +# They are defined above in this header file. Read their description to the difference between ImTextureRef and ImTextureID. +# FOR ALL OTHER ImTextureXXXX TYPES: ONLY CORE LIBRARY AND RENDERER BACKENDS NEED TO KNOW AND CARE ABOUT THEM. +# ----------------------------------------------------------------------------- + +class ImTextureFormat(enum.IntFlag): + """We intentionally support a limited amount of texture formats to limit burden on CPU-side code and extension. + Most standard backends only support RGBA32 but we provide a single channel option for low-resource/embedded systems. + """ + + # ImTextureFormat_RGBA32, /* original C++ signature */ + rgba32 = ( + enum.auto() + ) # (= 0) # 4 components per pixel, each is unsigned 8-bit. Total size = TexWidth * TexHeight * 4 + # ImTextureFormat_Alpha8, /* original C++ signature */ + alpha8 = enum.auto() # (= 1) # 1 component per pixel, each is unsigned 8-bit. Total size = TexWidth * TexHeight + +class ImTextureStatus(enum.IntFlag): + """Status of a texture to communicate with Renderer Backend.""" + + # ImTextureStatus_OK, /* original C++ signature */ + ok = enum.auto() # (= 0) + # ImTextureStatus_Destroyed, /* original C++ signature */ + destroyed = enum.auto() # (= 1) # Backend destroyed the texture. + # ImTextureStatus_WantCreate, /* original C++ signature */ + want_create = enum.auto() # (= 2) # Requesting backend to create the texture. Set status OK when done. 
+ # ImTextureStatus_WantUpdates, /* original C++ signature */ + want_updates = ( + enum.auto() + ) # (= 3) # Requesting backend to update specific blocks of pixels (write to texture portions which have never been used before). Set status OK when done. + # ImTextureStatus_WantDestroy, /* original C++ signature */ + want_destroy = enum.auto() # (= 4) # Requesting backend to destroy the texture. Set status to Destroyed when done. + +class ImTextureRect: + """Coordinates of a rectangle within a texture. + When a texture is in ImTextureStatus_WantUpdates state, we provide a list of individual rectangles to copy to the graphics system. + You may use ImTextureData::Updates[] for the list, or ImTextureData::UpdateBox for a single bounding box. + """ + + # unsigned short x, /* original C++ signature */ + x: int # Upper-left coordinates of rectangle to update + # y; /* original C++ signature */ + y: int # Upper-left coordinates of rectangle to update + # unsigned short w, /* original C++ signature */ + w: int # Size of rectangle to update (in pixels) + # h; /* original C++ signature */ + h: int # Size of rectangle to update (in pixels) + # ImTextureRect(); /* original C++ signature */ + def __init__(self) -> None: + """Auto-generated default constructor""" + pass + +class ImTextureData: + """Specs and pixel storage for a texture used by Dear ImGui. + This is only useful for (1) core library and (2) backends. End-user/applications do not need to care about this. + Renderer Backends will create a GPU-side version of this. + Why does we store two identifiers: TexID and BackendUserData? + - ImTextureID TexID = lower-level identifier stored in ImDrawCmd. ImDrawCmd can refer to textures not created by the backend, and for which there's no ImTextureData. + - None* BackendUserData = higher-level opaque storage for backend own book-keeping. Some backends may have enough with TexID and not need both. + In columns below: who reads/writes each fields? 
'r'=read, 'w'=write, 'core'=main library, 'backend'=renderer backend + """ + + # ------------------------------------------ core / backend --------------------------------------- + # int UniqueID; /* original C++ signature */ + unique_id: int # w - // [DEBUG] Sequential index to facilitate identifying a texture when debugging/printing. Unique per atlas. + # ImTextureStatus Status; /* original C++ signature */ + status: ImTextureStatus # rw rw // ImTextureStatus_OK/_WantCreate/_WantUpdates/_WantDestroy. Always use SetStatus() to modify! + # void* BackendUserData; /* original C++ signature */ + backend_user_data: Any # - rw // Convenience storage for backend. Some backends may have enough with TexID. + # ImTextureID TexID; /* original C++ signature */ + tex_id: ImTextureID # r w // Backend-specific texture identifier. Always use SetTexID() to modify! The identifier will stored in ImDrawCmd::GetTexID() and passed to backend's RenderDrawData function. + # ImTextureFormat Format; /* original C++ signature */ + format: ImTextureFormat # w r // ImTextureFormat_RGBA32 (default) or ImTextureFormat_Alpha8 + # int Width; /* original C++ signature */ + width: int # w r // Texture width + # int Height; /* original C++ signature */ + height: int # w r // Texture height + # int BytesPerPixel; /* original C++ signature */ + bytes_per_pixel: int # w r // 4 or 1 + # uchar* Pixels; /* original C++ signature */ + pixels: ( + uchar # w r // Pointer to buffer holding 'Width*Height' pixels and 'Width*Height*BytesPerPixels' bytes. + ) + # ImTextureRect UsedRect; /* original C++ signature */ + used_rect: ImTextureRect # w r // Bounding box encompassing all past and queued Updates[]. + # ImTextureRect UpdateRect; /* original C++ signature */ + update_rect: ImTextureRect # w r // Bounding box encompassing all queued Updates[]. + # ImVector Updates; /* original C++ signature */ + updates: ImVector_ImTextureRect # w r // Array of individual updates. 
+ # int UnusedFrames; /* original C++ signature */ + unused_frames: int # w r // In order to facilitate handling Status==WantDestroy in some backend: this is a count successive frames where the texture was not used. Always >0 when Status==WantDestroy. + # unsigned short RefCount; /* original C++ signature */ + ref_count: int # w r // Number of contexts using this texture. Used during backend shutdown. + # bool UseColors; /* original C++ signature */ + use_colors: ( + bool # w r // Tell whether our texture data is known to use colors (rather than just white + alpha). + ) + # bool WantDestroyNextFrame; /* original C++ signature */ + want_destroy_next_frame: bool # rw - // [Internal] Queued to set ImTextureStatus_WantDestroy next frame. May still be used in the current frame. + + # ImTextureData() { memset(this, 0, sizeof(*this)); Status = ImTextureStatus_Destroyed; TexID = ImTextureID_Invalid; } /* original C++ signature */ + def __init__(self) -> None: + """Functions""" + pass + # IMGUI_API void Create(ImTextureFormat format, int w, int h); /* original C++ signature */ + def create(self, format: ImTextureFormat, w: int, h: int) -> None: + pass + # IMGUI_API void DestroyPixels(); /* original C++ signature */ + def destroy_pixels(self) -> None: + pass + # #ifdef IMGUI_BUNDLE_PYTHON_API + # + # ImGuiNpBuffer GetPixelsArray() { return ImGuiNpBuffer{Pixels, Width * Height * BytesPerPixel}; } /* original C++ signature */ + def get_pixels_array(self) -> NpBuffer: + """GetPixelsArray(): returns the pixel data as a NumPy array. 
+ + Note: GetPixelsAt(x, y) is not implemented for Python, but you can use the offset below: + offset = (y * tex.width + x) * tex.bytes_per_pixel + (private API) + """ + pass + # #endif + # + + # int GetSizeInBytes() const { return Width * Height * BytesPerPixel; } /* original C++ signature */ + def get_size_in_bytes(self) -> int: + """(private API)""" + pass + # int GetPitch() const { return Width * BytesPerPixel; } /* original C++ signature */ + def get_pitch(self) -> int: + """(private API)""" + pass + # ImTextureRef GetTexRef() { ImTextureRef tex_ref; tex_ref._TexData = this; tex_ref._TexID = ImTextureID_Invalid; return tex_ref; } /* original C++ signature */ + def get_tex_ref(self) -> ImTextureRef: + """(private API)""" + pass + # ImTextureID GetTexID() const { return TexID; } /* original C++ signature */ + def get_tex_id(self) -> ImTextureID: + """(private API)""" + pass + # Called by Renderer backend + # - Call SetTexID() and SetStatus() after honoring texture requests. Never modify TexID and Status directly! + # - A backend may decide to destroy a texture that we did not request to destroy, which is fine (e.g. freeing resources), but we immediately set the texture back in _WantCreate mode. 
+ # void SetTexID(ImTextureID tex_id) { TexID = tex_id; } /* original C++ signature */ + def set_tex_id(self, tex_id: ImTextureID) -> None: + """(private API)""" + pass + # void SetStatus(ImTextureStatus status) { Status = status; if (status == ImTextureStatus_Destroyed && !WantDestroyNextFrame) Status = ImTextureStatus_WantCreate; } /* original C++ signature */ + def set_status(self, status: ImTextureStatus) -> None: + """(private API)""" + pass + # ----------------------------------------------------------------------------- # [SECTION] Font API (ImFontConfig, ImFontGlyph, ImFontAtlasFlags, ImFontAtlas, ImFontGlyphRangesBuilder, ImFont) # ----------------------------------------------------------------------------- @@ -10463,49 +11011,57 @@ class ImDrawData: class ImFontConfig: """A font input/source (we may rename this to ImFontSource in the future)""" + # Data Source # void* FontData; /* original C++ signature */ font_data: Any # // TTF/OTF data # int FontDataSize; /* original C++ signature */ font_data_size: int # // TTF/OTF data size # bool FontDataOwnedByAtlas; /* original C++ signature */ font_data_owned_by_atlas: ( - bool # True // TTF/OTF data ownership taken by the container ImFontAtlas (will delete memory itself). + bool # True // TTF/OTF data ownership taken by the owner ImFontAtlas (will delete memory itself). ) + + # Options # bool MergeMode; /* original C++ signature */ merge_mode: bool # False // Merge into previous ImFont, so you can combine multiple inputs font into one ImFont (e.g. ASCII font + icons + Japanese glyphs). You may want to use GlyphOffset.y when merge font of different heights. # bool PixelSnapH; /* original C++ signature */ pixel_snap_h: bool # False // Align every glyph AdvanceX to pixel boundaries. Useful e.g. if you are merging a non-pixel aligned font with the default font. If enabled, you can set OversampleH/V to 1. 
- # int FontNo; /* original C++ signature */ - font_no: int # 0 // Index of font within TTF/OTF file - # int OversampleH; /* original C++ signature */ - oversample_h: int # 0 (2) // Rasterize at higher quality for sub-pixel positioning. 0 == auto == 1 or 2 depending on size. Note the difference between 2 and 3 is minimal. You can reduce this to 1 for large glyphs save memory. Read https://github.com/nothings/stb/blob/master/tests/oversample/README.md for details. - # int OversampleV; /* original C++ signature */ - oversample_v: int # 0 (1) // Rasterize at higher quality for sub-pixel positioning. 0 == auto == 1. This is not really useful as we don't use sub-pixel positions on the Y axis. + # bool PixelSnapV; /* original C++ signature */ + pixel_snap_v: bool # True // Align Scaled GlyphOffset.y to pixel boundaries. + # ImS8 OversampleH; /* original C++ signature */ + oversample_h: ImS8 # 0 (2) // Rasterize at higher quality for sub-pixel positioning. 0 == auto == 1 or 2 depending on size. Note the difference between 2 and 3 is minimal. You can reduce this to 1 for large glyphs save memory. Read https://github.com/nothings/stb/blob/master/tests/oversample/README.md for details. + # ImS8 OversampleV; /* original C++ signature */ + oversample_v: ImS8 # 0 (1) // Rasterize at higher quality for sub-pixel positioning. 0 == auto == 1. This is not really useful as we don't use sub-pixel positions on the Y axis. + # ImWchar EllipsisChar; /* original C++ signature */ + ellipsis_char: ImWchar # 0 // Explicitly specify Unicode codepoint of ellipsis character. When fonts are being merged first specified ellipsis will be used. # float SizePixels; /* original C++ signature */ size_pixels: float # // Size in pixels for rasterizer (more or less maps to the resulting font height). - # ImVec2 GlyphExtraSpacing; // 0, 0 // (REMOVED IN 1.91.9: use GlyphExtraAdvanceX) + # ImVec2 GlyphExtraSpacing; // 0, 0 // (REMOVED AT IT SEEMS LARGELY OBSOLETE. PLEASE REPORT IF YOU WERE USING THIS). 
Extra spacing (in pixels) between glyphs when rendered: essentially add to glyph->AdvanceX. Only X axis is supported for now. # ImVec2 GlyphOffset; /* original C++ signature */ - glyph_offset: ImVec2 # 0, 0 // Offset all glyphs from this font input. + glyph_offset: ImVec2 # 0, 0 // Offset (in pixels) all glyphs from this font input. Absolute value for default size, other sizes will scale this value. # float GlyphMinAdvanceX; /* original C++ signature */ - glyph_min_advance_x: float # 0 // Minimum AdvanceX for glyphs, set Min to align font icons, set both Min/Max to enforce mono-space font + glyph_min_advance_x: float # 0 // Minimum AdvanceX for glyphs, set Min to align font icons, set both Min/Max to enforce mono-space font. Absolute value for default size, other sizes will scale this value. # float GlyphMaxAdvanceX; /* original C++ signature */ glyph_max_advance_x: float # FLT_MAX // Maximum AdvanceX for glyphs # float GlyphExtraAdvanceX; /* original C++ signature */ - glyph_extra_advance_x: ( - float # 0 // Extra spacing (in pixels) between glyphs. Please contact us if you are using this. - ) - # unsigned int FontBuilderFlags; /* original C++ signature */ - font_builder_flags: int # 0 // Settings for custom font builder. THIS IS BUILDER IMPLEMENTATION DEPENDENT. Leave as zero if unsure. + glyph_extra_advance_x: float # 0 // Extra spacing (in pixels) between glyphs. Please contact us if you are using this. // FIXME-NEWATLAS: Intentionally unscaled + # ImU32 FontNo; /* original C++ signature */ + font_no: ImU32 # 0 // Index of font within TTF/OTF file + # unsigned int FontLoaderFlags; /* original C++ signature */ + font_loader_flags: int # 0 // Settings for custom font builder. THIS IS BUILDER IMPLEMENTATION DEPENDENT. Leave as zero if unsure. + # unsigned int FontBuilderFlags; // -- // [Renamed in 1.92] Ue FontLoaderFlags. 
# float RasterizerMultiply; /* original C++ signature */ rasterizer_multiply: float # 1.0 // Linearly brighten (>1.0) or darken (<1.0) font output. Brightening small fonts may be a good workaround to make them more readable. This is a silly thing we may remove in the future. # float RasterizerDensity; /* original C++ signature */ - rasterizer_density: float # 1.0 // DPI scale for rasterization, not altering other font metrics: make it easy to swap between e.g. a 100% and a 400% fonts for a zooming display. IMPORTANT: If you increase this it is expected that you increase font scale accordingly, otherwise quality may look lowered. - # ImWchar EllipsisChar; /* original C++ signature */ - ellipsis_char: ImWchar # 0 // Explicitly specify Unicode codepoint of ellipsis character. When fonts are being merged first specified ellipsis will be used. + rasterizer_density: float # 1.0 // [LEGACY: this only makes sense when ImGuiBackendFlags_RendererHasTextures is not supported] DPI scale multiplier for rasterization. Not altering other font metrics: makes it easy to swap between e.g. a 100% and a 400% fonts for a zooming display, or handle Retina screen. IMPORTANT: If you change this it is expected that you increase/decrease font scale roughly to the inverse of this, otherwise quality may look lowered. # [Internal] + # ImFontFlags Flags; /* original C++ signature */ + flags: ImFontFlags # Font flags (don't use just yet, will be exposed in upcoming 1.92.X updates) # ImFont* DstFont; /* original C++ signature */ - dst_font: ImFont + dst_font: ImFont # Target font (as we merging fonts, multiple ImFontConfig may target the same font) + # void* FontLoaderData; /* original C++ signature */ + font_loader_data: Any # Font loader opaque storage (per font config) # IMGUI_API ImFontConfig(); /* original C++ signature */ def __init__(self) -> None: @@ -10513,31 +11069,36 @@ class ImFontConfig: class ImFontGlyph: """Hold rendering data for one glyph. 
- (Note: some language parsers may fail to convert the 31+1 bitfield members, in this case maybe drop store a single u32 or we can rework this) + (Note: some language parsers may fail to convert the bitfield members, in this case maybe drop store a single u32 or we can rework this) """ # float AdvanceX; /* original C++ signature */ - advance_x: float # Horizontal distance to advance layout with + advance_x: float # Horizontal distance to advance cursor/layout position. # float X0, /* original C++ signature */ - x0: float # Glyph corners + x0: float # Glyph corners. Offsets from current cursor/layout position. # Y0, /* original C++ signature */ - y0: float # Glyph corners + y0: float # Glyph corners. Offsets from current cursor/layout position. # X1, /* original C++ signature */ - x1: float # Glyph corners + x1: float # Glyph corners. Offsets from current cursor/layout position. # Y1; /* original C++ signature */ - y1: float # Glyph corners + y1: float # Glyph corners. Offsets from current cursor/layout position. # float U0, /* original C++ signature */ - u0: float + u0: float # Texture coordinates for the current value of ImFontAtlas->TexRef. Cached equivalent of calling GetCustomRect() with PackId. # V0, /* original C++ signature */ - v0: float + v0: float # Texture coordinates for the current value of ImFontAtlas->TexRef. Cached equivalent of calling GetCustomRect() with PackId. # U1, /* original C++ signature */ - u1: float + u1: float # Texture coordinates for the current value of ImFontAtlas->TexRef. Cached equivalent of calling GetCustomRect() with PackId. # V1; /* original C++ signature */ - v1: float - # Texture coordinates + v1: float # Texture coordinates for the current value of ImFontAtlas->TexRef. Cached equivalent of calling GetCustomRect() with PackId. + # int PackId; /* original C++ signature */ + pack_id: int # [Internal] ImFontAtlasRectId value (FIXME: Cold data, could be moved elsewhere?) 
+ # ImFontGlyph() { memset(this, 0, sizeof(*this)); PackId = -1; } /* original C++ signature */ + def __init__(self) -> None: + pass # #ifdef IMGUI_BUNDLE_PYTHON_API # + # [ADAPT_IMGUI_BUNDLE] # bool isColored() const { return Colored != 0; } /* original C++ signature */ def is_colored(self) -> bool: @@ -10553,21 +11114,6 @@ class ImFontGlyph: pass # [/ADAPT_IMGUI_BUNDLE] # #endif - # ImFontGlyph(float AdvanceX = float(), float X0 = float(), float Y0 = float(), float X1 = float(), float Y1 = float(), float U0 = float(), float V0 = float(), float U1 = float(), float V1 = float()); /* original C++ signature */ - def __init__( - self, - advance_x: float = float(), - x0: float = float(), - y0: float = float(), - x1: float = float(), - y1: float = float(), - u0: float = float(), - v0: float = float(), - u1: float = float(), - v1: float = float(), - ) -> None: - """Auto-generated default constructor with named params""" - pass class ImFontGlyphRangesBuilder: """Helper to build glyph ranges from text/string data. Feed your application strings/characters to it then call BuildRanges(). @@ -10614,34 +11160,30 @@ class ImFontGlyphRangesBuilder: """Output new ranges""" pass -class ImFontAtlasCustomRect: - """See ImFontAtlas::AddCustomRectXXX functions.""" - - # unsigned short X, /* original C++ signature */ - x: int # Output // Packed position in Atlas - # Y; /* original C++ signature */ - y: int # Output // Packed position in Atlas +class ImFontAtlasRect: + """Output of ImFontAtlas::GetCustomRect() when using custom rectangles. 
+ Those values may not be cached/stored as they are only valid for the current value of atlas->TexRef + (this is in theory derived from ImTextureRect but we use separate structures for reasons) + """ - # [Internal] - # unsigned short Width, /* original C++ signature */ - width: int # Input // Desired rectangle dimension - # Height; /* original C++ signature */ - height: int # Input // Desired rectangle dimension - # float GlyphAdvanceX; /* original C++ signature */ - glyph_advance_x: float # Input // For custom font glyphs only: glyph xadvance - # ImVec2 GlyphOffset; /* original C++ signature */ - glyph_offset: ImVec2 # Input // For custom font glyphs only: glyph display offset - # ImFont* Font; /* original C++ signature */ - font: ImFont # Input // For custom font glyphs only: target font - # ImFontAtlasCustomRect() { X = Y = 0xFFFF; Width = Height = 0; GlyphID = 0; GlyphColored = 0; GlyphAdvanceX = 0.0f; GlyphOffset = ImVec2(0, 0); Font = NULL; } /* original C++ signature */ + # unsigned short x, /* original C++ signature */ + x: int # Position (in current texture) + # y; /* original C++ signature */ + y: int # Position (in current texture) + # unsigned short w, /* original C++ signature */ + w: int # Size + # h; /* original C++ signature */ + h: int # Size + # ImVec2 uv0, /* original C++ signature */ + uv0: ImVec2 # UV coordinates (in current texture) + # uv1; /* original C++ signature */ + uv1: ImVec2 # UV coordinates (in current texture) + + # ImFontAtlasRect() { memset(this, 0, sizeof(*this)); } /* original C++ signature */ def __init__(self) -> None: pass - # bool IsPacked() const { return X != 0xFFFF; } /* original C++ signature */ - def is_packed(self) -> bool: - """(private API)""" - pass -class ImFontAtlasFlags_(enum.Enum): +class ImFontAtlasFlags_(enum.IntFlag): """Flags for ImFontAtlas build""" # ImFontAtlasFlags_None = 0, /* original C++ signature */ @@ -10662,12 +11204,14 @@ class ImFontAtlas: - One or more fonts. 
- Custom graphics data needed to render the shapes needed by Dear ImGui. - Mouse cursor shapes for software cursor rendering (unless setting 'Flags |= ImFontAtlasFlags_NoMouseCursors' in the font atlas). - It is the user-code responsibility to setup/build the atlas, then upload the pixel data into a texture accessible by your graphics api. - - Optionally, call any of the AddFont*** functions. If you don't call any, the default font embedded in the code will be loaded for you. - - Call GetTexDataAsAlpha8() or GetTexDataAsRGBA32() to build and retrieve pixels data. - - Upload the pixels data into a texture within your graphics system (see imgui_impl_xxxx.cpp examples) + - If you don't call any AddFont*** functions, the default font embedded in the code will be loaded for you. + It is the rendering backend responsibility to upload texture into your graphics API: + - ImGui_ImplXXXX_RenderDrawData() functions generally iterate platform_io->Textures[] to create/update/destroy each ImTextureData instance. + - Backend then set ImTextureData's TexID and BackendUserData. + - Texture id are passed back to you during rendering to identify the texture. Read FAQ entry about ImTextureID/ImTextureRef for more details. + Legacy path: + - Call Build() + GetTexDataAsAlpha8() or GetTexDataAsRGBA32() to build and retrieve pixels data. - Call SetTexID(my_tex_id); and pass the pointer/identifier to your texture in a format natural to your graphics API. - This value will be passed back to you during rendering to identify the texture. Read FAQ entry about ImTextureID for more details. Common pitfalls: - If you pass a 'glyph_ranges' array to AddFont*** functions, you need to make sure that your array persist up until the atlas is build (when calling GetTexData*** or Build()). We only copy the pointer, not the data. 
@@ -10686,117 +11230,55 @@ class ImFontAtlas: # IMGUI_API ImFont* AddFontDefault(const ImFontConfig* font_cfg = NULL); /* original C++ signature */ def add_font_default(self, font_cfg: Optional[ImFontConfig] = None) -> ImFont: pass + # IMGUI_API void RemoveFont(ImFont* font); /* original C++ signature */ + def remove_font(self, font: ImFont) -> None: + pass + # IMGUI_API void Clear(); /* original C++ signature */ + def clear(self) -> None: + """Clear everything (input fonts, output glyphs/textures).""" + pass + # IMGUI_API void CompactCache(); /* original C++ signature */ + def compact_cache(self) -> None: + """Compact cached glyphs and texture.""" + pass + # IMGUI_API void SetFontLoader(const ImFontLoader* font_loader); /* original C++ signature */ + def set_font_loader(self, font_loader: ImFontLoader) -> None: + """Change font loader at runtime.""" + pass + # As we are transitioning toward a new font system, we expect to obsolete those soon: # IMGUI_API void ClearInputData(); /* original C++ signature */ def clear_input_data(self) -> None: - """Clear input data (all ImFontConfig structures including sizes, TTF data, glyph ranges, etc.) = all the data used to build the texture and fonts.""" + """[OBSOLETE] Clear input data (all ImFontConfig structures including sizes, TTF data, glyph ranges, etc.) = all the data used to build the texture and fonts.""" pass # IMGUI_API void ClearFonts(); /* original C++ signature */ def clear_fonts(self) -> None: - """Clear input+output font data (same as ClearInputData() + glyphs storage, UV coordinates).""" + """[OBSOLETE] Clear input+output font data (same as ClearInputData() + glyphs storage, UV coordinates).""" pass # IMGUI_API void ClearTexData(); /* original C++ signature */ def clear_tex_data(self) -> None: - """Clear output texture data (CPU side). 
Saves RAM once the texture has been copied to graphics memory.""" - pass - # IMGUI_API void Clear(); /* original C++ signature */ - def clear(self) -> None: - """Clear all input and output.""" - pass - # Build atlas, retrieve pixel data. - # User is in charge of copying the pixels into graphics memory (e.g. create a texture with your engine). Then store your texture handle with SetTexID(). - # The pitch is always = Width * BytesPerPixels (1 or 4) - # Building in RGBA32 format is provided for convenience and compatibility, but note that unless you manually manipulate or copy color data into - # the texture (e.g. when using the AddCustomRect*** api), then the RGB pixels emitted will always be white (~75% of memory/bandwidth waste. - # IMGUI_API bool Build(); /* original C++ signature */ - def build(self) -> bool: - """Build pixels data. This is called automatically for you by the GetTexData*** functions.""" - pass - # bool IsBuilt() const { return Fonts.Size > 0 && TexReady; } /* original C++ signature */ - def is_built(self) -> bool: - """(private API) - - Bit ambiguous: used to detect when user didn't build texture but effectively we should check TexID != 0 except that would be backend dependent... - """ - pass - # void SetTexID(ImTextureID id) { TexID = id; } /* original C++ signature */ - def set_tex_id(self, id_: ImTextureID) -> None: - """(private API)""" + """[OBSOLETE] Clear CPU-side copy of the texture data. Saves RAM once the texture has been copied to graphics memory.""" pass # ------------------------------------------- # Glyph Ranges # ------------------------------------------- - # Helpers to retrieve list of common Unicode ranges (2 value per range, values are inclusive, zero-terminated list) - # NB: Make sure that your string are UTF-8 and NOT in your local code page. - # Read https://github.com/ocornut/imgui/blob/master/docs/FONTS.md/#about-utf-8-encoding for details. 
- # NB: Consider using ImFontGlyphRangesBuilder to build glyph ranges from textual data. - # Default + Vietnamese characters + # Since 1.92: specifying glyph ranges is only useful/necessary if your backend doesn't support ImGuiBackendFlags_RendererHasTextures! # ------------------------------------------- # [ADAPT_IMGUI_BUNDLE] # ------------------------------------------- - # #ifdef IMGUI_BUNDLE_PYTHON_API + # #ifdef IMGUI_BUNDLE_PYTHON_API # # IMGUI_API ImFont* _AddFontFromFileTTF( /* original C++ signature */ # const char* filename, # float size_pixels, - # const ImFontConfig* font_cfg = NULL, - # std::optional> glyph_ranges_as_int_list = std::nullopt); + # const ImFontConfig* font_cfg = NULL); def add_font_from_file_ttf( - self, - filename: str, - size_pixels: float, - font_cfg: Optional[ImFontConfig] = None, - glyph_ranges_as_int_list: Optional[List[ImWchar]] = None, + self, filename: str, size_pixels: float, font_cfg: Optional[ImFontConfig] = None ) -> ImFont: pass - # IMGUI_API inline std::vector _GetGlyphRangesDefault() // Basic Latin, Extended Latin /* original C++ signature */ - # { return _ImWcharRangeToVec(GetGlyphRangesDefault()); } - def get_glyph_ranges_default(self) -> List[ImWchar]: - """// Basic Latin, Extended Latin""" - pass - # IMGUI_API inline std::vector _GetGlyphRangesGreek() // Default + Greek and Coptic /* original C++ signature */ - # { return _ImWcharRangeToVec(GetGlyphRangesGreek()); } - def get_glyph_ranges_greek(self) -> List[ImWchar]: - """// Default + Greek and Coptic""" - pass - # IMGUI_API inline std::vector _GetGlyphRangesKorean() // Default + Korean characters /* original C++ signature */ - # { return _ImWcharRangeToVec(GetGlyphRangesKorean()); } - def get_glyph_ranges_korean(self) -> List[ImWchar]: - """// Default + Korean characters""" - pass - # IMGUI_API inline std::vector _GetGlyphRangesJapanese() // Default + Hiragana, Katakana, Half-Width, Selection of 2999 Ideographs /* original C++ signature */ - # { return 
_ImWcharRangeToVec(GetGlyphRangesJapanese()); } - def get_glyph_ranges_japanese(self) -> List[ImWchar]: - """// Default + Hiragana, Katakana, Half-Width, Selection of 2999 Ideographs""" - pass - # IMGUI_API inline std::vector _GetGlyphRangesChineseFull() // Default + Half-Width + Japanese Hiragana/Katakana + full set of about 21000 CJK Unified Ideographs /* original C++ signature */ - # { return _ImWcharRangeToVec(GetGlyphRangesChineseFull()); } - def get_glyph_ranges_chinese_full(self) -> List[ImWchar]: - """// Default + Half-Width + Japanese Hiragana/Katakana + full set of about 21000 CJK Unified Ideographs""" - pass - # IMGUI_API inline std::vector _GetGlyphRangesChineseSimplifiedCommon()// Default + Half-Width + Japanese Hiragana/Katakana + set of 2500 CJK Unified Ideographs for common simplified Chinese /* original C++ signature */ - # { return _ImWcharRangeToVec(GetGlyphRangesChineseSimplifiedCommon()); } - def get_glyph_ranges_chinese_simplified_common(self) -> List[ImWchar]: - """// Default + Half-Width + Japanese Hiragana/Katakana + set of 2500 CJK Unified Ideographs for common simplified Chinese""" - pass - # IMGUI_API inline std::vector _GetGlyphRangesCyrillic() // Default + about 400 Cyrillic characters /* original C++ signature */ - # { return _ImWcharRangeToVec(GetGlyphRangesCyrillic()); } - def get_glyph_ranges_cyrillic(self) -> List[ImWchar]: - """// Default + about 400 Cyrillic characters""" - pass - # IMGUI_API inline std::vector _GetGlyphRangesThai() // Default + Thai characters /* original C++ signature */ - # { return _ImWcharRangeToVec(GetGlyphRangesThai()); } - def get_glyph_ranges_thai(self) -> List[ImWchar]: - """// Default + Thai characters""" - pass - # IMGUI_API inline std::vector _GetGlyphRangesVietnamese() // Default + Vietnamese characters /* original C++ signature */ - # { return _ImWcharRangeToVec(GetGlyphRangesVietnamese()); } - def get_glyph_ranges_vietnamese(self) -> List[ImWchar]: - """// Default + Vietnamese characters""" - 
pass - # #endif + # #endif # # [/ADAPT_IMGUI_BUNDLE] @@ -10804,31 +11286,34 @@ class ImFontAtlas: # [ALPHA] Custom Rectangles/Glyphs API # ------------------------------------------- - # You can request arbitrary rectangles to be packed into the atlas, for your own purposes. - # - After calling Build(), you can query the rectangle position and render your pixels. - # - If you render colored output, set 'atlas->TexPixelsUseColors = True' as this may help some backends decide of preferred texture format. - # - You can also request your rectangles to be mapped as font glyph (given a font + Unicode point), - # so you can render e.g. custom colorful icons and use them as regular glyphs. + # Register and retrieve custom rectangles + # - You can request arbitrary rectangles to be packed into the atlas, for your own purpose. + # - Since 1.92.0, packing is done immediately in the function call (previously packing was done during the Build call) + # - You can render your pixels into the texture right after calling the AddCustomRect() functions. + # - VERY IMPORTANT: + # - Texture may be created/resized at any time when calling ImGui or ImFontAtlas functions. + # - IT WILL INVALIDATE RECTANGLE DATA SUCH AS UV COORDINATES. Always use latest values from GetCustomRect(). + # - UV coordinates are associated to the current texture identifier aka 'atlas->TexRef'. Both TexRef and UV coordinates are typically changed at the same time. + # - If you render colored output into your custom rectangles: set 'atlas->TexPixelsUseColors = True' as this may help some backends decide of preferred texture format. # - Read docs/FONTS.md for more details about using colorful icons. - # - Note: this API may be redesigned later in order to support multi-monitor varying DPI settings. 
- # IMGUI_API int AddCustomRectRegular(int width, int height); /* original C++ signature */ - def add_custom_rect_regular(self, width: int, height: int) -> int: - pass - # IMGUI_API int AddCustomRectFontGlyph(ImFont* font, ImWchar id, int width, int height, float advance_x, const ImVec2& offset = ImVec2(0, 0)); /* original C++ signature */ - def add_custom_rect_font_glyph( - self, font: ImFont, id_: ImWchar, width: int, height: int, advance_x: float, offset: Optional[ImVec2Like] = None - ) -> int: - """Python bindings defaults: - If offset is None, then its default value will be: ImVec2(0, 0) - """ - pass - # ImFontAtlasCustomRect* GetCustomRectByIndex(int index) { IM_ASSERT(index >= 0); return &CustomRects[index]; } /* original C++ signature */ - def get_custom_rect_by_index(self, index: int) -> ImFontAtlasCustomRect: - """(private API)""" - pass - # IMGUI_API void CalcCustomRectUV(const ImFontAtlasCustomRect* rect, ImVec2* out_uv_min, ImVec2* out_uv_max) const; /* original C++ signature */ - def calc_custom_rect_uv(self, rect: ImFontAtlasCustomRect, out_uv_min: ImVec2Like, out_uv_max: ImVec2Like) -> None: - """[Internal]""" + # - Note: this API may be reworked further in order to facilitate supporting e.g. multi-monitor, varying DPI settings. + # - (Pre-1.92 names) ------------> (1.92 names) + # - GetCustomRectByIndex() --> Use GetCustomRect() + # - CalcCustomRectUV() --> Use GetCustomRect() and read uv0, uv1 fields. + # - AddCustomRectRegular() --> Renamed to AddCustomRect() + # - AddCustomRectFontGlyph() --> Prefer using custom ImFontLoader inside ImFontConfig + # - ImFontAtlasCustomRect --> Renamed to ImFontAtlasRect + # IMGUI_API ImFontAtlasRectId AddCustomRect(int width, int height, ImFontAtlasRect* out_r = NULL); /* original C++ signature */ + def add_custom_rect(self, width: int, height: int, out_r: Optional[ImFontAtlasRect] = None) -> ImFontAtlasRectId: + """Register a rectangle. 
Return -1 (ImFontAtlasRectId_Invalid) on error.""" + pass + # IMGUI_API void RemoveCustomRect(ImFontAtlasRectId id); /* original C++ signature */ + def remove_custom_rect(self, id_: ImFontAtlasRectId) -> None: + """Unregister a rectangle. Existing pixels will stay in texture until resized / garbage collected.""" + pass + # IMGUI_API bool GetCustomRect(ImFontAtlasRectId id, ImFontAtlasRect* out_r) const; /* original C++ signature */ + def get_custom_rect(self, id_: ImFontAtlasRectId, out_r: ImFontAtlasRect) -> bool: + """Get rectangle coordinates for current texture. Valid immediately, never store this (read above)!""" pass # ------------------------------------------- # Members @@ -10837,139 +11322,252 @@ class ImFontAtlas: # Input # ImFontAtlasFlags Flags; /* original C++ signature */ flags: ImFontAtlasFlags # Build flags (see ImFontAtlasFlags_) - # ImTextureID TexID; /* original C++ signature */ - tex_id: ImTextureID # User data to refer to the texture once it has been uploaded to user's graphic systems. It is passed back to you during rendering via the ImDrawCmd structure. - # int TexDesiredWidth; /* original C++ signature */ - tex_desired_width: int # Texture width desired by user before Build(). Must be a power-of-two. If have many glyphs your graphics API have texture size restrictions you may want to increase texture width to decrease height. + # ImTextureFormat TexDesiredFormat; /* original C++ signature */ + tex_desired_format: ImTextureFormat # Desired texture format (default to ImTextureFormat_RGBA32 but may be changed to ImTextureFormat_Alpha8). # int TexGlyphPadding; /* original C++ signature */ tex_glyph_padding: int # FIXME: Should be called "TexPackPadding". Padding between glyphs within texture in pixels. Defaults to 1. If your rendering method doesn't rely on bilinear filtering you may set this to 0 (will also need to set AntiAliasedLinesUseTex = False). 
+ # int TexMinWidth; /* original C++ signature */ + tex_min_width: int # Minimum desired texture width. Must be a power of two. Default to 512. + # int TexMinHeight; /* original C++ signature */ + tex_min_height: int # Minimum desired texture height. Must be a power of two. Default to 128. + # int TexMaxWidth; /* original C++ signature */ + tex_max_width: int # Maximum desired texture width. Must be a power of two. Default to 8192. + # int TexMaxHeight; /* original C++ signature */ + tex_max_height: int # Maximum desired texture height. Must be a power of two. Default to 8192. # void* UserData; /* original C++ signature */ user_data: Any # Store your own atlas related user-data (if e.g. you have multiple font atlas). + # Output + # - Because textures are dynamically created/resized, the current texture identifier may changed at *ANY TIME* during the frame. + # - This should not affect you as you can always use the latest value. But note that any precomputed UV coordinates are only valid for the current TexRef. + # ImTextureData* TexData; /* original C++ signature */ + tex_data: ImTextureData # Latest texture. + + # #ifdef IMGUI_BUNDLE_PYTHON_API + # + # Convenience methods for python in order to get/set the font texture Id + # (because currently TexID is in a union and we can't access it directly) + # Note: this uses the old way of setting the fonts texture. 
+ # Newer backends should implement ImGuiBackendFlags_RendererHasTextures + # and be able to handle Texture updates + # See https://github.com/ocornut/imgui/issues/8465 + # For inspiration, look at + # def _update_texture(self, tex: imgui.ImTextureData): + # inside ImGui Bundle (bindings/imgui_bundle/python_backends/opengl_xxx_backend.py) + # void Python_SetTextureID(ImTextureID id) { TexRef = ImTextureRef(id); } /* original C++ signature */ + def python_set_texture_id(self, id_: ImTextureID) -> None: + """(private API)""" + pass + # ImTextureID Python_GetTextureID() { return TexRef.GetTexID(); } /* original C++ signature */ + def python_get_texture_id(self) -> ImTextureID: + """(private API)""" + pass + # #endif + # + # [Internal] - # NB: Access texture data via GetTexData*() calls! Which will setup a default font for you. + # ImVector TexList; /* original C++ signature */ + tex_list: ImVector_ImTextureData_ptr # Texture list (most often TexList.Size == 1). TexData is always == TexList.back(). DO NOT USE DIRECTLY, USE GetDrawData().Textures[]/GetPlatformIO().Textures[] instead! # bool Locked; /* original C++ signature */ - locked: bool # Marked as Locked by ImGui::NewFrame() so attempt to modify the atlas will assert. - # bool TexReady; /* original C++ signature */ - tex_ready: bool # Set when texture was built matching current font input + locked: bool # Marked as locked during ImGui::NewFrame()..EndFrame() scope if TexUpdates are not supported. Any attempt to modify the atlas will assert. + # bool RendererHasTextures; /* original C++ signature */ + renderer_has_textures: ( + bool # Copy of (BackendFlags & ImGuiBackendFlags_RendererHasTextures) from supporting context. + ) + # bool TexIsBuilt; /* original C++ signature */ + tex_is_built: ( + bool # Set when texture was built matching current font input. Mostly useful for legacy IsBuilt() call. 
+ ) # bool TexPixelsUseColors; /* original C++ signature */ - tex_pixels_use_colors: bool # Tell whether our texture data is known to use colors (rather than just alpha channel), in order to help backend select a format. - # int TexWidth; /* original C++ signature */ - tex_width: int # Texture width calculated during Build(). - # int TexHeight; /* original C++ signature */ - tex_height: int # Texture height calculated during Build(). + tex_pixels_use_colors: bool # Tell whether our texture data is known to use colors (rather than just alpha channel), in order to help backend select a format or conversion process. # ImVec2 TexUvScale; /* original C++ signature */ - tex_uv_scale: ImVec2 # = (1.0/TexWidth, 1.0/TexHeight) + tex_uv_scale: ImVec2 # = (1.0/TexData->TexWidth, 1.0/TexData->TexHeight). May change as new texture gets created. # ImVec2 TexUvWhitePixel; /* original C++ signature */ - tex_uv_white_pixel: ImVec2 # Texture coordinates to a white pixel + tex_uv_white_pixel: ImVec2 # Texture coordinates to a white pixel. May change as new texture gets created. # ImVector Fonts; /* original C++ signature */ fonts: ImVector_ImFont_ptr # Hold all the fonts returned by AddFont*. Fonts[0] is the default font upon calling ImGui::NewFrame(), use ImGui::PushFont()/PopFont() to change the current font. - # ImVector CustomRects; /* original C++ signature */ - custom_rects: ImVector_ImFontAtlasCustomRect # Rectangles for packing custom texture data into the atlas. # ImVector Sources; /* original C++ signature */ sources: ImVector_ImFontConfig # Source/configuration data - - # [Internal] Font builder - # const ImFontBuilderIO* FontBuilderIO; /* original C++ signature */ - font_builder_io: ImFontBuilderIO # Opaque interface to a font builder (default to stb_truetype, can be changed to use FreeType by defining IMGUI_ENABLE_FREETYPE). 
# (const) - # unsigned int FontBuilderFlags; /* original C++ signature */ - font_builder_flags: int # Shared flags (for all fonts) for custom font builder. THIS IS BUILD IMPLEMENTATION DEPENDENT. Per-font override is also available in ImFontConfig. - - # [Internal] Packing data - # int PackIdMouseCursors; /* original C++ signature */ - pack_id_mouse_cursors: int # Custom texture rectangle ID for white pixel and mouse cursors - # int PackIdLines; /* original C++ signature */ - pack_id_lines: int # Custom texture rectangle ID for baked anti-aliased lines + # int TexNextUniqueID; /* original C++ signature */ + tex_next_unique_id: int # Next value to be stored in TexData->UniqueID + # int FontNextUniqueID; /* original C++ signature */ + font_next_unique_id: int # Next value to be stored in ImFont->FontID + # const char* FontLoaderName; /* original C++ signature */ + font_loader_name: str # Font loader name (for display e.g. in About box) == FontLoader->Name # (const) + # void* FontLoaderData; /* original C++ signature */ + font_loader_data: Any # Font backend opaque storage + # unsigned int FontLoaderFlags; /* original C++ signature */ + font_loader_flags: int # Shared flags (for all fonts) for font loader. THIS IS BUILD IMPLEMENTATION DEPENDENT (e.g. Per-font override is also available in ImFontConfig). + # int RefCount; /* original C++ signature */ + ref_count: int # Number of contexts using this atlas + # ImGuiContext* OwnerContext; /* original C++ signature */ + owner_context: Context # Context which own the atlas will be in charge of updating and destroying it. # [Obsolete] - # typedef ImFontAtlasCustomRect CustomRect; // OBSOLETED in 1.72+ - # typedef ImFontGlyphRangesBuilder GlyphRangesBuilder; // OBSOLETED in 1.67+ - -class ImFont: - """Font runtime data and rendering - ImFontAtlas automatically loads a default embedded font for you when you call GetTexDataAsAlpha8() or GetTexDataAsRGBA32(). 
+ # unsigned int FontBuilderFlags; // OBSOLETED in 1.92.0: Renamed to FontLoaderFlags. + # int TexDesiredWidth; // OBSOLETED in 1.92.0: Force texture width before calling Build(). Must be a power-of-two. If have many glyphs your graphics API have texture size restrictions you may want to increase texture width to decrease height. + # typedef ImFontAtlasRect ImFontAtlasCustomRect; // OBSOLETED in 1.92.0 + # typedef ImFontAtlasCustomRect CustomRect; // OBSOLETED in 1.72+ + # typedef ImFontGlyphRangesBuilder GlyphRangesBuilder; // OBSOLETED in 1.67+ + +class ImFontBaked: + """Font runtime data for a given size + Important: pointers to ImFontBaked are only valid for the current frame. """ # [Internal] Members: Hot ~20/24 bytes (for CalcTextSize) # ImVector IndexAdvanceX; /* original C++ signature */ index_advance_x: ImVector_float # 12-16 // out // Sparse. Glyphs->AdvanceX in a directly indexable way (cache-friendly for CalcTextSize functions which only this info, and are often bottleneck in large UI). # float FallbackAdvanceX; /* original C++ signature */ - fallback_advance_x: float # 4 // out // = FallbackGlyph->AdvanceX - # float FontSize; /* original C++ signature */ - font_size: float # 4 // in // Height of characters/line, set during loading (don't change after loading) - - # [Internal] Members: Hot ~28/40 bytes (for RenderText loop) + fallback_advance_x: float # 4 // out // FindGlyph(FallbackChar)->AdvanceX + # float Size; /* original C++ signature */ + size: float # 4 // in // Height of characters/line, set during loading (doesn't change after loading) + # float RasterizerDensity; /* original C++ signature */ + rasterizer_density: float # 4 // in // Density this is baked at + + # [Internal] Members: Hot ~28/36 bytes (for RenderText loop) + # ImVector IndexLookup; /* original C++ signature */ + index_lookup: ImVector_ImU16 # 12-16 // out // Sparse. Index glyphs by Unicode code-point. 
# ImVector Glyphs; /* original C++ signature */ glyphs: ImVector_ImFontGlyph # 12-16 // out // All glyphs. - # ImFontGlyph* FallbackGlyph; /* original C++ signature */ - fallback_glyph: ImFontGlyph # 4-8 // out // = FindGlyph(FontFallbackChar) + # int FallbackGlyphIndex; /* original C++ signature */ + fallback_glyph_index: int # 4 // out // Index of FontFallbackChar - # [Internal] Members: Cold ~32/40 bytes - # Conceptually Sources[] is the list of font sources merged to create this font. - # ImFontAtlas* ContainerAtlas; /* original C++ signature */ - container_atlas: ImFontAtlas # 4-8 // out // What we has been loaded into - # ImFontConfig* Sources; /* original C++ signature */ - sources: ImFontConfig # 4-8 // in // Pointer within ContainerAtlas->Sources[], to SourcesCount instances - # short SourcesCount; /* original C++ signature */ - sources_count: int # 2 // in // Number of ImFontConfig involved in creating this font. Usually 1, or >1 when merging multiple font sources into one ImFont. - # short EllipsisCharCount; /* original C++ signature */ - ellipsis_char_count: int # 1 // out // 1 or 3 - # ImWchar EllipsisChar; /* original C++ signature */ - ellipsis_char: ImWchar # 2-4 // out // Character used for ellipsis rendering ('...'). - # ImWchar FallbackChar; /* original C++ signature */ - fallback_char: ImWchar # 2-4 // out // Character used if a glyph isn't found (U+FFFD, '?') - # float EllipsisWidth; /* original C++ signature */ - ellipsis_width: float # 4 // out // Total ellipsis Width - # float EllipsisCharStep; /* original C++ signature */ - ellipsis_char_step: float # 4 // out // Step between characters when EllipsisCount > 0 - # float Scale; /* original C++ signature */ - scale: float # 4 // in // Base font scale (1.0), multiplied by the per-window font scale which you can adjust with SetWindowFontScale() + # [Internal] Members: Cold # float Ascent, /* original C++ signature */ ascent: float # 4+4 // out // Ascent: distance from top to bottom of e.g. 
'A' [0..FontSize] (unscaled) # Descent; /* original C++ signature */ descent: float # 4+4 // out // Ascent: distance from top to bottom of e.g. 'A' [0..FontSize] (unscaled) - # int MetricsTotalSurface; /* original C++ signature */ - metrics_total_surface: int # 4 // out // Total surface in pixels to get an idea of the font rasterization/texture cost (not exact, we approximate the cost of padding between glyphs) - # bool DirtyLookupTables; /* original C++ signature */ - dirty_lookup_tables: bool # 1 // out // - - # IMGUI_API ImFont(); /* original C++ signature */ + # int LastUsedFrame; /* original C++ signature */ + last_used_frame: int # 4 // // Record of that time this was bounds + # ImGuiID BakedId; /* original C++ signature */ + baked_id: ID # 4 // // Unique ID for this baked storage + # ImFont* OwnerFont; /* original C++ signature */ + owner_font: ImFont # 4-8 // in // Parent font + # void* FontLoaderDatas; /* original C++ signature */ + font_loader_datas: Any # 4-8 // // Font loader opaque storage (per baked font * sources): single contiguous buffer allocated by imgui, passed to loader. + + # Functions + # IMGUI_API ImFontBaked(); /* original C++ signature */ def __init__(self) -> None: - """Methods""" pass - # IMGUI_API ImFontGlyph* FindGlyph(ImWchar c); /* original C++ signature */ + # IMGUI_API void ClearOutputData(); /* original C++ signature */ + def clear_output_data(self) -> None: + pass + # IMGUI_API ImFontGlyph* FindGlyph(ImWchar c); /* original C++ signature */ def find_glyph(self, c: ImWchar) -> ImFontGlyph: + """Return U+FFFD glyph if requested glyph doesn't exists.""" pass - # IMGUI_API ImFontGlyph* FindGlyphNoFallback(ImWchar c); /* original C++ signature */ + # IMGUI_API ImFontGlyph* FindGlyphNoFallback(ImWchar c); /* original C++ signature */ def find_glyph_no_fallback(self, c: ImWchar) -> ImFontGlyph: + """Return None if glyph doesn't exist""" pass - # float GetCharAdvance(ImWchar c) { return ((int)c < IndexAdvanceX.Size) ? 
IndexAdvanceX[(int)c] : FallbackAdvanceX; } /* original C++ signature */ + # IMGUI_API float GetCharAdvance(ImWchar c); /* original C++ signature */ def get_char_advance(self, c: ImWchar) -> float: - """(private API)""" pass - # bool IsLoaded() const { return ContainerAtlas != NULL; } /* original C++ signature */ + # IMGUI_API bool IsGlyphLoaded(ImWchar c); /* original C++ signature */ + def is_glyph_loaded(self, c: ImWchar) -> bool: + pass + +class ImFontFlags_(enum.IntFlag): + """Font flags + (in future versions as we redesign font loading API, this will become more important and better documented. for now please consider this as internal/advanced use) + """ + + # ImFontFlags_None = 0, /* original C++ signature */ + none = enum.auto() # (= 0) + # ImFontFlags_NoLoadError = 1 << 1, /* original C++ signature */ + no_load_error = ( + enum.auto() + ) # (= 1 << 1) # Disable throwing an error/assert when calling AddFontXXX() with missing file/data. Calling code is expected to check AddFontXXX() return value. + # ImFontFlags_NoLoadGlyphs = 1 << 2, /* original C++ signature */ + no_load_glyphs = enum.auto() # (= 1 << 2) # [Internal] Disable loading new glyphs. + # ImFontFlags_LockBakedSizes = 1 << 3, /* original C++ signature */ + lock_baked_sizes = ( + enum.auto() + ) # (= 1 << 3) # [Internal] Disable loading new baked sizes, disable garbage collecting current ones. e.g. if you want to lock a font to a single size. Important: if you use this to preload given sizes, consider the possibility of multiple font density used on Retina display. + +class ImFont: + """Font runtime data and rendering + - ImFontAtlas automatically loads a default embedded font for you if you didn't load one manually. + - Since 1.92.0 a font may be rendered as any size! Therefore a font doesn't have one specific size. + - Use 'font->GetFontBaked(size)' to retrieve the ImFontBaked* corresponding to a given size. 
+ - If you used g.Font + g.FontSize (which is frequent from the ImGui layer), you can use g.FontBaked as a shortcut, as g.FontBaked == g.Font->GetFontBaked(g.FontSize). + """ + + # [Internal] Members: Hot ~12-20 bytes + # ImFontBaked* LastBaked; /* original C++ signature */ + last_baked: ImFontBaked # 4-8 // Cache last bound baked. NEVER USE DIRECTLY. Use GetFontBaked(). + # ImFontAtlas* OwnerAtlas; /* original C++ signature */ + owner_atlas: ImFontAtlas # 4-8 // What we have been loaded into. + # ImFontFlags Flags; /* original C++ signature */ + flags: ImFontFlags # 4 // Font flags. + # float CurrentRasterizerDensity; /* original C++ signature */ + current_rasterizer_density: float # Current rasterizer density. This is a varying state of the font. + + # [Internal] Members: Cold ~24-52 bytes + # Conceptually Sources[] is the list of font sources merged to create this font. + # ImGuiID FontId; /* original C++ signature */ + font_id: ID # Unique identifier for the font + # float LegacySize; /* original C++ signature */ + legacy_size: float # 4 // in // Font size passed to AddFont(). Use for old code calling PushFont() expecting to use that size. (use ImGui::GetFontBaked() to get font baked at current bound size). + # ImVector Sources; /* original C++ signature */ + sources: ImVector_ImFontConfig_ptr # 16 // in // List of sources. Pointers within OwnerAtlas->Sources[] + # ImWchar EllipsisChar; /* original C++ signature */ + ellipsis_char: ImWchar # 2-4 // out // Character used for ellipsis rendering ('...'). + # ImWchar FallbackChar; /* original C++ signature */ + fallback_char: ImWchar # 2-4 // out // Character used if a glyph isn't found (U+FFFD, '?') + # bool EllipsisAutoBake; /* original C++ signature */ + ellipsis_auto_bake: bool # 1 // // Mark when the "..." glyph needs to be generated. + # ImGuiStorage RemapPairs; /* original C++ signature */ + remap_pairs: Storage # 16 // // Remapping pairs when using AddRemapChar(), otherwise empty. 
+ + # IMGUI_API ImFont(); /* original C++ signature */ + def __init__(self) -> None: + """Methods""" + pass + # IMGUI_API bool IsGlyphInFont(ImWchar c); /* original C++ signature */ + def is_glyph_in_font(self, c: ImWchar) -> bool: + pass + # bool IsLoaded() const { return OwnerAtlas != NULL; } /* original C++ signature */ def is_loaded(self) -> bool: """(private API)""" pass - # const char* GetDebugName() const { return Sources ? Sources->Name : ""; } /* original C++ signature */ + # const char* GetDebugName() const { return Sources.Size ? Sources[0]->Name : ""; } /* original C++ signature */ def get_debug_name(self) -> str: - """(private API)""" - pass - # utf8 + """(private API) - # #ifdef IMGUI_BUNDLE_PYTHON_API + Fill ImFontConfig::Name. + """ + pass + # [Internal] Don't use! + # 'max_width' stops rendering after a certain width (could be turned into a 2 size). FLT_MAX to disable. + # 'wrap_width' enable automatic word-wrapping across multiple lines to fit into given width. 0.0 to disable. 
+ # IMGUI_API ImFontBaked* GetFontBaked(float font_size, float density = -1.0f); /* original C++ signature */ + def get_font_baked(self, font_size: float, density: float = -1.0) -> ImFontBaked: + """Get or create baked data for given size""" + pass + # #ifdef IMGUI_BUNDLE_PYTHON_API # - # IMGUI_API int CalcWordWrapPositionAPython(float scale, const char* text, float wrap_width); /* original C++ signature */ - def calc_word_wrap_position_a_python(self, scale: float, text: str, wrap_width: float) -> int: - """Python API for CalcWordWrapPositionA (will return an index in the text, not a pointer)""" + # IMGUI_API int CalcWordWrapPositionPython(float size, const char* text, float wrap_width); /* original C++ signature */ + def calc_word_wrap_position_python(self, size: float, text: str, wrap_width: float) -> int: + """Python API for CalcWordWrapPosition (will return an index in the text, not a pointer)""" pass - # #endif + # #endif # - # IMGUI_API void RenderChar(ImDrawList* draw_list, float size, const ImVec2& pos, ImU32 col, ImWchar c); /* original C++ signature */ - def render_char(self, draw_list: ImDrawList, size: float, pos: ImVec2Like, col: ImU32, c: ImWchar) -> None: + # IMGUI_API void RenderChar(ImDrawList* draw_list, float size, const ImVec2& pos, ImU32 col, ImWchar c, const ImVec4* cpu_fine_clip = NULL); /* original C++ signature */ + def render_char( + self, + draw_list: ImDrawList, + size: float, + pos: ImVec2Like, + col: ImU32, + c: ImWchar, + cpu_fine_clip: Optional[ImVec4Like] = None, + ) -> None: pass - # IMGUI_API void RenderText(ImDrawList* draw_list, float size, const ImVec2& pos, ImU32 col, const ImVec4& clip_rect, const char* text_begin, const char* text_end, float wrap_width = 0.0f, bool cpu_fine_clip = false); /* original C++ signature */ + # IMGUI_API void RenderText(ImDrawList* draw_list, float size, const ImVec2& pos, ImU32 col, const ImVec4& clip_rect, const char* text_begin, const char* text_end, float wrap_width = 0.0f, ImDrawTextFlags 
flags = 0); /* original C++ signature */ def render_text( self, draw_list: ImDrawList, @@ -10980,38 +11578,16 @@ class ImFont: text_begin: str, text_end: str, wrap_width: float = 0.0, - cpu_fine_clip: bool = False, + flags: ImDrawTextFlags = 0, ) -> None: pass # [Internal] Don't use! - # IMGUI_API void BuildLookupTable(); /* original C++ signature */ - def build_lookup_table(self) -> None: - pass # IMGUI_API void ClearOutputData(); /* original C++ signature */ def clear_output_data(self) -> None: pass - # IMGUI_API void GrowIndex(int new_size); /* original C++ signature */ - def grow_index(self, new_size: int) -> None: - pass - # IMGUI_API void AddGlyph(const ImFontConfig* src_cfg, ImWchar c, float x0, float y0, float x1, float y1, float u0, float v0, float u1, float v1, float advance_x); /* original C++ signature */ - def add_glyph( - self, - src_cfg: ImFontConfig, - c: ImWchar, - x0: float, - y0: float, - x1: float, - y1: float, - u0: float, - v0: float, - u1: float, - v1: float, - advance_x: float, - ) -> None: - pass - # IMGUI_API void AddRemapChar(ImWchar dst, ImWchar src, bool overwrite_dst = true); /* original C++ signature */ - def add_remap_char(self, dst: ImWchar, src: ImWchar, overwrite_dst: bool = True) -> None: - """Makes 'dst' character/glyph points to 'src' character/glyph. 
Currently needs to be called AFTER fonts have been built.""" + # IMGUI_API void AddRemapChar(ImWchar from_codepoint, ImWchar to_codepoint); /* original C++ signature */ + def add_remap_char(self, from_codepoint: ImWchar, to_codepoint: ImWchar) -> None: + """Makes 'from_codepoint' character points to 'to_codepoint' glyph.""" pass # IMGUI_API bool IsGlyphRangeUnused(unsigned int c_begin, unsigned int c_last); /* original C++ signature */ def is_glyph_range_unused(self, c_begin: int, c_last: int) -> bool: @@ -11021,7 +11597,7 @@ class ImFont: # [SECTION] Viewports # ----------------------------------------------------------------------------- -class ViewportFlags_(enum.Enum): +class ViewportFlags_(enum.IntFlag): """Flags stored in ImGuiViewport::Flags, giving indications to the platform backends.""" # ImGuiViewportFlags_None = 0, /* original C++ signature */ @@ -11093,6 +11669,8 @@ class Viewport: pos: ImVec2 # Main Area: Position of the viewport (Dear ImGui coordinates are the same as OS desktop/native coordinates) # ImVec2 Size; /* original C++ signature */ size: ImVec2 # Main Area: Size of the viewport. + # ImVec2 FramebufferScale; /* original C++ signature */ + framebuffer_scale: ImVec2 # Density of the viewport for Retina display (always 1,1 on Windows, may be 2,2 etc on macOS/iOS). This will affect font rasterizer density. # ImVec2 WorkPos; /* original C++ signature */ work_pos: ImVec2 # Work Area: Position of the viewport minus task bars, menus bars, status bars (>= Pos) # ImVec2 WorkSize; /* original C++ signature */ @@ -11101,6 +11679,10 @@ class Viewport: dpi_scale: float # 1.0 = 96 DPI = No extra scale. # ImGuiID ParentViewportId; /* original C++ signature */ parent_viewport_id: ID # (Advanced) 0: no parent. Instruct the platform backend to setup a parent/child relationship between platform windows. 
+ # ImGuiViewport* ParentViewport; /* original C++ signature */ + parent_viewport: ( + Viewport # (Advanced) Direct shortcut to ImGui::FindViewportByID(ParentViewportId). None: no parent. + ) # ImDrawData* DrawData; /* original C++ signature */ draw_data: ImDrawData # The ImDrawData corresponding to this viewport. Valid after Render() and until the next call to NewFrame(). @@ -11216,7 +11798,7 @@ class PlatformIO: # std::function Platform_OpenInShellFn; /* original C++ signature */ # Optional: Open link/folder/file in OS Shell - # (default to use ShellExecuteW() on Windows, system() on Linux/Mac) + # (default to use ShellExecuteW() on Windows, system() on Linux/Mac. expected to return False on failure, but some platforms may always return True) # [ADAPT_IMGUI_BUNDLE] # bool (*Platform_OpenInShellFn)(ImGuiContext* ctx, const char* path); platform_open_in_shell_fn: Callable[[Context, str], bool] @@ -11237,6 +11819,12 @@ class PlatformIO: # Input - Interface with Renderer Backend # ------------------------------------------------------------------ + # Optional: Maximum texture size supported by renderer (used to adjust how we size textures). 0 if not known. + # int Renderer_TextureMaxWidth; /* original C++ signature */ + renderer_texture_max_width: int + # int Renderer_TextureMaxHeight; /* original C++ signature */ + renderer_texture_max_height: int + # void* Renderer_RenderState; /* original C++ signature */ # Written by some backends during ImGui_ImplXXXX_RenderDrawData() call to point backend_specific ImGui_ImplXXXX_RenderState* structure. 
renderer_render_state: Any @@ -11268,14 +11856,32 @@ class PlatformIO: monitors: ImVector_PlatformMonitor # ------------------------------------------------------------------ - # Output - List of viewports to render into platform windows + # Output # ------------------------------------------------------------------ + # Textures list (the list is updated by calling ImGui::EndFrame or ImGui::Render) + # The ImGui_ImplXXXX_RenderDrawData() function of each backend generally access this via ImDrawData::Textures which points to this. The array is available here mostly because backends will want to destroy textures on shutdown. + # ImVector Textures; /* original C++ signature */ + textures: ImVector_ImTextureData_ptr # List of textures used by Dear ImGui (most often 1) + contents of external texture list is automatically appended into this. + # Viewports list (the list is updated by calling ImGui::EndFrame or ImGui::Render) # (in the future we will attempt to organize this feature to remove the need for a "main viewport") # ImVector Viewports; /* original C++ signature */ viewports: ImVector_Viewport_ptr # Main viewports, followed by all secondary viewports. + # ------------------------------------------------------------------ + # Functions + # ------------------------------------------------------------------ + + # IMGUI_API void ClearPlatformHandlers(); /* original C++ signature */ + def clear_platform_handlers(self) -> None: + """Clear all Platform_XXX fields. Typically called on Platform Backend shutdown.""" + pass + # IMGUI_API void ClearRendererHandlers(); /* original C++ signature */ + def clear_renderer_handlers(self) -> None: + """Clear all Renderer_XXX fields. Typically called on Renderer Backend shutdown.""" + pass + class PlatformMonitor: """(Optional) This is required when enabling multi-viewport. Represent the bounds of each connected monitor/display and their DPI. 
We use this information for multiple DPI support + clamping the position of popups and tooltips so they don't straddle multiple monitors. @@ -11298,14 +11904,18 @@ class PlatformMonitor: pass class PlatformImeData: - """(Optional) Support for IME (Input Method Editor) via the platform_io.Platform_SetImeDataFn() function.""" + """(Optional) Support for IME (Input Method Editor) via the platform_io.Platform_SetImeDataFn() function. Handler is called during EndFrame().""" # bool WantVisible; /* original C++ signature */ - want_visible: bool # A widget wants the IME to be visible + want_visible: bool # A widget wants the IME to be visible. + # bool WantTextInput; /* original C++ signature */ + want_text_input: bool # A widget wants text input, not necessarily IME to be visible. This is automatically set to the upcoming value of io.WantTextInput. # ImVec2 InputPos; /* original C++ signature */ - input_pos: ImVec2 # Position of the input cursor + input_pos: ImVec2 # Position of input cursor (for IME). # float InputLineHeight; /* original C++ signature */ - input_line_height: float # Line height + input_line_height: float # Line height (for IME). + # ImGuiID ViewportId; /* original C++ signature */ + viewport_id: ID # ID of platform window/viewport. # ImGuiPlatformImeData() { memset(this, 0, sizeof(*this)); } /* original C++ signature */ def __init__(self) -> None: @@ -11417,8 +12027,6 @@ def suspend_layout() -> None: def resume_layout() -> None: pass -# namespace ImGui - # #endif #################### #################### @@ -11440,18 +12048,28 @@ def resume_layout() -> None: # [SECTION] Stack Layout Internal API # ----------------------------------------------------------------------------- -# namespace ImGuiInternal - # #endif #################### #################### #################### #################### # dear imgui: wrappers for C++ standard library (STL) types (std::string, etc.) + # This is also an example of how you may wrap your own similar types. 
+# TL;DR; this is using the ImGuiInputTextFlags_CallbackResize facility, +# which also demonstrated in 'Dear ImGui Demo->Widgets->Text Input->Resize Callback'. # Changelog: # - v0.10: Initial version. Added InputText() / InputTextMultiline() calls with std::string +# Usage: +# { +# #include "misc/cpp/imgui_stdlib.h" +# #include "misc/cpp/imgui_stdlib.cpp" // <-- If you want to include implementation without messing with your project/build. +# [...] +# std::string my_string; +# ImGui::InputText("my string", &my_string); +# } + # See more C++ related extension (fmt, RAII, syntaxis sugar) on Wiki: # https://github.com/ocornut/imgui/wiki/Useful-Extensions#cness @@ -11546,7 +12164,6 @@ def get_drag_drop_payload_py_id() -> Optional[Payload_PyId]: ################################################## # Manually inserted code (additional methods, etc.) ################################################## -ImFontAtlas.get_tex_data_as_rgba32 = font_atlas_get_tex_data_as_rgba32 # type: ignore # API for imgui_demo.cpp (specific to ImGui Bundle) def set_imgui_demo_window_pos(pos: ImVec2, size: ImVec2, cond: Cond) -> None: diff --git a/blimgui/dist64/imgui_bundle/imgui/backends.pyi b/blimgui/dist64/imgui_bundle/imgui/backends.pyi index e5ebca9..73c2e3b 100644 --- a/blimgui/dist64/imgui_bundle/imgui/backends.pyi +++ b/blimgui/dist64/imgui_bundle/imgui/backends.pyi @@ -33,6 +33,7 @@ def opengl3_destroy_device_objects() -> None: ############################################################################### # bool: diff --git a/blimgui/dist64/imgui_bundle/imgui/internal.pyi b/blimgui/dist64/imgui_bundle/imgui/internal.pyi index d7eb1dd..b021cb1 100644 --- a/blimgui/dist64/imgui_bundle/imgui/internal.pyi +++ b/blimgui/dist64/imgui_bundle/imgui/internal.pyi @@ -5,6 +5,8 @@ import numpy as np from imgui_bundle.imgui import * +ImDrawTextFlags = int + ################################################## # Manually inserted code (typedefs, etc.) 
################################################## @@ -55,6 +57,7 @@ WindowRefreshFlags = int MultiSelectFlags = int LogFlags = int NavRenderCursorFlags = int +ImFontAtlasRectId = int # An identifier to a rectangle in the atlas. -1 when invalid. The rectangle may move, use GetCustomRect() to retrieve it. TypingSelectFlags_None = 0 NavHighlightFlags_None = 0 @@ -79,7 +82,7 @@ KeyRoutingIndex = int ################################################## # // Autogenerated code below! Do not edit! #################### #################### -# dear imgui, v1.91.9b +# dear imgui, v1.92.5 # (internal structures/api) # You may use this file to debug, understand or extend Dear ImGui features but we don't provide any guarantee of forward compatibility. @@ -119,6 +122,7 @@ KeyRoutingIndex = int # // [SECTION] Tab bar, Tab item support # // [SECTION] Table support # // [SECTION] ImGui internal API +# // [SECTION] ImFontLoader # // [SECTION] ImFontAtlas internal API # // [SECTION] Test Engine specific hooks (imgui_test_engine) # @@ -161,7 +165,7 @@ KeyRoutingIndex = int # ----------------------------------------------------------------------------- # Utilities -# (other types which are not forwarded declared are: ImBitArray<>, ImSpan<>, ImSpanAllocator<>, ImPool<>, ImChunkStream<>) +# (other types which are not forwarded declared are: ImBitArray<>, ImSpan<>, ImSpanAllocator<>, ImStableVector<>, ImPool<>, ImChunkStream<>) # ImDrawList/ImFontAtlas @@ -171,7 +175,6 @@ KeyRoutingIndex = int # Use your programming IDE "Go to definition" facility on the names of the center columns to find the actual flags/enum lists. 
# Flags -# -> enum ImGuiWindowRefreshFlags_ // Flags: for SetNextWindowRefreshPolicy() # [ADAPT_IMGUI_BUNDLE] # #ifdef IMGUI_BUNDLE_PYTHON_API # @@ -179,6 +182,8 @@ KeyRoutingIndex = int # # [/ADAPT_IMGUI_BUNDLE] +# Table column indexing + # ----------------------------------------------------------------------------- # [SECTION] Context pointer # See implementation of this variable in imgui.cpp for comments and details. @@ -203,6 +208,9 @@ KeyRoutingIndex = int # We currently don't have many of those so the effect is currently negligible, but onward intent to add more aggressive ones in the code. ##define IMGUI_DEBUG_PARANOID +# #endif +# + # Hint for branch prediction # Enforce cdecl calling convention for functions called by the standard library, in case compilation settings changed the default to e.g. __vectorcall @@ -236,6 +244,7 @@ KeyRoutingIndex = int # - Helper: ImBitArray # - Helper: ImBitVector # - Helper: ImSpan<>, ImSpanAllocator<> +# - Helper: ImStableVector<> # - Helper: ImPool<> # - Helper: ImChunkStream<> # - Helper: ImGuiTextIndex @@ -251,6 +260,10 @@ def im_hash_data(data: Any, data_size: int, seed: ID = 0) -> ID: def im_hash_str(data: str, data_size: int = 0, seed: ID = 0) -> ID: pass +# IMGUI_API const char* ImHashSkipUncontributingPrefix(const char* label); /* original C++ signature */ +def im_hash_skip_uncontributing_prefix(label: str) -> str: + pass + # Helpers: Sorting # IMGUI_API ImU32 ImAlphaBlendColors(ImU32 col_a, ImU32 col_b); /* original C++ signature */ @@ -259,30 +272,35 @@ def im_alpha_blend_colors(col_a: ImU32, col_b: ImU32) -> ImU32: pass # Helpers: Bit manipulation -# static inline bool ImIsPowerOfTwo(int v) { return v != 0 && (v & (v - 1)) == 0; } /* original C++ signature */ +# inline bool ImIsPowerOfTwo(int v) { return v != 0 && (v & (v - 1)) == 0; } /* original C++ signature */ @overload def im_is_power_of_two(v: int) -> bool: """(private API)""" pass -# static inline bool ImIsPowerOfTwo(ImU64 v) { return v != 0 && (v & (v - 
1)) == 0; } /* original C++ signature */ +# inline bool ImIsPowerOfTwo(ImU64 v) { return v != 0 && (v & (v - 1)) == 0; } /* original C++ signature */ @overload def im_is_power_of_two(v: ImU64) -> bool: """(private API)""" pass -# static inline int ImUpperPowerOfTwo(int v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } /* original C++ signature */ +# inline int ImUpperPowerOfTwo(int v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } /* original C++ signature */ def im_upper_power_of_two(v: int) -> int: """(private API)""" pass -# static inline unsigned int ImCountSetBits(unsigned int v) { unsigned int count = 0; while (v > 0) { v = v & (v - 1); count++; } return count; } /* original C++ signature */ +# inline unsigned int ImCountSetBits(unsigned int v) { unsigned int count = 0; while (v > 0) { v = v & (v - 1); count++; } return count; } /* original C++ signature */ def im_count_set_bits(v: int) -> int: """(private API)""" pass # Helpers: String -# static inline bool ImCharIsBlankW(unsigned int c) { return c == ' ' || c == '\t' || c == 0x3000; } /* original C++ signature */ +# IMGUI_API void* ImMemdup(const void* src, size_t size); /* original C++ signature */ +def im_memdup(src: Any, size: int) -> Any: + """Duplicate a chunk of memory.""" + pass + +# inline bool ImCharIsBlankW(unsigned int c) { return c == ' ' || c == '\t' || c == 0x3000; } /* original C++ signature */ def im_char_is_blank_w(c: int) -> bool: """(private API)""" pass @@ -290,16 +308,46 @@ def im_char_is_blank_w(c: int) -> bool: # Helpers: Formatting # Helpers: UTF-8 <> wchar conversions -# IMGUI_API const char* ImTextFindPreviousUtf8Codepoint(const char* in_text_start, const char* in_text_curr); /* original C++ signature */ -def im_text_find_previous_utf8_codepoint(in_text_start: str, in_text_curr: str) -> str: +# IMGUI_API const char* ImTextFindPreviousUtf8Codepoint(const char* in_text_start, const char* in_p); /* 
original C++ signature */ +def im_text_find_previous_utf8_codepoint(in_text_start: str, in_p: str) -> str: """return previous UTF-8 code-point.""" pass +# IMGUI_API const char* ImTextFindValidUtf8CodepointEnd(const char* in_text_start, const char* in_text_end, const char* in_p); /* original C++ signature */ +def im_text_find_valid_utf8_codepoint_end(in_text_start: str, in_text_end: str, in_p: str) -> str: + """return previous UTF-8 code-point if 'in_p' is not the end of a valid one.""" + pass + # IMGUI_API int ImTextCountLines(const char* in_text, const char* in_text_end); /* original C++ signature */ def im_text_count_lines(in_text: str, in_text_end: str) -> int: + """return number of lines taken by text. trailing carriage return doesn't count as an extra line.""" + pass + +class ImDrawTextFlags_(enum.IntFlag): + """Helpers: High-level text functions (DO NOT USE!!! THIS IS A MINIMAL SUBSET OF LARGER UPCOMING CHANGES)""" + + # ImDrawTextFlags_None = 0, /* original C++ signature */ + none = enum.auto() # (= 0) + # ImDrawTextFlags_CpuFineClip = 1 << 0, /* original C++ signature */ + cpu_fine_clip = ( + enum.auto() + ) # (= 1 << 0) # Must be == 1/True for legacy with 'bool cpu_fine_clip' arg to RenderText() + # ImDrawTextFlags_WrapKeepBlanks = 1 << 1, /* original C++ signature */ + wrap_keep_blanks = enum.auto() # (= 1 << 1) + # ImDrawTextFlags_StopOnNewLine = 1 << 2, /* original C++ signature */ + # } + stop_on_new_line = enum.auto() # (= 1 << 2) + +# IMGUI_API const char* ImFontCalcWordWrapPositionEx(ImFont* font, float size, const char* text, const char* text_end, float wrap_width, ImDrawTextFlags flags = 0); /* original C++ signature */ +def im_font_calc_word_wrap_position_ex( + font: ImFont, size: float, text: str, text_end: str, wrap_width: float, flags: ImDrawTextFlags = 0 +) -> str: pass -# return number of lines taken by text. trailing carriage return doesn't count as an extra line. 
+# IMGUI_API const char* ImTextCalcWordWrapNextLineStart(const char* text, const char* text_end, ImDrawTextFlags flags = 0); /* original C++ signature */ +def im_text_calc_word_wrap_next_line_start(text: str, text_end: str, flags: ImDrawTextFlags = 0) -> str: + """trim trailing space and find beginning of next line""" + pass # Helpers: File System @@ -307,77 +355,77 @@ def im_text_count_lines(in_text: str, in_text_end: str) -> int: # - ImMin/ImMax/ImClamp/ImLerp/ImSwap are used by widgets which support variety of types: signed/unsigned int/long long float/double # (Exceptionally using templates here but we could also redefine them for those types) # - Misc maths helpers -# static inline ImVec2 ImMin(const ImVec2& lhs, const ImVec2& rhs) { return ImVec2(lhs.x < rhs.x ? lhs.x : rhs.x, lhs.y < rhs.y ? lhs.y : rhs.y); } /* original C++ signature */ +# inline ImVec2 ImMin(const ImVec2& lhs, const ImVec2& rhs) { return ImVec2(lhs.x < rhs.x ? lhs.x : rhs.x, lhs.y < rhs.y ? lhs.y : rhs.y); } /* original C++ signature */ @overload def im_min(lhs: ImVec2Like, rhs: ImVec2Like) -> ImVec2: """(private API)""" pass -# static inline ImVec2 ImMax(const ImVec2& lhs, const ImVec2& rhs) { return ImVec2(lhs.x >= rhs.x ? lhs.x : rhs.x, lhs.y >= rhs.y ? lhs.y : rhs.y); } /* original C++ signature */ +# inline ImVec2 ImMax(const ImVec2& lhs, const ImVec2& rhs) { return ImVec2(lhs.x >= rhs.x ? lhs.x : rhs.x, lhs.y >= rhs.y ? lhs.y : rhs.y); } /* original C++ signature */ @overload def im_max(lhs: ImVec2Like, rhs: ImVec2Like) -> ImVec2: """(private API)""" pass -# static inline ImVec2 ImClamp(const ImVec2& v, const ImVec2&mn, const ImVec2&mx) { return ImVec2((v.x < mn.x) ? mn.x : (v.x > mx.x) ? mx.x : v.x, (v.y < mn.y) ? mn.y : (v.y > mx.y) ? mx.y : v.y); } /* original C++ signature */ +# inline ImVec2 ImClamp(const ImVec2& v, const ImVec2&mn, const ImVec2&mx){ return ImVec2((v.x < mn.x) ? mn.x : (v.x > mx.x) ? mx.x : v.x, (v.y < mn.y) ? mn.y : (v.y > mx.y) ? 
mx.y : v.y); } /* original C++ signature */ @overload def im_clamp(v: ImVec2Like, mn: ImVec2Like, mx: ImVec2Like) -> ImVec2: """(private API)""" pass -# static inline ImVec2 ImLerp(const ImVec2& a, const ImVec2& b, float t) { return ImVec2(a.x + (b.x - a.x) * t, a.y + (b.y - a.y) * t); } /* original C++ signature */ +# inline ImVec2 ImLerp(const ImVec2& a, const ImVec2& b, float t) { return ImVec2(a.x + (b.x - a.x) * t, a.y + (b.y - a.y) * t); } /* original C++ signature */ @overload def im_lerp(a: ImVec2Like, b: ImVec2Like, t: float) -> ImVec2: """(private API)""" pass -# static inline ImVec2 ImLerp(const ImVec2& a, const ImVec2& b, const ImVec2& t) { return ImVec2(a.x + (b.x - a.x) * t.x, a.y + (b.y - a.y) * t.y); } /* original C++ signature */ +# inline ImVec2 ImLerp(const ImVec2& a, const ImVec2& b, const ImVec2& t) { return ImVec2(a.x + (b.x - a.x) * t.x, a.y + (b.y - a.y) * t.y); } /* original C++ signature */ @overload def im_lerp(a: ImVec2Like, b: ImVec2Like, t: ImVec2Like) -> ImVec2: """(private API)""" pass -# static inline ImVec4 ImLerp(const ImVec4& a, const ImVec4& b, float t) { return ImVec4(a.x + (b.x - a.x) * t, a.y + (b.y - a.y) * t, a.z + (b.z - a.z) * t, a.w + (b.w - a.w) * t); } /* original C++ signature */ +# inline ImVec4 ImLerp(const ImVec4& a, const ImVec4& b, float t) { return ImVec4(a.x + (b.x - a.x) * t, a.y + (b.y - a.y) * t, a.z + (b.z - a.z) * t, a.w + (b.w - a.w) * t); } /* original C++ signature */ @overload def im_lerp(a: ImVec4Like, b: ImVec4Like, t: float) -> ImVec4: """(private API)""" pass -# static inline float ImSaturate(float f) { return (f < 0.0f) ? 0.0f : (f > 1.0f) ? 1.0f : f; } /* original C++ signature */ +# inline float ImSaturate(float f) { return (f < 0.0f) ? 0.0f : (f > 1.0f) ? 
1.0f : f; } /* original C++ signature */ def im_saturate(f: float) -> float: """(private API)""" pass -# static inline float ImLengthSqr(const ImVec2& lhs) { return (lhs.x * lhs.x) + (lhs.y * lhs.y); } /* original C++ signature */ +# inline float ImLengthSqr(const ImVec2& lhs) { return (lhs.x * lhs.x) + (lhs.y * lhs.y); } /* original C++ signature */ @overload def im_length_sqr(lhs: ImVec2Like) -> float: """(private API)""" pass -# static inline float ImLengthSqr(const ImVec4& lhs) { return (lhs.x * lhs.x) + (lhs.y * lhs.y) + (lhs.z * lhs.z) + (lhs.w * lhs.w); } /* original C++ signature */ +# inline float ImLengthSqr(const ImVec4& lhs) { return (lhs.x * lhs.x) + (lhs.y * lhs.y) + (lhs.z * lhs.z) + (lhs.w * lhs.w); } /* original C++ signature */ @overload def im_length_sqr(lhs: ImVec4Like) -> float: """(private API)""" pass -# static inline float ImInvLength(const ImVec2& lhs, float fail_value) { float d = (lhs.x * lhs.x) + (lhs.y * lhs.y); if (d > 0.0f) return ImRsqrt(d); return fail_value; } /* original C++ signature */ +# inline float ImInvLength(const ImVec2& lhs, float fail_value) { float d = (lhs.x * lhs.x) + (lhs.y * lhs.y); if (d > 0.0f) return ImRsqrt(d); return fail_value; } /* original C++ signature */ def im_inv_length(lhs: ImVec2Like, fail_value: float) -> float: """(private API)""" pass -# static inline float ImTrunc(float f) { return (float)(int)(f); } /* original C++ signature */ +# inline float ImTrunc(float f) { return (float)(int)(f); } /* original C++ signature */ @overload def im_trunc(f: float) -> float: """(private API)""" pass -# static inline ImVec2 ImTrunc(const ImVec2& v) { return ImVec2((float)(int)(v.x), (float)(int)(v.y)); } /* original C++ signature */ +# inline ImVec2 ImTrunc(const ImVec2& v) { return ImVec2((float)(int)(v.x), (float)(int)(v.y)); } /* original C++ signature */ @overload def im_trunc(v: ImVec2Like) -> ImVec2: """(private API)""" pass -# static inline float ImFloor(float f) { return (float)((f >= 0 || (float)(int)f == 
f) ? (int)f : (int)f - 1); } /* original C++ signature */ +# inline float ImFloor(float f) { return (float)((f >= 0 || (float)(int)f == f) ? (int)f : (int)f - 1); } /* original C++ signature */ @overload def im_floor(f: float) -> float: """(private API) @@ -386,48 +434,58 @@ def im_floor(f: float) -> float: """ pass -# static inline ImVec2 ImFloor(const ImVec2& v) { return ImVec2(ImFloor(v.x), ImFloor(v.y)); } /* original C++ signature */ +# inline ImVec2 ImFloor(const ImVec2& v) { return ImVec2(ImFloor(v.x), ImFloor(v.y)); } /* original C++ signature */ @overload def im_floor(v: ImVec2Like) -> ImVec2: """(private API)""" pass -# static inline int ImModPositive(int a, int b) { return (a + b) % b; } /* original C++ signature */ +# inline float ImTrunc64(float f) { return (float)(ImS64)(f); } /* original C++ signature */ +def im_trunc64(f: float) -> float: + """(private API)""" + pass + +# inline float ImRound64(float f) { return (float)(ImS64)(f + 0.5f); } /* original C++ signature */ +def im_round64(f: float) -> float: + """(private API)""" + pass + +# inline int ImModPositive(int a, int b) { return (a + b) % b; } /* original C++ signature */ def im_mod_positive(a: int, b: int) -> int: """(private API)""" pass -# static inline float ImDot(const ImVec2& a, const ImVec2& b) { return a.x * b.x + a.y * b.y; } /* original C++ signature */ +# inline float ImDot(const ImVec2& a, const ImVec2& b) { return a.x * b.x + a.y * b.y; } /* original C++ signature */ def im_dot(a: ImVec2Like, b: ImVec2Like) -> float: """(private API)""" pass -# static inline ImVec2 ImRotate(const ImVec2& v, float cos_a, float sin_a) { return ImVec2(v.x * cos_a - v.y * sin_a, v.x * sin_a + v.y * cos_a); } /* original C++ signature */ +# inline ImVec2 ImRotate(const ImVec2& v, float cos_a, float sin_a) { return ImVec2(v.x * cos_a - v.y * sin_a, v.x * sin_a + v.y * cos_a); } /* original C++ signature */ def im_rotate(v: ImVec2Like, cos_a: float, sin_a: float) -> ImVec2: """(private API)""" pass -# 
static inline float ImLinearSweep(float current, float target, float speed) { if (current < target) return ImMin(current + speed, target); if (current > target) return ImMax(current - speed, target); return current; } /* original C++ signature */ +# inline float ImLinearSweep(float current, float target, float speed) { if (current < target) return ImMin(current + speed, target); if (current > target) return ImMax(current - speed, target); return current; } /* original C++ signature */ def im_linear_sweep(current: float, target: float, speed: float) -> float: """(private API)""" pass -# static inline float ImLinearRemapClamp(float s0, float s1, float d0, float d1, float x) { return ImSaturate((x - s0) / (s1 - s0)) * (d1 - d0) + d0; } /* original C++ signature */ +# inline float ImLinearRemapClamp(float s0, float s1, float d0, float d1, float x) { return ImSaturate((x - s0) / (s1 - s0)) * (d1 - d0) + d0; } /* original C++ signature */ def im_linear_remap_clamp(s0: float, s1: float, d0: float, d1: float, x: float) -> float: """(private API)""" pass -# static inline ImVec2 ImMul(const ImVec2& lhs, const ImVec2& rhs) { return ImVec2(lhs.x * rhs.x, lhs.y * rhs.y); } /* original C++ signature */ +# inline ImVec2 ImMul(const ImVec2& lhs, const ImVec2& rhs) { return ImVec2(lhs.x * rhs.x, lhs.y * rhs.y); } /* original C++ signature */ def im_mul(lhs: ImVec2Like, rhs: ImVec2Like) -> ImVec2: """(private API)""" pass -# static inline bool ImIsFloatAboveGuaranteedIntegerPrecision(float f) { return f <= -16777216 || f >= 16777216; } /* original C++ signature */ +# inline bool ImIsFloatAboveGuaranteedIntegerPrecision(float f) { return f <= -16777216 || f >= 16777216; } /* original C++ signature */ def im_is_float_above_guaranteed_integer_precision(f: float) -> bool: """(private API)""" pass -# static inline float ImExponentialMovingAverage(float avg, float sample, int n) { avg -= avg / n; avg += sample / n; return avg; } /* original C++ signature */ +# inline float 
ImExponentialMovingAverage(float avg, float sample, int n){ avg -= avg / n; avg += sample / n; return avg; } /* original C++ signature */ def im_exponential_moving_average(avg: float, sample: float, n: int) -> float: """(private API)""" pass @@ -495,6 +553,22 @@ class ImVec1: def __init__(self, _x: float) -> None: pass +class ImVec2i: + """Helper: ImVec2i (2D vector, integer)""" + + # int x, /* original C++ signature */ + x: int + # y; /* original C++ signature */ + y: int + # constexpr ImVec2i() : x(0), y(0) {} /* original C++ signature */ + @overload + def __init__(self) -> None: + pass + # constexpr ImVec2i(int _x, int _y) : x(_x), y(_y) {} /* original C++ signature */ + @overload + def __init__(self, _x: int, _y: int) -> None: + pass + class ImVec2ih: """Helper: ImVec2ih (2D vector, half-size integer, for long-term packed storage)""" @@ -665,6 +739,10 @@ class ImRect: def to_vec4(self) -> ImVec4: """(private API)""" pass + # const ImVec4& AsVec4() const { return *(const ImVec4*)&Min.x; } /* original C++ signature */ + def as_vec4(self) -> ImVec4: + """(private API)""" + pass # Helper: ImBitArray # inline size_t ImBitArrayGetStorageSizeInBytes(int bitcount) { return (size_t)((bitcount + 31) >> 5) << 2; } /* original C++ signature */ @@ -753,24 +831,24 @@ class TextIndex: Maintain a line index for a text buffer. This is a strong candidate to be moved into the public API. """ - # ImVector LineOffsets; /* original C++ signature */ - line_offsets: ImVector_int + # ImVector Offsets; /* original C++ signature */ + offsets: ImVector_int # int EndOffset = 0; /* original C++ signature */ end_offset: int = 0 # Because we don't own text buffer we need to maintain EndOffset (may bake in LineOffsets?) 
- # void clear() { LineOffsets.clear(); EndOffset = 0; } /* original C++ signature */ + # void clear() { Offsets.clear(); EndOffset = 0; } /* original C++ signature */ def clear(self) -> None: """(private API)""" pass - # int size() { return LineOffsets.Size; } /* original C++ signature */ + # int size() { return Offsets.Size; } /* original C++ signature */ def size(self) -> int: """(private API)""" pass - # const char* get_line_begin(const char* base, int n) { return base + LineOffsets[n]; } /* original C++ signature */ + # const char* get_line_begin(const char* base, int n) { return base + (Offsets.Size != 0 ? Offsets[n] : 0); } /* original C++ signature */ def get_line_begin(self, base: str, n: int) -> str: """(private API)""" pass - # const char* get_line_end(const char* base, int n) { return base + (n + 1 < LineOffsets.Size ? (LineOffsets[n + 1] - 1) : EndOffset); } /* original C++ signature */ + # const char* get_line_end(const char* base, int n) { return base + (n + 1 < Offsets.Size ? 
(Offsets[n + 1] - 1) : EndOffset); } /* original C++ signature */ def get_line_end(self, base: str, n: int) -> str: """(private API)""" pass @@ -778,13 +856,13 @@ class TextIndex: def append(self, base: str, old_size: int, new_size: int) -> None: """(private API)""" pass - # ImGuiTextIndex(ImVector LineOffsets = ImVector(), int EndOffset = 0); /* original C++ signature */ - def __init__(self, line_offsets: Optional[ImVector_int] = None, end_offset: int = 0) -> None: + # ImGuiTextIndex(ImVector Offsets = ImVector(), int EndOffset = 0); /* original C++ signature */ + def __init__(self, offsets: Optional[ImVector_int] = None, end_offset: int = 0) -> None: """Auto-generated default constructor with named params Python bindings defaults: - If LineOffsets is None, then its default value will be: ImVector_int() + If Offsets is None, then its default value will be: ImVector_int() """ pass @@ -820,15 +898,17 @@ class ImDrawListSharedData: """ # ImVec2 TexUvWhitePixel; /* original C++ signature */ - tex_uv_white_pixel: ImVec2 # UV of white pixel in the atlas + tex_uv_white_pixel: ImVec2 # UV of white pixel in the atlas (== FontAtlas->TexUvWhitePixel) # const ImVec4* TexUvLines; /* original C++ signature */ - tex_uv_lines: ImVec4 # UV of anti-aliased lines in the atlas # (const) + tex_uv_lines: ImVec4 # UV of anti-aliased lines in the atlas (== FontAtlas->TexUvLines) # (const) + # ImFontAtlas* FontAtlas; /* original C++ signature */ + font_atlas: ImFontAtlas # Current font atlas # ImFont* Font; /* original C++ signature */ - font: ImFont # Current/default font (optional, for simplified AddText overload) + font: ImFont # Current font (used for simplified AddText overload) # float FontSize; /* original C++ signature */ - font_size: float # Current/default font size (optional, for simplified AddText overload) + font_size: float # Current font size (used for for simplified AddText overload) # float FontScale; /* original C++ signature */ - font_scale: float # Current/default font 
scale (== FontSize / Font->FontSize) + font_scale: float # Current font scale (== FontSize / Font->FontSize) # float CurveTessellationTol; /* original C++ signature */ curve_tessellation_tol: float # Tessellation tolerance when using PathBezierCurveTo() # float CircleSegmentMaxError; /* original C++ signature */ @@ -841,6 +921,10 @@ class ImDrawListSharedData: clip_rect_fullscreen: ImVec4 # Value for PushClipRectFullscreen() # ImVector TempBuffer; /* original C++ signature */ temp_buffer: ImVector_ImVec2 # Temporary write buffer + # ImVector DrawLists; /* original C++ signature */ + draw_lists: ImVector_ImDrawList_ptr # All draw lists associated to this ImDrawListSharedData + # ImGuiContext* Context; /* original C++ signature */ + context: Context # [OPTIONAL] Link to Dear ImGui context. 99% of ImDrawList/ImFontAtlas can function without an ImGui context, but this facilitate handling one legacy edge case. # Lookup tables # float ArcFastRadiusCutoff; /* original C++ signature */ @@ -866,6 +950,18 @@ class ImDrawDataBuilder: def __init__(self) -> None: pass +class ImFontStackData: + # ImFont* Font; /* original C++ signature */ + font: ImFont + # float FontSizeBeforeScaling; /* original C++ signature */ + font_size_before_scaling: float # ~~ style.FontSizeBase + # float FontSizeAfterScaling; /* original C++ signature */ + font_size_after_scaling: float # ~~ g.FontSize + # ImFontStackData(float FontSizeBeforeScaling = float(), float FontSizeAfterScaling = float()); /* original C++ signature */ + def __init__(self, font_size_before_scaling: float = float(), font_size_after_scaling: float = float()) -> None: + """Auto-generated default constructor with named params""" + pass + # ----------------------------------------------------------------------------- # [SECTION] Style support # ----------------------------------------------------------------------------- @@ -945,7 +1041,7 @@ class DataTypeInfo: """Auto-generated default constructor with named params""" pass -class 
DataTypePrivate_(enum.Enum): +class DataTypePrivate_(enum.IntFlag): """Extend ImGuiDataType_""" # ImGuiDataType_Pointer = ImGuiDataType_COUNT, /* original C++ signature */ @@ -958,7 +1054,7 @@ class DataTypePrivate_(enum.Enum): # [SECTION] Widgets support: flags, enums, data structures # ----------------------------------------------------------------------------- -class ItemFlagsPrivate_(enum.Enum): +class ItemFlagsPrivate_(enum.IntFlag): """Extend ImGuiItemFlags - input: PushItemFlag() manipulates g.CurrentItemFlags, g.NextItemData.ItemFlags, ItemAdd() calls may add extra flags too. - output: stored in g.LastItemData.ItemFlags @@ -989,6 +1085,10 @@ class ItemFlagsPrivate_(enum.Enum): ) # (= 1 << 15) # False // Nav keyboard/gamepad mode doesn't disable hover highlight (behave as if NavHighlightItemUnderNav==False). # ImGuiItemFlags_NoMarkEdited = 1 << 16, /* original C++ signature */ no_mark_edited = enum.auto() # (= 1 << 16) # False // Skip calling MarkItemEdited() + # ImGuiItemFlags_NoFocus = 1 << 17, /* original C++ signature */ + no_focus = ( + enum.auto() + ) # (= 1 << 17) # False // [EXPERIMENTAL: Not very well specced] Clicking doesn't take focus. Automatically sets ImGuiButtonFlags_NoFocus + ImGuiButtonFlags_NoNavFocus in ButtonBehavior(). # Controlled by widget code # ImGuiItemFlags_Inputable = 1 << 20, /* original C++ signature */ @@ -1006,7 +1106,7 @@ class ItemFlagsPrivate_(enum.Enum): # Obsolete # ImGuiItemFlags_SelectableDontClosePopup = !ImGuiItemFlags_AutoClosePopups, // Can't have a redirect as we inverted the behavior -class ItemStatusFlags_(enum.Enum): +class ItemStatusFlags_(enum.IntFlag): """Status flags for an already submitted item - output: stored in g.LastItemData.StatusFlags """ @@ -1047,10 +1147,11 @@ class ItemStatusFlags_(enum.Enum): has_shortcut = ( enum.auto() ) # (= 1 << 10) # g.LastItemData.Shortcut valid. Set by SetNextItemShortcut() -> ItemAdd(). + # ImGuiItemStatusFlags_FocusedByTabbing = 1 << 8, // Removed IN 1.90.1 (Dec 2023). 
The trigger is part of g.NavActivateId. See commit 54c1bdeceb. # Additional status + semantic for ImGuiTestEngine -class HoveredFlagsPrivate_(enum.Enum): +class HoveredFlagsPrivate_(enum.IntFlag): """Extend ImGuiHoveredFlags_""" # ImGuiHoveredFlags_DelayMask_ = ImGuiHoveredFlags_DelayNone | ImGuiHoveredFlags_DelayShort | ImGuiHoveredFlags_DelayNormal | ImGuiHoveredFlags_NoSharedDelay, /* original C++ signature */ @@ -1067,7 +1168,7 @@ class HoveredFlagsPrivate_(enum.Enum): enum.auto() ) # (= HoveredFlags_AllowWhenBlockedByPopup | HoveredFlags_AllowWhenBlockedByActiveItem | HoveredFlags_AllowWhenOverlapped | HoveredFlags_AllowWhenDisabled | HoveredFlags_NoNavOverride | HoveredFlags_ForTooltip | HoveredFlags_Stationary | HoveredFlags_DelayMask_) -class InputTextFlagsPrivate_(enum.Enum): +class InputTextFlagsPrivate_(enum.IntFlag): """Extend ImGuiInputTextFlags_""" # [Internal] @@ -1080,7 +1181,7 @@ class InputTextFlagsPrivate_(enum.Enum): # ImGuiInputTextFlags_LocalizeDecimalPoint= 1 << 28, /* original C++ signature */ localize_decimal_point = enum.auto() # (= 1 << 28) # For internal use by InputScalar() and TempInputScalar() -class ButtonFlagsPrivate_(enum.Enum): +class ButtonFlagsPrivate_(enum.IntFlag): """Extend ImGuiButtonFlags_""" # ImGuiButtonFlags_PressedOnClick = 1 << 4, /* original C++ signature */ @@ -1134,21 +1235,25 @@ class ButtonFlagsPrivate_(enum.Enum): no_test_key_owner = ( enum.auto() ) # (= 1 << 21) # don't test key/input owner when polling the key (note: mouse buttons are keys! often, the key in question will be ImGuiKey_MouseLeft!) + # ImGuiButtonFlags_NoFocus = 1 << 22, /* original C++ signature */ + no_focus = ( + enum.auto() + ) # (= 1 << 22) # [EXPERIMENTAL: Not very well specced]. Don't focus parent window when clicking. 
# ImGuiButtonFlags_PressedOnMask_ = ImGuiButtonFlags_PressedOnClick | ImGuiButtonFlags_PressedOnClickRelease | ImGuiButtonFlags_PressedOnClickReleaseAnywhere | ImGuiButtonFlags_PressedOnRelease | ImGuiButtonFlags_PressedOnDoubleClick | ImGuiButtonFlags_PressedOnDragDropHold, /* original C++ signature */ pressed_on_mask_ = ( enum.auto() ) # (= ButtonFlags_PressedOnClick | ButtonFlags_PressedOnClickRelease | ButtonFlags_PressedOnClickReleaseAnywhere | ButtonFlags_PressedOnRelease | ButtonFlags_PressedOnDoubleClick | ButtonFlags_PressedOnDragDropHold) # ImGuiButtonFlags_PressedOnDefault_ = ImGuiButtonFlags_PressedOnClickRelease, /* original C++ signature */ - # } pressed_on_default_ = enum.auto() # (= ButtonFlags_PressedOnClickRelease) + # ImGuiButtonFlags_NoKeyModifiers = ImGuiButtonFlags_NoKeyModsAllowed, // Renamed in 1.91.4 -class ComboFlagsPrivate_(enum.Enum): +class ComboFlagsPrivate_(enum.IntFlag): """Extend ImGuiComboFlags_""" # ImGuiComboFlags_CustomPreview = 1 << 20, /* original C++ signature */ custom_preview = enum.auto() # (= 1 << 20) # enable BeginComboPreview() -class SliderFlagsPrivate_(enum.Enum): +class SliderFlagsPrivate_(enum.IntFlag): """Extend ImGuiSliderFlags_""" # ImGuiSliderFlags_Vertical = 1 << 20, /* original C++ signature */ @@ -1158,16 +1263,12 @@ class SliderFlagsPrivate_(enum.Enum): enum.auto() ) # (= 1 << 21) # Consider using g.NextItemData.ItemFlags |= ImGuiItemFlags_ReadOnly instead. -class SelectableFlagsPrivate_(enum.Enum): +class SelectableFlagsPrivate_(enum.IntFlag): """Extend ImGuiSelectableFlags_""" # NB: need to be in sync with last value of ImGuiSelectableFlags_ # ImGuiSelectableFlags_NoHoldingActiveID = 1 << 20, /* original C++ signature */ no_holding_active_id = enum.auto() # (= 1 << 20) - # ImGuiSelectableFlags_SelectOnNav = 1 << 21, /* original C++ signature */ - select_on_nav = ( - enum.auto() - ) # (= 1 << 21) # (WIP) Auto-select when moved into. 
This is not exposed in public API as to handle multi-select and modifiers we will need user to explicitly control focus scope. May be replaced with a BeginSelection() API. # ImGuiSelectableFlags_SelectOnClick = 1 << 22, /* original C++ signature */ select_on_click = ( enum.auto() @@ -1189,9 +1290,11 @@ class SelectableFlagsPrivate_(enum.Enum): enum.auto() ) # (= 1 << 27) # Don't set key/input owner on the initial click (note: mouse buttons are keys! often, the key in question will be ImGuiKey_MouseLeft!) -class TreeNodeFlagsPrivate_(enum.Enum): +class TreeNodeFlagsPrivate_(enum.IntFlag): """Extend ImGuiTreeNodeFlags_""" + # ImGuiTreeNodeFlags_NoNavFocus = 1 << 27, /* original C++ signature */ + no_nav_focus = enum.auto() # (= 1 << 27) # Don't claim nav focus when interacting with this item (#8551) # ImGuiTreeNodeFlags_ClipLabelForTrailingButton = 1 << 28, /* original C++ signature */ clip_label_for_trailing_button = enum.auto() # (= 1 << 28) # FIXME-WIP: Hard-coded for CollapsingHeader() # ImGuiTreeNodeFlags_UpsideDownArrow = 1 << 29, /* original C++ signature */ @@ -1199,10 +1302,14 @@ class TreeNodeFlagsPrivate_(enum.Enum): enum.auto() ) # (= 1 << 29) # FIXME-WIP: Turn Down arrow into an Up arrow, for reversed trees (#6517) # ImGuiTreeNodeFlags_OpenOnMask_ = ImGuiTreeNodeFlags_OpenOnDoubleClick | ImGuiTreeNodeFlags_OpenOnArrow, /* original C++ signature */ - # } open_on_mask_ = enum.auto() # (= TreeNodeFlags_OpenOnDoubleClick | TreeNodeFlags_OpenOnArrow) + # ImGuiTreeNodeFlags_DrawLinesMask_ = ImGuiTreeNodeFlags_DrawLinesNone | ImGuiTreeNodeFlags_DrawLinesFull | ImGuiTreeNodeFlags_DrawLinesToNodes, /* original C++ signature */ + # } + draw_lines_mask_ = ( + enum.auto() + ) # (= TreeNodeFlags_DrawLinesNone | TreeNodeFlags_DrawLinesFull | TreeNodeFlags_DrawLinesToNodes) -class SeparatorFlags_(enum.Enum): +class SeparatorFlags_(enum.IntFlag): # ImGuiSeparatorFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiSeparatorFlags_Horizontal = 1 
<< 0, /* original C++ signature */ @@ -1214,7 +1321,7 @@ class SeparatorFlags_(enum.Enum): # ImGuiSeparatorFlags_SpanAllColumns = 1 << 2, /* original C++ signature */ span_all_columns = enum.auto() # (= 1 << 2) # Make separator cover all columns of a legacy Columns() set. -class FocusRequestFlags_(enum.Enum): +class FocusRequestFlags_(enum.IntFlag): """Flags for FocusWindow(). This is not called ImGuiFocusFlags to avoid confusion with public-facing ImGuiFocusedFlags. FIXME: Once we finishing replacing more uses of GetTopMostPopupModal()+IsWindowWithinBeginStackOf() and FindBlockingModal() with this, we may want to change the flag to be opt-out instead of opt-in. @@ -1227,20 +1334,20 @@ class FocusRequestFlags_(enum.Enum): # ImGuiFocusRequestFlags_UnlessBelowModal = 1 << 1, /* original C++ signature */ unless_below_modal = enum.auto() # (= 1 << 1) # Do not set focus if the window is below a modal. -class TextFlags_(enum.Enum): +class TextFlags_(enum.IntFlag): # ImGuiTextFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiTextFlags_NoWidthForLargeClippedText = 1 << 0, /* original C++ signature */ # } no_width_for_large_clipped_text = enum.auto() # (= 1 << 0) -class TooltipFlags_(enum.Enum): +class TooltipFlags_(enum.IntFlag): # ImGuiTooltipFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiTooltipFlags_OverridePrevious = 1 << 1, /* original C++ signature */ override_previous = enum.auto() # (= 1 << 1) # Clear/ignore previously submitted tooltip (defaults to append) -class LayoutType_(enum.Enum): +class LayoutType_(enum.IntFlag): """FIXME: this is in development, not exposed/functional as a generic feature yet. 
Horizontal/Vertical enums are fixed to 0/1 so they may be used to index ImVec2 """ @@ -1251,7 +1358,7 @@ class LayoutType_(enum.Enum): # } vertical = enum.auto() # (= 1) -class LogFlags_(enum.Enum): +class LogFlags_(enum.IntFlag): """Flags for LogBegin() text capturing function""" # ImGuiLogFlags_None = 0, /* original C++ signature */ @@ -1271,7 +1378,7 @@ class LogFlags_(enum.Enum): enum.auto() ) # (= LogFlags_OutputTTY | LogFlags_OutputFile | LogFlags_OutputBuffer | LogFlags_OutputClipboard) -class Axis(enum.Enum): +class Axis(enum.IntFlag): """X/Y enums are fixed to 0/1 so they may be used to index ImVec2""" # ImGuiAxis_None = -1, /* original C++ signature */ @@ -1282,7 +1389,7 @@ class Axis(enum.Enum): # } y = enum.auto() # (= 1) -class PlotType(enum.Enum): +class PlotType(enum.IntFlag): # ImGuiPlotType_Lines, /* original C++ signature */ lines = enum.auto() # (= 0) # ImGuiPlotType_Histogram, /* original C++ signature */ @@ -1330,6 +1437,8 @@ class GroupData: backup_curr_line_text_base_offset: float # ImGuiID BackupActiveIdIsAlive; /* original C++ signature */ backup_active_id_is_alive: ID + # bool BackupActiveIdHasBeenEditedThisFrame; /* original C++ signature */ + backup_active_id_has_been_edited_this_frame: bool # bool BackupDeactivatedIdIsAlive; /* original C++ signature */ backup_deactivated_id_is_alive: bool # bool BackupHoveredIdIsAlive; /* original C++ signature */ @@ -1338,7 +1447,7 @@ class GroupData: backup_is_same_line: bool # bool EmitItem; /* original C++ signature */ emit_item: bool - # ImGuiGroupData(ImGuiID WindowID = ImGuiID(), ImVec2 BackupCursorPos = ImVec2(), ImVec2 BackupCursorMaxPos = ImVec2(), ImVec2 BackupCursorPosPrevLine = ImVec2(), ImVec1 BackupIndent = ImVec1(), ImVec1 BackupGroupOffset = ImVec1(), ImVec2 BackupCurrLineSize = ImVec2(), float BackupCurrLineTextBaseOffset = float(), ImGuiID BackupActiveIdIsAlive = ImGuiID(), bool BackupDeactivatedIdIsAlive = bool(), bool BackupHoveredIdIsAlive = bool(), bool BackupIsSameLine = bool(), 
bool EmitItem = bool()); /* original C++ signature */ + # ImGuiGroupData(ImGuiID WindowID = ImGuiID(), ImVec2 BackupCursorPos = ImVec2(), ImVec2 BackupCursorMaxPos = ImVec2(), ImVec2 BackupCursorPosPrevLine = ImVec2(), ImVec1 BackupIndent = ImVec1(), ImVec1 BackupGroupOffset = ImVec1(), ImVec2 BackupCurrLineSize = ImVec2(), float BackupCurrLineTextBaseOffset = float(), ImGuiID BackupActiveIdIsAlive = ImGuiID(), bool BackupActiveIdHasBeenEditedThisFrame = bool(), bool BackupDeactivatedIdIsAlive = bool(), bool BackupHoveredIdIsAlive = bool(), bool BackupIsSameLine = bool(), bool EmitItem = bool()); /* original C++ signature */ def __init__( self, window_id: ID = ID(), @@ -1350,6 +1459,7 @@ class GroupData: backup_curr_line_size: Optional[ImVec2Like] = None, backup_curr_line_text_base_offset: float = float(), backup_active_id_is_alive: ID = ID(), + backup_active_id_has_been_edited_this_frame: bool = bool(), backup_deactivated_id_is_alive: bool = bool(), backup_hovered_id_is_alive: bool = bool(), backup_is_same_line: bool = bool(), @@ -1454,10 +1564,16 @@ class InputTextState: scroll: ( ImVec2 # horizontal offset (managed manually) + vertical scrolling (pulled from child window's own Scroll.y) ) + # int LineCount; /* original C++ signature */ + line_count: int # last line count (solely for debugging) + # float WrapWidth; /* original C++ signature */ + wrap_width: float # word-wrapping width # float CursorAnim; /* original C++ signature */ cursor_anim: float # timer for cursor blink, reset on every user action so the cursor reappears immediately # bool CursorFollow; /* original C++ signature */ cursor_follow: bool # set when we want scrolling to follow the current cursor position (not always!) 
+ # bool CursorCenterY; /* original C++ signature */ + cursor_center_y: bool # set when we want scrolling to be centered over the cursor position (while resizing a word-wrapping field) # bool SelectedAllMouseLock; /* original C++ signature */ selected_all_mouse_lock: ( bool # after a double-click to select all, we ignore further mouse drags to update selection @@ -1468,6 +1584,8 @@ class InputTextState: want_reload_user_buf: ( bool # force a reload of user buf so it may be modified externally. may be automatic in future version. ) + # ImS8 LastMoveDirectionLR; /* original C++ signature */ + last_move_direction_lr: ImS8 # ImGuiDir_Left or ImGuiDir_Right. track last movement direction so when cursor cross over a word-wrapping boundaries we can display it on either line depending on last move.s # int ReloadSelectionStart; /* original C++ signature */ reload_selection_start: int # int ReloadSelectionEnd; /* original C++ signature */ @@ -1495,6 +1613,10 @@ class InputTextState: def on_char_pressed(self, c: int) -> None: """(private API)""" pass + # float GetPreferredOffsetX() const; /* original C++ signature */ + def get_preferred_offset_x(self) -> float: + """(private API)""" + pass # Cursor & Selection # void CursorAnimReset(); /* original C++ signature */ def cursor_anim_reset(self) -> None: @@ -1546,7 +1668,7 @@ class InputTextState: """(private API)""" pass -class WindowRefreshFlags_(enum.Enum): +class WindowRefreshFlags_(enum.IntFlag): # ImGuiWindowRefreshFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiWindowRefreshFlags_TryToAvoidRefresh = 1 << 0, /* original C++ signature */ @@ -1559,7 +1681,15 @@ class WindowRefreshFlags_(enum.Enum): refresh_on_focus = enum.auto() # (= 1 << 2) # [EXPERIMENTAL] Always refresh on focus # Refresh policy/frequency, Load Balancing etc. 
-class NextWindowDataFlags_(enum.Enum): +class WindowBgClickFlags_(enum.IntFlag): + # ImGuiWindowBgClickFlags_None = 0, /* original C++ signature */ + none = enum.auto() # (= 0) + # ImGuiWindowBgClickFlags_Move = 1 << 0, /* original C++ signature */ + move = ( + enum.auto() + ) # (= 1 << 0) # Click on bg/None + drag to move window. Cleared by default when using io.ConfigWindowsMoveFromTitleBarOnly. + +class NextWindowDataFlags_(enum.IntFlag): # ImGuiNextWindowDataFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiNextWindowDataFlags_HasPos = 1 << 0, /* original C++ signature */ @@ -1652,7 +1782,7 @@ class NextWindowData: """(private API)""" pass -class NextItemDataFlags_(enum.Enum): +class NextItemDataFlags_(enum.IntFlag): # ImGuiNextItemDataFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiNextItemDataFlags_HasWidth = 1 << 0, /* original C++ signature */ @@ -1733,7 +1863,7 @@ class LastItemData: class TreeNodeStackData: """Store data emitted by TreeNode() for usage by TreePop() - - To implement ImGuiTreeNodeFlags_NavLeftJumpsBackHere: store the minimum amount of data + - To implement ImGuiTreeNodeFlags_NavLeftJumpsToParent: store the minimum amount of data which we can't infer in TreePop(), to perform the equivalent of NavApplyItemToResult(). Only stored when the node is a potential candidate for landing on a Left arrow jump. 
""" @@ -1746,19 +1876,30 @@ class TreeNodeStackData: item_flags: ItemFlags # Used for nav landing # ImRect NavRect; /* original C++ signature */ nav_rect: ImRect # Used for nav landing - # ImGuiTreeNodeStackData(ImGuiID ID = ImGuiID(), ImGuiTreeNodeFlags TreeFlags = ImGuiTreeNodeFlags(), ImGuiItemFlags ItemFlags = ImGuiItemFlags(), ImRect NavRect = ImRect()); /* original C++ signature */ + # float DrawLinesX1; /* original C++ signature */ + draw_lines_x1: float + # float DrawLinesToNodesY2; /* original C++ signature */ + draw_lines_to_nodes_y2: float + # ImGuiTableColumnIdx DrawLinesTableColumn; /* original C++ signature */ + draw_lines_table_column: TableColumnIdx + # ImGuiTreeNodeStackData(ImGuiID ID = ImGuiID(), ImGuiTreeNodeFlags TreeFlags = ImGuiTreeNodeFlags(), ImGuiItemFlags ItemFlags = ImGuiItemFlags(), ImRect NavRect = ImRect(), float DrawLinesX1 = float(), float DrawLinesToNodesY2 = float(), ImGuiTableColumnIdx DrawLinesTableColumn = ImGuiTableColumnIdx()); /* original C++ signature */ def __init__( self, id_: ID = ID(), tree_flags: TreeNodeFlags = TreeNodeFlags(), item_flags: ItemFlags = ItemFlags(), nav_rect: Optional[ImRect] = None, + draw_lines_x1: float = float(), + draw_lines_to_nodes_y2: float = float(), + draw_lines_table_column: Optional[TableColumnIdx] = None, ) -> None: """Auto-generated default constructor with named params Python bindings defaults: - If NavRect is None, then its default value will be: ImRect() + If any of the params below is None, then its default value below will be used: + * NavRect: ImRect() + * DrawLinesTableColumn: TableColumnIdx() """ pass @@ -1872,7 +2013,7 @@ class DeactivatedItemData: # [SECTION] Popup support # ----------------------------------------------------------------------------- -class PopupPositionPolicy(enum.Enum): +class PopupPositionPolicy(enum.IntFlag): # ImGuiPopupPositionPolicy_Default, /* original C++ signature */ default = enum.auto() # (= 0) # ImGuiPopupPositionPolicy_ComboBox, /* original C++ 
signature */ @@ -1913,7 +2054,7 @@ class PopupData: # [Internal] Named shortcuts for Navigation -class InputEventType(enum.Enum): +class InputEventType(enum.IntFlag): # ImGuiInputEventType_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiInputEventType_MousePos, /* original C++ signature */ @@ -1934,7 +2075,7 @@ class InputEventType(enum.Enum): # } count = enum.auto() # (= 8) -class InputSource(enum.Enum): +class InputSource(enum.IntFlag): # ImGuiInputSource_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiInputSource_Mouse, /* original C++ signature */ @@ -2064,16 +2205,16 @@ class KeyRoutingData: next_entry_index: KeyRoutingIndex # ImU16 Mods; /* original C++ signature */ mods: ImU16 # Technically we'd only need 4-bits but for simplify we store ImGuiMod_ values which need 16-bits. - # ImU8 RoutingCurrScore; /* original C++ signature */ - routing_curr_score: ImU8 # [DEBUG] For debug display - # ImU8 RoutingNextScore; /* original C++ signature */ - routing_next_score: ImU8 # Lower is better (0: perfect score) + # ImU16 RoutingCurrScore; /* original C++ signature */ + routing_curr_score: ImU16 # [DEBUG] For debug display + # ImU16 RoutingNextScore; /* original C++ signature */ + routing_next_score: ImU16 # Lower is better (0: perfect score) # ImGuiID RoutingCurr; /* original C++ signature */ routing_curr: ID # ImGuiID RoutingNext; /* original C++ signature */ routing_next: ID - # ImGuiKeyRoutingData() { NextEntryIndex = -1; Mods = 0; RoutingCurrScore = RoutingNextScore = 255; RoutingCurr = RoutingNext = ImGuiKeyOwner_NoOwner; } /* original C++ signature */ + # ImGuiKeyRoutingData() { NextEntryIndex = -1; Mods = 0; RoutingCurrScore = RoutingNextScore = 0; RoutingCurr = RoutingNext = ImGuiKeyOwner_NoOwner; } /* original C++ signature */ def __init__(self) -> None: pass @@ -2115,7 +2256,7 @@ class KeyOwnerData: def __init__(self) -> None: pass -class InputFlagsPrivate_(enum.Enum): +class InputFlagsPrivate_(enum.IntFlag): 
"""Extend ImGuiInputFlags_ Flags for extended versions of IsKeyPressed(), IsMouseClicked(), Shortcut(), SetKeyOwner(), SetItemKeyOwner() Don't mistake with ImGuiInputTextFlags! (which is for ImGui::InputText() function) @@ -2272,7 +2413,7 @@ class ListClipperData: # [SECTION] Navigation support # ----------------------------------------------------------------------------- -class ActivateFlags_(enum.Enum): +class ActivateFlags_(enum.IntFlag): # ImGuiActivateFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiActivateFlags_PreferInput = 1 << 0, /* original C++ signature */ @@ -2288,13 +2429,15 @@ class ActivateFlags_(enum.Enum): enum.auto() ) # (= 1 << 2) # Request widget to preserve state if it can (e.g. InputText will try to preserve cursor/selection) # ImGuiActivateFlags_FromTabbing = 1 << 3, /* original C++ signature */ - from_tabbing = enum.auto() # (= 1 << 3) # Activation requested by a tabbing request + from_tabbing = enum.auto() # (= 1 << 3) # Activation requested by a tabbing request (ImGuiNavMoveFlags_IsTabbing) # ImGuiActivateFlags_FromShortcut = 1 << 4, /* original C++ signature */ from_shortcut = ( enum.auto() ) # (= 1 << 4) # Activation requested by an item shortcut via SetNextItemShortcut() function. 
+ # ImGuiActivateFlags_FromFocusApi = 1 << 5, /* original C++ signature */ + from_focus_api = enum.auto() # (= 1 << 5) # Activation requested by an api request (ImGuiNavMoveFlags_FocusApi) -class ScrollFlags_(enum.Enum): +class ScrollFlags_(enum.IntFlag): """Early work-in-progress API for ScrollToItem()""" # ImGuiScrollFlags_None = 0, /* original C++ signature */ @@ -2335,7 +2478,7 @@ class ScrollFlags_(enum.Enum): enum.auto() ) # (= ScrollFlags_KeepVisibleEdgeY | ScrollFlags_KeepVisibleCenterY | ScrollFlags_AlwaysCenterY) -class NavRenderCursorFlags_(enum.Enum): +class NavRenderCursorFlags_(enum.IntFlag): # ImGuiNavRenderCursorFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiNavRenderCursorFlags_Compact = 1 << 1, /* original C++ signature */ @@ -2347,7 +2490,7 @@ class NavRenderCursorFlags_(enum.Enum): # ImGuiNavRenderCursorFlags_NoRounding = 1 << 3, /* original C++ signature */ no_rounding = enum.auto() # (= 1 << 3) -class NavMoveFlags_(enum.Enum): +class NavMoveFlags_(enum.IntFlag): # ImGuiNavMoveFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiNavMoveFlags_LoopX = 1 << 0, /* original C++ signature */ @@ -2373,7 +2516,7 @@ class NavMoveFlags_(enum.Enum): # ImGuiNavMoveFlags_ScrollToEdgeY = 1 << 6, /* original C++ signature */ scroll_to_edge_y = ( enum.auto() - ) # (= 1 << 6) # Force scrolling to min/max (used by Home/End) // FIXME-NAV: Aim to remove or reword, probably unnecessary + ) # (= 1 << 6) # Force scrolling to min/max (used by Home/End) // FIXME-NAV: Aim to remove or reword as ImGuiScrollFlags # ImGuiNavMoveFlags_Forwarded = 1 << 7, /* original C++ signature */ forwarded = enum.auto() # (= 1 << 7) # ImGuiNavMoveFlags_DebugNoResult = 1 << 8, /* original C++ signature */ @@ -2395,7 +2538,7 @@ class NavMoveFlags_(enum.Enum): # ImGuiNavMoveFlags_NoClearActiveId = 1 << 15, /* original C++ signature */ no_clear_active_id = enum.auto() # (= 1 << 15) # (Experimental) Do not clear active id when applying 
move result -class NavLayer(enum.Enum): +class NavLayer(enum.IntFlag): # ImGuiNavLayer_Main = 0, /* original C++ signature */ main = enum.auto() # (= 0) # Main scrolling layer # ImGuiNavLayer_Menu = 1, /* original C++ signature */ @@ -2450,7 +2593,7 @@ class FocusScopeData: # [SECTION] Typing-select support # ----------------------------------------------------------------------------- -class TypingSelectFlags_(enum.Enum): +class TypingSelectFlags_(enum.IntFlag): """Flags for GetTypingSelectRequest()""" # ImGuiTypingSelectFlags_None = 0, /* original C++ signature */ @@ -2522,7 +2665,7 @@ class TypingSelectState: # [SECTION] Columns support # ----------------------------------------------------------------------------- -class OldColumnFlags_(enum.Enum): +class OldColumnFlags_(enum.IntFlag): """Flags for internal's BeginColumns(). This is an obsolete API. Prefer using BeginTable() nowadays!""" # ImGuiOldColumnFlags_None = 0, /* original C++ signature */ @@ -2723,7 +2866,7 @@ class MultiSelectState: # #ifdef IMGUI_HAS_DOCK # -class DockNodeFlagsPrivate_(enum.Enum): +class DockNodeFlagsPrivate_(enum.IntFlag): """Extend ImGuiDockNodeFlags_""" # [Internal] @@ -2801,7 +2944,7 @@ class DockNodeFlagsPrivate_(enum.Enum): enum.auto() ) # (= DockNodeFlags_NoResizeFlagsMask_ | DockNodeFlags_DockSpace | DockNodeFlags_CentralNode | DockNodeFlags_NoTabBar | DockNodeFlags_HiddenTabBar | DockNodeFlags_NoWindowMenuButton | DockNodeFlags_NoCloseButton) -class DataAuthority_(enum.Enum): +class DataAuthority_(enum.IntFlag): """Store the source authority (dock node vs window) of a field""" # ImGuiDataAuthority_Auto, /* original C++ signature */ @@ -2812,7 +2955,7 @@ class DataAuthority_(enum.Enum): # } window = enum.auto() # (= 2) -class DockNodeState(enum.Enum): +class DockNodeState(enum.IntFlag): # ImGuiDockNodeState_Unknown, /* original C++ signature */ unknown = enum.auto() # (= 0) # ImGuiDockNodeState_HostWindowHiddenBecauseSingleWindow, /* original C++ signature */ @@ -2944,7 
+3087,7 @@ class DockNode: """(private API)""" pass -class WindowDockStyleCol(enum.Enum): +class WindowDockStyleCol(enum.IntFlag): """List of colors that are stored at the time of Begin() into Docked Windows. We currently store the packed colors in a simple array window->DockStyle.Colors[]. A better solution may involve appending into a log of colors in ImGuiContext + store offsets into those arrays in ImGuiWindow, @@ -2967,9 +3110,11 @@ class WindowDockStyleCol(enum.Enum): tab_dimmed_selected = enum.auto() # (= 6) # ImGuiWindowDockStyleCol_TabDimmedSelectedOverline, /* original C++ signature */ tab_dimmed_selected_overline = enum.auto() # (= 7) + # ImGuiWindowDockStyleCol_UnsavedMarker, /* original C++ signature */ + unsaved_marker = enum.auto() # (= 8) # ImGuiWindowDockStyleCol_COUNT /* original C++ signature */ # } - count = enum.auto() # (= 8) + count = enum.auto() # (= 9) class WindowDockStyle: """We don't store style.Alpha: dock_node->LastBgColor embeds it and otherwise it would only affect the docking tab, which intuitively I would say we don't want to.""" @@ -3153,7 +3298,7 @@ class SettingsHandler: # [SECTION] Localization support # ----------------------------------------------------------------------------- -class LocKey(enum.Enum): +class LocKey(enum.IntFlag): """This is experimental and not officially supported, it'll probably fall short of features, if/when it does we may backtrack.""" # ImGuiLocKey_VersionStr, /* original C++ signature */ @@ -3212,7 +3357,7 @@ class LocEntry: # [SECTION] Metrics, Debug Tools # ----------------------------------------------------------------------------- -class DebugLogFlags_(enum.Enum): +class DebugLogFlags_(enum.IntFlag): """See IMGUI_DEBUG_LOG() and IMGUI_DEBUG_LOG_XXX() macros.""" # Event types @@ -3293,6 +3438,8 @@ class MetricsConfig: show_draw_cmd_bounding_boxes: bool = True # bool ShowTextEncodingViewer = false; /* original C++ signature */ show_text_encoding_viewer: bool = False + # bool ShowTextureUsedRect = 
false; /* original C++ signature */ + show_texture_used_rect: bool = False # bool ShowDockingNodes = false; /* original C++ signature */ show_docking_nodes: bool = False # int ShowWindowsRectsType = -1; /* original C++ signature */ @@ -3303,7 +3450,9 @@ class MetricsConfig: highlight_monitor_idx: int = -1 # ImGuiID HighlightViewportID = 0; /* original C++ signature */ highlight_viewport_id: ID = 0 - # ImGuiMetricsConfig(bool ShowDebugLog = false, bool ShowIDStackTool = false, bool ShowWindowsRects = false, bool ShowWindowsBeginOrder = false, bool ShowTablesRects = false, bool ShowDrawCmdMesh = true, bool ShowDrawCmdBoundingBoxes = true, bool ShowTextEncodingViewer = false, bool ShowDockingNodes = false, int ShowWindowsRectsType = -1, int ShowTablesRectsType = -1, int HighlightMonitorIdx = -1, ImGuiID HighlightViewportID = 0); /* original C++ signature */ + # bool ShowFontPreview = true; /* original C++ signature */ + show_font_preview: bool = True + # ImGuiMetricsConfig(bool ShowDebugLog = false, bool ShowIDStackTool = false, bool ShowWindowsRects = false, bool ShowWindowsBeginOrder = false, bool ShowTablesRects = false, bool ShowDrawCmdMesh = true, bool ShowDrawCmdBoundingBoxes = true, bool ShowTextEncodingViewer = false, bool ShowTextureUsedRect = false, bool ShowDockingNodes = false, int ShowWindowsRectsType = -1, int ShowTablesRectsType = -1, int HighlightMonitorIdx = -1, ImGuiID HighlightViewportID = 0, bool ShowFontPreview = true); /* original C++ signature */ def __init__( self, show_debug_log: bool = False, @@ -3314,11 +3463,13 @@ class MetricsConfig: show_draw_cmd_mesh: bool = True, show_draw_cmd_bounding_boxes: bool = True, show_text_encoding_viewer: bool = False, + show_texture_used_rect: bool = False, show_docking_nodes: bool = False, show_windows_rects_type: int = -1, show_tables_rects_type: int = -1, highlight_monitor_idx: int = -1, highlight_viewport_id: ID = 0, + show_font_preview: bool = True, ) -> None: """Auto-generated default constructor with 
named params""" pass @@ -3327,33 +3478,51 @@ class StackLevelInfo: # ImGuiID ID; /* original C++ signature */ id_: ID # ImS8 QueryFrameCount; /* original C++ signature */ - query_frame_count: ImS8 # >= 1: Query in progress + query_frame_count: ImS8 # >= 1: Sub-query in progress # bool QuerySuccess; /* original C++ signature */ - query_success: bool # Obtained result from DebugHookIdInfo() + query_success: bool # Sub-query obtained result from DebugHookIdInfo() + # ImS8 DataType; /* original C++ signature */ + data_type: ImS8 # ImGuiDataType + # int DescOffset; /* original C++ signature */ + desc_offset: int # -1 or offset into parent's ResultsPathsBuf - # ImGuiStackLevelInfo() { memset(this, 0, sizeof(*this)); } /* original C++ signature */ + # ImGuiStackLevelInfo() { memset(this, 0, sizeof(*this)); DataType = -1; DescOffset = -1; } /* original C++ signature */ + def __init__(self) -> None: + pass + +class DebugItemPathQuery: + # ImGuiID MainID; /* original C++ signature */ + main_id: ID # ID to query details for. + # bool Active; /* original C++ signature */ + active: bool # Used to disambiguate the case when ID == 0 and e.g. some code calls PushOverrideID(0). + # bool Complete; /* original C++ signature */ + complete: bool # All sub-queries are finished (some may have failed). + # ImS8 Step; /* original C++ signature */ + step: ImS8 # -1: query stack + init Results, >= 0: filling individual stack level. 
+ # ImVector Results; /* original C++ signature */ + results: ImVector_StackLevelInfo + # ImGuiTextBuffer ResultsDescBuf; /* original C++ signature */ + results_desc_buf: TextBuffer + # ImGuiTextBuffer ResultPathBuf; /* original C++ signature */ + result_path_buf: TextBuffer + + # ImGuiDebugItemPathQuery() { memset(this, 0, sizeof(*this)); } /* original C++ signature */ def __init__(self) -> None: pass class IDStackTool: """State for ID Stack tool queries""" + # bool OptHexEncodeNonAsciiChars; /* original C++ signature */ + opt_hex_encode_non_ascii_chars: bool + # bool OptCopyToClipboardOnCtrlC; /* original C++ signature */ + opt_copy_to_clipboard_on_ctrl_c: bool # int LastActiveFrame; /* original C++ signature */ last_active_frame: int - # int StackLevel; /* original C++ signature */ - stack_level: int # -1: query stack and resize Results, >= 0: individual stack level - # ImGuiID QueryId; /* original C++ signature */ - query_id: ID # ID to query details for - # ImVector Results; /* original C++ signature */ - results: ImVector_StackLevelInfo - # bool CopyToClipboardOnCtrlC; /* original C++ signature */ - copy_to_clipboard_on_ctrl_c: bool # float CopyToClipboardLastTime; /* original C++ signature */ copy_to_clipboard_last_time: float - # ImGuiTextBuffer ResultPathBuf; /* original C++ signature */ - result_path_buf: TextBuffer - # ImGuiIDStackTool() { memset(this, 0, sizeof(*this)); CopyToClipboardLastTime = -FLT_MAX; } /* original C++ signature */ + # ImGuiIDStackTool() { memset(this, 0, sizeof(*this)); LastActiveFrame = -1; OptHexEncodeNonAsciiChars = true; CopyToClipboardLastTime = -FLT_MAX; } /* original C++ signature */ def __init__(self) -> None: pass @@ -3361,7 +3530,7 @@ class IDStackTool: # [SECTION] Generic context hooks # ----------------------------------------------------------------------------- -class ContextHookType(enum.Enum): +class ContextHookType(enum.IntFlag): """[ADAPT_IMGUI_BUNDLE]: added ImGuiContextHookType_BeginWindow, 
ImGuiContextHookType_EndWindow, cf https://github.com/thedmd/imgui-node-editor/issues/242#issuecomment-1681806764""" # ImGuiContextHookType_NewFramePre, /* original C++ signature */ @@ -3406,8 +3575,24 @@ class ContextHook: class Context: # bool Initialized; /* original C++ signature */ initialized: bool - # bool FontAtlasOwnedByContext; /* original C++ signature */ - font_atlas_owned_by_context: bool # IO.Fonts-> is owned by the ImGuiContext and will be destructed along with it. + # bool WithinFrameScope; /* original C++ signature */ + within_frame_scope: bool # Set by NewFrame(), cleared by EndFrame() + # bool WithinFrameScopeWithImplicitWindow; /* original C++ signature */ + within_frame_scope_with_implicit_window: ( + bool # Set by NewFrame(), cleared by EndFrame() when the implicit debug window has been pushed + ) + # bool TestEngineHookItems; /* original C++ signature */ + test_engine_hook_items: bool # Will call test engine hooks: ImGuiTestEngineHook_ItemAdd(), ImGuiTestEngineHook_ItemInfo(), ImGuiTestEngineHook_Log() + # int FrameCount; /* original C++ signature */ + frame_count: int + # int FrameCountEnded; /* original C++ signature */ + frame_count_ended: int + # int FrameCountPlatformEnded; /* original C++ signature */ + frame_count_platform_ended: int + # int FrameCountRendered; /* original C++ signature */ + frame_count_rendered: int + # double Time; /* original C++ signature */ + time: float # ImGuiIO IO; /* original C++ signature */ io: IO # ImGuiPlatformIO PlatformIO; /* original C++ signature */ @@ -3419,39 +3604,27 @@ class Context: # ImGuiConfigFlags ConfigFlagsLastFrame; /* original C++ signature */ config_flags_last_frame: ConfigFlags # ImFont* Font; /* original C++ signature */ - font: ImFont # (Shortcut) == FontStack.empty() ? IO.Font : FontStack.back() + font: ImFont # Currently bound font. 
(== FontStack.back().Font) + # ImFontBaked* FontBaked; /* original C++ signature */ + font_baked: ImFontBaked # Currently bound font at currently bound size. (== Font->GetFontBaked(FontSize)) # float FontSize; /* original C++ signature */ - font_size: float # (Shortcut) == FontBaseSize * g.CurrentWindow->FontWindowScale == window->FontSize(). Text height for current window. - # float FontBaseSize; /* original C++ signature */ - font_base_size: float # (Shortcut) == IO.FontGlobalScale * Font->Scale * Font->FontSize. Base text height. - # float FontScale; /* original C++ signature */ - font_scale: float # == FontSize / Font->FontSize + font_size: float # Currently bound font size == line height (== FontSizeBase + externals scales applied in the UpdateCurrentFontSize() function). + # float FontSizeBase; /* original C++ signature */ + font_size_base: ( + float # Font size before scaling == style.FontSizeBase == value passed to PushFont() when specified. + ) + # float FontBakedScale; /* original C++ signature */ + font_baked_scale: ( + float # == FontBaked->Size / FontSize. Scale factor over baked size. Rarely used nowadays, very often == 1.0. + ) + # float FontRasterizerDensity; /* original C++ signature */ + font_rasterizer_density: float # Current font density. Used by all calls to GetFontBaked(). 
# float CurrentDpiScale; /* original C++ signature */ current_dpi_scale: float # Current window/viewport DpiScale == CurrentViewport->DpiScale # ImDrawListSharedData DrawListSharedData; /* original C++ signature */ draw_list_shared_data: ImDrawListSharedData - # double Time; /* original C++ signature */ - time: float - # int FrameCount; /* original C++ signature */ - frame_count: int - # int FrameCountEnded; /* original C++ signature */ - frame_count_ended: int - # int FrameCountPlatformEnded; /* original C++ signature */ - frame_count_platform_ended: int - # int FrameCountRendered; /* original C++ signature */ - frame_count_rendered: int # ImGuiID WithinEndChildID; /* original C++ signature */ within_end_child_id: ID # Set within EndChild() - # bool WithinFrameScope; /* original C++ signature */ - within_frame_scope: bool # Set by NewFrame(), cleared by EndFrame() - # bool WithinFrameScopeWithImplicitWindow; /* original C++ signature */ - within_frame_scope_with_implicit_window: ( - bool # Set by NewFrame(), cleared by EndFrame() when the implicit debug window has been pushed - ) - # bool GcCompactAll; /* original C++ signature */ - gc_compact_all: bool # Request full GC - # bool TestEngineHookItems; /* original C++ signature */ - test_engine_hook_items: bool # Will call test engine hooks: ImGuiTestEngineHook_ItemAdd(), ImGuiTestEngineHook_ItemInfo(), ImGuiTestEngineHook_Log() # void* TestEngine; /* original C++ signature */ test_engine: Any # Test engine user data @@ -3508,10 +3681,10 @@ class Context: wheeling_axis_avg: ImVec2 # Item/widgets state and tracking information - # ImGuiID DebugDrawIdConflicts; /* original C++ signature */ - debug_draw_id_conflicts: ID # Set when we detect multiple items with the same identifier - # ImGuiID DebugHookIdInfo; /* original C++ signature */ - debug_hook_id_info: ID # Will call core hooks: DebugHookIdInfo() from GetID functions, used by ID Stack Tool [next HoveredId/ActiveId to not pull in an extra cache-line] + # ImGuiID 
DebugDrawIdConflictsId; /* original C++ signature */ + debug_draw_id_conflicts_id: ID # Set when we detect multiple items with the same identifier + # ImGuiID DebugHookIdInfoId; /* original C++ signature */ + debug_hook_id_info_id: ID # Will call core hooks: DebugHookIdInfo() from GetID functions, used by ID Stack Tool [next HoveredId/ActiveId to not pull in an extra cache-line] # ImGuiID HoveredId; /* original C++ signature */ hovered_id: ID # Hovered widget, filled during the frame # ImGuiID HoveredIdPreviousFrame; /* original C++ signature */ @@ -3552,14 +3725,18 @@ class Context: active_id_has_been_edited_this_frame: bool # bool ActiveIdFromShortcut; /* original C++ signature */ active_id_from_shortcut: bool + # ImS8 ActiveIdMouseButton; /* original C++ signature */ + active_id_mouse_button: ImS8 + # ImGuiID ActiveIdDisabledId; /* original C++ signature */ + active_id_disabled_id: ID # When clicking a disabled item we set ActiveId=window->MoveId to avoid interference with widget code. Actual item ID is stored here. 
# ImVec2 ActiveIdClickOffset; /* original C++ signature */ active_id_click_offset: ( ImVec2 # Clicked offset from upper-left corner, if applicable (currently only set by ButtonBehavior) ) - # ImGuiWindow* ActiveIdWindow; /* original C++ signature */ - active_id_window: Window # ImGuiInputSource ActiveIdSource; /* original C++ signature */ active_id_source: InputSource # Activating source: ImGuiInputSource_Mouse OR ImGuiInputSource_Keyboard OR ImGuiInputSource_Gamepad + # ImGuiWindow* ActiveIdWindow; /* original C++ signature */ + active_id_window: Window # ImGuiID ActiveIdPreviousFrame; /* original C++ signature */ active_id_previous_frame: ID # ImGuiDeactivatedItemData DeactivatedItemData; /* original C++ signature */ @@ -3612,6 +3789,8 @@ class Context: next_window_data: NextWindowData # Storage for SetNextWindow** functions # bool DebugShowGroupRects; /* original C++ signature */ debug_show_group_rects: bool + # bool GcCompactAll; /* original C++ signature */ + gc_compact_all: bool # Request full GC # Shared stacks # ImGuiCol DebugFlashStyleColorIdx; /* original C++ signature */ @@ -3620,8 +3799,6 @@ class Context: color_stack: ImVector_ColorMod # Stack for PushStyleColor()/PopStyleColor() - inherited by Begin() # ImVector StyleVarStack; /* original C++ signature */ style_var_stack: ImVector_StyleMod # Stack for PushStyleVar()/PopStyleVar() - inherited by Begin() - # ImVector FontStack; /* original C++ signature */ - font_stack: ImVector_ImFont_ptr # Stack for PushFont()/PopFont() - inherited by Begin() # ImVector FocusScopeStack; /* original C++ signature */ focus_scope_stack: ImVector_FocusScopeData # Stack for PushFocusScope()/PopFocusScope() - inherited by BeginChild(), pushed into by Begin() # ImVector ItemFlagsStack; /* original C++ signature */ @@ -3689,7 +3866,7 @@ class Context: # ImGuiNavLayer NavLayer; /* original C++ signature */ nav_layer: NavLayer # Focused layer (main scrolling layer, or menu/title bar layer) # ImGuiID NavActivateId; /* original 
C++ signature */ - nav_activate_id: ID # ~~ (g.ActiveId == 0) && (IsKeyPressed(ImGuiKey_Space) || IsKeyDown(ImGuiKey_Enter) || IsKeyPressed(ImGuiKey_NavGamepadActivate)) ? NavId : 0, also set when calling ActivateItem() + nav_activate_id: ID # ~~ (g.ActiveId == 0) && (IsKeyPressed(ImGuiKey_Space) || IsKeyDown(ImGuiKey_Enter) || IsKeyPressed(ImGuiKey_NavGamepadActivate)) ? NavId : 0, also set when calling ActivateItemByID() # ImGuiID NavActivateDownId; /* original C++ signature */ nav_activate_down_id: ID # ~~ IsKeyDown(ImGuiKey_Space) || IsKeyDown(ImGuiKey_Enter) || IsKeyDown(ImGuiKey_NavGamepadActivate) ? NavId : 0 # ImGuiID NavActivatePressedId; /* original C++ signature */ @@ -3703,17 +3880,18 @@ class Context: # float NavHighlightActivatedTimer; /* original C++ signature */ nav_highlight_activated_timer: float # ImGuiID NavNextActivateId; /* original C++ signature */ - nav_next_activate_id: ID # Set by ActivateItem(), queued until next frame. + nav_next_activate_id: ID # Set by ActivateItemByID(), queued until next frame. # ImGuiActivateFlags NavNextActivateFlags; /* original C++ signature */ nav_next_activate_flags: ActivateFlags # ImGuiInputSource NavInputSource; /* original C++ signature */ nav_input_source: ( - InputSource # Keyboard or Gamepad mode? THIS CAN ONLY BE ImGuiInputSource_Keyboard or ImGuiInputSource_Mouse + InputSource # Keyboard or Gamepad mode? THIS CAN ONLY BE ImGuiInputSource_Keyboard or ImGuiInputSource_Gamepad ) # ImGuiSelectionUserData NavLastValidSelectionUserData; /* original C++ signature */ nav_last_valid_selection_user_data: SelectionUserData # Last valid data passed to SetNextItemSelectionUser(), or -1. For current window. Not reset when focusing an item that doesn't have selection data. # ImS8 NavCursorHideFrames; /* original C++ signature */ nav_cursor_hide_frames: ImS8 + # ImGuiID NavActivateInputId; // Removed in 1.89.4 (July 2023). 
This is now part of g.NavActivateId and sets g.NavActivateFlags |= ImGuiActivateFlags_PreferInput. See commit c9a53aa74, issue #5606. # Navigation: Init & Move Requests # bool NavAnyRequest; /* original C++ signature */ @@ -3781,25 +3959,29 @@ class Context: # bool NavJustMovedToHasSelectionData; /* original C++ signature */ nav_just_moved_to_has_selection_data: bool # Copy of move result's ItemFlags & ImGuiItemFlags_HasSelectionUserData). Maybe we should just store ImGuiNavItemData. - # Navigation: Windowing (CTRL+TAB for list, or Menu button + keys or directional pads to move/resize) + # Navigation: Windowing (Ctrl+Tab for list, or Menu button + keys or directional pads to move/resize) + # bool ConfigNavWindowingWithGamepad; /* original C++ signature */ + config_nav_windowing_with_gamepad: bool # = True. Enable Ctrl+Tab by holding ImGuiKey_GamepadFaceLeft (== ImGuiKey_NavGamepadMenu). When False, the button may still be used to toggle Menu layer. # ImGuiKeyChord ConfigNavWindowingKeyNext; /* original C++ signature */ config_nav_windowing_key_next: KeyChord # = ImGuiMod_Ctrl | ImGuiKey_Tab (or ImGuiMod_Super | ImGuiKey_Tab on OS X). For reconfiguration (see #4828) # ImGuiKeyChord ConfigNavWindowingKeyPrev; /* original C++ signature */ config_nav_windowing_key_prev: KeyChord # = ImGuiMod_Ctrl | ImGuiMod_Shift | ImGuiKey_Tab (or ImGuiMod_Super | ImGuiMod_Shift | ImGuiKey_Tab on OS X) # ImGuiWindow* NavWindowingTarget; /* original C++ signature */ - nav_windowing_target: Window # Target window when doing CTRL+Tab (or Pad Menu + FocusPrev/Next), this window is temporarily displayed top-most! + nav_windowing_target: Window # Target window when doing Ctrl+Tab (or Pad Menu + FocusPrev/Next), this window is temporarily displayed top-most! # ImGuiWindow* NavWindowingTargetAnim; /* original C++ signature */ nav_windowing_target_anim: Window # Record of last valid NavWindowingTarget until DimBgRatio and NavWindowingHighlightAlpha becomes 0.0, so the fade-out can stay on it. 
# ImGuiWindow* NavWindowingListWindow; /* original C++ signature */ - nav_windowing_list_window: Window # Internal window actually listing the CTRL+Tab contents + nav_windowing_list_window: Window # Internal window actually listing the Ctrl+Tab contents # float NavWindowingTimer; /* original C++ signature */ nav_windowing_timer: float # float NavWindowingHighlightAlpha; /* original C++ signature */ nav_windowing_highlight_alpha: float + # ImGuiInputSource NavWindowingInputSource; /* original C++ signature */ + nav_windowing_input_source: InputSource # bool NavWindowingToggleLayer; /* original C++ signature */ - nav_windowing_toggle_layer: bool + nav_windowing_toggle_layer: bool # Set while Alt or GamepadMenu is held, may be cleared by other operations, and processed when releasing the key. # ImGuiKey NavWindowingToggleKey; /* original C++ signature */ - nav_windowing_toggle_key: Key + nav_windowing_toggle_key: Key # Keyboard/gamepad key used when toggling to menu layer. # ImVec2 NavWindowingAccumDeltaPos; /* original C++ signature */ nav_windowing_accum_delta_pos: ImVec2 # ImVec2 NavWindowingAccumDeltaSize; /* original C++ signature */ @@ -3807,7 +3989,7 @@ class Context: # Render # float DimBgRatio; /* original C++ signature */ - dim_bg_ratio: float # 0.0..1.0 animation when fading in a dimming background (for modal window and CTRL+TAB list) + dim_bg_ratio: float # 0.0..1.0 animation when fading in a dimming background (for modal window and Ctrl+Tab list) # Drag and Drop # bool DragDropActive; /* original C++ signature */ @@ -3832,8 +4014,12 @@ class Context: drag_drop_target_clip_rect: ImRect # Store ClipRect at the time of item's drawing # ImGuiID DragDropTargetId; /* original C++ signature */ drag_drop_target_id: ID - # ImGuiDragDropFlags DragDropAcceptFlags; /* original C++ signature */ - drag_drop_accept_flags: DragDropFlags + # ImGuiID DragDropTargetFullViewport; /* original C++ signature */ + drag_drop_target_full_viewport: ID + # ImGuiDragDropFlags 
DragDropAcceptFlagsCurr; /* original C++ signature */ + drag_drop_accept_flags_curr: DragDropFlags + # ImGuiDragDropFlags DragDropAcceptFlagsPrev; /* original C++ signature */ + drag_drop_accept_flags_prev: DragDropFlags # float DragDropAcceptIdCurrRectSurface; /* original C++ signature */ drag_drop_accept_id_curr_rect_surface: ( float # Target item surface (we resolve overlapping targets by prioritizing the smaller surface) @@ -3924,12 +4110,16 @@ class Context: # Widget state # ImGuiInputTextState InputTextState; /* original C++ signature */ input_text_state: InputTextState + # ImGuiTextIndex InputTextLineIndex; /* original C++ signature */ + input_text_line_index: TextIndex # Temporary storage # ImGuiInputTextDeactivatedState InputTextDeactivatedState; /* original C++ signature */ input_text_deactivated_state: InputTextDeactivatedState - # ImFont InputTextPasswordFont; /* original C++ signature */ - input_text_password_font: ImFont + # ImFontBaked InputTextPasswordFontBackupBaked; /* original C++ signature */ + input_text_password_font_backup_baked: ImFontBaked + # ImFontFlags InputTextPasswordFontBackupFlags; /* original C++ signature */ + input_text_password_font_backup_flags: ImFontFlags # ImGuiID TempInputId; /* original C++ signature */ - temp_input_id: ID # Temporary text input when CTRL+clicking on a slider, etc. + temp_input_id: ID # Temporary text input when using Ctrl+Click on a slider, etc. # ImGuiDataTypeStorage DataTypeZeroValue; /* original C++ signature */ data_type_zero_value: DataTypeStorage # 0 for all data types # int BeginMenuDepth; /* original C++ signature */ @@ -3993,17 +4183,17 @@ class Context: # Platform support # ImGuiPlatformImeData PlatformImeData; /* original C++ signature */ - platform_ime_data: PlatformImeData # Data updated by current frame + platform_ime_data: PlatformImeData # Data updated by current frame. Will be applied at end of the frame. 
For some backends, this is required to have WantVisible=True in order to receive text message. # ImGuiPlatformImeData PlatformImeDataPrev; /* original C++ signature */ platform_ime_data_prev: ( PlatformImeData # Previous frame data. When changed we call the platform_io.Platform_SetImeDataFn() handler. ) - # ImGuiID PlatformImeViewport; /* original C++ signature */ - platform_ime_viewport: ID - # ImGuiDockContext DockContext; /* original C++ signature */ # Extensions # FIXME: We could provide an API to register one slot in an array held in ImGuiContext? + # ImVector UserTextures; /* original C++ signature */ + user_textures: ImVector_ImTextureData_ptr # List of textures created/managed by user or third-party extension. Automatically appended into platform_io.Textures[]. + # ImGuiDockContext DockContext; /* original C++ signature */ dock_context: DockContext # Settings @@ -4021,6 +4211,8 @@ class Context: # Capture/Logging # bool LogEnabled; /* original C++ signature */ log_enabled: bool # Currently capturing + # bool LogLineFirstItem; /* original C++ signature */ + log_line_first_item: bool # ImGuiLogFlags LogFlags; /* original C++ signature */ log_flags: LogFlags # Capture flags/type # ImGuiWindow* LogWindow; /* original C++ signature */ @@ -4028,13 +4220,13 @@ class Context: # ImGuiTextBuffer LogBuffer; /* original C++ signature */ log_buffer: TextBuffer # Accumulation buffer when log to clipboard. This is pointer so our GImGui static constructor doesn't call heap allocators. # const char* LogNextPrefix; /* original C++ signature */ - log_next_prefix: str # (const) + log_next_prefix: ( + str # See comment in LogSetNextTextDecoration(): doesn't copy underlying data, use carefully! 
# (const) + ) # const char* LogNextSuffix; /* original C++ signature */ log_next_suffix: str # (const) # float LogLinePosY; /* original C++ signature */ log_line_pos_y: float - # bool LogLineFirstItem; /* original C++ signature */ - log_line_first_item: bool # int LogDepthRef; /* original C++ signature */ log_depth_ref: int # int LogDepthToExpand; /* original C++ signature */ @@ -4061,7 +4253,7 @@ class Context: # Debug Tools # (some of the highly frequently used data are interleaved in other structures above: DebugBreakXXX fields, DebugHookIdInfo, DebugLocateId etc.) # int DebugDrawIdConflictsCount; /* original C++ signature */ - debug_draw_id_conflicts_count: int # Locked count (preserved when holding CTRL) + debug_draw_id_conflicts_count: int # Locked count (preserved when holding Ctrl) # ImGuiDebugLogFlags DebugLogFlags; /* original C++ signature */ debug_log_flags: DebugLogFlags # ImGuiTextBuffer DebugLogBuf; /* original C++ signature */ @@ -4094,6 +4286,8 @@ class Context: debug_flash_style_color_backup: ImVec4 # ImGuiMetricsConfig DebugMetricsConfig; /* original C++ signature */ debug_metrics_config: MetricsConfig + # ImGuiDebugItemPathQuery DebugItemPathQuery; /* original C++ signature */ + debug_item_path_query: DebugItemPathQuery # ImGuiIDStackTool DebugIDStackTool; /* original C++ signature */ debug_id_stack_tool: IDStackTool # ImGuiDebugAllocInfo DebugAllocInfo; /* original C++ signature */ @@ -4117,7 +4311,7 @@ class Context: # int WantCaptureKeyboardNextFrame; /* original C++ signature */ want_capture_keyboard_next_frame: int # " # int WantTextInputNextFrame; /* original C++ signature */ - want_text_input_next_frame: int + want_text_input_next_frame: int # Copied in EndFrame() from g.PlatformImeData.WantTextInput. Needs to be set for some backends (SDL3) to emit character inputs. 
# ImVector TempBuffer; /* original C++ signature */ temp_buffer: ImVector_char # Temporary text buffer @@ -4194,6 +4388,8 @@ class WindowTempData: tree_has_stack_data_depth_mask: ( ImU32 # Store whether given depth has ImGuiTreeNodeStackData data. Could be turned into a ImU64 if necessary. ) + # ImU32 TreeRecordsClippedNodesY2Mask; /* original C++ signature */ + tree_records_clipped_nodes_y2_mask: ImU32 # Store whether we should keep recording Y2. Cleared when passing clip max. Equivalent TreeHasStackDataDepthMask value should always be set. # ImVector ChildWindows; /* original C++ signature */ child_windows: ImVector_Window_ptr # ImGuiStorage* StateStorage; /* original C++ signature */ @@ -4229,7 +4425,7 @@ class WindowTempData: item_width_stack: ImVector_float # Store item widths to restore (attention: .back() is not == ItemWidth) # ImVector TextWrapPosStack; /* original C++ signature */ text_wrap_pos_stack: ImVector_float # Store text wrap pos to restore (attention: .back() is not == TextWrapPos) - # ImGuiWindowTempData(ImVec2 CursorPos = ImVec2(), ImVec2 CursorPosPrevLine = ImVec2(), ImVec2 CursorStartPos = ImVec2(), ImVec2 CursorMaxPos = ImVec2(), ImVec2 IdealMaxPos = ImVec2(), ImVec2 CurrLineSize = ImVec2(), ImVec2 PrevLineSize = ImVec2(), float CurrLineTextBaseOffset = float(), float PrevLineTextBaseOffset = float(), bool IsSameLine = bool(), bool IsSetPos = bool(), ImVec1 Indent = ImVec1(), ImVec1 ColumnsOffset = ImVec1(), ImVec1 GroupOffset = ImVec1(), ImVec2 CursorStartPosLossyness = ImVec2(), ImGuiNavLayer NavLayerCurrent = ImGuiNavLayer(), short NavLayersActiveMask = short(), short NavLayersActiveMaskNext = short(), bool NavIsScrollPushableX = bool(), bool NavHideHighlightOneFrame = bool(), bool NavWindowHasScrollY = bool(), bool MenuBarAppending = bool(), ImVec2 MenuBarOffset = ImVec2(), ImGuiMenuColumns MenuColumns = ImGuiMenuColumns(), int TreeDepth = int(), ImU32 TreeHasStackDataDepthMask = ImU32(), ImVector ChildWindows = ImVector(), int 
CurrentTableIdx = int(), ImGuiLayoutType LayoutType = ImGuiLayoutType(), ImGuiLayoutType ParentLayoutType = ImGuiLayoutType(), ImU32 ModalDimBgColor = ImU32(), ImGuiItemStatusFlags WindowItemStatusFlags = ImGuiItemStatusFlags(), ImGuiItemStatusFlags ChildItemStatusFlags = ImGuiItemStatusFlags(), ImGuiItemStatusFlags DockTabItemStatusFlags = ImGuiItemStatusFlags(), ImRect DockTabItemRect = ImRect(), float ItemWidth = float(), float TextWrapPos = float(), ImVector ItemWidthStack = ImVector(), ImVector TextWrapPosStack = ImVector()); /* original C++ signature */ + # ImGuiWindowTempData(ImVec2 CursorPos = ImVec2(), ImVec2 CursorPosPrevLine = ImVec2(), ImVec2 CursorStartPos = ImVec2(), ImVec2 CursorMaxPos = ImVec2(), ImVec2 IdealMaxPos = ImVec2(), ImVec2 CurrLineSize = ImVec2(), ImVec2 PrevLineSize = ImVec2(), float CurrLineTextBaseOffset = float(), float PrevLineTextBaseOffset = float(), bool IsSameLine = bool(), bool IsSetPos = bool(), ImVec1 Indent = ImVec1(), ImVec1 ColumnsOffset = ImVec1(), ImVec1 GroupOffset = ImVec1(), ImVec2 CursorStartPosLossyness = ImVec2(), ImGuiNavLayer NavLayerCurrent = ImGuiNavLayer(), short NavLayersActiveMask = short(), short NavLayersActiveMaskNext = short(), bool NavIsScrollPushableX = bool(), bool NavHideHighlightOneFrame = bool(), bool NavWindowHasScrollY = bool(), bool MenuBarAppending = bool(), ImVec2 MenuBarOffset = ImVec2(), ImGuiMenuColumns MenuColumns = ImGuiMenuColumns(), int TreeDepth = int(), ImU32 TreeHasStackDataDepthMask = ImU32(), ImU32 TreeRecordsClippedNodesY2Mask = ImU32(), ImVector ChildWindows = ImVector(), int CurrentTableIdx = int(), ImGuiLayoutType LayoutType = ImGuiLayoutType(), ImGuiLayoutType ParentLayoutType = ImGuiLayoutType(), ImU32 ModalDimBgColor = ImU32(), ImGuiItemStatusFlags WindowItemStatusFlags = ImGuiItemStatusFlags(), ImGuiItemStatusFlags ChildItemStatusFlags = ImGuiItemStatusFlags(), ImGuiItemStatusFlags DockTabItemStatusFlags = ImGuiItemStatusFlags(), ImRect DockTabItemRect = ImRect(), float 
ItemWidth = float(), float TextWrapPos = float(), ImVector ItemWidthStack = ImVector(), ImVector TextWrapPosStack = ImVector()); /* original C++ signature */ def __init__( self, cursor_pos: Optional[ImVec2Like] = None, @@ -4258,6 +4454,7 @@ class WindowTempData: menu_columns: Optional[MenuColumns] = None, tree_depth: int = int(), tree_has_stack_data_depth_mask: ImU32 = ImU32(), + tree_records_clipped_nodes_y2_mask: ImU32 = ImU32(), child_windows: Optional[ImVector_Window] = None, current_table_idx: int = int(), layout_type: Optional[LayoutType] = None, @@ -4440,14 +4637,14 @@ class Window: begin_order_within_context: int # Begin() order within entire imgui context. This is mostly used for debugging submission order related issues. # short FocusOrder; /* original C++ signature */ focus_order: int # Order within WindowsFocusOrder[], altered when windows are focused. + # ImGuiDir AutoPosLastDirection; /* original C++ signature */ + auto_pos_last_direction: Dir # ImS8 AutoFitFramesX, /* original C++ signature */ auto_fit_frames_x: ImS8 # AutoFitFramesY; /* original C++ signature */ auto_fit_frames_y: ImS8 # bool AutoFitOnlyGrows; /* original C++ signature */ auto_fit_only_grows: bool - # ImGuiDir AutoPosLastDirection; /* original C++ signature */ - auto_pos_last_direction: Dir # ImS8 HiddenFramesCanSkipItems; /* original C++ signature */ hidden_frames_can_skip_items: ImS8 # Hide the window for N frames # ImS8 HiddenFramesCannotSkipItems; /* original C++ signature */ @@ -4507,8 +4704,6 @@ class Window: font_window_scale: float # User scale multiplier per-window, via SetWindowFontScale() # float FontWindowScaleParents; /* original C++ signature */ font_window_scale_parents: float - # float FontDpiScale; /* original C++ signature */ - font_dpi_scale: float # float FontRefSize; /* original C++ signature */ font_ref_size: float # This is a copy of window->CalcFontSize() at the time of Begin(), trying to phase out CalcFontSize() especially as it may be called on non-current 
window. # int SettingsOffset; /* original C++ signature */ @@ -4543,7 +4738,7 @@ class Window: # ImGuiWindow* RootWindowForNav; /* original C++ signature */ root_window_for_nav: Window # Point to ourself or first ancestor which doesn't have the NavFlattened flag. # ImGuiWindow* ParentWindowForFocusRoute; /* original C++ signature */ - parent_window_for_focus_route: Window # Set to manual link a window to its logical parent so that Shortcut() chain are honoerd (e.g. Tool linked to Document) + parent_window_for_focus_route: Window # Set to manual link a window to its logical parent so that Shortcut() chain are honored (e.g. Tool linked to Document) # ImGuiWindow* NavLastChildNavWindow; /* original C++ signature */ nav_last_child_nav_window: Window # When going to the menu bar, we remember the child window we came from. (This could probably be made implicit if we kept g.Windows sorted by last focused including child window.) @@ -4600,10 +4795,6 @@ class Window: def rect(self) -> ImRect: """(private API)""" pass - # float CalcFontSize() const { ImGuiContext& g = *Ctx; return g.FontBaseSize * FontWindowScale * FontDpiScale * FontWindowScaleParents; } /* original C++ signature */ - def calc_font_size(self) -> float: - """(private API)""" - pass # ImRect TitleBarRect() const { return ImRect(Pos, ImVec2(Pos.x + SizeFull.x, Pos.y + TitleBarHeight)); } /* original C++ signature */ def title_bar_rect(self) -> ImRect: """(private API)""" @@ -4612,12 +4803,14 @@ class Window: def menu_bar_rect(self) -> ImRect: """(private API)""" pass + # [OBSOLETE] ImGuiWindow::CalcFontSize() was removed in 1.92.0 because error-prone/misleading. You can use window->FontRefSize for a copy of g.FontSize at the time of the last Begin() call for this window. 
+ # float CalcFontSize() const { ImGuiContext& g = *Ctx; return g.FontSizeBase * FontWindowScale * FontDpiScale * FontWindowScaleParents; # ----------------------------------------------------------------------------- # [SECTION] Tab bar, Tab item support # ----------------------------------------------------------------------------- -class TabBarFlagsPrivate_(enum.Enum): +class TabBarFlagsPrivate_(enum.IntFlag): """Extend ImGuiTabBarFlags_""" # ImGuiTabBarFlags_DockNode = 1 << 20, /* original C++ signature */ @@ -4631,7 +4824,7 @@ class TabBarFlagsPrivate_(enum.Enum): enum.auto() ) # (= 1 << 22) # FIXME: Settings are handled by the docking system, this only request the tab bar to mark settings dirty when reordering tabs -class TabItemFlagsPrivate_(enum.Enum): +class TabItemFlagsPrivate_(enum.IntFlag): """Extend ImGuiTabItemFlags_""" # ImGuiTabItemFlags_SectionMask_ = ImGuiTabItemFlags_Leading | ImGuiTabItemFlags_Trailing, /* original C++ signature */ @@ -4669,7 +4862,7 @@ class TabItem: # float Width; /* original C++ signature */ width: float # Width currently displayed # float ContentWidth; /* original C++ signature */ - content_width: float # Width of label, stored during BeginTabItem() call + content_width: float # Width of label + padding, stored during BeginTabItem() call (misnamed as "Content" would normally imply width of label only) # float RequestedWidth; /* original C++ signature */ requested_width: float # Width optionally requested by caller, -1.0 is unused # ImS32 NameOffset; /* original C++ signature */ @@ -4701,13 +4894,17 @@ class TabBar: # ImGuiID NextSelectedTabId; /* original C++ signature */ next_selected_tab_id: ID # Next selected tab/window. Will also trigger a scrolling animation # ImGuiID VisibleTabId; /* original C++ signature */ - visible_tab_id: ID # Can occasionally be != SelectedTabId (e.g. when previewing contents for CTRL+TAB preview) + visible_tab_id: ID # Can occasionally be != SelectedTabId (e.g. 
when previewing contents for Ctrl+Tab preview) # int CurrFrameVisible; /* original C++ signature */ curr_frame_visible: int # int PrevFrameVisible; /* original C++ signature */ prev_frame_visible: int # ImRect BarRect; /* original C++ signature */ bar_rect: ImRect + # float BarRectPrevWidth; /* original C++ signature */ + bar_rect_prev_width: ( + float # Backup of previous width. When width change we enforce keep horizontal scroll on focused tab. + ) # float CurrTabsContentsHeight; /* original C++ signature */ curr_tabs_contents_height: float # float PrevTabsContentsHeight; /* original C++ signature */ @@ -4744,6 +4941,8 @@ class TabBar: visible_tab_was_submitted: bool # bool TabsAddedNew; /* original C++ signature */ tabs_added_new: bool # Set to True when a new tab item or button has been added to the tab bar during last frame + # bool ScrollButtonEnabled; /* original C++ signature */ + scroll_button_enabled: bool # ImS16 TabsActiveCount; /* original C++ signature */ tabs_active_count: ImS16 # Number of tabs submitted this frame. # ImS16 LastTabItemIdx; /* original C++ signature */ @@ -4765,8 +4964,6 @@ class TabBar: # [SECTION] Table support # ----------------------------------------------------------------------------- -# Our current column maximum is 64 but we may raise that in the future. - class TableColumn: """[Internal] sizeof() ~ 112 We use the terminology "Enabled" to refer to a column that is not Hidden by user/api. @@ -5148,12 +5345,14 @@ class Table: bool # Set when default context menu is open (also see: ContextPopupColumn, InstanceInteracted). ) # bool DisableDefaultContextMenu; /* original C++ signature */ - disable_default_context_menu: bool # Disable default context menu contents. You may submit your own using TableBeginContextMenuPopup()/EndPopup() + disable_default_context_menu: ( + bool # Disable default context menu. 
You may submit your own using TableBeginContextMenuPopup()/EndPopup() + ) # bool IsSettingsRequestLoad; /* original C++ signature */ is_settings_request_load: bool # bool IsSettingsDirty; /* original C++ signature */ is_settings_dirty: ( - bool # Set when table settings have changed and needs to be reported into ImGuiTableSetttings data. + bool # Set when table settings have changed and needs to be reported into ImGuiTableSettings data. ) # bool IsDefaultDisplayOrder; /* original C++ signature */ is_default_display_order: ( @@ -5192,6 +5391,8 @@ class TableTempData: sizeof() ~ 136 bytes. """ + # ImGuiID WindowID; /* original C++ signature */ + window_id: ID # Shortcut to g.Tables[TableIndex]->OuterWindow->ID. # int TableIndex; /* original C++ signature */ table_index: int # Index in g.Tables.Buf[] pool # float LastTimeActive; /* original C++ signature */ @@ -5305,6 +5506,10 @@ def get_current_window_read() -> Window: # IMGUI_API ImGuiWindow* GetCurrentWindow() /* original C++ signature */ # { +# #ifdef IMGUI_BUNDLE_PYTHON_API +# // Help python users, because otherwise, this leads to an un-debuggable segfault +# IM_ASSERT(GImGui != NULL && "ImGui::GetCurrentWindow() -> ImGuiContext is NULL. 
This is likely because you are calling ImGui functions even before ImGui::CreateContext()."); +# #endif # ImGuiContext& g = *GImGui; # #ifdef IMGUI_BUNDLE_PYTHON_API # // Help python users, because otherwise, this leads to an un-debuggable segfault @@ -5340,6 +5545,10 @@ def calc_window_next_auto_fit_size(window: Window) -> ImVec2: def is_window_child_of(window: Window, potential_parent: Window, popup_hierarchy: bool, dock_hierarchy: bool) -> bool: pass +# IMGUI_API bool IsWindowInBeginStack(ImGuiWindow* window); /* original C++ signature */ +def is_window_in_begin_stack(window: Window) -> bool: + pass + # IMGUI_API bool IsWindowWithinBeginStackOf(ImGuiWindow* window, ImGuiWindow* potential_parent); /* original C++ signature */ def is_window_within_begin_stack_of(window: Window, potential_parent: Window) -> bool: pass @@ -5440,20 +5649,58 @@ def set_next_window_refresh_policy(flags: WindowRefreshFlags) -> None: """Windows: Idle, Refresh Policies [EXPERIMENTAL]""" pass -# IMGUI_API void SetCurrentFont(ImFont* font); /* original C++ signature */ -def set_current_font(font: ImFont) -> None: - """Fonts, drawing""" +# Fonts, drawing +# IMGUI_API void RegisterUserTexture(ImTextureData* tex); /* original C++ signature */ +def register_user_texture(tex: ImTextureData) -> None: + """Register external texture. EXPERIMENTAL: DO NOT USE YET.""" pass -# inline ImFont* GetDefaultFont() { ImGuiContext& g = *GImGui; return g.IO.FontDefault ? 
g.IO.FontDefault : g.IO.Fonts->Fonts[0]; } /* original C++ signature */ -def get_default_font() -> ImFont: +# IMGUI_API void UnregisterUserTexture(ImTextureData* tex); /* original C++ signature */ +def unregister_user_texture(tex: ImTextureData) -> None: + pass + +# IMGUI_API void RegisterFontAtlas(ImFontAtlas* atlas); /* original C++ signature */ +def register_font_atlas(atlas: ImFontAtlas) -> None: + pass + +# IMGUI_API void UnregisterFontAtlas(ImFontAtlas* atlas); /* original C++ signature */ +def unregister_font_atlas(atlas: ImFontAtlas) -> None: + pass + +# IMGUI_API void SetCurrentFont(ImFont* font, float font_size_before_scaling, float font_size_after_scaling); /* original C++ signature */ +def set_current_font(font: ImFont, font_size_before_scaling: float, font_size_after_scaling: float) -> None: + pass + +# IMGUI_API void UpdateCurrentFontSize(float restore_font_size_after_scaling); /* original C++ signature */ +def update_current_font_size(restore_font_size_after_scaling: float) -> None: + pass + +# IMGUI_API void SetFontRasterizerDensity(float rasterizer_density); /* original C++ signature */ +def set_font_rasterizer_density(rasterizer_density: float) -> None: + pass + +# inline float GetFontRasterizerDensity() { return GImGui->FontRasterizerDensity; } /* original C++ signature */ +def get_font_rasterizer_density() -> float: + """(private API)""" + pass + +# inline float GetRoundedFontSize(float size) { return IM_ROUND(size); } /* original C++ signature */ +def get_rounded_font_size(size: float) -> float: """(private API)""" pass +# IMGUI_API ImFont* GetDefaultFont(); /* original C++ signature */ +def get_default_font() -> ImFont: + pass + # IMGUI_API void PushPasswordFont(); /* original C++ signature */ def push_password_font() -> None: pass +# IMGUI_API void PopPasswordFont(); /* original C++ signature */ +def pop_password_font() -> None: + pass + # inline ImDrawList* GetForegroundDrawList(ImGuiWindow* window) { return 
GetForegroundDrawList(window->Viewport); } /* original C++ signature */ def get_foreground_draw_list(window: Window) -> ImDrawList: """(private API)""" @@ -5480,8 +5727,8 @@ def shutdown() -> None: def update_input_events(trickle_fast_inputs: bool) -> None: pass -# IMGUI_API void UpdateHoveredWindowAndCaptureFlags(); /* original C++ signature */ -def update_hovered_window_and_capture_flags() -> None: +# IMGUI_API void UpdateHoveredWindowAndCaptureFlags(const ImVec2& mouse_pos); /* original C++ signature */ +def update_hovered_window_and_capture_flags(mouse_pos: ImVec2Like) -> None: pass # IMGUI_API void StartMouseMovingWindow(ImGuiWindow* window); /* original C++ signature */ @@ -5492,6 +5739,10 @@ def start_mouse_moving_window(window: Window) -> None: def start_mouse_moving_window_or_node(window: Window, node: DockNode, undock: bool) -> None: pass +# IMGUI_API void StopMouseMovingWindow(); /* original C++ signature */ +def stop_mouse_moving_window() -> None: + pass + # IMGUI_API void UpdateMouseMovingWindowNewFrame(); /* original C++ signature */ def update_mouse_moving_window_new_frame() -> None: pass @@ -5749,8 +6000,14 @@ def calc_wrap_width_for_pos(pos: ImVec2Like, wrap_pos_x: float) -> float: def push_multi_items_widths(components: int, width_full: float) -> None: pass -# IMGUI_API void ShrinkWidths(ImGuiShrinkWidthItem* items, int count, float width_excess); /* original C++ signature */ -def shrink_widths(items: ShrinkWidthItem, count: int, width_excess: float) -> None: +# IMGUI_API void ShrinkWidths(ImGuiShrinkWidthItem* items, int count, float width_excess, float width_min); /* original C++ signature */ +def shrink_widths(items: ShrinkWidthItem, count: int, width_excess: float, width_min: float) -> None: + pass + +# IMGUI_API void CalcClipRectVisibleItemsY(const ImRect& clip_rect, const ImVec2& pos, float items_height, int* out_visible_start, int* out_visible_end); /* original C++ signature */ +def calc_clip_rect_visible_items_y( + clip_rect: ImRect, pos: 
ImVec2Like, items_height: float, out_visible_start: int, out_visible_end: int +) -> Tuple[int, int]: pass # Parameter stacks (shared) @@ -5909,7 +6166,7 @@ def nav_move_request_forward(move_dir: Dir, clip_dir: Dir, move_flags: NavMoveFl def nav_move_request_resolve_with_last_item(result: NavItemData) -> None: pass -# IMGUI_API void NavMoveRequestResolveWithPastTreeNode(ImGuiNavItemData* result, ImGuiTreeNodeStackData* tree_node_data); /* original C++ signature */ +# IMGUI_API void NavMoveRequestResolveWithPastTreeNode(ImGuiNavItemData* result, const ImGuiTreeNodeStackData* tree_node_data); /* original C++ signature */ def nav_move_request_resolve_with_past_tree_node(result: NavItemData, tree_node_data: TreeNodeStackData) -> None: pass @@ -5963,7 +6220,7 @@ def focus_item() -> None: # IMGUI_API void ActivateItemByID(ImGuiID id); /* original C++ signature */ def activate_item_by_id(id_: ID) -> None: - """Activate an item by ID (button, checkbox, tree node etc.). Activation is queued and processed on the next frame when the item is encountered again.""" + """Activate an item by ID (button, checkbox, tree node etc.). Activation is queued and processed on the next frame when the item is encountered again. Was called 'ActivateItem()' before 1.89.7.""" pass # Inputs @@ -6124,7 +6381,7 @@ def is_key_down(key: Key, owner_id: ID) -> bool: # IMGUI_API bool IsKeyPressed(ImGuiKey key, ImGuiInputFlags flags, ImGuiID owner_id = 0); /* original C++ signature */ def is_key_pressed(key: Key, flags: InputFlags, owner_id: ID = 0) -> bool: - """Important: when transitioning from old to new IsKeyPressed(): old API has "bool repeat = True", so would default to repeat. New API requiress explicit ImGuiInputFlags_Repeat.""" + """Important: when transitioning from old to new IsKeyPressed(): old API has "bool repeat = True", so would default to repeat. 
New API requires explicit ImGuiInputFlags_Repeat.""" pass # IMGUI_API bool IsKeyReleased(ImGuiKey key, ImGuiID owner_id); /* original C++ signature */ @@ -6434,6 +6691,10 @@ def is_drag_drop_active() -> bool: def begin_drag_drop_target_custom(bb: ImRect, id_: ID) -> bool: pass +# IMGUI_API bool BeginDragDropTargetViewport(ImGuiViewport* viewport, const ImRect* p_bb = NULL); /* original C++ signature */ +def begin_drag_drop_target_viewport(viewport: Viewport, p_bb: Optional[ImRect] = None) -> bool: + pass + # IMGUI_API void ClearDragDrop(); /* original C++ signature */ def clear_drag_drop() -> None: pass @@ -6442,8 +6703,12 @@ def clear_drag_drop() -> None: def is_drag_drop_payload_being_accepted() -> bool: pass -# IMGUI_API void RenderDragDropTargetRect(const ImRect& bb, const ImRect& item_clip_rect); /* original C++ signature */ -def render_drag_drop_target_rect(bb: ImRect, item_clip_rect: ImRect) -> None: +# IMGUI_API void RenderDragDropTargetRectForItem(const ImRect& bb); /* original C++ signature */ +def render_drag_drop_target_rect_for_item(bb: ImRect) -> None: + pass + +# IMGUI_API void RenderDragDropTargetRectEx(ImDrawList* draw_list, const ImRect& bb); /* original C++ signature */ +def render_drag_drop_target_rect_ex(draw_list: ImDrawList, bb: ImRect) -> None: pass # Typing-Select API @@ -6568,6 +6833,14 @@ def table_push_background_channel() -> None: def table_pop_background_channel() -> None: pass +# IMGUI_API void TablePushColumnChannel(int column_n); /* original C++ signature */ +def table_push_column_channel(column_n: int) -> None: + pass + +# IMGUI_API void TablePopColumnChannel(); /* original C++ signature */ +def table_pop_column_channel() -> None: + pass + # IMGUI_API void TableAngledHeadersRowEx(ImGuiID row_id, float angle, float max_label_width, const ImGuiTableHeaderData* data, int data_count); /* original C++ signature */ def table_angled_headers_row_ex( row_id: ID, angle: float, max_label_width: float, data: TableHeaderData, data_count: int @@ 
-6763,6 +7036,14 @@ def get_current_tab_bar() -> TabBar: """ pass +# IMGUI_API ImGuiTabBar* TabBarFindByID(ImGuiID id); /* original C++ signature */ +def tab_bar_find_by_id(id_: ID) -> TabBar: + pass + +# IMGUI_API void TabBarRemove(ImGuiTabBar* tab_bar); /* original C++ signature */ +def tab_bar_remove(tab_bar: TabBar) -> None: + pass + # IMGUI_API bool BeginTabBarEx(ImGuiTabBar* tab_bar, const ImRect& bb, ImGuiTabBarFlags flags); /* original C++ signature */ def begin_tab_bar_ex(tab_bar: TabBar, bb: ImRect, flags: TabBarFlags) -> bool: pass @@ -6907,12 +7188,11 @@ def render_text_clipped_ex( """ pass -# IMGUI_API void RenderTextEllipsis(ImDrawList* draw_list, const ImVec2& pos_min, const ImVec2& pos_max, float clip_max_x, float ellipsis_max_x, const char* text, const char* text_end, const ImVec2* text_size_if_known); /* original C++ signature */ +# IMGUI_API void RenderTextEllipsis(ImDrawList* draw_list, const ImVec2& pos_min, const ImVec2& pos_max, float ellipsis_max_x, const char* text, const char* text_end, const ImVec2* text_size_if_known); /* original C++ signature */ def render_text_ellipsis( draw_list: ImDrawList, pos_min: ImVec2Like, pos_max: ImVec2Like, - clip_max_x: float, ellipsis_max_x: float, text: str, text_end: str, @@ -6945,9 +7225,9 @@ def render_color_rect_with_alpha_checkerboard( # IMGUI_API void RenderNavCursor(const ImRect& bb, ImGuiID id, ImGuiNavRenderCursorFlags flags = ImGuiNavRenderCursorFlags_None); /* original C++ signature */ def render_nav_cursor(bb: ImRect, id_: ID, flags: NavRenderCursorFlags = NavRenderCursorFlags_None) -> None: + """Navigation highlight""" pass -# Navigation highlight # IMGUI_API const char* FindRenderedTextEnd(const char* text, const char* text_end = NULL); /* original C++ signature */ def find_rendered_text_end(text: str, text_end: Optional[str] = None) -> str: """Find the optional ## from which we stop displaying text.""" @@ -6998,11 +7278,17 @@ def render_rect_filled_with_hole( def 
calc_rounding_flags_for_rect_in_rect(r_in: ImRect, r_outer: ImRect, threshold: float) -> ImDrawFlags: pass -# Widgets +# Widgets: Text # IMGUI_API void TextEx(const char* text, const char* text_end = NULL, ImGuiTextFlags flags = 0); /* original C++ signature */ def text_ex(text: str, text_end: Optional[str] = None, flags: TextFlags = 0) -> None: pass +# IMGUI_API void TextAligned(float align_x, float size_x, const char* fmt, ...); /* original C++ signature */ +def text_aligned(align_x: float, size_x: float, fmt: str) -> None: + """FIXME-WIP: Works but API is likely to be reworked. This is designed for 1 item on the line. (#7024)""" + pass + +# Widgets # IMGUI_API bool ButtonEx(const char* label, const ImVec2& size_arg = ImVec2(0, 0), ImGuiButtonFlags flags = 0); /* original C++ signature */ def button_ex(label: str, size_arg: Optional[ImVec2Like] = None, flags: ButtonFlags = 0) -> bool: """Python bindings defaults: @@ -7014,10 +7300,10 @@ def button_ex(label: str, size_arg: Optional[ImVec2Like] = None, flags: ButtonFl def arrow_button_ex(str_id: str, dir: Dir, size_arg: ImVec2Like, flags: ButtonFlags = 0) -> bool: pass -# IMGUI_API bool ImageButtonEx(ImGuiID id, ImTextureID user_texture_id, const ImVec2& image_size, const ImVec2& uv0, const ImVec2& uv1, const ImVec4& bg_col, const ImVec4& tint_col, ImGuiButtonFlags flags = 0); /* original C++ signature */ +# IMGUI_API bool ImageButtonEx(ImGuiID id, ImTextureRef tex_ref, const ImVec2& image_size, const ImVec2& uv0, const ImVec2& uv1, const ImVec4& bg_col, const ImVec4& tint_col, ImGuiButtonFlags flags = 0); /* original C++ signature */ def image_button_ex( id_: ID, - user_texture_id: ImTextureID, + tex_ref: ImTextureRef, image_size: ImVec2Like, uv0: ImVec2Like, uv1: ImVec2Like, @@ -7134,6 +7420,14 @@ def splitter_behavior( def tree_node_behavior(id_: ID, flags: TreeNodeFlags, label: str, label_end: Optional[str] = None) -> bool: pass +# IMGUI_API void TreeNodeDrawLineToChildNode(const ImVec2& target_pos); /* 
original C++ signature */ +def tree_node_draw_line_to_child_node(target_pos: ImVec2Like) -> None: + pass + +# IMGUI_API void TreeNodeDrawLineToTreePop(const ImGuiTreeNodeStackData* data); /* original C++ signature */ +def tree_node_draw_line_to_tree_pop(data: TreeNodeStackData) -> None: + pass + # IMGUI_API void TreePushOverrideID(ImGuiID id); /* original C++ signature */ def tree_push_override_id(id_: ID) -> None: pass @@ -7378,10 +7672,19 @@ def debug_node_draw_cmd_show_mesh_and_bounding_box( def debug_node_font(font: ImFont) -> None: pass +# IMGUI_API void DebugNodeFontGlyphesForSrcMask(ImFont* font, ImFontBaked* baked, int src_mask); /* original C++ signature */ +def debug_node_font_glyphes_for_src_mask(font: ImFont, baked: ImFontBaked, src_mask: int) -> None: + pass + # IMGUI_API void DebugNodeFontGlyph(ImFont* font, const ImFontGlyph* glyph); /* original C++ signature */ def debug_node_font_glyph(font: ImFont, glyph: ImFontGlyph) -> None: pass +# IMGUI_API void DebugNodeTexture(ImTextureData* tex, int int_id, const ImFontAtlasRect* highlight_rect = NULL); /* original C++ signature */ +def debug_node_texture(tex: ImTextureData, int_id: int, highlight_rect: Optional[ImFontAtlasRect] = None) -> None: + """ID used to facilitate persisting the "current" texture.""" + pass + # IMGUI_API void DebugNodeStorage(ImGuiStorage* storage, const char* label); /* original C++ signature */ def debug_node_storage(storage: Storage, label: str) -> None: pass @@ -7440,23 +7743,353 @@ def debug_render_viewport_thumbnail(draw_list: ImDrawList, viewport: ViewportP, # Obsolete functions +# ----------------------------------------------------------------------------- +# [SECTION] ImFontLoader +# ----------------------------------------------------------------------------- + +class ImFontLoader: + """Hooks and storage for a given font backend. + This structure is likely to evolve as we add support for incremental atlas updates. 
+ Conceptually this could be public, but API is still going to be evolve. + """ + + # const char* Name; /* original C++ signature */ + name: str # (const) + + # size_t FontBakedSrcLoaderDataSize; /* original C++ signature */ + # Size of backend data, Per Baked * Per Source. Buffers are managed by core to avoid excessive allocations. + # FIXME: At this point the two other types of buffers may be managed by core to be consistent? + font_baked_src_loader_data_size: int + + # ImFontLoader() { memset(this, 0, sizeof(*this)); } /* original C++ signature */ + def __init__(self) -> None: + pass + # ----------------------------------------------------------------------------- # [SECTION] ImFontAtlas internal API # ----------------------------------------------------------------------------- -class ImFontBuilderIO: - """This structure is likely to evolve as we add support for incremental atlas updates. - Conceptually this could be in ImGuiPlatformIO, but we are far from ready to make this public. +# Helpers: ImTextureRef ==/!= operators provided as convenience +# (note that _TexID and _TexData are never set simultaneously) + +# Refer to ImFontAtlasPackGetRect() to better understand how this works. 
+# inline int ImFontAtlasRectId_GetIndex(ImFontAtlasRectId id) { return (id & ImFontAtlasRectId_IndexMask_); } /* original C++ signature */ +def im_font_atlas_rect_id_get_index(id_: ImFontAtlasRectId) -> int: + """(private API)""" + pass + +# inline unsigned int ImFontAtlasRectId_GetGeneration(ImFontAtlasRectId id) { return (unsigned int)(id & ImFontAtlasRectId_GenerationMask_) >> ImFontAtlasRectId_GenerationShift_; } /* original C++ signature */ +def im_font_atlas_rect_id_get_generation(id_: ImFontAtlasRectId) -> int: + """(private API)""" + pass + +# inline ImFontAtlasRectId ImFontAtlasRectId_Make(int index_idx, int gen_idx) { IM_ASSERT(index_idx >= 0 && index_idx <= ImFontAtlasRectId_IndexMask_ && gen_idx <= (ImFontAtlasRectId_GenerationMask_ >> ImFontAtlasRectId_GenerationShift_)); return (ImFontAtlasRectId)(index_idx | (gen_idx << ImFontAtlasRectId_GenerationShift_)); } /* original C++ signature */ +def im_font_atlas_rect_id_make(index_idx: int, gen_idx: int) -> ImFontAtlasRectId: + """(private API)""" + pass + +class ImFontAtlasRectEntry: + """Packed rectangle lookup entry (we need an indirection to allow removing/reordering rectangles) + User are returned ImFontAtlasRectId values which are meant to be persistent. + We handle this with an indirection. While Rects[] may be in theory shuffled, compacted etc., RectsIndex[] cannot it is keyed by ImFontAtlasRectId. + RectsIndex[] is used both as an index into Rects[] and an index into itself. This is basically a free-list. See ImFontAtlasBuildAllocRectIndexEntry() code. + Having this also makes it easier to e.g. sort rectangles during repack. 
""" - # ImFontBuilderIO(); /* original C++ signature */ + # ImFontAtlasRectEntry(); /* original C++ signature */ + def __init__(self) -> None: + """Auto-generated default constructor""" + pass + +class ImFontAtlasPostProcessData: + """Data available to potential texture post-processing functions""" + + # ImFontAtlas* FontAtlas; /* original C++ signature */ + font_atlas: ImFontAtlas + # ImFont* Font; /* original C++ signature */ + font: ImFont + # ImFontConfig* FontSrc; /* original C++ signature */ + font_src: ImFontConfig + # ImFontBaked* FontBaked; /* original C++ signature */ + font_baked: ImFontBaked + # ImFontGlyph* Glyph; /* original C++ signature */ + glyph: ImFontGlyph + + # Pixel data + # void* Pixels; /* original C++ signature */ + pixels: Any + # ImTextureFormat Format; /* original C++ signature */ + format: ImTextureFormat + # int Pitch; /* original C++ signature */ + pitch: int + # int Width; /* original C++ signature */ + width: int + # int Height; /* original C++ signature */ + height: int + # ImFontAtlasPostProcessData(ImTextureFormat Format = ImTextureFormat(), int Pitch = int(), int Width = int(), int Height = int()); /* original C++ signature */ + def __init__( + self, format: Optional[ImTextureFormat] = None, pitch: int = int(), width: int = int(), height: int = int() + ) -> None: + """Auto-generated default constructor with named params + + + Python bindings defaults: + If Format is None, then its default value will be: ImTextureFormat() + """ + pass + +# We avoid dragging imstb_rectpack.h into public header (partly because binding generators are having issues with it) +class stbrp_context_opaque: + # stbrp_context_opaque(); /* original C++ signature */ def __init__(self) -> None: """Auto-generated default constructor""" pass -# Helper for font builder -# IMGUI_API void ImFontAtlasUpdateSourcesPointers(ImFontAtlas* atlas); /* original C++ signature */ -def im_font_atlas_update_sources_pointers(atlas: ImFontAtlas) -> None: +class 
ImFontAtlasBuilder: + """Internal storage for incrementally packing and building a ImFontAtlas""" + + # stbrp_context_opaque PackContext; /* original C++ signature */ + pack_context: stbrp_context_opaque # Actually 'stbrp_context' but we don't want to define this in the header file. + # ImVector Rects; /* original C++ signature */ + rects: ImVector_ImTextureRect + # ImVector TempBuffer; /* original C++ signature */ + temp_buffer: ImVector_uchar # Misc scratch buffer + # int RectsIndexFreeListStart; /* original C++ signature */ + rects_index_free_list_start: int # First unused entry + # int RectsPackedCount; /* original C++ signature */ + rects_packed_count: int # Number of packed rectangles. + # int RectsPackedSurface; /* original C++ signature */ + rects_packed_surface: ( + int # Number of packed pixels. Used when compacting to heuristically find the ideal texture size. + ) + # int RectsDiscardedCount; /* original C++ signature */ + rects_discarded_count: int + # int RectsDiscardedSurface; /* original C++ signature */ + rects_discarded_surface: int + # int FrameCount; /* original C++ signature */ + frame_count: int # Current frame count + # ImVec2i MaxRectSize; /* original C++ signature */ + max_rect_size: ImVec2i # Largest rectangle to pack (de-facto used as a "minimum texture size") + # ImVec2i MaxRectBounds; /* original C++ signature */ + max_rect_bounds: ImVec2i # Bottom-right most used pixels + # bool LockDisableResize; /* original C++ signature */ + lock_disable_resize: bool # Disable resizing texture + # bool PreloadedAllGlyphsRanges; /* original C++ signature */ + preloaded_all_glyphs_ranges: ( + bool # Set when missing ImGuiBackendFlags_RendererHasTextures features forces atlas to preload everything. 
+ ) + + # Cache of all ImFontBaked + # ImGuiStorage BakedMap; /* original C++ signature */ + baked_map: Storage # BakedId --> ImFontBaked* + # int BakedDiscardedCount; /* original C++ signature */ + baked_discarded_count: int + + # Custom rectangle identifiers + # ImFontAtlasRectId PackIdMouseCursors; /* original C++ signature */ + pack_id_mouse_cursors: ( + ImFontAtlasRectId # White pixel + mouse cursors. Also happen to be fallback in case of packing failure. + ) + # ImFontAtlasRectId PackIdLinesTexData; /* original C++ signature */ + pack_id_lines_tex_data: ImFontAtlasRectId + +# IMGUI_API ImTextureData* ImFontAtlasTextureAdd(ImFontAtlas* atlas, int w, int h); /* original C++ signature */ +def im_font_atlas_texture_add(atlas: ImFontAtlas, w: int, h: int) -> ImTextureData: + pass + +# IMGUI_API void ImFontAtlasTextureMakeSpace(ImFontAtlas* atlas); /* original C++ signature */ +def im_font_atlas_texture_make_space(atlas: ImFontAtlas) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureRepack(ImFontAtlas* atlas, int w, int h); /* original C++ signature */ +def im_font_atlas_texture_repack(atlas: ImFontAtlas, w: int, h: int) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureGrow(ImFontAtlas* atlas, int old_w = -1, int old_h = -1); /* original C++ signature */ +def im_font_atlas_texture_grow(atlas: ImFontAtlas, old_w: int = -1, old_h: int = -1) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureCompact(ImFontAtlas* atlas); /* original C++ signature */ +def im_font_atlas_texture_compact(atlas: ImFontAtlas) -> None: + pass + +# IMGUI_API ImVec2i ImFontAtlasTextureGetSizeEstimate(ImFontAtlas* atlas); /* original C++ signature */ +def im_font_atlas_texture_get_size_estimate(atlas: ImFontAtlas) -> ImVec2i: + pass + +# IMGUI_API bool ImFontAtlasFontSourceInit(ImFontAtlas* atlas, ImFontConfig* src); /* original C++ signature */ +def im_font_atlas_font_source_init(atlas: ImFontAtlas, src: ImFontConfig) -> bool: + pass + +# IMGUI_API void 
ImFontAtlasFontSourceAddToFont(ImFontAtlas* atlas, ImFont* font, ImFontConfig* src); /* original C++ signature */ +def im_font_atlas_font_source_add_to_font(atlas: ImFontAtlas, font: ImFont, src: ImFontConfig) -> None: + pass + +# IMGUI_API void ImFontAtlasFontDestroySourceData(ImFontAtlas* atlas, ImFontConfig* src); /* original C++ signature */ +def im_font_atlas_font_destroy_source_data(atlas: ImFontAtlas, src: ImFontConfig) -> None: + pass + +# IMGUI_API bool ImFontAtlasFontInitOutput(ImFontAtlas* atlas, ImFont* font); /* original C++ signature */ +def im_font_atlas_font_init_output(atlas: ImFontAtlas, font: ImFont) -> bool: + """Using FontDestroyOutput/FontInitOutput sequence useful notably if font loader params have changed""" + pass + +# IMGUI_API void ImFontAtlasFontDestroyOutput(ImFontAtlas* atlas, ImFont* font); /* original C++ signature */ +def im_font_atlas_font_destroy_output(atlas: ImFontAtlas, font: ImFont) -> None: + pass + +# IMGUI_API void ImFontAtlasFontDiscardBakes(ImFontAtlas* atlas, ImFont* font, int unused_frames); /* original C++ signature */ +def im_font_atlas_font_discard_bakes(atlas: ImFontAtlas, font: ImFont, unused_frames: int) -> None: + pass + +# IMGUI_API ImGuiID ImFontAtlasBakedGetId(ImGuiID font_id, float baked_size, float rasterizer_density); /* original C++ signature */ +def im_font_atlas_baked_get_id(font_id: ID, baked_size: float, rasterizer_density: float) -> ID: + pass + +# IMGUI_API ImFontBaked* ImFontAtlasBakedGetOrAdd(ImFontAtlas* atlas, ImFont* font, float font_size, float font_rasterizer_density); /* original C++ signature */ +def im_font_atlas_baked_get_or_add( + atlas: ImFontAtlas, font: ImFont, font_size: float, font_rasterizer_density: float +) -> ImFontBaked: + pass + +# IMGUI_API ImFontBaked* ImFontAtlasBakedGetClosestMatch(ImFontAtlas* atlas, ImFont* font, float font_size, float font_rasterizer_density); /* original C++ signature */ +def im_font_atlas_baked_get_closest_match( + atlas: ImFontAtlas, font: ImFont, 
font_size: float, font_rasterizer_density: float +) -> ImFontBaked: + pass + +# IMGUI_API ImFontBaked* ImFontAtlasBakedAdd(ImFontAtlas* atlas, ImFont* font, float font_size, float font_rasterizer_density, ImGuiID baked_id); /* original C++ signature */ +def im_font_atlas_baked_add( + atlas: ImFontAtlas, font: ImFont, font_size: float, font_rasterizer_density: float, baked_id: ID +) -> ImFontBaked: + pass + +# IMGUI_API void ImFontAtlasBakedDiscard(ImFontAtlas* atlas, ImFont* font, ImFontBaked* baked); /* original C++ signature */ +def im_font_atlas_baked_discard(atlas: ImFontAtlas, font: ImFont, baked: ImFontBaked) -> None: + pass + +# IMGUI_API ImFontGlyph* ImFontAtlasBakedAddFontGlyph(ImFontAtlas* atlas, ImFontBaked* baked, ImFontConfig* src, const ImFontGlyph* in_glyph); /* original C++ signature */ +def im_font_atlas_baked_add_font_glyph( + atlas: ImFontAtlas, baked: ImFontBaked, src: ImFontConfig, in_glyph: ImFontGlyph +) -> ImFontGlyph: + pass + +# IMGUI_API void ImFontAtlasBakedAddFontGlyphAdvancedX(ImFontAtlas* atlas, ImFontBaked* baked, ImFontConfig* src, ImWchar codepoint, float advance_x); /* original C++ signature */ +def im_font_atlas_baked_add_font_glyph_advanced_x( + atlas: ImFontAtlas, baked: ImFontBaked, src: ImFontConfig, codepoint: ImWchar, advance_x: float +) -> None: + pass + +# IMGUI_API void ImFontAtlasBakedDiscardFontGlyph(ImFontAtlas* atlas, ImFont* font, ImFontBaked* baked, ImFontGlyph* glyph); /* original C++ signature */ +def im_font_atlas_baked_discard_font_glyph( + atlas: ImFontAtlas, font: ImFont, baked: ImFontBaked, glyph: ImFontGlyph +) -> None: + pass + +# IMGUI_API void ImFontAtlasBakedSetFontGlyphBitmap(ImFontAtlas* atlas, ImFontBaked* baked, ImFontConfig* src, ImFontGlyph* glyph, ImTextureRect* r, const uchar* src_pixels, ImTextureFormat src_fmt, int src_pitch); /* original C++ signature */ +def im_font_atlas_baked_set_font_glyph_bitmap( + atlas: ImFontAtlas, + baked: ImFontBaked, + src: ImFontConfig, + glyph: ImFontGlyph, + r: 
ImTextureRect, + src_pixels: uchar, + src_fmt: ImTextureFormat, + src_pitch: int, +) -> None: + pass + +# IMGUI_API void ImFontAtlasPackInit(ImFontAtlas* atlas); /* original C++ signature */ +def im_font_atlas_pack_init(atlas: ImFontAtlas) -> None: + pass + +# IMGUI_API ImFontAtlasRectId ImFontAtlasPackAddRect(ImFontAtlas* atlas, int w, int h, ImFontAtlasRectEntry* overwrite_entry = NULL); /* original C++ signature */ +def im_font_atlas_pack_add_rect( + atlas: ImFontAtlas, w: int, h: int, overwrite_entry: Optional[ImFontAtlasRectEntry] = None +) -> ImFontAtlasRectId: + pass + +# IMGUI_API ImTextureRect* ImFontAtlasPackGetRect(ImFontAtlas* atlas, ImFontAtlasRectId id); /* original C++ signature */ +def im_font_atlas_pack_get_rect(atlas: ImFontAtlas, id_: ImFontAtlasRectId) -> ImTextureRect: + pass + +# IMGUI_API ImTextureRect* ImFontAtlasPackGetRectSafe(ImFontAtlas* atlas, ImFontAtlasRectId id); /* original C++ signature */ +def im_font_atlas_pack_get_rect_safe(atlas: ImFontAtlas, id_: ImFontAtlasRectId) -> ImTextureRect: + pass + +# IMGUI_API void ImFontAtlasPackDiscardRect(ImFontAtlas* atlas, ImFontAtlasRectId id); /* original C++ signature */ +def im_font_atlas_pack_discard_rect(atlas: ImFontAtlas, id_: ImFontAtlasRectId) -> None: + pass + +# IMGUI_API void ImFontAtlasUpdateNewFrame(ImFontAtlas* atlas, int frame_count, bool renderer_has_textures); /* original C++ signature */ +def im_font_atlas_update_new_frame(atlas: ImFontAtlas, frame_count: int, renderer_has_textures: bool) -> None: + pass + +# IMGUI_API void ImFontAtlasAddDrawListSharedData(ImFontAtlas* atlas, ImDrawListSharedData* data); /* original C++ signature */ +def im_font_atlas_add_draw_list_shared_data(atlas: ImFontAtlas, data: ImDrawListSharedData) -> None: + pass + +# IMGUI_API void ImFontAtlasRemoveDrawListSharedData(ImFontAtlas* atlas, ImDrawListSharedData* data); /* original C++ signature */ +def im_font_atlas_remove_draw_list_shared_data(atlas: ImFontAtlas, data: ImDrawListSharedData) -> None: 
+ pass + +# IMGUI_API void ImFontAtlasUpdateDrawListsTextures(ImFontAtlas* atlas, ImTextureRef old_tex, ImTextureRef new_tex); /* original C++ signature */ +def im_font_atlas_update_draw_lists_textures(atlas: ImFontAtlas, old_tex: ImTextureRef, new_tex: ImTextureRef) -> None: + pass + +# IMGUI_API void ImFontAtlasUpdateDrawListsSharedData(ImFontAtlas* atlas); /* original C++ signature */ +def im_font_atlas_update_draw_lists_shared_data(atlas: ImFontAtlas) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureBlockConvert(const uchar* src_pixels, ImTextureFormat src_fmt, int src_pitch, uchar* dst_pixels, ImTextureFormat dst_fmt, int dst_pitch, int w, int h); /* original C++ signature */ +def im_font_atlas_texture_block_convert( + src_pixels: uchar, + src_fmt: ImTextureFormat, + src_pitch: int, + dst_pixels: uchar, + dst_fmt: ImTextureFormat, + dst_pitch: int, + w: int, + h: int, +) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureBlockPostProcess(ImFontAtlasPostProcessData* data); /* original C++ signature */ +def im_font_atlas_texture_block_post_process(data: ImFontAtlasPostProcessData) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureBlockPostProcessMultiply(ImFontAtlasPostProcessData* data, float multiply_factor); /* original C++ signature */ +def im_font_atlas_texture_block_post_process_multiply(data: ImFontAtlasPostProcessData, multiply_factor: float) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureBlockFill(ImTextureData* dst_tex, int dst_x, int dst_y, int w, int h, ImU32 col); /* original C++ signature */ +def im_font_atlas_texture_block_fill( + dst_tex: ImTextureData, dst_x: int, dst_y: int, w: int, h: int, col: ImU32 +) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureBlockCopy(ImTextureData* src_tex, int src_x, int src_y, ImTextureData* dst_tex, int dst_x, int dst_y, int w, int h); /* original C++ signature */ +def im_font_atlas_texture_block_copy( + src_tex: ImTextureData, src_x: int, src_y: int, dst_tex: ImTextureData, dst_x: int, 
dst_y: int, w: int, h: int +) -> None: + pass + +# IMGUI_API void ImFontAtlasTextureBlockQueueUpload(ImFontAtlas* atlas, ImTextureData* tex, int x, int y, int w, int h); /* original C++ signature */ +def im_font_atlas_texture_block_queue_upload( + atlas: ImFontAtlas, tex: ImTextureData, x: int, y: int, w: int, h: int +) -> None: + pass + +# IMGUI_API int ImTextureDataGetFormatBytesPerPixel(ImTextureFormat format); /* original C++ signature */ +def im_texture_data_get_format_bytes_per_pixel(format: ImTextureFormat) -> int: + pass + +# IMGUI_API const char* ImTextureDataGetStatusName(ImTextureStatus status); /* original C++ signature */ +def im_texture_data_get_status_name(status: ImTextureStatus) -> str: + pass + +# IMGUI_API const char* ImTextureDataGetFormatName(ImTextureFormat format); /* original C++ signature */ +def im_texture_data_get_format_name(format: ImTextureFormat) -> str: pass # IMGUI_API bool ImFontAtlasGetMouseCursorTexData(ImFontAtlas* atlas, ImGuiMouseCursor cursor_type, ImVec2* out_offset, ImVec2* out_size, ImVec2 out_uv_border[2], ImVec2 out_uv_fill[2]); /* original C++ signature */ @@ -7487,7 +8120,7 @@ def im_font_atlas_get_mouse_cursor_tex_data( # - ImGuiDockContext # ----------------------------------------------------------------------------- -class DockRequestType(enum.Enum): +class DockRequestType(enum.IntFlag): # ImGuiDockRequestType_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiDockRequestType_Dock, /* original C++ signature */ @@ -7584,7 +8217,7 @@ class DockNodeSettings: # class im_stb: # Proxy class that introduces typings for the *submodule* im_stb - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) 
# #################### #################### diff --git a/blimgui/dist64/imgui_bundle/imgui/test_engine.pyi b/blimgui/dist64/imgui_bundle/imgui/test_engine.pyi index e1d3d48..09231be 100644 --- a/blimgui/dist64/imgui_bundle/imgui/test_engine.pyi +++ b/blimgui/dist64/imgui_bundle/imgui/test_engine.pyi @@ -128,7 +128,7 @@ ImVector_Window = ImVector_Window_ptr # Types # ------------------------------------------------------------------------- -class TestEngineExportFormat(enum.Enum): +class TestEngineExportFormat(enum.IntFlag): # ImGuiTestEngineExportFormat_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiTestEngineExportFormat_JUnitXml, /* original C++ signature */ @@ -185,18 +185,19 @@ def export_ex(engine: TestEngine, format: TestEngineExportFormat, filename: str) # Types # ------------------------------------------------------------------------- -class TestActiveFunc(enum.Enum): +class TestActiveFunc(enum.IntFlag): """Stored in ImGuiTestContext: where we are currently running GuiFunc or TestFunc""" # ImGuiTestActiveFunc_None, /* original C++ signature */ none = enum.auto() # (= 0) - # ImGuiTestActiveFunc_GuiFunc, /* original C++ signature */ - gui_func = enum.auto() # (= 1) - # ImGuiTestActiveFunc_TestFunc /* original C++ signature */ - # } - test_func = enum.auto() # (= 2) - -class TestRunSpeed(enum.Enum): + # ImGuiTestActiveFunc_GuiFunc, /* original C++ signature */ + gui_func = enum.auto() # (= 1) # == GuiFunc() handler + # ImGuiTestActiveFunc_TestFunc, /* original C++ signature */ + test_func = enum.auto() # (= 2) # == TestFunc() handler + # ImGuiTestActiveFunc_TeardownFunc, /* original C++ signature */ + teardown_func = enum.auto() # (= 3) # == TeardownFunc() handler + +class TestRunSpeed(enum.IntFlag): # ImGuiTestRunSpeed_Fast = 0, /* original C++ signature */ fast = enum.auto() # (= 0) # Run tests as fast as possible (teleport mouse, skip delays, etc.) 
# ImGuiTestRunSpeed_Normal = 1, /* original C++ signature */ @@ -207,7 +208,7 @@ class TestRunSpeed(enum.Enum): # } count = enum.auto() # (= 3) -class TestVerboseLevel(enum.Enum): +class TestVerboseLevel(enum.IntFlag): # ImGuiTestVerboseLevel_Silent = 0, /* original C++ signature */ silent = enum.auto() # (= 0) # -v0 # ImGuiTestVerboseLevel_Error = 1, /* original C++ signature */ @@ -224,7 +225,7 @@ class TestVerboseLevel(enum.Enum): # } count = enum.auto() # (= 6) -class TestStatus(enum.Enum): +class TestStatus(enum.IntFlag): """Test status (stored in ImGuiTest)""" # ImGuiTestStatus_Unknown = 0, /* original C++ signature */ @@ -243,7 +244,7 @@ class TestStatus(enum.Enum): # } count = enum.auto() # (= 6) -class TestGroup(enum.Enum): +class TestGroup(enum.IntFlag): """Test group: this is mostly used to categorize tests in our testing UI. (Stored in ImGuiTest)""" # ImGuiTestGroup_Unknown = -1, /* original C++ signature */ @@ -256,7 +257,7 @@ class TestGroup(enum.Enum): # } count = enum.auto() # (= 2) -class TestFlags_(enum.Enum): +class TestFlags_(enum.IntFlag): """Flags (stored in ImGuiTest)""" # ImGuiTestFlags_None = 0, /* original C++ signature */ @@ -275,7 +276,7 @@ class TestFlags_(enum.Enum): ) # (= 1 << 2) # Error/recovery warnings (missing End/Pop calls etc.) will be displayed as normal debug entries, for tests which may rely on those. 
# ImGuiTestFlags_RequireViewports = 1 << 10 -class TestCheckFlags_(enum.Enum): +class TestCheckFlags_(enum.IntFlag): """Flags for IM_CHECK* macros.""" # ImGuiTestCheckFlags_None = 0, /* original C++ signature */ @@ -284,7 +285,7 @@ class TestCheckFlags_(enum.Enum): # } silent_success = enum.auto() # (= 1 << 0) -class TestLogFlags_(enum.Enum): +class TestLogFlags_(enum.IntFlag): """Flags for ImGuiTestContext::Log* functions.""" # ImGuiTestLogFlags_None = 0, /* original C++ signature */ @@ -292,7 +293,7 @@ class TestLogFlags_(enum.Enum): # ImGuiTestLogFlags_NoHeader = 1 << 0 /* original C++ signature */ no_header = enum.auto() # (= 1 << 0) # Do not display frame count and depth padding -class TestRunFlags_(enum.Enum): +class TestRunFlags_(enum.IntFlag): # ImGuiTestRunFlags_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiTestRunFlags_GuiFuncDisable = 1 << 0, /* original C++ signature */ @@ -363,16 +364,16 @@ def find_item_debug_label(ui_ctx: Context, id_: ID) -> str: def check(file: str, func: str, line: int, flags: TestCheckFlags, result: bool, expr: str) -> bool: pass -# IMGUI_API bool ImGuiTestEngine_CheckStrOp(const char* file, const char* func, int line, ImGuiTestCheckFlags flags, const char* op, const char* lhs_var, const char* lhs_value, const char* rhs_var, const char* rhs_value, bool* out_result); /* original C++ signature */ -def check_str_op( +# IMGUI_API bool ImGuiTestEngine_CheckOpStr(const char* file, const char* func, int line, ImGuiTestCheckFlags flags, const char* op, const char* lhs_desc, const char* lhs_value, const char* rhs_desc, const char* rhs_value, bool* out_result); /* original C++ signature */ +def check_op_str( file: str, func: str, line: int, flags: TestCheckFlags, op: str, - lhs_var: str, + lhs_desc: str, lhs_value: str, - rhs_var: str, + rhs_desc: str, rhs_value: str, out_result: bool, ) -> Tuple[bool, bool]: @@ -522,14 +523,6 @@ class TestEngineIO: config_break_on_error: bool = False # Break debugger on test 
error by calling IM_DEBUG_BREAK() # bool ConfigKeepGuiFunc = false; /* original C++ signature */ config_keep_gui_func: bool = False # Keep test GUI running at the end of the test - # ImGuiTestVerboseLevel ConfigVerboseLevel = ImGuiTestVerboseLevel_Warning; /* original C++ signature */ - config_verbose_level: TestVerboseLevel = TestVerboseLevel_Warning - # ImGuiTestVerboseLevel ConfigVerboseLevelOnError = ImGuiTestVerboseLevel_Info; /* original C++ signature */ - config_verbose_level_on_error: TestVerboseLevel = TestVerboseLevel_Info - # bool ConfigLogToTTY = false; /* original C++ signature */ - config_log_to_tty: bool = False - # bool ConfigLogToDebugger = false; /* original C++ signature */ - config_log_to_debugger: bool = False # bool ConfigRestoreFocusAfterTests = true; /* original C++ signature */ config_restore_focus_after_tests: bool = True # Restore focus back after running tests # bool ConfigCaptureEnabled = true; /* original C++ signature */ @@ -547,6 +540,18 @@ class TestEngineIO: # int PerfStressAmount = 1; /* original C++ signature */ perf_stress_amount: int = 1 # Integer to scale the amount of items submitted in test + # Options: Logging + # ImGuiTestVerboseLevel ConfigVerboseLevel = ImGuiTestVerboseLevel_Warning; /* original C++ signature */ + config_verbose_level: TestVerboseLevel = TestVerboseLevel_Warning + # ImGuiTestVerboseLevel ConfigVerboseLevelOnError = ImGuiTestVerboseLevel_Info; /* original C++ signature */ + config_verbose_level_on_error: TestVerboseLevel = TestVerboseLevel_Info + # bool ConfigLogToTTY = false; /* original C++ signature */ + config_log_to_tty: bool = False # Output log entries to TTY (in addition to Test Engine UI) + # bool ConfigLogToDebugger = false; /* original C++ signature */ + config_log_to_debugger: bool = False # Output log entries to Debugger (in addition to Test Engine UI) + # void* ConfigLogToFuncUserData = NULL; /* original C++ signature */ + config_log_to_func_user_data: Any = None + # Options: Speed of user 
simulation # float MouseSpeed = 600.0f; /* original C++ signature */ mouse_speed: float = 600.0 # Mouse speed (pixel/second) when not running in fast mode @@ -572,8 +577,7 @@ class TestEngineIO: # float ConfigWatchdogKillTest = 60.0f; /* original C++ signature */ config_watchdog_kill_test: float = 60.0 # Attempt to stop running a test when exceeding this time (in second) # float ConfigWatchdogKillApp = FLT_MAX; /* original C++ signature */ - config_watchdog_kill_app: float = sys.float_info.max - # Stop application when exceeding this time (in second) + config_watchdog_kill_app: float = sys.float_info.max # Stop application when exceeding this time (in second) # Options: Export # While you can manually call ImGuiTestEngine_Export(), registering filename/format here ensure the crash handler will always export if application crash. @@ -581,7 +585,7 @@ class TestEngineIO: # ImGuiTestEngineExportFormat ExportResultsFormat = (ImGuiTestEngineExportFormat)0; /* original C++ signature */ export_results_format: TestEngineExportFormat = TestEngineExportFormat.j_unit_xml - # #ifdef IMGUI_BUNDLE_PYTHON_API + # #ifdef IMGUI_BUNDLE_PYTHON_API # # void ExportResultsFilename_Set(const char* filename); /* original C++ signature */ def export_results_filename_set(self, filename: str) -> None: @@ -589,7 +593,7 @@ class TestEngineIO: (private API) """ pass - # #endif + # #endif # # Options: Sanity Checks @@ -611,7 +615,7 @@ class TestEngineIO: ) # bool IsCapturing = false; /* original C++ signature */ is_capturing: bool = False # Capture is in progress - # ImGuiTestEngineIO(bool ConfigSavedSettings = true, ImGuiTestRunSpeed ConfigRunSpeed = ImGuiTestRunSpeed_Fast, bool ConfigStopOnError = false, bool ConfigBreakOnError = false, bool ConfigKeepGuiFunc = false, ImGuiTestVerboseLevel ConfigVerboseLevel = ImGuiTestVerboseLevel_Warning, ImGuiTestVerboseLevel ConfigVerboseLevelOnError = ImGuiTestVerboseLevel_Info, bool ConfigLogToTTY = false, bool ConfigLogToDebugger = false, bool 
ConfigRestoreFocusAfterTests = true, bool ConfigCaptureEnabled = true, bool ConfigCaptureOnError = false, bool ConfigNoThrottle = false, bool ConfigMouseDrawCursor = true, float ConfigFixedDeltaTime = 0.0f, int PerfStressAmount = 1, float MouseSpeed = 600.0f, float MouseWobble = 0.25f, float ScrollSpeed = 1400.0f, float TypingSpeed = 20.0f, float ActionDelayShort = 0.15f, float ActionDelayStandard = 0.40f, float ConfigWatchdogWarning = 30.0f, float ConfigWatchdogKillTest = 60.0f, float ConfigWatchdogKillApp = FLT_MAX, ImGuiTestEngineExportFormat ExportResultsFormat = (ImGuiTestEngineExportFormat)0, bool CheckDrawDataIntegrity = false, bool IsRunningTests = false, bool IsRequestingMaxAppSpeed = false, bool IsCapturing = false); /* original C++ signature */ + # ImGuiTestEngineIO(bool ConfigSavedSettings = true, ImGuiTestRunSpeed ConfigRunSpeed = ImGuiTestRunSpeed_Fast, bool ConfigStopOnError = false, bool ConfigBreakOnError = false, bool ConfigKeepGuiFunc = false, bool ConfigRestoreFocusAfterTests = true, bool ConfigCaptureEnabled = true, bool ConfigCaptureOnError = false, bool ConfigNoThrottle = false, bool ConfigMouseDrawCursor = true, float ConfigFixedDeltaTime = 0.0f, int PerfStressAmount = 1, ImGuiTestVerboseLevel ConfigVerboseLevel = ImGuiTestVerboseLevel_Warning, ImGuiTestVerboseLevel ConfigVerboseLevelOnError = ImGuiTestVerboseLevel_Info, bool ConfigLogToTTY = false, bool ConfigLogToDebugger = false, float MouseSpeed = 600.0f, float MouseWobble = 0.25f, float ScrollSpeed = 1400.0f, float TypingSpeed = 20.0f, float ActionDelayShort = 0.15f, float ActionDelayStandard = 0.40f, float ConfigWatchdogWarning = 30.0f, float ConfigWatchdogKillTest = 60.0f, float ConfigWatchdogKillApp = FLT_MAX, ImGuiTestEngineExportFormat ExportResultsFormat = (ImGuiTestEngineExportFormat)0, bool CheckDrawDataIntegrity = false, bool IsRunningTests = false, bool IsRequestingMaxAppSpeed = false, bool IsCapturing = false); /* original C++ signature */ def __init__( self, 
config_saved_settings: bool = True, @@ -619,10 +623,6 @@ class TestEngineIO: config_stop_on_error: bool = False, config_break_on_error: bool = False, config_keep_gui_func: bool = False, - config_verbose_level: TestVerboseLevel = TestVerboseLevel_Warning, - config_verbose_level_on_error: TestVerboseLevel = TestVerboseLevel_Info, - config_log_to_tty: bool = False, - config_log_to_debugger: bool = False, config_restore_focus_after_tests: bool = True, config_capture_enabled: bool = True, config_capture_on_error: bool = False, @@ -630,6 +630,10 @@ class TestEngineIO: config_mouse_draw_cursor: bool = True, config_fixed_delta_time: float = 0.0, perf_stress_amount: int = 1, + config_verbose_level: TestVerboseLevel = TestVerboseLevel_Warning, + config_verbose_level_on_error: TestVerboseLevel = TestVerboseLevel_Info, + config_log_to_tty: bool = False, + config_log_to_debugger: bool = False, mouse_speed: float = 600.0, mouse_wobble: float = 0.25, scroll_speed: float = 1400.0, @@ -748,11 +752,19 @@ class TestLog: def __init__(self) -> None: """Functions""" pass - # bool IsEmpty() const { return Buffer.empty(); } /* original C++ signature */ + # bool IsEmpty() const { return Buffer.empty(); } /* original C++ signature */ def is_empty(self) -> bool: """(private API)""" pass - # void Clear(); /* original C++ signature */ + # const char* GetText() { return Buffer.c_str(); } /* original C++ signature */ + def get_text(self) -> str: + """(private API)""" + pass + # int GetTextLen() { return Buffer.size(); } /* original C++ signature */ + def get_text_len(self) -> int: + """(private API)""" + pass + # void Clear(); /* original C++ signature */ def clear(self) -> None: """(private API)""" pass @@ -833,7 +845,11 @@ class Test: None # GUI function (optional if your test are running over an existing GUI application) ) # Function_TestRunner TestFunc = nullptr; /* original C++ signature */ - test_func: Function_TestRunner = None # Test function + test_func: Function_TestRunner = None # 
Test driving function + # Function_TestRunner TeardownFunc = nullptr; /* original C++ signature */ + teardown_func: Function_TestRunner = ( + None # Teardown driving function, executed after TestFunc _regardless_ of TestFunc failing. + ) # void* UserData = nullptr; /* original C++ signature */ user_data: Any = ( None # General purpose user data (if assigning capturing lambdas on GuiFunc/TestFunc you may not need to use this) @@ -974,7 +990,7 @@ class TestRefDesc: # [SECTION] ImGuiTestContext related Flags/Enumerations # ------------------------------------------------------------------------- -class TestAction(enum.Enum): +class TestAction(enum.IntFlag): """Named actions. Generally you will call the named helpers e.g. ItemClick(). This is used by shared/low-level functions such as ItemAction().""" # ImGuiTestAction_Unknown = 0, /* original C++ signature */ @@ -1007,7 +1023,7 @@ class TestAction(enum.Enum): # } count = enum.auto() # (= 10) -class TestOpFlags_(enum.Enum): +class TestOpFlags_(enum.IntFlag): """Generic flags for many ImGuiTestContext functions Some flags are only supported by a handful of functions. Check function headers for list of supported flags. 
""" @@ -1081,6 +1097,8 @@ class TestGenericItemStatus: ret_value: int # return value # int Hovered; /* original C++ signature */ hovered: int # result of IsItemHovered() + # int HoveredAllowDisabled; /* original C++ signature */ + hovered_allow_disabled: int # result of IsItemHovered(ImGuiHoveredFlags_AllowWhenDisabled) # int Active; /* original C++ signature */ active: int # result of IsItemActive() # int Focused; /* original C++ signature */ @@ -1109,7 +1127,7 @@ class TestGenericItemStatus: def query_set(self, ret_val: bool = False) -> None: """(private API)""" pass - # void QueryInc(bool ret_val = false) { RetValue += ret_val; Hovered += ImGui::IsItemHovered(); Active += ImGui::IsItemActive(); Focused += ImGui::IsItemFocused(); Clicked += ImGui::IsItemClicked(); Visible += ImGui::IsItemVisible(); Edited += ImGui::IsItemEdited(); Activated += ImGui::IsItemActivated(); Deactivated += ImGui::IsItemDeactivated(); DeactivatedAfterEdit += ImGui::IsItemDeactivatedAfterEdit(); } /* original C++ signature */ + # void QueryInc(bool ret_val = false) { RetValue += ret_val; Hovered += ImGui::IsItemHovered(); HoveredAllowDisabled += ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenDisabled); Active += ImGui::IsItemActive(); Focused += ImGui::IsItemFocused(); Clicked += ImGui::IsItemClicked(); Visible += ImGui::IsItemVisible(); Edited += ImGui::IsItemEdited(); Activated += ImGui::IsItemActivated(); Deactivated += ImGui::IsItemDeactivated(); DeactivatedAfterEdit += ImGui::IsItemDeactivatedAfterEdit(); } /* original C++ signature */ def query_inc(self, ret_val: bool = False) -> None: """(private API)""" pass @@ -1141,6 +1159,8 @@ class TestGenericVars: table_flags: TableFlags # ImGuiPopupFlags PopupFlags; /* original C++ signature */ popup_flags: PopupFlags + # ImGuiInputTextFlags InputTextFlags; /* original C++ signature */ + input_text_flags: InputTextFlags # ImGuiTestGenericItemStatus Status; /* original C++ signature */ status: TestGenericItemStatus # bool ShowWindow1, /* 
original C++ signature */ @@ -1217,7 +1237,7 @@ class TestContext: # ImGuiTest* Test = nullptr; /* original C++ signature */ test: Test = None # Test currently running # ImGuiTestOutput* TestOutput = nullptr; /* original C++ signature */ - test_output: TestOutput = None # Test output (generally == &Test->Output) + test_output: TestOutput = None # Test output (generally == &Test->Output while executing TestFunc) # ImGuiTestOpFlags OpFlags = ImGuiTestOpFlags_None; /* original C++ signature */ op_flags: TestOpFlags = ( TestOpFlags_None # Flags affecting all operation (supported: ImGuiTestOpFlags_NoAutoUncollapse) @@ -1773,6 +1793,20 @@ class TestContext: def scroll_to_bottom(self, ref: Union[TestRef, str]) -> None: """(private API)""" pass + # void ScrollToPos(ImGuiTestRef window_ref, float pos_v, ImGuiAxis axis, ImGuiTestOpFlags flags = ImGuiTestOpFlags_None); /* original C++ signature */ + def scroll_to_pos( + self, window_ref: Union[TestRef, str], pos_v: float, axis: Axis, flags: TestOpFlags = TestOpFlags_None + ) -> None: + """(private API)""" + pass + # void ScrollToPosX(ImGuiTestRef window_ref, float pos_x); /* original C++ signature */ + def scroll_to_pos_x(self, window_ref: Union[TestRef, str], pos_x: float) -> None: + """(private API)""" + pass + # void ScrollToPosY(ImGuiTestRef window_ref, float pos_y); /* original C++ signature */ + def scroll_to_pos_y(self, window_ref: Union[TestRef, str], pos_y: float) -> None: + """(private API)""" + pass # void ScrollToItem(ImGuiTestRef ref, ImGuiAxis axis, ImGuiTestOpFlags flags = ImGuiTestOpFlags_None); /* original C++ signature */ def scroll_to_item(self, ref: Union[TestRef, str], axis: Axis, flags: TestOpFlags = TestOpFlags_None) -> None: """(private API)""" @@ -2157,7 +2191,7 @@ class TestContext: # String compares -# Floating point compares +# Floating point compares using an epsilon # ------------------------------------------------------------------------- @@ -2264,7 +2298,7 @@ class TestFindByLabelTask: 
"""Auto-generated default constructor with named params""" pass -class TestInputType(enum.Enum): +class TestInputType(enum.IntFlag): # ImGuiTestInputType_None, /* original C++ signature */ none = enum.auto() # (= 0) # ImGuiTestInputType_Key, /* original C++ signature */ @@ -2399,7 +2433,9 @@ class TestInputs: host_esc_down: bool = False # float HostEscDownDuration = -1.0f; /* original C++ signature */ host_esc_down_duration: float = -1.0 # Maintain our own DownDuration for host/backend ESC key so we can abort. - # ImGuiTestInputs(ImVec2 MousePosValue = ImVec2(), ImVec2 MouseWheel = ImVec2(), ImGuiID MouseHoveredViewport = 0, int MouseButtonsValue = 0x00, bool HostEscDown = false, float HostEscDownDuration = -1.0f); /* original C++ signature */ + # ImVec2 HostMousePos; /* original C++ signature */ + host_mouse_pos: ImVec2 + # ImGuiTestInputs(ImVec2 MousePosValue = ImVec2(), ImVec2 MouseWheel = ImVec2(), ImGuiID MouseHoveredViewport = 0, int MouseButtonsValue = 0x00, bool HostEscDown = false, float HostEscDownDuration = -1.0f, ImVec2 HostMousePos = ImVec2()); /* original C++ signature */ def __init__( self, mouse_pos_value: Optional[ImVec2Like] = None, @@ -2408,6 +2444,7 @@ class TestInputs: mouse_buttons_value: int = 0x00, host_esc_down: bool = False, host_esc_down_duration: float = -1.0, + host_mouse_pos: Optional[ImVec2Like] = None, ) -> None: """Auto-generated default constructor with named params @@ -2416,6 +2453,7 @@ class TestInputs: If any of the params below is None, then its default value below will be used: * MousePosValue: ImVec2() * MouseWheel: ImVec2() + * HostMousePos: ImVec2() """ pass @@ -2431,6 +2469,8 @@ class TestEngine: # bool Started = false; /* original C++ signature */ started: bool = False + # bool UiContextHasHooks = false; /* original C++ signature */ + ui_context_has_hooks: bool = False # ImU64 BatchStartTime = 0; /* original C++ signature */ batch_start_time: ImU64 = 0 # ImU64 BatchEndTime = 0; /* original C++ signature */ @@ -2449,6 
+2489,8 @@ class TestEngine: gather_task: TestGatherTask # ImGuiTestFindByLabelTask FindByLabelTask; /* original C++ signature */ find_by_label_task: TestFindByLabelTask + # ImGuiTextBuffer StringBuilderForChecks; /* original C++ signature */ + string_builder_for_checks: TextBuffer # ImGuiTestInputs Inputs; /* original C++ signature */ # Inputs diff --git a/blimgui/dist64/imgui_bundle/imgui_color_text_edit.pyi b/blimgui/dist64/imgui_bundle/imgui_color_text_edit.pyi index ef9bdbc..c74c597 100644 --- a/blimgui/dist64/imgui_bundle/imgui_color_text_edit.pyi +++ b/blimgui/dist64/imgui_bundle/imgui_color_text_edit.pyi @@ -68,12 +68,12 @@ class TextEditor: def __init__(self) -> None: pass - class PaletteId(enum.Enum): + class PaletteId(enum.IntEnum): dark = enum.auto() # (= 0) light = enum.auto() # (= 1) mariana = enum.auto() # (= 2) retro_blue = enum.auto() # (= 3) - class LanguageDefinitionId(enum.Enum): + class LanguageDefinitionId(enum.IntEnum): none = enum.auto() # (= 0) cpp = enum.auto() # (= 1) c = enum.auto() # (= 2) @@ -85,7 +85,7 @@ class TextEditor: angel_script = enum.auto() # (= 8) glsl = enum.auto() # (= 9) hlsl = enum.auto() # (= 10) - class SetViewAtLineMode(enum.Enum): + class SetViewAtLineMode(enum.IntEnum): first_visible_line = enum.auto() # (= 0) centered = enum.auto() # (= 1) last_visible_line = enum.auto() # (= 2) @@ -112,8 +112,6 @@ class TextEditor: pass def get_line_count(self) -> int: pass - def is_overwrite_enabled(self) -> bool: - pass def set_palette(self, a_value: TextEditor.PaletteId) -> None: pass def get_palette(self) -> TextEditor.PaletteId: diff --git a/blimgui/dist64/imgui_bundle/imgui_command_palette.pyi b/blimgui/dist64/imgui_bundle/imgui_command_palette.pyi index 938506d..dcd2b68 100644 --- a/blimgui/dist64/imgui_bundle/imgui_command_palette.pyi +++ b/blimgui/dist64/imgui_bundle/imgui_command_palette.pyi @@ -20,12 +20,12 @@ class Context: # TODO support std::string_view # TODO support function pointer callback in addition to 
std::function -class ImCmdTextType(enum.Enum): +class ImCmdTextType(enum.IntEnum): regular = enum.auto() # (= 0) highlight = enum.auto() # (= 1) count = enum.auto() # (= 2) -class ImCmdTextFlag(enum.Enum): +class ImCmdTextFlag(enum.IntEnum): #/ Whether the text is underlined. Default False. underline = enum.auto() # (= 0) count = enum.auto() # (= 1) diff --git a/blimgui/dist64/imgui_bundle/imgui_ctx.py b/blimgui/dist64/imgui_bundle/imgui_ctx.py index ab0e87f..4779867 100644 --- a/blimgui/dist64/imgui_bundle/imgui_ctx.py +++ b/blimgui/dist64/imgui_bundle/imgui_ctx.py @@ -1,16 +1,31 @@ """ -imgui_ctx provide context managers to simplify the use of functions pairs like +imgui_ctx provides context managers to simplify the use of paired ImGui functions such as: - 1. `imgui.begin()` and `imgui.end()` - can be replaced by: `with imgui_ctx.begin() as window:` + 1. `imgui.begin()` / `imgui.end()` + can be replaced by: `with imgui_ctx.begin() as window:` + `if window:` - 2. `imgui.begin_child()` and `imgui.end_child()` - can be replaced by: `with imgui_ctx.begin_child() as child:` + 2. `imgui.begin_child()` / `imgui.end_child()` + can be replaced by: `with imgui_ctx.begin_child() as child:` + `if child:` - 3. `imgui.begin_menu_bar()` and `imgui.end_menu_bar()` - can be replaced by: `with imgui_ctx.begin_menu_bar() as menu_bar:` + 3. `imgui.begin_menu_bar()` / `imgui.end_menu_bar()` + can be replaced by: `with imgui_ctx.begin_menu_bar() as menu_bar:` + `if menu_bar:` - etc. + ... + +Note: + ImGui’s "begin"/"end" functions typically return a boolean indicating whether the context is open and usable. 
+ You may (and often should) use this boolean to guard the inner code, as in the example below: + + ```python + with imgui_ctx.begin_main_menu_bar() as menu_bar: + if menu_bar: + with imgui_ctx.begin_menu("Edit1") as menu_edit: + if menu_edit: + imgui.menu_item_simple("Undo") + imgui.menu_item_simple("Redo") + ``` + + This pattern avoids rendering UI elements inside a closed or collapsed container, as per ImGui’s recommended usage. """ @@ -25,6 +40,7 @@ TabBarFlags = int # see enum imgui.TabBarFlags_ TabItemFlags = int # see enum imgui.TabItemFlags_ DragDropFlags = int # see enum imgui.DragDropFlags_ +TreeNodeFlags = int # see enum imgui.TreeNodeFlags_ OptExceptType = Optional[Type[BaseException]] @@ -641,6 +657,39 @@ def tree_node(label: str) -> _WithTreeNode: return _WithTreeNode(label) +class _WithTreeNodeEx: + visible: bool + _enter_callback: _EnterCallback + + def __init__(self, label: str, flags: TreeNodeFlags = 0) -> None: + self._enter_callback = lambda: imgui.tree_node_ex(label, flags) + + def __enter__(self) -> "_WithTreeNodeEx": + self.visible = self._enter_callback() + return self + + def __exit__(self, _exc_type: OptExceptType, _exc_val: OptBaseException, _exc_tb: OptTraceback) -> None: + if self.visible: + imgui.tree_pop() + + def __bool__(self) -> bool: + return self.visible + + def __repr__(self) -> str: + return "{}(opened={})".format( + self.__class__.__qualname__, self.visible + ) + + def __eq__(self, other) -> bool: + if other.__class__ is self.__class__: + return self.visible is other.visible + return self.visible is other + + +def tree_node_ex(label: str, flags: TreeNodeFlags = 0) -> _WithTreeNodeEx: + return _WithTreeNodeEx(label, flags) + + class _WithPushID: _enter_callback: _EnterCallback @@ -669,8 +718,8 @@ def push_obj_id(obj: Any) -> _WithPushID: class _WithPushFont: _enter_callback: _EnterCallback - def __init__(self, font: imgui.ImFont) -> None: - self._enter_callback = lambda: imgui.push_font(font) + def __init__(self, font: 
imgui.ImFont, font_size_base_unscaled: float = 0.0) -> None: + self._enter_callback = lambda: imgui.push_font(font, font_size_base_unscaled) def __enter__(self) -> "_WithPushFont": self._enter_callback() diff --git a/blimgui/dist64/imgui_bundle/imgui_knobs.pyi b/blimgui/dist64/imgui_bundle/imgui_knobs.pyi index 039eb0b..7492829 100644 --- a/blimgui/dist64/imgui_bundle/imgui_knobs.pyi +++ b/blimgui/dist64/imgui_bundle/imgui_knobs.pyi @@ -14,13 +14,16 @@ ImGuiKnobVariant = int # // Autogenerated code below! Do not edit! #################### #################### -class ImGuiKnobFlags_(enum.Enum): +class ImGuiKnobFlags_(enum.IntEnum): no_title = enum.auto() # (= 1 << 0) no_input = enum.auto() # (= 1 << 1) value_tooltip = enum.auto() # (= 1 << 2) drag_horizontal = enum.auto() # (= 1 << 3) + drag_vertical = enum.auto() # (= 1 << 4) + logarithmic = enum.auto() # (= 1 << 5) + always_clamp = enum.auto() # (= 1 << 6) -class ImGuiKnobVariant_(enum.Enum): +class ImGuiKnobVariant_(enum.IntEnum): tick = enum.auto() # (= 1 << 0) dot = enum.auto() # (= 1 << 1) wiper = enum.auto() # (= 1 << 2) @@ -48,11 +51,13 @@ def knob( v_min: float, v_max: float, speed: float = 0, - format: Optional[str] = None, + format: str = "%.3f", variant: Optional[ImGuiKnobVariant] = None, size: float = 0, flags: ImGuiKnobFlags = 0, steps: int = 10, + angle_min: float = -1, + angle_max: float = -1, ) -> Tuple[bool, float]: """Python bindings defaults: If variant is None, then its default value will be: ImGuiKnobVariant_.tick @@ -65,11 +70,13 @@ def knob_int( v_min: int, v_max: int, speed: float = 0, - format: Optional[str] = None, + format: str = "%i", variant: Optional[ImGuiKnobVariant] = None, size: float = 0, flags: ImGuiKnobFlags = 0, steps: int = 10, + angle_min: float = -1, + angle_max: float = -1, ) -> Tuple[bool, int]: """Python bindings defaults: If variant is None, then its default value will be: ImGuiKnobVariant_.tick diff --git a/blimgui/dist64/imgui_bundle/imgui_md.pyi 
b/blimgui/dist64/imgui_bundle/imgui_md.pyi index de5c3a3..bcb3fff 100644 --- a/blimgui/dist64/imgui_bundle/imgui_md.pyi +++ b/blimgui/dist64/imgui_bundle/imgui_md.pyi @@ -5,6 +5,7 @@ Python bindings for https://github.com/mekhontsev/imgui_md (with an additional c # ruff: noqa: B008 from typing import Optional, Callable from imgui_bundle.imgui import ImTextureID, ImVec2, ImVec4, ImFont +import numpy as np # using VoidFunction = std::function; # using StringFunction = std::function; @@ -22,9 +23,13 @@ MarkdownImageFunction = Callable[[str], MarkdownImage] class MarkdownFontOptions: font_base_path: str = "fonts/Roboto/Roboto" - max_header_level: int = 2 - size_diff_between_levels: float = 2.0 + # This size is in density-independent pixels regular_size: float = 16.0 + + # Multipliers for header sizes, from h1 to h6 + header_size_factors: ( + np.ndarray + ) # ndarray[type=float, size=6] default:float( 1.42, 1.33, 1.24, 1.15, 1.10, 1.05 ) def __init__(self) -> None: """Autogenerated default constructor""" pass @@ -40,6 +45,18 @@ class MarkdownImage: """Autogenerated default constructor""" pass +class SizedFont: + """Note: Since v1.92, Fonts can be displayed at any size: + in order to display a font at a given size, we need to call + ImGui::PushFont(font, size) (or call separately ImGui::PushFontSize) + """ + + font: ImFont + size: float + def __init__(self) -> None: + """Autogenerated default constructor""" + pass + def on_image_default(image_path: str) -> Optional[MarkdownImage]: pass @@ -125,20 +142,20 @@ def render_unindented(markdown_string: str) -> None: """Renders a markdown string (after having unindented its main indentation)""" pass -def get_code_font() -> ImFont: +def get_code_font() -> SizedFont: pass class MarkdownFontSpec: italic: bool = False bold: bool = False - header_level: int = 0 + header_level: int = 0 # 0 means no header, 1 means h1, 2 means h2, etc. 
def __init__( self, italic_: bool = False, bold_: bool = False, header_level_: int = 0 ) -> None: pass -def get_font(font_spec: MarkdownFontSpec) -> ImFont: +def get_font(font_spec: MarkdownFontSpec) -> SizedFont: pass #################### #################### diff --git a/blimgui/dist64/imgui_bundle/imgui_node_editor.pyi b/blimgui/dist64/imgui_bundle/imgui_node_editor.pyi index 19eb636..02f9596 100644 --- a/blimgui/dist64/imgui_bundle/imgui_node_editor.pyi +++ b/blimgui/dist64/imgui_bundle/imgui_node_editor.pyi @@ -103,7 +103,9 @@ def resume_editor_canvas() -> None: # ------------------------------------------------------------------------------ -class PinKind(enum.Enum): +# ------------------------------------------------------------------------------ + +class PinKind(enum.IntEnum): """------------------------------------------------------------------------------""" # Input, /* original C++ signature */ @@ -112,14 +114,14 @@ class PinKind(enum.Enum): # } output = enum.auto() # (= 1) -class FlowDirection(enum.Enum): +class FlowDirection(enum.IntEnum): # Forward, /* original C++ signature */ forward = enum.auto() # (= 0) # Backward /* original C++ signature */ # } backward = enum.auto() # (= 1) -class CanvasSizeMode(enum.Enum): +class CanvasSizeMode(enum.IntEnum): # FitVerticalView, /* original C++ signature */ fit_vertical_view = ( enum.auto() @@ -131,7 +133,7 @@ class CanvasSizeMode(enum.Enum): # CenterOnly, /* original C++ signature */ center_only = enum.auto() # (= 2) # Previous view will be centered on new view -class SaveReasonFlags(enum.Enum): +class SaveReasonFlags(enum.IntEnum): """------------------------------------------------------------------------------""" # None = 0x00000000, /* original C++ signature */ @@ -211,7 +213,7 @@ class Config: def __init__(self) -> None: pass -class StyleColor(enum.Enum): +class StyleColor(enum.IntEnum): """------------------------------------------------------------------------------""" # StyleColor_Bg, /* original C++ 
signature */ @@ -257,7 +259,7 @@ class StyleColor(enum.Enum): # } count = enum.auto() # (= 19) -class StyleVar(enum.Enum): +class StyleVar(enum.IntEnum): # StyleVar_NodePadding, /* original C++ signature */ node_padding = enum.auto() # (= 0) # StyleVar_NodeRounding, /* original C++ signature */ @@ -967,9 +969,9 @@ def canvas_to_screen(pos: ImVec2Like) -> ImVec2: # IMGUI_NODE_EDITOR_API int GetNodeCount(); /* original C++ signature */ def get_node_count() -> int: + """Returns number of submitted nodes since Begin() call""" pass -# Returns number of submitted nodes since Begin() call # #ifdef IMGUI_BUNDLE_PYTHON_API # # IMGUI_NODE_EDITOR_API std::vector GetOrderedNodeIds(); /* original C++ signature */ @@ -984,8 +986,6 @@ def get_ordered_node_ids() -> List[NodeId]: # ------------------------------------------------------------------------------ -# namespace ax - # ------------------------------------------------------------------------------ # # endif #################### #################### diff --git a/blimgui/dist64/imgui_bundle/imgui_node_editor_ctx.py b/blimgui/dist64/imgui_bundle/imgui_node_editor_ctx.py index e341791..d0912e9 100644 --- a/blimgui/dist64/imgui_bundle/imgui_node_editor_ctx.py +++ b/blimgui/dist64/imgui_bundle/imgui_node_editor_ctx.py @@ -5,6 +5,19 @@ can be replaced by: `with imgui_node_editor.begin() etc. + +Note: + begin_create(), begin_delete(), being_group_hint(), and begin_shortcut () + return a context manager that contains a boolean, which indicates whether the + context was opened or not. + + Usage example: + ```python + with imgui_node_editor_ctx.begin_create() as shall_create: + if shall_create: + # do something with the pin + ... + ``` """ from imgui_bundle import ImVec2, ImVec4 from imgui_bundle import imgui_node_editor as ed @@ -91,16 +104,21 @@ def begin_pin(pin_id: ed.PinId, kind: ed.PinKind) -> _BeginPin: class _BeginCreate: # _enter_callback will be called in __enter__. Captures all __init__ arguments. 
_enter_callback: _EnterCallback + opened: bool def __init__(self, color: ImVec4 = IM_VEC4_ONE, thickness: float = 1.0) -> None: self._enter_callback = lambda: ed.begin_create(color, thickness) def __enter__(self) -> "_BeginCreate": - self._enter_callback() + self.opened = self._enter_callback() return self def __exit__(self, _exc_type: OptExceptType, _exc_val: OptBaseException, _exc_tb: OptTraceback) -> None: - ed.end_create() + if self.opened: + ed.end_create() + + def __bool__(self) -> bool: + return self.opened def __repr__(self): return self.__class__.__name__ @@ -113,16 +131,21 @@ def begin_create(color: ImVec4 = IM_VEC4_ONE, thickness: float = 1.0) -> _BeginC class _BeginDelete: # _enter_callback will be called in __enter__. Captures all __init__ arguments. _enter_callback: _EnterCallback + opened: bool def __init__(self) -> None: self._enter_callback = lambda: ed.begin_delete() def __enter__(self) -> "_BeginDelete": - self._enter_callback() + self.opened = self._enter_callback() return self def __exit__(self, _exc_type: OptExceptType, _exc_val: OptBaseException, _exc_tb: OptTraceback) -> None: - ed.end_delete() + if self.opened: + ed.end_delete() + + def __bool__(self) -> bool: + return self.opened def __repr__(self): return self.__class__.__name__ @@ -135,16 +158,21 @@ def begin_delete() -> _BeginDelete: class _BeginGroupHint: # _enter_callback will be called in __enter__. Captures all __init__ arguments. 
_enter_callback: _EnterCallback + opened: bool def __init__(self, node_id: ed.NodeId) -> None: self._enter_callback = lambda: ed.begin_group_hint(node_id) def __enter__(self) -> "_BeginGroupHint": - self._enter_callback() + self.opened = self._enter_callback() return self def __exit__(self, _exc_type: OptExceptType, _exc_val: OptBaseException, _exc_tb: OptTraceback) -> None: - ed.end_group_hint() + if self.opened: + ed.end_group_hint() + + def __bool__(self) -> bool: + return self.opened def __repr__(self): return self.__class__.__name__ @@ -157,16 +185,21 @@ def begin_group_hint(node_id: ed.NodeId) -> _BeginGroupHint: class _BeginShortcut: # _enter_callback will be called in __enter__. Captures all __init__ arguments. _enter_callback: _EnterCallback + opened: bool def __init__(self) -> None: self._enter_callback = lambda: ed.begin_shortcut() def __enter__(self) -> "_BeginShortcut": - self._enter_callback() + self.opened = self._enter_callback() return self def __exit__(self, _exc_type: OptExceptType, _exc_val: OptBaseException, _exc_tb: OptTraceback) -> None: - ed.end_shortcut() + if self.opened: + ed.end_shortcut() + + def __bool__(self) -> bool: + return self.opened def __repr__(self): return self.__class__.__name__ diff --git a/blimgui/dist64/imgui_bundle/imgui_tex_inspect.pyi b/blimgui/dist64/imgui_bundle/imgui_tex_inspect.pyi index b2fa621..7fbfd00 100644 --- a/blimgui/dist64/imgui_bundle/imgui_tex_inspect.pyi +++ b/blimgui/dist64/imgui_bundle/imgui_tex_inspect.pyi @@ -44,13 +44,13 @@ def shutdown() -> None: # [SECTION] BASIC USAGE #------------------------------------------------------------------------- -class InspectorAlphaMode(enum.Enum): +class InspectorAlphaMode(enum.IntEnum): im_gui = enum.auto() # (= 0) # Alpha is transparency so you see the ImGui panel background behind image black = enum.auto() # (= 1) # Alpha is used to blend over a black background white = enum.auto() # (= 2) # Alpha is used to blend over a white background custom_color = 
enum.auto() # (= 3) # Alpha is used to blend over a custom colour. -class InspectorFlags_(enum.Enum): +class InspectorFlags_(enum.IntEnum): show_wrap = enum.auto() # (= 1 << 0) # Draw beyong the [0,1] uv range. What you see will depend on API no_force_filter_nearest = enum.auto() # (= 1 << 1) # Normally we force nearest neighbour sampling when zoomed in. Set to disable this. no_grid = enum.auto() # (= 1 << 2) # By default a grid is shown at high zoom levels @@ -229,7 +229,7 @@ class ValueText: * The text shows the value of the texel. E.g. "R:255, G: 128, B:0, A:255" """ - class Format(enum.Enum): + class Format(enum.IntEnum): hex_string = enum.auto() # (= 0) # E.g. #EF97B9FF bytes_hex = enum.auto() # (= 1) # E.g. R:#EF G:#97 B:#B9 A:#FF (split over 4 lines) bytes_dec = enum.auto() # (= 2) # E.g. R:239 G: 151 B:185 A:255 (split over 4 lines) @@ -270,7 +270,7 @@ class Arrow: line_scale: ImVec2 zero_point: ImVec2 = ImVec2(0, 0) - class Preset(enum.Enum): + class Preset(enum.IntEnum): normal_map = enum.auto() # (= 0) # For normal maps. I.e. Arrow is in (R,G) channels. 128, 128 is zero point normalized_float = enum.auto() # (= 1) # Arrow in (R,G) channels. 0,0 is zero point, (1,0) will draw an arrow exactly to # right edge of texture. (0,-1) will draw exactly to the bottom etc. 
diff --git a/blimgui/dist64/imgui_bundle/imgui_toggle.pyi b/blimgui/dist64/imgui_bundle/imgui_toggle.pyi index 6e58d68..6707a88 100644 --- a/blimgui/dist64/imgui_bundle/imgui_toggle.pyi +++ b/blimgui/dist64/imgui_bundle/imgui_toggle.pyi @@ -156,7 +156,7 @@ def toggle( def toggle(label: str, v: bool, config: ToggleConfig) -> Tuple[bool, bool]: pass -class ToggleFlags_(enum.Enum): +class ToggleFlags_(enum.IntFlag): """ImGuiToggleFlags: A set of flags that adjust behavior and display for ImGui::Toggle().""" # ImGuiToggleFlags_None = 0, /* original C++ signature */ @@ -186,7 +186,7 @@ class ToggleFlags_(enum.Enum): # ImGuiToggleFlags_Default = ImGuiToggleFlags_None, /* original C++ signature */ default = enum.auto() # (= ToggleFlags_None) # The default flags used when no ImGuiToggleFlags_ are specified. -class ToggleA11yStyle_(enum.Enum): +class ToggleA11yStyle_(enum.IntFlag): """ImGuiToggleA11yStyle: Styles to draw A11y labels.""" # ImGuiToggleA11yStyle_Label, /* original C++ signature */ diff --git a/blimgui/dist64/imgui_bundle/imguizmo.pyi b/blimgui/dist64/imgui_bundle/imguizmo.pyi index a49c90e..8ae837d 100644 --- a/blimgui/dist64/imgui_bundle/imguizmo.pyi +++ b/blimgui/dist64/imgui_bundle/imguizmo.pyi @@ -29,11 +29,11 @@ ImGuiZoomSliderFlags_None = im_zoom_slider.ImGuiZoomSliderFlags_.none # noqa # ImGuizmo/ImGuizmo.h included by ImGuizmoPure/ImGuizmoPure.h // #////////////////////////////////////////////////////////////////////////////////////////////////////////////// # https://github.com/CedricGuillemet/ImGuizmo -# v1.91.3 WIP +# v1.92.5 WIP # # The MIT License(MIT) # -# Copyright(c) 2021 Cedric Guillemet +# Copyright(c) 2016-2021 Cedric Guillemet # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files(the "Software"), to deal @@ -89,7 +89,7 @@ ImGuiZoomSliderFlags_None = im_zoom_slider.ImGuiZoomSliderFlags_.none # noqa # class im_guizmo: # Proxy class that introduces typings for the 
*submodule* im_guizmo - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) @staticmethod def set_drawlist(drawlist: Optional[ImDrawList] = None) -> None: """ call inside your own window and before Manipulate() in order to draw gizmo to that window. @@ -166,7 +166,7 @@ class im_guizmo: # Proxy class that introduces typings for the *submodule* im_g # Render a cube with face color corresponding to face normal. Usefull for debug/tests - class OPERATION(enum.Enum): + class OPERATION(enum.IntEnum): """ call it when you want a gizmo Needs view and projection matrices. matrix parameter is the source matrix (where will be gizmo be drawn) and might be transformed by the function. Return deltaMatrix is optional @@ -194,7 +194,7 @@ class im_guizmo: # Proxy class that introduces typings for the *submodule* im_g universal = enum.auto() # (= OPERATION.translate | OPERATION.rotate | OPERATION.scaleu) - class MODE(enum.Enum): + class MODE(enum.IntEnum): local = enum.auto() # (= 0) world = enum.auto() # (= 1) @@ -204,9 +204,6 @@ class im_guizmo: # Proxy class that introduces typings for the *submodule* im_g def set_alternative_window(window: ImGuiWindow) -> None: pass - @staticmethod - def set_id(id: int) -> None: - pass # ID stack/scopes # Read the FAQ (docs/FAQ.md or http://dearimgui.org/faq) for more details about how ID are handled in dear imgui. 
@@ -287,7 +284,7 @@ class im_guizmo: # Proxy class that introduces typings for the *submodule* im_g """ Configure the limit where planes are hiden""" pass - class COLOR(enum.Enum): + class COLOR(enum.IntEnum): direction_x = enum.auto() # (= 0) # directionColor[0] direction_y = enum.auto() # (= 1) # directionColor[1] direction_z = enum.auto() # (= 2) # directionColor[2] @@ -399,7 +396,29 @@ class im_guizmo: # Proxy class that introduces typings for the *submodule* im_g local_bounds: Optional[Matrix6] = None, bounds_snap: Optional[Matrix3] = None ) -> bool: - """ Manipulate may change the objectMatrix parameter (return True if modified)""" + """ Manipulate: main API of ImGuizmo + Returns True if the objectMatrix has been modified + + Mandatory input parameters: + - view: camera view matrix (array of 16 floats) + - projection: camera projection matrix (array of 16 floats) + - operation: operation to perform (translate, rotate, scale) + - mode: in which space the operation is applied (local or world) + Input / Output parameter: + - object_matrix: matrix of the object to manipulate (array of 16 floats) + (will be modified when using the gizmo) + + Optional output parameter: + - delta_matrix: matrix that contains the transformation delta (array of 16 floats) + (useful to retrieve the modification between two frames) + pass a newly created Matrix16, and it will be filled if not None. 
+ + Optional input parameters: + - snap: if not None, contains the snap value (array of 3 floats) + (for example, if using TRANSLATE and snap={1,1,1}, the object will be snapped to the next integer position) + - local_bounds: if not None, contains the local bounds of the object (array of 6 floats) + - bounds_snap: if not None, contains the snap value for the bounds (array of 3 floats) + """ pass @staticmethod diff --git a/blimgui/dist64/imgui_bundle/immapp/__init__.py b/blimgui/dist64/imgui_bundle/immapp/__init__.py index d9d4c1d..1c05e4b 100644 --- a/blimgui/dist64/imgui_bundle/immapp/__init__.py +++ b/blimgui/dist64/imgui_bundle/immapp/__init__.py @@ -1,4 +1,5 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle +import importlib.util from imgui_bundle import _imgui_bundle as _native_bundle from imgui_bundle._imgui_bundle import immapp_cpp as immapp_cpp # type: ignore from imgui_bundle._imgui_bundle.immapp_cpp import ( # type: ignore @@ -38,10 +39,8 @@ from imgui_bundle.immapp.immapp_utils import ( static as static, run_anon_block as run_anon_block, - add_static as add_static, - add_static_values as add_static_values, ) -from imgui_bundle.immapp.immapp_notebook import run_nb as run_nb + from imgui_bundle.immapp import immapp_code_utils from imgui_bundle._imgui_bundle.hello_imgui import ( # type: ignore @@ -71,7 +70,6 @@ "icons_fontawesome_6", "static", "run_anon_block", - "run_nb", "RunnerParams", "SimpleRunnerParams", "snippets", @@ -84,3 +82,8 @@ "widget_with_resize_handle_in_node_editor_em", "immapp_code_utils", ] + + +if importlib.util.find_spec("IPython") is not None: + from imgui_bundle.immapp.immapp_notebook import run_nb as run_nb + __all__.append("run_nb") diff --git a/blimgui/dist64/imgui_bundle/immapp/immapp_cpp.pyi b/blimgui/dist64/imgui_bundle/immapp/immapp_cpp.pyi 
index ccff681..b78700c 100644 --- a/blimgui/dist64/imgui_bundle/immapp/immapp_cpp.pyi +++ b/blimgui/dist64/imgui_bundle/immapp/immapp_cpp.pyi @@ -26,6 +26,8 @@ DefaultScreenSize = (800, 600) # #ifdef IMGUI_BUNDLE_WITH_IMPLOT_AND_IMGUI_NODE_EDITOR # +# These functions wrap ImPlot::BeginPlot and ImPlot::EndPlot, +# but they enable to make the plot content draggable inside a node def begin_plot_in_node_editor( title_id: str, size: Optional[ImVec2Like] = None, flags: ImPlotFlags = 0 ) -> bool: @@ -315,7 +317,7 @@ def delete_node_editor_settings(runner_params: HelloImGui.RunnerParams) -> None: # class manual_render: # Proxy class that introduces typings for the *submodule* manual_render - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) """ namespace ManualRender""" # Immapp::ManualRender is a namespace that groups functions, allowing fine-grained control over the rendering process: # - It is customizable like Immapp::Run: initialize it with `RunnerParams` and `AddOnsParams`. @@ -405,7 +407,7 @@ def clock_seconds() -> float: # class code_utils: # Proxy class that introduces typings for the *submodule* code_utils - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) """ namespace CodeUtils""" @staticmethod @@ -427,12 +429,12 @@ class code_utils: # Proxy class that introduces typings for the *submodule* cod # class snippets: # Proxy class that introduces typings for the *submodule* snippets - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) 
# # TextEditorBundle: addition to ImGuiColorTextEdit, specific to ImGuiBundle # - class SnippetLanguage(enum.Enum): + class SnippetLanguage(enum.IntEnum): cpp = enum.auto() # (= 0) hlsl = enum.auto() # (= 1) glsl = enum.auto() # (= 2) @@ -442,7 +444,7 @@ class snippets: # Proxy class that introduces typings for the *submodule* snipp lua = enum.auto() # (= 6) python = enum.auto() # (= 7) - class SnippetTheme(enum.Enum): + class SnippetTheme(enum.IntEnum): dark = enum.auto() # (= 0) light = enum.auto() # (= 1) retro_blue = enum.auto() # (= 2) diff --git a/blimgui/dist64/imgui_bundle/immapp/immapp_notebook.py b/blimgui/dist64/imgui_bundle/immapp/immapp_notebook.py index 20a6154..511025f 100644 --- a/blimgui/dist64/imgui_bundle/immapp/immapp_notebook.py +++ b/blimgui/dist64/imgui_bundle/immapp/immapp_notebook.py @@ -1,4 +1,4 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle # mypy: disable_error_code=no-untyped-call from typing import Callable, Tuple from imgui_bundle import immapp, hello_imgui diff --git a/blimgui/dist64/imgui_bundle/immapp/immapp_utils.py b/blimgui/dist64/imgui_bundle/immapp/immapp_utils.py index a102b16..b8d341a 100644 --- a/blimgui/dist64/imgui_bundle/immapp/immapp_utils.py +++ b/blimgui/dist64/imgui_bundle/immapp/immapp_utils.py @@ -1,6 +1,6 @@ -# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2023 Pascal Thomet - https://github.com/pthom/imgui_bundle +# Part of ImGui Bundle - MIT License - Copyright (c) 2022-2025 Pascal Thomet - https://github.com/pthom/imgui_bundle from typing import Callable, TypeVar, Any -from munch import Munch # type: ignore + # Create type variables for the argument and return types of the function A = TypeVar("A", bound=Callable[..., Any]) @@ -59,97 +59,3 @@ def _win_code(): # Create type variables for the argument and return 
types of the function AnyCallable = TypeVar("AnyCallable", bound=Callable[..., Any]) - - -def add_static(func: AnyCallable) -> AnyCallable: - """ - A decorator that adds a persistent 'static' attribute as a Munch object to a function. - - - The `static` attribute allows the function to store variables persistently across calls. - - Unlike global variables, this keeps the state encapsulated within the function. - - This is useful for maintaining UI state in ImGui-based applications. - - **Example:** - ```python - from imgui_bundle.immapp import add_static - - @add_static - def counter_demo(): - static = counter_demo.static # Access the static storage which was created by the decorator - # You can add any attributes to this object. - - if not hasattr(static, "count"): - static.count = 0 # Initialize on first run - - static.count += 1 - print(f"Counter: {static.count}") - - counter_demo() # Output: Counter: 1 - counter_demo() # Output: Counter: 2 - counter_demo() # Output: Counter: 3 - ``` - - **When to Use:** - - Use `@add_static` when you need function-scoped persistent storage **without predefined values**. - - You must manually initialize static variables inside the function. - - **Notes:** - - This decorator is lightweight and adds no runtime overhead beyond attribute assignment. - - It is especially useful in immediate mode GUI programming where state persistence is needed. - - Static variables are similar to global variables, with the same shortcomings! - Use them only in small scripts, not in production code! - - :param func: The function to decorate. - :return: The decorated function with an attached `static` attribute. - """ - - if not hasattr(func, "static"): - func.static = Munch() # Initialize an empty storage container - - return func - - -def add_static_values(**defaults: Any) -> Callable[[A], A]: - """ - A decorator that adds a persistent 'static' attribute as a Munch object with optional default values. 
- - - This is similar to `@add_static`, but allows you to define **default values** upfront. - - The `static` attribute is created once and persists across function calls. - - This is useful for initializing UI-related state variables without explicit checks. - - **Example:** - ```python - from imgui_bundle.immapp import add_static_values - - @add_static_values(count=0, step=2) - def counter_demo(): - static = counter_demo.static # Access the static storage which was created by the decorator - # You can add any attributes to this object. - - static.count += static.step - print(f"Counter: {static.count}") - - counter_demo() # Output: Counter: 2 - counter_demo() # Output: Counter: 4 - counter_demo() # Output: Counter: 6 - ``` - - **When to Use:** - - Use `@add_static_values(default1=value1, default2=value2, ...)` **when you want pre-initialized static variables**. - - No need for `if not hasattr(static, "var"):` checks inside the function. - - **Notes:** - - This decorator is lightweight and only runs once per function definition. - - Using `Munch`, it allows **dot-accessible** static variables (`static.var_name` instead of `static['var_name']`). - - Static variables are similar to global variables, with the same shortcomings! - Use them only in small scripts, not in production code! - - :param defaults: Keyword arguments representing the default static variables. - :return: A decorator that adds a `static` attribute to the function. 
- """ - - def decorator(func: A) -> A: - func.static = Munch(defaults) # Initialize with user-defined defaults - return func - - return decorator diff --git a/blimgui/dist64/imgui_bundle/immvision.pyi b/blimgui/dist64/imgui_bundle/immvision.pyi index 84fbb34..fb26b42 100644 --- a/blimgui/dist64/imgui_bundle/immvision.pyi +++ b/blimgui/dist64/imgui_bundle/immvision.pyi @@ -92,7 +92,7 @@ def pop_color_order() -> None: """(private API)""" pass -class ColorMapStatsTypeId(enum.Enum): +class ColorMapStatsTypeId(enum.IntEnum): """Are we using the stats on the full image, on the Visible ROI, or are we using Min/Max values""" # FromFullImage, /* original C++ signature */ @@ -595,8 +595,6 @@ def version_info() -> str: """Return immvision version info""" pass -# namespace ImmVision - # //////////////////////////////////////////////////////////////////////////////////////////////////////////////// # src/immvision/immvision.h continued // # ////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -701,10 +699,10 @@ class GlTexture: # class cv_drawing_utils: # Proxy class that introduces typings for the *submodule* cv_drawing_utils - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) 
""" namespace CvDrawingUtils""" - class Colors(enum.Enum): + class Colors(enum.IntEnum): # Black, /* original C++ signature */ black = enum.auto() # (= 0) # Red, /* original C++ signature */ diff --git a/blimgui/dist64/imgui_bundle/implot/__init__.pyi b/blimgui/dist64/imgui_bundle/implot/__init__.pyi index 548965c..0af738c 100644 --- a/blimgui/dist64/imgui_bundle/implot/__init__.pyi +++ b/blimgui/dist64/imgui_bundle/implot/__init__.pyi @@ -133,7 +133,8 @@ Bin_Sturges = Bin_.sturges #################### #################### # MIT License -# Copyright (c) 2023 Evan Pezent +# Copyright (c) 2020-2024 Evan Pezent +# Copyright (c) 2025 Breno Cunha Queiroz # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -202,7 +203,7 @@ class BoxedValue: # Define attributes of all API symbols declarations (e.g. for DLL under Windows) # Using ImPlot via a shared library is not recommended, because we don't guarantee # backward nor forward ABI compatibility and also function call overhead. If you -# do use ImPlot as a DLL, be sure to call SetImGuiContext (see Miscellanous section). +# do use ImPlot as a DLL, be sure to call SetImGuiContext (see Miscellaneous section). # ----------------------------------------------------------------------------- # [SECTION] Enums and Types @@ -212,7 +213,7 @@ class BoxedValue: # Enums/Flags -class ImAxis_(enum.Enum): +class ImAxis_(enum.IntFlag): """Axis indices. 
The values assigned may change; NEVER hardcode these.""" # horizontal axes @@ -231,10 +232,10 @@ class ImAxis_(enum.Enum): y3 = enum.auto() # (= 5) # disabled by default # ImAxis_COUNT /* original C++ signature */ # } - # bookeeping + # bookkeeping count = enum.auto() # (= 6) -class Flags_(enum.Enum): +class Flags_(enum.IntFlag): """Options for plots (see BeginPlot).""" # ImPlotFlags_None = 0, /* original C++ signature */ @@ -242,7 +243,7 @@ class Flags_(enum.Enum): # ImPlotFlags_NoTitle = 1 << 0, /* original C++ signature */ no_title = ( enum.auto() - ) # (= 1 << 0) # the plot title will not be displayed (titles are also hidden if preceeded by double hashes, e.g. "##MyPlot") + ) # (= 1 << 0) # the plot title will not be displayed (titles are also hidden if preceded by double hashes, e.g. "##MyPlot") # ImPlotFlags_NoLegend = 1 << 1, /* original C++ signature */ no_legend = enum.auto() # (= 1 << 1) # the legend will not be displayed # ImPlotFlags_NoMouseText = 1 << 2, /* original C++ signature */ @@ -267,7 +268,7 @@ class Flags_(enum.Enum): enum.auto() ) # (= Flags_NoTitle | Flags_NoLegend | Flags_NoMenus | Flags_NoBoxSelect | Flags_NoMouseText) -class AxisFlags_(enum.Enum): +class AxisFlags_(enum.IntFlag): """Options for plot axes (see SetupAxis).""" # ImPlotAxisFlags_None = 0, /* original C++ signature */ @@ -326,7 +327,7 @@ class AxisFlags_(enum.Enum): # } aux_default = enum.auto() # (= AxisFlags_NoGridLines | AxisFlags_Opposite) -class SubplotFlags_(enum.Enum): +class SubplotFlags_(enum.IntFlag): """Options for subplots (see BeginSubplot)""" # ImPlotSubplotFlags_None = 0, /* original C++ signature */ @@ -334,7 +335,7 @@ class SubplotFlags_(enum.Enum): # ImPlotSubplotFlags_NoTitle = 1 << 0, /* original C++ signature */ no_title = ( enum.auto() - ) # (= 1 << 0) # the subplot title will not be displayed (titles are also hidden if preceeded by double hashes, e.g. 
"##MySubplot") + ) # (= 1 << 0) # the subplot title will not be displayed (titles are also hidden if preceded by double hashes, e.g. "##MySubplot") # ImPlotSubplotFlags_NoLegend = 1 << 1, /* original C++ signature */ no_legend = ( enum.auto() @@ -370,7 +371,7 @@ class SubplotFlags_(enum.Enum): enum.auto() ) # (= 1 << 10) # subplots are added in column major order instead of the default row major order -class LegendFlags_(enum.Enum): +class LegendFlags_(enum.IntFlag): """Options for legends (see SetupLegend)""" # ImPlotLegendFlags_None = 0, /* original C++ signature */ @@ -393,8 +394,10 @@ class LegendFlags_(enum.Enum): horizontal = enum.auto() # (= 1 << 5) # legend entries will be displayed horizontally # ImPlotLegendFlags_Sort = 1 << 6, /* original C++ signature */ sort = enum.auto() # (= 1 << 6) # legend entries will be displayed in alphabetical order + # ImPlotLegendFlags_Reverse = 1 << 7, /* original C++ signature */ + reverse = enum.auto() # (= 1 << 7) # legend entries will be displayed in reverse order -class MouseTextFlags_(enum.Enum): +class MouseTextFlags_(enum.IntFlag): """Options for mouse hover text (see SetupMouseText)""" # ImPlotMouseTextFlags_None = 0, /* original C++ signature */ @@ -406,7 +409,7 @@ class MouseTextFlags_(enum.Enum): # ImPlotMouseTextFlags_ShowAlways = 1 << 2, /* original C++ signature */ show_always = enum.auto() # (= 1 << 2) # always display mouse position even if plot not hovered -class DragToolFlags_(enum.Enum): +class DragToolFlags_(enum.IntFlag): """Options for DragPoint, DragLine, DragRect""" # ImPlotDragToolFlags_None = 0, /* original C++ signature */ @@ -422,7 +425,7 @@ class DragToolFlags_(enum.Enum): enum.auto() ) # (= 1 << 3) # tool rendering will be delayed one frame; useful when applying position-constraints -class ColormapScaleFlags_(enum.Enum): +class ColormapScaleFlags_(enum.IntFlag): """Flags for ColormapScale""" # ImPlotColormapScaleFlags_None = 0, /* original C++ signature */ @@ -436,7 +439,7 @@ class 
ColormapScaleFlags_(enum.Enum): enum.auto() ) # (= 1 << 2) # invert the colormap bar and axis scale (this only affects rendering; if you only want to reverse the scale mapping, make scale_min > scale_max) -class ItemFlags_(enum.Enum): +class ItemFlags_(enum.IntFlag): """Flags for ANY PlotX function""" # ImPlotItemFlags_None = 0, /* original C++ signature */ @@ -446,7 +449,7 @@ class ItemFlags_(enum.Enum): # ImPlotItemFlags_NoFit = 1 << 1, /* original C++ signature */ no_fit = enum.auto() # (= 1 << 1) # the item won't be considered for plot fits -class LineFlags_(enum.Enum): +class LineFlags_(enum.IntFlag): """Flags for PlotLine""" # ImPlotLineFlags_None = 0, /* original C++ signature */ @@ -464,7 +467,7 @@ class LineFlags_(enum.Enum): enum.auto() ) # (= 1 << 14) # a filled region between the line and horizontal origin will be rendered; use PlotShaded for more advanced cases -class ScatterFlags_(enum.Enum): +class ScatterFlags_(enum.IntFlag): """Flags for PlotScatter""" # ImPlotScatterFlags_None = 0, /* original C++ signature */ @@ -472,7 +475,7 @@ class ScatterFlags_(enum.Enum): # ImPlotScatterFlags_NoClip = 1 << 10, /* original C++ signature */ no_clip = enum.auto() # (= 1 << 10) # markers on the edge of a plot will not be clipped -class StairsFlags_(enum.Enum): +class StairsFlags_(enum.IntFlag): """Flags for PlotStairs""" # ImPlotStairsFlags_None = 0, /* original C++ signature */ @@ -486,13 +489,13 @@ class StairsFlags_(enum.Enum): enum.auto() ) # (= 1 << 11) # a filled region between the stairs and horizontal origin will be rendered; use PlotShaded for more advanced cases -class ShadedFlags_(enum.Enum): +class ShadedFlags_(enum.IntFlag): """Flags for PlotShaded (placeholder)""" # ImPlotShadedFlags_None = 0 /* original C++ signature */ none = enum.auto() # (= 0) # default -class BarsFlags_(enum.Enum): +class BarsFlags_(enum.IntFlag): """Flags for PlotBars""" # ImPlotBarsFlags_None = 0, /* original C++ signature */ @@ -500,7 +503,7 @@ class BarsFlags_(enum.Enum): 
# ImPlotBarsFlags_Horizontal = 1 << 10, /* original C++ signature */ horizontal = enum.auto() # (= 1 << 10) # bars will be rendered horizontally on the current y-axis -class BarGroupsFlags_(enum.Enum): +class BarGroupsFlags_(enum.IntFlag): """Flags for PlotBarGroups""" # ImPlotBarGroupsFlags_None = 0, /* original C++ signature */ @@ -510,7 +513,7 @@ class BarGroupsFlags_(enum.Enum): # ImPlotBarGroupsFlags_Stacked = 1 << 11, /* original C++ signature */ stacked = enum.auto() # (= 1 << 11) # items in a group will be stacked on top of each other -class ErrorBarsFlags_(enum.Enum): +class ErrorBarsFlags_(enum.IntFlag): """Flags for PlotErrorBars""" # ImPlotErrorBarsFlags_None = 0, /* original C++ signature */ @@ -518,7 +521,7 @@ class ErrorBarsFlags_(enum.Enum): # ImPlotErrorBarsFlags_Horizontal = 1 << 10, /* original C++ signature */ horizontal = enum.auto() # (= 1 << 10) # error bars will be rendered horizontally on the current y-axis -class StemsFlags_(enum.Enum): +class StemsFlags_(enum.IntFlag): """Flags for PlotStems""" # ImPlotStemsFlags_None = 0, /* original C++ signature */ @@ -526,7 +529,7 @@ class StemsFlags_(enum.Enum): # ImPlotStemsFlags_Horizontal = 1 << 10, /* original C++ signature */ horizontal = enum.auto() # (= 1 << 10) # stems will be rendered horizontally on the current y-axis -class InfLinesFlags_(enum.Enum): +class InfLinesFlags_(enum.IntFlag): """Flags for PlotInfLines""" # ImPlotInfLinesFlags_None = 0, /* original C++ signature */ @@ -534,7 +537,7 @@ class InfLinesFlags_(enum.Enum): # ImPlotInfLinesFlags_Horizontal = 1 << 10 /* original C++ signature */ horizontal = enum.auto() # (= 1 << 10) # lines will be rendered horizontally on the current y-axis -class PieChartFlags_(enum.Enum): +class PieChartFlags_(enum.IntFlag): """Flags for PlotPieChart""" # ImPlotPieChartFlags_None = 0, /* original C++ signature */ @@ -550,7 +553,7 @@ class PieChartFlags_(enum.Enum): # ImPlotPieChartFlags_Exploding = 1 << 12 /* original C++ signature */ exploding = 
enum.auto() # (= 1 << 12) # Explode legend-hovered slice -class HeatmapFlags_(enum.Enum): +class HeatmapFlags_(enum.IntFlag): """Flags for PlotHeatmap""" # ImPlotHeatmapFlags_None = 0, /* original C++ signature */ @@ -558,7 +561,7 @@ class HeatmapFlags_(enum.Enum): # ImPlotHeatmapFlags_ColMajor = 1 << 10, /* original C++ signature */ col_major = enum.auto() # (= 1 << 10) # data will be read in column major order -class HistogramFlags_(enum.Enum): +class HistogramFlags_(enum.IntFlag): """Flags for PlotHistogram and PlotHistogram2D""" # ImPlotHistogramFlags_None = 0, /* original C++ signature */ @@ -578,23 +581,23 @@ class HistogramFlags_(enum.Enum): # ImPlotHistogramFlags_NoOutliers = 1 << 13, /* original C++ signature */ no_outliers = ( enum.auto() - ) # (= 1 << 13) # exclude values outside the specifed histogram range from the count toward normalizing and cumulative counts + ) # (= 1 << 13) # exclude values outside the specified histogram range from the count toward normalizing and cumulative counts # ImPlotHistogramFlags_ColMajor = 1 << 14 /* original C++ signature */ col_major = enum.auto() # (= 1 << 14) # data will be read in column major order (not supported by PlotHistogram) -class DigitalFlags_(enum.Enum): +class DigitalFlags_(enum.IntFlag): """Flags for PlotDigital (placeholder)""" # ImPlotDigitalFlags_None = 0 /* original C++ signature */ none = enum.auto() # (= 0) # default -class ImageFlags_(enum.Enum): +class ImageFlags_(enum.IntFlag): """Flags for PlotImage (placeholder)""" # ImPlotImageFlags_None = 0 /* original C++ signature */ none = enum.auto() # (= 0) # default -class TextFlags_(enum.Enum): +class TextFlags_(enum.IntFlag): """Flags for PlotText""" # ImPlotTextFlags_None = 0, /* original C++ signature */ @@ -602,13 +605,13 @@ class TextFlags_(enum.Enum): # ImPlotTextFlags_Vertical = 1 << 10 /* original C++ signature */ vertical = enum.auto() # (= 1 << 10) # text will be rendered vertically -class DummyFlags_(enum.Enum): +class 
DummyFlags_(enum.IntFlag): """Flags for PlotDummy (placeholder)""" # ImPlotDummyFlags_None = 0 /* original C++ signature */ none = enum.auto() # (= 0) # default -class Cond_(enum.Enum): +class Cond_(enum.IntFlag): """Represents a condition for SetupAxisLimits etc. (same as ImGuiCond, but we only support a subset of those enums)""" # ImPlotCond_None = ImGuiCond_None, /* original C++ signature */ @@ -618,7 +621,7 @@ class Cond_(enum.Enum): # ImPlotCond_Once = ImGuiCond_Once, /* original C++ signature */ once = enum.auto() # (= Cond_Once) # Set the variable once per runtime session (only the first call will succeed) -class Col_(enum.Enum): +class Col_(enum.IntFlag): """Plot styling colors.""" # item styling colors @@ -650,7 +653,7 @@ class Col_(enum.Enum): # ImPlotCol_InlayText, /* original C++ signature */ inlay_text = enum.auto() # (= 12) # color of text appearing inside of plots (defaults to ImGuiCol_Text) # ImPlotCol_AxisText, /* original C++ signature */ - axis_text = enum.auto() # (= 13) # axis label and tick lables color (defaults to ImGuiCol_Text) + axis_text = enum.auto() # (= 13) # axis label and tick labels color (defaults to ImGuiCol_Text) # ImPlotCol_AxisGrid, /* original C++ signature */ axis_grid = enum.auto() # (= 14) # axis grid color (defaults to 25% ImPlotCol_AxisText) # ImPlotCol_AxisTick, /* original C++ signature */ @@ -669,7 +672,7 @@ class Col_(enum.Enum): # } count = enum.auto() # (= 21) -class StyleVar_(enum.Enum): +class StyleVar_(enum.IntFlag): """Plot styling variables.""" # item styling variables @@ -736,7 +739,7 @@ class StyleVar_(enum.Enum): # } count = enum.auto() # (= 27) -class Scale_(enum.Enum): +class Scale_(enum.IntFlag): """Axis scale""" # ImPlotScale_Linear = 0, /* original C++ signature */ @@ -744,11 +747,11 @@ class Scale_(enum.Enum): # ImPlotScale_Time, /* original C++ signature */ time = enum.auto() # (= 1) # date/time scale # ImPlotScale_Log10, /* original C++ signature */ - log10 = enum.auto() # (= 2) # base 10 
logartithmic scale + log10 = enum.auto() # (= 2) # base 10 logarithmic scale # ImPlotScale_SymLog, /* original C++ signature */ sym_log = enum.auto() # (= 3) # symmetric log scale -class Marker_(enum.Enum): +class Marker_(enum.IntFlag): """Marker specifications.""" # ImPlotMarker_None = -1, /* original C++ signature */ @@ -777,7 +780,7 @@ class Marker_(enum.Enum): # } count = enum.auto() # (= 10) -class Colormap_(enum.Enum): +class Colormap_(enum.IntFlag): """Built-in colormaps""" # ImPlotColormap_Deep = 0, /* original C++ signature */ @@ -813,7 +816,7 @@ class Colormap_(enum.Enum): # ImPlotColormap_Greys = 15, /* original C++ signature */ greys = enum.auto() # (= 15) # white/black (qual=False, n=2 ) -class Location_(enum.Enum): +class Location_(enum.IntFlag): """Used to position items on a plot (e.g. legends, labels, etc.)""" # ImPlotLocation_Center = 0, /* original C++ signature */ @@ -835,7 +838,7 @@ class Location_(enum.Enum): # ImPlotLocation_SouthEast = ImPlotLocation_South | ImPlotLocation_East /* original C++ signature */ south_east = enum.auto() # (= Location_South | Location_East) # bottom-right -class Bin_(enum.Enum): +class Bin_(enum.IntFlag): """Enums for different automatic histogram binning methods (k = bin count or w = bin width)""" # ImPlotBin_Sqrt = -1, /* original C++ signature */ @@ -864,15 +867,13 @@ class Point: @overload def __init__(self, p: ImVec2Like) -> None: pass - # double& operator[] (size_t idx) { IM_ASSERT(idx == 0 || idx == 1); return ((double*)(void*)(char*)this)[idx]; } /* original C++ signature */ + # IMPLOT_API double& operator[] (size_t idx) { IM_ASSERT(idx == 0 || idx == 1); return ((double*)(void*)(char*)this)[idx]; } /* original C++ signature */ @overload def __getitem__(self, idx: int) -> float: - """(private API)""" pass - # double operator[] (size_t idx) const { IM_ASSERT(idx == 0 || idx == 1); return ((const double*)(const void*)(const char*)this)[idx]; } /* original C++ signature */ + # IMPLOT_API double operator[] 
(size_t idx) const { IM_ASSERT(idx == 0 || idx == 1); return ((const double*)(const void*)(const char*)this)[idx]; } /* original C++ signature */ @overload def __getitem__(self, idx: int) -> float: - """(private API)""" pass class Range: @@ -890,17 +891,14 @@ class Range: @overload def __init__(self, _min: float, _max: float) -> None: pass - # bool Contains(double value) const { return value >= Min && value <= Max; } /* original C++ signature */ + # IMPLOT_API bool Contains(double value) const { return value >= Min && value <= Max; } /* original C++ signature */ def contains(self, value: float) -> bool: - """(private API)""" pass - # double Size() const { return Max - Min; } /* original C++ signature */ + # IMPLOT_API double Size() const { return Max - Min; } /* original C++ signature */ def size(self) -> float: - """(private API)""" pass - # double Clamp(double value) const { return (value < Min) ? Min : (value > Max) ? Max : value; } /* original C++ signature */ + # IMPLOT_API double Clamp(double value) const { return (value < Min) ? Min : (value > Max) ? 
Max : value; } /* original C++ signature */ def clamp(self, value: float) -> float: - """(private API)""" pass class Rect: @@ -918,37 +916,30 @@ class Rect: @overload def __init__(self, x_min: float, x_max: float, y_min: float, y_max: float) -> None: pass - # bool Contains(const ImPlotPoint& p) const { return Contains(p.x, p.y); } /* original C++ signature */ + # IMPLOT_API bool Contains(const ImPlotPoint& p) const { return Contains(p.x, p.y); } /* original C++ signature */ @overload def contains(self, p: Point) -> bool: - """(private API)""" pass - # bool Contains(double x, double y) const { return X.Contains(x) && Y.Contains(y); } /* original C++ signature */ + # IMPLOT_API bool Contains(double x, double y) const { return X.Contains(x) && Y.Contains(y); } /* original C++ signature */ @overload def contains(self, x: float, y: float) -> bool: - """(private API)""" pass - # ImPlotPoint Size() const { return ImPlotPoint(X.Size(), Y.Size()); } /* original C++ signature */ + # IMPLOT_API ImPlotPoint Size() const { return ImPlotPoint(X.Size(), Y.Size()); } /* original C++ signature */ def size(self) -> Point: - """(private API)""" pass - # ImPlotPoint Clamp(const ImPlotPoint& p) { return Clamp(p.x, p.y); } /* original C++ signature */ + # IMPLOT_API ImPlotPoint Clamp(const ImPlotPoint& p) { return Clamp(p.x, p.y); } /* original C++ signature */ @overload def clamp(self, p: Point) -> Point: - """(private API)""" pass - # ImPlotPoint Clamp(double x, double y) { return ImPlotPoint(X.Clamp(x),Y.Clamp(y)); } /* original C++ signature */ + # IMPLOT_API ImPlotPoint Clamp(double x, double y) { return ImPlotPoint(X.Clamp(x),Y.Clamp(y)); } /* original C++ signature */ @overload def clamp(self, x: float, y: float) -> Point: - """(private API)""" pass - # ImPlotPoint Min() const { return ImPlotPoint(X.Min, Y.Min); } /* original C++ signature */ + # IMPLOT_API ImPlotPoint Min() const { return ImPlotPoint(X.Min, Y.Min); } /* original C++ signature */ def min(self) -> Point: - 
"""(private API)""" pass - # ImPlotPoint Max() const { return ImPlotPoint(X.Max, Y.Max); } /* original C++ signature */ + # IMPLOT_API ImPlotPoint Max() const { return ImPlotPoint(X.Max, Y.Max); } /* original C++ signature */ def max(self) -> Point: - """(private API)""" pass class Style: @@ -1165,7 +1156,7 @@ def end_plot() -> None: # Starts a subdivided plotting context. If the function returns True, # EndSubplots() MUST be called! Call BeginPlot/EndPlot AT MOST [rows*cols] -# times in between the begining and end of the subplot context. Plots are +# times in between the beginning and end of the subplot context. Plots are # added in row major order. # # Example: @@ -1375,7 +1366,7 @@ def setup_finish() -> None: # using a preceding button or slider widget to change the plot limits). In # this case, you can use the `SetNext` API below. While this is not as feature # rich as the Setup API, most common needs are provided. These functions can be -# called anwhere except for inside of `Begin/EndPlot`. For example: +# called anywhere except for inside of `Begin/EndPlot`. For example: # if (ImGui::Button("Center Plot")) # ImPlot::SetNextPlotLimits(-1,1,-1,1); @@ -1433,7 +1424,7 @@ def set_next_axes_to_fit() -> None: # [SECTION] Plot Items # ----------------------------------------------------------------------------- -# The main plotting API is provied below. Call these functions between +# The main plotting API is provided below. Call these functions between # Begin/EndPlot and after any Setup API calls. Each plots data on the current # x and y axes, which can be changed with `SetAxis/Axes`. 
# @@ -1687,7 +1678,7 @@ def plot_digital(label_id: str, xs: np.ndarray, ys: np.ndarray, flags: DigitalFl # IMPLOT_API void PlotImage(const char* label_id, ImTextureRef tex_ref, const ImPlotPoint& bounds_min, const ImPlotPoint& bounds_max, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 1), const ImVec4& tint_col = ImVec4(1, 1, 1, 1), ImPlotImageFlags flags = 0); /* original C++ signature */ def plot_image( label_id: str, - tex_ref: ImTextureID, + tex_ref: ImTextureRef, bounds_min: Point, bounds_max: Point, uv0: Optional[ImVec2Like] = None, @@ -1734,7 +1725,7 @@ def plot_dummy(label_id: str, flags: DummyFlags = 0) -> None: # when user interaction causes the provided coordinates to change. Additional # user interactions can be retrieved through the optional output parameters. -# IMPLOT_API bool DragPoint(int id, double* x, double* y, const ImVec4& col, float size = 4, ImPlotDragToolFlags flags = 0, bool* out_clicked = nullptr, bool* out_hovered = nullptr, bool* held = nullptr); /* original C++ signature */ +# IMPLOT_API bool DragPoint(int id, double* x, double* y, const ImVec4& col, float size = 4, ImPlotDragToolFlags flags = 0, bool* out_clicked = nullptr, bool* out_hovered = nullptr, bool* out_held = nullptr); /* original C++ signature */ def drag_point( id_: int, x: float, @@ -1744,12 +1735,12 @@ def drag_point( flags: DragToolFlags = 0, out_clicked: Optional[bool] = None, out_hovered: Optional[bool] = None, - held: Optional[bool] = None, + out_held: Optional[bool] = None, ) -> Tuple[bool, float, float, Optional[bool], Optional[bool], Optional[bool]]: """Shows a draggable point at x,y. 
#col defaults to ImGuiCol_Text.""" pass -# IMPLOT_API bool DragLineX(int id, double* x, const ImVec4& col, float thickness = 1, ImPlotDragToolFlags flags = 0, bool* out_clicked = nullptr, bool* out_hovered = nullptr, bool* held = nullptr); /* original C++ signature */ +# IMPLOT_API bool DragLineX(int id, double* x, const ImVec4& col, float thickness = 1, ImPlotDragToolFlags flags = 0, bool* out_clicked = nullptr, bool* out_hovered = nullptr, bool* out_held = nullptr); /* original C++ signature */ def drag_line_x( id_: int, x: float, @@ -1758,12 +1749,12 @@ def drag_line_x( flags: DragToolFlags = 0, out_clicked: Optional[bool] = None, out_hovered: Optional[bool] = None, - held: Optional[bool] = None, + out_held: Optional[bool] = None, ) -> Tuple[bool, float, Optional[bool], Optional[bool], Optional[bool]]: """Shows a draggable vertical guide line at an x-value. #col defaults to ImGuiCol_Text.""" pass -# IMPLOT_API bool DragLineY(int id, double* y, const ImVec4& col, float thickness = 1, ImPlotDragToolFlags flags = 0, bool* out_clicked = nullptr, bool* out_hovered = nullptr, bool* held = nullptr); /* original C++ signature */ +# IMPLOT_API bool DragLineY(int id, double* y, const ImVec4& col, float thickness = 1, ImPlotDragToolFlags flags = 0, bool* out_clicked = nullptr, bool* out_hovered = nullptr, bool* out_held = nullptr); /* original C++ signature */ def drag_line_y( id_: int, y: float, @@ -1772,12 +1763,12 @@ def drag_line_y( flags: DragToolFlags = 0, out_clicked: Optional[bool] = None, out_hovered: Optional[bool] = None, - held: Optional[bool] = None, + out_held: Optional[bool] = None, ) -> Tuple[bool, float, Optional[bool], Optional[bool], Optional[bool]]: """Shows a draggable horizontal guide line at a y-value. 
#col defaults to ImGuiCol_Text.""" pass -# IMPLOT_API bool DragRect(int id, double* x1, double* y1, double* x2, double* y2, const ImVec4& col, ImPlotDragToolFlags flags = 0, bool* out_clicked = nullptr, bool* out_hovered = nullptr, bool* held = nullptr); /* original C++ signature */ +# IMPLOT_API bool DragRect(int id, double* x1, double* y1, double* x2, double* y2, const ImVec4& col, ImPlotDragToolFlags flags = 0, bool* out_clicked = nullptr, bool* out_hovered = nullptr, bool* out_held = nullptr); /* original C++ signature */ def drag_rect( id_: int, x1: float, @@ -1788,7 +1779,7 @@ def drag_rect( flags: DragToolFlags = 0, out_clicked: Optional[bool] = None, out_hovered: Optional[bool] = None, - held: Optional[bool] = None, + out_held: Optional[bool] = None, ) -> Tuple[bool, float, float, float, float, Optional[bool], Optional[bool], Optional[bool]]: """Shows a draggable and resizeable rectangle.""" pass @@ -1888,7 +1879,7 @@ def get_plot_pos() -> ImVec2: # IMPLOT_API ImVec2 GetPlotSize(); /* original C++ signature */ def get_plot_size() -> ImVec2: - """Get the curent Plot size in pixels.""" + """Get the current Plot size in pixels.""" pass # IMPLOT_API ImPlotPoint GetPlotMousePos(ImAxis x_axis = IMPLOT_AUTO, ImAxis y_axis = IMPLOT_AUTO); /* original C++ signature */ @@ -2079,7 +2070,7 @@ def end_drag_drop_source() -> None: # manually set these colors to whatever you like, and further can Push/Pop # them around individual plots for plot-specific styling (e.g. coloring axes). -# Provides access to plot style structure for permanant modifications to colors, sizes, etc. +# Provides access to plot style structure for permanent modifications to colors, sizes, etc. 
# IMPLOT_API ImPlotStyle& GetStyle(); /* original C++ signature */ def get_style() -> Style: pass @@ -2348,7 +2339,7 @@ def colormap_slider( # IMPLOT_API bool ColormapButton(const char* label, const ImVec2& size = ImVec2(0,0), ImPlotColormap cmap = IMPLOT_AUTO); /* original C++ signature */ def colormap_button(label: str, size: Optional[ImVec2Like] = None, cmap: Optional[Colormap] = None) -> bool: - """Shows a button with a colormap gradient brackground. + """Shows a button with a colormap gradient background. Python bindings defaults: @@ -2361,7 +2352,7 @@ def colormap_button(label: str, size: Optional[ImVec2Like] = None, cmap: Optiona # IMPLOT_API void BustColorCache(const char* plot_title_id = nullptr); /* original C++ signature */ def bust_color_cache(plot_title_id: Optional[str] = None) -> None: """When items in a plot sample their color from a colormap, the color is cached and does not change - unless explicitly overriden. Therefore, if you change the colormap after the item has already been plotted, + unless explicitly overridden. Therefore, if you change the colormap after the item has already been plotted, item colors will NOT update. If you need item colors to resample the new colormap, then use this function to bust the cached colors. If #plot_title_id is None, then every item in EVERY existing plot will be cache busted. Otherwise only the plot specified by #plot_title_id will be busted. 
For the @@ -2376,7 +2367,7 @@ def bust_color_cache(plot_title_id: Optional[str] = None) -> None: # IMPLOT_API ImPlotInputMap& GetInputMap(); /* original C++ signature */ def get_input_map() -> InputMap: - """Provides access to input mapping structure for permanant modifications to controls for pan, select, etc.""" + """Provides access to input mapping structure for permanent modifications to controls for pan, select, etc.""" pass # IMPLOT_API void MapInputDefault(ImPlotInputMap* dst = nullptr); /* original C++ signature */ @@ -2467,8 +2458,6 @@ def show_all_demos() -> None: """Bundle: ShowAllDemos is extracted from ShowDemoWindow, so that it can be used without creating an ImGui window.""" pass -# namespace ImPlot - # ----------------------------------------------------------------------------- # [SECTION] Obsolete API # ----------------------------------------------------------------------------- @@ -2502,6 +2491,7 @@ def add_colormap(name: str, cols: np.ndarray, qual: bool = True) -> Colormap: # Sets an axis' ticks and optionally the labels for the next plot. To keep the default ticks, set #keep_default=true. # IMPLOT_API void SetupAxisTicks(ImAxis axis, double v_min, double v_max, int n_ticks, const char* const labels[]=nullptr, bool keep_default=false); +@overload def setup_axis_ticks( axis: ImAxis, v_min: float, @@ -2514,6 +2504,7 @@ def setup_axis_ticks( # Sets an axis' ticks and optionally the labels for the next plot. To keep the default ticks, set #keep_default=true. 
# IMPLOT_API void SetupAxisTicks(ImAxis axis, const double* values, int n_ticks, const char* const labels[]=nullptr, bool keep_default=false); +@overload def setup_axis_ticks( axis: ImAxis, values: List[float], diff --git a/blimgui/dist64/imgui_bundle/implot/internal.pyi b/blimgui/dist64/imgui_bundle/implot/internal.pyi index 2e0d41c..8675537 100644 --- a/blimgui/dist64/imgui_bundle/implot/internal.pyi +++ b/blimgui/dist64/imgui_bundle/implot/internal.pyi @@ -53,7 +53,8 @@ time_t = int #################### #################### # MIT License -# Copyright (c) 2023 Evan Pezent +# Copyright (c) 2020-2024 Evan Pezent +# Copyright (c) 2025 Breno Cunha Queiroz # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -239,7 +240,7 @@ def im_mix_u32(a: ImU32, b: ImU32, s: ImU32) -> ImU32: # return ImMixU32(colors[i1], colors[i2], (ImU32)(tr*256)); # } def im_lerp_u32(colors: ImU32, size: int, t: float) -> ImU32: - """ Lerp across an array of 32-bit collors given t in [0.0 1.0] + """ Lerp across an array of 32-bit colors given t in [0.0 1.0] (private API) """ pass @@ -259,7 +260,7 @@ def im_alpha_u32(col: ImU32, alpha: float) -> ImU32: #----------------------------------------------------------------------------- -class TimeUnit_(enum.Enum): +class TimeUnit_(enum.IntFlag): # ImPlotTimeUnit_Us, /* original C++ signature */ us = enum.auto() # (= 0) # microsecond # ImPlotTimeUnit_Ms, /* original C++ signature */ @@ -280,7 +281,7 @@ class TimeUnit_(enum.Enum): # } count = enum.auto() # (= 8) -class DateFmt_(enum.Enum): +class DateFmt_(enum.IntFlag): # default [ ISO 8601 ] # ImPlotDateFmt_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) @@ -295,7 +296,7 @@ class DateFmt_(enum.Enum): # ImPlotDateFmt_Yr /* original C++ signature */ yr = enum.auto() # (= 5) # 1991 [ 1991 ] -class TimeFmt_(enum.Enum): +class TimeFmt_(enum.IntFlag): # default [ 24 Hour Clock ] # 
ImPlotTimeFmt_None = 0, /* original C++ signature */ none = enum.auto() # (= 0) @@ -544,9 +545,14 @@ class PointError: neg: float # Pos; /* original C++ signature */ pos: float + # ImPlotPointError() { X = 0; Y = 0; Neg = 0; Pos = 0; } /* original C++ signature */ + @overload + def __init__(self) -> None: + pass # ImPlotPointError(double x, double y, double neg, double pos) { /* original C++ signature */ # X = x; Y = y; Neg = neg; Pos = pos; # } + @overload def __init__(self, x: float, y: float, neg: float, pos: float) -> None: pass @@ -622,14 +628,15 @@ class Tag: color_fg: ImU32 # int TextOffset; /* original C++ signature */ text_offset: int - # ImPlotTag(ImAxis Axis = ImAxis(), double Value = double(), ImU32 ColorBg = ImU32(), ImU32 ColorFg = ImU32(), int TextOffset = int()); /* original C++ signature */ - def __init__(self, axis: Optional[ImAxis] = None, value: float = float(), color_bg: ImU32 = ImU32(), color_fg: ImU32 = ImU32(), text_offset: int = int()) -> None: - """Auto-generated default constructor with named params - - Python bindings defaults: - If Axis is None, then its default value will be: ImAxis() - """ + # ImPlotTag() { /* original C++ signature */ + # Axis = 0; + # Value = 0; + # ColorBg = 0; + # ColorFg = 0; + # TextOffset = 0; + # } + def __init__(self) -> None: pass class TagCollection: @@ -689,6 +696,20 @@ class Tick: # int Idx; /* original C++ signature */ idx: int + # ImPlotTick() { /* original C++ signature */ + # PlotPos = 0; + # PixelPos = 0; + # LabelSize = ImVec2(0,0); + # TextOffset = -1; + # Major = false; + # ShowLabel = false; + # Level = 0; + # Idx = -1; + # } + @overload + def __init__(self) -> None: + pass + # ImPlotTick(double value, bool major, int level, bool show_label) { /* original C++ signature */ # PixelPos = 0; # PlotPos = value; @@ -697,6 +718,7 @@ class Tick: # Level = level; # TextOffset = -1; # } + @overload def __init__(self, value: float, major: bool, level: int, show_label: bool) -> None: pass @@ -2234,24 +2256,6 
@@ def get_days_in_month(year: int, month: int) -> int: # NB: The following functions only work if there is a current ImPlotContext because the # internal tm struct is owned by the context! They are aware of ImPlotStyle.UseLocalTime. -# static inline ImPlotTime MkTime(struct tm *ptm) { /* original C++ signature */ -# if (GetStyle().UseLocalTime) return MkLocTime(ptm); -# else return MkGmtTime(ptm); -# } -def mk_time(ptm: struct tm) -> Time: - """ // Make a UNIX timestamp from a tm struct according to the current ImPlotStyle.UseLocalTime setting. - (private API) - """ - pass -# static inline tm* GetTime(const ImPlotTime& t, tm* ptm) { /* original C++ signature */ -# if (GetStyle().UseLocalTime) return GetLocTime(t,ptm); -# else return GetGmtTime(t,ptm); -# } -def get_time(t: Time, ptm: tm) -> tm: - """ Get a tm struct from a UNIX timestamp according to the current ImPlotStyle.UseLocalTime setting. - (private API) - """ - pass # IMPLOT_API ImPlotTime MakeTime(int year, int month = 0, int day = 1, int hour = 0, int min = 0, int sec = 0, int us = 0); /* original C++ signature */ def make_time(year: int, month: int = 0, day: int = 1, hour: int = 0, min: int = 0, sec: int = 0, us: int = 0) -> Time: @@ -2415,7 +2419,6 @@ class Formatter_Time_Data: -# namespace ImPlot # #endif #################### #################### diff --git a/blimgui/dist64/imgui_bundle/implot3d/__init__.pyi b/blimgui/dist64/imgui_bundle/implot3d/__init__.pyi index d414368..b98c10d 100644 --- a/blimgui/dist64/imgui_bundle/implot3d/__init__.pyi +++ b/blimgui/dist64/imgui_bundle/implot3d/__init__.pyi @@ -16,9 +16,10 @@ from imgui_bundle.imgui import ( ImVec4Like, ImU32, ImDrawList, - ImTextureID, + ImTextureRef, ) -ImageFlags = int # enum ImageFlags_ + +ImageFlags = int # enum ImageFlags_ ImGui_Context = imgui.internal.Context @@ -78,6 +79,7 @@ Location = int # enum Location_ ImPlane3D = int # enum ImPlane3D_ # typedef int ImPlot3DColormap; // -> ImPlot3DColormap_ // Enum: Colormaps # +ImAxis3D = int # 
enum ImAxis3D_ """ """ @@ -123,6 +125,13 @@ ImPlane3D = int # enum ImPlane3D_ # #ifndef IMGUI_DISABLE # +# [ADAPT_IMGUI_BUNDLE] +# #ifdef IMGUI_BUNDLE_PYTHON_API +# +# #endif +# +# [/ADAPT_IMGUI_BUNDLE] + # ----------------------------------------------------------------------------- # [SECTION] Macros and Defines # ----------------------------------------------------------------------------- @@ -136,7 +145,6 @@ ImPlane3D = int # enum ImPlane3D_ # Enums # Flags -# -> ImPlot3DAxisFlags_ // Flags: Axis flags # Fallback for ImGui versions before v1.92: define ImTextureRef as ImTextureID # You can `#define IMPLOT3D_NO_IMTEXTUREREF` to avoid this fallback @@ -145,7 +153,7 @@ ImPlane3D = int # enum ImPlane3D_ # [SECTION] Flags & Enumerations # ----------------------------------------------------------------------------- -class Flags_(enum.Enum): +class Flags_(enum.IntFlag): """Flags for ImPlot3D::BeginPlot()""" # ImPlot3DFlags_None = 0, /* original C++ signature */ @@ -160,11 +168,21 @@ class Flags_(enum.Enum): no_clip = enum.auto() # (= 1 << 3) # Disable 3D box clipping # ImPlot3DFlags_NoMenus = 1 << 4, /* original C++ signature */ no_menus = enum.auto() # (= 1 << 4) # The user will not be able to open context menus + # ImPlot3DFlags_Equal = 1 << 5, /* original C++ signature */ + equal = enum.auto() # (= 1 << 5) # X, Y, and Z axes will be constrained to have the same units/pixel + # ImPlot3DFlags_NoRotate = 1 << 6, /* original C++ signature */ + no_rotate = enum.auto() # (= 1 << 6) # Lock rotation interaction + # ImPlot3DFlags_NoPan = 1 << 7, /* original C++ signature */ + no_pan = enum.auto() # (= 1 << 7) # Lock panning/translation interaction + # ImPlot3DFlags_NoZoom = 1 << 8, /* original C++ signature */ + no_zoom = enum.auto() # (= 1 << 8) # Lock zoom interaction + # ImPlot3DFlags_NoInputs = 1 << 9, /* original C++ signature */ + no_inputs = enum.auto() # (= 1 << 9) # Disable all user inputs # ImPlot3DFlags_CanvasOnly = ImPlot3DFlags_NoTitle | ImPlot3DFlags_NoLegend 
| ImPlot3DFlags_NoMouseText, /* original C++ signature */ # } canvas_only = enum.auto() # (= Flags_NoTitle | Flags_NoLegend | Flags_NoMouseText) -class Cond_(enum.Enum): +class Cond_(enum.IntFlag): """Represents a condition for SetupAxisLimits etc. (same as ImGuiCond, but we only support a subset of those enums)""" # ImPlot3DCond_None = ImGuiCond_None, /* original C++ signature */ @@ -174,7 +192,7 @@ class Cond_(enum.Enum): # ImPlot3DCond_Once = ImGuiCond_Once, /* original C++ signature */ once = enum.auto() # (= Cond_Once) # Set the variable once per runtime session (only the first call will succeed) -class Col_(enum.Enum): +class Col_(enum.IntFlag): # Item colors # ImPlot3DCol_Line = 0, /* original C++ signature */ line = enum.auto() # (= 0) # Line color @@ -213,7 +231,7 @@ class Col_(enum.Enum): # } count = enum.auto() # (= 15) -class StyleVar_(enum.Enum): +class StyleVar_(enum.IntFlag): """Plot styling variables""" # Item style @@ -238,18 +256,22 @@ class StyleVar_(enum.Enum): ) # (= 7) # ImVec2, padding between widget frame and plot area, labels, or outside legends (i.e. 
main padding) # ImPlot3DStyleVar_LabelPadding, /* original C++ signature */ label_padding = enum.auto() # (= 8) # ImVec2, padding between axes labels, tick labels, and plot edge + # ImPlot3DStyleVar_ViewScaleFactor, /* original C++ signature */ + view_scale_factor = ( + enum.auto() + ) # (= 9) # float, scale factor for 3D view, you can use it to make the whole plot larger or smaller # Legend style # ImPlot3DStyleVar_LegendPadding, /* original C++ signature */ - legend_padding = enum.auto() # (= 9) # ImVec2, legend padding from plot edges + legend_padding = enum.auto() # (= 10) # ImVec2, legend padding from plot edges # ImPlot3DStyleVar_LegendInnerPadding, /* original C++ signature */ - legend_inner_padding = enum.auto() # (= 10) # ImVec2, legend inner padding from legend edges + legend_inner_padding = enum.auto() # (= 11) # ImVec2, legend inner padding from legend edges # ImPlot3DStyleVar_LegendSpacing, /* original C++ signature */ - legend_spacing = enum.auto() # (= 11) # ImVec2, spacing between legend entries + legend_spacing = enum.auto() # (= 12) # ImVec2, spacing between legend entries # ImPlot3DStyleVar_COUNT /* original C++ signature */ # } - count = enum.auto() # (= 12) + count = enum.auto() # (= 13) -class Marker_(enum.Enum): +class Marker_(enum.IntFlag): # ImPlot3DMarker_None = -1, /* original C++ signature */ none = enum.auto() # (= -1) # No marker # ImPlot3DMarker_Circle, /* original C++ signature */ @@ -276,7 +298,7 @@ class Marker_(enum.Enum): # } count = enum.auto() # (= 10) -class ItemFlags_(enum.Enum): +class ItemFlags_(enum.IntFlag): """Flags for items""" # ImPlot3DItemFlags_None = 0, /* original C++ signature */ @@ -286,7 +308,7 @@ class ItemFlags_(enum.Enum): # ImPlot3DItemFlags_NoFit = 1 << 1, /* original C++ signature */ no_fit = enum.auto() # (= 1 << 1) # The item won't be considered for plot fits -class ScatterFlags_(enum.Enum): +class ScatterFlags_(enum.IntFlag): """Flags for PlotScatter""" # ImPlot3DScatterFlags_None = 0, /* original C++ 
signature */ @@ -297,7 +319,7 @@ class ScatterFlags_(enum.Enum): # } no_fit = enum.auto() # (= ItemFlags_NoFit) -class LineFlags_(enum.Enum): +class LineFlags_(enum.IntFlag): """Flags for PlotLine""" # ImPlot3DLineFlags_None = 0, /* original C++ signature */ @@ -313,7 +335,7 @@ class LineFlags_(enum.Enum): # ImPlot3DLineFlags_SkipNaN = 1 << 12, /* original C++ signature */ skip_nan = enum.auto() # (= 1 << 12) # NaNs values will be skipped instead of rendered as missing data -class TriangleFlags_(enum.Enum): +class TriangleFlags_(enum.IntFlag): """Flags for PlotTriangle""" # ImPlot3DTriangleFlags_None = 0, /* original C++ signature */ @@ -321,10 +343,15 @@ class TriangleFlags_(enum.Enum): # ImPlot3DTriangleFlags_NoLegend = ImPlot3DItemFlags_NoLegend, /* original C++ signature */ no_legend = enum.auto() # (= ItemFlags_NoLegend) # ImPlot3DTriangleFlags_NoFit = ImPlot3DItemFlags_NoFit, /* original C++ signature */ - # } no_fit = enum.auto() # (= ItemFlags_NoFit) - -class QuadFlags_(enum.Enum): + # ImPlot3DTriangleFlags_NoLines = 1 << 10, /* original C++ signature */ + no_lines = enum.auto() # (= 1 << 10) # No lines will be rendered + # ImPlot3DTriangleFlags_NoFill = 1 << 11, /* original C++ signature */ + no_fill = enum.auto() # (= 1 << 11) # No fill will be rendered + # ImPlot3DTriangleFlags_NoMarkers = 1 << 12, /* original C++ signature */ + no_markers = enum.auto() # (= 1 << 12) # No markers will be rendered + +class QuadFlags_(enum.IntFlag): """Flags for PlotQuad""" # ImPlot3DQuadFlags_None = 0, /* original C++ signature */ @@ -332,10 +359,15 @@ class QuadFlags_(enum.Enum): # ImPlot3DQuadFlags_NoLegend = ImPlot3DItemFlags_NoLegend, /* original C++ signature */ no_legend = enum.auto() # (= ItemFlags_NoLegend) # ImPlot3DQuadFlags_NoFit = ImPlot3DItemFlags_NoFit, /* original C++ signature */ - # } no_fit = enum.auto() # (= ItemFlags_NoFit) - -class SurfaceFlags_(enum.Enum): + # ImPlot3DQuadFlags_NoLines = 1 << 10, /* original C++ signature */ + no_lines = enum.auto() 
# (= 1 << 10) # No lines will be rendered + # ImPlot3DQuadFlags_NoFill = 1 << 11, /* original C++ signature */ + no_fill = enum.auto() # (= 1 << 11) # No fill will be rendered + # ImPlot3DQuadFlags_NoMarkers = 1 << 12, /* original C++ signature */ + no_markers = enum.auto() # (= 1 << 12) # No markers will be rendered + +class SurfaceFlags_(enum.IntFlag): """Flags for PlotSurface""" # ImPlot3DSurfaceFlags_None = 0, /* original C++ signature */ @@ -343,10 +375,15 @@ class SurfaceFlags_(enum.Enum): # ImPlot3DSurfaceFlags_NoLegend = ImPlot3DItemFlags_NoLegend, /* original C++ signature */ no_legend = enum.auto() # (= ItemFlags_NoLegend) # ImPlot3DSurfaceFlags_NoFit = ImPlot3DItemFlags_NoFit, /* original C++ signature */ - # } no_fit = enum.auto() # (= ItemFlags_NoFit) - -class MeshFlags_(enum.Enum): + # ImPlot3DSurfaceFlags_NoLines = 1 << 10, /* original C++ signature */ + no_lines = enum.auto() # (= 1 << 10) # No lines will be rendered + # ImPlot3DSurfaceFlags_NoFill = 1 << 11, /* original C++ signature */ + no_fill = enum.auto() # (= 1 << 11) # No fill will be rendered + # ImPlot3DSurfaceFlags_NoMarkers = 1 << 12, /* original C++ signature */ + no_markers = enum.auto() # (= 1 << 12) # No markers will be rendered + +class MeshFlags_(enum.IntFlag): """Flags for PlotMesh""" # ImPlot3DMeshFlags_None = 0, /* original C++ signature */ @@ -354,10 +391,15 @@ class MeshFlags_(enum.Enum): # ImPlot3DMeshFlags_NoLegend = ImPlot3DItemFlags_NoLegend, /* original C++ signature */ no_legend = enum.auto() # (= ItemFlags_NoLegend) # ImPlot3DMeshFlags_NoFit = ImPlot3DItemFlags_NoFit, /* original C++ signature */ - # } no_fit = enum.auto() # (= ItemFlags_NoFit) - -class ImageFlags_(enum.Enum): + # ImPlot3DMeshFlags_NoLines = 1 << 10, /* original C++ signature */ + no_lines = enum.auto() # (= 1 << 10) # No lines will be rendered + # ImPlot3DMeshFlags_NoFill = 1 << 11, /* original C++ signature */ + no_fill = enum.auto() # (= 1 << 11) # No fill will be rendered + # 
ImPlot3DMeshFlags_NoMarkers = 1 << 12, /* original C++ signature */ + no_markers = enum.auto() # (= 1 << 12) # No markers will be rendered + +class ImageFlags_(enum.IntFlag): """Flags for PlotImage""" # ImPlot3DImageFlags_None = 0, /* original C++ signature */ @@ -368,7 +410,7 @@ class ImageFlags_(enum.Enum): # } no_fit = enum.auto() # (= ItemFlags_NoFit) -class LegendFlags_(enum.Enum): +class LegendFlags_(enum.IntFlag): """Flags for legends""" # ImPlot3DLegendFlags_None = 0, /* original C++ signature */ @@ -382,7 +424,7 @@ class LegendFlags_(enum.Enum): # ImPlot3DLegendFlags_Horizontal = 1 << 2, /* original C++ signature */ horizontal = enum.auto() # (= 1 << 2) # Legend entries will be displayed horizontally -class Location_(enum.Enum): +class Location_(enum.IntFlag): """Used to position legend on a plot""" # ImPlot3DLocation_Center = 0, /* original C++ signature */ @@ -404,7 +446,7 @@ class Location_(enum.Enum): # ImPlot3DLocation_SouthEast = ImPlot3DLocation_South | ImPlot3DLocation_East /* original C++ signature */ south_east = enum.auto() # (= Location_South | Location_East) # Bottom-right -class AxisFlags_(enum.Enum): +class AxisFlags_(enum.IntFlag): """Flags for axis""" # ImPlot3DAxisFlags_None = 0, /* original C++ signature */ @@ -425,13 +467,17 @@ class AxisFlags_(enum.Enum): auto_fit = enum.auto() # (= 1 << 6) # Axis will be auto-fitting to data extents # ImPlot3DAxisFlags_Invert = 1 << 7, /* original C++ signature */ invert = enum.auto() # (= 1 << 7) # The axis will be inverted + # ImPlot3DAxisFlags_PanStretch = 1 << 8, /* original C++ signature */ + pan_stretch = ( + enum.auto() + ) # (= 1 << 8) # Panning in a locked or constrained state will cause the axis to stretch if possible # ImPlot3DAxisFlags_Lock = ImPlot3DAxisFlags_LockMin | ImPlot3DAxisFlags_LockMax, /* original C++ signature */ lock = enum.auto() # (= AxisFlags_LockMin | AxisFlags_LockMax) # ImPlot3DAxisFlags_NoDecorations = ImPlot3DAxisFlags_NoLabel | ImPlot3DAxisFlags_NoGridLines | 
ImPlot3DAxisFlags_NoTickLabels, /* original C++ signature */ # } no_decorations = enum.auto() # (= AxisFlags_NoLabel | AxisFlags_NoGridLines | AxisFlags_NoTickLabels) -class ImAxis3D_(enum.Enum): +class ImAxis3D_(enum.IntFlag): """Axis indices""" # ImAxis3D_X = 0, /* original C++ signature */ @@ -444,7 +490,7 @@ class ImAxis3D_(enum.Enum): # } count = enum.auto() # (= 3) -class ImPlane3D_(enum.Enum): +class ImPlane3D_(enum.IntFlag): """Plane indices""" # ImPlane3D_YZ = 0, /* original C++ signature */ @@ -457,7 +503,7 @@ class ImPlane3D_(enum.Enum): # } count = enum.auto() # (= 3) -class Colormap_(enum.Enum): +class Colormap_(enum.IntFlag): """Colormaps""" # ImPlot3DColormap_Deep = 0, /* original C++ signature */ @@ -587,6 +633,16 @@ def setup_axis_limits(axis: ImAxis3D, v_min: float, v_max: float, cond: Optional """ pass +# IMPLOT3D_API void SetupAxisLimitsConstraints(ImAxis3D axis, double v_min, double v_max); /* original C++ signature */ +def setup_axis_limits_constraints(axis: ImAxis3D, v_min: float, v_max: float) -> None: + """Sets an axis' limits constraints""" + pass + +# IMPLOT3D_API void SetupAxisZoomConstraints(ImAxis3D axis, double z_min, double z_max); /* original C++ signature */ +def setup_axis_zoom_constraints(axis: ImAxis3D, z_min: float, z_max: float) -> None: + """Sets an axis' zoom constraints""" + pass + # IMPLOT3D_API void SetupAxes(const char* x_label, const char* y_label, const char* z_label, ImPlot3DAxisFlags x_flags = 0, /* original C++ signature */ # ImPlot3DAxisFlags y_flags = 0, ImPlot3DAxisFlags z_flags = 0); def setup_axes( @@ -608,7 +664,7 @@ def setup_axes_limits( """ pass -# IMPLOT3D_API void SetupBoxRotation(float elevation, float azimuth, bool animate = false, ImPlot3DCond cond = ImPlot3DCond_Once); /* original C++ signature */ +# IMPLOT3D_API void SetupBoxRotation(double elevation, double azimuth, bool animate = false, ImPlot3DCond cond = ImPlot3DCond_Once); /* original C++ signature */ @overload def setup_box_rotation(elevation: 
float, azimuth: float, animate: bool = False, cond: Optional[Cond] = None) -> None: """Sets the plot box rotation given the elevation and azimuth angles in degrees. If ImPlot3DCond_Always is used, the rotation will be locked @@ -630,7 +686,7 @@ def setup_box_rotation(rotation: Quat, animate: bool = False, cond: Optional[Con """ pass -# IMPLOT3D_API void SetupBoxInitialRotation(float elevation, float azimuth); /* original C++ signature */ +# IMPLOT3D_API void SetupBoxInitialRotation(double elevation, double azimuth); /* original C++ signature */ @overload def setup_box_initial_rotation(elevation: float, azimuth: float) -> None: """Sets the plot box initial rotation given the elevation and azimuth angles in degrees. The initial rotation is the rotation the plot goes back to @@ -646,7 +702,7 @@ def setup_box_initial_rotation(rotation: Quat) -> None: """ pass -# IMPLOT3D_API void SetupBoxScale(float x, float y, float z); /* original C++ signature */ +# IMPLOT3D_API void SetupBoxScale(double x, double y, double z); /* original C++ signature */ def setup_box_scale(x: float, y: float, z: float) -> None: """Sets the plot box X/Y/Z scale. A scale of 1.0 is the default. 
Values greater than 1.0 enlarge the plot, while values between 0.0 and 1.0 shrink it""" pass @@ -779,13 +835,13 @@ def plot_mesh(label_id: str, mesh: Mesh, flags: MeshFlags = 0) -> None: # # [/ADAPT_IMGUI_BUNDLE] -# IMPLOT3D_API void PlotImage(const char* label_id, ImTextureID tex_ref, const ImPlot3DPoint& center, const ImPlot3DPoint& axis_u, /* original C++ signature */ +# IMPLOT3D_API void PlotImage(const char* label_id, ImTextureRef tex_ref, const ImPlot3DPoint& center, const ImPlot3DPoint& axis_u, /* original C++ signature */ # const ImPlot3DPoint& axis_v, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 1), # const ImVec4& tint_col = ImVec4(1, 1, 1, 1), ImPlot3DImageFlags flags = 0); @overload def plot_image( label_id: str, - tex_ref: ImTextureID, + tex_ref: ImTextureRef, center: Point, axis_u: Point, axis_v: Point, @@ -810,14 +866,14 @@ def plot_image( """ pass -# IMPLOT3D_API void PlotImage(const char* label_id, ImTextureID tex_ref, const ImPlot3DPoint& p0, const ImPlot3DPoint& p1, const ImPlot3DPoint& p2, /* original C++ signature */ +# IMPLOT3D_API void PlotImage(const char* label_id, ImTextureRef tex_ref, const ImPlot3DPoint& p0, const ImPlot3DPoint& p1, const ImPlot3DPoint& p2, /* original C++ signature */ # const ImPlot3DPoint& p3, const ImVec2& uv0 = ImVec2(0, 0), const ImVec2& uv1 = ImVec2(1, 0), # const ImVec2& uv2 = ImVec2(1, 1), const ImVec2& uv3 = ImVec2(0, 1), const ImVec4& tint_col = ImVec4(1, 1, 1, 1), # ImPlot3DImageFlags flags = 0); @overload def plot_image( label_id: str, - tex_ref: ImTextureID, + tex_ref: ImTextureRef, p0: Point, p1: Point, p2: Point, @@ -846,7 +902,7 @@ def plot_image( """ pass -# IMPLOT3D_API void PlotText(const char* text, float x, float y, float z, float angle = 0.0f, const ImVec2& pix_offset = ImVec2(0, 0)); /* original C++ signature */ +# IMPLOT3D_API void PlotText(const char* text, double x, double y, double z, double angle = 0.0, const ImVec2& pix_offset = ImVec2(0, 0)); /* original C++ signature */ 
def plot_text( text: str, x: float, y: float, z: float, angle: float = 0.0, pix_offset: Optional[ImVec2Like] = None ) -> None: @@ -1149,9 +1205,7 @@ def show_demo_window(p_open: Optional[bool] = None) -> Optional[bool]: # IMPLOT3D_API void ShowAllDemos(); /* original C++ signature */ def show_all_demos() -> None: - """Bundle: ShowAllDemos is extracted from ShowDemoWindow, - so that it can be used without creating an ImGui window. - """ + """Shows all ImPlot3D demos, without enclosing window""" pass # IMPLOT3D_API void ShowStyleEditor(ImPlot3DStyle* ref = nullptr); /* original C++ signature */ @@ -1159,6 +1213,11 @@ def show_style_editor(ref: Optional[Style] = None) -> None: """Shows ImPlot3D style editor block (not a window)""" pass +# IMPLOT3D_API void ShowMetricsWindow(bool* p_popen = nullptr); /* original C++ signature */ +def show_metrics_window(p_popen: Optional[bool] = None) -> Optional[bool]: + """Shows ImPlot3D metrics/debug information window.""" + pass + # ----------------------------------------------------------------------------- # [SECTION] ImPlot3DPoint # ----------------------------------------------------------------------------- @@ -1166,43 +1225,43 @@ def show_style_editor(ref: Optional[Style] = None) -> None: class Point: """ImPlot3DPoint: 3D vector to store points in 3D""" - # float x, /* original C++ signature */ + # double x, /* original C++ signature */ x: float # y, /* original C++ signature */ y: float # z; /* original C++ signature */ z: float - # constexpr ImPlot3DPoint() : x(0.0f), y(0.0f), z(0.0f) {} /* original C++ signature */ + # constexpr ImPlot3DPoint() : x(0.0), y(0.0), z(0.0) {} /* original C++ signature */ @overload def __init__(self) -> None: pass - # constexpr ImPlot3DPoint(float _x, float _y, float _z) : x(_x), y(_y), z(_z) {} /* original C++ signature */ + # constexpr ImPlot3DPoint(double _x, double _y, double _z) : x(_x), y(_y), z(_z) {} /* original C++ signature */ @overload def __init__(self, _x: float, _y: float, _z: 
float) -> None: pass # Accessors - # float& operator[](size_t idx) { /* original C++ signature */ + # double& operator[](size_t idx) { /* original C++ signature */ # IM_ASSERT(idx == 0 || idx == 1 || idx == 2); - # return ((float*)(void*)(char*)this)[idx]; + # return ((double*)(void*)(char*)this)[idx]; # } @overload def __getitem__(self, idx: int) -> float: """(private API)""" pass - # float operator[](size_t idx) const { /* original C++ signature */ + # double operator[](size_t idx) const { /* original C++ signature */ # IM_ASSERT(idx == 0 || idx == 1 || idx == 2); - # return ((const float*)(const void*)(const char*)this)[idx]; + # return ((const double*)(const void*)(const char*)this)[idx]; # } @overload def __getitem__(self, idx: int) -> float: """(private API)""" pass # Binary operators - # IMPLOT3D_API ImPlot3DPoint operator*(float rhs) const; /* original C++ signature */ + # IMPLOT3D_API ImPlot3DPoint operator*(double rhs) const; /* original C++ signature */ @overload def __mul__(self, rhs: float) -> Point: pass - # IMPLOT3D_API ImPlot3DPoint operator/(float rhs) const; /* original C++ signature */ + # IMPLOT3D_API ImPlot3DPoint operator/(double rhs) const; /* original C++ signature */ @overload def __truediv__(self, rhs: float) -> Point: pass @@ -1227,11 +1286,11 @@ class Point: """Unary operator""" pass # Compound assignment operators - # IMPLOT3D_API ImPlot3DPoint& operator*=(float rhs); /* original C++ signature */ + # IMPLOT3D_API ImPlot3DPoint& operator*=(double rhs); /* original C++ signature */ @overload def __imul__(self, rhs: float) -> Point: pass - # IMPLOT3D_API ImPlot3DPoint& operator/=(float rhs); /* original C++ signature */ + # IMPLOT3D_API ImPlot3DPoint& operator/=(double rhs); /* original C++ signature */ @overload def __itruediv__(self, rhs: float) -> Point: pass @@ -1256,7 +1315,7 @@ class Point: # IMPLOT3D_API bool operator!=(const ImPlot3DPoint& rhs) const; /* original C++ signature */ def __ne__(self, rhs: Point) -> bool: pass - # 
IMPLOT3D_API float Dot(const ImPlot3DPoint& rhs) const; /* original C++ signature */ + # IMPLOT3D_API double Dot(const ImPlot3DPoint& rhs) const; /* original C++ signature */ def dot(self, rhs: Point) -> float: """Dot product""" pass @@ -1264,11 +1323,11 @@ class Point: def cross(self, rhs: Point) -> Point: """Cross product""" pass - # IMPLOT3D_API float Length() const; /* original C++ signature */ + # IMPLOT3D_API double Length() const; /* original C++ signature */ def length(self) -> float: """Get vector length""" pass - # IMPLOT3D_API float LengthSquared() const; /* original C++ signature */ + # IMPLOT3D_API double LengthSquared() const; /* original C++ signature */ def length_squared(self) -> float: """Get vector squared length""" pass @@ -1365,26 +1424,26 @@ class Box: # ----------------------------------------------------------------------------- class Range: - # float Min; /* original C++ signature */ + # double Min; /* original C++ signature */ min: float - # float Max; /* original C++ signature */ + # double Max; /* original C++ signature */ max: float - # constexpr ImPlot3DRange() : Min(0.0f), Max(0.0f) {} /* original C++ signature */ + # constexpr ImPlot3DRange() : Min(0.0), Max(0.0) {} /* original C++ signature */ @overload def __init__(self) -> None: pass - # constexpr ImPlot3DRange(float min, float max) : Min(min), Max(max) {} /* original C++ signature */ + # constexpr ImPlot3DRange(double min, double max) : Min(min), Max(max) {} /* original C++ signature */ @overload def __init__(self, min: float, max: float) -> None: pass - # IMPLOT3D_API void Expand(float value); /* original C++ signature */ + # IMPLOT3D_API void Expand(double value); /* original C++ signature */ def expand(self, value: float) -> None: pass - # IMPLOT3D_API bool Contains(float value) const; /* original C++ signature */ + # IMPLOT3D_API bool Contains(double value) const; /* original C++ signature */ def contains(self, value: float) -> bool: pass - # float Size() const { return Max - 
Min; } /* original C++ signature */ + # double Size() const { return Max - Min; } /* original C++ signature */ def size(self) -> float: """(private API)""" pass @@ -1394,7 +1453,7 @@ class Range: # ----------------------------------------------------------------------------- class Quat: - # float x, /* original C++ signature */ + # double x, /* original C++ signature */ x: float # y, /* original C++ signature */ y: float @@ -1404,15 +1463,15 @@ class Quat: w: float # Constructors - # constexpr ImPlot3DQuat() : x(0.0f), y(0.0f), z(0.0f), w(1.0f) {} /* original C++ signature */ + # constexpr ImPlot3DQuat() : x(0.0), y(0.0), z(0.0), w(1.0) {} /* original C++ signature */ @overload def __init__(self) -> None: pass - # constexpr ImPlot3DQuat(float _x, float _y, float _z, float _w) : x(_x), y(_y), z(_z), w(_w) {} /* original C++ signature */ + # constexpr ImPlot3DQuat(double _x, double _y, double _z, double _w) : x(_x), y(_y), z(_z), w(_w) {} /* original C++ signature */ @overload def __init__(self, _x: float, _y: float, _z: float, _w: float) -> None: pass - # IMPLOT3D_API ImPlot3DQuat(float _angle, const ImPlot3DPoint& _axis); /* original C++ signature */ + # IMPLOT3D_API ImPlot3DQuat(double _angle, const ImPlot3DPoint& _axis); /* original C++ signature */ @overload def __init__(self, _angle: float, _axis: Point) -> None: pass @@ -1421,12 +1480,12 @@ class Quat: def from_two_vectors(v0: Point, v1: Point) -> Quat: """Set quaternion from two vectors""" pass - # IMPLOT3D_API static ImPlot3DQuat FromElAz(float elevation, float azimuth); /* original C++ signature */ + # IMPLOT3D_API static ImPlot3DQuat FromElAz(double elevation, double azimuth); /* original C++ signature */ @staticmethod def from_el_az(elevation: float, azimuth: float) -> Quat: """Set quaternion given elevation and azimuth angles in radians""" pass - # IMPLOT3D_API float Length() const; /* original C++ signature */ + # IMPLOT3D_API double Length() const; /* original C++ signature */ def length(self) -> 
float: """Get quaternion length""" pass @@ -1463,12 +1522,12 @@ class Quat: # IMPLOT3D_API bool operator!=(const ImPlot3DQuat& rhs) const; /* original C++ signature */ def __ne__(self, rhs: Quat) -> bool: pass - # IMPLOT3D_API static ImPlot3DQuat Slerp(const ImPlot3DQuat& q1, const ImPlot3DQuat& q2, float t); /* original C++ signature */ + # IMPLOT3D_API static ImPlot3DQuat Slerp(const ImPlot3DQuat& q1, const ImPlot3DQuat& q2, double t); /* original C++ signature */ @staticmethod def slerp(q1: Quat, q2: Quat, t: float) -> Quat: """Interpolate between two quaternions""" pass - # IMPLOT3D_API float Dot(const ImPlot3DQuat& rhs) const; /* original C++ signature */ + # IMPLOT3D_API double Dot(const ImPlot3DQuat& rhs) const; /* original C++ signature */ def dot(self, rhs: Quat) -> float: """Get quaternion dot product""" pass @@ -1502,6 +1561,8 @@ class Style: plot_padding: ImVec2 # ImVec2 LabelPadding; /* original C++ signature */ label_padding: ImVec2 + # float ViewScaleFactor; /* original C++ signature */ + view_scale_factor: float # Legend style # ImVec2 LegendPadding; /* original C++ signature */ legend_padding: ImVec2 # Legend padding from plot edges @@ -1540,8 +1601,6 @@ class Style: # Duck (Rubber Duck by Poly by Google [CC-BY] via Poly Pizza) -# namespace ImPlot3D - # #endif #################### #################### @@ -1554,6 +1613,7 @@ class Style: # Sets an axis' ticks and optionally the labels for the next plot. To keep the default ticks, set #keep_default=true. # IMPLOT3D_API void SetupAxisTicks(ImAxis3D axis, double v_min, double v_max, int n_ticks, const char* const labels[] = nullptr, bool keep_default = false); +@overload def setup_axis_ticks( axis: ImAxis3D, v_min: float, @@ -1566,6 +1626,7 @@ def setup_axis_ticks( # Sets an axis' ticks and optionally the labels for the next plot. To keep the default ticks, set #keep_default=true. 
# IMPLOT3D_API void SetupAxisTicks(ImAxis3D axis, const double* values, int n_ticks, const char* const labels[] = nullptr, bool keep_default = false); +@overload def setup_axis_ticks( axis: ImAxis3D, values: List[float], diff --git a/blimgui/dist64/imgui_bundle/implot3d/internal.pyi b/blimgui/dist64/imgui_bundle/implot3d/internal.pyi index ecc18a8..6886e91 100644 --- a/blimgui/dist64/imgui_bundle/implot3d/internal.pyi +++ b/blimgui/dist64/imgui_bundle/implot3d/internal.pyi @@ -8,7 +8,7 @@ from imgui_bundle.imgui import ImVec2, ImVec4, ImU32, ID, ImVec2Like, ImVec4Like from imgui_bundle.imgui.internal import ImRect from imgui_bundle.implot3d import ( Colormap, Marker, Flags, Col, Point, LegendFlags, AxisFlags, Location, - Quat, Ray, Range, Cond, Style, ItemFlags + Quat, Ray, Range, Cond, Style, ItemFlags, ImAxis3D ) @@ -58,39 +58,44 @@ from imgui_bundle.implot3d import ( #----------------------------------------------------------------------------- +# Computes the common (base-10) logarithm # static inline float ImLog10(float x) { return log10f(x); } /* original C++ signature */ +@overload def im_log10(x: float) -> float: - """ Computes the common (base-10) logarithm - (private API) - """ + """(private API)""" + pass +# static inline double ImLog10(double x) { return log10(x); } /* original C++ signature */ +@overload +def im_log10(x: float) -> float: + """(private API)""" pass # Flips a flag in a flagset -# static inline bool ImNan(float val) { return isnan(val); } /* original C++ signature */ +# static inline bool ImNan(double val) { return isnan(val); } /* original C++ signature */ def im_nan(val: float) -> bool: """ Returns True if val is NAN (private API) """ pass -# static inline bool ImNanOrInf(float val) { return !(val >= -FLT_MAX && val <= FLT_MAX) || ImNan(val); } /* original C++ signature */ +# static inline bool ImNanOrInf(double val) { return !(val >= -DBL_MAX && val <= DBL_MAX) || ImNan(val); } /* original C++ signature */ def im_nan_or_inf(val: float) 
-> bool: """ Returns True if val is NAN or INFINITY (private API) """ pass -# static inline double ImConstrainNan(float val) { return ImNan(val) ? 0 : val; } /* original C++ signature */ +# static inline double ImConstrainNan(double val) { return ImNan(val) ? 0 : val; } /* original C++ signature */ def im_constrain_nan(val: float) -> float: """ Turns NANs to 0s (private API) """ pass -# static inline double ImConstrainInf(double val) { return val >= FLT_MAX ? FLT_MAX : val <= -FLT_MAX ? -FLT_MAX : val; } /* original C++ signature */ +# static inline double ImConstrainInf(double val) { return val >= DBL_MAX ? DBL_MAX : val <= -DBL_MAX ? -DBL_MAX : val; } /* original C++ signature */ def im_constrain_inf(val: float) -> float: """ Turns infinity to floating point maximums (private API) """ pass # static inline bool ImAlmostEqual(double v1, double v2, int ulp = 2) { /* original C++ signature */ -# return ImAbs(v1 - v2) < FLT_EPSILON * ImAbs(v1 + v2) * ulp || ImAbs(v1 - v2) < FLT_MIN; +# return ImAbs(v1 - v2) < DBL_EPSILON * ImAbs(v1 + v2) * ulp || ImAbs(v1 - v2) < DBL_MIN; # } def im_almost_equal(v1: float, v2: float, ulp: int = 2) -> bool: """ True if two numbers are approximately equal using units in the last place. 
@@ -461,7 +466,7 @@ class ItemGroup: class Tick: """ Tick mark info""" - # float PlotPos; /* original C++ signature */ + # double PlotPos; /* original C++ signature */ plot_pos: float # bool Major; /* original C++ signature */ major: bool @@ -475,7 +480,7 @@ class Tick: idx: int # ImPlot3DTick(double value, bool major, bool show_label) { /* original C++ signature */ - # PlotPos = (float)value; + # PlotPos = value; # Major = major; # ShowLabel = show_label; # TextOffset = -1; @@ -542,14 +547,18 @@ class Ticker: class Axis: """ Holds axis information""" + # Flags # ImPlot3DAxisFlags Flags; /* original C++ signature */ flags: AxisFlags # ImPlot3DAxisFlags PreviousFlags; /* original C++ signature */ previous_flags: AxisFlags + # Range # ImPlot3DRange Range; /* original C++ signature */ range: Range # ImPlot3DCond RangeCond; /* original C++ signature */ range_cond: Cond + # double NDCScale; /* original C++ signature */ + ndc_scale: float # Ticks # ImPlot3DTicker Ticker; /* original C++ signature */ ticker: Ticker @@ -562,6 +571,11 @@ class Axis: fit_this_frame: bool # ImPlot3DRange FitExtents; /* original C++ signature */ fit_extents: Range + # Constraints + # ImPlot3DRange ConstraintRange; /* original C++ signature */ + constraint_range: Range + # ImPlot3DRange ConstraintZoom; /* original C++ signature */ + constraint_zoom: Range # User input # bool Hovered; /* original C++ signature */ hovered: bool @@ -571,9 +585,10 @@ class Axis: # ImPlot3DAxis() { /* original C++ signature */ # PreviousFlags = Flags = ImPlot3DAxisFlags_None; # // Range - # Range.Min = 0.0f; - # Range.Max = 1.0f; + # Range.Min = 0.0; + # Range.Max = 1.0; # RangeCond = ImPlot3DCond_None; + # NDCScale = 1.0; # // Ticks # Formatter = nullptr; # FormatterData = nullptr; @@ -581,8 +596,10 @@ class Axis: # ShowDefaultTicks = true; # // Fit data # FitThisFrame = true; - # FitExtents.Min = HUGE_VAL; - # FitExtents.Max = -HUGE_VAL; + # FitExtents = ImPlot3DRange(HUGE_VAL, -HUGE_VAL); + # // Constraints + # 
ConstraintRange = ImPlot3DRange(-INFINITY, INFINITY); + # ConstraintZoom = ImPlot3DRange(DBL_MIN, INFINITY); # // User input # Hovered = false; # Held = false; @@ -592,22 +609,27 @@ class Axis: pass # inline void Reset() { /* original C++ signature */ + # RangeCond = ImPlot3DCond_None; + # // Ticks + # Ticker.Reset(); # Formatter = nullptr; # FormatterData = nullptr; # Locator = nullptr; # ShowDefaultTicks = true; - # FitExtents.Min = HUGE_VAL; - # FitExtents.Max = -HUGE_VAL; - # RangeCond = ImPlot3DCond_None; - # Ticker.Reset(); + # // Fit data + # FitExtents = ImPlot3DRange(HUGE_VAL, -HUGE_VAL); + # // Constraints + # ConstraintRange = ImPlot3DRange(-INFINITY, INFINITY); + # ConstraintZoom = ImPlot3DRange(DBL_MIN, INFINITY); # } def reset(self) -> None: """(private API)""" pass # inline void SetRange(double v1, double v2) { /* original C++ signature */ - # Range.Min = (float)ImMin(v1, v2); - # Range.Max = (float)ImMax(v1, v2); + # Range.Min = ImMin(v1, v2); + # Range.Max = ImMax(v1, v2); + # Constrain(); # } def set_range(self, v1: float, v2: float) -> None: """(private API)""" @@ -616,9 +638,21 @@ class Axis: # inline bool SetMin(double _min, bool force = false) { /* original C++ signature */ # if (!force && IsLockedMin()) # return false; - # _min = ImPlot3D::ImConstrainNan((float)ImPlot3D::ImConstrainInf(_min)); + # _min = ImPlot3D::ImConstrainNan(ImPlot3D::ImConstrainInf(_min)); + # + # // Constraints + # if (_min < ConstraintRange.Min) + # _min = ConstraintRange.Min; + # double zoom = Range.Max - _min; + # if (zoom < ConstraintZoom.Min) + # _min = Range.Max - ConstraintZoom.Min; + # if (zoom > ConstraintZoom.Max) + # _min = Range.Max - ConstraintZoom.Max; + # + # // Ensure min is less than max # if (_min >= Range.Max) # return false; + # # Range.Min = (float)_min; # return true; # } @@ -629,7 +663,18 @@ class Axis: # inline bool SetMax(double _max, bool force = false) { /* original C++ signature */ # if (!force && IsLockedMax()) # return false; - # _max = 
ImPlot3D::ImConstrainNan((float)ImPlot3D::ImConstrainInf(_max)); + # _max = ImPlot3D::ImConstrainNan(ImPlot3D::ImConstrainInf(_max)); + # + # // Constraints + # if (_max > ConstraintRange.Max) + # _max = ConstraintRange.Max; + # double zoom = _max - Range.Min; + # if (zoom < ConstraintZoom.Min) + # _max = Range.Min + ConstraintZoom.Min; + # if (zoom > ConstraintZoom.Max) + # _max = Range.Min + ConstraintZoom.Max; + # + # // Ensure max is greater than min # if (_max <= Range.Min) # return false; # Range.Max = (float)_max; @@ -639,6 +684,31 @@ class Axis: """(private API)""" pass + # inline void Constrain() { /* original C++ signature */ + # Range.Min = ImPlot3D::ImConstrainNan(ImPlot3D::ImConstrainInf(Range.Min)); + # Range.Max = ImPlot3D::ImConstrainNan(ImPlot3D::ImConstrainInf(Range.Max)); + # if (Range.Min < ConstraintRange.Min) + # Range.Min = ConstraintRange.Min; + # if (Range.Max > ConstraintRange.Max) + # Range.Max = ConstraintRange.Max; + # double zoom = Range.Size(); + # if (zoom < ConstraintZoom.Min) { + # double delta = (ConstraintZoom.Min - zoom) * 0.5; + # Range.Min -= delta; + # Range.Max += delta; + # } + # if (zoom > ConstraintZoom.Max) { + # double delta = (zoom - ConstraintZoom.Max) * 0.5; + # Range.Min += delta; + # Range.Max -= delta; + # } + # if (Range.Max <= Range.Min) + # Range.Max = Range.Min + DBL_EPSILON; + # } + def constrain(self) -> None: + """(private API)""" + pass + # inline bool IsRangeLocked() const { return RangeCond == ImPlot3DCond_Always; } /* original C++ signature */ def is_range_locked(self) -> bool: """(private API)""" @@ -668,6 +738,22 @@ class Axis: """(private API)""" pass + # inline bool IsPanLocked(bool increasing) { /* original C++ signature */ + # if (ImPlot3D::ImHasFlag(Flags, ImPlot3DAxisFlags_PanStretch)) { + # return IsInputLocked(); + # } else { + # if (IsLockedMin() || IsLockedMax() || IsAutoFitting()) + # return false; + # if (increasing) + # return Range.Max == ConstraintRange.Max; + # else + # return 
Range.Min == ConstraintRange.Min; + # } + # } + def is_pan_locked(self, increasing: bool) -> bool: + """(private API)""" + pass + # inline void SetLabel(const char* label) { /* original C++ signature */ # Label.Buf.shrink(0); # if (label && ImGui::FindRenderedTextEnd(label, nullptr) != label) @@ -682,6 +768,36 @@ class Axis: """(private API)""" pass + # inline double NDCSize() const { /* original C++ signature */ + # // By default, the axis span from NDC -0.5 to 0.5, so size is 1.0 + # // If NDCScale is applied, the size is scaled accordingly + # return NDCScale; + # } + def ndc_size(self) -> float: + """(private API)""" + pass + + # inline void SetAspect(double units_per_ndc_unit) { /* original C++ signature */ + # double new_size = units_per_ndc_unit * NDCSize(); + # double delta = (new_size - Range.Size()) * 0.5; + # if (IsLocked()) + # return; + # else if (IsLockedMin() && !IsLockedMax()) + # SetRange(Range.Min, Range.Max + 2 * delta); + # else if (!IsLockedMin() && IsLockedMax()) + # SetRange(Range.Min - 2 * delta, Range.Max); + # else + # SetRange(Range.Min - delta, Range.Max + delta); + # } + def set_aspect(self, units_per_ndc_unit: float) -> None: + """(private API)""" + pass + + # double GetAspect() const { return Range.Size() / NDCSize(); } /* original C++ signature */ + def get_aspect(self) -> float: + """(private API)""" + pass + # bool HasLabel() const; /* original C++ signature */ def has_label(self) -> bool: """(private API)""" @@ -702,7 +818,7 @@ class Axis: def is_auto_fitting(self) -> bool: """(private API)""" pass - # void ExtendFit(float value); /* original C++ signature */ + # void ExtendFit(double value); /* original C++ signature */ def extend_fit(self, value: float) -> None: """(private API)""" pass @@ -737,8 +853,6 @@ class Plot: rotation: Quat # Current rotation quaternion # ImPlot3DCond RotationCond; /* original C++ signature */ rotation_cond: Cond - # ImPlot3DPoint BoxScale; /* original C++ signature */ - box_scale: Point # Scale factor 
for plot box X, Y, Z axes # Animation # float AnimationTime; /* original C++ signature */ animation_time: float # Remaining animation time @@ -755,6 +869,8 @@ class Plot: held_edge_idx: int # Index of the edge being held # int HeldPlaneIdx; /* original C++ signature */ held_plane_idx: int # Index of the plane being held + # ImPlot3DPoint DragRotationAxis; /* original C++ signature */ + drag_rotation_axis: Point # Axis of rotation for the duration of a drag # bool FitThisFrame; /* original C++ signature */ # Fit data fit_this_frame: bool @@ -771,18 +887,18 @@ class Plot: # PreviousFlags = Flags = ImPlot3DFlags_None; # JustCreated = true; # Initialized = false; - # InitialRotation = ImPlot3DQuat(-0.513269f, -0.212596f, -0.318184f, 0.76819f); - # Rotation = ImPlot3DQuat(0.0f, 0.0f, 0.0f, 1.0f); + # InitialRotation = ImPlot3DQuat(-0.513269, -0.212596, -0.318184, 0.76819); + # Rotation = ImPlot3DQuat(0.0, 0.0, 0.0, 1.0); # RotationCond = ImPlot3DCond_None; # for (int i = 0; i < 3; i++) # Axes[i] = ImPlot3DAxis(); - # BoxScale = ImPlot3DPoint(1.0f, 1.0f, 1.0f); # AnimationTime = 0.0f; # RotationAnimationEnd = Rotation; # SetupLocked = false; # Hovered = Held = false; # HeldEdgeIdx = -1; # HeldPlaneIdx = -1; + # DragRotationAxis = ImPlot3DPoint(0.0, 0.0, 0.0); # FitThisFrame = true; # ContextClick = false; # OpenContextThisFrame = false; @@ -813,27 +929,59 @@ class Plot: # void ExtendFit(const ImPlot3DPoint& point); /* original C++ signature */ def extend_fit(self, point: Point) -> None: - """(private API)""" + """ Extends the fit range of all three axes to include the provided point + (private API) + """ pass + # ImPlot3DPoint RangeMin() const; /* original C++ signature */ def range_min(self) -> Point: - """(private API)""" + """ Returns the minimum of the range in all three dimensions + (private API) + """ pass + # ImPlot3DPoint RangeMax() const; /* original C++ signature */ def range_max(self) -> Point: - """(private API)""" + """ Returns the maximum of the range in 
all three dimensions + (private API) + """ pass + # ImPlot3DPoint RangeCenter() const; /* original C++ signature */ def range_center(self) -> Point: - """(private API)""" + """ Returns the point at the center of the range in all three dimensions + (private API) + """ pass + # void SetRange(const ImPlot3DPoint& min, const ImPlot3DPoint& max); /* original C++ signature */ def set_range(self, min: Point, max: Point) -> None: - """(private API)""" + """ Sets the range of all three axes + (private API) + """ pass - # float GetBoxZoom() const; /* original C++ signature */ - def get_box_zoom(self) -> float: - """(private API)""" + + # float GetViewScale() const; /* original C++ signature */ + def get_view_scale(self) -> float: + """ Returns the scale of the plot view (constant to convert from NDC coordinates to pixels coordinates) + (private API) + """ + pass + + # ImPlot3DPoint GetBoxScale() const; /* original C++ signature */ + def get_box_scale(self) -> Point: + """ Returns the scale of the plot box in each dimension + (private API) + """ + pass + + # void ApplyEqualAspect(ImAxis3D ref_axis); /* original C++ signature */ + def apply_equal_aspect(self, ref_axis: ImAxis3D) -> None: + """ Applies equal aspect ratio constraint using the specified axis as reference. + Other axes are adjusted to match the reference axis's aspect ratio (units per NDC unit). + (private API) + """ pass class Context: @@ -900,6 +1048,20 @@ def get_auto_color(idx: Col) -> ImVec4: def get_style_color_name(idx: Col) -> str: pass +# Returns white or black text given background color +# static inline ImU32 CalcTextColor(const ImVec4& bg) { /* original C++ signature */ +# return (bg.x * 0.299f + bg.y * 0.587f + bg.z * 0.114f) > 0.5f ? 
IM_COL32_BLACK : IM_COL32_WHITE; +# } +@overload +def calc_text_color(bg: ImVec4Like) -> ImU32: + """(private API)""" + pass +# static inline ImU32 CalcTextColor(ImU32 bg) { return CalcTextColor(ImGui::ColorConvertU32ToFloat4(bg)); } /* original C++ signature */ +@overload +def calc_text_color(bg: ImU32) -> ImU32: + """(private API)""" + pass + # IMPLOT3D_API const ImPlot3DNextItemData& GetItemData(); /* original C++ signature */ def get_item_data() -> NextItemData: """ Get styling data for next item (call between BeginItem/EndItem)""" @@ -915,6 +1077,12 @@ def next_colormap_color_u32() -> ImU32: """ Returns the next unused colormap color and advances the colormap. Can be used to skip colors if desired""" pass +# IMPLOT3D_API void RenderColorBar(const ImU32* colors, int size, ImDrawList& DrawList, const ImRect& bounds, bool vert, bool reversed, /* original C++ signature */ +# bool continuous); +def render_color_bar(colors: ImU32, size: int, draw_list: ImDrawList, bounds: ImRect, vert: bool, reversed: bool, continuous: bool) -> None: + """ Render a colormap bar""" + pass + #----------------------------------------------------------------------------- # [SECTION] Item Utils #----------------------------------------------------------------------------- @@ -1012,7 +1180,6 @@ def setup_lock() -> None: -# namespace ImPlot3D # #endif #################### #################### diff --git a/blimgui/dist64/imgui_bundle/imspinner.pyi b/blimgui/dist64/imgui_bundle/imspinner.pyi index d111763..770c556 100644 --- a/blimgui/dist64/imgui_bundle/imspinner.pyi +++ b/blimgui/dist64/imgui_bundle/imspinner.pyi @@ -4,7 +4,7 @@ https://github.com/dalerank/imspinner # ruff: noqa: B008 import enum import math -from typing import Tuple, Optional +from typing import Tuple, Optional, overload from imgui_bundle.imgui import ImColor @@ -16,6 +16,10 @@ PI_2 = IM_PI * 2. LeafColor = ImColor +def demo_spinners() -> None: + ... + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
AUTOGENERATED CODE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # // Autogenerated code below! Do not edit! #################### #################### @@ -56,7 +60,7 @@ half_white: ImColor # = ImColor(1., 1., 1., 0.5) red: ImColor # = ImColor(1., 0., 0., 1.) -class SpinnerTypeT(enum.Enum): +class SpinnerTypeT(enum.IntEnum): e_st_rainbow = enum.auto() # (= 0) e_st_angle = enum.auto() # (= 1) e_st_dots = enum.auto() # (= 2) @@ -95,9 +99,57 @@ def damped_gravity(limtime: float) -> float: def damped_trifolium(limtime: float, a: float = 0., b: float = 1.) -> float: pass -def damped_infinity(a: float, t: float) -> Tuple[float, float]: +def damped_inoutelastic(t: float, amplitude: float, period: float) -> float: + pass + +def damped_infinity(t: float, a: float) -> Tuple[float, float]: pass +def ease_inquad(time: float) -> float: + pass +def ease_outquad(time: float) -> float: + pass +@overload +def ease_inoutquad(t: float) -> float: + pass +@overload +def ease_inoutquad(p: float) -> float: + pass +def ease_outcubic(t: float) -> float: + pass +def ease_inexpo(t: float) -> float: + pass +@overload +def ease_inoutexpo(t: float) -> float: + pass +@overload +def ease_inoutexpo(p: float) -> float: + pass +def ease_spring(p: float) -> float: + pass +def ease_gravity(p: float) -> float: + pass +def ease_infinity(p: float) -> float: + pass +def ease_inoutelastic(p: float) -> float: + pass +def ease_sine(p: float) -> float: + pass +def ease_damping(p: float) -> float: + pass + +class ease_mode(enum.IntEnum): + e_ease_none = enum.auto() # (= 0) + e_ease_inoutquad = enum.auto() # (= 1) + e_ease_inoutexpo = enum.auto() # (= 2) + e_ease_spring = enum.auto() # (= 3) + e_ease_gravity = enum.auto() # (= 4) + e_ease_infinity = enum.auto() # (= 5) + e_ease_elastic = enum.auto() # (= 6) + e_ease_sine = enum.auto() # (= 7) + e_ease_damping = enum.auto() # (= 8) + + def spinner_rainbow( label: str, radius: float, @@ -168,6 +220,23 @@ def spinner_ang( pass +def spinner_ang8( + label: str, + radius: 
float, + thickness: float, + color: Optional[ImColor] = None, + bg: Optional[ImColor] = None, + speed: float = 2.8, + angle: float = IM_PI, + mode: int = 0, + rkoef: float = 0.5 + ) -> None: + """Python bindings defaults: + If any of the params below is None, then its default value below will be used: + * color: white + * bg: white + """ + pass def spinner_ang_mix( label: str, @@ -248,7 +317,8 @@ def spinner_twin_pulsar( thickness: float, color: Optional[ImColor] = None, speed: float = 2.8, - rings: int = 2 + rings: int = 2, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: white @@ -260,7 +330,21 @@ def spinner_fade_pulsar( radius: float, color: Optional[ImColor] = None, speed: float = 2.8, - rings: int = 2 + rings: int = 2, + mode: int = 0 + ) -> None: + """Python bindings defaults: + If color is None, then its default value will be: white + """ + pass + +def spinner_fade_pulsar_square( + label: str, + radius: float, + color: Optional[ImColor] = None, + speed: float = 2.8, + rings: int = 2, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: white @@ -272,7 +356,8 @@ def spinner_circular_lines( radius: float, color: Optional[ImColor] = None, speed: float = 1.8, - lines: int = 8 + lines: int = 8, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: white @@ -303,7 +388,8 @@ def spinner_v_dots( bgcolor: Optional[ImColor] = None, speed: float = 2.8, dots: int = 12, - mdots: int = 6 + mdots: int = 6, + mode: int = 0 ) -> None: """Python bindings defaults: If any of the params below is None, then its default value below will be used: @@ -441,6 +527,22 @@ def spinner_multi_fade_dots( """ pass +def spinner_thick_to_sin( + label: str, + radius: float, + thickness: float, + color: Optional[ImColor] = None, + speed: float = 2.8, + nt: int = 1, + lt: int = 8, + mode: int = 0 + ) -> None: + """Python bindings 
defaults: + If color is None, then its default value will be: white + """ + pass + + def spinner_scale_dots( label: str, radius: float, @@ -660,7 +762,8 @@ def spinner_fade_tris( color: Optional[ImColor] = None, speed: float = 2.8, dim: int = 2, - scale: bool = False + scale: bool = False, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: white @@ -732,7 +835,8 @@ def spinner_arc_fade( thickness: float, color: Optional[ImColor] = None, speed: float = 2.8, - arcs: int = 4 + arcs: int = 4, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: white @@ -763,6 +867,8 @@ def spinner_square_stroke_fade( """ pass + + def spinner_ascii_symbol_points( label: str, text: str, @@ -851,6 +957,21 @@ def spinner_filled_arc_fade( """ pass +def spinner_points_roller( + label: str, + radius: float, + thickness: float, + color: Optional[ImColor] = None, + speed: float = 2.8, + points: int = 8, + circles: int = 2, + rspeed: float = 1. 
+ ) -> None: + """Python bindings defaults: + If color is None, then its default value will be: white + """ + pass + def spinner_points_arc_bounce( label: str, radius: float, @@ -902,7 +1023,8 @@ def spinner_arc_wedges( radius: float, color: Optional[ImColor] = None, speed: float = 2.8, - arcs: int = 4 + arcs: int = 4, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: red @@ -918,7 +1040,8 @@ def spinner_twin_ball( ball: Optional[ImColor] = None, bg: Optional[ImColor] = None, speed: float = 2.8, - balls: int = 2 + balls: int = 2, + mode: int = 0 ) -> None: """Python bindings defaults: If any of the params below is None, then its default value below will be used: @@ -1098,7 +1221,8 @@ def spinner_ing_yang( color_i: Optional[ImColor] = None, color_y: Optional[ImColor] = None, speed: float = 2.8, - angle: float = math.pi * 0.7 + angle: float = math.pi * 0.7, + mode: int = 0 ) -> None: """Python bindings defaults: If any of the params below is None, then its default value below will be used: @@ -1133,7 +1257,8 @@ def spinner_rotate_gooey_balls( thickness: float, color: ImColor, speed: float, - balls: int + balls: int, + mode: int = 0 ) -> None: pass @@ -1162,7 +1287,8 @@ def spinner_rotate_triangles( thickness: float, color: ImColor, speed: float, - tris: int + tris: int, + mode: int = 0 ) -> None: pass @@ -1182,7 +1308,8 @@ def spinner_sin_squares( radius: float, thickness: float, color: ImColor, - speed: float + speed: float, + mode: int = 0 ) -> None: pass @@ -1291,7 +1418,8 @@ def spinner_rotate_segments( color: Optional[ImColor] = None, speed: float = 2.8, arcs: int = 4, - layers: int = 1 + layers: int = 1, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: white @@ -1426,7 +1554,8 @@ def spinner_rotated_atom( thickness: float, color: Optional[ImColor] = None, speed: float = 2.8, - elipses: int = 3 + elipses: int = 3, + mode: int = 0 ) -> None: """Python 
bindings defaults: If color is None, then its default value will be: white @@ -1439,7 +1568,10 @@ def spinner_rainbow_balls( thickness: float, color: ImColor, speed: float, - balls: int = 5 + balls: int = 5, + mode: int = 0, + rings: int = 1, + mx: int = 1 ) -> None: pass @@ -1515,7 +1647,8 @@ def spinner_bar_chart_rainbow( thickness: float, color: ImColor, speed: float, - bars: int = 5 + bars: int = 5, + mode: int = 0 ) -> None: pass @@ -1601,7 +1734,8 @@ def spinner_arc_polar_fade( radius: float, color: Optional[ImColor] = None, speed: float = 2.8, - arcs: int = 4 + arcs: int = 4, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: white @@ -1613,7 +1747,8 @@ def spinner_arc_polar_radius( radius: float, color: Optional[ImColor] = None, speed: float = 2.8, - arcs: int = 4 + arcs: int = 4, + mode: int = 0 ) -> None: """Python bindings defaults: If color is None, then its default value will be: white @@ -1642,7 +1777,8 @@ def spinner_hbo_dots( minfade: float = 0.0, ryk: float = 0., speed: float = 1.1, - dots: int = 6 + dots: int = 6, + mode: int = 0 ) -> None: """ spinner idea by nitz 'Chris Dailey' @@ -1856,7 +1992,6 @@ def spinner_spline_ang( # #endif -# #endif -#################### #################### +# #endif #################### #################### # // Autogenerated code end! 
diff --git a/blimgui/dist64/imgui_bundle/nanovg.pyi b/blimgui/dist64/imgui_bundle/nanovg.pyi index 3da98af..d0c3022 100644 --- a/blimgui/dist64/imgui_bundle/nanovg.pyi +++ b/blimgui/dist64/imgui_bundle/nanovg.pyi @@ -101,19 +101,19 @@ class Paint: """ pass -class Winding(enum.Enum): +class Winding(enum.IntEnum): # NVG_CCW = 1, /* original C++ signature */ ccw = enum.auto() # (= 1) # Winding for solid shapes # NVG_CW = 2, /* original C++ signature */ cw = enum.auto() # (= 2) # Winding for holes -class Solidity(enum.Enum): +class Solidity(enum.IntEnum): # NVG_SOLID = 1, /* original C++ signature */ solid = enum.auto() # (= 1) # CCW # NVG_HOLE = 2, /* original C++ signature */ hole = enum.auto() # (= 2) # CW -class LineCap(enum.Enum): +class LineCap(enum.IntEnum): # NVG_BUTT, /* original C++ signature */ butt = enum.auto() # (= 0) # NVG_ROUND, /* original C++ signature */ @@ -126,7 +126,7 @@ class LineCap(enum.Enum): # } miter = enum.auto() # (= 4) -class Align(enum.Enum): +class Align(enum.IntEnum): # Horizontal align # NVG_ALIGN_LEFT = 1<<0, /* original C++ signature */ align_left = enum.auto() # (= 1<<0) # Default, align text horizontally to left. @@ -144,7 +144,7 @@ class Align(enum.Enum): # NVG_ALIGN_BASELINE = 1<<6, /* original C++ signature */ align_baseline = enum.auto() # (= 1<<6) # Default, align text vertically to baseline. 
-class BlendFactor(enum.Enum): +class BlendFactor(enum.IntEnum): # NVG_ZERO = 1<<0, /* original C++ signature */ zero = enum.auto() # (= 1<<0) # NVG_ONE = 1<<1, /* original C++ signature */ @@ -169,7 +169,7 @@ class BlendFactor(enum.Enum): # } src_alpha_saturate = enum.auto() # (= 1<<10) -class CompositeOperation(enum.Enum): +class CompositeOperation(enum.IntEnum): # NVG_SOURCE_OVER, /* original C++ signature */ source_over = enum.auto() # (= 0) # NVG_SOURCE_IN, /* original C++ signature */ @@ -256,7 +256,7 @@ class TextRow: """Auto-generated default constructor with named params""" pass -class ImageFlags(enum.Enum): +class ImageFlags(enum.IntEnum): # NVG_IMAGE_GENERATE_MIPMAPS = 1<<0, /* original C++ signature */ image_generate_mipmaps = enum.auto() # (= 1<<0) # Generate mipmaps during creation of the image. # NVG_IMAGE_REPEATX = 1<<1, /* original C++ signature */ @@ -1042,7 +1042,7 @@ def font_face(ctx: Context, font: str) -> None: -class Texture(enum.Enum): +class Texture(enum.IntEnum): """ Internal Render API @@ -1169,7 +1169,7 @@ def debug_dump_path_cache(ctx: Context) -> None: # class nvg_imgui: # Proxy class that introduces typings for the *submodule* nvg_imgui - pass # (This corresponds to a C++ namespace. All method are static!) + pass # (This corresponds to a C++ namespace. All methods are static!) #///////////////////////////////////////////////////////////////////////// # @@ -1177,7 +1177,7 @@ class nvg_imgui: # Proxy class that introduces typings for the *submodule* nvg_ # #/////////////////////////////////////////////////////////////////////// - class NvgCreateFlags(enum.Enum): + class NvgCreateFlags(enum.IntEnum): """ Combination of NVGcreateFlags in nanovg_gl.h + nanovg_mtl.h""" # NVG_ANTIALIAS = 1<<0, /* original C++ signature */ # Flag indicating if geometry based antialiasing is used (may not be needed when using MSAA). 
diff --git a/blimgui/dist64/imgui_bundle/portable_file_dialogs.pyi b/blimgui/dist64/imgui_bundle/portable_file_dialogs.pyi index 09b5f1e..3e08d67 100644 --- a/blimgui/dist64/imgui_bundle/portable_file_dialogs.pyi +++ b/blimgui/dist64/imgui_bundle/portable_file_dialogs.pyi @@ -14,7 +14,7 @@ default_wait_timeout = 20 # // Autogenerated code below! Do not edit! #################### #################### -class button(enum.Enum): +class button(enum.IntEnum): cancel = enum.auto() # (= -1) ok = enum.auto() # (= 0) yes = enum.auto() # (= 1) @@ -23,7 +23,7 @@ class button(enum.Enum): retry = enum.auto() # (= 4) ignore = enum.auto() # (= 5) -class choice(enum.Enum): +class choice(enum.IntEnum): ok = enum.auto() # (= 0) ok_cancel = enum.auto() # (= 1) yes_no = enum.auto() # (= 2) @@ -31,13 +31,13 @@ class choice(enum.Enum): retry_cancel = enum.auto() # (= 4) abort_retry_ignore = enum.auto() # (= 5) -class icon(enum.Enum): +class icon(enum.IntEnum): info = enum.auto() # (= 0) warning = enum.auto() # (= 1) error = enum.auto() # (= 2) question = enum.auto() # (= 3) -class opt(enum.Enum): +class opt(enum.IntEnum): """Additional option flags for various dialog constructors""" none = enum.auto() # (= 0) diff --git a/blimgui/dist64/imgui_bundle/pyodide_patch_runners.py b/blimgui/dist64/imgui_bundle/pyodide_patch_runners.py index 8edc6ef..0868d73 100644 --- a/blimgui/dist64/imgui_bundle/pyodide_patch_runners.py +++ b/blimgui/dist64/imgui_bundle/pyodide_patch_runners.py @@ -60,9 +60,9 @@ def request_stop(self): @dataclass class _RenderLifeCycleFunctions: - setup: Callable[[], None] = None - render: Callable[[], None] = None - tear_down: Callable[[], None] = None + setup: Callable[[], None] + render: Callable[[], None] + tear_down: Callable[[], None] class _HelloImGuiOrImmApp(Enum): @@ -73,12 +73,10 @@ class _HelloImGuiOrImmApp(Enum): def _arg_to_render_lifecycle_functions(himgui_or_immapp: _HelloImGuiOrImmApp, *args, **kwargs) -> _RenderLifeCycleFunctions: """Converts the arguments to 
the correct render lifecycle functions, depending on the type of arguments passed and whether it is a hello_imgui or immapp application.""" - functions = _RenderLifeCycleFunctions() - if himgui_or_immapp == _HelloImGuiOrImmApp.HELLO_IMGUI: render_module = hello_imgui.manual_render elif himgui_or_immapp == _HelloImGuiOrImmApp.IMMAPP: - render_module = immapp.manual_render + render_module = immapp.manual_render # type: ignore else: raise ValueError("Invalid value for himgui_or_immapp") @@ -89,20 +87,20 @@ def _arg_to_render_lifecycle_functions(himgui_or_immapp: _HelloImGuiOrImmApp, *a if use_runner_params: _log("overload with RunnerParams") - functions.setup = lambda: render_module.setup_from_runner_params(*args, **kwargs) + fn_setup = lambda: render_module.setup_from_runner_params(*args, **kwargs) # noqa: E731 elif use_simple_params: _log("overload with SimpleRunnerParams") - functions.setup = lambda: render_module.setup_from_simple_runner_params(*args, **kwargs) + fn_setup = lambda: render_module.setup_from_simple_runner_params(*args, **kwargs) # noqa: E731 elif use_gui_function: _log("overload with callable") - functions.setup = lambda:render_module.setup_from_gui_function(*args, **kwargs) + fn_setup = lambda:render_module.setup_from_gui_function(*args, **kwargs) # noqa: E731 else: raise ValueError("Invalid arguments") - functions.render = render_module.render - - functions.tear_down = render_module.tear_down + fn_render = render_module.render + fn_tear_down = render_module.tear_down + functions = _RenderLifeCycleFunctions(fn_setup, fn_render, fn_tear_down) return functions @@ -122,6 +120,7 @@ def _stop(self): self.js_animation_renderer = None try: + assert(self.js_animation_renderer is not None) self.render_lifecycle_functions.tear_down() self.render_lifecycle_functions = None _log("_ManualRenderJs: HelloImGuiRunnerJs: Renderer torn down successfully.") diff --git a/blimgui/dist64/imgui_bundle/python_backends/base_backend.py 
b/blimgui/dist64/imgui_bundle/python_backends/base_backend.py deleted file mode 100644 index cdcee70..0000000 --- a/blimgui/dist64/imgui_bundle/python_backends/base_backend.py +++ /dev/null @@ -1,33 +0,0 @@ -from imgui_bundle import imgui - - -class BaseOpenGLRenderer(object): - def __init__(self): - if not imgui.get_current_context(): - raise RuntimeError( - "No valid ImGui context. Use imgui.create_context() first and/or " - "imgui.set_current_context()." - ) - self.io = imgui.get_io() - - self._font_texture = None - - self.io.delta_time = 1.0 / 60.0 - - self._create_device_objects() - self.refresh_font_texture() - - def render(self, draw_data): - raise NotImplementedError - - def refresh_font_texture(self): - raise NotImplementedError - - def _create_device_objects(self): - raise NotImplementedError - - def _invalidate_device_objects(self): - raise NotImplementedError - - def shutdown(self): - self._invalidate_device_objects() diff --git a/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_glfw3.py b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_glfw3.py index 7e3b891..04ed6c9 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_glfw3.py +++ b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_glfw3.py @@ -14,7 +14,7 @@ os.environ["PYOPENGL_PLATFORM"] = "x11" -import OpenGL.GL as gl # type: ignore +import OpenGL.GL as gl # pip install PyOpenGL from imgui_bundle.python_backends.glfw_backend import GlfwRenderer # When using a pure python backend, prefer to import glfw before imgui_bundle (so that you end up using the standard glfw, not the one provided by imgui_bundle) import glfw # type: ignore @@ -31,19 +31,6 @@ class AppState: def init_fonts_and_markdown(): - # Note: - # The way font are loaded in this example is a bit tricky. - # We are not using imgui.backends.opengl3_XXX anywhere else, because the rendering is done via Python. 
- # - # Howver, we will here need to: - # - call imgui.backends.opengl3_init(glsl_version) at startup - # - call imgui.backends.opengl3_new_frame() after loading the fonts, because this is how ImGui - # will load the fonts into a texture (using imgui.get_io().fonts.build() is not enough) - - # We need to initialize the OpenGL backend (so that we can later call opengl3_new_frame) - imgui.backends.opengl3_init("#version 100") - - imgui.get_io().fonts.clear() # uncomment to keep using the default hardcoded font, or load your default font here # imgui.get_io().fonts.add_font_default() @@ -52,9 +39,6 @@ def init_fonts_and_markdown(): font_loader = imgui_md.get_font_loader_function() font_loader() - # We need to call this function to load the fonts into a texture - imgui.backends.opengl3_new_frame() - def main(): imgui.create_context() diff --git a/blimgui/dist64/imgui_bundle/python_backends/examples_disabled/example_python_backend_pygame.py b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_pygame.py similarity index 79% rename from blimgui/dist64/imgui_bundle/python_backends/examples_disabled/example_python_backend_pygame.py rename to blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_pygame.py index caa79cb..e5cc9ff 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/examples_disabled/example_python_backend_pygame.py +++ b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_pygame.py @@ -1,91 +1,97 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from __future__ import absolute_import - -from imgui_bundle import imgui -from imgui_bundle.python_backends.python_backends_disabled.pygame_backend import ( - PygameRenderer, -) -import OpenGL.GL as gl # type: ignore -import pygame # type: ignore -import sys - - -class AppState: - text: str = """Hello, World\nLorem ipsum, etc.\netc.""" - text2: str = "Ahh" - - -app_state = AppState() - - -def main(): - pygame.init() - size = 800, 600 - - 
pygame.display.set_mode(size, pygame.DOUBLEBUF | pygame.OPENGL | pygame.RESIZABLE) - - imgui.create_context() - impl = PygameRenderer() - - io = imgui.get_io() - io.display_size = size - - show_custom_window = True - - while 1: - for event in pygame.event.get(): - if event.type == pygame.QUIT: - sys.exit(0) - impl.process_event(event) - impl.process_inputs() - - imgui.new_frame() - - if imgui.begin_main_menu_bar(): - if imgui.begin_menu("File", True): - - clicked_quit, selected_quit = imgui.menu_item( - "Quit", "Cmd+Q", False, True - ) - - if clicked_quit: - sys.exit(0) - - imgui.end_menu() - imgui.end_main_menu_bar() - - if show_custom_window: - imgui.set_next_window_size((400, 400)) - is_expand, show_custom_window = imgui.begin("Custom window", True) - if is_expand: - imgui.text("Example Text") - if imgui.button("Hello"): - print("World") - _, app_state.text = imgui.input_text_multiline( - "Edit", app_state.text, imgui.ImVec2(200, 200) - ) - _, app_state.text2 = imgui.input_text("Text2", app_state.text2) - - io = imgui.get_io() - imgui.text(f""" - Keyboard modifiers: - {io.key_ctrl=} - {io.key_alt=} - {io.key_shift=} - {io.key_super=}""") - imgui.end() - - # note: cannot use screen.fill((1, 1, 1)) because pygame's screen - # does not support fill() on OpenGL sufraces - gl.glClearColor(1, 1, 1, 1) - gl.glClear(gl.GL_COLOR_BUFFER_BIT) - imgui.render() - impl.render(imgui.get_draw_data()) - - pygame.display.flip() - - -if __name__ == "__main__": - main() +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from imgui_bundle import imgui +from imgui_bundle.python_backends.pygame_backend import PygameRenderer +import OpenGL.GL as gl # pip install PyOpenGL +import pygame # type: ignore +import sys + + +class AppState: + text: str = """Hello, World\nLorem ipsum, etc.\netc.""" + text2: str = "Ahh" + + +app_state = AppState() + + +def main(): + pygame.init() + size = 800, 600 + + # --- request an OpenGL 3.3 core (forward-compatible) 
context --- + pygame.display.gl_set_attribute(pygame.GL_CONTEXT_MAJOR_VERSION, 3) + pygame.display.gl_set_attribute(pygame.GL_CONTEXT_MINOR_VERSION, 3) + pygame.display.gl_set_attribute( + pygame.GL_CONTEXT_PROFILE_MASK, pygame.GL_CONTEXT_PROFILE_CORE + ) + # macOS needs the forward-compatible flag too + pygame.display.gl_set_attribute(pygame.GL_CONTEXT_FORWARD_COMPATIBLE_FLAG, 1) + pygame.display.set_mode(size, pygame.DOUBLEBUF | pygame.OPENGL | pygame.RESIZABLE) + + imgui.create_context() + impl = PygameRenderer() + + io = imgui.get_io() + io.display_size = size + + show_custom_window = True + + while 1: + for event in pygame.event.get(): + if event.type == pygame.QUIT: + sys.exit(0) + impl.process_event(event) + impl.process_inputs() + + imgui.new_frame() + + if imgui.begin_main_menu_bar(): + if imgui.begin_menu("File", True): + + clicked_quit, selected_quit = imgui.menu_item( + "Quit", "Cmd+Q", False, True + ) + + if clicked_quit: + sys.exit(0) + + imgui.end_menu() + imgui.end_main_menu_bar() + + if show_custom_window: + imgui.set_next_window_size((400, 400)) + is_expand, show_custom_window = imgui.begin("Custom window", True) + if is_expand: + imgui.text("Example Text") + if imgui.button("Hello"): + print("World") + _, app_state.text = imgui.input_text_multiline( + "Edit", app_state.text, imgui.ImVec2(200, 200) + ) + _, app_state.text2 = imgui.input_text("Text2", app_state.text2) + + io = imgui.get_io() + imgui.text(f""" + Keyboard modifiers: + {io.key_ctrl=} + {io.key_alt=} + {io.key_shift=} + {io.key_super=}""") + imgui.end() + + # note: cannot use screen.fill((1, 1, 1)) because pygame's screen + # does not support fill() on OpenGL sufraces + gl.glClearColor(1, 1, 1, 1) + gl.glClear(gl.GL_COLOR_BUFFER_BIT) + imgui.render() + impl.render(imgui.get_draw_data()) + + pygame.display.flip() + + +if __name__ == "__main__": + main() diff --git a/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_sdl2.py 
b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_sdl2.py index 4f9577b..02effd0 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_sdl2.py +++ b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_sdl2.py @@ -9,7 +9,7 @@ from imgui_bundle import imgui from imgui_bundle.python_backends.sdl2_backend import SDL2Renderer -import OpenGL.GL as gl # type: ignore +import OpenGL.GL as gl # pip install PyOpenGL from sdl2 import * # type: ignore import ctypes import sys diff --git a/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_sdl3.py b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_sdl3.py index 4ccc07c..40eae4f 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_sdl3.py +++ b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_sdl3.py @@ -9,7 +9,7 @@ from imgui_bundle import imgui from imgui_bundle.python_backends.sdl3_backend import SDL3Renderer -import OpenGL.GL as gl # type: ignore +import OpenGL.GL as gl # pip install PyOpenGL from sdl3 import * # type: ignore import ctypes import sys diff --git a/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_wgpu.py b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_wgpu.py index e846f30..66fefa0 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_wgpu.py +++ b/blimgui/dist64/imgui_bundle/python_backends/examples/example_python_backend_wgpu.py @@ -6,17 +6,19 @@ # https://github.com/pygfx/wgpu-py/tree/main/examples # (look for examples whose name starts with "imgui_") # -# Requirements: install wgpu with `pip install wgpu` - +# Requirements: install wgpu and rendercanvas with +# pip install wgpu rendercanvas import wgpu import sys from imgui_bundle import imgui, imgui_ctx -from wgpu.gui.auto import WgpuCanvas, run +from 
rendercanvas.auto import RenderCanvas, loop from wgpu.utils.imgui import ImguiRenderer # Create a canvas to render to -canvas = WgpuCanvas(title="imgui", size=(640, 480)) +canvas = RenderCanvas( + title="imgui", size=(640, 480), max_fps=60, update_mode="continuous" +) # Create a wgpu device adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") @@ -27,7 +29,6 @@ def update_gui(): - imgui.new_frame() if imgui.begin_main_menu_bar(): if imgui.begin_menu("File", True): clicked_quit, _ = imgui.menu_item("Quit", "Cmd+Q", False, True) @@ -37,6 +38,8 @@ def update_gui(): imgui.end_menu() imgui.end_main_menu_bar() + imgui.show_demo_window() + imgui.set_next_window_size((300, 0), imgui.Cond_.appearing) imgui.set_next_window_pos((0, 20), imgui.Cond_.appearing) @@ -69,21 +72,11 @@ def update_gui(): imgui.end() - imgui.end_frame() - imgui.render() - - return imgui.get_draw_data() - # set the GUI update function that gets called to return the draw data imgui_renderer.set_gui(update_gui) -def draw_frame(): - imgui_renderer.render() - canvas.request_draw() - - if __name__ == "__main__": - canvas.request_draw(draw_frame) - run() + canvas.request_draw(imgui_renderer.render) + loop.run() diff --git a/blimgui/dist64/imgui_bundle/python_backends/glfw_backend.py b/blimgui/dist64/imgui_bundle/python_backends/glfw_backend.py index 6ed3adc..dbcfdc2 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/glfw_backend.py +++ b/blimgui/dist64/imgui_bundle/python_backends/glfw_backend.py @@ -2,10 +2,10 @@ from __future__ import absolute_import from imgui_bundle import imgui -import glfw # type: ignore +import glfw # pip install glfw from imgui_bundle.python_backends import compute_fb_scale -from .opengl_backend import ProgrammablePipelineRenderer +from .opengl_backend_programmable import ProgrammablePipelineRenderer from typing import Dict @@ -30,7 +30,8 @@ def __init__(self, window, attach_callbacks: bool = True): self.io.display_size = 
glfw.get_framebuffer_size(self.window) def get_clipboard_text(_ctx: imgui.internal.Context) -> str: - return glfw.get_clipboard_string(self.window) + s = glfw.get_clipboard_string(self.window) + return s.decode() def set_clipboard_text(_ctx: imgui.internal.Context, text: str) -> None: glfw.set_clipboard_string(self.window, text) diff --git a/blimgui/dist64/imgui_bundle/python_backends/opengl_backend.py b/blimgui/dist64/imgui_bundle/python_backends/opengl_backend.py deleted file mode 100644 index a48284a..0000000 --- a/blimgui/dist64/imgui_bundle/python_backends/opengl_backend.py +++ /dev/null @@ -1,535 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import OpenGL.GL as gl # noqa -from imgui_bundle import imgui -import ctypes -import numpy as np - -from .base_backend import BaseOpenGLRenderer - - -class ProgrammablePipelineRenderer(BaseOpenGLRenderer): - """Basic OpenGL integration base class.""" - - VERTEX_SHADER_SRC = """ - #version 330 - - uniform mat4 ProjMtx; - in vec2 Position; - in vec2 UV; - in vec4 Color; - out vec2 Frag_UV; - out vec4 Frag_Color; - - void main() { - Frag_UV = UV; - Frag_Color = Color; - - gl_Position = ProjMtx * vec4(Position.xy, 0, 1); - } - """ - - FRAGMENT_SHADER_SRC = """ - #version 330 - - uniform sampler2D Texture; - in vec2 Frag_UV; - in vec4 Frag_Color; - out vec4 Out_Color; - - void main() { - Out_Color = Frag_Color * texture(Texture, Frag_UV.st); - } - """ - - def __init__(self): - self._shader_handle = None - self._vert_handle = None - self._fragment_handle = None - - self._attrib_location_tex = None - self._attrib_proj_mtx = None - self._attrib_location_position = None - self._attrib_location_uv = None - self._attrib_location_color = None - - self._vbo_handle = None - self._elements_handle = None - self._vao_handle = None - - super(ProgrammablePipelineRenderer, self).__init__() - - def refresh_font_texture(self): - # save texture state - last_texture = gl.glGetIntegerv(gl.GL_TEXTURE_BINDING_2D) - - # 
width, height, pixels = self.io.fonts.get_tex_data_as_rgba32() - font_matrix: np.ndarray = self.io.fonts.get_tex_data_as_rgba32() - width = font_matrix.shape[1] - height = font_matrix.shape[0] - pixels = font_matrix.data - - if self._font_texture is not None: - gl.glDeleteTextures([self._font_texture]) - - self._font_texture = gl.glGenTextures(1) - - gl.glBindTexture(gl.GL_TEXTURE_2D, self._font_texture) - gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) - gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) - gl.glTexImage2D( - gl.GL_TEXTURE_2D, - 0, - gl.GL_RGBA, - width, - height, - 0, - gl.GL_RGBA, - gl.GL_UNSIGNED_BYTE, - pixels, - ) - - self.io.fonts.tex_id = self._font_texture - gl.glBindTexture(gl.GL_TEXTURE_2D, last_texture) - self.io.fonts.clear_tex_data() - - def _create_device_objects(self): - # save state - last_texture = gl.glGetIntegerv(gl.GL_TEXTURE_BINDING_2D) - last_array_buffer = gl.glGetIntegerv(gl.GL_ARRAY_BUFFER_BINDING) - - last_vertex_array = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) - - self._shader_handle = gl.glCreateProgram() - # note: no need to store shader parts handles after linking - vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER) - fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER) - - gl.glShaderSource(vertex_shader, self.VERTEX_SHADER_SRC) - gl.glShaderSource(fragment_shader, self.FRAGMENT_SHADER_SRC) - gl.glCompileShader(vertex_shader) - gl.glCompileShader(fragment_shader) - - gl.glAttachShader(self._shader_handle, vertex_shader) - gl.glAttachShader(self._shader_handle, fragment_shader) - - gl.glLinkProgram(self._shader_handle) - - # note: after linking shaders can be removed - gl.glDeleteShader(vertex_shader) - gl.glDeleteShader(fragment_shader) - - self._attrib_location_tex = gl.glGetUniformLocation( - self._shader_handle, "Texture" - ) - self._attrib_proj_mtx = gl.glGetUniformLocation(self._shader_handle, "ProjMtx") - self._attrib_location_position = 
gl.glGetAttribLocation( - self._shader_handle, "Position" - ) - self._attrib_location_uv = gl.glGetAttribLocation(self._shader_handle, "UV") - self._attrib_location_color = gl.glGetAttribLocation( - self._shader_handle, "Color" - ) - - self._vbo_handle = gl.glGenBuffers(1) - self._elements_handle = gl.glGenBuffers(1) - - self._vao_handle = gl.glGenVertexArrays(1) - gl.glBindVertexArray(self._vao_handle) - gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._vbo_handle) - - gl.glEnableVertexAttribArray(self._attrib_location_position) - gl.glEnableVertexAttribArray(self._attrib_location_uv) - gl.glEnableVertexAttribArray(self._attrib_location_color) - - gl.glVertexAttribPointer( - self._attrib_location_position, - 2, - gl.GL_FLOAT, - gl.GL_FALSE, - imgui.VERTEX_SIZE, - ctypes.c_void_p(imgui.VERTEX_BUFFER_POS_OFFSET), - ) - gl.glVertexAttribPointer( - self._attrib_location_uv, - 2, - gl.GL_FLOAT, - gl.GL_FALSE, - imgui.VERTEX_SIZE, - ctypes.c_void_p(imgui.VERTEX_BUFFER_UV_OFFSET), - ) - gl.glVertexAttribPointer( - self._attrib_location_color, - 4, - gl.GL_UNSIGNED_BYTE, - gl.GL_TRUE, - imgui.VERTEX_SIZE, - ctypes.c_void_p(imgui.VERTEX_BUFFER_COL_OFFSET), - ) - - # restore state - gl.glBindTexture(gl.GL_TEXTURE_2D, last_texture) - gl.glBindBuffer(gl.GL_ARRAY_BUFFER, last_array_buffer) - gl.glBindVertexArray(last_vertex_array) - - def render(self, draw_data: imgui.ImDrawData): - # perf: local for faster access - io = self.io - - display_width, display_height = io.display_size - fb_width = int(display_width * io.display_framebuffer_scale[0]) - fb_height = int(display_height * io.display_framebuffer_scale[1]) - - if fb_width == 0 or fb_height == 0: - return - - draw_data.scale_clip_rects(io.display_framebuffer_scale) - - # backup GL state - # todo: provide cleaner version of this backup-restore code - common_gl_state_tuple = get_common_gl_state() - last_program = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM) - last_active_texture = gl.glGetIntegerv(gl.GL_ACTIVE_TEXTURE) - 
last_array_buffer = gl.glGetIntegerv(gl.GL_ARRAY_BUFFER_BINDING) - last_element_array_buffer = gl.glGetIntegerv(gl.GL_ELEMENT_ARRAY_BUFFER_BINDING) - last_vertex_array = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) - - gl.glEnable(gl.GL_BLEND) - gl.glBlendEquation(gl.GL_FUNC_ADD) - gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA) - gl.glDisable(gl.GL_CULL_FACE) - gl.glDisable(gl.GL_DEPTH_TEST) - gl.glEnable(gl.GL_SCISSOR_TEST) - gl.glActiveTexture(gl.GL_TEXTURE0) - gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL) - - gl.glViewport(0, 0, int(fb_width), int(fb_height)) - - ortho_projection = (ctypes.c_float * 16)( # noqa - 2.0 / display_width, - 0.0, - 0.0, - 0.0, - 0.0, - 2.0 / -display_height, - 0.0, - 0.0, - 0.0, - 0.0, - -1.0, - 0.0, - -1.0, - 1.0, - 0.0, - 1.0, - ) - - gl.glUseProgram(self._shader_handle) - gl.glUniform1i(self._attrib_location_tex, 0) - gl.glUniformMatrix4fv(self._attrib_proj_mtx, 1, gl.GL_FALSE, ortho_projection) - gl.glBindVertexArray(self._vao_handle) - - for commands in draw_data.cmd_lists: - - gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._vbo_handle) - # todo: check this (sizes) - gl.glBufferData( - gl.GL_ARRAY_BUFFER, - commands.vtx_buffer.size() * imgui.VERTEX_SIZE, - ctypes.c_void_p(commands.vtx_buffer.data_address()), - gl.GL_STREAM_DRAW, - ) - - gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self._elements_handle) - # todo: check this (sizes) - gl.glBufferData( - gl.GL_ELEMENT_ARRAY_BUFFER, - commands.idx_buffer.size() * imgui.INDEX_SIZE, - ctypes.c_void_p(commands.idx_buffer.data_address()), - gl.GL_STREAM_DRAW, - ) - - # todo: allow to iterate over _CmdList - for command in commands.cmd_buffer: - gl.glBindTexture(gl.GL_TEXTURE_2D, command.texture_id) - - # todo: use named tuple - x, y, z, w = command.clip_rect - gl.glScissor(int(x), int(fb_height - w), int(z - x), int(w - y)) - - if imgui.INDEX_SIZE == 2: - gltype = gl.GL_UNSIGNED_SHORT - else: - gltype = gl.GL_UNSIGNED_INT - - gl.glDrawElements( - gl.GL_TRIANGLES, - 
command.elem_count, - gltype, - ctypes.c_void_p(command.idx_offset * imgui.INDEX_SIZE), - ) - - - # restore modified GL state - restore_common_gl_state(common_gl_state_tuple) - - gl.glUseProgram(last_program) - gl.glActiveTexture(last_active_texture) - gl.glBindVertexArray(last_vertex_array) - gl.glBindBuffer(gl.GL_ARRAY_BUFFER, last_array_buffer) - gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, last_element_array_buffer) - - def _invalidate_device_objects(self): - if self._vao_handle > -1: - gl.glDeleteVertexArrays(1, [self._vao_handle]) - if self._vbo_handle > -1: - gl.glDeleteBuffers(1, [self._vbo_handle]) - if self._elements_handle > -1: - gl.glDeleteBuffers(1, [self._elements_handle]) - self._vao_handle = self._vbo_handle = self._elements_handle = 0 - - gl.glDeleteProgram(self._shader_handle) - self._shader_handle = 0 - - if self._font_texture > -1: - gl.glDeleteTextures([self._font_texture]) - self.io.fonts.tex_id = 0 - self._font_texture = 0 - - -class FixedPipelineRenderer( - BaseOpenGLRenderer -): # Probably buggy (bad rendering with pygame) - """Basic OpenGL integration base class.""" - - # note: no need to override __init__ - - def refresh_font_texture(self): - # save texture state - # last_texture = gl.glGetIntegerv(gl.GL_TEXTURE_BINDING_2D) - # width, height, pixels = self.io.fonts.get_tex_data_as_alpha8() - texture: np.array = self.io.fonts.get_tex_data_as_rgba32() - - if self._font_texture is not None: - gl.glDeleteTextures([self._font_texture]) - - self._font_texture = gl.glGenTextures(1) - gl.glBindTexture(gl.GL_TEXTURE_2D, self._font_texture) - gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) - gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) - # gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_ALPHA, width, height, 0, gl.GL_ALPHA, gl.GL_UNSIGNED_BYTE, pixels) - gl.glTexImage2D( - gl.GL_TEXTURE_2D, - 0, - gl.GL_ALPHA, - texture.shape[0], - texture.shape[1], - 0, - gl.GL_ALPHA, - gl.GL_UNSIGNED_BYTE, - 
texture.data, - ) - - self.io.fonts.tex_id = self._font_texture - # gl.glBindTexture(gl.GL_TEXTURE_2D, last_texture) - self.io.fonts.clear_tex_data() - - def _create_device_objects(self): - pass - - def render(self, draw_data): - # perf: local for faster access - io = self.io - - display_width, display_height = io.display_size - fb_width = int(display_width * io.display_framebuffer_scale[0]) - fb_height = int(display_height * io.display_framebuffer_scale[1]) - - if fb_width == 0 or fb_height == 0: - return - - draw_data.scale_clip_rects(io.display_framebuffer_scale) - - # note: we are using fixed pipeline for cocos2d/pyglet - # todo: consider porting to programmable pipeline - # backup gl state - common_gl_state_tuple = get_common_gl_state() - - gl.glPushAttrib(gl.GL_ENABLE_BIT | gl.GL_COLOR_BUFFER_BIT | gl.GL_TRANSFORM_BIT) - gl.glEnable(gl.GL_BLEND) - gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA) - gl.glDisable(gl.GL_CULL_FACE) - gl.glDisable(gl.GL_DEPTH_TEST) - gl.glEnable(gl.GL_SCISSOR_TEST) - gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL) - - gl.glEnableClientState(gl.GL_VERTEX_ARRAY) - gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY) - gl.glEnableClientState(gl.GL_COLOR_ARRAY) - gl.glEnable(gl.GL_TEXTURE_2D) - - gl.glViewport(0, 0, int(fb_width), int(fb_height)) - gl.glMatrixMode(gl.GL_PROJECTION) - gl.glPushMatrix() - gl.glLoadIdentity() - gl.glOrtho(0, io.display_size.x, io.display_size.y, 0.0, -1.0, 1.0) - gl.glMatrixMode(gl.GL_MODELVIEW) - gl.glPushMatrix() - gl.glLoadIdentity() - - for commands in draw_data.cmd_lists: - - gl.glVertexPointer( - 2, - gl.GL_FLOAT, - imgui.VERTEX_SIZE, - ctypes.c_void_p( - commands.vtx_buffer.data_address() + imgui.VERTEX_BUFFER_POS_OFFSET - ), - ) - gl.glTexCoordPointer( - 2, - gl.GL_FLOAT, - imgui.VERTEX_SIZE, - ctypes.c_void_p( - commands.vtx_buffer.data_address() + imgui.VERTEX_BUFFER_UV_OFFSET - ), - ) - gl.glColorPointer( - 4, - gl.GL_UNSIGNED_BYTE, - imgui.VERTEX_SIZE, - ctypes.c_void_p( - 
commands.vtx_buffer.data_address() + imgui.VERTEX_BUFFER_COL_OFFSET - ), - ) - - for command in commands.cmd_buffer: - gl.glBindTexture(gl.GL_TEXTURE_2D, command.texture_id) - - x, y, z, w = command.clip_rect - gl.glScissor(int(x), int(fb_height - w), int(z - x), int(w - y)) - - if imgui.INDEX_SIZE == 2: - gltype = gl.GL_UNSIGNED_SHORT - else: - gltype = gl.GL_UNSIGNED_INT - - gl.glDrawElements( - gl.GL_TRIANGLES, - command.elem_count, - gltype, - ctypes.c_void_p(command.idx_offset * imgui.INDEX_SIZE), - ) - - - restore_common_gl_state(common_gl_state_tuple) - - gl.glDisableClientState(gl.GL_COLOR_ARRAY) - gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY) - gl.glDisableClientState(gl.GL_VERTEX_ARRAY) - - gl.glMatrixMode(gl.GL_MODELVIEW) - gl.glPopMatrix() - gl.glMatrixMode(gl.GL_PROJECTION) - gl.glPopMatrix() - gl.glPopAttrib() - - def _invalidate_device_objects(self): - if self._font_texture > -1: - gl.glDeleteTextures([self._font_texture]) - self.io.fonts.texture_id = 0 - self._font_texture = 0 - - -def get_common_gl_state(): - """ - Backups the current OpenGL state - Returns a tuple of results for glGet / glIsEnabled calls - NOTE: when adding more backuped state in the future, - make sure to update function `restore_common_gl_state` - """ - last_texture = gl.glGetIntegerv(gl.GL_TEXTURE_BINDING_2D) - last_viewport = gl.glGetIntegerv(gl.GL_VIEWPORT) - last_enable_blend = gl.glIsEnabled(gl.GL_BLEND) - last_enable_cull_face = gl.glIsEnabled(gl.GL_CULL_FACE) - last_enable_depth_test = gl.glIsEnabled(gl.GL_DEPTH_TEST) - last_enable_scissor_test = gl.glIsEnabled(gl.GL_SCISSOR_TEST) - last_scissor_box = gl.glGetIntegerv(gl.GL_SCISSOR_BOX) - last_blend_src = gl.glGetIntegerv(gl.GL_BLEND_SRC) - last_blend_dst = gl.glGetIntegerv(gl.GL_BLEND_DST) - last_blend_equation_rgb = gl.glGetIntegerv(gl.GL_BLEND_EQUATION_RGB) - last_blend_equation_alpha = gl.glGetIntegerv(gl.GL_BLEND_EQUATION_ALPHA) - last_front_and_back_polygon_mode, _ = gl.glGetIntegerv(gl.GL_POLYGON_MODE) - 
return ( - last_texture, - last_viewport, - last_enable_blend, - last_enable_cull_face, - last_enable_depth_test, - last_enable_scissor_test, - last_scissor_box, - last_blend_src, - last_blend_dst, - last_blend_equation_rgb, - last_blend_equation_alpha, - last_front_and_back_polygon_mode, - ) - - -def restore_common_gl_state(common_gl_state_tuple): - """ - Takes a tuple after calling function `get_common_gl_state`, - to set the given OpenGL state back as it was before rendering the UI - """ - ( - last_texture, - last_viewport, - last_enable_blend, - last_enable_cull_face, - last_enable_depth_test, - last_enable_scissor_test, - last_scissor_box, - last_blend_src, - last_blend_dst, - last_blend_equation_rgb, - last_blend_equation_alpha, - last_front_and_back_polygon_mode, - ) = common_gl_state_tuple - - gl.glBindTexture(gl.GL_TEXTURE_2D, last_texture) - gl.glBlendEquationSeparate(last_blend_equation_rgb, last_blend_equation_alpha) - gl.glBlendFunc(last_blend_src, last_blend_dst) - - gl.glPolygonMode(gl.GL_FRONT_AND_BACK, last_front_and_back_polygon_mode) - - if last_enable_blend: - gl.glEnable(gl.GL_BLEND) - else: - gl.glDisable(gl.GL_BLEND) - - if last_enable_cull_face: - gl.glEnable(gl.GL_CULL_FACE) - else: - gl.glDisable(gl.GL_CULL_FACE) - - if last_enable_depth_test: - gl.glEnable(gl.GL_DEPTH_TEST) - else: - gl.glDisable(gl.GL_DEPTH_TEST) - - if last_enable_scissor_test: - gl.glEnable(gl.GL_SCISSOR_TEST) - else: - gl.glDisable(gl.GL_SCISSOR_TEST) - - gl.glScissor( - last_scissor_box[0], - last_scissor_box[1], - last_scissor_box[2], - last_scissor_box[3], - ) - gl.glViewport( - last_viewport[0], last_viewport[1], last_viewport[2], last_viewport[3] - ) diff --git a/blimgui/dist64/imgui_bundle/python_backends/opengl_backend_programmable.py b/blimgui/dist64/imgui_bundle/python_backends/opengl_backend_programmable.py new file mode 100644 index 0000000..386fa77 --- /dev/null +++ b/blimgui/dist64/imgui_bundle/python_backends/opengl_backend_programmable.py @@ -0,0 +1,262 
@@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +import OpenGL.GL as gl # pip install PyOpenGL +from imgui_bundle import imgui +import ctypes + +from .opengl_base_backend import BaseOpenGLRenderer, get_common_gl_state, restore_common_gl_state + + +class ProgrammablePipelineRenderer(BaseOpenGLRenderer): + """Basic OpenGL integration base class.""" + + VERTEX_SHADER_SRC = """ + #version 330 + + uniform mat4 ProjMtx; + in vec2 Position; + in vec2 UV; + in vec4 Color; + out vec2 Frag_UV; + out vec4 Frag_Color; + + void main() { + Frag_UV = UV; + Frag_Color = Color; + + gl_Position = ProjMtx * vec4(Position.xy, 0, 1); + } + """ + + FRAGMENT_SHADER_SRC = """ + #version 330 + + uniform sampler2D Texture; + in vec2 Frag_UV; + in vec4 Frag_Color; + out vec4 Out_Color; + + void main() { + Out_Color = Frag_Color * texture(Texture, Frag_UV.st); + } + """ + + def __init__(self): + self._shader_handle = None + self._vert_handle = None + self._fragment_handle = None + + self._attrib_location_tex = None + self._attrib_proj_mtx = None + self._attrib_location_position = None + self._attrib_location_uv = None + self._attrib_location_color = None + + self._vbo_handle = None + self._elements_handle = None + self._vao_handle = None + + + super(ProgrammablePipelineRenderer, self).__init__() + + def _create_device_objects(self): + # save state + last_texture = gl.glGetIntegerv(gl.GL_TEXTURE_BINDING_2D) + last_array_buffer = gl.glGetIntegerv(gl.GL_ARRAY_BUFFER_BINDING) + + last_vertex_array = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) + + self._shader_handle = gl.glCreateProgram() + # note: no need to store shader parts handles after linking + vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER) + fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER) + + gl.glShaderSource(vertex_shader, self.VERTEX_SHADER_SRC) + gl.glShaderSource(fragment_shader, self.FRAGMENT_SHADER_SRC) + gl.glCompileShader(vertex_shader) + gl.glCompileShader(fragment_shader) + + 
gl.glAttachShader(self._shader_handle, vertex_shader) + gl.glAttachShader(self._shader_handle, fragment_shader) + + gl.glLinkProgram(self._shader_handle) + + # note: after linking shaders can be removed + gl.glDeleteShader(vertex_shader) + gl.glDeleteShader(fragment_shader) + + self._attrib_location_tex = gl.glGetUniformLocation( + self._shader_handle, "Texture" + ) + self._attrib_proj_mtx = gl.glGetUniformLocation(self._shader_handle, "ProjMtx") + self._attrib_location_position = gl.glGetAttribLocation( + self._shader_handle, "Position" + ) + self._attrib_location_uv = gl.glGetAttribLocation(self._shader_handle, "UV") + self._attrib_location_color = gl.glGetAttribLocation( + self._shader_handle, "Color" + ) + + self._vbo_handle = gl.glGenBuffers(1) + self._elements_handle = gl.glGenBuffers(1) + + self._vao_handle = gl.glGenVertexArrays(1) + gl.glBindVertexArray(self._vao_handle) + gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._vbo_handle) + + gl.glEnableVertexAttribArray(self._attrib_location_position) + gl.glEnableVertexAttribArray(self._attrib_location_uv) + gl.glEnableVertexAttribArray(self._attrib_location_color) + + gl.glVertexAttribPointer( + self._attrib_location_position, + 2, + gl.GL_FLOAT, + gl.GL_FALSE, + imgui.VERTEX_SIZE, + ctypes.c_void_p(imgui.VERTEX_BUFFER_POS_OFFSET), + ) + gl.glVertexAttribPointer( + self._attrib_location_uv, + 2, + gl.GL_FLOAT, + gl.GL_FALSE, + imgui.VERTEX_SIZE, + ctypes.c_void_p(imgui.VERTEX_BUFFER_UV_OFFSET), + ) + gl.glVertexAttribPointer( + self._attrib_location_color, + 4, + gl.GL_UNSIGNED_BYTE, + gl.GL_TRUE, + imgui.VERTEX_SIZE, + ctypes.c_void_p(imgui.VERTEX_BUFFER_COL_OFFSET), + ) + + # restore state + gl.glBindTexture(gl.GL_TEXTURE_2D, last_texture) + gl.glBindBuffer(gl.GL_ARRAY_BUFFER, last_array_buffer) + gl.glBindVertexArray(last_vertex_array) + + def render(self, draw_data: imgui.ImDrawData): + # perf: local for faster access + io = self.io + + display_width, display_height = io.display_size + fb_width = 
int(display_width * io.display_framebuffer_scale[0]) + fb_height = int(display_height * io.display_framebuffer_scale[1]) + + # Honor RendererHasTextures + self._update_textures() + + if fb_width == 0 or fb_height == 0: + return + + draw_data.scale_clip_rects(io.display_framebuffer_scale) + + # backup GL state + # todo: provide cleaner version of this backup-restore code + common_gl_state_tuple = get_common_gl_state() + last_program = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM) + last_active_texture = gl.glGetIntegerv(gl.GL_ACTIVE_TEXTURE) + last_array_buffer = gl.glGetIntegerv(gl.GL_ARRAY_BUFFER_BINDING) + last_element_array_buffer = gl.glGetIntegerv(gl.GL_ELEMENT_ARRAY_BUFFER_BINDING) + last_vertex_array = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) + + gl.glEnable(gl.GL_BLEND) + gl.glBlendEquation(gl.GL_FUNC_ADD) + gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA) + gl.glDisable(gl.GL_CULL_FACE) + gl.glDisable(gl.GL_DEPTH_TEST) + gl.glEnable(gl.GL_SCISSOR_TEST) + gl.glActiveTexture(gl.GL_TEXTURE0) + gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL) + + gl.glViewport(0, 0, int(fb_width), int(fb_height)) + + ortho_projection = (ctypes.c_float * 16)( # noqa + 2.0 / display_width, + 0.0, + 0.0, + 0.0, + 0.0, + 2.0 / -display_height, + 0.0, + 0.0, + 0.0, + 0.0, + -1.0, + 0.0, + -1.0, + 1.0, + 0.0, + 1.0, + ) + + gl.glUseProgram(self._shader_handle) + gl.glUniform1i(self._attrib_location_tex, 0) + gl.glUniformMatrix4fv(self._attrib_proj_mtx, 1, gl.GL_FALSE, ortho_projection) + gl.glBindVertexArray(self._vao_handle) + + for commands in draw_data.cmd_lists: + + gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._vbo_handle) + # todo: check this (sizes) + gl.glBufferData( + gl.GL_ARRAY_BUFFER, + commands.vtx_buffer.size() * imgui.VERTEX_SIZE, + ctypes.c_void_p(commands.vtx_buffer.data_address()), + gl.GL_STREAM_DRAW, + ) + + gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self._elements_handle) + # todo: check this (sizes) + gl.glBufferData( + gl.GL_ELEMENT_ARRAY_BUFFER, + 
commands.idx_buffer.size() * imgui.INDEX_SIZE, + ctypes.c_void_p(commands.idx_buffer.data_address()), + gl.GL_STREAM_DRAW, + ) + + # todo: allow to iterate over _CmdList + for command in commands.cmd_buffer: + gl.glBindTexture(gl.GL_TEXTURE_2D, command.tex_ref.get_tex_id()) + + # todo: use named tuple + x, y, z, w = command.clip_rect + gl.glScissor(int(x), int(fb_height - w), int(z - x), int(w - y)) + + if imgui.INDEX_SIZE == 2: + gltype = gl.GL_UNSIGNED_SHORT + else: + gltype = gl.GL_UNSIGNED_INT + + gl.glDrawElements( + gl.GL_TRIANGLES, + command.elem_count, + gltype, + ctypes.c_void_p(command.idx_offset * imgui.INDEX_SIZE), + ) + + + # restore modified GL state + restore_common_gl_state(common_gl_state_tuple) + + gl.glUseProgram(last_program) + gl.glActiveTexture(last_active_texture) + gl.glBindVertexArray(last_vertex_array) + gl.glBindBuffer(gl.GL_ARRAY_BUFFER, last_array_buffer) + gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, last_element_array_buffer) + + def _invalidate_device_objects(self): + if self._vao_handle > -1: + gl.glDeleteVertexArrays(1, [self._vao_handle]) + if self._vbo_handle > -1: + gl.glDeleteBuffers(1, [self._vbo_handle]) + if self._elements_handle > -1: + gl.glDeleteBuffers(1, [self._elements_handle]) + self._vao_handle = self._vbo_handle = self._elements_handle = 0 + + gl.glDeleteProgram(self._shader_handle) + self._shader_handle = 0 diff --git a/blimgui/dist64/imgui_bundle/python_backends/opengl_base_backend.py b/blimgui/dist64/imgui_bundle/python_backends/opengl_base_backend.py new file mode 100644 index 0000000..ba2d172 --- /dev/null +++ b/blimgui/dist64/imgui_bundle/python_backends/opengl_base_backend.py @@ -0,0 +1,244 @@ +from imgui_bundle import imgui +import OpenGL.GL as gl # pip install PyOpenGL + + +def _log__update_texture(msg: str): + pass + # import logging + # logging.warning(msg) + +class BaseOpenGLRenderer(object): + def __init__(self): + if not imgui.get_current_context(): + raise RuntimeError( + "No valid ImGui context. 
Use imgui.create_context() first and/or " + "imgui.set_current_context()." + ) + self.io = imgui.get_io() + self.io.delta_time = 1.0 / 60.0 + + self._create_device_objects() + + # Honor RendererHasTextures + # cf https://github.com/ocornut/imgui/commit/ff3f471ab2af25f1cc11c20356711aaa4e6833f8 + imgui.get_io().backend_flags |= imgui.BackendFlags_.renderer_has_textures.value + max_texture_size = gl.glGetIntegerv(gl.GL_MAX_TEXTURE_SIZE) + imgui.get_platform_io().renderer_texture_max_width = max_texture_size + imgui.get_platform_io().renderer_texture_max_height = max_texture_size + + def render(self, draw_data): + raise NotImplementedError + + def _update_textures(self): + # Honor RendererHasTextures + # cf https://github.com/ocornut/imgui/commit/ff3f471ab2af25f1cc11c20356711aaa4e6833f8 + for tex in imgui.get_platform_io().textures: + if tex.status != imgui.ImTextureStatus.ok: + self._update_texture(tex) + + def _destroy_all_textures(self): + for tex in imgui.get_platform_io().textures: + if tex.ref_count == 1: + tex.status = imgui.ImTextureStatus.want_destroy + self._update_texture(tex) + + def _update_texture(self, tex: imgui.ImTextureData): + # Honor RendererHasTextures + # cf https://github.com/ocornut/imgui/commit/ff3f471ab2af25f1cc11c20356711aaa4e6833f8 + # This method is a port of the C++ function ImGui_ImplOpenGL3_UpdateTexture + # where we use + # pixels = tex.get_pixels_array() + # to get a numpy array of the pixel data + # When doing updates, we use a sub-view of the full_pixels array to avoid copying data + + if tex.status == imgui.ImTextureStatus.want_create: + # Create and upload new texture to graphics system + _log__update_texture(f"UpdateTexture #{tex.unique_id}: WantCreate {tex.width}x{tex.height}") + assert tex.tex_id == 0 + assert tex.backend_user_data is None + assert tex.format == imgui.ImTextureFormat.rgba32 + + # Upload texture to graphics system + # (Bilinear sampling is required by default. 
Set 'io.Fonts->Flags |= ImFontAtlasFlags_NoBakedLines' or 'style.AntiAliasedLinesUseTex = false' to allow point/nearest sampling) + last_texture = gl.glGetIntegerv(gl.GL_TEXTURE_BINDING_2D) + gl_texture_id = gl.glGenTextures(1) + gl.glBindTexture(gl.GL_TEXTURE_2D, gl_texture_id) + gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) + gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) + gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE) + gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE) + if hasattr(gl, "GL_UNPACK_ROW_LENGTH"): + gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, 0) + + pixels_array = tex.get_pixels_array() + gl.glTexImage2D( + gl.GL_TEXTURE_2D, + 0, + gl.GL_RGBA, + tex.width, + tex.height, + 0, + gl.GL_RGBA, + gl.GL_UNSIGNED_BYTE, + pixels_array, + ) + + # Store identifiers: store the new GL texture ID back into ImGui's structure + tex.set_tex_id(gl_texture_id) + tex.status = imgui.ImTextureStatus.ok + + # Restore state + gl.glBindTexture(gl.GL_TEXTURE_2D, last_texture) + + elif tex.status == imgui.ImTextureStatus.want_updates: + _log__update_texture(f"UpdateTexture #{tex.unique_id}: WantUpdate {len(tex.updates)}") + # Update selected blocks. We only ever write to textures regions that have never been used before! + # This backend chooses to use tex.Updates[], but you can use tex.UpdateRect to upload a single region. + last_texture = gl.glGetIntegerv(gl.GL_TEXTURE_BINDING_2D) + gl.glBindTexture(gl.GL_TEXTURE_2D, tex.tex_id) + + # We assume desktop OpenGL where GL_UNPACK_ROW_LENGTH is supported. + # This allows partial updates without line-by-line copies in Python. 
+ gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, tex.width) + + # Get the full 1D array of pixels (shape=(width*height*bpp,)) + full_pixels = tex.get_pixels_array() + + for r in tex.updates: + # Compute offset into the 1D array for the sub-rectangle's top-left pixel: + offset = (r.y * tex.width + r.x) * tex.bytes_per_pixel + # Then get a slice from that offset to the end. We only need the pointer’s start address: + sub_view = full_pixels[offset:] # shape is still 1D, but it starts at the correct place + + # glTexSubImage2D will read only r.h rows and r.w columns per row, + # with the stride controlled by GL_UNPACK_ROW_LENGTH: + gl.glTexSubImage2D( + gl.GL_TEXTURE_2D, + 0, # mip level + r.x, + r.y, + r.w, + r.h, + gl.GL_RGBA, + gl.GL_UNSIGNED_BYTE, + sub_view + ) + + # Restore the row-length to 0 (the driver default) + gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, 0) + + tex.status = imgui.ImTextureStatus.ok + gl.glBindTexture(gl.GL_TEXTURE_2D, last_texture) # Restore state + + elif tex.status == imgui.ImTextureStatus.want_destroy: + _log__update_texture(f"UpdateTexture #{tex.unique_id}: WantDestroy") + gl_tex_id = tex.tex_id + gl.glDeleteTextures([gl_tex_id]) + + # Clear identifiers and mark as destroyed (so e.g. 
InvalidateDeviceObjects can be called at runtime) + ImTextureID_Invalid = 0 + tex.set_tex_id(ImTextureID_Invalid) + tex.status = imgui.ImTextureStatus.destroyed + + def _create_device_objects(self): + raise NotImplementedError + + def _invalidate_device_objects(self): + raise NotImplementedError + + def shutdown(self): + self._destroy_all_textures() + imgui.get_io().backend_flags &= ~imgui.BackendFlags_.renderer_has_textures.value + self._invalidate_device_objects() + + +def get_common_gl_state(): + """ + Backups the current OpenGL state + Returns a tuple of results for glGet / glIsEnabled calls + NOTE: when adding more backuped state in the future, + make sure to update function `restore_common_gl_state` + """ + last_texture = gl.glGetIntegerv(gl.GL_TEXTURE_BINDING_2D) + last_viewport = gl.glGetIntegerv(gl.GL_VIEWPORT) + last_enable_blend = gl.glIsEnabled(gl.GL_BLEND) + last_enable_cull_face = gl.glIsEnabled(gl.GL_CULL_FACE) + last_enable_depth_test = gl.glIsEnabled(gl.GL_DEPTH_TEST) + last_enable_scissor_test = gl.glIsEnabled(gl.GL_SCISSOR_TEST) + last_scissor_box = gl.glGetIntegerv(gl.GL_SCISSOR_BOX) + last_blend_src = gl.glGetIntegerv(gl.GL_BLEND_SRC) + last_blend_dst = gl.glGetIntegerv(gl.GL_BLEND_DST) + last_blend_equation_rgb = gl.glGetIntegerv(gl.GL_BLEND_EQUATION_RGB) + last_blend_equation_alpha = gl.glGetIntegerv(gl.GL_BLEND_EQUATION_ALPHA) + last_front_and_back_polygon_mode, _ = gl.glGetIntegerv(gl.GL_POLYGON_MODE) + return ( + last_texture, + last_viewport, + last_enable_blend, + last_enable_cull_face, + last_enable_depth_test, + last_enable_scissor_test, + last_scissor_box, + last_blend_src, + last_blend_dst, + last_blend_equation_rgb, + last_blend_equation_alpha, + last_front_and_back_polygon_mode, + ) + + +def restore_common_gl_state(common_gl_state_tuple): + """ + Takes a tuple after calling function `get_common_gl_state`, + to set the given OpenGL state back as it was before rendering the UI + """ + ( + last_texture, + last_viewport, + 
last_enable_blend, + last_enable_cull_face, + last_enable_depth_test, + last_enable_scissor_test, + last_scissor_box, + last_blend_src, + last_blend_dst, + last_blend_equation_rgb, + last_blend_equation_alpha, + last_front_and_back_polygon_mode, + ) = common_gl_state_tuple + + gl.glBindTexture(gl.GL_TEXTURE_2D, last_texture) + gl.glBlendEquationSeparate(last_blend_equation_rgb, last_blend_equation_alpha) + gl.glBlendFunc(last_blend_src, last_blend_dst) + + gl.glPolygonMode(gl.GL_FRONT_AND_BACK, last_front_and_back_polygon_mode) + + if last_enable_blend: + gl.glEnable(gl.GL_BLEND) + else: + gl.glDisable(gl.GL_BLEND) + + if last_enable_cull_face: + gl.glEnable(gl.GL_CULL_FACE) + else: + gl.glDisable(gl.GL_CULL_FACE) + + if last_enable_depth_test: + gl.glEnable(gl.GL_DEPTH_TEST) + else: + gl.glDisable(gl.GL_DEPTH_TEST) + + if last_enable_scissor_test: + gl.glEnable(gl.GL_SCISSOR_TEST) + else: + gl.glDisable(gl.GL_SCISSOR_TEST) + + gl.glScissor( + last_scissor_box[0], + last_scissor_box[1], + last_scissor_box[2], + last_scissor_box[3], + ) + gl.glViewport( + last_viewport[0], last_viewport[1], last_viewport[2], last_viewport[3] + ) diff --git a/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/pygame_backend.py b/blimgui/dist64/imgui_bundle/python_backends/pygame_backend.py similarity index 87% rename from blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/pygame_backend.py rename to blimgui/dist64/imgui_bundle/python_backends/pygame_backend.py index 9e2efdc..2932f1c 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/pygame_backend.py +++ b/blimgui/dist64/imgui_bundle/python_backends/pygame_backend.py @@ -1,144 +1,139 @@ -# Note: this backend was adapted to the new ImGui API. However, there are rendering issues with it, -# probably because it uses an old version of the OpenGL API -# (FixedPipelineRenderer instead of ProgrammablePipelineRenderer). 
- -from __future__ import absolute_import - -from imgui_bundle import imgui - -from imgui_bundle.python_backends.opengl_backend import FixedPipelineRenderer -# from imgui_bundle.python_backends.opengl_backend import ProgrammablePipelineRenderer - -from typing import Dict - -import pygame -import pygame.event -import pygame.time - -PygameKey = int - - -class PygameRenderer(FixedPipelineRenderer): - key_map: Dict[PygameKey, imgui.Key] - modifier_map: Dict[PygameKey, imgui.Key] - - def __init__(self): - super(PygameRenderer, self).__init__() - - self._gui_time = None - self._map_keys() - - def _map_keys(self): - self.key_map = { - pygame.K_LEFT: imgui.Key.left_arrow, - pygame.K_RIGHT: imgui.Key.right_arrow, - pygame.K_UP: imgui.Key.up_arrow, - pygame.K_DOWN: imgui.Key.down_arrow, - pygame.K_PAGEUP: imgui.Key.page_up, - pygame.K_PAGEDOWN: imgui.Key.page_down, - pygame.K_HOME: imgui.Key.home, - pygame.K_END: imgui.Key.end, - pygame.K_INSERT: imgui.Key.insert, - pygame.K_DELETE: imgui.Key.delete, - pygame.K_BACKSPACE: imgui.Key.backspace, - pygame.K_SPACE: imgui.Key.space, - pygame.K_RETURN: imgui.Key.enter, - pygame.K_ESCAPE: imgui.Key.escape, - pygame.K_KP_ENTER: imgui.Key.keypad_enter, - - pygame.K_LCTRL: imgui.Key.left_ctrl, - pygame.K_RCTRL: imgui.Key.right_ctrl, - pygame.K_LALT: imgui.Key.left_alt, - pygame.K_RALT: imgui.Key.right_alt, - pygame.K_RSHIFT: imgui.Key.right_shift, - pygame.K_LSHIFT: imgui.Key.left_shift, - pygame.K_LSUPER: imgui.Key.left_super, - pygame.K_RSUPER: imgui.Key.right_super, - - # pygame.K_a: imgui.Key.a, - # pygame.K_c: imgui.Key.c, - # pygame.K_v: imgui.Key.v, - # pygame.K_x: imgui.Key.x, - # pygame.K_y: imgui.Key.y, - # pygame.K_z: imgui.Key.z, - } - - self.modifier_map = { - pygame.K_LCTRL: imgui.Key.mod_ctrl, - pygame.K_RCTRL: imgui.Key.mod_ctrl, - pygame.K_LSHIFT: imgui.Key.mod_shift, - pygame.K_RSHIFT: imgui.Key.mod_shift, - pygame.K_LALT: imgui.Key.mod_alt, - pygame.K_RALT: imgui.Key.mod_alt, - pygame.K_LSUPER: imgui.Key.mod_super, - 
pygame.K_RSUPER: imgui.Key.mod_super, - } - - def process_event(self, event): - # perf: local for faster access - io = self.io - - if event.type == pygame.MOUSEMOTION: - io.add_mouse_pos_event(event.pos[0], event.pos[1]) - return True - - if event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.MOUSEBUTTONUP: - down = event.type == pygame.MOUSEBUTTONDOWN - imgui_button = event.button - 1 - io.add_mouse_button_event(imgui_button, down) - return True - - if event.type == pygame.MOUSEWHEEL: - k = 0.5 - io.add_mouse_wheel_event(event.x * k, event.y * k) - return True - - processed_special_key = False - if event.type in (pygame.KEYDOWN, pygame.KEYUP): - is_down = event.type == pygame.KEYDOWN - if event.key in self.key_map.keys(): - io.add_key_event(self.key_map[event.key], down=is_down) - processed_special_key = True - if event.key in self.modifier_map.keys(): - io.add_key_event(self.modifier_map[event.key], down=is_down) - processed_special_key = True - - if event.type == pygame.KEYDOWN and not processed_special_key: - for char in event.unicode: - code = ord(char) - if 0 < code < 0x10000: - io.add_input_character(code) - - return True - - if event.type == pygame.VIDEORESIZE: - surface = pygame.display.get_surface() - # note: pygame does not modify existing surface upon resize, - # we need to do it ourselves. 
- pygame.display.set_mode( - (event.w, event.h), - flags=surface.get_flags(), - ) - # existing font texure is no longer valid, so we need to refresh it - self.refresh_font_texture() - - # notify imgui about new window size - io.display_size = event.size - - # delete old surface, it is no longer needed - del surface - - return True - - def process_inputs(self): - io = imgui.get_io() - - current_time = pygame.time.get_ticks() / 1000.0 - - if self._gui_time: - io.delta_time = current_time - self._gui_time - else: - io.delta_time = 1.0 / 60.0 - if io.delta_time <= 0.0: - io.delta_time = 1.0 / 1000.0 - self._gui_time = current_time +from __future__ import absolute_import + +from imgui_bundle import imgui + +from imgui_bundle.python_backends.opengl_backend_programmable import ProgrammablePipelineRenderer + +from typing import Dict + +import pygame +import pygame.event +import pygame.time + +PygameKey = int + + +class PygameRenderer(ProgrammablePipelineRenderer): + key_map: Dict[PygameKey, imgui.Key] + modifier_map: Dict[PygameKey, imgui.Key] + + def __init__(self): + super(PygameRenderer, self).__init__() + + self._gui_time = None + self._map_keys() + + def _map_keys(self): + self.key_map = { + pygame.K_LEFT: imgui.Key.left_arrow, + pygame.K_RIGHT: imgui.Key.right_arrow, + pygame.K_UP: imgui.Key.up_arrow, + pygame.K_DOWN: imgui.Key.down_arrow, + pygame.K_PAGEUP: imgui.Key.page_up, + pygame.K_PAGEDOWN: imgui.Key.page_down, + pygame.K_HOME: imgui.Key.home, + pygame.K_END: imgui.Key.end, + pygame.K_INSERT: imgui.Key.insert, + pygame.K_DELETE: imgui.Key.delete, + pygame.K_BACKSPACE: imgui.Key.backspace, + pygame.K_RETURN: imgui.Key.enter, + pygame.K_ESCAPE: imgui.Key.escape, + pygame.K_KP_ENTER: imgui.Key.keypad_enter, + pygame.K_TAB: imgui.Key.tab, + + pygame.K_LCTRL: imgui.Key.left_ctrl, + pygame.K_RCTRL: imgui.Key.right_ctrl, + pygame.K_LALT: imgui.Key.left_alt, + pygame.K_RALT: imgui.Key.right_alt, + pygame.K_RSHIFT: imgui.Key.right_shift, + pygame.K_LSHIFT: 
imgui.Key.left_shift, + pygame.K_LSUPER: imgui.Key.left_super, + pygame.K_RSUPER: imgui.Key.right_super, + + # pygame.K_a: imgui.Key.a, + # pygame.K_c: imgui.Key.c, + # pygame.K_v: imgui.Key.v, + # pygame.K_x: imgui.Key.x, + # pygame.K_y: imgui.Key.y, + # pygame.K_z: imgui.Key.z, + } + + self.modifier_map = { + pygame.K_LCTRL: imgui.Key.mod_ctrl, + pygame.K_RCTRL: imgui.Key.mod_ctrl, + pygame.K_LSHIFT: imgui.Key.mod_shift, + pygame.K_RSHIFT: imgui.Key.mod_shift, + pygame.K_LALT: imgui.Key.mod_alt, + pygame.K_RALT: imgui.Key.mod_alt, + pygame.K_LSUPER: imgui.Key.mod_super, + pygame.K_RSUPER: imgui.Key.mod_super, + } + + def process_event(self, event): + # perf: local for faster access + io = self.io + + if event.type == pygame.MOUSEMOTION: + io.add_mouse_pos_event(event.pos[0], event.pos[1]) + return True + + if event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.MOUSEBUTTONUP: + down = event.type == pygame.MOUSEBUTTONDOWN + imgui_button = event.button - 1 + io.add_mouse_button_event(imgui_button, down) + return True + + if event.type == pygame.MOUSEWHEEL: + k = 0.5 + io.add_mouse_wheel_event(event.x * k, event.y * k) + return True + + processed_special_key = False + if event.type in (pygame.KEYDOWN, pygame.KEYUP): + is_down = event.type == pygame.KEYDOWN + if event.key in self.key_map.keys(): + io.add_key_event(self.key_map[event.key], down=is_down) + processed_special_key = True + if event.key in self.modifier_map.keys(): + io.add_key_event(self.modifier_map[event.key], down=is_down) + processed_special_key = True + + if event.type == pygame.KEYDOWN and not processed_special_key: + for char in event.unicode: + code = ord(char) + if 0 < code < 0x10000: + io.add_input_character(code) + + return True + + if event.type == pygame.VIDEORESIZE: + surface = pygame.display.get_surface() + # note: pygame does not modify existing surface upon resize, + # we need to do it ourselves. 
+ pygame.display.set_mode( + (event.w, event.h), + flags=surface.get_flags(), + ) + # existing font texure is no longer valid, so we need to refresh it + self._update_textures() + + # notify imgui about new window size + io.display_size = event.size + + # delete old surface, it is no longer needed + del surface + + return True + + def process_inputs(self): + io = imgui.get_io() + + current_time = pygame.time.get_ticks() / 1000.0 + + if self._gui_time: + io.delta_time = current_time - self._gui_time + else: + io.delta_time = 1.0 / 60.0 + if io.delta_time <= 0.0: + io.delta_time = 1.0 / 1000.0 + self._gui_time = current_time diff --git a/blimgui/dist64/imgui_bundle/python_backends/pyglet_backend.py b/blimgui/dist64/imgui_bundle/python_backends/pyglet_backend.py index ac217b4..50dfc61 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/pyglet_backend.py +++ b/blimgui/dist64/imgui_bundle/python_backends/pyglet_backend.py @@ -10,10 +10,7 @@ from imgui_bundle.python_backends import compute_fb_scale -from imgui_bundle.python_backends.opengl_backend import ( - FixedPipelineRenderer, - ProgrammablePipelineRenderer, -) +from imgui_bundle.python_backends.opengl_backend_programmable import ProgrammablePipelineRenderer class PygletMixin(object): @@ -283,18 +280,6 @@ def process_inputs(self): self._gui_time = current_time -class PygletFixedPipelineRenderer(PygletMixin, FixedPipelineRenderer): - def __init__(self, window, attach_callbacks=True): - super(PygletFixedPipelineRenderer, self).__init__() - self._set_pixel_ratio(window) - if attach_callbacks: - self._attach_callbacks(window) - - def render(self, draw_data): - super(PygletFixedPipelineRenderer, self).render(draw_data) - self._handle_mouse_cursor() - - class PygletProgrammablePipelineRenderer(PygletMixin, ProgrammablePipelineRenderer): def __init__(self, window, attach_callbacks=True): super(PygletProgrammablePipelineRenderer, self).__init__() @@ -306,20 +291,6 @@ def render(self, draw_data): 
super(PygletProgrammablePipelineRenderer, self).render(draw_data) self._handle_mouse_cursor() - -class PygletRenderer(PygletFixedPipelineRenderer): - def __init__(self, window, attach_callbacks=True): - warnings.warn( - "PygletRenderer is deprecated; please use either " - "PygletFixedPipelineRenderer (for OpenGL 2.1, pyglet < 2.0) or " - "PygletProgrammablePipelineRenderer (for later versions) or " - "create_renderer(window) to auto-detect.", - DeprecationWarning, - stacklevel=2, - ) - super(PygletRenderer, self).__init__(window, attach_callbacks) - - def create_renderer(window, attach_callbacks=True): """ This is a helper function that wraps the appropriate version of the Pyglet @@ -329,7 +300,5 @@ def create_renderer(window, attach_callbacks=True): # Pyglet < 2.0 has issues with ProgrammablePipeline even when the context # is OpenGL 3, so we need to check the pyglet version rather than looking # at window.config.major_version to see if we want to use programmable. - if int(pyglet.version.split('.')[0]) < 2: - return PygletFixedPipelineRenderer(window, attach_callbacks) - else: - return PygletProgrammablePipelineRenderer(window, attach_callbacks) + assert int(pyglet.version.split('.')[0]) >= 2 + return PygletProgrammablePipelineRenderer(window, attach_callbacks) diff --git a/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/cocos2d_backend.py b/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/cocos2d_backend.py index b93fd04..98d7bb6 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/cocos2d_backend.py +++ b/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/cocos2d_backend.py @@ -7,7 +7,7 @@ from imgui_bundle.python_backends import compute_fb_scale from imgui_bundle.python_backends.pyglet_backend import PygletMixin -from imgui_bundle.python_backends.opengl_backend import FixedPipelineRenderer +from imgui_bundle.python_backends.opengl_backend_fixed import FixedPipelineRenderer 
class ImguiLayer(PygletMixin, cocos.layer.Layer): diff --git a/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/glumpy_backend.py b/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/glumpy_backend.py index 3391016..60526c4 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/glumpy_backend.py +++ b/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/glumpy_backend.py @@ -6,8 +6,8 @@ import ctypes from ctypes import * -from imgui_bundle.python_backends.opengl_backend import BaseOpenGLRenderer - +from imgui_bundle.python_backends.opengl_base_backend import BaseOpenGLRenderer +from imgui_bundle import imgui from glumpy import gloo, gl # type: ignore from matplotlib import pyplot as plt # type: ignore diff --git a/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/opengl_backend_fixed.py b/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/opengl_backend_fixed.py new file mode 100644 index 0000000..6257619 --- /dev/null +++ b/blimgui/dist64/imgui_bundle/python_backends/python_backends_disabled/opengl_backend_fixed.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +"""The FixedPipelineRenderer is broken and should not be used.""" +from __future__ import absolute_import + +import OpenGL.GL as gl # pip install PyOpenGL +from imgui_bundle import imgui +import ctypes + +from .opengl_base_backend import BaseOpenGLRenderer, get_common_gl_state, restore_common_gl_state + + +class FixedPipelineRenderer( + BaseOpenGLRenderer +): # Probably buggy (bad rendering with pygame) + """Basic OpenGL integration base class.""" + + # note: no need to override __init__ + + def _create_device_objects(self): + pass + + def render(self, draw_data): + # perf: local for faster access + io = self.io + + display_width, display_height = io.display_size + fb_width = int(display_width * io.display_framebuffer_scale[0]) + fb_height = int(display_height * io.display_framebuffer_scale[1]) 
+ + if fb_width == 0 or fb_height == 0: + return + + # Honor RendererHasTextures + self._update_textures() + + draw_data.scale_clip_rects(io.display_framebuffer_scale) + + # note: we are using fixed pipeline for cocos2d/pyglet + # todo: consider porting to programmable pipeline + # backup gl state + common_gl_state_tuple = get_common_gl_state() + + gl.glPushAttrib(gl.GL_ENABLE_BIT | gl.GL_COLOR_BUFFER_BIT | gl.GL_TRANSFORM_BIT) + gl.glEnable(gl.GL_BLEND) + gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA) + gl.glDisable(gl.GL_CULL_FACE) + gl.glDisable(gl.GL_DEPTH_TEST) + gl.glEnable(gl.GL_SCISSOR_TEST) + gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL) + + gl.glEnableClientState(gl.GL_VERTEX_ARRAY) + gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY) + gl.glEnableClientState(gl.GL_COLOR_ARRAY) + gl.glEnable(gl.GL_TEXTURE_2D) + + gl.glViewport(0, 0, int(fb_width), int(fb_height)) + gl.glMatrixMode(gl.GL_PROJECTION) + gl.glPushMatrix() + gl.glLoadIdentity() + gl.glOrtho(0, io.display_size.x, io.display_size.y, 0.0, -1.0, 1.0) + gl.glMatrixMode(gl.GL_MODELVIEW) + gl.glPushMatrix() + gl.glLoadIdentity() + + for commands in draw_data.cmd_lists: + + gl.glVertexPointer( + 2, + gl.GL_FLOAT, + imgui.VERTEX_SIZE, + ctypes.c_void_p( + commands.vtx_buffer.data_address() + imgui.VERTEX_BUFFER_POS_OFFSET + ), + ) + gl.glTexCoordPointer( + 2, + gl.GL_FLOAT, + imgui.VERTEX_SIZE, + ctypes.c_void_p( + commands.vtx_buffer.data_address() + imgui.VERTEX_BUFFER_UV_OFFSET + ), + ) + gl.glColorPointer( + 4, + gl.GL_UNSIGNED_BYTE, + imgui.VERTEX_SIZE, + ctypes.c_void_p( + commands.vtx_buffer.data_address() + imgui.VERTEX_BUFFER_COL_OFFSET + ), + ) + + for command in commands.cmd_buffer: + gl.glBindTexture(gl.GL_TEXTURE_2D, command.get_tex_id()) + + x, y, z, w = command.clip_rect + gl.glScissor(int(x), int(fb_height - w), int(z - x), int(w - y)) + + if imgui.INDEX_SIZE == 2: + gltype = gl.GL_UNSIGNED_SHORT + else: + gltype = gl.GL_UNSIGNED_INT + + gl.glDrawElements( + 
gl.GL_TRIANGLES, + command.elem_count, + gltype, + ctypes.c_void_p(command.idx_offset * imgui.INDEX_SIZE), + ) + + + restore_common_gl_state(common_gl_state_tuple) + + gl.glDisableClientState(gl.GL_COLOR_ARRAY) + gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY) + gl.glDisableClientState(gl.GL_VERTEX_ARRAY) + + gl.glMatrixMode(gl.GL_MODELVIEW) + gl.glPopMatrix() + gl.glMatrixMode(gl.GL_PROJECTION) + gl.glPopMatrix() + gl.glPopAttrib() + + def _invalidate_device_objects(self): + pass diff --git a/blimgui/dist64/imgui_bundle/python_backends/sdl2_backend.py b/blimgui/dist64/imgui_bundle/python_backends/sdl2_backend.py index 464024a..2a97182 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/sdl2_backend.py +++ b/blimgui/dist64/imgui_bundle/python_backends/sdl2_backend.py @@ -6,7 +6,7 @@ from imgui_bundle import imgui from sdl2 import * -from .opengl_backend import ProgrammablePipelineRenderer +from .opengl_backend_programmable import ProgrammablePipelineRenderer import ctypes diff --git a/blimgui/dist64/imgui_bundle/python_backends/sdl3_backend.py b/blimgui/dist64/imgui_bundle/python_backends/sdl3_backend.py index 8ce661d..64ee72c 100644 --- a/blimgui/dist64/imgui_bundle/python_backends/sdl3_backend.py +++ b/blimgui/dist64/imgui_bundle/python_backends/sdl3_backend.py @@ -6,7 +6,7 @@ from imgui_bundle import imgui from sdl3 import * -from .opengl_backend import ProgrammablePipelineRenderer +from .opengl_backend_programmable import ProgrammablePipelineRenderer import ctypes diff --git a/blimgui/dist64/numpy-2.2.5.dist-info/DELVEWHEEL b/blimgui/dist64/numpy-2.2.5.dist-info/DELVEWHEEL deleted file mode 100644 index 3667a30..0000000 --- a/blimgui/dist64/numpy-2.2.5.dist-info/DELVEWHEEL +++ /dev/null @@ -1,2 +0,0 @@ -Version: 1.10.0 -Arguments: ['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-065080rt\\cp313-win_amd64\\build\\venv\\Scripts\\delvewheel', 'repair', '--add-path', 'D:/a/numpy/numpy/.openblas/lib', '-w', 
'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-065080rt\\cp313-win_amd64\\repaired_wheel', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-065080rt\\cp313-win_amd64\\built_wheel\\numpy-2.2.5-cp313-cp313-win_amd64.whl'] diff --git a/blimgui/dist64/numpy-2.2.5.dist-info/METADATA b/blimgui/dist64/numpy-2.2.5.dist-info/METADATA deleted file mode 100644 index bd22164..0000000 --- a/blimgui/dist64/numpy-2.2.5.dist-info/METADATA +++ /dev/null @@ -1,1071 +0,0 @@ -Metadata-Version: 2.1 -Name: numpy -Version: 2.2.5 -Summary: Fundamental package for array computing in Python -Author: Travis E. Oliphant et al. -Maintainer-Email: NumPy Developers -License: Copyright (c) 2005-2024, NumPy Developers. - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - ---- - - The NumPy repository and source distributions bundle several libraries that are - compatibly licensed. We list these here. - - Name: lapack-lite - Files: numpy/linalg/lapack_lite/* - License: BSD-3-Clause - For details, see numpy/linalg/lapack_lite/LICENSE.txt - - Name: dragon4 - Files: numpy/_core/src/multiarray/dragon4.c - License: MIT - For license text, see numpy/_core/src/multiarray/dragon4.c - - Name: libdivide - Files: numpy/_core/include/numpy/libdivide/* - License: Zlib - For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt - - - Note that the following files are vendored in the repository and sdist but not - installed in built numpy packages: - - Name: Meson - Files: vendored-meson/meson/* - License: Apache 2.0 - For license text, see vendored-meson/meson/COPYING - - Name: spin - Files: .spin/cmds.py - License: BSD-3 - For license text, see .spin/LICENSE - - Name: tempita - Files: numpy/_build_utils/tempita/* - License: MIT - For details, see numpy/_build_utils/tempita/LICENCE.txt - - ---- - - This binary distribution of NumPy also bundles the following software: - - - Name: OpenBLAS - Files: numpy.libs\libscipy_openblas*.dll - Description: bundled as a dynamically linked library - Availability: https://github.com/OpenMathLib/OpenBLAS/ - License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - Name: LAPACK - Files: numpy.libs\libscipy_openblas*.dll - Description: bundled in OpenBLAS - Availability: https://github.com/OpenMathLib/OpenBLAS/ - License: BSD-3-Clause-Attribution - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. 
- - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - - Name: GCC runtime library - Files: numpy.libs\libscipy_openblas*.dll - Description: statically linked to files compiled with gcc - Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran - License: GPL-3.0-with-GCC-exception - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - - ---- - - Full text of license texts referred to above follows (that they are - listed below does not necessarily imply the conditions apply to the - present binary release): - - ---- - - GCC RUNTIME LIBRARY EXCEPTION - - Version 3.1, 31 March 2009 - - Copyright (C) 2009 Free Software Foundation, Inc. - - Everyone is permitted to copy and distribute verbatim copies of this - license document, but changing it is not allowed. - - This GCC Runtime Library Exception ("Exception") is an additional - permission under section 7 of the GNU General Public License, version - 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that - bears a notice placed by the copyright holder of the file stating that - the file is governed by GPLv3 along with this Exception. 
- - When you use GCC to compile a program, GCC may combine portions of - certain GCC header files and runtime libraries with the compiled - program. The purpose of this Exception is to allow compilation of - non-GPL (including proprietary) programs to use, in this way, the - header files and runtime libraries covered by this Exception. - - 0. Definitions. - - A file is an "Independent Module" if it either requires the Runtime - Library for execution after a Compilation Process, or makes use of an - interface provided by the Runtime Library, but is not otherwise based - on the Runtime Library. - - "GCC" means a version of the GNU Compiler Collection, with or without - modifications, governed by version 3 (or a specified later version) of - the GNU General Public License (GPL) with the option of using any - subsequent versions published by the FSF. - - "GPL-compatible Software" is software whose conditions of propagation, - modification and use would permit combination with GCC in accord with - the license of GCC. - - "Target Code" refers to output from any compiler for a real or virtual - target processor architecture, in executable form or suitable for - input to an assembler, loader, linker and/or execution - phase. Notwithstanding that, Target Code does not include data in any - format that is used as a compiler intermediate representation, or used - for producing a compiler intermediate representation. - - The "Compilation Process" transforms code entirely represented in - non-intermediate languages designed for human-written code, and/or in - Java Virtual Machine byte code, into Target Code. Thus, for example, - use of source code generators and preprocessors need not be considered - part of the Compilation Process, since the Compilation Process can be - understood as starting with the output of the generators or - preprocessors. 
- - A Compilation Process is "Eligible" if it is done using GCC, alone or - with other GPL-compatible software, or if it is done without using any - work based on GCC. For example, using non-GPL-compatible Software to - optimize any GCC intermediate representations would not qualify as an - Eligible Compilation Process. - - 1. Grant of Additional Permission. - - You have permission to propagate a work of Target Code formed by - combining the Runtime Library with Independent Modules, even if such - propagation would otherwise violate the terms of GPLv3, provided that - all Target Code was generated by Eligible Compilation Processes. You - may then convey such a combination under terms of your choice, - consistent with the licensing of the Independent Modules. - - 2. No Weakening of GCC Copyleft. - - The availability of this Exception does not imply any general - presumption that third-party software is unaffected by the copyleft - requirements of the license of GCC. - - ---- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for - software and other kinds of works. - - The licenses for most software and other practical works are designed - to take away your freedom to share and change the works. By contrast, - the GNU General Public License is intended to guarantee your freedom to - share and change all versions of a program--to make sure it remains free - software for all its users. We, the Free Software Foundation, use the - GNU General Public License for most of our software; it applies also to - any other work released this way by its authors. You can apply it to - your programs, too. - - When we speak of free software, we are referring to freedom, not - price. 
Our General Public Licenses are designed to make sure that you - have the freedom to distribute copies of free software (and charge for - them if you wish), that you receive source code or can get it if you - want it, that you can change the software or use pieces of it in new - free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you - these rights or asking you to surrender the rights. Therefore, you have - certain responsibilities if you distribute copies of the software, or if - you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether - gratis or for a fee, you must pass on to the recipients the same - freedoms that you received. You must make sure that they, too, receive - or can get the source code. And you must show them these terms so they - know their rights. - - Developers that use the GNU GPL protect your rights with two steps: - (1) assert copyright on the software, and (2) offer you this License - giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains - that there is no warranty for this free software. For both users' and - authors' sake, the GPL requires that modified versions be marked as - changed, so that their problems will not be attributed erroneously to - authors of previous versions. - - Some devices are designed to deny users access to install or run - modified versions of the software inside them, although the manufacturer - can do so. This is fundamentally incompatible with the aim of - protecting users' freedom to change the software. The systematic - pattern of such abuse occurs in the area of products for individuals to - use, which is precisely where it is most unacceptable. Therefore, we - have designed this version of the GPL to prohibit the practice for those - products. 
If such problems arise substantially in other domains, we - stand ready to extend this provision to those domains in future versions - of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. - States should not allow patents to restrict development and use of - software on general-purpose computers, but in those that do, we wish to - avoid the special danger that patents applied to a free program could - make it effectively proprietary. To prevent this, the GPL assures that - patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and - modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of - works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this - License. Each licensee is addressed as "you". "Licensees" and - "recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work - in a fashion requiring copyright permission, other than the making of an - exact copy. The resulting work is called a "modified version" of the - earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based - on the Program. - - To "propagate" a work means to do anything with it that, without - permission, would make you directly or secondarily liable for - infringement under applicable copyright law, except executing it on a - computer or modifying a private copy. Propagation includes copying, - distribution (with or without modification), making available to the - public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other - parties to make or receive copies. 
Mere interaction with a user through - a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" - to the extent that it includes a convenient and prominently visible - feature that (1) displays an appropriate copyright notice, and (2) - tells the user that there is no warranty for the work (except to the - extent that warranties are provided), that licensees may convey the - work under this License, and how to view a copy of this License. If - the interface presents a list of user commands or options, such as a - menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work - for making modifications to it. "Object code" means any non-source - form of a work. - - A "Standard Interface" means an interface that either is an official - standard defined by a recognized standards body, or, in the case of - interfaces specified for a particular programming language, one that - is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other - than the work as a whole, that (a) is included in the normal form of - packaging a Major Component, but which is not part of that Major - Component, and (b) serves only to enable use of the work with that - Major Component, or to implement a Standard Interface for which an - implementation is available to the public in source code form. A - "Major Component", in this context, means a major essential component - (kernel, window system, and so on) of the specific operating system - (if any) on which the executable work runs, or a compiler used to - produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all - the source code needed to generate, install, and (for an executable - work) run the object code and to modify the work, including scripts to - control those activities. However, it does not include the work's - System Libraries, or general-purpose tools or generally available free - programs which are used unmodified in performing those activities but - which are not part of the work. For example, Corresponding Source - includes interface definition files associated with source files for - the work, and the source code for shared libraries and dynamically - linked subprograms that the work is specifically designed to require, - such as by intimate data communication or control flow between those - subprograms and other parts of the work. - - The Corresponding Source need not include anything that users - can regenerate automatically from other parts of the Corresponding - Source. - - The Corresponding Source for a work in source code form is that - same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of - copyright on the Program, and are irrevocable provided the stated - conditions are met. This License explicitly affirms your unlimited - permission to run the unmodified Program. The output from running a - covered work is covered by this License only if the output, given its - content, constitutes a covered work. This License acknowledges your - rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not - convey, without conditions so long as your license otherwise remains - in force. 
You may convey covered works to others for the sole purpose - of having them make modifications exclusively for you, or provide you - with facilities for running those works, provided that you comply with - the terms of this License in conveying all material for which you do - not control copyright. Those thus making or running the covered works - for you must do so exclusively on your behalf, under your direction - and control, on terms that prohibit them from making any copies of - your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under - the conditions stated below. Sublicensing is not allowed; section 10 - makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological - measure under any applicable law fulfilling obligations under article - 11 of the WIPO copyright treaty adopted on 20 December 1996, or - similar laws prohibiting or restricting circumvention of such - measures. - - When you convey a covered work, you waive any legal power to forbid - circumvention of technological measures to the extent such circumvention - is effected by exercising rights under this License with respect to - the covered work, and you disclaim any intention to limit operation or - modification of the work as a means of enforcing, against the work's - users, your or third parties' legal rights to forbid circumvention of - technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you - receive it, in any medium, provided that you conspicuously and - appropriately publish on each copy an appropriate copyright notice; - keep intact all notices stating that this License and any - non-permissive terms added in accord with section 7 apply to the code; - keep intact all notices of the absence of any warranty; and give all - recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, - and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to - produce it from the Program, in the form of source code under the - terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent - works, which are not by their nature extensions of the covered work, - and which are not combined with it such as to form a larger program, - in or on a volume of a storage or distribution medium, is called an - "aggregate" if the compilation and its resulting copyright are not - used to limit the access or legal rights of the compilation's users - beyond what the individual works permit. Inclusion of a covered work - in an aggregate does not cause this License to apply to the other - parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms - of sections 4 and 5, provided that you also convey the - machine-readable Corresponding Source under the terms of this License, - in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. 
This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded - from the Corresponding Source as a System Library, need not be - included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any - tangible personal property which is normally used for personal, family, - or household purposes, or (2) anything designed or sold for incorporation - into a dwelling. In determining whether a product is a consumer product, - doubtful cases shall be resolved in favor of coverage. 
For a particular - product received by a particular user, "normally used" refers to a - typical or common use of that class of product, regardless of the status - of the particular user or of the way in which the particular user - actually uses, or expects or is expected to use, the product. A product - is a consumer product regardless of whether the product has substantial - commercial, industrial or non-consumer uses, unless such uses represent - the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, - procedures, authorization keys, or other information required to install - and execute modified versions of a covered work in that User Product from - a modified version of its Corresponding Source. The information must - suffice to ensure that the continued functioning of the modified object - code is in no case prevented or interfered with solely because - modification has been made. - - If you convey an object code work under this section in, or with, or - specifically for use in, a User Product, and the conveying occurs as - part of a transaction in which the right of possession and use of the - User Product is transferred to the recipient in perpetuity or for a - fixed term (regardless of how the transaction is characterized), the - Corresponding Source conveyed under this section must be accompanied - by the Installation Information. But this requirement does not apply - if neither you nor any third party retains the ability to install - modified object code on the User Product (for example, the work has - been installed in ROM). - - The requirement to provide Installation Information does not include a - requirement to continue to provide support service, warranty, or updates - for a work that has been modified or installed by the recipient, or for - the User Product in which it has been modified or installed. 
Access to a - network may be denied when the modification itself materially and - adversely affects the operation of the network or violates the rules and - protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, - in accord with this section must be in a format that is publicly - documented (and with an implementation available to the public in - source code form), and must require no special password or key for - unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this - License by making exceptions from one or more of its conditions. - Additional permissions that are applicable to the entire Program shall - be treated as though they were included in this License, to the extent - that they are valid under applicable law. If additional permissions - apply only to part of the Program, that part may be used separately - under those permissions, but the entire Program remains governed by - this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option - remove any additional permissions from that copy, or from any part of - it. (Additional permissions may be written to require their own - removal in certain cases when you modify the work.) You may place - additional permissions on material, added by you to a covered work, - for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you - add to a covered work, you may (if authorized by the copyright holders of - that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further - restrictions" within the meaning of section 10. If the Program as you - received it, or any part of it, contains a notice stating that it is - governed by this License along with a term that is a further - restriction, you may remove that term. If a license document contains - a further restriction but permits relicensing or conveying under this - License, you may add to a covered work material governed by the terms - of that license document, provided that the further restriction does - not survive such relicensing or conveying. 
- - If you add terms to a covered work in accord with this section, you - must place, in the relevant source files, a statement of the - additional terms that apply to those files, or a notice indicating - where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the - form of a separately written license, or stated as exceptions; - the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly - provided under this License. Any attempt otherwise to propagate or - modify it is void, and will automatically terminate your rights under - this License (including any patent licenses granted under the third - paragraph of section 11). - - However, if you cease all violation of this License, then your - license from a particular copyright holder is reinstated (a) - provisionally, unless and until the copyright holder explicitly and - finally terminates your license, and (b) permanently, if the copyright - holder fails to notify you of the violation by some reasonable means - prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is - reinstated permanently if the copyright holder notifies you of the - violation by some reasonable means, this is the first time you have - received notice of violation of this License (for any work) from that - copyright holder, and you cure the violation prior to 30 days after - your receipt of the notice. - - Termination of your rights under this section does not terminate the - licenses of parties who have received copies or rights from you under - this License. If your rights have been terminated and not permanently - reinstated, you do not qualify to receive new licenses for the same - material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or - run a copy of the Program. 
Ancillary propagation of a covered work - occurring solely as a consequence of using peer-to-peer transmission - to receive a copy likewise does not require acceptance. However, - nothing other than this License grants you permission to propagate or - modify any covered work. These actions infringe copyright if you do - not accept this License. Therefore, by modifying or propagating a - covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically - receives a license from the original licensors, to run, modify and - propagate that work, subject to this License. You are not responsible - for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an - organization, or substantially all assets of one, or subdividing an - organization, or merging organizations. If propagation of a covered - work results from an entity transaction, each party to that - transaction who receives a copy of the work also receives whatever - licenses to the work the party's predecessor in interest had or could - give under the previous paragraph, plus a right to possession of the - Corresponding Source of the work from the predecessor in interest, if - the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the - rights granted or affirmed under this License. For example, you may - not impose a license fee, royalty, or other charge for exercise of - rights granted under this License, and you may not initiate litigation - (including a cross-claim or counterclaim in a lawsuit) alleging that - any patent claim is infringed by making, using, selling, offering for - sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this - License of the Program or a work on which the Program is based. The - work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims - owned or controlled by the contributor, whether already acquired or - hereafter acquired, that would be infringed by some manner, permitted - by this License, of making, using, or selling its contributor version, - but do not include claims that would be infringed only as a - consequence of further modification of the contributor version. For - purposes of this definition, "control" includes the right to grant - patent sublicenses in a manner consistent with the requirements of - this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free - patent license under the contributor's essential patent claims, to - make, use, sell, offer for sale, import and otherwise run, modify and - propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express - agreement or commitment, however denominated, not to enforce a patent - (such as an express permission to practice a patent or covenant not to - sue for patent infringement). To "grant" such a patent license to a - party means to make such an agreement or commitment not to enforce a - patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, - and the Corresponding Source of the work is not available for anyone - to copy, free of charge and under the terms of this License, through a - publicly available network server or other readily accessible means, - then you must either (1) cause the Corresponding Source to be so - available, or (2) arrange to deprive yourself of the benefit of the - patent license for this particular work, or (3) arrange, in a manner - consistent with the requirements of this License, to extend the patent - license to downstream recipients. "Knowingly relying" means you have - actual knowledge that, but for the patent license, your conveying the - covered work in a country, or your recipient's use of the covered work - in a country, would infringe one or more identifiable patents in that - country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or - arrangement, you convey, or propagate by procuring conveyance of, a - covered work, and grant a patent license to some of the parties - receiving the covered work authorizing them to use, propagate, modify - or convey a specific copy of the covered work, then the patent license - you grant is automatically extended to all recipients of the covered - work and works based on it. - - A patent license is "discriminatory" if it does not include within - the scope of its coverage, prohibits the exercise of, or is - conditioned on the non-exercise of one or more of the rights that are - specifically granted under this License. 
You may not convey a covered - work if you are a party to an arrangement with a third party that is - in the business of distributing software, under which you make payment - to the third party based on the extent of your activity of conveying - the work, and under which the third party grants, to any of the - parties who would receive the covered work from you, a discriminatory - patent license (a) in connection with copies of the covered work - conveyed by you (or copies made from those copies), or (b) primarily - for and in connection with specific products or compilations that - contain the covered work, unless you entered into that arrangement, - or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting - any implied license or other defenses to infringement that may - otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or - otherwise) that contradict the conditions of this License, they do not - excuse you from the conditions of this License. If you cannot convey a - covered work so as to satisfy simultaneously your obligations under this - License and any other pertinent obligations, then as a consequence you may - not convey it at all. For example, if you agree to terms that obligate you - to collect a royalty for further conveying from those to whom you convey - the Program, the only way you could satisfy both those terms and this - License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have - permission to link or combine any covered work with a work licensed - under version 3 of the GNU Affero General Public License into a single - combined work, and to convey the resulting work. 
The terms of this - License will continue to apply to the part which is the covered work, - but the special requirements of the GNU Affero General Public License, - section 13, concerning interaction through a network will apply to the - combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of - the GNU General Public License from time to time. Such new versions will - be similar in spirit to the present version, but may differ in detail to - address new problems or concerns. - - Each version is given a distinguishing version number. If the - Program specifies that a certain numbered version of the GNU General - Public License "or any later version" applies to it, you have the - option of following the terms and conditions either of that numbered - version or of any later version published by the Free Software - Foundation. If the Program does not specify a version number of the - GNU General Public License, you may choose any version ever published - by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future - versions of the GNU General Public License can be used, that proxy's - public statement of acceptance of a version permanently authorizes you - to choose that version for the Program. - - Later license versions may give you additional or different - permissions. However, no additional obligations are imposed on any - author or copyright holder as a result of your choosing to follow a - later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY - APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT - HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY - OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, - THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM - IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF - ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING - WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS - THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY - GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE - USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF - DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD - PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), - EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF - SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided - above cannot be given local legal effect according to their terms, - reviewing courts shall apply local law that most closely approximates - an absolute waiver of all civil liability in connection with the - Program, unless a warranty or assumption of liability accompanies a - copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest - possible use to the public, the best way to achieve this is to make it - free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest - to attach them to the start of each source file to most effectively - state the exclusion of warranty; and each file should have at least - the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - - Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short - notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - - The hypothetical commands `show w' and `show c' should show the appropriate - parts of the General Public License. Of course, your program's commands - might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, - if any, to sign a "copyright disclaimer" for the program, if necessary. - For more information on this, and how to apply and follow the GNU GPL, see - . - - The GNU General Public License does not permit incorporating your program - into proprietary programs. If your program is a subroutine library, you - may consider it more useful to permit linking proprietary applications with - the library. If this is what you want to do, use the GNU Lesser General - Public License instead of this License. But first, please read - . 
- - -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Science/Research -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Programming Language :: C -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.10 -Classifier: Programming Language :: Python :: 3.11 -Classifier: Programming Language :: Python :: 3.12 -Classifier: Programming Language :: Python :: 3.13 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Topic :: Software Development -Classifier: Topic :: Scientific/Engineering -Classifier: Typing :: Typed -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: POSIX -Classifier: Operating System :: Unix -Classifier: Operating System :: MacOS -Project-URL: homepage, https://numpy.org -Project-URL: documentation, https://numpy.org/doc/ -Project-URL: source, https://github.com/numpy/numpy -Project-URL: download, https://pypi.org/project/numpy/#files -Project-URL: tracker, https://github.com/numpy/numpy/issues -Project-URL: release notes, https://numpy.org/doc/stable/release -Requires-Python: >=3.10 -Description-Content-Type: text/markdown - -

- -


- - -[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)]( -https://numfocus.org) -[![PyPI Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=PyPI%20downloads)]( -https://pypi.org/project/numpy/) -[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)]( -https://anaconda.org/conda-forge/numpy) -[![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)]( -https://stackoverflow.com/questions/tagged/numpy) -[![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( -https://doi.org/10.1038/s41586-020-2649-2) -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) - - -NumPy is the fundamental package for scientific computing with Python. - -- **Website:** https://www.numpy.org -- **Documentation:** https://numpy.org/doc -- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion -- **Source code:** https://github.com/numpy/numpy -- **Contributing:** https://www.numpy.org/devdocs/dev/index.html -- **Bug reports:** https://github.com/numpy/numpy/issues -- **Report a security vulnerability:** https://tidelift.com/docs/security - -It provides: - -- a powerful N-dimensional array object -- sophisticated (broadcasting) functions -- tools for integrating C/C++ and Fortran code -- useful linear algebra, Fourier transform, and random number capabilities - -Testing: - -NumPy requires `pytest` and `hypothesis`. Tests can then be run after installation with: - - python -c "import numpy, sys; sys.exit(numpy.test() is False)" - -Code of Conduct ----------------------- - -NumPy is a community-driven open source project developed by a diverse group of -[contributors](https://numpy.org/teams/). 
The NumPy leadership has made a strong -commitment to creating an open, inclusive, and positive community. Please read the -[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact -with others in a way that makes our community thrive. - -Call for Contributions ----------------------- - -The NumPy project welcomes your expertise and enthusiasm! - -Small improvements or fixes are always appreciated. If you are considering larger contributions -to the source code, please contact us through the [mailing -list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. - -Writing code isn’t the only way to contribute to NumPy. You can also: -- review pull requests -- help us stay on top of new and old issues -- develop tutorials, presentations, and other educational materials -- maintain and improve [our website](https://github.com/numpy/numpy.org) -- develop graphic design for our brand assets and promotional materials -- translate website content -- help with outreach and onboard new contributors -- write grant proposals and help with other fundraising efforts - -For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/). -If you’re unsure where to start or how your skills fit in, reach out! You can -ask on the mailing list or here, on GitHub, by opening a new issue or leaving a -comment on a relevant issue that is already open. - -Our preferred channels of communication are all public, but if you’d like to -speak to us in private first, contact our community coordinators at -numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for -an invitation). - -We also have a biweekly community call, details of which are announced on the -mailing list. You are very welcome to join. - -If you are new to contributing to open source, [this -guide](https://opensource.guide/how-to-contribute/) helps explain why, what, -and how to successfully get involved. 
diff --git a/blimgui/dist64/numpy-2.2.5.dist-info/RECORD b/blimgui/dist64/numpy-2.2.5.dist-info/RECORD deleted file mode 100644 index e7a431e..0000000 --- a/blimgui/dist64/numpy-2.2.5.dist-info/RECORD +++ /dev/null @@ -1,1310 +0,0 @@ -../../bin/f2py.exe,sha256=j2-_Qrh2S8DB57Qz2Jm-XHAio1xf4i9fnNNLNfJE2Os,108393 -../../bin/numpy-config.exe,sha256=0C2ZoRvqmF16XepaNYJjdNL7LljiKoJkitCCmxBzUcA,108393 -numpy-2.2.5-cp313-cp313-win_amd64.whl,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy-2.2.5.dist-info/DELVEWHEEL,sha256=yA4L2DocBqSmGAGYy1e136BjU2zGyIjOh0x975mmPYM,446 -numpy-2.2.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -numpy-2.2.5.dist-info/LICENSE.txt,sha256=FCVsw6LJ0yrChNqWuTf-tE9y3ZC-4jF6wwIBZoRq2Z0,47709 -numpy-2.2.5.dist-info/METADATA,sha256=HUkV87vN7MofIyrgafDovg3IjwtVKgDwjiuGPBC6jBI,60844 -numpy-2.2.5.dist-info/RECORD,, -numpy-2.2.5.dist-info/WHEEL,sha256=suq8ARrxbiI7iLH3BgK-82uzxQ-4Hm-m8w01oCokrtA,85 -numpy-2.2.5.dist-info/entry_points.txt,sha256=4mXDNhJDQ9GHqMBeRJ8B3PlixTFmkXGqU3RVuac20q0,172 -numpy.libs/libscipy_openblas64_-43e11ff0749b8cbe0a615c9cf6737e0e.dll,sha256=Y0aHyl_t73bnhNjFT8S6L5ZaRMC2DXVMSRdFid6rgVc,20301824 -numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll,sha256=pMIim9wqKmMKzcCVtNhgCOXD47x3cxdDVPPaT1vrnN4,575056 -numpy/__config__.py,sha256=b9waj_PNmyxi3T3vC9OPP4eXFliE9CW-4bru_DJvoQY,5694 -numpy/__config__.pyi,sha256=b1FAG-TOWL9zaaazZoHWMp2OogmcZSTn3mkUJ5MRh3A,2479 -numpy/__init__.cython-30.pxd,sha256=bwqZBDOkt5Nce2EmeHd9mV1eGoLTSoctn2n-lCWKWxc,48041 -numpy/__init__.pxd,sha256=HUXemn5QF2YGUqOvJmqmFYpWZ64RPv5GwS66LG1eu5g,44591 -numpy/__init__.py,sha256=7hWQTVK6pC7OFv_2OtWAL20YBVI8vN7lgo2YnkTdEnM,23016 -numpy/__init__.pyi,sha256=d3zY0U5FGzmf-OimA-Ok3e3nQf7TM25F6uEZsC1dDN4,217782 -numpy/__pycache__/__config__.cpython-313.pyc,, -numpy/__pycache__/__init__.cpython-313.pyc,, -numpy/__pycache__/_array_api_info.cpython-313.pyc,, -numpy/__pycache__/_configtool.cpython-313.pyc,, 
-numpy/__pycache__/_distributor_init.cpython-313.pyc,, -numpy/__pycache__/_expired_attrs_2_0.cpython-313.pyc,, -numpy/__pycache__/_globals.cpython-313.pyc,, -numpy/__pycache__/_pytesttester.cpython-313.pyc,, -numpy/__pycache__/conftest.cpython-313.pyc,, -numpy/__pycache__/ctypeslib.cpython-313.pyc,, -numpy/__pycache__/dtypes.cpython-313.pyc,, -numpy/__pycache__/exceptions.cpython-313.pyc,, -numpy/__pycache__/matlib.cpython-313.pyc,, -numpy/__pycache__/version.cpython-313.pyc,, -numpy/_array_api_info.py,sha256=Qd_2x_pUQLdBtnPKodEZy2Zds-R5i2DKQacMmMVRaRk,10727 -numpy/_array_api_info.pyi,sha256=Y7SGdw5yxh4JQGeavwCaN2fpR7DR0KzU8GoOn7SKoiw,5102 -numpy/_configtool.py,sha256=CgdDWSv9AX6XNKIibBXBisvuCu0aUkVVKbNudJfERIw,1046 -numpy/_configtool.pyi,sha256=IlC395h8TlcZ4DiSW5i6NBQO9I74ERfXpwSYAktzoaU,25 -numpy/_core/__init__.py,sha256=ziVwv-eSrrG6jAQYH3eQcPtNsdRZaWBnvzKCj4MrtbA,5792 -numpy/_core/__init__.pyi,sha256=C5NQDIktXlR1OosGgyvY87pyotkyJr3Ci2dMWTLpSi4,88 -numpy/_core/__pycache__/__init__.cpython-313.pyc,, -numpy/_core/__pycache__/_add_newdocs.cpython-313.pyc,, -numpy/_core/__pycache__/_add_newdocs_scalars.cpython-313.pyc,, -numpy/_core/__pycache__/_asarray.cpython-313.pyc,, -numpy/_core/__pycache__/_dtype.cpython-313.pyc,, -numpy/_core/__pycache__/_dtype_ctypes.cpython-313.pyc,, -numpy/_core/__pycache__/_exceptions.cpython-313.pyc,, -numpy/_core/__pycache__/_internal.cpython-313.pyc,, -numpy/_core/__pycache__/_machar.cpython-313.pyc,, -numpy/_core/__pycache__/_methods.cpython-313.pyc,, -numpy/_core/__pycache__/_string_helpers.cpython-313.pyc,, -numpy/_core/__pycache__/_type_aliases.cpython-313.pyc,, -numpy/_core/__pycache__/_ufunc_config.cpython-313.pyc,, -numpy/_core/__pycache__/arrayprint.cpython-313.pyc,, -numpy/_core/__pycache__/cversions.cpython-313.pyc,, -numpy/_core/__pycache__/defchararray.cpython-313.pyc,, -numpy/_core/__pycache__/einsumfunc.cpython-313.pyc,, -numpy/_core/__pycache__/fromnumeric.cpython-313.pyc,, 
-numpy/_core/__pycache__/function_base.cpython-313.pyc,, -numpy/_core/__pycache__/getlimits.cpython-313.pyc,, -numpy/_core/__pycache__/memmap.cpython-313.pyc,, -numpy/_core/__pycache__/multiarray.cpython-313.pyc,, -numpy/_core/__pycache__/numeric.cpython-313.pyc,, -numpy/_core/__pycache__/numerictypes.cpython-313.pyc,, -numpy/_core/__pycache__/overrides.cpython-313.pyc,, -numpy/_core/__pycache__/printoptions.cpython-313.pyc,, -numpy/_core/__pycache__/records.cpython-313.pyc,, -numpy/_core/__pycache__/shape_base.cpython-313.pyc,, -numpy/_core/__pycache__/strings.cpython-313.pyc,, -numpy/_core/__pycache__/umath.cpython-313.pyc,, -numpy/_core/_add_newdocs.py,sha256=eQ_QDKS8SuavunWLZh9rz0QtRVtzDtrIhmc3OAgodXw,215729 -numpy/_core/_add_newdocs.pyi,sha256=ttPc9PlJ6lBkZrBrjzzWD4_jxmkIxpojL8RWR-d3e1c,171 -numpy/_core/_add_newdocs_scalars.py,sha256=TeVoRpAbqG46cLwGVf-PRK-cIt9qgAKstAI-nHs8abg,12992 -numpy/_core/_add_newdocs_scalars.pyi,sha256=qgD9RUeJdv6bkYewvQPXXCzO_roSKbaueq9PyvS6wSA,589 -numpy/_core/_asarray.py,sha256=3wUlbaCM-agtm5HVRzD6T2xiqNpafdZ77QVkgb-HCAw,4047 -numpy/_core/_asarray.pyi,sha256=vuCMO_o0RNeK0av8O5fvo93YOdxjJ2kgFeaw3GDobpY,1126 -numpy/_core/_dtype.py,sha256=itXloCOgln5qr5mMFvGA54AEpC0ueUA3qiEH6Z798O0,11108 -numpy/_core/_dtype.pyi,sha256=fVZoHORimwm-ck_pKiUx1RJvtSZoF7d5QxdGZI3ebVI,2009 -numpy/_core/_dtype_ctypes.py,sha256=ebN9U_QbymSP-ombYBYc4F7HtgC3ViucNW91MqpNhrM,3838 -numpy/_core/_dtype_ctypes.pyi,sha256=d5BudSdtj6n046OX9c-rUoX5zVGghdoO22yEhkjVRoM,3765 -numpy/_core/_exceptions.py,sha256=35d-to48ERMggcjK60hKzHYhZJUUAxWY1GcJWh9bPJE,5551 -numpy/_core/_exceptions.pyi,sha256=g4N5rEZf25Fbpu3AKAJn9c5MTlj671zZ6zWqPTd1Dnk,2219 -numpy/_core/_internal.py,sha256=f7PNtQIywHQYg7rGnL7Wgo27Wwswcwl1i5tlRKnjgmw,30127 -numpy/_core/_internal.pyi,sha256=sKos4TSABLgaoK1w_l06DewqULMQQIcNfQl6OPYBKPk,2726 -numpy/_core/_machar.py,sha256=TWlW2yOKVA7Vk-s9gusxRgumvgTdCcAPL_72k8SROd4,11921 -numpy/_core/_machar.pyi,sha256=g4N5rEZf25Fbpu3AKAJn9c5MTlj671zZ6zWqPTd1Dnk,2219 
-numpy/_core/_methods.py,sha256=QQaL40BLBbWChnmVaD9zYZtBqfxyufDMeYP1X1MTEUY,9725 -numpy/_core/_methods.pyi,sha256=J9wblAExV__OQipgX4HbG74DOK5p4Ec1I31yNwv5WWg,580 -numpy/_core/_multiarray_tests.cp313-win_amd64.lib,sha256=0qltFmDijs2NGUO1LiHxUNo08PXiTXBEVN0dLgjleaI,2418 -numpy/_core/_multiarray_tests.cp313-win_amd64.pyd,sha256=EFb6nms-JMCrh3AsFCrqfPfwPiEnxEfGKQyo4fXl5W8,63488 -numpy/_core/_multiarray_umath.cp313-win_amd64.lib,sha256=85FmS-czkWi5ivL4ECFpJ8OzCXpKs6MkGDgkjLQOXlc,2192 -numpy/_core/_multiarray_umath.cp313-win_amd64.pyd,sha256=GEp-c-d9fjDPbageuIihWVYRVKeHW_SvchaowKOPwKA,4176896 -numpy/_core/_operand_flag_tests.cp313-win_amd64.lib,sha256=ozPhoA5akD5sPujnk8i6jauxlZgVgYxj6xRWv0WzKa4,2228 -numpy/_core/_operand_flag_tests.cp313-win_amd64.pyd,sha256=chdCkdTwUR1dhRf1MTWx_pr4Wx_rDKQjBjRbSifIIm8,11776 -numpy/_core/_rational_tests.cp313-win_amd64.lib,sha256=GwnIVNxPv4d8HYjKZiD5NzePKI3_3eDA25TrIPeCq1o,2156 -numpy/_core/_rational_tests.cp313-win_amd64.pyd,sha256=sz13uHd5xfrTuCzH0K3fh9MNZPerF1b1WEcZ5JVamUA,40448 -numpy/_core/_simd.cp313-win_amd64.lib,sha256=ElUhc1ETeZmUTFjOnYuHCszZWETI98TNN6XgQtuVl9M,1976 -numpy/_core/_simd.cp313-win_amd64.pyd,sha256=srGEnbUDL1XbpMf3FU7kXwyJFZYTqYceO5phtUvEnOI,2236928 -numpy/_core/_simd.pyi,sha256=RN-uZiTi3jZNOgOOKlu97Mu1Ufkb8jvLUDDEnaW77Lc,694 -numpy/_core/_string_helpers.py,sha256=yqhYXnS3SgnP_4PvP7NUYvYJ7c5GeFJz8a8zI_uU0DI,2937 -numpy/_core/_string_helpers.pyi,sha256=bThH7ichGlrmQ6O38n71QcJ7Oi_mRPUf-DFMU5hdYU0,370 -numpy/_core/_struct_ufunc_tests.cp313-win_amd64.lib,sha256=JQw2wRTTHnL-bGj9RgL5_PNokiYAMdplD6OyaUf6QE0,2228 -numpy/_core/_struct_ufunc_tests.cp313-win_amd64.pyd,sha256=wIgVPB5f_7Ao1o-L8vo9VmoDRCFyGdCwqPXNEDF8NDQ,13824 -numpy/_core/_type_aliases.py,sha256=uUDC8quSr11Ld3MRI0-4Rm1fiX1GFvu_rn1PVADuoKE,3608 -numpy/_core/_type_aliases.pyi,sha256=YU1X6HEVmLcxqwswoZ_vdckwXcNPuyNhRxZgHAubMdA,2496 -numpy/_core/_ufunc_config.py,sha256=82EbRK2pOLuW9YZRbepA0u8-_xzuYbDi2Y0rHv5Lq1k,15513 
-numpy/_core/_ufunc_config.pyi,sha256=L0fOlCDoajz-jkPfFDa7xo28o_GJDCsm4GjgpSi3e_Y,1211 -numpy/_core/_umath_tests.cp313-win_amd64.lib,sha256=KilSBgG4HYONxhCElrajbzfmP1h0K5cQKl5Q300fqEI,2104 -numpy/_core/_umath_tests.cp313-win_amd64.pyd,sha256=huJwFBfVKUcD4O90Du5IwwCC4xFigLKb_-Y7YtOiuDY,34304 -numpy/_core/arrayprint.py,sha256=OY0l5ex3zDsf_sVW1XG2_LNX3779nOLWjcDY78e7n4Y,66568 -numpy/_core/arrayprint.pyi,sha256=DGL2dfAhzXXOzOCIe_dEErom1cKMVOW-TB_18WnQfho,7163 -numpy/_core/cversions.py,sha256=FISv1d4R917Bi5xJjKKy8Lo6AlFkV00WvSoB7l3acA4,360 -numpy/_core/defchararray.py,sha256=eOGMMdrk4pVbuGvv3EBjjcGBNusWrHK4tNVIpJO5Z7I,39212 -numpy/_core/defchararray.pyi,sha256=gIu3J0lelyWehaqPwUFwxgxlLtwSLMJWTVctGLS1yck,28058 -numpy/_core/einsumfunc.py,sha256=bsFwrvIcvknVoQuaTk91125O-HApybSIvEPahQD8VxE,54319 -numpy/_core/einsumfunc.pyi,sha256=fef7KF6KWLIhmBdbyDXt1J7g1C1QsMf0P05JKI7FGE4,5114 -numpy/_core/fromnumeric.py,sha256=0X8Dqwd61a4aovHetN0_zzIsac7pW7JOw7UK7UXrDVI,148176 -numpy/_core/fromnumeric.pyi,sha256=rwqQrnix67jCJ8I7YuIlVq_z-Kg_3UdBruLvIdTI4vI,42923 -numpy/_core/function_base.py,sha256=x3yPOA2m9lZMNC0qJrUNO65ddzgoISNe2mqzgJ0BIBM,20279 -numpy/_core/function_base.pyi,sha256=2wwLp3M55Vd6DCNCWzgk_DClf_0tOKyY5TbyNLE5YK0,5925 -numpy/_core/getlimits.py,sha256=d3G99QSPEYNn73Mzxab-v1RWwVzBp67V_TcXvJ6wbCs,26859 -numpy/_core/getlimits.pyi,sha256=3u55btDSVkpbsnFxkCWqRY7LZ1WhGop_LAUnjJfOUR8,64 -numpy/_core/include/numpy/__multiarray_api.c,sha256=Vc65MKuXE5761vVI9qdZkPyg3C5_k_ickum0Q04EOOA,13045 -numpy/_core/include/numpy/__multiarray_api.h,sha256=SzcxgIDQ8m4Ds1fvlM9fQ8RuINJpcPLRKzpb9HFDtpw,62996 -numpy/_core/include/numpy/__ufunc_api.c,sha256=NoTcyLqrAF8F3AE0TDvlDFS7DXuFJRpoINEaDnZWhys,1809 -numpy/_core/include/numpy/__ufunc_api.h,sha256=Q36B7NKN8E6GLytefgBOGLfgRnt8ayO1Conr2QWlqkA,13506 -numpy/_core/include/numpy/_neighborhood_iterator_imp.h,sha256=s5TK2aPpClbw4CbVJCij__hzoh5IgHIIZK0k6FKtqfc,1947 
-numpy/_core/include/numpy/_numpyconfig.h,sha256=mqDMFv5Vhk2nHXNf6TIWzz7ozrtc9aNaN8_LJZBYjX0,902 -numpy/_core/include/numpy/_public_dtype_api_table.h,sha256=4ylG8s52kZEx__QODt_7Do8QitmhDSvTeZ7Lar0fOgo,4660 -numpy/_core/include/numpy/arrayobject.h,sha256=ghWzloPUkSaVkcsAnBnpbrxtXeXL-mkzVGJQEHFxjnk,211 -numpy/_core/include/numpy/arrayscalars.h,sha256=4TrsilxaUiH4mVCkElEPTM_C_8c67O9R4Whx-3QzDE4,4439 -numpy/_core/include/numpy/dtype_api.h,sha256=cfQuPb0zrVqYFdWauOqbgdXR8rtm4DjNz2nbfSWvSRo,19718 -numpy/_core/include/numpy/halffloat.h,sha256=qYgX5iQfNzXICsnd0MCRq5ELhhfFjlRGm1xXGimQm44,2029 -numpy/_core/include/numpy/ndarrayobject.h,sha256=V5Zkf5a9vWyV8ZInBgAceBn7c9GK4aquhzeGTW_Sgls,12361 -numpy/_core/include/numpy/ndarraytypes.h,sha256=R3CFlGdGUQNj7rEovi8zRkJoJMk0y8682cu1kX1soAA,66986 -numpy/_core/include/numpy/npy_1_7_deprecated_api.h,sha256=eYbQlqb6mzJnUKuVfl2mmrMpvB3GN2rFgHazFO9CKT8,3858 -numpy/_core/include/numpy/npy_2_compat.h,sha256=VxsRXAtDfLlXkvH-ErZRSuH49k9EjcFwcSUSfTPRzAU,8795 -numpy/_core/include/numpy/npy_2_complexcompat.h,sha256=uW0iF-qMwQNn4PvIfWCrYce6b4OrYUO4BWu-VYYAZag,885 -numpy/_core/include/numpy/npy_3kcompat.h,sha256=dV01ltbxntPY8cN7WAL4MX3KHeyCLeSBDQreDxs09aQ,10022 -numpy/_core/include/numpy/npy_common.h,sha256=3njI4LhBxMZvkkdG3nLq0NZI7lNqx9dnvcTfCgEW0rI,37621 -numpy/_core/include/numpy/npy_cpu.h,sha256=FSFhzOQ_lvcpGw-4Qtzqu5W8eiD6k_K351_9WiI5uTg,4837 -numpy/_core/include/numpy/npy_endian.h,sha256=NZSi-5CbqZ92AUztILDJLBKP61-VQezmAatYTNLwRu8,2912 -numpy/_core/include/numpy/npy_math.h,sha256=ItgOGoKdQan93epl_EPF9Rm6M5Mis6xW__PbPIZsENA,19492 -numpy/_core/include/numpy/npy_no_deprecated_api.h,sha256=jIcjEP2AbovDTfgE-qtvdP51_dVGjVnEGBX86rlGSKE,698 -numpy/_core/include/numpy/npy_os.h,sha256=j044vd1C1oCcW52r3htiVNhUaJSEqCjKrODwMHq3TU0,1298 -numpy/_core/include/numpy/numpyconfig.h,sha256=zDucznj2xbSOImOJLhjYHosMFqkoWY6AZvdRDpUrcw8,7339 -numpy/_core/include/numpy/random/LICENSE.txt,sha256=1UR2FVi1EIZsIffootVxb8p24LmBF-O2uGMU23JE0VA,1039 
-numpy/_core/include/numpy/random/bitgen.h,sha256=_H0uXqmnub4PxnJWdMWaNqfpyFDu2KB0skf2wc5vjUc,508 -numpy/_core/include/numpy/random/distributions.h,sha256=GLURa3sFESZE0_0RK-3Gqmfa96itBHw8LlsNyy9EPt4,10070 -numpy/_core/include/numpy/random/libdivide.h,sha256=F9PLx6TcOk-sd0dObe0nWLyz4HhbHv2K7voR_kolpGU,82217 -numpy/_core/include/numpy/ufuncobject.h,sha256=PO10lEoSvptYe57rrGpMiF0tytnmq9PW2UeMIwN06MY,12265 -numpy/_core/include/numpy/utils.h,sha256=vzJAbatJYfxHmX2yL_xBirmB4mEGLOhJ92JlV9s8yPs,1222 -numpy/_core/lib/npy-pkg-config/mlib.ini,sha256=hYWFyoBxE036dh19si8UPka01H2cv64qlc4ZtgoA_7A,156 -numpy/_core/lib/npy-pkg-config/npymath.ini,sha256=e0rdsb00Y93VuammuvIIFlzZtnUAXwsS1XNKlCU8mFQ,381 -numpy/_core/lib/npymath.lib,sha256=MB9a7L_emxqUHmgfAz8AQQiM9kBdQTw2v9QH9xWUVR4,154174 -numpy/_core/lib/pkgconfig/numpy.pc,sha256=OfqoWh0L4-MyoQBagVOB1dHnSfcOyiO3Lo3FKmm4Z2k,198 -numpy/_core/memmap.py,sha256=gtXDgEHkmzuCjwPpYFT5gkFzziYC41JAGYuKNDo2CfI,13025 -numpy/_core/memmap.pyi,sha256=n0kBe4iQD5lcWvAvVhdUU18YIoPX6Sf5e2qh9IdO5uQ,50 -numpy/_core/multiarray.py,sha256=-cH3HHzztYiSSESwdN7zcgqXk1L_AREuimMRJBk_cTE,59891 -numpy/_core/multiarray.pyi,sha256=YEdOhn2NyCGHLQRgpe1Gd45xBHFlJSddPsHiesSWmAM,34751 -numpy/_core/numeric.py,sha256=81YhZQ2LpEvj5Unv5EUlbXWktmkGR9dN2CvOWEjYukE,84439 -numpy/_core/numeric.pyi,sha256=JVDzjcZBz_VOzHxL9cYux2LPrvq9Fg7ZI6fOiLciiAA,20094 -numpy/_core/numerictypes.py,sha256=o2lhl1SFpGuDeIGFzfgS2zLWDwIAt41gi3VphVd9Cwg,16754 -numpy/_core/numerictypes.pyi,sha256=Nuq8mEo62a0A1-6HJNTrONN7b9hkUNSp-HTRP28rYkc,3750 -numpy/_core/overrides.py,sha256=gHrmLDRjVyi3poY5qYVrsAP9QNvvhXtiW1ggx5gwgGs,7392 -numpy/_core/overrides.pyi,sha256=eSG0Xzxm7wutARSaQ_mmHPRDX-xpFHM0TAVheebF3S8,1793 -numpy/_core/printoptions.py,sha256=btxyfisjJ_7DB5JtZKAtaLYL9qmcmTnnJ8pHFcwn2Wc,1095 -numpy/_core/printoptions.pyi,sha256=QE36MVL3BgqflyQuj6UOzywbnELMiLeyNz_1sALvOSU,622 -numpy/_core/records.py,sha256=tQof8zOMoY8UXn4ZtoIU8O6lPj8pUsDQFCOkZRAat9A,37953 
-numpy/_core/records.pyi,sha256=9df4GE_hkYVI-IlqCI6cxJHtYvuQ8TUwsXSjM_U8fOI,9104 -numpy/_core/shape_base.py,sha256=ZROh3EbNF1IuHLF_1ecJ0NXpwUf__RXw8M2bWC1IO0Q,33887 -numpy/_core/shape_base.pyi,sha256=vIF5OAbTOYBsImf_9BdIlT115WNburRg2KAFUV9ZMIs,4720 -numpy/_core/strings.py,sha256=ZAKXDmpIzKReWYaUl-F4h5Fht4VVJ-z0VQpRV-Wf0kc,47313 -numpy/_core/strings.pyi,sha256=XfkkJ03nRnTkqZop0LKUE_K-jVhrA4RFh9z-LHGQMng,13270 -numpy/_core/tests/__pycache__/_locales.cpython-313.pyc,, -numpy/_core/tests/__pycache__/_natype.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test__exceptions.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_abc.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_api.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_argparse.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_array_api_info.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_array_coercion.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_array_interface.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_arraymethod.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_arrayobject.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_arrayprint.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_casting_unittests.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_conversion_utils.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_cpu_features.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_cython.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_datetime.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_defchararray.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_deprecations.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_dlpack.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_dtype.cpython-313.pyc,, 
-numpy/_core/tests/__pycache__/test_einsum.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_errstate.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_extint128.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_function_base.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_getlimits.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_half.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_hashtable.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_indexerrors.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_indexing.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_item_selection.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_limited_api.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_longdouble.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_machar.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_mem_overlap.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_mem_policy.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_memmap.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_multiarray.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_multithreading.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_nditer.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_numeric.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_numerictypes.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_overrides.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_print.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_protocols.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_records.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_regression.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_scalar_methods.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-313.pyc,, 
-numpy/_core/tests/__pycache__/test_scalarinherit.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_scalarmath.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_scalarprint.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_shape_base.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_simd.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_simd_module.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_stringdtype.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_strings.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_ufunc.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_umath.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_umath_complex.cpython-313.pyc,, -numpy/_core/tests/__pycache__/test_unicode.cpython-313.pyc,, -numpy/_core/tests/_locales.py,sha256=xsKJqT3ZZiJGLQbm4Xx1W2i9KLqx14oQE9wUa49PkJ8,2248 -numpy/_core/tests/_natype.py,sha256=uVXHCahmyDbZZAaQ-OKqaWnOgJRIYRETU06drssSSP0,6457 -numpy/_core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 -numpy/_core/tests/data/generate_umath_validation_data.cpp,sha256=9TBdxpPo0djv1CKxQ6_DbGKRxIZVawitAm7AMmWKroI,6012 -numpy/_core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 -numpy/_core/tests/data/umath-validation-set-README.txt,sha256=GfrkmU_wTjpLkOftWDuGayEDdV3RPpN2GRVQX61VgWI,982 -numpy/_core/tests/data/umath-validation-set-arccos.csv,sha256=VUdQdKBFrpXHLlPtX2WYIK_uwkaXgky85CZ4aNuvmD4,62794 -numpy/_core/tests/data/umath-validation-set-arccosh.csv,sha256=tbuOQkvnYxSyJf_alGk3Zw3Vyv0HO5dMC1hUle2hWwQ,62794 -numpy/_core/tests/data/umath-validation-set-arcsin.csv,sha256=JPEWWMxgPKdNprDq0pH5QhJ2oiVCzuDbK-3WhTKny8o,62768 -numpy/_core/tests/data/umath-validation-set-arcsinh.csv,sha256=fwuq25xeS57kBExBuSNfewgHb-mgoR9wUGVqcOXbfoI,61718 
-numpy/_core/tests/data/umath-validation-set-arctan.csv,sha256=nu33YyL-ALXSSF5cupCTaf_jTPLK_QyUfciNQGpffkY,61734 -numpy/_core/tests/data/umath-validation-set-arctanh.csv,sha256=wHSKFY2Yvbv3fnmmfLqPYpjhkEM88YHkFVpZQioyBDw,62768 -numpy/_core/tests/data/umath-validation-set-cbrt.csv,sha256=FFi_XxEnGrfJd7OxtjVFT6WFC2tUqKhVV8fmQfb0z8o,62275 -numpy/_core/tests/data/umath-validation-set-cos.csv,sha256=ccDri5_jQ84D_kAmSwZ_ztNUPIhzhgycDtNsPB7m8dc,60497 -numpy/_core/tests/data/umath-validation-set-cosh.csv,sha256=DnN6RGvKQHAWIofchmhGH7kkJej2VtNwGGMRZGzBkTQ,62298 -numpy/_core/tests/data/umath-validation-set-exp.csv,sha256=mPhjF4KLe0bdwx38SJiNipD24ntLI_5aWc8h-V0UMgM,17903 -numpy/_core/tests/data/umath-validation-set-exp2.csv,sha256=sD94pK2EAZAyD2fDEocfw1oXNw1qTlW1TBwRlcpbcsI,60053 -numpy/_core/tests/data/umath-validation-set-expm1.csv,sha256=tyfZN5D8tlm7APgxCIPyuy774AZHytMOB59H9KewxEs,61728 -numpy/_core/tests/data/umath-validation-set-log.csv,sha256=CDPky64PjaURWhqkHxkLElmMiI21v5ugGGyzhdfUbnI,11963 -numpy/_core/tests/data/umath-validation-set-log10.csv,sha256=dW6FPEBlRx2pcS-7eui_GtqTpXzOy147il55qdP-8Ak,70551 -numpy/_core/tests/data/umath-validation-set-log1p.csv,sha256=2aEsHVcvRym-4535CkvJTsmHywkt01ZMfmjl-d4fvVI,61732 -numpy/_core/tests/data/umath-validation-set-log2.csv,sha256=aVZ7VMQ5urGOx5MMMOUmMKBhFLFE-U7y6DVCTeXQfo0,70546 -numpy/_core/tests/data/umath-validation-set-sin.csv,sha256=GvPrQUEYMX1iB2zjbfK26JUJOxtqbfiRUgXuAO1QcP0,59981 -numpy/_core/tests/data/umath-validation-set-sinh.csv,sha256=lc7OYcYWWpkxbMuRAWmogQ5cKi7EwsQ2ibiMdpJWYbw,61722 -numpy/_core/tests/data/umath-validation-set-tan.csv,sha256=fn7Dr9s6rcqGUzsmyJxve_Z18J4AUaSm-uo2N3N_hfk,61728 -numpy/_core/tests/data/umath-validation-set-tanh.csv,sha256=xSY5fgfeBXN6fal4XDed-VUcgFIy9qKOosa7vQ5v1-U,61728 -numpy/_core/tests/examples/cython/__pycache__/setup.cpython-313.pyc,, -numpy/_core/tests/examples/cython/checks.pyx,sha256=sGva3PIcoahXBIF3QkBWPjVnEWHPGQM1ktNp8OYEwUo,8183 
-numpy/_core/tests/examples/cython/meson.build,sha256=EaUdTgpleUBROExDaFVMnWIYW4XDxFLFGK9ej_pTtQg,1311 -numpy/_core/tests/examples/cython/setup.py,sha256=tPQ9m6dr48JSvLpgmV-aVnMWMV0islzlSrynB5yGYDY,894 -numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-313.pyc,, -numpy/_core/tests/examples/limited_api/limited_api1.c,sha256=RcHe_nyyjv86gjF9E53cexQiGW-YNs8OGGqjrxCFhBc,363 -numpy/_core/tests/examples/limited_api/limited_api2.pyx,sha256=4P5-yu0yr8NBa-TFtw4v30LGjccRroRAQFFLaztEK9I,214 -numpy/_core/tests/examples/limited_api/limited_api_latest.c,sha256=drvrNSyOeF0Or0trDmayJWllTP7c4Nzpp9T0ydwPAGo,471 -numpy/_core/tests/examples/limited_api/meson.build,sha256=yitMzLuGDhWCjyavpm5UEBrhwKnfXOVAxA3ZL7PlB0Q,1686 -numpy/_core/tests/examples/limited_api/setup.py,sha256=N7kqsVp4iIE20IebigEJUW3nW2F0l6Vthb5qNvKHBmM,457 -numpy/_core/tests/test__exceptions.py,sha256=gy7-mZq7XS5z_w-us4gRIzC0H7XqC_62xaQQmWqLzSw,2970 -numpy/_core/tests/test_abc.py,sha256=u82wrSKXJ2V7AmNrh4klHxYiqOx0BYWJ4j7hqTMH--A,2275 -numpy/_core/tests/test_api.py,sha256=bURvc6MoIHlij2SrsgiDyafAQ-AyQDFFmbIzr-yo8Es,23546 -numpy/_core/tests/test_argparse.py,sha256=vPctuxToPkZMlbgjnzE924XkxXYUdBxlR6LsP2_-aQM,2914 -numpy/_core/tests/test_array_api_info.py,sha256=7n9-LJv-wgAMVbfK1JG7dQAU2WBYQbO7yeN4rP38Ltg,3174 -numpy/_core/tests/test_array_coercion.py,sha256=vG5HHfLgl1HcP6oemFxvpYqibS0eWqRAKCxLCiZBjaY,35744 -numpy/_core/tests/test_array_interface.py,sha256=E6QR-DJYTJX_F-i70PakQMmvxzfSBD-W1rFve70MFTg,7986 -numpy/_core/tests/test_arraymethod.py,sha256=b7DeRtgzSCTzoPiS1BT1Wwvpr31g_YP44Dd4V6uaR-U,3339 -numpy/_core/tests/test_arrayobject.py,sha256=cQu4aDjyF6EgoiGe5UISyOHGx5QEkdGvbfCXVuKjHQ8,2671 -numpy/_core/tests/test_arrayprint.py,sha256=RERzgbVQ3mumSGJZhYV9LesTkcsMH8TrHMBPmPwkBTg,50349 -numpy/_core/tests/test_casting_floatingpoint_errors.py,sha256=FRRWJBppa5v1axij6L14ENmzoZS8R_SyJKgHiAFI2KQ,5228 -numpy/_core/tests/test_casting_unittests.py,sha256=FCokQoS_56dOoBjq1WSp2UVE2NE9WS2w2u-4xNBQjMM,35126 
-numpy/_core/tests/test_conversion_utils.py,sha256=cz2WEiCYSEP9m_7RHa2pS8WW0PcWO0E-LvpLTO72PkE,6814 -numpy/_core/tests/test_cpu_dispatcher.py,sha256=Bpb_ep7kT3OfNypV1pSOWCNlk8oT46kjZBEGS32qfCI,1597 -numpy/_core/tests/test_cpu_features.py,sha256=GO_Uf6FAK2pX8kiI9R1Uv7oEFzuPVzp3hu3bm6cZCuU,15838 -numpy/_core/tests/test_custom_dtypes.py,sha256=JEeRO7ykZSPYDTlKEV4EWjlMWJwEWieiCCsN4aEd0WA,11934 -numpy/_core/tests/test_cython.py,sha256=bgJxvIlQlMxxFA-Hqlgqo7NIvEKPZ7FvnY0av-EBfoM,8923 -numpy/_core/tests/test_datetime.py,sha256=3KyUxJqrgCRvUomM3Pxw_wCodRjWYRcp-92Bggiqk2U,124376 -numpy/_core/tests/test_defchararray.py,sha256=iOO8AUBOwhadTI8UUlOw-tI0Dd0l4k1rLY9gWFuQLbw,31423 -numpy/_core/tests/test_deprecations.py,sha256=y76kFkCfXImnwdjBA6WJSSKPtFl_d6K3O-hCRT6_LAQ,29255 -numpy/_core/tests/test_dlpack.py,sha256=MKc9PdCk1Tboz8RpMbrZzLFwkTEEEj-wQ5kZ1NeHtNI,5990 -numpy/_core/tests/test_dtype.py,sha256=aQLvZkJzZX5qt9uGfXilf-o8xlIQ8p-ZDBUczLQnED8,79365 -numpy/_core/tests/test_einsum.py,sha256=xOIA5Co2_FxHkzr0b-acAW5f_FOtnGDhQvS4gB1mLdM,54119 -numpy/_core/tests/test_errstate.py,sha256=MjV1p7tDq7LpAgIT730lMNVFsxN0RVK4CtRJgBkpVlk,4763 -numpy/_core/tests/test_extint128.py,sha256=YKIX0q9ENW0qehJtdaAAB2sFG0me42U2yJmq0kK6xGQ,5863 -numpy/_core/tests/test_function_base.py,sha256=1FGoTuZLK_r0567gNARFIXqLhIY6QA9mqkh6rMGMLNw,17950 -numpy/_core/tests/test_getlimits.py,sha256=k9_TaYqBCL-OvYpyvWAoTxpCwZSpxYFvvr2R7vuPEeg,7180 -numpy/_core/tests/test_half.py,sha256=7M6VWJnBU7pnpGuoZc1hiltB5-rn9PkDEXI-EmtNKSA,24880 -numpy/_core/tests/test_hashtable.py,sha256=-Zl-uppJbc9kwPN_ZlxJMA76yAQKsgGmQQWI8Y-sxaM,1182 -numpy/_core/tests/test_indexerrors.py,sha256=keWclNvFu3ukhVSXc97w3bJM8BvkOpul6kjNudf1F2Q,4858 -numpy/_core/tests/test_indexing.py,sha256=jbYs0Mdj_4w1XRBnrBCbUzZR9o1vdT3qHvo0YE3-yas,56741 -numpy/_core/tests/test_item_selection.py,sha256=zaGuMcTDsbCpQO1k9c9xuc4jUWhbArfn_1INfilf9hk,6623 -numpy/_core/tests/test_limited_api.py,sha256=oz7wOz7VRbrsP_60SaCiMl69GZlz-3J4b6S_9GsjF7A,3404 
-numpy/_core/tests/test_longdouble.py,sha256=kcu2DpPuw-j0on0INw-LNMOjw4wuXI_fPbvn-9n-Oks,14285 -numpy/_core/tests/test_machar.py,sha256=z0mwyf6ASFI-gtMemFAag-8eEXKjb12mZ1BSpLYA52Q,1099 -numpy/_core/tests/test_mem_overlap.py,sha256=fZMHusU29yuYAdMqkmLcfj209q8xjaY23IxwBPSUnoE,30071 -numpy/_core/tests/test_mem_policy.py,sha256=Avw90zmQ5zjIvecpG0hV50UcKMaxVYkbmWQSdNuT6iA,17109 -numpy/_core/tests/test_memmap.py,sha256=4PvMpV7EpYuCAlPkO1s8TiME75_G_V1toBm0ADizLpY,8372 -numpy/_core/tests/test_multiarray.py,sha256=Su3uKM4LhZuMRhn8kyrwGqg-CZe93GRgaPgm0hrBZBs,402650 -numpy/_core/tests/test_multithreading.py,sha256=P6JP2x-YqSU6gnzLGtK2VJ1mWeoJP7i-zxPpU46EDxU,8899 -numpy/_core/tests/test_nditer.py,sha256=VrX91QX1nd3pWNM8MqxwYkDk_7swfuW0IACB86FBNVA,134550 -numpy/_core/tests/test_nep50_promotions.py,sha256=NbdzCpLbwWHWUXBk4JxM5FFIa-YqibgHV2rEawS3h2U,10354 -numpy/_core/tests/test_numeric.py,sha256=ol8-6PemWKQAGxpNGIgb-HjAwA13GU3IX4CBY84Wdms,162700 -numpy/_core/tests/test_numerictypes.py,sha256=hQ1YqasQ6mq--7fnKO08koqVrnsK2IwloWcdElKB7U4,23912 -numpy/_core/tests/test_overrides.py,sha256=_FsqndcyRN3r0JOp3Nn7_xAZPvGEypReo9PAdofeavY,28733 -numpy/_core/tests/test_print.py,sha256=HhOMC4roNrnXdncgpXPmFgsJWwcRpCc9u3KOIMSRxDw,7038 -numpy/_core/tests/test_protocols.py,sha256=19yXLJKXmy1-ArAclZ-F7PtgN6LmIHbiPvg6Vi2PkJk,1234 -numpy/_core/tests/test_records.py,sha256=beGD-yv67DC-eav0VNeGLh06uIMwKp3IDEV-i2KySN4,21074 -numpy/_core/tests/test_regression.py,sha256=5msZd_gdmBisnQrvKEysm-Nwjz84stp0n_c-Oqjmt3k,98058 -numpy/_core/tests/test_scalar_ctors.py,sha256=CrPYj6xo5A52VVqecc9S8Q0JQWPPyU2pND5KUNX_-pw,6923 -numpy/_core/tests/test_scalar_methods.py,sha256=CQARDMdU_T8LBg1sAdJ6PmRalpAK2CFSMH37AvLCmW0,9388 -numpy/_core/tests/test_scalarbuffer.py,sha256=0d8LgyIclgmi4wJM218frNDXa4V408McDFDkieBpJFo,5735 -numpy/_core/tests/test_scalarinherit.py,sha256=0JukiC7eR6NwWZgFy-YBmAXYIaA2BmudgY3Rt8ziX-I,2693 -numpy/_core/tests/test_scalarmath.py,sha256=2A6CgAaeFUEqUBo4beyWi6UBwA-eyaE_0Is364Rj3cQ,47796 
-numpy/_core/tests/test_scalarprint.py,sha256=7BJSHWTeVvKtzt_IUgCgYcSp8uwTxbUlplGmV2edNHE,21058 -numpy/_core/tests/test_shape_base.py,sha256=rqKjKS69o6NPEtqVUvRm4vc2zlrJ87QEy4PipMDlMXI,31842 -numpy/_core/tests/test_simd.py,sha256=1KRDlvrx6MGvBLcFvGESoN5DSxQq0GKcvZeSrtRFL1Y,49985 -numpy/_core/tests/test_simd_module.py,sha256=s22tqYtgN0x-5B3HTXiGfIV2aTXyQQH18c1fYj6VRhg,4004 -numpy/_core/tests/test_stringdtype.py,sha256=B8B5ZhCCwWAbMa0xKJ9k3R02nBrGys_NPDccDeOZeT4,59491 -numpy/_core/tests/test_strings.py,sha256=G0gAIyOpSHUaFfEUl30h3JRouIw4fz42ibatl9Gcv00,52956 -numpy/_core/tests/test_ufunc.py,sha256=CthK8X2cvLRuASHDY4N1OHgqcN1pp5s1JZnd0VBA52U,135606 -numpy/_core/tests/test_umath.py,sha256=jgOcWeFjvQElbDJHdW0Eb1_IynBS2Kqp3Y2hpDmfUzM,198085 -numpy/_core/tests/test_umath_accuracy.py,sha256=ZW-NBEcRBWtbjzhPmk9fSpN3skQBuMgEoHS87zLmedk,5593 -numpy/_core/tests/test_umath_complex.py,sha256=ZRnJuFo6DQPz5tdUUZyHSamtaI2BFlLXzz6AtlILVIw,23912 -numpy/_core/tests/test_unicode.py,sha256=Y5VSmuMrpzuN9lNGB9gNDkUCl1c6qhiQRp2_kOc2V50,13221 -numpy/_core/umath.py,sha256=3_OTbmiMhaVNsxpH2xKV9l6za59rYXBry6gb1WgJaI0,2133 -numpy/_core/umath.pyi,sha256=9o4EBYeibP9abowHQHuo0iuhbUnfTWw5c8utNmKEduo,2840 -numpy/_distributor_init.py,sha256=ahBbZPz-mGZrmwx35FHQ26AiinST78FxvupiBBKGFp4,422 -numpy/_distributor_init.pyi,sha256=CSrbSp2YYxHTxlX7R0nT3RpH7EloB1wIvo7YOA7QWy8,28 -numpy/_expired_attrs_2_0.py,sha256=uPUSplSC6_x_NhynoAup3ZCf3ydt7MGrBNwJc2dBiL8,3983 -numpy/_expired_attrs_2_0.pyi,sha256=ZHjc6ZjYC1jKXXwLh4wylr6P1bYnlQ75sUigDNqaXoA,1332 -numpy/_globals.py,sha256=FWUxIto9hQ5Mi2NoxP6DeGpI3bgS8H9xq7jfzaVLtG0,3185 -numpy/_globals.pyi,sha256=kst3Vm7ZbznOtHsPya0PzU0KbjRGZ8xhMmTNMafvT-4,297 -numpy/_pyinstaller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/_pyinstaller/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/_pyinstaller/__pycache__/__init__.cpython-313.pyc,, -numpy/_pyinstaller/__pycache__/hook-numpy.cpython-313.pyc,, 
-numpy/_pyinstaller/hook-numpy.py,sha256=GFGizYFjd9HsYMOtby7gew94CkvTrRW77ECGPNUgGGc,1429 -numpy/_pyinstaller/hook-numpy.pyi,sha256=2Bcwj2FwR3bRdtm26pmpUELEhsiZ58tQv9Q7_1Yp3HU,362 -numpy/_pyinstaller/tests/__init__.py,sha256=ZKqNjqlKw1pYiv57onbjDJnJdVrLawbZAcl-mPZzcSw,345 -numpy/_pyinstaller/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-313.pyc,, -numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-313.pyc,, -numpy/_pyinstaller/tests/pyinstaller-smoke.py,sha256=xt3dl_DjxuzVTPrqmVmMOZm5-24wBG2TxldQl78Xt1g,1175 -numpy/_pyinstaller/tests/test_pyinstaller.py,sha256=31zWlvlAC2sfhdew97x8aDvcYUaV3Tc_0CwFk8pgKaM,1170 -numpy/_pytesttester.py,sha256=6Ii-VI4uz3wiQ5pzNZKdvUT6LOoN868rNzVff7rTlAk,6525 -numpy/_pytesttester.pyi,sha256=Cy1rd-sv9DvmAAEKREy9VI0hYTWVpA_MoBRVmzDyvcY,515 -numpy/_typing/__init__.py,sha256=eXfdON-ITGAVjpprnjbZC9kvco7c-aAolc377D2lqWE,5201 -numpy/_typing/__pycache__/__init__.cpython-313.pyc,, -numpy/_typing/__pycache__/_add_docstring.cpython-313.pyc,, -numpy/_typing/__pycache__/_array_like.cpython-313.pyc,, -numpy/_typing/__pycache__/_char_codes.cpython-313.pyc,, -numpy/_typing/__pycache__/_dtype_like.cpython-313.pyc,, -numpy/_typing/__pycache__/_extended_precision.cpython-313.pyc,, -numpy/_typing/__pycache__/_nbit.cpython-313.pyc,, -numpy/_typing/__pycache__/_nbit_base.cpython-313.pyc,, -numpy/_typing/__pycache__/_nested_sequence.cpython-313.pyc,, -numpy/_typing/__pycache__/_scalars.cpython-313.pyc,, -numpy/_typing/__pycache__/_shape.cpython-313.pyc,, -numpy/_typing/__pycache__/_ufunc.cpython-313.pyc,, -numpy/_typing/_add_docstring.py,sha256=YPYjlxfqC8kXM_amtLyKXJ4aSNKJcmLY-It_PnW1-l4,4148 -numpy/_typing/_array_like.py,sha256=OQB9L3K3TYX6_PRCtvUfy8BgYR1vYCsIkgIqlCTlkkk,5757 -numpy/_typing/_callable.pyi,sha256=fTq5cTBoaRZwl3jFCk_G1eYSEyxHrpCR5-VoQJMMzZM,12176 -numpy/_typing/_char_codes.py,sha256=Qj3t7j_gjoy7ECmVZzz3b5nVMGJBKU5tjPZJ1peGozo,9000 
-numpy/_typing/_dtype_like.py,sha256=on_sUDNeC6eQ6Vrlsi4JSJAfxGmIj2M3zhOx0yJtN0Q,6213 -numpy/_typing/_extended_precision.py,sha256=5PhjET4NkRp-LSgffJqfcZ1C5Cp-xERB14FNXfUvRkU,804 -numpy/_typing/_nbit.py,sha256=4E8E67SkSewPvDR15I68KEOneF8gsc97mFEe9oYBcdQ,651 -numpy/_typing/_nbit_base.py,sha256=nN822ixIvBtkyDptX_LESrXoDZ4jjym5ph2FU6APEnk,2980 -numpy/_typing/_nested_sequence.py,sha256=CjG49p-dxretKeShOiyVvTqOoyM_mNyhXArIYY6nBh4,2697 -numpy/_typing/_scalars.py,sha256=sKaaEEZqAQtiEijeuH4U5KPNpG7FYsBtGO73l9dti9Q,1058 -numpy/_typing/_shape.py,sha256=3g0rNpZHxM7rPInBJMSGpbVD9Y0Lw1QtkFEN_yrWEeo,238 -numpy/_typing/_ufunc.py,sha256=SxToNG-O5NumF_yV7JRzAXloNbokV8B8JZC_EcbeNFk,160 -numpy/_typing/_ufunc.pyi,sha256=mhArbicCGnkzBdfyPxLwf6oyAPyR5T2C-jBOk9S3Vq8,27651 -numpy/_utils/__init__.py,sha256=mO41ldWPOHJkTkY-acmJ_8wM89IKRv2lWwUe_5XlUWo,3379 -numpy/_utils/__init__.pyi,sha256=Pmoon5FkvkkbGNr0Xzx6i6i8aoehSnLFlBXNmBRXwi0,769 -numpy/_utils/__pycache__/__init__.cpython-313.pyc,, -numpy/_utils/__pycache__/_convertions.cpython-313.pyc,, -numpy/_utils/__pycache__/_inspect.cpython-313.pyc,, -numpy/_utils/__pycache__/_pep440.cpython-313.pyc,, -numpy/_utils/_convertions.py,sha256=vetZFqC1qB-Z9jvc7RKuU_5ETOaSbjhbKa-sVwYV8TU,347 -numpy/_utils/_convertions.pyi,sha256=zkZfkdBk6-XcyD3zmr7E5sJbYasvyDCInUtWvrtjVhY,122 -numpy/_utils/_inspect.py,sha256=bSIacEhHLtYjTXaMVp1XFPY2IZfybb5bg8X5dYgc8JM,7626 -numpy/_utils/_inspect.pyi,sha256=H1QZ7zEgYyG9kwpfz8cEUhF_QfM96WVBoerQtJZNVDI,2326 -numpy/_utils/_pep440.py,sha256=y5Oppq3Kxn2dH3EWBYSENv_j8XjGUXWvNAiNCEJ-euI,14556 -numpy/_utils/_pep440.pyi,sha256=LdpDFW8iIj_bLbuTbvRr2XWmC9YS9lrpzLR7efqL2GU,3991 -numpy/char/__init__.py,sha256=oQZSAOs7rHme6CxfdL9nraYRNI3NU18MjzQ4kQmK2kA,95 -numpy/char/__init__.pyi,sha256=wolX_qE2bjsIcUfQrQzGjzkaqdMtuGWOVDA3q-2Jqj0,1650 -numpy/char/__pycache__/__init__.cpython-313.pyc,, -numpy/compat/__init__.py,sha256=oqsQeYKpQuJpuTLqMkZX6ssqQfSXs0Joj_S8Ms9KSNU,756 -numpy/compat/__pycache__/__init__.cpython-313.pyc,, 
-numpy/compat/__pycache__/py3k.cpython-313.pyc,, -numpy/compat/py3k.py,sha256=wcSRGrTokLPxLamRFwBnsWS9z5uAyzsMlTEnCWUqpWw,3946 -numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/compat/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/conftest.py,sha256=gViKWIQaPzu9tKZPTt6rIv3LxtvEe9nxrUgPZVDE8UY,8978 -numpy/core/__init__.py,sha256=_lpcaIqNg3TH53JE0JKVKD4X0DOTki2dSvQgjHj6Eek,1307 -numpy/core/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/core/__pycache__/__init__.cpython-313.pyc,, -numpy/core/__pycache__/_dtype.cpython-313.pyc,, -numpy/core/__pycache__/_dtype_ctypes.cpython-313.pyc,, -numpy/core/__pycache__/_internal.cpython-313.pyc,, -numpy/core/__pycache__/_multiarray_umath.cpython-313.pyc,, -numpy/core/__pycache__/_utils.cpython-313.pyc,, -numpy/core/__pycache__/arrayprint.cpython-313.pyc,, -numpy/core/__pycache__/defchararray.cpython-313.pyc,, -numpy/core/__pycache__/einsumfunc.cpython-313.pyc,, -numpy/core/__pycache__/fromnumeric.cpython-313.pyc,, -numpy/core/__pycache__/function_base.cpython-313.pyc,, -numpy/core/__pycache__/getlimits.cpython-313.pyc,, -numpy/core/__pycache__/multiarray.cpython-313.pyc,, -numpy/core/__pycache__/numeric.cpython-313.pyc,, -numpy/core/__pycache__/numerictypes.cpython-313.pyc,, -numpy/core/__pycache__/overrides.cpython-313.pyc,, -numpy/core/__pycache__/records.cpython-313.pyc,, -numpy/core/__pycache__/shape_base.cpython-313.pyc,, -numpy/core/__pycache__/umath.cpython-313.pyc,, -numpy/core/_dtype.py,sha256=PcSCn7DCpgrvBjm-k4eCMcEiTnH-jPzQmh8FyzLVw9I,331 -numpy/core/_dtype.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/core/_dtype_ctypes.py,sha256=eiZNKCJbzZ1Ei9Tkd7Fffx8vWUsAKnFSK-5vza3vmEQ,359 -numpy/core/_dtype_ctypes.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/core/_internal.py,sha256=HC1NrqDEgK-6M1M6-8ZTZSZF7xnIYPh_G_4j2BFBNLM,972 -numpy/core/_multiarray_umath.py,sha256=vO49_4x5SYg-BST541l73RmBm7pkqbwlssmwsRSdU80,2151 
-numpy/core/_utils.py,sha256=dAaZtXVWhOEFiwmVsz8Mn77HsynMDKhZ7HkrjD1Q3vc,944 -numpy/core/arrayprint.py,sha256=qo9GIfdEmW9foxvP0vtFLRaAlSbOoOGJU-hBlQ5hIlA,347 -numpy/core/defchararray.py,sha256=-gCjc9ciILhSzAxtVXgiTwdpuNMD3R6p9tXHe_MLx9A,355 -numpy/core/einsumfunc.py,sha256=LkCSjRQ3HIF4fdRz7uEgl-1TyeT0gtGV5y8x9cQYsZ0,347 -numpy/core/fromnumeric.py,sha256=iQsih718r6QW80auPJbva99qeWfT5IK2S02sv4AFMUs,351 -numpy/core/function_base.py,sha256=V_-tUGZfgjYzjZxvhLNRtVXV2_v12rJsvAGpDXbfq8w,359 -numpy/core/getlimits.py,sha256=SQsTlDpDVz9AvFC-xvAJbhcm5svBD02qpE-HLgt17RA,343 -numpy/core/multiarray.py,sha256=2K7g3jXbH7wqupSsyr5wP0YoQSpXlZab9uDDbJtz2Bk,816 -numpy/core/numeric.py,sha256=nTvwcwAqkzCnYmqEt4J3dvqUodzXUlaI8H5YF5x65xg,370 -numpy/core/numerictypes.py,sha256=jmQ9c1WrWxlx8ODDZKOAqrixUu3Gx_NJD1SzT3wtb50,355 -numpy/core/overrides.py,sha256=Dq-lTb829gvg-HfRtY0BE6GE2UbI6iXkMIh8Gvkzt1g,343 -numpy/core/overrides.pyi,sha256=HScieJk23k4Lk14q8u9CEc3ZEVOQ6hGu_FeWDR2Tyu8,532 -numpy/core/records.py,sha256=5jPtgEtHaJ642Ct-G9uEwnF9y_TZnZAUXm_EUJEF8J8,335 -numpy/core/shape_base.py,sha256=itirz4hN3M8Ndgij4_ZVcra4qtRkK42Owp8qr9fFe5w,347 -numpy/core/umath.py,sha256=09uNybUqfWxdqkoYHzv6jrTDCXq6DDI-EdwaOKdijn4,327 -numpy/ctypeslib.py,sha256=9ejyo77Qqd54f9j7pRQQaABYAfVxjWfgKvia88T9hP4,19438 -numpy/ctypeslib.pyi,sha256=7CY_Na2E0uwZ88TzJ3pasogyLIr0wd8scdcxY0LK21A,8338 -numpy/doc/__pycache__/ufuncs.cpython-313.pyc,, -numpy/doc/ufuncs.py,sha256=jMnfQhRknVIhgFVS9z2l5oYM8N1tuQtf5bXMBL449oI,5552 -numpy/dtypes.py,sha256=cPkS6BLRvpfsUzhd7Vk1L7_VcenWb1nuHuCxc9fYC4I,1353 -numpy/dtypes.pyi,sha256=9Gys5OIDUcglbDCgnJqubAljutcy_NUtmqWUy9-rRB0,15787 -numpy/exceptions.py,sha256=8or6nB2di0rsXpxLrmoUI4nH5bsyAIInsBfMDYL1RS8,8085 -numpy/exceptions.pyi,sha256=baBkfJ_DQdH6AH7roIXq8JSlY5Wn4z_hdJVbo_1SQUE,776 -numpy/f2py/__init__.py,sha256=WZXe6JMmUBaRuBtosCRzno0roeUj8CEoQw9g2_RRokc,2590 -numpy/f2py/__init__.pyi,sha256=0_-xXhZztqkodDS2UJTGZAdLO8JkzE7LMJYeDZa46cY,1103 
-numpy/f2py/__main__.py,sha256=TDesy_2fDX-g27uJt4yXIXWzSor138R2t2V7HFHwqAk,135 -numpy/f2py/__pycache__/__init__.cpython-313.pyc,, -numpy/f2py/__pycache__/__main__.cpython-313.pyc,, -numpy/f2py/__pycache__/__version__.cpython-313.pyc,, -numpy/f2py/__pycache__/_isocbind.cpython-313.pyc,, -numpy/f2py/__pycache__/_src_pyf.cpython-313.pyc,, -numpy/f2py/__pycache__/auxfuncs.cpython-313.pyc,, -numpy/f2py/__pycache__/capi_maps.cpython-313.pyc,, -numpy/f2py/__pycache__/cb_rules.cpython-313.pyc,, -numpy/f2py/__pycache__/cfuncs.cpython-313.pyc,, -numpy/f2py/__pycache__/common_rules.cpython-313.pyc,, -numpy/f2py/__pycache__/crackfortran.cpython-313.pyc,, -numpy/f2py/__pycache__/diagnose.cpython-313.pyc,, -numpy/f2py/__pycache__/f2py2e.cpython-313.pyc,, -numpy/f2py/__pycache__/f90mod_rules.cpython-313.pyc,, -numpy/f2py/__pycache__/func2subr.cpython-313.pyc,, -numpy/f2py/__pycache__/rules.cpython-313.pyc,, -numpy/f2py/__pycache__/symbolic.cpython-313.pyc,, -numpy/f2py/__pycache__/use_rules.cpython-313.pyc,, -numpy/f2py/__version__.py,sha256=TisKvgcg4vh5Fptw2GS1JB_3bAQsWZIKhclEX6ZcAho,35 -numpy/f2py/_backends/__init__.py,sha256=xIVHiF-velkBDPKwFS20PSg-XkFW5kLAVj5CSqNLddM,308 -numpy/f2py/_backends/__pycache__/__init__.cpython-313.pyc,, -numpy/f2py/_backends/__pycache__/_backend.cpython-313.pyc,, -numpy/f2py/_backends/__pycache__/_distutils.cpython-313.pyc,, -numpy/f2py/_backends/__pycache__/_meson.cpython-313.pyc,, -numpy/f2py/_backends/_backend.py,sha256=9RZDu4FCwCM7G39EX2YEt-Vnaz0U2WSp-QSAfz11BGE,1233 -numpy/f2py/_backends/_distutils.py,sha256=e3dqC9ddmppsCNhLngtOE3Z6WZnLfaG_N5xiIcHPVWI,2459 -numpy/f2py/_backends/_meson.py,sha256=GD5pv3ilTRjtU4wGWgWrakg4nFySOiaX4NdmgO3egYM,8322 -numpy/f2py/_backends/meson.build.template,sha256=6XD3j-K5pc1P_icgUWkrgEsyludQWsqS5rb6UB29tH0,1654 -numpy/f2py/_isocbind.py,sha256=QVoR_pD_bY9IgTaSHHUw_8EBg0mkaf3JZfwhLfHbz1Q,2422 -numpy/f2py/_src_pyf.py,sha256=3swmQKGTeQGVMLzTTkZqZHHQ5EP6RT2LjgaUnXv0S74,7904 
-numpy/f2py/auxfuncs.py,sha256=5tF_ZvesfJDTmh-1Pq7NgV7ArDtD2aOGhwbb4VZtraE,28020 -numpy/f2py/capi_maps.py,sha256=Hj1g5T5Siyc4JWSZJPnbfXqPTCqoXblwiDET04UBh4k,31428 -numpy/f2py/cb_rules.py,sha256=hALemKsqa1qkTD2KqBcdGmRDhSTAuq1Z5ZsPlJjWdXw,25648 -numpy/f2py/cfuncs.py,sha256=qbuF9fJWlhVSZ3xIstFwrGWM7FO1Zy0DUzBk6HD11ik,54036 -numpy/f2py/common_rules.py,sha256=19VDEPQ9-Pzzknv03U23gWYesmDAzJrGxwdXqn7CxhQ,5277 -numpy/f2py/crackfortran.py,sha256=DnaLeMI4wdc9cUAUMSuCb_2_ZAzWuQfbi8puLeRNbeU,151841 -numpy/f2py/diagnose.py,sha256=0DtPTDjxbFUu0F_nDHfsD0vlCgnRhf8WZ1kHsXVWcpE,5351 -numpy/f2py/f2py2e.py,sha256=36qdKKlXxLiwFZoDwA9sYZMxH6IzoPY9alB8ZajnxDY,29621 -numpy/f2py/f90mod_rules.py,sha256=Q-e9Q79dkOvEBLoJDLTf7nX7WbtPf-qt4pbRI41kLYw,10144 -numpy/f2py/func2subr.py,sha256=Wro0C3NGSO-1g2zxBI8qg_Tl6KyczrCtCTJvhN4KtUQ,10621 -numpy/f2py/rules.py,sha256=lQjZ-e0LAArXNmso5c6H_IqXZiDdmcmmgmV3tztJ4UI,64516 -numpy/f2py/setup.cfg,sha256=828sy3JvJmMzVxLkC-y0lxcEMaDTnMc3l9dWqP4jYng,50 -numpy/f2py/src/fortranobject.c,sha256=R7AJfWjQiz2dLylWtFpvZByWvu9OCkG4UCkMa3t-jxw,47472 -numpy/f2py/src/fortranobject.h,sha256=uCcHO8mjuANlKb3c7YAZwM4pgT0CTaXWLYqgE27Mnt0,5996 -numpy/f2py/symbolic.py,sha256=BI5m8j7wEpq1u9yTDUBUtqUCCH1JBVVxyEFZRMjGWlA,54771 -numpy/f2py/tests/__init__.py,sha256=hiQX1lvI7rIYRNecVpg5D_0N6E0w94BSmexhEErutmI,343 -numpy/f2py/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_block_docstring.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_callback.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_character.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_common.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_crackfortran.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_data.cpython-313.pyc,, 
-numpy/f2py/tests/__pycache__/test_docs.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_f2cmap.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_f2py2e.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_isoc.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_kind.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_mixed.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_modules.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_parameter.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_pyf_src.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_quoted_character.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_regression.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_return_character.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_return_complex.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_return_integer.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_return_logical.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_return_real.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_routines.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_size.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_string.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_symbolic.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-313.pyc,, -numpy/f2py/tests/__pycache__/util.cpython-313.pyc,, -numpy/f2py/tests/src/abstract_interface/foo.f90,sha256=aCaFEqfXp79pVXnTFtjZBWUY_5pu8wsehp1dEauOkSE,692 -numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90,sha256=y3R2dDn0BUz-0bMggfT1jwXbhz_gniz7ONMpureEQew,111 -numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=0UkctY5oeFs9B9qnX8qhe3wTFZA_mF-FBBkJoy_iuQg,7713 -numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=zfuOShmuotzcLIQDnVFaARwvM66iLrOYzpquIGDbiKU,30 -numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=fqbSr7VlKfVrBulFgQtQA9fQf0mQvVbLi94e4FTST3k,494 
-numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=9pbi88-uSNP5IwS49Kim982jDAuopo3tpEhg2SOU7no,540 -numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=9Cl1sdrihB8cCSsjoQGmOO8VRv9ni8Fjr0Aku1UdEWM,288 -numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=3L_F7n5ju9F0nxw95uBUaPeuiDOw6uHvB580eIj7bqI,134 -numpy/f2py/tests/src/block_docstring/foo.f,sha256=KVTeqSFpI94ibYIVvUW6lOQ9T2Bx5UzZEayP8Maf2H0,103 -numpy/f2py/tests/src/callback/foo.f,sha256=rLqaaaUpWFTaGVxNoGERtDKGCa5dLCTW5DglsFIx-wU,1316 -numpy/f2py/tests/src/callback/gh17797.f90,sha256=-_NvQK0MzlSR72PSuUE1FeUzzsMBUcPKsbraHIF7O24,155 -numpy/f2py/tests/src/callback/gh18335.f90,sha256=n_Rr99cI7iHBEPV3KGLEt0QKZtItEUKDdQkBt0GKKy4,523 -numpy/f2py/tests/src/callback/gh25211.f,sha256=ejY_ssadbZQfD5_-Xnx_ayzWXWLjkdy7DGp6C_uCUCY,189 -numpy/f2py/tests/src/callback/gh25211.pyf,sha256=nrzvt2QHZRCcugg0R-4FDMMl1MJmWCOAjR7Ta-pXz7Y,465 -numpy/f2py/tests/src/callback/gh26681.f90,sha256=ykwNXWyja5FfZk1bPihbYiMmMlbKhRPoPKva9dNFtLM,584 -numpy/f2py/tests/src/cli/gh_22819.pyf,sha256=e3zYjFmiOxzdXoxzgkaQ-CV6sZ1t4aKugyhqRXmBNdQ,148 -numpy/f2py/tests/src/cli/hi77.f,sha256=bgBERF4EYxHlzJCvZCJOlEmUE1FIvipdmj4LjdmL_dE,74 -numpy/f2py/tests/src/cli/hiworld.f90,sha256=RncaEqGWmsH9Z8BMV-UmOTUyo3-e9xOQGAmNgDv6SfY,54 -numpy/f2py/tests/src/common/block.f,sha256=tcGKa42S-6bfA6fybpM0Su_xjysEVustkEJoF51o_pE,235 -numpy/f2py/tests/src/common/gh19161.f90,sha256=Vpb34lRVC96STWaJerqkDQeZf7mDOwWbud6pW62Tvm4,203 -numpy/f2py/tests/src/crackfortran/accesstype.f90,sha256=3ONHb4ZNx0XISvp8fArnUwR1W9rzetLFILTiETPUd80,221 -numpy/f2py/tests/src/crackfortran/common_with_division.f,sha256=JAzHD5aluoYw0jVGZjBYd1wTABU0PwNBD0cz3Av5AAk,511 -numpy/f2py/tests/src/crackfortran/data_common.f,sha256=rP3avnulWqJbGCFLWayjoFKSspGDHZMidPTurjz33Tc,201 -numpy/f2py/tests/src/crackfortran/data_multiplier.f,sha256=LaPXVuo5lX0gFZVh76Hc7LM1sMk9EBPALuXBnHAGdOA,202 -numpy/f2py/tests/src/crackfortran/data_stmts.f90,sha256=MAZ3gstsPqECk3nWQ5Ql-C5udrIv3sAciW1ZGTtHLts,713 
-numpy/f2py/tests/src/crackfortran/data_with_comments.f,sha256=FUPluNth5uHgyKqjQW7HKmyWg4wDXj3XPJCIC9ZZuOs,183 -numpy/f2py/tests/src/crackfortran/foo_deps.f90,sha256=D9FT8Rx-mK2p8R6r4bWxxqgYhkXR6lNmPj2RXOseMpw,134 -numpy/f2py/tests/src/crackfortran/gh15035.f,sha256=0G9bmfVafpuux4-ZgktYZ6ormwrWDTOhKMK4wmiSZlQ,391 -numpy/f2py/tests/src/crackfortran/gh17859.f,sha256=acknjwoWYdA038oliYLjB4T1PHhXkKRLeJobIgB_Lbo,352 -numpy/f2py/tests/src/crackfortran/gh22648.pyf,sha256=xPnKx4RcT1568q-q_O83DYpCgVYJ8z4WQ-yLmHPchJA,248 -numpy/f2py/tests/src/crackfortran/gh23533.f,sha256=k2xjRpRaajMYpi5O-cldYPTZGFGB12PUGcj5Fm9joyk,131 -numpy/f2py/tests/src/crackfortran/gh23598.f90,sha256=20ukdZXq-qU0Zxzt4W6cO8tRxlNlQ456zgD09zdozCE,105 -numpy/f2py/tests/src/crackfortran/gh23598Warn.f90,sha256=FvnIxy5fEOvzNb5WSkWzPk7yZ9yIv0yPZk9vNnS-83w,216 -numpy/f2py/tests/src/crackfortran/gh23879.f90,sha256=jELVfEGEF66z_Pv_iBHp3yGsGhadB0dnKCDtPcaz_CM,352 -numpy/f2py/tests/src/crackfortran/gh27697.f90,sha256=mTOEncxZlam6N-3I-IL0ua-iLkgqDrrVXNsE-7y7jAM,376 -numpy/f2py/tests/src/crackfortran/gh2848.f90,sha256=-IpkeTz0j9_lkQeN9mT7w3U1cAJjQxSMdAmyHdF8oVg,295 -numpy/f2py/tests/src/crackfortran/operators.f90,sha256=cb1JO2hIMCQejZO_UJWluBCP8LdXQbBJw2XN6YHB3JA,1233 -numpy/f2py/tests/src/crackfortran/privatemod.f90,sha256=9O2oWEquIUcbDB1wIzNeae3hx4gvXAoYW5tGfBt3KWk,185 -numpy/f2py/tests/src/crackfortran/publicmod.f90,sha256=nU_VXCKiniiUq_78KAWkXiN6oiMQh39emMxbgOVf9cg,177 -numpy/f2py/tests/src/crackfortran/pubprivmod.f90,sha256=-uz75kquU4wobaAPZ1DLKXJg6ySCZoDME1ce6YZ2q5Y,175 -numpy/f2py/tests/src/crackfortran/unicode_comment.f90,sha256=wDMoF7F7VFYdeocfTyWIh7noniEwExVb364HrhUSbSg,102 -numpy/f2py/tests/src/f2cmap/.f2py_f2cmap,sha256=fwszymaWhcWO296u5ThHW5yMAkFhB6EtHWqqpc9FAVI,83 -numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90,sha256=rphN_mmzjCCCkdPM0HjsiJV7rmxpo4GoCNp5qmBzv8U,307 -numpy/f2py/tests/src/isocintrin/isoCtests.f90,sha256=Oir0PfE3mErnUQ42aFxiqAkcYn3B6b1FHIPGipDdekg,1032 
-numpy/f2py/tests/src/kind/foo.f90,sha256=6_zq3OAWsuNJ5ftGTQAEynkHy-MnuLgBXmMIgbvL7yU,367 -numpy/f2py/tests/src/mixed/foo.f,sha256=Zgn0xDhhzfas3HrzgVSxIL1lGEF2mFRVohrvXN1thU0,90 -numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=6eEEYCH71gPp6lZ6e2afLrfS6F_fdP7GZDbgGJJ_6ns,187 -numpy/f2py/tests/src/mixed/foo_free.f90,sha256=UC6iVRcm0-aVXAILE5jZhivoGQbKU-prqv59HTbxUJA,147 -numpy/f2py/tests/src/modules/gh25337/data.f90,sha256=EqMEuEV0_sx4XbFzftbU_6VfGtOw9Tbs0pm0eVEp2cA,188 -numpy/f2py/tests/src/modules/gh25337/use_data.f90,sha256=DChVLgD7qTOpbYNmfGjPjfOx5YsphMIYwdwnF12X4xM,185 -numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90,sha256=MMLPSzBwuGS4UwCXws9djH11F5tG5xFLc80CDb4U9Mk,423 -numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90,sha256=1dJD1kDC_wwn7v_zF49D3n62T1x9wFxGKanQQz_VI7k,424 -numpy/f2py/tests/src/modules/module_data_docstring.f90,sha256=-asnMH7vZMwVIeMU2YiLWgYCUUUxZgPTpbAomgWByHs,236 -numpy/f2py/tests/src/modules/use_modules.f90,sha256=bveSAqXIZtd4NMlDfFei1ZlesFAa9An5LjkD-gDk2ms,418 -numpy/f2py/tests/src/negative_bounds/issue_20853.f90,sha256=IxBGWem-uv9eHgDhysEdGTmNKHR1gAiU7YJPo20eveM,164 -numpy/f2py/tests/src/parameter/constant_array.f90,sha256=fkYemwIBKsP63-FGKBW8mzOAp6k13eZOin8sQe1pyno,1513 -numpy/f2py/tests/src/parameter/constant_both.f90,sha256=L0rG6-ClvHx7Qsch46BUXRi_oIEL0uw5dpRHdOUQuv0,1996 -numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=lAT76HcXGMgr1NfKof-RIX3W2P_ik1PPqkRdJ6EyBmM,484 -numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=42jROArrG7vIag9wFa_Rr5DBnnNvGsrEUgpPU14vfIo,634 -numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=u9MRf894Cw0MVlSOUbMSnFSHP4Icz7RBO21QfMkIl-Q,632 -numpy/f2py/tests/src/parameter/constant_real.f90,sha256=QoPgKiHWrwI7w5ctYZugXWzaQsqSfGMO7Jskbg4CLTc,633 -numpy/f2py/tests/src/quoted_character/foo.f,sha256=0zXQbdaqB9nB8R4LF07KDMFDbxlNdiJjVdR8Nb3nzIM,496 
-numpy/f2py/tests/src/regression/AB.inc,sha256=ydjTVb6QEw1iYw2tRiziqqzWcDHrJsNWr3m51-rqFXQ,17 -numpy/f2py/tests/src/regression/assignOnlyModule.f90,sha256=vPJbhOlNsLrgN3su4ohHUSbxE4GGKU7SiJh7dhBvX3o,633 -numpy/f2py/tests/src/regression/datonly.f90,sha256=HuBLuEw0kNEplJ9TxxSNr7hLj-jx9ZNGaXC8iLm_kf8,409 -numpy/f2py/tests/src/regression/f77comments.f,sha256=FjP-07suTBdqgtwiENT04P-47UB4g9J5-20IQdXAHhM,652 -numpy/f2py/tests/src/regression/f77fixedform.f95,sha256=KdKFcAc3ZrID-h4nTOJDdEYfQzR2kkn9VqQCorfJGpM,144 -numpy/f2py/tests/src/regression/f90continuation.f90,sha256=VweFIi5-xxZhtgSOh8i_FjMPXu_od9qjrDHq6ma5X5k,285 -numpy/f2py/tests/src/regression/incfile.f90,sha256=gq87H2CtCZUON9V5UzcK6x_fthnWDVuPFQLa0fece1M,97 -numpy/f2py/tests/src/regression/inout.f90,sha256=TlMxJjhjjiuLI--Tg2LshLnbfZpiKz37EpR_tPKKSx8,286 -numpy/f2py/tests/src/regression/lower_f2py_fortran.f90,sha256=bWlj2Frch3onnUpd6DTaoLDa6htrrbkBiI9JIRbQPfE,105 -numpy/f2py/tests/src/return_character/foo77.f,sha256=tRyQSu9vNWtMRi7gjmMN-IZnS7ogr5YS0n38uax_Eo0,1025 -numpy/f2py/tests/src/return_character/foo90.f90,sha256=WPQZC6CjXLbUYpzy5LItEoHmRDFxW0ABB3emRACsjZU,1296 -numpy/f2py/tests/src/return_complex/foo77.f,sha256=7-iKoamJ-VObPFR-Tslhiw9E-ItIvankWMyxU5HqxII,1018 -numpy/f2py/tests/src/return_complex/foo90.f90,sha256=_GOKOZeooWp3pEaTBrZNmPmkgGodj33pJnJmySnp7aE,1286 -numpy/f2py/tests/src/return_integer/foo77.f,sha256=EKs1KeAOQBkIO99tMCx0H7_lpqvqpjie8zWZ6T_bAR4,1234 -numpy/f2py/tests/src/return_integer/foo90.f90,sha256=0aYWcaAVs7Lw3Qbf8hupfLC8YavRuPZVIwjHecIlMOo,1590 -numpy/f2py/tests/src/return_logical/foo77.f,sha256=Ax3tBVNAlxFtHhV8fziFcsTnoa8YJdapecMr6Qj7fLk,1244 -numpy/f2py/tests/src/return_logical/foo90.f90,sha256=IZXCerFecYT24zTQ_spIoPr6n-fRncaM0tkTs8JqO1E,1590 -numpy/f2py/tests/src/return_real/foo77.f,sha256=3nAY1YtzGk4osR2jZkHMVIUHxFoOtF1OLfWswpcV7kA,978 -numpy/f2py/tests/src/return_real/foo90.f90,sha256=38ZCnBGWb9arlJdnVWvZjVk8uesrQN8wG2GrXGcSIJs,1242 
-numpy/f2py/tests/src/routines/funcfortranname.f,sha256=ruyXK6eQSLQnQ_rODT1qm1cJvpHrFhI6NRrnWvEIK0U,128 -numpy/f2py/tests/src/routines/funcfortranname.pyf,sha256=EgRw8ZWGdd2uK4qCZD89r9VQtEXmnKDx59OpB0K58as,451 -numpy/f2py/tests/src/routines/subrout.f,sha256=35DjHIj85ZLkxRxP4bs-WFTQ5y1AyDqBKAXTzSSTAxE,94 -numpy/f2py/tests/src/routines/subrout.pyf,sha256=xT_WnDpvpyPb0FMRAVTRRgm3nlfALf1Ojg8x3qZNv_4,332 -numpy/f2py/tests/src/size/foo.f90,sha256=nK_767f1TtqVr-dMalNkXmcKbSbLCiabhRkxSDCzLz0,859 -numpy/f2py/tests/src/string/char.f90,sha256=X_soOEV8cKsVZefi3iLT7ilHljjvJJ_i9VEHWOt0T9Y,647 -numpy/f2py/tests/src/string/fixed_string.f90,sha256=tCN5sA6e7M1ViZtBNvTnO7_efk7BHIjyhFKBoLC3US0,729 -numpy/f2py/tests/src/string/gh24008.f,sha256=Z6cq8SFGvmaA72qeH9tu1rP8pYjqm0ONpHn7nGbhoLA,225 -numpy/f2py/tests/src/string/gh24662.f90,sha256=xJkiYvrMT9Ipb9Cq7OXl1Ev6TISl8pq1MGemySzfGd0,204 -numpy/f2py/tests/src/string/gh25286.f90,sha256=lqEl81Iu9GIDTAbOfkkNGcGgDyyGnPB44mJw2iK1kng,318 -numpy/f2py/tests/src/string/gh25286.pyf,sha256=wYkkr5gEN9_RtGjpqh28X1k8KCgh0-Ds9XAt8IC9j4A,393 -numpy/f2py/tests/src/string/gh25286_bc.pyf,sha256=ZRvgSzRlaPEx8GyNt97FrRhtCg-r4ZTEDsHNBfit4m8,396 -numpy/f2py/tests/src/string/scalar_string.f90,sha256=U1QqVgbF1DbxdFekRjchyDlFRPnXwzG72kuE8A44Za8,185 -numpy/f2py/tests/src/string/string.f,sha256=JCwLuH21Ltag5cw_9geIQQJ4Hv_39NqG8Dzbqj1eDKE,260 -numpy/f2py/tests/src/value_attrspec/gh21665.f90,sha256=MbbSUQI5Enzq46KWFHRzQbY7q6ZHJH_9NRL-C9i13Wg,199 -numpy/f2py/tests/test_abstract_interface.py,sha256=673rVYr6ZsMSb3lumjiqeyK2DjkMLEFrqmpRljYWRes,833 -numpy/f2py/tests/test_array_from_pyobj.py,sha256=8W5j1nqbXbUpCsCw3S5mpGfuStvkFCKVdfXi7eU2co4,24379 -numpy/f2py/tests/test_assumed_shape.py,sha256=IyqJPGpGVv_RaRCwrko_793jLxJC1495tR9gAbmTlR8,1515 -numpy/f2py/tests/test_block_docstring.py,sha256=0Dh1GXlaCg33DmlbhC08MOBMXdpMbk983MQB2hB7XhA,600 -numpy/f2py/tests/test_callback.py,sha256=pIloccFF6nJOMwD4yOiDWHFUtU2PfKkrZfWuhqIhBM0,7375 
-numpy/f2py/tests/test_character.py,sha256=IuV6DQ--Tr-NEAWSxzWzrjDQtVAgXLiV-jfHi_dc5Sc,22544 -numpy/f2py/tests/test_common.py,sha256=z1qoOm6HFvLal_cOCPuNn7NVohWjWBcO2v1maVFfRhQ,661 -numpy/f2py/tests/test_crackfortran.py,sha256=0xxfF0AbYddVou72KbRZX2IMnSfUh3Cj5hh5FEH5vjM,16801 -numpy/f2py/tests/test_data.py,sha256=JSObh8NfZipQQp0_021GLVKhmwhiNxEvAf5Zm2q0dds,2958 -numpy/f2py/tests/test_docs.py,sha256=IAauf96ibmpi6hzND8dI_vfAnLoUn-GzHMVf05GIwJM,1909 -numpy/f2py/tests/test_f2cmap.py,sha256=2Yy4zuFrkn0QvCkiRjGiHqirp9bXe8ODSnM_LYNAUsM,400 -numpy/f2py/tests/test_f2py2e.py,sha256=BeUBmCNKiXYe2TxPMChpHMCo7MZnbqYHl6iToJ4q25g,28832 -numpy/f2py/tests/test_isoc.py,sha256=KGUijaN2Qum_qQD1Rc7m7B3dMTx47oRud8ZWNfc5M0Y,1481 -numpy/f2py/tests/test_kind.py,sha256=iVs-TL343aNa6NOaw31EaYB3scFdnU4n0_IKPdjyAco,1832 -numpy/f2py/tests/test_mixed.py,sha256=95O8xkouDaNFckMa2T4qnUfBpVEVugbM0iruQo9JFpw,893 -numpy/f2py/tests/test_modules.py,sha256=mMLzcjENVJ3on--z9qmbUthruWz02T49XiY_A0xbzkw,2380 -numpy/f2py/tests/test_parameter.py,sha256=KTmgD77wZFHqZyq4wfRGbR9RisNti8IgO-Q6mUneSwo,4753 -numpy/f2py/tests/test_pyf_src.py,sha256=RLm95aANGakQYCzk_UJjUcq0mOQH0LtD6HoZYkEiIrU,1179 -numpy/f2py/tests/test_quoted_character.py,sha256=cLPRMhNiCO0v-_A5jPkTg-Zv38U-bbJteuLOL9VSZik,493 -numpy/f2py/tests/test_regression.py,sha256=A3a3hbpMqUrFEKp3p3IxueubfaoZyJZBJQz7A0BJqe4,6023 -numpy/f2py/tests/test_return_character.py,sha256=9hAUrTWmHkSnRQM4pz43cLFBSEIU5sN8g2M8xaqBqBE,1557 -numpy/f2py/tests/test_return_complex.py,sha256=ynSaaMSxiBTApp-tIGwXHLe5gCjqm4qJCq_QNwihGWk,2481 -numpy/f2py/tests/test_return_integer.py,sha256=PNeeeykh0Q9oPxUCcuLC3Q1XFbRrk7jhQwK6erjau0M,1830 -numpy/f2py/tests/test_return_logical.py,sha256=gPBO6zxmwek0fUIvCDgybiltiNqiMwaIqqsY2o0PXtg,2081 -numpy/f2py/tests/test_return_real.py,sha256=e39QqQEDkpxkVEl_5qK67cu7uv0iZUaRA7tlYeKynV0,3354 -numpy/f2py/tests/test_routines.py,sha256=4Bg3qLRIyKFzdM3BoRW6vn6CKI2EUzlt5wnHDBzBx0c,822 
-numpy/f2py/tests/test_semicolon_split.py,sha256=ZkWpV7iKLoSffVdoz_iDdmZnm0Ty4zZSG5git8dsBeY,1700 -numpy/f2py/tests/test_size.py,sha256=GKZ5zCsY-wWq4zwlBfMpLub-9Mziy5GFOC7dg39k7ng,1198 -numpy/f2py/tests/test_string.py,sha256=KEic6DcDoHZuqofWtytUAqaOC-GWR4SVa2jxsdXq1zw,3034 -numpy/f2py/tests/test_symbolic.py,sha256=Zk4h3WC2etMrIEyMrayPpGthpWfuS35Yz-4XzzGFcY4,18835 -numpy/f2py/tests/test_value_attrspec.py,sha256=CbcEA3U2rFrFE-7roKIXQXP02Vq7pgwicrP05XrizK0,343 -numpy/f2py/tests/util.py,sha256=9BhKV5A5gwhrBBdnI_aa0n5PwPXS4yRijd8VjBzflCA,12615 -numpy/f2py/use_rules.py,sha256=zWh8pG5ewfg_LInDmT48O7c0oBlDaGQ4exp5C5ZUZzU,3621 -numpy/fft/__init__.py,sha256=MwVEjIo3wDxMAbKERmmX3cHh8EK9nIw9vlUNTpOgNyo,8541 -numpy/fft/__init__.pyi,sha256=9LUY_NorLJecQQHN-0dLE9uVLhwv03Bh2iFVTOpSMW8,557 -numpy/fft/__pycache__/__init__.cpython-313.pyc,, -numpy/fft/__pycache__/_helper.cpython-313.pyc,, -numpy/fft/__pycache__/_pocketfft.cpython-313.pyc,, -numpy/fft/__pycache__/helper.cpython-313.pyc,, -numpy/fft/_helper.py,sha256=nAtQQ7eHZrQhws3IEIBtpnCWA4emPricOmNnXrm_bng,7010 -numpy/fft/_helper.pyi,sha256=Fraw7-4rRa4tl_UT1HWzvGrR2bE7rNcru0PpsC1_byU,1379 -numpy/fft/_pocketfft.py,sha256=3M0RsdVo_6SpjG12H7W67Wr5GGXc83ipSAx-4gCV2VY,64379 -numpy/fft/_pocketfft.pyi,sha256=PUfhum-xLMNaYacpoJZj3ho-wsoIWrbAS_pq84V8oEc,3292 -numpy/fft/_pocketfft_umath.cp313-win_amd64.lib,sha256=geFn0rRssDXbwv5ZGe8yZU-QRkECnvFdKyOao0keYDc,2176 -numpy/fft/_pocketfft_umath.cp313-win_amd64.pyd,sha256=UvRBVlR5tnYgCIgoesn0l_lnx2p0RE84G8BI__acPd8,279040 -numpy/fft/helper.py,sha256=Dvf6DS9pHTCmugMQy5IBwk5LlSt5PjdShv1IRsUySIY,626 -numpy/fft/helper.pyi,sha256=MDJI7k0BFz8N1DuYkyBCEdaT09d3CHEsBaG9JAgs2aI,913 -numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/fft/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/fft/tests/__pycache__/test_helper.cpython-313.pyc,, -numpy/fft/tests/__pycache__/test_pocketfft.cpython-313.pyc,, 
-numpy/fft/tests/test_helper.py,sha256=-CrZvGxoD1xhFNVsHJS3oNTw6yYoNq06CKHmWO_0fSk,6316 -numpy/fft/tests/test_pocketfft.py,sha256=QasTw3GPyU-MiB1qgtcDxBSjCGrBnCt0BTUmMjnrAFU,24999 -numpy/lib/__init__.py,sha256=pcYU9wc4cOsrPI9GocW4nkAHr28r3OkEFWx6b6tXsdY,3320 -numpy/lib/__init__.pyi,sha256=ytClnxgcmYBSM80EuL8ooDJr7uMZttvWzi2JexClhPQ,538 -numpy/lib/__pycache__/__init__.cpython-313.pyc,, -numpy/lib/__pycache__/_array_utils_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_arraypad_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_arraysetops_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_arrayterator_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_datasource.cpython-313.pyc,, -numpy/lib/__pycache__/_function_base_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_histograms_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_index_tricks_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_iotools.cpython-313.pyc,, -numpy/lib/__pycache__/_nanfunctions_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_npyio_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_polynomial_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_scimath_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_shape_base_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_stride_tricks_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_twodim_base_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_type_check_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_ufunclike_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_user_array_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_utils_impl.cpython-313.pyc,, -numpy/lib/__pycache__/_version.cpython-313.pyc,, -numpy/lib/__pycache__/array_utils.cpython-313.pyc,, -numpy/lib/__pycache__/format.cpython-313.pyc,, -numpy/lib/__pycache__/introspect.cpython-313.pyc,, -numpy/lib/__pycache__/mixins.cpython-313.pyc,, -numpy/lib/__pycache__/npyio.cpython-313.pyc,, -numpy/lib/__pycache__/recfunctions.cpython-313.pyc,, -numpy/lib/__pycache__/scimath.cpython-313.pyc,, -numpy/lib/__pycache__/stride_tricks.cpython-313.pyc,, 
-numpy/lib/__pycache__/user_array.cpython-313.pyc,, -numpy/lib/_array_utils_impl.py,sha256=8V5hh2JYzL0LKy2KBrRPh-FZHjfKrn7nyS_VNrvRSO0,1751 -numpy/lib/_array_utils_impl.pyi,sha256=PvJJhNRRyOcLtY-FGwkAmRCdJfHEWcl4BwnU11Waq_s,818 -numpy/lib/_arraypad_impl.py,sha256=Ri6I_s95XLEUwSykx9eOBz7JqiPNKQTMH24uQrDBD-k,33217 -numpy/lib/_arraypad_impl.pyi,sha256=bOjBqunPqcmMN4RiOMe0T5d03gIMxIm_fkGMn8ONiSM,1881 -numpy/lib/_arraysetops_impl.py,sha256=4_42VA7Lu4P-ODK9M3kusK-xKQP5u7tcThyuHMcn0Vo,40524 -numpy/lib/_arraysetops_impl.pyi,sha256=B8kt91kSzkrtA-fwqwWWLNpPDzEmcM0-BWKiYd9Ne0I,9948 -numpy/lib/_arrayterator_impl.py,sha256=n1_emvfqXcvOtLhaeMtCvYiZqBCnVCNfwpI95loCBa0,7410 -numpy/lib/_arrayterator_impl.pyi,sha256=U-olocb1ETuXURoG6QuYhq1xoeDxrtSQInphHsHCGiU,1873 -numpy/lib/_datasource.py,sha256=H7HKFHCye9r2mLDup6KYnuKcjUrOd3Gc2wFMn78rVGY,23429 -numpy/lib/_datasource.pyi,sha256=a_mEw94cyK-Ik7ZaQIDIJp8CB2pYV-1FEvRkZHCM20c,1027 -numpy/lib/_function_base_impl.py,sha256=3T1Z1wJatBn8fV7okN9moLoiL5jSiNMGRvex0jRfhvg,201865 -numpy/lib/_function_base_impl.pyi,sha256=1FuQSPYALot4Z8LXSVvcjiP8ZlZh0fvFBSPHcGgK4iY,23122 -numpy/lib/_histograms_impl.py,sha256=WCi3-3v3J6NeXxoQDy2MBxJCQSufgMkmtvr4IIgK-I4,39852 -numpy/lib/_histograms_impl.pyi,sha256=HFQ2VaV5St1hH7CrdnUOn2pKurKjXoNRN4TJ9D1vmYI,1118 -numpy/lib/_index_tricks_impl.py,sha256=34-lqDXxLap3V6I_C27OeyYJLi1fKIRyz-WJ3K2g7D8,33248 -numpy/lib/_index_tricks_impl.pyi,sha256=shCCeeX_Xd_fA18YRmhhtk4WU76BP1qFYOQPB2mzeTg,6521 -numpy/lib/_iotools.py,sha256=gfw4LwMszW5bDH78mM1Y_VWWCZ_u24uW-tuoZAVBM2k,31840 -numpy/lib/_iotools.pyi,sha256=_234y4IuHkYOY1Mt-7lJ-kJ3FJbhjoMRl0zZhyVS-dk,3493 -numpy/lib/_nanfunctions_impl.py,sha256=32gsaYfFPKAqeqxkjl9XAFYaKun6F9f9-Auhk0t73Jk,74178 -numpy/lib/_nanfunctions_impl.pyi,sha256=N8tDZ0fhyMiFD0-4M_rBP61DmDkIGiN171GNjwVxlck,886 -numpy/lib/_npyio_impl.py,sha256=53vlumSVsWMjaSaFNtM7Adb8xzBlEI1KiZB-Yxiuvx0,101972 -numpy/lib/_npyio_impl.pyi,sha256=YH-oiyiwd5li4imo6_kjvpxGgnO-_wQuD4NFsz0ClW0,9555 
-numpy/lib/_polynomial_impl.py,sha256=o5Qd-VSe36gsgEuJBrjJrW4j5ZMMfYuq6howHR3TazE,45752 -numpy/lib/_polynomial_impl.pyi,sha256=MmlWawTNlEnyvlMokm_w3IyWpUIDQvPjixH3CGVurMo,7432 -numpy/lib/_scimath_impl.py,sha256=3P2GH0hldWMw6hgqNmv_KXlENfFYzeIy9gqrqcOugdg,16334 -numpy/lib/_scimath_impl.pyi,sha256=9aJJX_D19ccIrGqy0VCmRGAGVoluWCYQLW7_ecyk_XE,3049 -numpy/lib/_shape_base_impl.py,sha256=LgKYMnNErkMy8xbIumjaPJHFPOALvFTRwGQHYjfCtZo,40647 -numpy/lib/_shape_base_impl.pyi,sha256=fl0aTfotN-9IOeGsEAVLnexbM-njPFKnYU13cBujHkE,5513 -numpy/lib/_stride_tricks_impl.py,sha256=0Lrnvmponu10hS2g6E0Ec7sHuNrfNS5CoPZPqWPP74M,18574 -numpy/lib/_stride_tricks_impl.pyi,sha256=FeWPs1yD4uQQSis8w4cm9-YW7IQ6bv333JFaqIc0zrQ,1881 -numpy/lib/_twodim_base_impl.py,sha256=_pfbE4LTkMSssA5Piz1F2c9pOMtJMO7LNw_My6PF0kA,35052 -numpy/lib/_twodim_base_impl.pyi,sha256=JgqPjlBo1JFeYdMnb2NkMYyR8Dvami3hjk4VQ-MK6mY,11706 -numpy/lib/_type_check_impl.py,sha256=NZhF_zIbdmDzbLbwakOnPhl2eRz3lJW_rNzBCULLSEk,19919 -numpy/lib/_type_check_impl.pyi,sha256=si8-6dnzrcaeVtdqnoL6Cu82ZdoB9inkjmZxiRdaFz4,5366 -numpy/lib/_ufunclike_impl.py,sha256=mq924a_rI7wvsWoPKHyc38WLI11fxCAiog-k6gJ5br0,6516 -numpy/lib/_ufunclike_impl.pyi,sha256=4Q_uMOYYI58InUsBcBnq3l-JBROsvEUytTB7xYBT8ls,1389 -numpy/lib/_user_array_impl.py,sha256=mx1xZjZib3SxnopvslFEK6Z-ql_ZzgUsn1u0LZ8KnXw,8262 -numpy/lib/_user_array_impl.pyi,sha256=no_xh1L4-mCrXwMRcYGVEqRsznuuYq8kwgkRcaHuWkc,9521 -numpy/lib/_utils_impl.py,sha256=9jwNKayFoYxCrg4GgdFXBCZwfjMNczoaQBfs0msKxVs,24163 -numpy/lib/_utils_impl.pyi,sha256=Avu_JgLOX4FJnFI8KcqUWdx_V2ldS6_YHNMg9yvfygY,284 -numpy/lib/_version.py,sha256=m4Z1ufCoQH5yYndKrkXKiN3p8FIygUbeYt2fjGfi2Rs,5009 -numpy/lib/_version.pyi,sha256=zAmfNnFeke7_lHsvR94fafNBcuJHpZ1jaB2PyzEostc,658 -numpy/lib/array_utils.py,sha256=SyMHXlsOJMKwxkjQxjsxx3J2cgx_3J2N0qqmLZTQgMc,137 -numpy/lib/array_utils.pyi,sha256=YYnx_V4CMdSbJTCnYboN1swcswmlOD2e4ZvQj5WsSak,197 -numpy/lib/format.py,sha256=yKvqaH4nwrS7GPoQUV_YgnNC_s0KQaZ-08WG10q1x3I,37208 
-numpy/lib/format.pyi,sha256=qF5MgX4HL45SWz12KobX03cr40MoDiXDs4vFltAZVuE,770 -numpy/lib/introspect.py,sha256=P7-Um4--wGHOWLVusNN1bhjMuA1g6kKmu-jx1GGeOPM,2810 -numpy/lib/introspect.pyi,sha256=IsntuFrlFhRBZcGGhRUTAgnONUHEbYw_2ApPmffx8QE,155 -numpy/lib/mixins.py,sha256=hSDMCuYP518waugn3Vdu_S4tbXtDeUkc-zB3wwzyoOI,7519 -numpy/lib/mixins.pyi,sha256=pBHGtj8_EFCwyv6uPlKEMrBTOysEezePNdjqUYMsgPM,3205 -numpy/lib/npyio.py,sha256=nZadg1IKRXTLZX_52TpjU-YutNH5QA_UU457rHfn6oc,65 -numpy/lib/npyio.pyi,sha256=6xZ6zF-6qKuSOfjjDL4YN43xKPYcD6IpzJiDiLpmSSs,121 -numpy/lib/recfunctions.py,sha256=SgWulquccYhucyVpz1aU6Qi8pbcMKyEppbsrpIsh-nM,61339 -numpy/lib/recfunctions.pyi,sha256=Hjbbqt7Jl-bmrWEBvR1ZZHL6EForacXyk73oRnlBXus,13718 -numpy/lib/scimath.py,sha256=HgFt3iWrgcxgV4Y6U-xyZZBM_MMewX62uP8HhOxhveY,122 -numpy/lib/scimath.pyi,sha256=PhlpjveDqnSQvLn2cQ1AQFNVpxECaBWgYvhK8S32jzo,245 -numpy/lib/stride_tricks.py,sha256=BDqFklWQ4eVAoAvtdb_3nT0YxXeMZOtPp6nBr7gKG64,85 -numpy/lib/stride_tricks.pyi,sha256=6-K3R7XBw_fcpHaAIs9y4LEc5i4r5gZUG-tg4EOR-ew,128 -numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/lib/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test__datasource.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test__iotools.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test__version.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_array_utils.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_arraypad.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_arraysetops.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_arrayterator.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_format.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_function_base.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_histograms.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_index_tricks.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_io.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_loadtxt.cpython-313.pyc,, 
-numpy/lib/tests/__pycache__/test_mixins.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_nanfunctions.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_packbits.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_polynomial.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_recfunctions.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_regression.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_shape_base.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_stride_tricks.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_twodim_base.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_type_check.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_ufunclike.cpython-313.pyc,, -numpy/lib/tests/__pycache__/test_utils.cpython-313.pyc,, -numpy/lib/tests/data/py2-np0-objarr.npy,sha256=ZLoI7K3iQpXDkuoDF1Ymyc6Jbw4JngbQKC9grauVRsk,258 -numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 -numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 -numpy/lib/tests/data/py3-objarr.npy,sha256=7mtikKlHXp4unZhM8eBot8Cknlx1BofJdd73Np2PW8o,325 -numpy/lib/tests/data/py3-objarr.npz,sha256=vVRl9_NZ7_q-hjduUr8YWnzRy8ESNlmvMPlaSSC69fk,453 -numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 -numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 -numpy/lib/tests/test__datasource.py,sha256=H6PZKQ0tY6r1bhrcLRKMjWdWop5P4Rj_SYvrU9ukDzc,10921 -numpy/lib/tests/test__iotools.py,sha256=ejbG7SVvTm55Lq5LdUza8-nIvF2mt-XYvfpzn13q038,14097 -numpy/lib/tests/test__version.py,sha256=v2TOlH4f1Pmzxn1HWby3eBgLO9tGnhwH2LvBXlXtHP4,2063 -numpy/lib/tests/test_array_utils.py,sha256=Fy8_PR6GHed-mStqcbfjTe8Q5zMZnJ9WzFzX6DjoRR0,1152 -numpy/lib/tests/test_arraypad.py,sha256=Nc4xoxjlZkuaFSWgc2uP9bIXiLaYcje1tFF2fbIMlAE,57480 -numpy/lib/tests/test_arraysetops.py,sha256=yQy2uGGx_oYJu8nDEYujP_NIlDBmxCiyH5a7t5UH8cA,39023 
-numpy/lib/tests/test_arrayterator.py,sha256=IRVmzxbr9idboJjOHKuX_8NQhMAKs7pD1xWqmU3ZERw,1337 -numpy/lib/tests/test_format.py,sha256=6Kt8l-P9lYsCbY2KRQBusCrNeoGYlFTMcFagXs3gVeY,41937 -numpy/lib/tests/test_function_base.py,sha256=7jlXXT_0-ChV3Sak8bjxHGyfoXcramgCi6MTDIryaaw,173318 -numpy/lib/tests/test_histograms.py,sha256=4PnaePQSpV_HsKynnbe5Hc5L02Z66ecCL24cvAYoeRg,34535 -numpy/lib/tests/test_index_tricks.py,sha256=tgXpLGpT9XpO_djXCTKpM0-WF-AVE5GF8lbvIyUz9X4,20921 -numpy/lib/tests/test_io.py,sha256=cUYVHDew1N-OfetUt-8e19VqMMWAUKZNhN6udbxfcZw,112868 -numpy/lib/tests/test_loadtxt.py,sha256=pvRZMon6Vyy_pdbEiJdJi17RW6I-Rg83Uc3XwMCvew0,41622 -numpy/lib/tests/test_mixins.py,sha256=nIec_DZIDx7ONnlpq_Y2TLkIULAPvQ7LPqtMwEHuV4U,7246 -numpy/lib/tests/test_nanfunctions.py,sha256=oeuoa1r3zx5JJFkU_zdne8GMSWET3UPDxL1sdVZPfAM,54762 -numpy/lib/tests/test_packbits.py,sha256=yN8rYbPDteOPJf7dEeZkgSnyzIUKe_ituLYdsqxcqbQ,17920 -numpy/lib/tests/test_polynomial.py,sha256=qYZGXUIeyZoNjbkEYeuUq1ad2eCPDkNXj6MxakvbIvk,11731 -numpy/lib/tests/test_recfunctions.py,sha256=Yg2pQEcOgf4d3PgrGiwxhrrXfyPXRa3SsyItxkY_wwA,45029 -numpy/lib/tests/test_regression.py,sha256=aCW5aT1PJL1ZCwrHUSa7iixQLWMC3D5iFSRDsWE2Uag,7921 -numpy/lib/tests/test_shape_base.py,sha256=fYKyGdLTM-l2rlTHAzDJbObc_SQWXXF8QoKt266F7K4,28296 -numpy/lib/tests/test_stride_tricks.py,sha256=EKHYiPoawG_vu_tFmKi5Lmvfs0VEDcUW7feiWybUGXA,23644 -numpy/lib/tests/test_twodim_base.py,sha256=mNNXsDKT3hPpz-HB_1k8YTWpwdx7dnvmrWWS_Lkew30,19382 -numpy/lib/tests/test_type_check.py,sha256=2lnLRzUA0voTKURi-qXllYYxBAqpsVAJmMtLQCHoIYA,15145 -numpy/lib/tests/test_ufunclike.py,sha256=9C9LV3XZLaHNQoyRVZl-C4w9HcOTEJMDw2uXYXhf1u4,3123 -numpy/lib/tests/test_utils.py,sha256=KN1q-eFLmckYbOMTxPKTwFMPtzBHdAPb0j9ntfea_yM,2454 -numpy/lib/user_array.py,sha256=v3dCCNs-PZ7tHZ1vqGqdeV5FLHRiLLWrMZhdzQTSRAM,50 -numpy/lib/user_array.pyi,sha256=IaCNerLboKjt3Fm-_k_d8IqeyJf7Lc9Pr5ROUr6wleM,54 -numpy/linalg/__init__.py,sha256=AZnH2FnMk_bDy8VuOsihmoS-nICrpKIRMPNa5Puyk30,2201 
-numpy/linalg/__init__.pyi,sha256=Czr1hGuEjSGY_J7NbFaprCisxeIANCZAYqKz0YRUQAI,1076 -numpy/linalg/__pycache__/__init__.cpython-313.pyc,, -numpy/linalg/__pycache__/_linalg.cpython-313.pyc,, -numpy/linalg/__pycache__/linalg.cpython-313.pyc,, -numpy/linalg/_linalg.py,sha256=yqAqD7BFR_C8y1I4BJ9KAiYkmhUJ1683g5hQoxvYjl4,118309 -numpy/linalg/_linalg.pyi,sha256=namoF69OjkhE0tz2ngdjcdU0NwPG4uhoDLjzb5_iAnc,11867 -numpy/linalg/_umath_linalg.cp313-win_amd64.lib,sha256=_qdjTm97jJF7CFFZ19lve5GMu6bu0BNP90I6YBeLu1Y,2120 -numpy/linalg/_umath_linalg.cp313-win_amd64.pyd,sha256=Et9V81Nm3g8ittDrISaxwevTK1ZEaP8tQRzlYfV_Sc4,108032 -numpy/linalg/_umath_linalg.pyi,sha256=g5NJoNte6CwuMFDfd55O8OvJv4lOi539VKAB-Mrc864,1470 -numpy/linalg/lapack_lite.cp313-win_amd64.lib,sha256=G9-WJipIoEv4P-pQa_zNSlN0zlR56e-GArTXpz4SO3Q,2084 -numpy/linalg/lapack_lite.cp313-win_amd64.pyd,sha256=qMEWgGVLc_XuvKeZKY9uPtnD2kkP5WRlPR42vdjCoKI,17920 -numpy/linalg/lapack_lite.pyi,sha256=sWKWBDR2UP0ez6ETdE0Rz-mp8m_gOCMo4CYVZajDMNo,2818 -numpy/linalg/linalg.py,sha256=1CC9jc-u61GePC5AuieDiyMyrVvgLD8ZJbTPvLfKjHc,600 -numpy/linalg/linalg.pyi,sha256=iGd8b4-gN1d92K7wfgDZxoHrVXnVC1c6vGqW4ZbWldY,1001 -numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/linalg/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/linalg/tests/__pycache__/test_deprecations.cpython-313.pyc,, -numpy/linalg/tests/__pycache__/test_linalg.cpython-313.pyc,, -numpy/linalg/tests/__pycache__/test_regression.cpython-313.pyc,, -numpy/linalg/tests/test_deprecations.py,sha256=GaeE3JnQlJLoAfbY93LmgCFUlV5M8IFmQ7EhF4WbqwU,660 -numpy/linalg/tests/test_linalg.py,sha256=D7AhxqGiHwPjIHI3JNkPcsYFOstTiKe_PTNl6d4YlSE,85701 -numpy/linalg/tests/test_regression.py,sha256=KRXOhAHjZbE3h6vqksTvQayrvhUkyRM8_O6Ky5s-Nqs,6866 -numpy/ma/API_CHANGES.txt,sha256=U39zA87aM_OIJhEKvHgL1RY1lhMJZc1Yj3DGLwbPbF0,3540 -numpy/ma/LICENSE,sha256=1427IIuA2StNMz5BpLquUNEkRPRuUxmfp3Jqkd5uLac,1616 
-numpy/ma/README.rst,sha256=_MHrqHTE8L4wiJJqvaOh1l-xTxidwdilc_SZkFbgubM,10110 -numpy/ma/__init__.py,sha256=EFe3qk5iN_7Z__BwlkEW6xo2Zc6NnI8F7G2b1UVW4uY,1473 -numpy/ma/__init__.pyi,sha256=76dORzdLey4HoMD26xJFuw-2aIGrB30xt8rFqE8xafY,7404 -numpy/ma/__pycache__/__init__.cpython-313.pyc,, -numpy/ma/__pycache__/core.cpython-313.pyc,, -numpy/ma/__pycache__/extras.cpython-313.pyc,, -numpy/ma/__pycache__/mrecords.cpython-313.pyc,, -numpy/ma/__pycache__/testutils.cpython-313.pyc,, -numpy/ma/__pycache__/timer_comparison.cpython-313.pyc,, -numpy/ma/core.py,sha256=4Jv1_64eM6_aBIQ3sp4uKUU8sfxslMzeGB1AoyTTico,299477 -numpy/ma/core.pyi,sha256=2akub4Nv5xHCrhtBQ28pVG2BcnU8hi65gK-UkZP0_6o,18835 -numpy/ma/extras.py,sha256=0Od0rMKh6FLyG0byaU5kAeWcZCRfcVQRTNutMfmiCRo,72951 -numpy/ma/extras.pyi,sha256=YYuESxQTbtdLwxk_rZz7oZbg_JJMzBo92CEpFIpCWnA,3938 -numpy/ma/mrecords.py,sha256=BXglbMRYLeB5FxBcU_1vzmMZpF6iXaKS57qpTkWFm8A,27888 -numpy/ma/mrecords.pyi,sha256=oGSsEingxJ_A07fLNDrckjS7MwA8yZN_N6wkOEmRPeE,2078 -numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/ma/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/ma/tests/__pycache__/test_arrayobject.cpython-313.pyc,, -numpy/ma/tests/__pycache__/test_core.cpython-313.pyc,, -numpy/ma/tests/__pycache__/test_deprecations.cpython-313.pyc,, -numpy/ma/tests/__pycache__/test_extras.cpython-313.pyc,, -numpy/ma/tests/__pycache__/test_mrecords.cpython-313.pyc,, -numpy/ma/tests/__pycache__/test_old_ma.cpython-313.pyc,, -numpy/ma/tests/__pycache__/test_regression.cpython-313.pyc,, -numpy/ma/tests/__pycache__/test_subclassing.cpython-313.pyc,, -numpy/ma/tests/test_arrayobject.py,sha256=ap06C0a0dGWcOknpctbhLbzHSNd2M9p_JL2jESqBBGk,1139 -numpy/ma/tests/test_core.py,sha256=UfmlFEHJCksx4ad9UsP77n9bOenWqqe4peM7G5JG18k,225055 -numpy/ma/tests/test_deprecations.py,sha256=WurKSuN6hsXmWxRoxstdVBXcKCTvYxlYz-ntSkW6qKc,2650 -numpy/ma/tests/test_extras.py,sha256=C_auxUGRJ38o-7LZGNTN5IdAi48c1QIY8bzM2NozB6g,80274 
-numpy/ma/tests/test_mrecords.py,sha256=TzQwlvY1iJnKH7ARsOI9nNaNeTt1sGgZAj8NEjP7jY0,20348 -numpy/ma/tests/test_old_ma.py,sha256=tQ-IqKZ1NMHq5_8qkOaZWg_rZkWBpRaPnlodBRd_ABA,33629 -numpy/ma/tests/test_regression.py,sha256=J1ftHDKfIF3SUIgQlxJplCsYTrPpAyN4rf5K1Uw5T8w,3384 -numpy/ma/tests/test_subclassing.py,sha256=UFK0R44pRCmcENP2kbI_4hRMQ7YC6qjplZNM0WeqcCM,17469 -numpy/ma/testutils.py,sha256=86e8bckl-C24JBICXzVMI_s4RqtbgZqDLD0L5tZPTgc,10564 -numpy/ma/timer_comparison.py,sha256=a3kW2PlSCDXmVrDx0VGPQ9vhcQIuDUPEnKZ54zVP810,16153 -numpy/matlib.py,sha256=DJsayODBbd0n6MmhxPmgiL28ALyLgQdHtQ5BHKggY5I,11036 -numpy/matlib.pyi,sha256=Is_0Dii3OSM58bzPXiiJV46xGUK9Nb34adHOqNlMbME,10214 -numpy/matrixlib/__init__.py,sha256=9-DMlmdLxOk5HSGJ20AuTjKkGZ3MUPHCFjhE6sb4NMo,253 -numpy/matrixlib/__init__.pyi,sha256=ZAutkmA8BpttneOyZNqAjiYJN5F7sl-WSAfIkkI2qlI,109 -numpy/matrixlib/__pycache__/__init__.cpython-313.pyc,, -numpy/matrixlib/__pycache__/defmatrix.cpython-313.pyc,, -numpy/matrixlib/defmatrix.py,sha256=jMYex3MhNKlvjgcP9EAYMt_yrVQ0O67ZuXwMRvFCff0,31918 -numpy/matrixlib/defmatrix.pyi,sha256=a4tKYShYR9EeU9Ftub3_iyGzwunUIqK_6uz_B0ZAyXQ,495 -numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/matrixlib/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-313.pyc,, -numpy/matrixlib/tests/__pycache__/test_interaction.cpython-313.pyc,, -numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-313.pyc,, -numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-313.pyc,, -numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-313.pyc,, -numpy/matrixlib/tests/__pycache__/test_numeric.cpython-313.pyc,, -numpy/matrixlib/tests/__pycache__/test_regression.cpython-313.pyc,, -numpy/matrixlib/tests/test_defmatrix.py,sha256=3cSTjFilFZVq2fMgfoUlx6hf9N4MSvBMhHcemoiUzLA,15488 -numpy/matrixlib/tests/test_interaction.py,sha256=9loMwSKXBOu09Z6aZ6_RG7ojbEfn19A8N39h12F5668,12249 
-numpy/matrixlib/tests/test_masked_matrix.py,sha256=SjuUs4IhE3x2y8oM9uoWhKX4K1sX2JNkLQMlhMlvzD0,9146 -numpy/matrixlib/tests/test_matrix_linalg.py,sha256=9S9Zrk8PMLfEEo9wBx5LyrV_TbXhI6r-Hc5t594lQFY,2152 -numpy/matrixlib/tests/test_multiarray.py,sha256=E5jvWX9ypWYNHH7iqAW3xz3tMrEV-oNgjN3_oPzZzws,570 -numpy/matrixlib/tests/test_numeric.py,sha256=l-LFBKPoP3_O1iea23MmaACBLx_tSSdPcUBBRTiTbzk,458 -numpy/matrixlib/tests/test_regression.py,sha256=wpWVjM4pHRaiVX_Y5_zc6yNr4I5zWdmJfHTwbmBUhew,963 -numpy/polynomial/__init__.py,sha256=JAnPIGbR7QJilyIhHjVvA7SsWGSO1Sm0PCse-XWk3dY,6947 -numpy/polynomial/__init__.pyi,sha256=885H4pfwJHj0xFuPDsV6p_ON2nJenGjA5h8d4uMY-IY,711 -numpy/polynomial/__pycache__/__init__.cpython-313.pyc,, -numpy/polynomial/__pycache__/_polybase.cpython-313.pyc,, -numpy/polynomial/__pycache__/chebyshev.cpython-313.pyc,, -numpy/polynomial/__pycache__/hermite.cpython-313.pyc,, -numpy/polynomial/__pycache__/hermite_e.cpython-313.pyc,, -numpy/polynomial/__pycache__/laguerre.cpython-313.pyc,, -numpy/polynomial/__pycache__/legendre.cpython-313.pyc,, -numpy/polynomial/__pycache__/polynomial.cpython-313.pyc,, -numpy/polynomial/__pycache__/polyutils.cpython-313.pyc,, -numpy/polynomial/_polybase.py,sha256=SsFFCPQxtXzxDgXMsD2ovvoBL-1jQIrmdWCUMBizyPs,40648 -numpy/polynomial/_polybase.pyi,sha256=Kt1x4PgzInVS9mMR_C5d6yjJaIPcfMyhp0tp0Bz2FZk,8821 -numpy/polynomial/_polytypes.pyi,sha256=-NjNhcMP9dwCdWrIod0uRJmSNtqIQSQ6lSbvSy3aKd4,23455 -numpy/polynomial/chebyshev.py,sha256=f0h4dyuTy1KePOjKo7tBeYmLvrh1YcFBzi3i5wZyg1w,64168 -numpy/polynomial/chebyshev.pyi,sha256=AnJkNZoHyIUQvFbQfexdey-GJwN3fMjZs2pDZT6YzvQ,4917 -numpy/polynomial/hermite.py,sha256=XlsIKUW1sAGtdUqUpzZOt9BPyyDebQl_fK7zrlZI8GI,56206 -numpy/polynomial/hermite.pyi,sha256=xggYYL_74IGVlqmK9NXXIiSpGKELIcoqaOOJ0enXvPU,2551 -numpy/polynomial/hermite_e.py,sha256=QBvJfj8aOxTq4qFpY2Fjo0EZs5AEhd_ur4pIh5dq3XA,53850 -numpy/polynomial/hermite_e.pyi,sha256=CGq8MpTXOonV1JzfLdWuN_-pXOYEJG4qvNd977s11ho,2643 
-numpy/polynomial/laguerre.py,sha256=ITXPSdc15HORhN5stSri5hGZyuCvv6ZxD2lYLMosSqQ,54054 -numpy/polynomial/laguerre.pyi,sha256=ftBF2ZU4CFriNY6xy8lGP-gNxRB4udAI4HVW7nkv2R0,2274 -numpy/polynomial/legendre.py,sha256=8WMBxMF_AQtfa4d46JYnQCYnbMFBTsixpVm-iBe5iDk,52599 -numpy/polynomial/legendre.pyi,sha256=590XJNm9Yl_ShYBZdcrlB65qs9DEh7OOAmeC_IXu5to,2272 -numpy/polynomial/polynomial.py,sha256=N1O1iPZeg15LQTg7W8Qcz4-J7EwDzHlhRKLFXsN10Aw,53819 -numpy/polynomial/polynomial.pyi,sha256=0KSIDRCJg1EnrZCuyQVCEKP07IiHTFHyaKPC4Po3jJI,2089 -numpy/polynomial/polyutils.py,sha256=wfNdfDePXKCqJIk8VSWjmApQN1TKpCe-YuBurYwJbi8,23287 -numpy/polynomial/polyutils.pyi,sha256=zA5UdU71NWqiKv3nAYAt5MAcJgAywHOj9lwjX8sbEro,10857 -numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/polynomial/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_classes.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_hermite.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_laguerre.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_legendre.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_polynomial.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_polyutils.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_printing.cpython-313.pyc,, -numpy/polynomial/tests/__pycache__/test_symbol.cpython-313.pyc,, -numpy/polynomial/tests/test_chebyshev.py,sha256=PI2XwvGGqQKEB1RxbsYRgeTG0cunB_8Otd9SBJozq-8,21141 -numpy/polynomial/tests/test_classes.py,sha256=VCcG2ICOteBolQHyfzYzMUhyqHlbAJxV8LdQm9NO50U,19057 -numpy/polynomial/tests/test_hermite.py,sha256=zHGmy1UAuKtLj5Key6BMne7ZRh3tZpowfleghQzyhqo,19131 -numpy/polynomial/tests/test_hermite_e.py,sha256=5ZBtGi2gkeldYVSh8xlQOLUDW6fcT4YdZiTrB6AaGJU,19467 
-numpy/polynomial/tests/test_laguerre.py,sha256=Bm5SAKjOcQ6RlSsc8SRXYfU34mbdQ2fdMjf2E9ppznM,18047 -numpy/polynomial/tests/test_legendre.py,sha256=Vbye67yIzN7Ij2UwYZlhSt68hoNeukFHYd1QCvA70ZY,19240 -numpy/polynomial/tests/test_polynomial.py,sha256=zuJJoVLls3H2wnYeLjc514oBCx8hE5AvnbBgtQqJIzI,22660 -numpy/polynomial/tests/test_polyutils.py,sha256=b3vdtJVjC34AmEv96sw2IvIABNDqmYhCnMYZCvhtWzU,3897 -numpy/polynomial/tests/test_printing.py,sha256=_RIcZxPEUJUb8aSpdAkvnZBwBDfIyR8tKI2--w9Y64o,21854 -numpy/polynomial/tests/test_symbol.py,sha256=GZnqB4PLjZDWalREVOAI3qus9kjUDhCW-WZ_87jRmPY,5588 -numpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/random/LICENSE.md,sha256=tLwvT6HJV3jx7T3Y8UcGvs45lHW5ePnzS1081yUhtIo,3582 -numpy/random/__init__.pxd,sha256=g3EaMi3yfmnqT-KEWj0cp6SWIxVN9ChFjEYXGOfOifE,445 -numpy/random/__init__.py,sha256=W_hFzGsKVQfdh3-U15gzsOKKAk8uZgioDkxKyuou4WA,7721 -numpy/random/__init__.pyi,sha256=tb8imrQCSdpOL9DxD9WTBEz_Buot9aycQ4YvUr3snhM,2284 -numpy/random/__pycache__/__init__.cpython-313.pyc,, -numpy/random/__pycache__/_pickle.cpython-313.pyc,, -numpy/random/_bounded_integers.cp313-win_amd64.lib,sha256=uMpX0ItaqNaKImMCUWz_cxY_gClTnftNOgZBxZqWv5o,18000 -numpy/random/_bounded_integers.cp313-win_amd64.pyd,sha256=u5Gu2xAd0Kl6vTv6WlasvePK2W6C8AZMKOYeq_LZBCo,224768 -numpy/random/_bounded_integers.pxd,sha256=EOKKUlF9bh0CLNEP8TzXzX4w_xV5kivr1Putfdf6yvU,1763 -numpy/random/_common.cp313-win_amd64.lib,sha256=FhxgAP-4i_aqWz_06IA6y1hE6_D5hBUWNBORKpD4ttg,2012 -numpy/random/_common.cp313-win_amd64.pyd,sha256=zubEfAnCwBTU37Rc9Sds-SovWpwFvrTUvRDcR1BSRtY,168448 -numpy/random/_common.pxd,sha256=2_9NLWFSnLG4iDd-KeYUBRa47QM8qceUsPiAkyWZ74I,5089 -numpy/random/_examples/cffi/__pycache__/extending.cpython-313.pyc,, -numpy/random/_examples/cffi/__pycache__/parse.cpython-313.pyc,, -numpy/random/_examples/cffi/extending.py,sha256=BgydYEYBb6hDghMF-KQFVc8ssUU1F5Dg-3GyeilT3Vg,920 
-numpy/random/_examples/cffi/parse.py,sha256=eRBbVrnxvw0v3BS6JJvX1rjpm1MA7yZu-31QHMuNlp4,1805 -numpy/random/_examples/cython/extending.pyx,sha256=1lkq6zFifnwaMtAkVG0i_9SbMiNqplvqnHaqUpxqNzs,2344 -numpy/random/_examples/cython/extending_distributions.pyx,sha256=myr53bzJ2kVTltZx_MDcw3Q6bbh1MK1U22GKyaEi5C8,3963 -numpy/random/_examples/cython/meson.build,sha256=q_IFcVs_qzERJD_-8uaDnjps3QdaW49okZMbFtwkAPo,1747 -numpy/random/_examples/numba/__pycache__/extending.cpython-313.pyc,, -numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-313.pyc,, -numpy/random/_examples/numba/extending.py,sha256=vnqUqQRvlAI-3VYDzIxSQDlb-smBAyj8fA1-M2IrOQw,2041 -numpy/random/_examples/numba/extending_distributions.py,sha256=-aTxLIqnXW0XPtmEp0yJfaBTBcjEo9Q9SebKG_dOLvw,2103 -numpy/random/_generator.cp313-win_amd64.lib,sha256=9rl_OsTUjUfOt7fEpglT-o9R9iwYnaPNd-s267yg7P8,18400 -numpy/random/_generator.cp313-win_amd64.pyd,sha256=Ei7DqRtLrf8yjEWeA3rdR35PSrLYIeeaHerthiKF0lo,748544 -numpy/random/_generator.pyi,sha256=flo5onsP6p2SECRZg08N3Ix9JWrcWEtqLzM0JGJQf0o,24865 -numpy/random/_mt19937.cp313-win_amd64.lib,sha256=TKv1Ku9Wj_WU9t8VGOWm-F7UKOLvvvvPPL8P5WQzNrw,2032 -numpy/random/_mt19937.cp313-win_amd64.pyd,sha256=l7Kzq_YiuHcJs_4uSdDD3EQLZeB0OjBzYjQ1Ak68oCE,89088 -numpy/random/_mt19937.pyi,sha256=QB8vx8f-EGl-qz3iYGArFsfPb3Mgqldk128UeWX3kLs,800 -numpy/random/_pcg64.cp313-win_amd64.lib,sha256=3C4EpAGYXy2Zc-7jF9bdx0IGB1d5hFaEtZrvFvZ21uk,1996 -numpy/random/_pcg64.cp313-win_amd64.pyd,sha256=ffX8s0tG_204_mhfuVaHQGsgLsiClk1URlDNbmjj2TQ,95232 -numpy/random/_pcg64.pyi,sha256=TSID_lsjoPvfGIR4cbvGLg41VmbsHclheSt8pfBZPhs,1186 -numpy/random/_philox.cp313-win_amd64.lib,sha256=7Mk1BhUFzwXz8kR4UU_86OZvqzCPCkqEYN9HLFRN_Z8,2012 -numpy/random/_philox.cp313-win_amd64.pyd,sha256=lgJTensdbXddxDKdfSpr3JRq6Ca7gz71Z5qd5WfMYGE,80384 -numpy/random/_philox.pyi,sha256=e7J93SwcbYrDfBfJgnuVIngiEn7NSN7k576J9pz4d54,1044 -numpy/random/_pickle.py,sha256=D5MrszR_oDD4yss3bt94MPw54FNIyH6f4MtOWBYRDvk,2832 
-numpy/random/_pickle.pyi,sha256=V4UAI1td1JPMHeNMZjon30x7E7SD3WZBALC8HzQFciU,1651 -numpy/random/_sfc64.cp313-win_amd64.lib,sha256=aKitBgL7vTu4-N6i6UCVxLiNlaRUVzDEHi_GVIEpbnY,1996 -numpy/random/_sfc64.cp313-win_amd64.pyd,sha256=BmcHxwy_0F9s4bw99xxnD2oFtgAXd93FmD0j6wTPr2Q,59904 -numpy/random/_sfc64.pyi,sha256=HCCIxo0H1b0_s5MEWrwttlElWEE5GKt5wV6LYxIvSxM,710 -numpy/random/bit_generator.cp313-win_amd64.lib,sha256=7l1Cu4aNYB89vqLj56eD4uUK-S-jCpPyhRB40Gg3PX4,2120 -numpy/random/bit_generator.cp313-win_amd64.pyd,sha256=AjVuSV2Sp54bT_darrtPk42FWdrzZD9GdIknwoHzGhI,171008 -numpy/random/bit_generator.pxd,sha256=LJpeB-EKeVV8_JO69sS33XJLZQ3DAhrUCNzs_ei7AoI,1042 -numpy/random/bit_generator.pyi,sha256=OqHUYtl94gRrT5AV-A7iV-0QKumGfmb3jrkCoUkI4Xc,3641 -numpy/random/c_distributions.pxd,sha256=02WeqbzQ4heQ1cZ7ShePejxmt5AOI5kTstBZ5w2WxD0,6454 -numpy/random/lib/npyrandom.lib,sha256=QSx4TSq4CcooYh9T_Qln9CMT9D0eixDKyokEiHATEoQ,148178 -numpy/random/mtrand.cp313-win_amd64.lib,sha256=UTiwKIE9oF5UlrLFIYmTBftFvFCbE_6Msdd9bjCRcHg,17122 -numpy/random/mtrand.cp313-win_amd64.pyd,sha256=xuAsQPUC9aPIYehR3M07Caw-qMXU1ojuOobnce9vDCg,635392 -numpy/random/mtrand.pyi,sha256=2e8aUstFMyrLOtQSV_SwgtM_F2UzmAF-DWKZH2xRocM,22676 -numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/random/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_direct.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_extending.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_generator_mt19937.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_random.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_randomstate.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_randomstate_regression.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_regression.cpython-313.pyc,, -numpy/random/tests/__pycache__/test_seed_sequence.cpython-313.pyc,, 
-numpy/random/tests/__pycache__/test_smoke.cpython-313.pyc,, -numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/random/tests/data/__pycache__/__init__.cpython-313.pyc,, -numpy/random/tests/data/generator_pcg64_np121.pkl.gz,sha256=EfQ-X70KkHgBAFX2pIPcCUl4MNP1ZNROaXOU75vdiqM,203 -numpy/random/tests/data/generator_pcg64_np126.pkl.gz,sha256=fN8deNVxX-HELA1eIZ32kdtYvc4hwKya6wv00GJeH0Y,208 -numpy/random/tests/data/mt19937-testset-1.csv,sha256=bA5uuOXgLpkAwJjfV8oUePg3-eyaH4-gKe8AMcl2Xn0,16845 -numpy/random/tests/data/mt19937-testset-2.csv,sha256=SnOL1nyRbblYlC254PBUSc37NguV5xN-0W_B32IxDGE,16826 -numpy/random/tests/data/pcg64-testset-1.csv,sha256=wHoS7fIR3hMEdta7MtJ8EpIWX-Bw1yfSaVxiC15vxVs,24840 -numpy/random/tests/data/pcg64-testset-2.csv,sha256=6vlnVuW_4i6LEsVn6b40HjcBWWjoX5lboSCBDpDrzFs,24846 -numpy/random/tests/data/pcg64dxsm-testset-1.csv,sha256=Fhha5-jrCmRk__rsvx6CbDFZ7EPc8BOPDTh-myZLkhM,24834 -numpy/random/tests/data/pcg64dxsm-testset-2.csv,sha256=mNYzkCh0NMt1VvTrN08BbkpAbfkFxztNcsofgeW_0ns,24840 -numpy/random/tests/data/philox-testset-1.csv,sha256=QvpTynWHQjqTz3P2MPvtMLdg2VnM6TGTpXgp-_LeJ5g,24853 -numpy/random/tests/data/philox-testset-2.csv,sha256=-BNO1OCYtDIjnN5Q-AsQezBCGmVJUIs3qAMyj8SNtsA,24839 -numpy/random/tests/data/sfc64-testset-1.csv,sha256=sgkemW0lbKJ2wh1sBj6CfmXwFYTqfAk152P0r8emO38,24841 -numpy/random/tests/data/sfc64-testset-2.csv,sha256=mkp21SG8eCqsfNyQZdmiV41-xKcsV8eutT7rVnVEG50,24834 -numpy/random/tests/data/sfc64_np126.pkl.gz,sha256=MVa1ylFy7DUPgUBK-oIeKSdVl4UYEiN3AZ7G3sdzzaw,290 -numpy/random/tests/test_direct.py,sha256=PI1C5R_WQGagdQ65sS74o_nq3ovYSDjExIDu9r3jY7k,20536 -numpy/random/tests/test_extending.py,sha256=zZBAB6VvMh-JO6kc_Fco8C4bl-wTw_GY_BCoTg-kQ-M,4561 -numpy/random/tests/test_generator_mt19937.py,sha256=ms_yBBSkxUKT0F7kjPM-PKwTi6SZvKMnmBdYkQS8a2E,120085 -numpy/random/tests/test_generator_mt19937_regressions.py,sha256=5wlQqn6jdLwPbGNZrF3RPwLn_xRj2CCA6DY167dHN7c,8300 
-numpy/random/tests/test_random.py,sha256=TW-ikZicDVgTi9WeZOQwLCCCZ_Q_gWAom6PoztXSZ5k,71901 -numpy/random/tests/test_randomstate.py,sha256=RrgFeK2r5JcD4K8paWObS8nKufdGumLN2fdnvp974kI,87399 -numpy/random/tests/test_randomstate_regression.py,sha256=8FL4sxX1D1oMVX_F9u5vR8Zazo5V0Yj4bL7zsh57V-Y,8215 -numpy/random/tests/test_regression.py,sha256=_eoEa-QIYh33tESahMHsVZtCy9W_s5T5RPzI6QYS7LY,5611 -numpy/random/tests/test_seed_sequence.py,sha256=zWUvhWDxBmTN2WteSFQeJ29W0-2k3ZUze_3YtL4Kgms,3391 -numpy/random/tests/test_smoke.py,sha256=StTxeemamKeE_H_UHQWyDxIXJSbLQI4Yr5sDp3y6ZH4,28992 -numpy/rec/__init__.py,sha256=SMM69A-UzX5LD6JxSYXO-M9t4grwzRcqSAXXuMU5PSY,85 -numpy/rec/__init__.pyi,sha256=lPzA1S5UmKd5MvDDBb-afONgZYl0Gr3l5LxPB7Qyc_I,368 -numpy/rec/__pycache__/__init__.cpython-313.pyc,, -numpy/strings/__init__.py,sha256=NLFxhadn513TAXf8kgVguCvmyzXnP1JpVnNJtqfErX4,85 -numpy/strings/__init__.pyi,sha256=1Lax4CbaTiyckJDEl0FluWFnv7GZyOh5hxMnEVuNBmo,1390 -numpy/strings/__pycache__/__init__.cpython-313.pyc,, -numpy/testing/__init__.py,sha256=ENc09IN_D74xNvH33Z65Q2dkaSEvljHF_tz-BV-g_dU,617 -numpy/testing/__init__.pyi,sha256=hzSq3lVZ2gZbxMrQXNP3PaetjgJyKnfg50mkjTB8jXg,2147 -numpy/testing/__pycache__/__init__.cpython-313.pyc,, -numpy/testing/__pycache__/overrides.cpython-313.pyc,, -numpy/testing/__pycache__/print_coercion_tables.cpython-313.pyc,, -numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/testing/_private/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/testing/_private/__pycache__/__init__.cpython-313.pyc,, -numpy/testing/_private/__pycache__/extbuild.cpython-313.pyc,, -numpy/testing/_private/__pycache__/utils.cpython-313.pyc,, -numpy/testing/_private/extbuild.py,sha256=ce56g9xEaJHUo5CqcmcpnUksdcS6tW76BNoAfGnxysg,8358 -numpy/testing/_private/extbuild.pyi,sha256=FWRL9bv2CK1FpFNLGXEJLvoZN6jgdQNnb62EENQ_u6Y,651 -numpy/testing/_private/utils.py,sha256=HU-1SLzJMa-OnJttrcLlA4UcY-FBoF7uhcxLMkNCt1s,98460 
-numpy/testing/_private/utils.pyi,sha256=930ijrCmd_ZISmL4rGWSSutytCDzAiT-JJPl4fka2yY,13463 -numpy/testing/overrides.py,sha256=FRkp9cLvEwCdXWLinUH3hGf_u9SIzZk17QcRQfITZyk,2216 -numpy/testing/overrides.pyi,sha256=LMYa6hii8jPmR_eC-LHNrz3irrImvZcW29NxCkfgzNk,408 -numpy/testing/print_coercion_tables.py,sha256=BGTgZxvxnUNYqOwsceMR9xQ1LD6QUePsKLBsq8c8Vyo,6424 -numpy/testing/print_coercion_tables.pyi,sha256=O4nFjoyQ4AvDO2BrzsFi4QKaxsgmf1KDKAS-IEemPxw,848 -numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/testing/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/testing/tests/__pycache__/test_utils.cpython-313.pyc,, -numpy/testing/tests/test_utils.py,sha256=xoQskILg4xhRkfHLsljkXfDHYjTtT1QkLyvNaV2KBVk,72385 -numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/tests/__pycache__/test__all__.cpython-313.pyc,, -numpy/tests/__pycache__/test_configtool.cpython-313.pyc,, -numpy/tests/__pycache__/test_ctypeslib.cpython-313.pyc,, -numpy/tests/__pycache__/test_lazyloading.cpython-313.pyc,, -numpy/tests/__pycache__/test_matlib.cpython-313.pyc,, -numpy/tests/__pycache__/test_numpy_config.cpython-313.pyc,, -numpy/tests/__pycache__/test_numpy_version.cpython-313.pyc,, -numpy/tests/__pycache__/test_public_api.cpython-313.pyc,, -numpy/tests/__pycache__/test_reloading.cpython-313.pyc,, -numpy/tests/__pycache__/test_scripts.cpython-313.pyc,, -numpy/tests/__pycache__/test_warnings.cpython-313.pyc,, -numpy/tests/test__all__.py,sha256=JziA96KUyXwWCPExbQcJBqe_RU1xQVrVwi1xhO8tzqM,230 -numpy/tests/test_configtool.py,sha256=goqOIpRq8Hrig_d6vxZGu8zluQManELhkGGDl3g9qto,1598 -numpy/tests/test_ctypeslib.py,sha256=PSiQsEpT3CoLFp56zntAEkaJJ1VMHkvE0pr8-infzKM,12728 -numpy/tests/test_lazyloading.py,sha256=vsobnlXKUfdMdqMIAZBF_DRSbYNhYF3Za4cYv-J7qHA,1196 -numpy/tests/test_matlib.py,sha256=TUaQmGoz9fvQQ8FrooTq-g9BFiViGWjoTIGQSUUF6-Y,1910 
-numpy/tests/test_numpy_config.py,sha256=F0vWlR3yQyfWI3XfCxKYc6f6k3ldLDypCHbUGU_gy8E,1277 -numpy/tests/test_numpy_version.py,sha256=n4cggUNnM9okmtxwyhYBWBFwJvKpY7NzYxMgrNwRU40,1808 -numpy/tests/test_public_api.py,sha256=bn39YfPIbaVvn4cOsw7escA3F-iWLAaMKBhgeSvAXYE,28474 -numpy/tests/test_reloading.py,sha256=spEldUm_nmV0tBoUG53a2ORCOjwfltimpKfGGTqa7pI,2441 -numpy/tests/test_scripts.py,sha256=6rZN5bnGpeR4vEjLBiKEUMXJiE2NVnbY1Q8xKPlOqA8,1692 -numpy/tests/test_warnings.py,sha256=iAipwlsADKIY0BdRHd6oRv4RzOI0p0nxcqSr9DoqeLI,2422 -numpy/typing/__init__.py,sha256=rGl883L4FnRPSzNe1Zyz7_KrHvxIMobSMoLuGPPhKNI,5442 -numpy/typing/__pycache__/__init__.cpython-313.pyc,, -numpy/typing/__pycache__/mypy_plugin.cpython-313.pyc,, -numpy/typing/mypy_plugin.py,sha256=BJQGuyCEXpt-DSVgwiG1LQWDoXhbWTBRqDA3q8kk2wI,6669 -numpy/typing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -numpy/typing/tests/__pycache__/__init__.cpython-313.pyc,, -numpy/typing/tests/__pycache__/test_isfile.cpython-313.pyc,, -numpy/typing/tests/__pycache__/test_runtime.cpython-313.pyc,, -numpy/typing/tests/__pycache__/test_typing.cpython-313.pyc,, -numpy/typing/tests/data/fail/arithmetic.pyi,sha256=C-C5EOxTviVtzFk77RdVHTrEyQEJvUp2wrzlR9nQMOU,3904 -numpy/typing/tests/data/fail/array_constructors.pyi,sha256=mrcArR9EVNE4-9yKg-SgVv_Yp-4DpZ1Q_0cHiRwXRtI,1163 -numpy/typing/tests/data/fail/array_like.pyi,sha256=MUIx6Oc5bJeebr-TC4FhZFXnX9pJ5gQDv8moHmPek10,471 -numpy/typing/tests/data/fail/array_pad.pyi,sha256=JGCMd_sRBYlsPQ2d7EfLaNooTsg1P0jBuD5Ds2MeXAg,138 -numpy/typing/tests/data/fail/arrayprint.pyi,sha256=KAbzVtw1V65ImeO4MhlejQt8yYB3mhCHwt0eqVqqoTY,602 -numpy/typing/tests/data/fail/arrayterator.pyi,sha256=tRPWjCh1-sg5FXAyYeTbHSR983JUFlecRNcustDLt4E,484 -numpy/typing/tests/data/fail/bitwise_ops.pyi,sha256=QglRyKkdf96Z-klBfGQ1JSmtOFk3yeSDFz0MqKS-rj0,604 -numpy/typing/tests/data/fail/char.pyi,sha256=m8SxJUaMSj2SWFHhjtJHj0b1KMPg7f1tXBjpPG_pEso,2781 
-numpy/typing/tests/data/fail/chararray.pyi,sha256=inRqI3ZlDZ-R6Wpe4VoQnNzuO874E6SNcbzM9bz4xjw,2368 -numpy/typing/tests/data/fail/comparisons.pyi,sha256=xrNXGulq1kVRufLUB7nG95g_YNr_wR5hbIdhy0tkRMc,849 -numpy/typing/tests/data/fail/constants.pyi,sha256=3IZ6T9p4n61qIXngrHB8VqEaqloxcNmbUz3YcSqNSXI,88 -numpy/typing/tests/data/fail/datasource.pyi,sha256=mX9ucsgNXNekVFuRVzBjleA-p8GpuwpbsHqiG6a9CpA,420 -numpy/typing/tests/data/fail/dtype.pyi,sha256=ltT4BFaX_KTVdRLw2dMg3_OiSNYjDSNrXsxby6eeLTw,354 -numpy/typing/tests/data/fail/einsumfunc.pyi,sha256=dYOaJ0J4EUzdyUBikKHie99K8SMaYrlqN3R9aDcMeJ4,499 -numpy/typing/tests/data/fail/flatiter.pyi,sha256=u4-JnRsydg5BW3OcA9we8MXLJ6F5cuaxxw0BrHVA9kY,891 -numpy/typing/tests/data/fail/fromnumeric.pyi,sha256=cN_nAgj2y2_wkErPsP1zAxG0CmHQmmeO4g7qkA9FsWY,5868 -numpy/typing/tests/data/fail/histograms.pyi,sha256=JteTXgK_kXD8UPdihMZ_T2VcM3rTBj6t-MMRP8UHvhw,379 -numpy/typing/tests/data/fail/index_tricks.pyi,sha256=63ADYRCVtf0Dapc2dJpYJZDSIXK3MhhW_1lG30d3-RY,523 -numpy/typing/tests/data/fail/lib_function_base.pyi,sha256=uvVKoZP0Mx-8V8DMCnLWoe8lk6eRT3eSAxqNFpylwEQ,2751 -numpy/typing/tests/data/fail/lib_polynomial.pyi,sha256=PM1TD9h4tFNeMp4y6HlXHKuAHDW0bfNHw0UWLUHnLVk,928 -numpy/typing/tests/data/fail/lib_utils.pyi,sha256=chR5zMEM5KI2Aw0LPIlIC8CnEcPIHwyKMLzbPhXNYXU,99 -numpy/typing/tests/data/fail/lib_version.pyi,sha256=JWtuTLcjkZpGfXshlFpJO5vINxawn9S-mxLGH0-7kcw,164 -numpy/typing/tests/data/fail/linalg.pyi,sha256=j6GGpOENz0nuZsza0Dyfy6MtjfRltqrbY8K_7g5H92I,1370 -numpy/typing/tests/data/fail/memmap.pyi,sha256=eAX-nEKtOb06mL8EPECukmL8MwrehSVRu5TBlHiSBaQ,164 -numpy/typing/tests/data/fail/modules.pyi,sha256=HYfnYNKIRwGg2caw19iqN1MDcctFMQKlE4mqoasWDaM,638 -numpy/typing/tests/data/fail/multiarray.pyi,sha256=AMsYk58-B30xQTHirBGAC6vykmauw-S7H_YiHSLOAQA,1696 -numpy/typing/tests/data/fail/ndarray.pyi,sha256=5A83TCpAmaUC0rtOU0NVG0vsNfKo_-1SF5qtVT7eqoc,415 -numpy/typing/tests/data/fail/ndarray_misc.pyi,sha256=ew0rklpnwM-57zZTCY7nczMS_tj8y7rxKTcnmjayPlU,1036 
-numpy/typing/tests/data/fail/nditer.pyi,sha256=We6p5_nmfUdd_4CtwYZc5O7MTSMyM-Xw7mEUzdKPcP4,333 -numpy/typing/tests/data/fail/nested_sequence.pyi,sha256=7E1zJ2SZIF0ldbEmjtA_Bp6cV4Q-cS4Op0BJN3Vi3rc,444 -numpy/typing/tests/data/fail/npyio.pyi,sha256=CT-NXoisYmIy-WBGaZkCm8zHPCL2Ju5Moy021vnEhIU,653 -numpy/typing/tests/data/fail/numerictypes.pyi,sha256=wPJaHwMdiX1tJLdnYAgZ5z42tEhX-8EtGfWKU81czf4,125 -numpy/typing/tests/data/fail/random.pyi,sha256=v_Y-EfhC7PC8E3AH-v-AfiZVlJDSShL77WQ3yXWx5iE,2883 -numpy/typing/tests/data/fail/rec.pyi,sha256=BxH41lR1wLvLrlash9mzkPFngDAXSPQQXvuHxYylHAI,721 -numpy/typing/tests/data/fail/scalars.pyi,sha256=gN2pS35JX6MOCZTzL_1ml53510Kjr2dfVclLZrOwCpE,2951 -numpy/typing/tests/data/fail/shape.pyi,sha256=-SzfxgevV7APDLlq-Sh8KzsKdCjHUb5GXEeJ9H6tacQ,143 -numpy/typing/tests/data/fail/shape_base.pyi,sha256=ZU1KSP0k-i-npwIMUhp42-EMzrdZhOqPEnV8ah-ZJ6U,160 -numpy/typing/tests/data/fail/stride_tricks.pyi,sha256=L0fJGun6CDq24yNdw2zeNVGGcIpEOyP2dmWj1pEbMz8,324 -numpy/typing/tests/data/fail/strings.pyi,sha256=XAiAwOERfMOL9INbER33qH-7_5rPGX4eubGcWsl36Fc,2429 -numpy/typing/tests/data/fail/testing.pyi,sha256=GYfvI1A2pB1Ii2jFVL-WGqRVimbFS2oCijmoWVbMAgw,1371 -numpy/typing/tests/data/fail/twodim_base.pyi,sha256=wzd-h1ye2BhMdIHlQ0ZcHfgYRBHVX2GJ3WGfMk5euPg,935 -numpy/typing/tests/data/fail/type_check.pyi,sha256=0KG0c2LNUbUFChTYtbJ38eJUmfvUJl4Cn5G0vh1Bkrw,392 -numpy/typing/tests/data/fail/ufunc_config.pyi,sha256=WzZzWJ-cC39qAzak3Cf--XIZX11MqwsEa3bYYyzqsvY,755 -numpy/typing/tests/data/fail/ufunclike.pyi,sha256=89Fjsr7vmurRl90mVbC5L0xOwRIk0jg4mJrgkTDn4eM,648 -numpy/typing/tests/data/fail/ufuncs.pyi,sha256=2ATU0I4ZF8DB3vyodRDJIuXnXb-CcQpt-l4Kn00kJxA,493 -numpy/typing/tests/data/fail/warnings_and_errors.pyi,sha256=4sTfiur0rV5CpjlYJC_1WV3KPnovteiImffvpYh19eU,190 -numpy/typing/tests/data/misc/extended_precision.pyi,sha256=RTsXUAM9iKX_L-iviwFVuUwKcqX9N8sRW5ZHAXjYtjc,909 -numpy/typing/tests/data/mypy.ini,sha256=TIOl-4bxGj7Q5DAYamOE_pBLnXMQf1quG7Maena9CRY,295 
-numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/array_like.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/dtype.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/literal.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/ma.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/mod.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/modules.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/nditer.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/numeric.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/random.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-313.pyc,, 
-numpy/typing/tests/data/pass/__pycache__/scalars.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/shape.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/simple.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-313.pyc,, -numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-313.pyc,, -numpy/typing/tests/data/pass/arithmetic.py,sha256=T2IizTDJ0bkGhPk5rsD5dpeEmNfPxWrVpmOB1eyY7As,8043 -numpy/typing/tests/data/pass/array_constructors.py,sha256=MGzgCt7uTeC_b7wU2aPlvTuDzXfgOujx_lR0Vqfpny8,2584 -numpy/typing/tests/data/pass/array_like.py,sha256=qLqVJLU8bjSIB3xFNCzRNAcozCWRAVLagiYYG7ewQJA,1101 -numpy/typing/tests/data/pass/arrayprint.py,sha256=NTw1gJ9v3TDVwRov4zsg_27rI-ndKuG4mDidBWEKVyc,803 -numpy/typing/tests/data/pass/arrayterator.py,sha256=z4o0H08T7tbzzMWhu5ZXdVqbivjBicuFgRHBk_lpOck,420 -numpy/typing/tests/data/pass/bitwise_ops.py,sha256=8lfjgayfTDDcWi1O-rnxLu4FZqvskvGHvFXJpMQWQgc,1095 -numpy/typing/tests/data/pass/comparisons.py,sha256=-NSAhFNN3kWqu2CZqt2pq3kflTx6nDCWxkO3JIYl5NI,3613 -numpy/typing/tests/data/pass/dtype.py,sha256=YRsTwKEQ5iJtdKCEQIybU_nL8z8Wq9hU-BZmEO7HjQE,1127 -numpy/typing/tests/data/pass/einsumfunc.py,sha256=CXdLvQsU2iDqQc7d2TRRCSwguQzJ0SJDFn23SDeOOuY,1406 -numpy/typing/tests/data/pass/flatiter.py,sha256=2xtMPvDgfhgjZIqiN3B3Wvy6Q9oBeo9uh4UkCAQNmwg,190 -numpy/typing/tests/data/pass/fromnumeric.py,sha256=bP0hEQYYQJOn7-ce0rAf8cvuxZX3Ja6GSSlCtNhEBUM,4263 -numpy/typing/tests/data/pass/index_tricks.py,sha256=RyuEtqyZVlK9j403DVjMZFd80mvt-VAMi1uGvXurc0c,1462 -numpy/typing/tests/data/pass/lib_user_array.py,sha256=K69fg9dI5BaglzpiJh13swGHuyx3LBW_zmzBBOB1aWw,612 -numpy/typing/tests/data/pass/lib_utils.py,sha256=XEc0v7bwES-C5D4GkSJQSSTSAl5ng7tq6tCWj3jxbCM,336 
-numpy/typing/tests/data/pass/lib_version.py,sha256=TlLZK8sekCMm__WWo22FZfZc40zpczENf6y_TNjBpCw,317 -numpy/typing/tests/data/pass/literal.py,sha256=sWAaQyBnm3jIEZrdqWe58U2sCzeE7mUSlG8tWIcQzRc,1555 -numpy/typing/tests/data/pass/ma.py,sha256=LfK4LXCWLLK5q0c1Me8STWbhGj9b_46LYvZwXGpaEjQ,179 -numpy/typing/tests/data/pass/mod.py,sha256=L1qLwjdrRo9Tx7mxWpf_ugdKdUprDYhPRbCvQd5QjXY,1725 -numpy/typing/tests/data/pass/modules.py,sha256=buzLurat4TIGmJuW3mGsGk7dKNmpBDfQOWWQXFfb9Uc,670 -numpy/typing/tests/data/pass/multiarray.py,sha256=i6VU-VN96Q16mRGzVoY3oTE2W1z16GOGTOVFxWGRacM,1407 -numpy/typing/tests/data/pass/ndarray_conversion.py,sha256=6TnvucV8Vtte7dGWihx7YmrHlNOanqmLJIH1W8Wok0E,1612 -numpy/typing/tests/data/pass/ndarray_misc.py,sha256=nI6loZ67OjL3Uzu0AQYsHrI-a_gq5SCzVzJqSiTKDc0,3662 -numpy/typing/tests/data/pass/ndarray_shape_manipulation.py,sha256=yaBK3hW5fe2VpvARkn_NMeF-JX-OajI8JiRWOA_Uk7Y,687 -numpy/typing/tests/data/pass/nditer.py,sha256=1wpRitCNZKCC3WJVrFSh22Z1D8jP2VxQAMtzH8NcpV8,67 -numpy/typing/tests/data/pass/numeric.py,sha256=E6JrIBZ8yaEDn4hkaePxcdYdkC6VKZUKSu_Z65Rsqkg,1720 -numpy/typing/tests/data/pass/numerictypes.py,sha256=JaCjk4zQPOI67XzqGyi3dI-GUMFM2AvDuniwzSQ7_Rk,348 -numpy/typing/tests/data/pass/random.py,sha256=wYwClLry-mN-QvaYg6AFGhwDuvoKQv-bl94fq10sL3k,63321 -numpy/typing/tests/data/pass/recfunctions.py,sha256=aeOxXwMkhc0aXyhmg4dW2QvekHDGAaYYTHVaQwrfKGY,5199 -numpy/typing/tests/data/pass/scalars.py,sha256=KfCYjDIxR9G2ypqCQJKQOuBWxiLqrGCV38q0JN3TqvA,3973 -numpy/typing/tests/data/pass/shape.py,sha256=Wr_y3KiVe5elHXLChRVupFvE_haiEFilCvk-ESR1Rcg,470 -numpy/typing/tests/data/pass/simple.py,sha256=aXvt9iCOV1lhQR11xVWgQIXXyXRHKOBfCtTjthZFtM0,2919 -numpy/typing/tests/data/pass/simple_py3.py,sha256=OBpoDmf5u4bRblugokiOZzufESsEmoU03MqipERrjLg,102 -numpy/typing/tests/data/pass/ufunc_config.py,sha256=gmMTPrq8gLXJZSBQoOpJcgzIzWgMx-k_etKPV4KSTJk,1269 -numpy/typing/tests/data/pass/ufunclike.py,sha256=jxTR61d0bmFg7JHZmw992ccRua00u4XWJYtcQRJwFS0,1172 
-numpy/typing/tests/data/pass/ufuncs.py,sha256=gvdcCNoGUfN0CnQmn6k1j6ghdt8zGkJdcRcgctmU48A,438 -numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=q3c1SmMwhyYLYQsLjK02AXphk3-96YltSTdTfrElJzQ,167 -numpy/typing/tests/data/reveal/arithmetic.pyi,sha256=OU8qyQpYk_4z8YO4Zl54qikYYpIHjRPq9doH8BYlWaY,24884 -numpy/typing/tests/data/reveal/array_api_info.pyi,sha256=zeNMHOn1HoTFaJTXkz5_GuFg3OvRa7W-gdxdJl1FPG4,3119 -numpy/typing/tests/data/reveal/array_constructors.pyi,sha256=jq0TvzyKKqLq3DWcuvhsI3oIbBtj6ikhO82S_ZWjI2I,12817 -numpy/typing/tests/data/reveal/arraypad.pyi,sha256=viQwv8d_Hsc5nhIqC4cGkRWbmXaqf3ehutPnmOleDkY,712 -numpy/typing/tests/data/reveal/arrayprint.pyi,sha256=JZmfW3bqJWY6TUM3JDMyVBS3cSTopPkBF5O8yzD3kiU,844 -numpy/typing/tests/data/reveal/arraysetops.pyi,sha256=gA2uqkyPESPjasbca_mZ-e41XkecogZVOpRDSZpSl38,4496 -numpy/typing/tests/data/reveal/arrayterator.pyi,sha256=a0gVqOyltcfjoBqwXiouAt3ghZUwFUZo0s4xg1VZGrI,1098 -numpy/typing/tests/data/reveal/bitwise_ops.pyi,sha256=8snEpx2ci_08vZuWWHZ81KrRflUxSGvtae2_9d5uKWo,5219 -numpy/typing/tests/data/reveal/char.pyi,sha256=AyDGKchLOkZ8TFaTl7DTb1-zLCNX1bsjHlMWlJW0QRU,11065 -numpy/typing/tests/data/reveal/chararray.pyi,sha256=0ZJp_G4AzrO0r2ntt082iCbeZ2cnoB1iPB4YoFvYNuc,6787 -numpy/typing/tests/data/reveal/comparisons.pyi,sha256=kMeqY8sAwL8_joBBTMo65V1wCAfQCjppBkmhN_ax0Ds,7491 -numpy/typing/tests/data/reveal/constants.pyi,sha256=pg8eBcAYp-7Xc-5iAgzPR8c4qpo89f9Cj2LKBatsu7Q,377 -numpy/typing/tests/data/reveal/ctypeslib.pyi,sha256=42zUf_JaEvC2F8JJtCv2G2YLR51L0V_wOpxulQKBJYk,4830 -numpy/typing/tests/data/reveal/datasource.pyi,sha256=H2QtFrQWad_gRlGkTZ7subBfTHjoAhwzUCdYuS_d9C0,638 -numpy/typing/tests/data/reveal/dtype.pyi,sha256=f-Ev24OaSP6ChpI6Xn5j4VOUKGoX6Ixvs95yJFjHuf0,5353 -numpy/typing/tests/data/reveal/einsumfunc.pyi,sha256=wx3i7hQdY1mhmu6fnvGLYlN0yVByZT-fPLEMhfEkLls,1997 -numpy/typing/tests/data/reveal/emath.pyi,sha256=TC5sIisHUbcS0EDnVfokZu072bfo2qy1lF6XYQBoTeI,2391 
-numpy/typing/tests/data/reveal/fft.pyi,sha256=02MXu-BxNyrIcSVBXLgC0poNsNGiXDWhWkGbY2zvZmw,1700 -numpy/typing/tests/data/reveal/flatiter.pyi,sha256=0L4ImsC4qP-bujm0czaLAMs_J57bUEUJL6CNi8L44Gw,1426 -numpy/typing/tests/data/reveal/fromnumeric.pyi,sha256=cj1x-A12dZaf7wRzFkiNzsrRpOGlSXzcLftMYeqngBg,15260 -numpy/typing/tests/data/reveal/getlimits.pyi,sha256=jp4uMJIJ2QooTSo765kdY9x1sphgankB4soyWL-CJHs,1635 -numpy/typing/tests/data/reveal/histograms.pyi,sha256=GMA2nwIztaW6nYbJ8r6wNLiQvL0V0CssJ4uB_qEfiuw,1314 -numpy/typing/tests/data/reveal/index_tricks.pyi,sha256=mLlymGWD-8uafZ4iDEgeeIi2BwAts5_w-6koRPdI2fQ,3343 -numpy/typing/tests/data/reveal/lib_function_base.pyi,sha256=pEVGWIuBbo8ymrIybovkhyNFL8iYF7LC-01O932gltQ,10087 -numpy/typing/tests/data/reveal/lib_polynomial.pyi,sha256=5ipkWMQOnEq59oxAdHHqD-9Ion5CnfzcVc69K5y-KPs,6041 -numpy/typing/tests/data/reveal/lib_utils.pyi,sha256=hc8SEa5GX2bvey1lPdjkHeQJsGzFTbYaqvkXQWzFGZA,466 -numpy/typing/tests/data/reveal/lib_version.pyi,sha256=nAmE8-EYApx_o3Ih6XNcnGAT9gmwYn3emcH_jMjvqF0,603 -numpy/typing/tests/data/reveal/linalg.pyi,sha256=v32iuvfKkf9b68M4EzApRNb33FYYK4l2h8WtA_DYSoM,6366 -numpy/typing/tests/data/reveal/matrix.pyi,sha256=iIyDGTXx-tNFvc3_cpTsh-5qdCT1RSZG7hoHku79BlA,3122 -numpy/typing/tests/data/reveal/memmap.pyi,sha256=NzJkfVKfej4RQeVY9K-hkN_h4_5AOy6k6WtXT8FMW1U,775 -numpy/typing/tests/data/reveal/mod.pyi,sha256=gdJr_fx0lxU9ISMQMtwb1_6J0J_N5b-04mPVUPSUpxk,7792 -numpy/typing/tests/data/reveal/modules.pyi,sha256=bVHJ0-4XHxHwF4G6YFtQx4E7i9NjXJX4Xl1-2suWxL4,1922 -numpy/typing/tests/data/reveal/multiarray.pyi,sha256=EWlKpzai4sIdRSi9HX-jfOfBWQY79e2L3gdcb3TSV5A,8061 -numpy/typing/tests/data/reveal/nbit_base_example.pyi,sha256=L623ocsRjDAvFWEBnV1-D6E68v0beBDvXQ5tTDGSjaU,610 -numpy/typing/tests/data/reveal/ndarray_assignability.pyi,sha256=bWJadq5zn_6ya0mSryzYh2jmCqFS6y_5oVukAvUj-1w,2777 -numpy/typing/tests/data/reveal/ndarray_conversion.pyi,sha256=dP7RIN2JoQLyiATFYxxhH81ig76SXVCJXAxGhk7LPkE,3465 
-numpy/typing/tests/data/reveal/ndarray_misc.pyi,sha256=mUys3JQqq9AjiaJ9VaEE6mJ1806iqy9Gy1um7DHGqtk,8137 -numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi,sha256=GOMRYAh2un_a9YMnhNus1icBEctWBM-JiE0XUGenADk,1444 -numpy/typing/tests/data/reveal/nditer.pyi,sha256=6sV8LI8D5F9nNWwbcLBQSWubRbnIZ-5EVveHi3G_bJs,1984 -numpy/typing/tests/data/reveal/nested_sequence.pyi,sha256=IHoDdBHrPR9t7Hlu5uS7Gwmdhpry1vnJxjoZYACWbjk,674 -numpy/typing/tests/data/reveal/npyio.pyi,sha256=fW3S6E7_Lc290CD_Cu_nwLiA7rDzyZTWsNp7opI5Ono,3608 -numpy/typing/tests/data/reveal/numeric.pyi,sha256=1viOAuH2hv0NietbvxyVEpeQJacMqy7ck-mQpRHxEB4,6218 -numpy/typing/tests/data/reveal/numerictypes.pyi,sha256=UfImexHrS0BEJIHdtdHnDCFaeokhEKtA08_ArfhsXus,1414 -numpy/typing/tests/data/reveal/polynomial_polybase.pyi,sha256=lNwOtFZ9wv3CIuPJwnwUNHyAStBv97LP423erNq90X8,8220 -numpy/typing/tests/data/reveal/polynomial_polyutils.pyi,sha256=BJX4_LBPqbPQ1eY-br4SqHUHO9zvIrOxa_J2TKrufWg,10984 -numpy/typing/tests/data/reveal/polynomial_series.pyi,sha256=NMH4MqVHDNNI1mLewAZg9Zin5EoDoYik_Q-YhbQa5gk,7268 -numpy/typing/tests/data/reveal/random.pyi,sha256=8llrjYb6sRt7qblJiOniDKCrC1ERE3JNfenqIPX-2UI,105880 -numpy/typing/tests/data/reveal/rec.pyi,sha256=sv6b2EJApQUcucb205S_GBtPVd675T_8VgdeVn78jqY,3944 -numpy/typing/tests/data/reveal/scalars.pyi,sha256=Ch1zywWqKdpyvTJDRGRdfzZ6to-VxWI512MuNwnY00M,6642 -numpy/typing/tests/data/reveal/shape.pyi,sha256=M0joDPodElAHLjI9FmofIgS45uSidjXOnfpbKwyBaZ8,307 -numpy/typing/tests/data/reveal/shape_base.pyi,sha256=tn2gaV-VfJLPp18NH3qGxXVoOZXm4nZj4pKrF5IonlE,2100 -numpy/typing/tests/data/reveal/stride_tricks.pyi,sha256=YFA73zIg8YRoDnDi6VKc-3CRZoOJZ_YhNlqiyqrthLE,1374 -numpy/typing/tests/data/reveal/strings.pyi,sha256=TSRS1RV7KdjVMjfAwJr_ja60et1LqWWrdD-hop_mikQ,9608 -numpy/typing/tests/data/reveal/testing.pyi,sha256=Ym8uuMNq1l6VqZsN43ghEL8DpZKFX4kisH-BV79xFNU,8683 -numpy/typing/tests/data/reveal/twodim_base.pyi,sha256=g-fZ2g9vh4XYIltv2sSgsAtl0yg_EwW3NdhKIFFVG6o,4451 
-numpy/typing/tests/data/reveal/type_check.pyi,sha256=i-8YsIOGxgxdleMzwO8ctaLfYzcnBUKDBY7TiwK-IHA,2790 -numpy/typing/tests/data/reveal/ufunc_config.pyi,sha256=w4XdMT8Rz2pu6XLmVhb-F4hnCb--A_dYYjBJCtWVZ5s,1222 -numpy/typing/tests/data/reveal/ufunclike.pyi,sha256=7-LodGrHthBQuR42rQlXw3nQsVqHGYdxzu73B2ps1tY,1266 -numpy/typing/tests/data/reveal/ufuncs.pyi,sha256=mSu8DtwP2b-l45nrxWwKiNGeALQbG6qAKBYg_OLOqWc,4944 -numpy/typing/tests/data/reveal/warnings_and_errors.pyi,sha256=TqQe7189p4B7PAbuIQbGNkLykuCpq2ngCB2Bscsh_bw,471 -numpy/typing/tests/test_isfile.py,sha256=slpVB1kHtrG5unlgYxl94Q_kOzDBPnDtFZQhLZdq9JM,897 -numpy/typing/tests/test_runtime.py,sha256=p-Ydvt0Rt6mPHmAKYOOAGxxXQnjoARJSVZmViKMAX0A,3384 -numpy/typing/tests/test_typing.py,sha256=ZVc9wJgtAKRX6S1lkSiR6Y9w_Dxwl0TLN-rAvzJBSFw,8594 -numpy/version.py,sha256=ao42Ds0h0708A3GuP1EUqJtvGwRBrupyVvp2QNYBXbQ,304 -numpy/version.pyi,sha256=WPYF3zFF92LnJu7CGTRsh4osMyXBuQRpMvAuoxKMrbw,408 diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/DELVEWHEEL b/blimgui/dist64/numpy-2.4.2.dist-info/DELVEWHEEL new file mode 100644 index 0000000..a852111 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/DELVEWHEEL @@ -0,0 +1,2 @@ +Version: 1.11.2 +Arguments: ['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-awnw0lvp\\cp314-win_amd64\\build\\venv\\Scripts\\delvewheel', 'repair', '--add-path', 'D:/a/numpy-release/numpy-release/.openblas/lib', '-w', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-awnw0lvp\\cp314-win_amd64\\repaired_wheel', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-awnw0lvp\\cp314-win_amd64\\built_wheel\\numpy-2.4.2-cp314-cp314-win_amd64.whl'] diff --git a/blimgui/dist64/numpy-2.2.5.dist-info/INSTALLER b/blimgui/dist64/numpy-2.4.2.dist-info/INSTALLER similarity index 100% rename from blimgui/dist64/numpy-2.2.5.dist-info/INSTALLER rename to blimgui/dist64/numpy-2.4.2.dist-info/INSTALLER diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/METADATA b/blimgui/dist64/numpy-2.4.2.dist-info/METADATA 
new file mode 100644 index 0000000..1914a43 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/METADATA @@ -0,0 +1,139 @@ +Metadata-Version: 2.4 +Name: numpy +Version: 2.4.2 +Summary: Fundamental package for array computing in Python +Author: Travis E. Oliphant et al. +Maintainer-Email: NumPy Developers +License-Expression: BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0 +License-File: LICENSE.txt +License-File: numpy/_core/include/numpy/libdivide/LICENSE.txt +License-File: numpy/_core/src/common/pythoncapi-compat/COPYING +License-File: numpy/_core/src/highway/LICENSE +License-File: numpy/_core/src/multiarray/dragon4_LICENSE.txt +License-File: numpy/_core/src/npysort/x86-simd-sort/LICENSE.md +License-File: numpy/_core/src/umath/svml/LICENSE +License-File: numpy/fft/pocketfft/LICENSE.md +License-File: numpy/linalg/lapack_lite/LICENSE.txt +License-File: numpy/ma/LICENSE +License-File: numpy/random/LICENSE.md +License-File: numpy/random/src/distributions/LICENSE.md +License-File: numpy/random/src/mt19937/LICENSE.md +License-File: numpy/random/src/pcg64/LICENSE.md +License-File: numpy/random/src/philox/LICENSE.md +License-File: numpy/random/src/sfc64/LICENSE.md +License-File: numpy/random/src/splitmix64/LICENSE.md +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Typing :: Typed +Classifier: 
Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Project-URL: homepage, https://numpy.org +Project-URL: documentation, https://numpy.org/doc/ +Project-URL: source, https://github.com/numpy/numpy +Project-URL: download, https://pypi.org/project/numpy/#files +Project-URL: tracker, https://github.com/numpy/numpy/issues +Project-URL: release notes, https://numpy.org/doc/stable/release +Requires-Python: >=3.11 +Description-Content-Type: text/markdown + +

+ +


+ + +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)]( +https://numfocus.org) +[![PyPI Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=PyPI%20downloads)]( +https://pypi.org/project/numpy/) +[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)]( +https://anaconda.org/conda-forge/numpy) +[![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)]( +https://stackoverflow.com/questions/tagged/numpy) +[![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( +https://doi.org/10.1038/s41586-020-2649-2) +[![LFX Health Score](https://insights.linuxfoundation.org/api/badge/health-score?project=numpy)](https://insights.linuxfoundation.org/project/numpy) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) +[![Typing](https://img.shields.io/pypi/types/numpy)](https://pypi.org/project/numpy/) + + +NumPy is the fundamental package for scientific computing with Python. + +- **Website:** https://numpy.org +- **Documentation:** https://numpy.org/doc +- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion +- **Source code:** https://github.com/numpy/numpy +- **Contributing:** https://numpy.org/devdocs/dev/index.html +- **Bug reports:** https://github.com/numpy/numpy/issues +- **Report a security vulnerability:** https://tidelift.com/docs/security + +It provides: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities + +Testing: + +NumPy requires `pytest` and `hypothesis`. 
Tests can then be run after installation with: + + python -c "import numpy, sys; sys.exit(numpy.test() is False)" + +Code of Conduct +---------------------- + +NumPy is a community-driven open source project developed by a diverse group of +[contributors](https://numpy.org/teams/). The NumPy leadership has made a strong +commitment to creating an open, inclusive, and positive community. Please read the +[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact +with others in a way that makes our community thrive. + +Call for Contributions +---------------------- + +The NumPy project welcomes your expertise and enthusiasm! + +Small improvements or fixes are always appreciated. If you are considering larger contributions +to the source code, please contact us through the [mailing +list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. + +Writing code isn’t the only way to contribute to NumPy. You can also: +- review pull requests +- help us stay on top of new and old issues +- develop tutorials, presentations, and other educational materials +- maintain and improve [our website](https://github.com/numpy/numpy.org) +- develop graphic design for our brand assets and promotional materials +- translate website content +- help with outreach and onboard new contributors +- write grant proposals and help with other fundraising efforts + +For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/). +If you’re unsure where to start or how your skills fit in, reach out! You can +ask on the mailing list or here, on GitHub, by opening a new issue or leaving a +comment on a relevant issue that is already open. + +Our preferred channels of communication are all public, but if you’d like to +speak to us in private first, contact our community coordinators at +numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for +an invitation). 
+ +We also have a biweekly community call, details of which are announced on the +mailing list. You are very welcome to join. + +If you are new to contributing to open source, [this +guide](https://opensource.guide/how-to-contribute/) helps explain why, what, +and how to successfully get involved. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/RECORD b/blimgui/dist64/numpy-2.4.2.dist-info/RECORD new file mode 100644 index 0000000..57473fd --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/RECORD @@ -0,0 +1,1341 @@ +../../Scripts/f2py.exe,sha256=uS8-eYTeBc6_kyO3kizgqbdvooT4_D46dH4g8MD7_xs,108364 +../../Scripts/numpy-config.exe,sha256=IJcPiJ3pLYN_ZYA8r08YXVW90krQL6rR8mOaQN8BSsI,108364 +numpy-2.4.2.dist-info/DELVEWHEEL,sha256=kwPlYhkjTg_OQTR4GNv2rFShrDu3aSs21kjXUaCUgR0,462 +numpy-2.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numpy-2.4.2.dist-info/METADATA,sha256=Z4I6ylNqyiVKm0ml3h-Ja0pjGziZKxOGfSz1uX7OXEY,6608 +numpy-2.4.2.dist-info/RECORD,, +numpy-2.4.2.dist-info/WHEEL,sha256=zFRw8EdpTGuqV4Z3iy5wfyaCeoMRqh3qJhpskUrtGT4,85 +numpy-2.4.2.dist-info/entry_points.txt,sha256=7Cb63gyL2sIRpsHdADpl6xaIW5JTlUI-k_yqEVr0BSw,220 +numpy-2.4.2.dist-info/licenses/LICENSE.txt,sha256=qATf8OrZ-txSk0VkELy_wyvwJL6cRRNFlmP7e0QtI0E,45831 +numpy-2.4.2.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt,sha256=1UR2FVi1EIZsIffootVxb8p24LmBF-O2uGMU23JE0VA,1039 +numpy-2.4.2.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING,sha256=rqQZdscg-RvRlyXBaRahrOQOsepCYg00Dlh6XeS7zqA,704 +numpy-2.4.2.dist-info/licenses/numpy/_core/src/highway/LICENSE,sha256=0ggVLid3_9ASbqzBvSHAMg1p5Oa2ItIaKhKkfo73N2M,21155 +numpy-2.4.2.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt,sha256=sP1ZLU6tVjFI45d-e1iRadRqXvL_Ezx1WoGviAo_yRk,1441 +numpy-2.4.2.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md,sha256=pzKXeFNTAGZ5DEKBcIf0gptRjdmNNAyRasv9In4508s,1542 
+numpy-2.4.2.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE,sha256=WFSDkFsT1LQvgAxVupnonW4uM8zV9QPiAcB1MdFI_0E,1573 +numpy-2.4.2.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md,sha256=jbAA4VjBYuckl4eFLYxpdkcGUCzO5yXAZkGzHB6VyK8,1523 +numpy-2.4.2.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt,sha256=GQYO9GXheX1oLuD8MIG9JasTlemV5D0lydE8ldmjzWs,2314 +numpy-2.4.2.dist-info/licenses/numpy/ma/LICENSE,sha256=1427IIuA2StNMz5BpLquUNEkRPRuUxmfp3Jqkd5uLac,1616 +numpy-2.4.2.dist-info/licenses/numpy/random/LICENSE.md,sha256=tLwvT6HJV3jx7T3Y8UcGvs45lHW5ePnzS1081yUhtIo,3582 +numpy-2.4.2.dist-info/licenses/numpy/random/src/distributions/LICENSE.md,sha256=z1YZ8lsRLVznyJbv_8jmELyzRd-tzBnywUYKpI__LJ4,2805 +numpy-2.4.2.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md,sha256=ksEGmYhDIfHClH_ipwqyOw-_FQf3Vtid0I84xh_D1Po,2986 +numpy-2.4.2.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md,sha256=QRkGY7d77lMCOGSVoHt-q_v6Vxqgri8R2mdkFiBi_E8,1176 +numpy-2.4.2.dist-info/licenses/numpy/random/src/philox/LICENSE.md,sha256=MirEbYcMiu6tbFOtzMyDkpb7PI5Fbq4VUIEpfa9_gBM,1540 +numpy-2.4.2.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md,sha256=KMrYLOE3ncUp3zDaeOwBLX0NHVdrA93LkziK7lNln1k,1253 +numpy-2.4.2.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md,sha256=RU7HNQ5HjgNwYYULZzlXU7QlscsKNLvCYpCwIL-J12M,340 +numpy.libs/libscipy_openblas64_-74a408729250596b0973e69fdd954eea.dll,sha256=dKQIcpJQWWsJc-af3ZVO6genD_Unodusz5riEke4A3c,20404736 +numpy.libs/msvcp140-a4c2229bdc2a2a630acdc095b4d86008.dll,sha256=pMIim9wqKmMKzcCVtNhgCOXD47x3cxdDVPPaT1vrnN4,575056 +numpy/__config__.py,sha256=MwR0Vsx3zyBfzQJ7zhxveFnwwV1tmncS75q8HWuRiNE,5743 +numpy/__config__.pyi,sha256=L6Ml7eJWLoOvEOeI5FCVERMuiPVQUkFOuTmG5lduhzc,2486 +numpy/__init__.cython-30.pxd,sha256=PByDh5eNFDlvCdDfpw7J1RwWPUItbLHUeuoneaoWx7g,48396 +numpy/__init__.pxd,sha256=C1ZTNkpIVrh_YD9ZYrC0beDjHbDJgjcqFKUKddj28k4,44944 +numpy/__init__.py,sha256=sWpPNHxlg8h44Zc6IIVk4BaGt5zHCRGtAVfgXY7s2jc,27212 
+numpy/__init__.pyi,sha256=36E0y_ydNWWLd4TItoFZjWMKOF_ILN0m2PyA1XAurZI,242576 +numpy/__pycache__/__config__.cpython-314.pyc,, +numpy/__pycache__/__init__.cpython-314.pyc,, +numpy/__pycache__/_array_api_info.cpython-314.pyc,, +numpy/__pycache__/_configtool.cpython-314.pyc,, +numpy/__pycache__/_distributor_init.cpython-314.pyc,, +numpy/__pycache__/_expired_attrs_2_0.cpython-314.pyc,, +numpy/__pycache__/_globals.cpython-314.pyc,, +numpy/__pycache__/_pytesttester.cpython-314.pyc,, +numpy/__pycache__/conftest.cpython-314.pyc,, +numpy/__pycache__/dtypes.cpython-314.pyc,, +numpy/__pycache__/exceptions.cpython-314.pyc,, +numpy/__pycache__/matlib.cpython-314.pyc,, +numpy/__pycache__/version.cpython-314.pyc,, +numpy/_array_api_info.py,sha256=AeGuRBPBcJ0PeOSi2AB3rkOHFTkWa1N7tYq_Gj9ri3o,10731 +numpy/_array_api_info.pyi,sha256=c77RlN8Pan3QlqXonfTzus7yWI3F4BDMtedWlVO3q-s,5068 +numpy/_configtool.py,sha256=qqay_oP0bqkryU55jFBE6RzgKJkquJ0bBvaYt9D-Gbs,1046 +numpy/_configtool.pyi,sha256=IlC395h8TlcZ4DiSW5i6NBQO9I74ERfXpwSYAktzoaU,25 +numpy/_core/__init__.py,sha256=c8V6fzpU4zOZ3bmxQusn7TquK_S-L0wVk5Z4jha2IOU,6865 +numpy/_core/__init__.pyi,sha256=eRDqlKeg5cDyDbN2Mfq8mUDRvD-hxmIkoEx-89UmMPk,10639 +numpy/_core/__pycache__/__init__.cpython-314.pyc,, +numpy/_core/__pycache__/_add_newdocs.cpython-314.pyc,, +numpy/_core/__pycache__/_add_newdocs_scalars.cpython-314.pyc,, +numpy/_core/__pycache__/_asarray.cpython-314.pyc,, +numpy/_core/__pycache__/_dtype.cpython-314.pyc,, +numpy/_core/__pycache__/_dtype_ctypes.cpython-314.pyc,, +numpy/_core/__pycache__/_exceptions.cpython-314.pyc,, +numpy/_core/__pycache__/_internal.cpython-314.pyc,, +numpy/_core/__pycache__/_methods.cpython-314.pyc,, +numpy/_core/__pycache__/_string_helpers.cpython-314.pyc,, +numpy/_core/__pycache__/_type_aliases.cpython-314.pyc,, +numpy/_core/__pycache__/_ufunc_config.cpython-314.pyc,, +numpy/_core/__pycache__/arrayprint.cpython-314.pyc,, +numpy/_core/__pycache__/cversions.cpython-314.pyc,, 
+numpy/_core/__pycache__/defchararray.cpython-314.pyc,, +numpy/_core/__pycache__/einsumfunc.cpython-314.pyc,, +numpy/_core/__pycache__/fromnumeric.cpython-314.pyc,, +numpy/_core/__pycache__/function_base.cpython-314.pyc,, +numpy/_core/__pycache__/getlimits.cpython-314.pyc,, +numpy/_core/__pycache__/memmap.cpython-314.pyc,, +numpy/_core/__pycache__/multiarray.cpython-314.pyc,, +numpy/_core/__pycache__/numeric.cpython-314.pyc,, +numpy/_core/__pycache__/numerictypes.cpython-314.pyc,, +numpy/_core/__pycache__/overrides.cpython-314.pyc,, +numpy/_core/__pycache__/printoptions.cpython-314.pyc,, +numpy/_core/__pycache__/records.cpython-314.pyc,, +numpy/_core/__pycache__/shape_base.cpython-314.pyc,, +numpy/_core/__pycache__/strings.cpython-314.pyc,, +numpy/_core/__pycache__/umath.cpython-314.pyc,, +numpy/_core/_add_newdocs.py,sha256=KKzqLHTCXxLzPTg8V8K531Bqh_Ai8BMSFu7EbXu3dUk,222742 +numpy/_core/_add_newdocs.pyi,sha256=ipvUNHAUG15ziSDqc4aGEfQ1jrqpW-zJrogC8G0DOog,136 +numpy/_core/_add_newdocs_scalars.py,sha256=ViMWUAIlsUKosd1mEpLo907cZeaS7Vg_GerzCDIAUUQ,13314 +numpy/_core/_add_newdocs_scalars.pyi,sha256=NNXiuLe2kckDpvpR_WTesOZG1AWSQapqIlGD7tgVtxE,565 +numpy/_core/_asarray.py,sha256=MU1nK76mQ4gDZCWZbdV-zmkmZphAVAkNwghbk4TgqlQ,4024 +numpy/_core/_asarray.pyi,sha256=7oijVMV32GQMEv5xV8sOYfjHHJCjZSo_oJT4zr6u-Kw,1190 +numpy/_core/_dtype.py,sha256=TEjPZXHmpw5-HhfGfKS-PfQT9A9sk8cnaE6icAXVb34,10913 +numpy/_core/_dtype.pyi,sha256=1_fGj6V5NxTYRkN2-cfZBOQOkc1rJ1wET2deS1ANpdw,1888 +numpy/_core/_dtype_ctypes.py,sha256=e8EgfaqXiJ8-UYi8FM5sm9W8ehqvcG_rTpDROaKwTKg,3846 +numpy/_core/_dtype_ctypes.pyi,sha256=d5BudSdtj6n046OX9c-rUoX5zVGghdoO22yEhkjVRoM,3765 +numpy/_core/_exceptions.py,sha256=umxWh9TLhDXy9LmW77wkIHMegFk_KFfBB5ndxsJd5F4,5321 +numpy/_core/_exceptions.pyi,sha256=cWcq9Uf4GrYuKI5IEH-ioVaYDYU1adD9WIPpNu02d-M,1904 +numpy/_core/_internal.py,sha256=LDBumIfjDMIO8cNSfo014AcYngsdkXms6TFLNetZuxY,30405 +numpy/_core/_internal.pyi,sha256=Kax8zq8oAmuPIpkUDU-7ojkjR3C_wtHcA8uNHeftJmI,2212 
+numpy/_core/_methods.py,sha256=XdcMBZ5zWZFpUJiL-im2SCtsvyYn_NDirSWICkzt2fw,9645 +numpy/_core/_methods.pyi,sha256=7Mc4H9O3KYU9VwCsOo8X0q6mg9vDr2S6xbwuJ7PXPX4,548 +numpy/_core/_multiarray_tests.cp314-win_amd64.lib,sha256=u-vOqcrXfZWZWBaL1ibwilY9gXIq895YuuiCOZtWnxo,2418 +numpy/_core/_multiarray_tests.cp314-win_amd64.pyd,sha256=wqmK8hq9-LG2O0XaWqGKl96zuMj7f02xIpi8KjY9ePQ,65536 +numpy/_core/_multiarray_umath.cp314-win_amd64.lib,sha256=Bbh0CzOTHoaydnIGENRSgufu4bZAJpVBzl2X6_jl1eI,2192 +numpy/_core/_multiarray_umath.cp314-win_amd64.pyd,sha256=gE6cDTprMuSlNK5QZvVUYAfy67ayogUFFsEXLo6Qi3U,3721728 +numpy/_core/_operand_flag_tests.cp314-win_amd64.lib,sha256=MWn0xxfdoKISfCSc-5HCvlFiXgw9bLtJizH95iBBx1c,2228 +numpy/_core/_operand_flag_tests.cp314-win_amd64.pyd,sha256=f8UK-P7IsqvZj31x8DlhE0t1ckoTAKpl7RVhtXLMfGE,12288 +numpy/_core/_rational_tests.cp314-win_amd64.lib,sha256=2jsFgaKua4h7eKWB6G0LgDsiJhNHgkKQhiggl8Bqqio,2156 +numpy/_core/_rational_tests.cp314-win_amd64.pyd,sha256=eUofs--71fxZHMqE2D_TdzHDSYCWyHrAh2Z5x-f8UsY,39936 +numpy/_core/_simd.cp314-win_amd64.lib,sha256=Fc6lquTTJ-pHRIptm7kE6pcf4EiTikpjFPicZ7XpVPs,1976 +numpy/_core/_simd.cp314-win_amd64.pyd,sha256=PJaVeJ16KxdLWlevuVitvPRqdOlzXZ90bLM7h4XBVxw,831488 +numpy/_core/_simd.pyi,sha256=vLr3cmfU5D-UytIPEzQHUwoEOZGzfLI63uwPYN-bvIU,1013 +numpy/_core/_string_helpers.py,sha256=aX1N5UsNeUFy54o5tuTC6X6N9AJueFN2_6QIyIUD2Xg,2945 +numpy/_core/_string_helpers.pyi,sha256=bThH7ichGlrmQ6O38n71QcJ7Oi_mRPUf-DFMU5hdYU0,370 +numpy/_core/_struct_ufunc_tests.cp314-win_amd64.lib,sha256=8U4iVRK-vu-LT2FAefMq5IyuxUSTvMfNaaXEYLhRpYM,2228 +numpy/_core/_struct_ufunc_tests.cp314-win_amd64.pyd,sha256=dbbA_igJYRsG6n7vHsZSCmmZ-71yXG9XYysXTX9m-c4,14336 +numpy/_core/_type_aliases.py,sha256=U51em1dU7qluFYICpmrygSHLfk-y4UoqD0JJM-psCjE,3886 +numpy/_core/_type_aliases.pyi,sha256=-t_aBa6lQ06Z0wsOzpiE84XW0-2FU-hkA3HSTWlavAk,2306 +numpy/_core/_ufunc_config.py,sha256=xeJy3K7KPV5PpJbkW9ncWY_UnnLHlM9M6rX4C9eNs2k,16126 
+numpy/_core/_ufunc_config.pyi,sha256=Fpec58dVzJmrKRSiOsnWZvdS5wuyORMmPkzoOy15YwY,1930 +numpy/_core/_umath_tests.cp314-win_amd64.lib,sha256=c5ptdB0KGpj70yCFqhlweoGe1eMcWObad5yaBzuMM1A,2104 +numpy/_core/_umath_tests.cp314-win_amd64.pyd,sha256=7a8ohvHOaQEoelB0tAfbhu5skSYaqveXKP8--hpHd3Y,33280 +numpy/_core/_umath_tests.pyi,sha256=KwadAqagA7lloXXeOQQp3Gkp0T_HuuT87pI4dz3ttTY,2344 +numpy/_core/arrayprint.py,sha256=lpxDgWjCALcJThUtdKVUVOiM4Vrd4Xn7qaHnquc0tzw,67050 +numpy/_core/arrayprint.pyi,sha256=El-bKIjihbdXa2N05qohoYm_amZG6gwGn1lLsc19f1E,4688 +numpy/_core/cversions.py,sha256=FISv1d4R917Bi5xJjKKy8Lo6AlFkV00WvSoB7l3acA4,360 +numpy/_core/defchararray.py,sha256=4S8fU1YI2vGGUGYxRSr0536JJTO-S-Y9aMYe6BnQipY,39231 +numpy/_core/defchararray.pyi,sha256=qeYhLiloeDFogxBxYd_Ax9DDHQNI8nu2wyp3X2qfLYw,29794 +numpy/_core/einsumfunc.py,sha256=_mRaqmbQeZ3TDW4Y6gLz4nmDE4WfFL9UxKSAJBzGujc,59578 +numpy/_core/einsumfunc.pyi,sha256=j7eufFy3WhOcAGWT9FO8dxHB2kTLJNH6S84MPxtALS8,5109 +numpy/_core/fromnumeric.py,sha256=5T-r7fxkHbdvJiBfWVD6mwI7bbi2E95F0z9XFeBq-CI,146891 +numpy/_core/fromnumeric.pyi,sha256=o6l_ETLBkqmhOq8Ga3QBxWznwpZbfY94fX1UDrJKsXM,46598 +numpy/_core/function_base.py,sha256=zBvtIUZoUVe_sGSPdnDmOXc9GMX232Iqx5FtkMp5e8c,20224 +numpy/_core/function_base.pyi,sha256=Z5DaKdqR3t1sun85xnT2t6OkUsyDSW6gAO8oNBmfEjY,7321 +numpy/_core/getlimits.py,sha256=tX7eDWVY3113HFBztt5JOttUTI31S4-rC0bVXYJRG4M,15465 +numpy/_core/getlimits.pyi,sha256=Ok2rv9SELwkPHbC4qttkIDRpUZbv7ey6LDvCGvUSEeE,3921 +numpy/_core/include/numpy/__multiarray_api.c,sha256=ucLypGZeaaHhl2OX-4YQrrCGTUl-8XwObmmN8zQjRjU,13074 +numpy/_core/include/numpy/__multiarray_api.h,sha256=9OcLvWKEidFXT_YzbInua3Ft8ruCXgUtG-vl9w9WluQ,63371 +numpy/_core/include/numpy/__ufunc_api.c,sha256=7mulmWQI9Hdlx3IjZF-GWhZYZtrFn4I6tCG0Uoo0LCc,1854 +numpy/_core/include/numpy/__ufunc_api.h,sha256=oxLZE6JL-8gWdzf5RA760Gr7KnjqXhMwycaO53cxaRk,13754 +numpy/_core/include/numpy/_neighborhood_iterator_imp.h,sha256=s5TK2aPpClbw4CbVJCij__hzoh5IgHIIZK0k6FKtqfc,1947 
+numpy/_core/include/numpy/_numpyconfig.h,sha256=jIeId55dTsklMKBXl8zsicKcReSVSYn7kIFMLOn-qKE,902 +numpy/_core/include/numpy/_public_dtype_api_table.h,sha256=4ylG8s52kZEx__QODt_7Do8QitmhDSvTeZ7Lar0fOgo,4660 +numpy/_core/include/numpy/arrayobject.h,sha256=ghWzloPUkSaVkcsAnBnpbrxtXeXL-mkzVGJQEHFxjnk,211 +numpy/_core/include/numpy/arrayscalars.h,sha256=LKd4F3obZd65HWDmHD6GS9LJ91r2J0GX0VPVPtTBztE,4522 +numpy/_core/include/numpy/dtype_api.h,sha256=7HG7Pn8WOChR0ifvAOmVARSVnql7dqL07MQLWhRHpco,22002 +numpy/_core/include/numpy/halffloat.h,sha256=qYgX5iQfNzXICsnd0MCRq5ELhhfFjlRGm1xXGimQm44,2029 +numpy/_core/include/numpy/ndarrayobject.h,sha256=WWy6pljDNLhzFf0QWNfvH7-jO2afkRbbrtFYSet16v4,12359 +numpy/_core/include/numpy/ndarraytypes.h,sha256=N8FTdkiDL8l04ZriXBDDDEA0ycaPUVkb8WCAtkdLoLk,68964 +numpy/_core/include/numpy/npy_2_compat.h,sha256=VxsRXAtDfLlXkvH-ErZRSuH49k9EjcFwcSUSfTPRzAU,8795 +numpy/_core/include/numpy/npy_2_complexcompat.h,sha256=uW0iF-qMwQNn4PvIfWCrYce6b4OrYUO4BWu-VYYAZag,885 +numpy/_core/include/numpy/npy_3kcompat.h,sha256=0yiCfXGefB938r_IykVf5DaRUJQeYOLvFY__VOF0ezI,10047 +numpy/_core/include/numpy/npy_common.h,sha256=fjuQ8bWFPrfhmrzet2yUQEmAeNwWn5AwgLYTbTMSEZE,33873 +numpy/_core/include/numpy/npy_cpu.h,sha256=G1W2dMD7IbMjCrOmAtEv7nNOH6awBsY0fotNAp3ebaE,4478 +numpy/_core/include/numpy/npy_endian.h,sha256=f1BT0ALH-Gca3cco_CgTm53HMLxmBmo4ZfzVxxLNlKM,2957 +numpy/_core/include/numpy/npy_math.h,sha256=ksdiKBXDfpEHB1s9m5yinyhjdcc0h-zJcfXEuoVHAd8,19460 +numpy/_core/include/numpy/npy_no_deprecated_api.h,sha256=jIcjEP2AbovDTfgE-qtvdP51_dVGjVnEGBX86rlGSKE,698 +numpy/_core/include/numpy/npy_os.h,sha256=j044vd1C1oCcW52r3htiVNhUaJSEqCjKrODwMHq3TU0,1298 +numpy/_core/include/numpy/numpyconfig.h,sha256=1EsdOVVuZb1tH7Yi1UO0q0yyZtuMRqMsnI0gBFhKNLo,7651 +numpy/_core/include/numpy/random/LICENSE.txt,sha256=1UR2FVi1EIZsIffootVxb8p24LmBF-O2uGMU23JE0VA,1039 +numpy/_core/include/numpy/random/bitgen.h,sha256=_H0uXqmnub4PxnJWdMWaNqfpyFDu2KB0skf2wc5vjUc,508 
+numpy/_core/include/numpy/random/distributions.h,sha256=GLURa3sFESZE0_0RK-3Gqmfa96itBHw8LlsNyy9EPt4,10070 +numpy/_core/include/numpy/random/libdivide.h,sha256=F9PLx6TcOk-sd0dObe0nWLyz4HhbHv2K7voR_kolpGU,82217 +numpy/_core/include/numpy/ufuncobject.h,sha256=uI5m_WOrFQtaL3BwgRmiZ7BN8CypKXfC5EcQfdhH-Eg,12123 +numpy/_core/include/numpy/utils.h,sha256=vzJAbatJYfxHmX2yL_xBirmB4mEGLOhJ92JlV9s8yPs,1222 +numpy/_core/lib/npy-pkg-config/mlib.ini,sha256=hYWFyoBxE036dh19si8UPka01H2cv64qlc4ZtgoA_7A,156 +numpy/_core/lib/npy-pkg-config/npymath.ini,sha256=e0rdsb00Y93VuammuvIIFlzZtnUAXwsS1XNKlCU8mFQ,381 +numpy/_core/lib/npymath.lib,sha256=oERoq0POVP7QAdsc5mpvQETAM6ZSorscghaiMcW0Arw,156686 +numpy/_core/lib/pkgconfig/numpy.pc,sha256=gd_zcQg2ptXJ6Rt_ip9sHb5WGZ7JhLpXRW6h8opUPbk,198 +numpy/_core/memmap.py,sha256=7HWQGjK5bS3SzAPx4wAlHwH6YFrX13sBpa6E52zxatc,13014 +numpy/_core/memmap.pyi,sha256=n0kBe4iQD5lcWvAvVhdUU18YIoPX6Sf5e2qh9IdO5uQ,50 +numpy/_core/multiarray.py,sha256=fiWmU-pL0WOeS7TT_flBueqKmz08gibdrBFCoanSSoE,58526 +numpy/_core/multiarray.pyi,sha256=JPMPttIq_yU0K1QgJ6r3mQwUWJzCcvoXqiTcSUWZ46k,36157 +numpy/_core/numeric.py,sha256=f36Y3XitfH2ga7zH2EZ4gctI1TywvTYoJyqtFepXpNA,85809 +numpy/_core/numeric.pyi,sha256=TwBDlYXpbMmBHv-uAIzagUCnTZdaxfKHLU-OblqprBw,32989 +numpy/_core/numerictypes.py,sha256=sBii4N4PX66DFZJ_QlDepfugXHToV4MqVTl9DrkTCko,16600 +numpy/_core/numerictypes.pyi,sha256=BuVBOeILfBBRLLVinSA_mCVqYtgoaM4ukXK2MOr4xVQ,3699 +numpy/_core/overrides.py,sha256=7xQXbKNNTDqDFR3cL6vcBbgczarMVdOiKjWfHM5zS4A,7668 +numpy/_core/overrides.pyi,sha256=yiWM44pF9yPJn3-VOebjxtwU0W9PyHV0QxCA2lkDrMQ,1777 +numpy/_core/printoptions.py,sha256=ZXekBr6fI18dVxsM6bxAGi80CiMlaMN4dpbPHDQiBOI,1088 +numpy/_core/printoptions.pyi,sha256=QE36MVL3BgqflyQuj6UOzywbnELMiLeyNz_1sALvOSU,622 +numpy/_core/records.py,sha256=AOi0UTbYqHe8U5AntKvc967O8hmtVw8pSqiahfJhtI8,37842 +numpy/_core/records.pyi,sha256=ecWlyL8cskXhQihOa2iyNNANhqYLhACgf2u7UaINjRk,9515 
+numpy/_core/shape_base.py,sha256=oy42iPojaLjQjsQy1c35Xl72w0VtLZCbtipZlb8XGoo,33708 +numpy/_core/shape_base.pyi,sha256=_k-bDbnUgvQTYBn1I_Y75sUz23B-N3nhw07obXhbmp4,5437 +numpy/_core/strings.py,sha256=CJ6R0-LYceFfxuaOLhZL1tZwWvYdlaoaT3f0qLXpj1o,52391 +numpy/_core/strings.pyi,sha256=d3MRp3JMOqg7qiMNM5E3lJ6qWZmpR9m-h8DCNsrf0KI,14127 +numpy/_core/tests/__pycache__/_locales.cpython-314.pyc,, +numpy/_core/tests/__pycache__/_natype.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test__exceptions.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_abc.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_api.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_argparse.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_array_api_info.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_array_coercion.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_array_interface.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_arraymethod.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_arrayobject.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_arrayprint.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_casting_unittests.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_conversion_utils.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_cpu_features.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_cython.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_datetime.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_defchararray.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_deprecations.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_dlpack.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_dtype.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_einsum.cpython-314.pyc,, 
+numpy/_core/tests/__pycache__/test_errstate.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_extint128.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_finfo.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_function_base.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_getlimits.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_half.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_hashtable.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_indexerrors.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_indexing.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_item_selection.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_limited_api.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_longdouble.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_mem_overlap.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_mem_policy.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_memmap.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_multiarray.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_multiprocessing.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_multithreading.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_nditer.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_numeric.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_numerictypes.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_overrides.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_print.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_protocols.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_records.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_regression.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_scalar_methods.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-314.pyc,, 
+numpy/_core/tests/__pycache__/test_scalarinherit.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_scalarmath.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_scalarprint.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_shape_base.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_simd.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_simd_module.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_stringdtype.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_strings.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_ufunc.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_umath.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_umath_complex.cpython-314.pyc,, +numpy/_core/tests/__pycache__/test_unicode.cpython-314.pyc,, +numpy/_core/tests/_locales.py,sha256=byq7PFI0o_eF8Ddsvgj2EQ7oEjgxYZEa2EW0SJmR_xc,2248 +numpy/_core/tests/_natype.py,sha256=ncMM01bhYe4KxM62PTwIaH6Xpze6D9mSVFZSQqqaZzQ,4531 +numpy/_core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 +numpy/_core/tests/data/generate_umath_validation_data.cpp,sha256=9TBdxpPo0djv1CKxQ6_DbGKRxIZVawitAm7AMmWKroI,6012 +numpy/_core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 +numpy/_core/tests/data/umath-validation-set-README.txt,sha256=GfrkmU_wTjpLkOftWDuGayEDdV3RPpN2GRVQX61VgWI,982 +numpy/_core/tests/data/umath-validation-set-arccos.csv,sha256=VUdQdKBFrpXHLlPtX2WYIK_uwkaXgky85CZ4aNuvmD4,62794 +numpy/_core/tests/data/umath-validation-set-arccosh.csv,sha256=tbuOQkvnYxSyJf_alGk3Zw3Vyv0HO5dMC1hUle2hWwQ,62794 +numpy/_core/tests/data/umath-validation-set-arcsin.csv,sha256=JPEWWMxgPKdNprDq0pH5QhJ2oiVCzuDbK-3WhTKny8o,62768 +numpy/_core/tests/data/umath-validation-set-arcsinh.csv,sha256=fwuq25xeS57kBExBuSNfewgHb-mgoR9wUGVqcOXbfoI,61718 
+numpy/_core/tests/data/umath-validation-set-arctan.csv,sha256=nu33YyL-ALXSSF5cupCTaf_jTPLK_QyUfciNQGpffkY,61734 +numpy/_core/tests/data/umath-validation-set-arctanh.csv,sha256=wHSKFY2Yvbv3fnmmfLqPYpjhkEM88YHkFVpZQioyBDw,62768 +numpy/_core/tests/data/umath-validation-set-cbrt.csv,sha256=FFi_XxEnGrfJd7OxtjVFT6WFC2tUqKhVV8fmQfb0z8o,62275 +numpy/_core/tests/data/umath-validation-set-cos.csv,sha256=ccDri5_jQ84D_kAmSwZ_ztNUPIhzhgycDtNsPB7m8dc,60497 +numpy/_core/tests/data/umath-validation-set-cosh.csv,sha256=DnN6RGvKQHAWIofchmhGH7kkJej2VtNwGGMRZGzBkTQ,62298 +numpy/_core/tests/data/umath-validation-set-exp.csv,sha256=mPhjF4KLe0bdwx38SJiNipD24ntLI_5aWc8h-V0UMgM,17903 +numpy/_core/tests/data/umath-validation-set-exp2.csv,sha256=sD94pK2EAZAyD2fDEocfw1oXNw1qTlW1TBwRlcpbcsI,60053 +numpy/_core/tests/data/umath-validation-set-expm1.csv,sha256=tyfZN5D8tlm7APgxCIPyuy774AZHytMOB59H9KewxEs,61728 +numpy/_core/tests/data/umath-validation-set-log.csv,sha256=CDPky64PjaURWhqkHxkLElmMiI21v5ugGGyzhdfUbnI,11963 +numpy/_core/tests/data/umath-validation-set-log10.csv,sha256=dW6FPEBlRx2pcS-7eui_GtqTpXzOy147il55qdP-8Ak,70551 +numpy/_core/tests/data/umath-validation-set-log1p.csv,sha256=2aEsHVcvRym-4535CkvJTsmHywkt01ZMfmjl-d4fvVI,61732 +numpy/_core/tests/data/umath-validation-set-log2.csv,sha256=aVZ7VMQ5urGOx5MMMOUmMKBhFLFE-U7y6DVCTeXQfo0,70546 +numpy/_core/tests/data/umath-validation-set-sin.csv,sha256=GvPrQUEYMX1iB2zjbfK26JUJOxtqbfiRUgXuAO1QcP0,59981 +numpy/_core/tests/data/umath-validation-set-sinh.csv,sha256=lc7OYcYWWpkxbMuRAWmogQ5cKi7EwsQ2ibiMdpJWYbw,61722 +numpy/_core/tests/data/umath-validation-set-tan.csv,sha256=fn7Dr9s6rcqGUzsmyJxve_Z18J4AUaSm-uo2N3N_hfk,61728 +numpy/_core/tests/data/umath-validation-set-tanh.csv,sha256=xSY5fgfeBXN6fal4XDed-VUcgFIy9qKOosa7vQ5v1-U,61728 +numpy/_core/tests/examples/cython/__pycache__/setup.cpython-314.pyc,, +numpy/_core/tests/examples/cython/checks.pyx,sha256=ALc3Mmp-LI2f3AA4E7sl-dBBYSF4l1_bG3WhO3M1mXs,11182 
+numpy/_core/tests/examples/cython/meson.build,sha256=EaUdTgpleUBROExDaFVMnWIYW4XDxFLFGK9ej_pTtQg,1311 +numpy/_core/tests/examples/cython/setup.py,sha256=h5vJxfwGpwRWaa7iWTYeCstbcDNHN0Yd_rP963v7sZ0,898 +numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-314.pyc,, +numpy/_core/tests/examples/limited_api/limited_api1.c,sha256=93ZjLJTtIfXUE7sc2QuEohjHzNSxapzZhgFXtTn8Yyg,326 +numpy/_core/tests/examples/limited_api/limited_api2.pyx,sha256=4P5-yu0yr8NBa-TFtw4v30LGjccRroRAQFFLaztEK9I,214 +numpy/_core/tests/examples/limited_api/limited_api_latest.c,sha256=YRgkeYJEtIfijcJwRqyz97ItrkUwOoiyrKy90WTkQW4,471 +numpy/_core/tests/examples/limited_api/meson.build,sha256=Sin_YDuMzgpDW8n2_WWJ5EYgRQg7FPs7JiJPaUEbFqw,1725 +numpy/_core/tests/examples/limited_api/setup.py,sha256=47iWsN-5wYB29Lb7vqSjzrAS3UtkdFufkt93XzzG-lE,461 +numpy/_core/tests/test__exceptions.py,sha256=ov3cdaYBfP28w_FcLF57ROlF5w6fwCFRNcmOVyRA-IU,3012 +numpy/_core/tests/test_abc.py,sha256=qdC7_lkQvaF_3A4xJ9H_Ih3FDlMpA9dxQHjsg4Tn-uc,2275 +numpy/_core/tests/test_api.py,sha256=mFEuLfhn2f0piUdgc-MvYw6jKKQJLdrkaN1tPZBbAUE,24915 +numpy/_core/tests/test_argparse.py,sha256=vPctuxToPkZMlbgjnzE924XkxXYUdBxlR6LsP2_-aQM,2914 +numpy/_core/tests/test_array_api_info.py,sha256=YySxzABrxjo2XVC9bwslv5VGBIiDK5N0DXpKLfhwBio,3176 +numpy/_core/tests/test_array_coercion.py,sha256=tjGrdd1RGUC9vyvu9SST6L06EubVwAoEPJD40CbtL1c,36319 +numpy/_core/tests/test_array_interface.py,sha256=s-mrGDOBpWOfIShNHrnfPuUeZDTBX5eD8R1kY4-JrUc,8065 +numpy/_core/tests/test_arraymethod.py,sha256=piiJcgPMH7cx15UykJyj_WVnzH51wIyxQIyftLtsmHE,3307 +numpy/_core/tests/test_arrayobject.py,sha256=xM3NSqoUuPJYGf_1v7DG0_g-1JCeq7KnevbIllgSbi8,3431 +numpy/_core/tests/test_arrayprint.py,sha256=UGJplHWOJaaZb6bYeKgK0akATbJP9l6H2NDvtLwb3kA,51898 +numpy/_core/tests/test_casting_floatingpoint_errors.py,sha256=fMotyIWxYMxJ_mF7zZMg3j3l7j-C6nfm_YUPw1ln5dA,5230 +numpy/_core/tests/test_casting_unittests.py,sha256=BUtpp1cJcfH-vfdicDThvcEUUtXpadhk6ulUvoVm0Aw,41899 
+numpy/_core/tests/test_conversion_utils.py,sha256=Kh56ducSAax3n8E9cXQ66GvE1ZXZ_pkWTpQy0jTEwAk,6715 +numpy/_core/tests/test_cpu_dispatcher.py,sha256=oS8EAcRN88tQJ8DhSnwcLNNotmxbfMY-xrzntwcKFxw,1619 +numpy/_core/tests/test_cpu_features.py,sha256=egAbz8ZSIY-qHduR_-InVxJQOxwxrGch96gDvWSUXCw,16252 +numpy/_core/tests/test_custom_dtypes.py,sha256=QBlykvsEq9Wns38ey1gRHn0uaJjwsp2Cfm8FKiQiRHY,14889 +numpy/_core/tests/test_cython.py,sha256=LSVOHhtIggMCeftdS8R36TnzB0aBUR60gRu-LIB1y0o,10570 +numpy/_core/tests/test_datetime.py,sha256=NyZN30MA9NOSVHND6Ox_oePoz6AG_pgzSkvWk7ouswU,127545 +numpy/_core/tests/test_defchararray.py,sha256=bADzj5CgBanE83hUp9-bPgAIsD6X9nVP59M8koH_4nQ,31541 +numpy/_core/tests/test_deprecations.py,sha256=2c7iQRBI7123mHRy-9BkgNwtW57gq9Ooy5JtUfHdeG4,17940 +numpy/_core/tests/test_dlpack.py,sha256=YJu603N6gqqraJx8f5xzArbmYVFPFDJ32DZyYom2q5c,6021 +numpy/_core/tests/test_dtype.py,sha256=4TKjus3b0tRykFwp36QF3gzagffeyi6KKZJ7N_KS4ig,85332 +numpy/_core/tests/test_einsum.py,sha256=4XOd0yyzDKW4bh5bkFGT5QD2Qs5S_EgDgq0XHyDf4qc,59244 +numpy/_core/tests/test_errstate.py,sha256=UbPwl97JxUu050tMPJN2NTyqFkUaNpRKwqex34DxJ1E,4759 +numpy/_core/tests/test_extint128.py,sha256=pKScJ8lsYGfvX4rMTAf9bwoTzLYkK1hnLZmfF1jKAo4,5842 +numpy/_core/tests/test_finfo.py,sha256=Py3BfM_eaxrbYg0bhesDMT6LtiaGfCOjlXcTZ_CnvSs,2574 +numpy/_core/tests/test_function_base.py,sha256=NuclhwR2CVPl_bnKrFOPAksM7ssK9dmaFKaGrzyGqQs,18187 +numpy/_core/tests/test_getlimits.py,sha256=PV40xaQ1thmbtePVWRpYAvFrkvkS6RWi38cB-gbYxOE,5624 +numpy/_core/tests/test_half.py,sha256=SLfewT9i3igCumuVhWpRPT9YqyBYBYSMQ0HgXNuPKnY,25853 +numpy/_core/tests/test_hashtable.py,sha256=SDZHeVow_7hEus0A0hpG-yYorz6Z4jA734xJh2YP6SA,1184 +numpy/_core/tests/test_indexerrors.py,sha256=lQZFzPModGwJDh8kPxU_F1CJAhFJXAVZok31V6JkJyo,4835 +numpy/_core/tests/test_indexing.py,sha256=WRXgPDF4_20vmOi5xqV8U5HXjTghRTeadof0Zpm9Ylw,66315 +numpy/_core/tests/test_item_selection.py,sha256=erSTKqbX9C5i9EJeTI4tIenDW4vlo1PO-wqFRKtBB68,6798 
+numpy/_core/tests/test_limited_api.py,sha256=bN6sU8V2vtFOxwhf3n0EtEXJuT5ifw2i6S2DHTDJHMQ,3565 +numpy/_core/tests/test_longdouble.py,sha256=JZ3s4NRsteMn5tW3nG8rPU4XKGlJ50Udu2mkRh0omAg,12824 +numpy/_core/tests/test_mem_overlap.py,sha256=-EocVf7okOS3d3khQz12zm1fRxsnfkXQBdJ2y5lZz8w,30250 +numpy/_core/tests/test_mem_policy.py,sha256=vAjCbfroLALRl2hd-5iqNbCiJbRdwEriKKEB_umWg5M,17301 +numpy/_core/tests/test_memmap.py,sha256=QSo4Z23N-GGkIVXqa8TXOVsCpY-vS8-Aei4wEQiELV8,8546 +numpy/_core/tests/test_multiarray.py,sha256=LWSS4gOS0MAdQuxJfXWgKQWpjQI6GCwtzBaRuEHreXA,431040 +numpy/_core/tests/test_multiprocessing.py,sha256=fxmHyEXpaYp2f-QEKFSnx5wBJqn3aH0dy_OHNcVb5-s,2089 +numpy/_core/tests/test_multithreading.py,sha256=kT3i0zCxPm7r3RHcfykhm2dKo4PqexVCYkCfEV8Pzyg,12498 +numpy/_core/tests/test_nditer.py,sha256=6jzzlDHpN0za_lCN9ghG8rB14YSWoBPwwlXYjmqYNaI,141372 +numpy/_core/tests/test_nep50_promotions.py,sha256=sw60eH2T2EBrpO0VeulZrHpnRCz6wE0MiA4U9B3_MFQ,10355 +numpy/_core/tests/test_numeric.py,sha256=VCpaYp6i1GdubO_mHtavO6ofEMjMJjZVnyKf5xbaTmM,164905 +numpy/_core/tests/test_numerictypes.py,sha256=fSNA-EhkNQwJU33c4wEBAEgV4rLeibDTdXK0bZG0FOg,24824 +numpy/_core/tests/test_overrides.py,sha256=UDdhmwueEuHjBJQf6NvJ7o8K5A2JRq92a5XG2LQaiEY,28577 +numpy/_core/tests/test_print.py,sha256=UPdSzvGrdCZnB8NoBkY9gKXOkr4NZsUjfaypNgm3iA4,7117 +numpy/_core/tests/test_protocols.py,sha256=b1clvp3Rr7EQ6x8Mtxm9jAiHxPymEU_VJBjwnMingUU,1219 +numpy/_core/tests/test_records.py,sha256=3_z_6E-TKMowzkXujZG0JyxkeiH471GMHWOQeheVm1g,21156 +numpy/_core/tests/test_regression.py,sha256=kmmkC60tAsF1tsl-TAGWm9bHoczfMHQpQ6ooNs5HBXg,98849 +numpy/_core/tests/test_scalar_ctors.py,sha256=RZBTUxU_z4rg7q2fMcRce_VYqjUgcBvL7SF3txjtrzc,6896 +numpy/_core/tests/test_scalar_methods.py,sha256=lJyS9hUf_99haU6K0FKeADApcIdD8iP6GALWb2ECIF8,12839 +numpy/_core/tests/test_scalarbuffer.py,sha256=FkcZR3rDC5tEYhknI_pMhVs-G9jQXcx7hu1WZCR6kvE,5787 +numpy/_core/tests/test_scalarinherit.py,sha256=WVjRrpNkKvQbO2n-joJ7EF6lG1zFcT4pgANAtvT7C5M,2692 
+numpy/_core/tests/test_scalarmath.py,sha256=Q4vg6st6pVkDxGGwfJ85d0KFDKfcwzq2rZedow67dHQ,47385 +numpy/_core/tests/test_scalarprint.py,sha256=aDQz8rz3KXQ5EBiaIaxmvHpLaIUkZeI-8PoTqZ4kvyM,20108 +numpy/_core/tests/test_shape_base.py,sha256=CkSHU_VkczeAvIOHSpXt-YQzsBmVt5HnnQVG7i8qFi8,32771 +numpy/_core/tests/test_simd.py,sha256=85MWw13IIWQ88SI0I61gyQar94AaLTfJTvxyFFOxrVs,50239 +numpy/_core/tests/test_simd_module.py,sha256=ml8Wu6vrAwx7Qbs_nft2eR0o3TunJuNkETtVJ271mT4,4055 +numpy/_core/tests/test_stringdtype.py,sha256=jgjl_8BBUjcGCVve2NjOUYbWo7rlKp3uUKfWRukwO6o,60358 +numpy/_core/tests/test_strings.py,sha256=Id52CzqaLS3fF5A5fFbaKNept1FeipXZ92pwcuIRMEM,61679 +numpy/_core/tests/test_ufunc.py,sha256=r8ZWPx3wketfjfqjNqCZLOoShJSqJnIP_uExNWd099U,143118 +numpy/_core/tests/test_umath.py,sha256=Yelw68ER27ru-jee6G1Vs0TXsR0bpEb0RRPqNPiSc34,200811 +numpy/_core/tests/test_umath_accuracy.py,sha256=ZCqjVUPFdn_WaKDBwmND_aZZFzdPWomf6_Yui6BV_tk,5953 +numpy/_core/tests/test_umath_complex.py,sha256=CKzN_RQ8o9LPK7Ax9WaF-1kQpSZj8Lg9BGN45GrEMv4,24242 +numpy/_core/tests/test_unicode.py,sha256=6_hND3jRcv1MkmOpjKxZSG-uBKS3WvjUz3rozgimq8U,13356 +numpy/_core/umath.py,sha256=NWmvoyWQXFuCOCQi3rOXYAlNcIfhSiKm4WNzxCCn9fc,2200 +numpy/_core/umath.pyi,sha256=dTF_yXVG0t6Sw-q7oa2otpZV8E0LweJ9Ru6jF-ENQ7M,3948 +numpy/_distributor_init.py,sha256=h5_Cq7ItDrt1JZoAh04aO54ZXsXRRkyGoNFFH9T-08U,436 +numpy/_distributor_init.pyi,sha256=CSrbSp2YYxHTxlX7R0nT3RpH7EloB1wIvo7YOA7QWy8,28 +numpy/_expired_attrs_2_0.py,sha256=Qjxzbnge_WAK5nO2glybK2Bv9gNulmxGofjbGwLLkw8,3849 +numpy/_expired_attrs_2_0.pyi,sha256=V7NCR-ik42HqU4sRKPbPOyFNYoFwREWyw6MLxh28p9Y,1300 +numpy/_globals.py,sha256=QC41LPui5xIF0vbXrBtrBlxJV8JKvxZsCc3d9yPTmLY,4276 +numpy/_globals.pyi,sha256=kst3Vm7ZbznOtHsPya0PzU0KbjRGZ8xhMmTNMafvT-4,297 +numpy/_pyinstaller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_pyinstaller/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+numpy/_pyinstaller/__pycache__/__init__.cpython-314.pyc,, +numpy/_pyinstaller/__pycache__/hook-numpy.cpython-314.pyc,, +numpy/_pyinstaller/hook-numpy.py,sha256=bJTm7LIuDHC5QyGTqUWE48gYsRcolKm3naQXoE1o_C4,1398 +numpy/_pyinstaller/hook-numpy.pyi,sha256=28DtDC-8ixBS_9WXTKnyzC1o_7K92wUk8_UfN7WBd78,156 +numpy/_pyinstaller/tests/__init__.py,sha256=l38bo7dpp3u1lVMPErlct_5uBLKj35zuS_r35e7c19c,345 +numpy/_pyinstaller/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-314.pyc,, +numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-314.pyc,, +numpy/_pyinstaller/tests/pyinstaller-smoke.py,sha256=xt3dl_DjxuzVTPrqmVmMOZm5-24wBG2TxldQl78Xt1g,1175 +numpy/_pyinstaller/tests/test_pyinstaller.py,sha256=31zWlvlAC2sfhdew97x8aDvcYUaV3Tc_0CwFk8pgKaM,1170 +numpy/_pytesttester.py,sha256=USOh37bWhXAzQLeGD1U63XpDnbkatfr-aiQu-gHkdBA,6529 +numpy/_pytesttester.pyi,sha256=Rbl_BWY754H8JGfNwexVusncuX-3XOQv2PdW0Ua-FxI,521 +numpy/_typing/__init__.py,sha256=KACZs_8l8cNG4TolvZkqv5YKLA7EyG6om9ct4fsXppg,5639 +numpy/_typing/__pycache__/__init__.cpython-314.pyc,, +numpy/_typing/__pycache__/_add_docstring.cpython-314.pyc,, +numpy/_typing/__pycache__/_array_like.cpython-314.pyc,, +numpy/_typing/__pycache__/_char_codes.cpython-314.pyc,, +numpy/_typing/__pycache__/_dtype_like.cpython-314.pyc,, +numpy/_typing/__pycache__/_extended_precision.cpython-314.pyc,, +numpy/_typing/__pycache__/_nbit.cpython-314.pyc,, +numpy/_typing/__pycache__/_nbit_base.cpython-314.pyc,, +numpy/_typing/__pycache__/_nested_sequence.cpython-314.pyc,, +numpy/_typing/__pycache__/_scalars.cpython-314.pyc,, +numpy/_typing/__pycache__/_shape.cpython-314.pyc,, +numpy/_typing/__pycache__/_ufunc.cpython-314.pyc,, +numpy/_typing/_add_docstring.py,sha256=Oje462jvQMs5dDxRFWrDiKdK08-5sU-b6WKoSRAg2B4,4152 +numpy/_typing/_array_like.py,sha256=N-e4p17RNfe7H3jnpcX0cUXur4AIcjiTi95baW-0EZY,4294 +numpy/_typing/_char_codes.py,sha256=VZvjzpRG1Ehf2frndiRLLbPRa59A6FocdwGwjHEOorM,8977 
+numpy/_typing/_dtype_like.py,sha256=HCEugkIDjsGQBYhk6QfnFtRtD-2ZigCSKeSm_7Z9cz0,3978 +numpy/_typing/_extended_precision.py,sha256=3jaNHY4qJwWODLFWvlfUQROLblfqqFDjOlp8bHnhMBI,449 +numpy/_typing/_nbit.py,sha256=pjOpz0sIdhphsXMK0dCQeQWXsrDpxCVZrYJ1wmALf04,651 +numpy/_typing/_nbit_base.py,sha256=PnQt_VbBKX_Uj17g_0yfoUqwh0bnN3LEyFHgR6GzNaw,3152 +numpy/_typing/_nbit_base.pyi,sha256=-9bQ2dXrhwqyROh5WXWM1APeDzi8QlwjJEAox-sriOU,778 +numpy/_typing/_nested_sequence.py,sha256=gZZRCnko04ZbsGaLbAx9VSsvKPR7UuwuzRAxwd1FYX0,2584 +numpy/_typing/_scalars.py,sha256=rTil_dSaoBGmmGD9QQZ0NqEP2BeZtkOEK9ZayDMB-l0,964 +numpy/_typing/_shape.py,sha256=5csdB-yj390thRrWPnwU7LcVfq-wYnd8QvXyuGdjAX4,283 +numpy/_typing/_ufunc.py,sha256=lok5QhQ5aJBARpyVoffrbeuEJsJ5vA6DaJ4aHTeUhms,163 +numpy/_typing/_ufunc.pyi,sha256=KwQANghYtbDNTZT7QvVj_aW99FgEfnpxhb5zU__W0Nk,30596 +numpy/_utils/__init__.py,sha256=q3vMrxeBeeU9pvCvLOkodDgzZS5V1jeI1_UZd4BbzDU,3572 +numpy/_utils/__init__.pyi,sha256=j1KKiEZW0MOWI3lwRzyIr-pHVqjJWkjb-NlwQok8838,728 +numpy/_utils/__pycache__/__init__.cpython-314.pyc,, +numpy/_utils/__pycache__/_convertions.cpython-314.pyc,, +numpy/_utils/__pycache__/_inspect.cpython-314.pyc,, +numpy/_utils/__pycache__/_pep440.cpython-314.pyc,, +numpy/_utils/_convertions.py,sha256=vetZFqC1qB-Z9jvc7RKuU_5ETOaSbjhbKa-sVwYV8TU,347 +numpy/_utils/_convertions.pyi,sha256=zkZfkdBk6-XcyD3zmr7E5sJbYasvyDCInUtWvrtjVhY,122 +numpy/_utils/_inspect.py,sha256=fpHbL1Gx7flw4HHjnNHNN-v8NKx1WgFBWgnX8T5hliY,7628 +numpy/_utils/_inspect.pyi,sha256=JLkhqPtHYvfBg5CN0VfGhO0u3ilzZGBThIfwxbk8YrI,2324 +numpy/_utils/_pep440.py,sha256=MZ5ZR1-o_4kA-68YcdUfkHkqUf3wRcKxQm08uv2GoE8,14474 +numpy/_utils/_pep440.pyi,sha256=cFEepudci4bYPJbIVshObOtflKvxGJVISkfemy8FPz8,3964 +numpy/char/__init__.py,sha256=KAKgke3wwjmEwxfiwkEXehe17DoN1OR_vkLBA9WFaGs,95 +numpy/char/__init__.pyi,sha256=XN-Twg_XKK4bMmir1UZ4nCtlW7nOezcU5Ix4N7X4OhQ,1651 +numpy/char/__pycache__/__init__.cpython-314.pyc,, 
+numpy/conftest.py,sha256=XdNfW6RkzEfbsZzdGxheAxmiAlH84kBueZKTN75WR7c,8900 +numpy/core/__init__.py,sha256=mCDTG1UnW38pcRG0sikf7oE2oP4MpO86ndHjquhL85U,1323 +numpy/core/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/__pycache__/__init__.cpython-314.pyc,, +numpy/core/__pycache__/_dtype.cpython-314.pyc,, +numpy/core/__pycache__/_dtype_ctypes.cpython-314.pyc,, +numpy/core/__pycache__/_internal.cpython-314.pyc,, +numpy/core/__pycache__/_multiarray_umath.cpython-314.pyc,, +numpy/core/__pycache__/_utils.cpython-314.pyc,, +numpy/core/__pycache__/arrayprint.cpython-314.pyc,, +numpy/core/__pycache__/defchararray.cpython-314.pyc,, +numpy/core/__pycache__/einsumfunc.cpython-314.pyc,, +numpy/core/__pycache__/fromnumeric.cpython-314.pyc,, +numpy/core/__pycache__/function_base.cpython-314.pyc,, +numpy/core/__pycache__/getlimits.cpython-314.pyc,, +numpy/core/__pycache__/multiarray.cpython-314.pyc,, +numpy/core/__pycache__/numeric.cpython-314.pyc,, +numpy/core/__pycache__/numerictypes.cpython-314.pyc,, +numpy/core/__pycache__/overrides.cpython-314.pyc,, +numpy/core/__pycache__/records.cpython-314.pyc,, +numpy/core/__pycache__/shape_base.cpython-314.pyc,, +numpy/core/__pycache__/umath.cpython-314.pyc,, +numpy/core/_dtype.py,sha256=BW-GFvu8BQiN-j6-3mESSWN3IQv9w8wNa3N53lisryI,333 +numpy/core/_dtype.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_dtype_ctypes.py,sha256=pwXec_vp-L06nnzFO66mwjBuPpJPhICecnyfvW2yEMg,361 +numpy/core/_dtype_ctypes.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_internal.py,sha256=i8Uf68tmcvQEmYoRWF3YqnJGCWI3GxZZEAecy5euNqg,976 +numpy/core/_multiarray_umath.py,sha256=zstXKBlwOv7q3YjVdb-zn4ypnyyI6fAGc6ahkEBav-g,2155 +numpy/core/_utils.py,sha256=dAaZtXVWhOEFiwmVsz8Mn77HsynMDKhZ7HkrjD1Q3vc,944 +numpy/core/arrayprint.py,sha256=zsOt7vFu-b1_7rPlKr7iGh12n4CmwzmU_fWV2CedPi4,349 +numpy/core/defchararray.py,sha256=4JjDjl62Abk7fNp-HZeuNSQUNNKJddNro6mD6ef4gq8,357 
+numpy/core/einsumfunc.py,sha256=cW79vhPJJsi2oD-rXc_w5EKq4hlRo1YlyqI3ZnmsMx8,349 +numpy/core/fromnumeric.py,sha256=HD3e5PrYjtMOYQCMXTGZlgkikwvYWc1XRuNT5xhSxVg,353 +numpy/core/function_base.py,sha256=z5aEiXHQ4AAkHfGLQ8ul_hvjCWL2lDkgPLlWJE_4w-M,361 +numpy/core/getlimits.py,sha256=Tut0lg_HyjJXHSfB3c0Z0DvOR7amxUksqI_1MpOlXAo,345 +numpy/core/multiarray.py,sha256=nN54eP9dzhnY4oNoVBN2q7yDjF1w7PbMQx4a7oMqTVI,818 +numpy/core/numeric.py,sha256=Qev9oaDAGdyblrDjVXBzTY069E8Z-R3PIOnDp3K6XRY,372 +numpy/core/numerictypes.py,sha256=CHNOCimC3CarkejHOm-rV7b7bmykIlJAAhXj923pKq0,357 +numpy/core/overrides.py,sha256=wETB95vH9MSwFC3rg3GAUGozKJbCuKdVMhS_zC5baUw,345 +numpy/core/overrides.pyi,sha256=HScieJk23k4Lk14q8u9CEc3ZEVOQ6hGu_FeWDR2Tyu8,532 +numpy/core/records.py,sha256=xWh78TWkPZxZx5VY05Jia8-y1HyIzkEvmrF8BiKBb68,337 +numpy/core/shape_base.py,sha256=BWl-Of1Gl8nr0eBguDIdKbS5h9OdmO-VZPUQOe2e62Y,349 +numpy/core/umath.py,sha256=XggXI2bTIR9O4U2vyfgoja0kKI2q7sF3dMCWmukWIqQ,329 +numpy/ctypeslib/__init__.py,sha256=o9oMM6-vOwS4PVageFyXsh6x23hQtcsemoAVVR3kuHw,206 +numpy/ctypeslib/__init__.pyi,sha256=GTndWDhLTrUX0PBarv8JhXsXJXDPghNd6tZL3nd1ZYY,382 +numpy/ctypeslib/__pycache__/__init__.cpython-314.pyc,, +numpy/ctypeslib/__pycache__/_ctypeslib.cpython-314.pyc,, +numpy/ctypeslib/_ctypeslib.py,sha256=ry2JWVuqOh8IjE_m_UIyQoOpWkYoDue2xZAa3Mz8zdc,19682 +numpy/ctypeslib/_ctypeslib.pyi,sha256=7WyPfrcJRmdXKjiArSGGkJ9d3PcYJTi87CXgtes7ARU,8531 +numpy/doc/__pycache__/ufuncs.cpython-314.pyc,, +numpy/doc/ufuncs.py,sha256=jMnfQhRknVIhgFVS9z2l5oYM8N1tuQtf5bXMBL449oI,5552 +numpy/dtypes.py,sha256=cPkS6BLRvpfsUzhd7Vk1L7_VcenWb1nuHuCxc9fYC4I,1353 +numpy/dtypes.pyi,sha256=FqSJWLCA-I3MV8OHMluYNSLG6r1doOr6gFcOsYeFVxk,16159 +numpy/exceptions.py,sha256=HgT_ErZLiTfceCv0y2dYoestDpKt2IR66MHu-LAVpNI,7955 +numpy/exceptions.pyi,sha256=EdR0sub_Tjf2-7aPz_4VUGkLqc1w2MIDQjywR68x9C4,821 +numpy/f2py/__init__.py,sha256=1sHuSvD-wFPLK6vD_pjY527X4KP8Jlz3bsbqY2ImAaI,2534 
+numpy/f2py/__init__.pyi,sha256=PDHjLyKbEWWTjnWcBuO0A6yVpG5lkMcyNPbwNl-R6IQ,118 +numpy/f2py/__main__.py,sha256=TDesy_2fDX-g27uJt4yXIXWzSor138R2t2V7HFHwqAk,135 +numpy/f2py/__pycache__/__init__.cpython-314.pyc,, +numpy/f2py/__pycache__/__main__.cpython-314.pyc,, +numpy/f2py/__pycache__/__version__.cpython-314.pyc,, +numpy/f2py/__pycache__/_isocbind.cpython-314.pyc,, +numpy/f2py/__pycache__/_src_pyf.cpython-314.pyc,, +numpy/f2py/__pycache__/auxfuncs.cpython-314.pyc,, +numpy/f2py/__pycache__/capi_maps.cpython-314.pyc,, +numpy/f2py/__pycache__/cb_rules.cpython-314.pyc,, +numpy/f2py/__pycache__/cfuncs.cpython-314.pyc,, +numpy/f2py/__pycache__/common_rules.cpython-314.pyc,, +numpy/f2py/__pycache__/crackfortran.cpython-314.pyc,, +numpy/f2py/__pycache__/diagnose.cpython-314.pyc,, +numpy/f2py/__pycache__/f2py2e.cpython-314.pyc,, +numpy/f2py/__pycache__/f90mod_rules.cpython-314.pyc,, +numpy/f2py/__pycache__/func2subr.cpython-314.pyc,, +numpy/f2py/__pycache__/rules.cpython-314.pyc,, +numpy/f2py/__pycache__/symbolic.cpython-314.pyc,, +numpy/f2py/__pycache__/use_rules.cpython-314.pyc,, +numpy/f2py/__version__.py,sha256=u3yEZEhZzW9QwLBqzFEO-zZDqsECiHs3ixdOlRnv9Jo,49 +numpy/f2py/__version__.pyi,sha256=8GyGk3Z3JL6jXsqXbhheqYSqtp9zqapNanxA7fHf_uA,46 +numpy/f2py/_backends/__init__.py,sha256=xIVHiF-velkBDPKwFS20PSg-XkFW5kLAVj5CSqNLddM,308 +numpy/f2py/_backends/__init__.pyi,sha256=RC41nCG_RhaOllATOhrOdFFDHGDEErv56plcdVo2GMM,141 +numpy/f2py/_backends/__pycache__/__init__.cpython-314.pyc,, +numpy/f2py/_backends/__pycache__/_backend.cpython-314.pyc,, +numpy/f2py/_backends/__pycache__/_distutils.cpython-314.pyc,, +numpy/f2py/_backends/__pycache__/_meson.cpython-314.pyc,, +numpy/f2py/_backends/_backend.py,sha256=9cxRVrA-5wcm2fnVdR-F08sYwGBf89ZZ7bl5VHqKabU,1195 +numpy/f2py/_backends/_backend.pyi,sha256=S3xxAntiuMAjkWSQgBX32XiB7AMlwb4SNahYmpUeSG0,1388 +numpy/f2py/_backends/_distutils.py,sha256=0SMBqxZgJBhfgX3HW0pEcL3S0qUFVCdSEsTLv1cEcJs,2461 
+numpy/f2py/_backends/_distutils.pyi,sha256=HHVnI_ozA7-RQIcj-x_DW_crVJPNDSDk6CYVECtHABM,476 +numpy/f2py/_backends/_meson.py,sha256=8lnllh-SRsgnpwI-cZTWYLdG93wnkkXfjMH3uIejOaQ,8870 +numpy/f2py/_backends/_meson.pyi,sha256=kH_ZNzIbH0dM7oqB71VIuyDlkNDQQXlysJ6fzbt55cA,1960 +numpy/f2py/_backends/meson.build.template,sha256=0RdmYsEP0o6g7DJgWvSl3Mo18bu_IFS3zMusQ_T7fsI,1725 +numpy/f2py/_isocbind.py,sha256=QVoR_pD_bY9IgTaSHHUw_8EBg0mkaf3JZfwhLfHbz1Q,2422 +numpy/f2py/_isocbind.pyi,sha256=ByVGplEnG_CaErwiRY5khEoIICe4kFGm416sJnIh68s,352 +numpy/f2py/_src_pyf.py,sha256=u6eLk_jbxlnY3roCebGAk6wYrYJ79ZQUgl89Ck1uafI,7942 +numpy/f2py/_src_pyf.pyi,sha256=5bfPvrUIVP_e2hqhEbpe5y_ja6rcHxlDVe6gXVR69fI,1039 +numpy/f2py/auxfuncs.py,sha256=np0s118cTsP2XIJVdyFbyDMRq-3KF_IbuKQaE3Xt-rQ,27924 +numpy/f2py/auxfuncs.pyi,sha256=MizhsYeYkzJfY1HPxFC6YSr-Vwu2xpd6ImyLuItTmlQ,8254 +numpy/f2py/capi_maps.py,sha256=ncBlu6yE8AVLpMuyTeKL4gVncCeJj708gSXiHxnwgmY,30890 +numpy/f2py/capi_maps.pyi,sha256=2b-Sg7dCr0RqxWZ9FmLm4Vgfs9chnuaVWYZCsC4gDFY,1099 +numpy/f2py/cb_rules.py,sha256=Ad-tkBGZdwjsPyC4v8zmh7c5v5mIOKybO6k9lNrlf9g,25716 +numpy/f2py/cb_rules.pyi,sha256=VYhLJlRKpe2jE2XTKXHmljol-R09YK8tWscZ28pMicI,512 +numpy/f2py/cfuncs.py,sha256=V9GZ2E3s0_rragphRwNaCxq5Z3Mvbw-UDBR7wdApbnE,54223 +numpy/f2py/cfuncs.pyi,sha256=fWlbI1vH3IdXj3hmrda5Cl_wocO5Fn3uwTUoHBc_6Mg,833 +numpy/f2py/common_rules.py,sha256=HJ21QrdclhsGHj883Ab337-bSlPZopPALzXIMNfkT6c,5173 +numpy/f2py/common_rules.pyi,sha256=2d2LfXQr_st4cPnCZPQq5_hK9sTqj2436_t7Bf0PiSs,332 +numpy/f2py/crackfortran.py,sha256=3uovq4FHMoecI-qTZLi7vdUjFWJRdn94JidlFCHeQ_E,150604 +numpy/f2py/crackfortran.pyi,sha256=10qrHERdQbbr2hOSIP3V6w9gQJT6wrYpFCgS9-C4Bb8,10564 +numpy/f2py/diagnose.py,sha256=-UK2lwqufbuTqSex3w2H4-Qld7Z1NeutRllDNt6TDoA,5224 +numpy/f2py/diagnose.pyi,sha256=IW41dCKF39vknytu9aOQwmIWuk_WsCfkin8K1iFbmAc,24 +numpy/f2py/f2py2e.py,sha256=GJ-p9MAkTyhkyQ589ywmv5XiCPZkGy8larMERXyUi8c,29615 +numpy/f2py/f2py2e.pyi,sha256=2pETEpU4CJSky8mY9eBk_6z6Py63KDpINHwTFo-e1n8,2205 
+numpy/f2py/f90mod_rules.py,sha256=GjvlboOdjc-lLmC0Tkxa8q_43fkfmo9dbWwv70xN_zI,10079 +numpy/f2py/f90mod_rules.pyi,sha256=0LIlPT9YI3Oit8aiP-i_JJPRsczDwzzQ96v14gMS1T0,467 +numpy/f2py/func2subr.py,sha256=p15rYW8c2kO-toes2Q9B9cjw9o9Jn-7pphHuxl0Jws0,10374 +numpy/f2py/func2subr.pyi,sha256=ide-SEoLyEEfa51Wqe6eKRJZvnuNYDJqD7BS0akqqzQ,393 +numpy/f2py/rules.py,sha256=U2u9J0IuC8hTJyNDO_jVdhfFrAxjsmW40y5edKDYMXQ,64723 +numpy/f2py/rules.pyi,sha256=x84T8VJwo8hmlLwpW54Q_q2ifUS5Dop9o0SHhyuxF48,1348 +numpy/f2py/setup.cfg,sha256=828sy3JvJmMzVxLkC-y0lxcEMaDTnMc3l9dWqP4jYng,50 +numpy/f2py/src/fortranobject.c,sha256=1SGmjDcW3_Ncx-aecMyjA_JfxyaW0TX6FL3mmG6wO10,47892 +numpy/f2py/src/fortranobject.h,sha256=uCcHO8mjuANlKb3c7YAZwM4pgT0CTaXWLYqgE27Mnt0,5996 +numpy/f2py/symbolic.py,sha256=YFKXeeLb5pq9cBDHLcJRq1eECpTlj0xQAsN8IKbMWgU,54828 +numpy/f2py/symbolic.pyi,sha256=BZrNj7NiDC5YZCVpnO7c7jtQETAnRokXEuUIMbg9hzM,6283 +numpy/f2py/tests/__init__.py,sha256=l38bo7dpp3u1lVMPErlct_5uBLKj35zuS_r35e7c19c,345 +numpy/f2py/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_block_docstring.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_callback.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_character.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_common.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_crackfortran.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_data.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_docs.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_f2cmap.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_f2py2e.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_isoc.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_kind.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_mixed.cpython-314.pyc,, 
+numpy/f2py/tests/__pycache__/test_modules.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_parameter.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_pyf_src.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_quoted_character.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_regression.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_return_character.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_return_complex.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_return_integer.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_return_logical.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_return_real.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_routines.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_size.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_string.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_symbolic.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-314.pyc,, +numpy/f2py/tests/__pycache__/util.cpython-314.pyc,, +numpy/f2py/tests/src/abstract_interface/foo.f90,sha256=aCaFEqfXp79pVXnTFtjZBWUY_5pu8wsehp1dEauOkSE,692 +numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90,sha256=y3R2dDn0BUz-0bMggfT1jwXbhz_gniz7ONMpureEQew,111 +numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=bwr4ytJ-NtqWE_1Map5U3wPf_BC7pOe1oK94yNNQ5DM,7716 +numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=zfuOShmuotzcLIQDnVFaARwvM66iLrOYzpquIGDbiKU,30 +numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=fqbSr7VlKfVrBulFgQtQA9fQf0mQvVbLi94e4FTST3k,494 +numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=9pbi88-uSNP5IwS49Kim982jDAuopo3tpEhg2SOU7no,540 +numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=9Cl1sdrihB8cCSsjoQGmOO8VRv9ni8Fjr0Aku1UdEWM,288 +numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=3L_F7n5ju9F0nxw95uBUaPeuiDOw6uHvB580eIj7bqI,134 
+numpy/f2py/tests/src/block_docstring/foo.f,sha256=KVTeqSFpI94ibYIVvUW6lOQ9T2Bx5UzZEayP8Maf2H0,103 +numpy/f2py/tests/src/callback/foo.f,sha256=rLqaaaUpWFTaGVxNoGERtDKGCa5dLCTW5DglsFIx-wU,1316 +numpy/f2py/tests/src/callback/gh17797.f90,sha256=-_NvQK0MzlSR72PSuUE1FeUzzsMBUcPKsbraHIF7O24,155 +numpy/f2py/tests/src/callback/gh18335.f90,sha256=n_Rr99cI7iHBEPV3KGLEt0QKZtItEUKDdQkBt0GKKy4,523 +numpy/f2py/tests/src/callback/gh25211.f,sha256=ejY_ssadbZQfD5_-Xnx_ayzWXWLjkdy7DGp6C_uCUCY,189 +numpy/f2py/tests/src/callback/gh25211.pyf,sha256=nrzvt2QHZRCcugg0R-4FDMMl1MJmWCOAjR7Ta-pXz7Y,465 +numpy/f2py/tests/src/callback/gh26681.f90,sha256=ykwNXWyja5FfZk1bPihbYiMmMlbKhRPoPKva9dNFtLM,584 +numpy/f2py/tests/src/cli/gh_22819.pyf,sha256=e3zYjFmiOxzdXoxzgkaQ-CV6sZ1t4aKugyhqRXmBNdQ,148 +numpy/f2py/tests/src/cli/hi77.f,sha256=bgBERF4EYxHlzJCvZCJOlEmUE1FIvipdmj4LjdmL_dE,74 +numpy/f2py/tests/src/cli/hiworld.f90,sha256=RncaEqGWmsH9Z8BMV-UmOTUyo3-e9xOQGAmNgDv6SfY,54 +numpy/f2py/tests/src/common/block.f,sha256=tcGKa42S-6bfA6fybpM0Su_xjysEVustkEJoF51o_pE,235 +numpy/f2py/tests/src/common/gh19161.f90,sha256=Vpb34lRVC96STWaJerqkDQeZf7mDOwWbud6pW62Tvm4,203 +numpy/f2py/tests/src/crackfortran/accesstype.f90,sha256=3ONHb4ZNx0XISvp8fArnUwR1W9rzetLFILTiETPUd80,221 +numpy/f2py/tests/src/crackfortran/common_with_division.f,sha256=JAzHD5aluoYw0jVGZjBYd1wTABU0PwNBD0cz3Av5AAk,511 +numpy/f2py/tests/src/crackfortran/data_common.f,sha256=rP3avnulWqJbGCFLWayjoFKSspGDHZMidPTurjz33Tc,201 +numpy/f2py/tests/src/crackfortran/data_multiplier.f,sha256=LaPXVuo5lX0gFZVh76Hc7LM1sMk9EBPALuXBnHAGdOA,202 +numpy/f2py/tests/src/crackfortran/data_stmts.f90,sha256=MAZ3gstsPqECk3nWQ5Ql-C5udrIv3sAciW1ZGTtHLts,713 +numpy/f2py/tests/src/crackfortran/data_with_comments.f,sha256=FUPluNth5uHgyKqjQW7HKmyWg4wDXj3XPJCIC9ZZuOs,183 +numpy/f2py/tests/src/crackfortran/foo_deps.f90,sha256=D9FT8Rx-mK2p8R6r4bWxxqgYhkXR6lNmPj2RXOseMpw,134 +numpy/f2py/tests/src/crackfortran/gh15035.f,sha256=0G9bmfVafpuux4-ZgktYZ6ormwrWDTOhKMK4wmiSZlQ,391 
+numpy/f2py/tests/src/crackfortran/gh17859.f,sha256=acknjwoWYdA038oliYLjB4T1PHhXkKRLeJobIgB_Lbo,352 +numpy/f2py/tests/src/crackfortran/gh22648.pyf,sha256=xPnKx4RcT1568q-q_O83DYpCgVYJ8z4WQ-yLmHPchJA,248 +numpy/f2py/tests/src/crackfortran/gh23533.f,sha256=k2xjRpRaajMYpi5O-cldYPTZGFGB12PUGcj5Fm9joyk,131 +numpy/f2py/tests/src/crackfortran/gh23598.f90,sha256=20ukdZXq-qU0Zxzt4W6cO8tRxlNlQ456zgD09zdozCE,105 +numpy/f2py/tests/src/crackfortran/gh23598Warn.f90,sha256=FvnIxy5fEOvzNb5WSkWzPk7yZ9yIv0yPZk9vNnS-83w,216 +numpy/f2py/tests/src/crackfortran/gh23879.f90,sha256=jELVfEGEF66z_Pv_iBHp3yGsGhadB0dnKCDtPcaz_CM,352 +numpy/f2py/tests/src/crackfortran/gh27697.f90,sha256=mTOEncxZlam6N-3I-IL0ua-iLkgqDrrVXNsE-7y7jAM,376 +numpy/f2py/tests/src/crackfortran/gh2848.f90,sha256=-IpkeTz0j9_lkQeN9mT7w3U1cAJjQxSMdAmyHdF8oVg,295 +numpy/f2py/tests/src/crackfortran/operators.f90,sha256=cb1JO2hIMCQejZO_UJWluBCP8LdXQbBJw2XN6YHB3JA,1233 +numpy/f2py/tests/src/crackfortran/privatemod.f90,sha256=9O2oWEquIUcbDB1wIzNeae3hx4gvXAoYW5tGfBt3KWk,185 +numpy/f2py/tests/src/crackfortran/publicmod.f90,sha256=nU_VXCKiniiUq_78KAWkXiN6oiMQh39emMxbgOVf9cg,177 +numpy/f2py/tests/src/crackfortran/pubprivmod.f90,sha256=-uz75kquU4wobaAPZ1DLKXJg6ySCZoDME1ce6YZ2q5Y,175 +numpy/f2py/tests/src/crackfortran/unicode_comment.f90,sha256=wDMoF7F7VFYdeocfTyWIh7noniEwExVb364HrhUSbSg,102 +numpy/f2py/tests/src/f2cmap/.f2py_f2cmap,sha256=fwszymaWhcWO296u5ThHW5yMAkFhB6EtHWqqpc9FAVI,83 +numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90,sha256=rphN_mmzjCCCkdPM0HjsiJV7rmxpo4GoCNp5qmBzv8U,307 +numpy/f2py/tests/src/isocintrin/isoCtests.f90,sha256=Oir0PfE3mErnUQ42aFxiqAkcYn3B6b1FHIPGipDdekg,1032 +numpy/f2py/tests/src/kind/foo.f90,sha256=6_zq3OAWsuNJ5ftGTQAEynkHy-MnuLgBXmMIgbvL7yU,367 +numpy/f2py/tests/src/mixed/foo.f,sha256=Zgn0xDhhzfas3HrzgVSxIL1lGEF2mFRVohrvXN1thU0,90 +numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=6eEEYCH71gPp6lZ6e2afLrfS6F_fdP7GZDbgGJJ_6ns,187 
+numpy/f2py/tests/src/mixed/foo_free.f90,sha256=UC6iVRcm0-aVXAILE5jZhivoGQbKU-prqv59HTbxUJA,147 +numpy/f2py/tests/src/modules/gh25337/data.f90,sha256=EqMEuEV0_sx4XbFzftbU_6VfGtOw9Tbs0pm0eVEp2cA,188 +numpy/f2py/tests/src/modules/gh25337/use_data.f90,sha256=DChVLgD7qTOpbYNmfGjPjfOx5YsphMIYwdwnF12X4xM,185 +numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90,sha256=MMLPSzBwuGS4UwCXws9djH11F5tG5xFLc80CDb4U9Mk,423 +numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90,sha256=1dJD1kDC_wwn7v_zF49D3n62T1x9wFxGKanQQz_VI7k,424 +numpy/f2py/tests/src/modules/module_data_docstring.f90,sha256=-asnMH7vZMwVIeMU2YiLWgYCUUUxZgPTpbAomgWByHs,236 +numpy/f2py/tests/src/modules/use_modules.f90,sha256=bveSAqXIZtd4NMlDfFei1ZlesFAa9An5LjkD-gDk2ms,418 +numpy/f2py/tests/src/negative_bounds/issue_20853.f90,sha256=IxBGWem-uv9eHgDhysEdGTmNKHR1gAiU7YJPo20eveM,164 +numpy/f2py/tests/src/parameter/constant_array.f90,sha256=fkYemwIBKsP63-FGKBW8mzOAp6k13eZOin8sQe1pyno,1513 +numpy/f2py/tests/src/parameter/constant_both.f90,sha256=L0rG6-ClvHx7Qsch46BUXRi_oIEL0uw5dpRHdOUQuv0,1996 +numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=lAT76HcXGMgr1NfKof-RIX3W2P_ik1PPqkRdJ6EyBmM,484 +numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=42jROArrG7vIag9wFa_Rr5DBnnNvGsrEUgpPU14vfIo,634 +numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=u9MRf894Cw0MVlSOUbMSnFSHP4Icz7RBO21QfMkIl-Q,632 +numpy/f2py/tests/src/parameter/constant_real.f90,sha256=QoPgKiHWrwI7w5ctYZugXWzaQsqSfGMO7Jskbg4CLTc,633 +numpy/f2py/tests/src/quoted_character/foo.f,sha256=0zXQbdaqB9nB8R4LF07KDMFDbxlNdiJjVdR8Nb3nzIM,496 +numpy/f2py/tests/src/regression/AB.inc,sha256=ydjTVb6QEw1iYw2tRiziqqzWcDHrJsNWr3m51-rqFXQ,17 +numpy/f2py/tests/src/regression/assignOnlyModule.f90,sha256=vPJbhOlNsLrgN3su4ohHUSbxE4GGKU7SiJh7dhBvX3o,633 +numpy/f2py/tests/src/regression/datonly.f90,sha256=HuBLuEw0kNEplJ9TxxSNr7hLj-jx9ZNGaXC8iLm_kf8,409 
+numpy/f2py/tests/src/regression/f77comments.f,sha256=FjP-07suTBdqgtwiENT04P-47UB4g9J5-20IQdXAHhM,652 +numpy/f2py/tests/src/regression/f77fixedform.f95,sha256=KdKFcAc3ZrID-h4nTOJDdEYfQzR2kkn9VqQCorfJGpM,144 +numpy/f2py/tests/src/regression/f90continuation.f90,sha256=VweFIi5-xxZhtgSOh8i_FjMPXu_od9qjrDHq6ma5X5k,285 +numpy/f2py/tests/src/regression/incfile.f90,sha256=gq87H2CtCZUON9V5UzcK6x_fthnWDVuPFQLa0fece1M,97 +numpy/f2py/tests/src/regression/inout.f90,sha256=TlMxJjhjjiuLI--Tg2LshLnbfZpiKz37EpR_tPKKSx8,286 +numpy/f2py/tests/src/regression/lower_f2py_fortran.f90,sha256=bWlj2Frch3onnUpd6DTaoLDa6htrrbkBiI9JIRbQPfE,105 +numpy/f2py/tests/src/regression/mod_derived_types.f90,sha256=Cb9WV1sxoKt2wJCl1Z9QR42iLYX226f_boX-_ehDLAQ,589 +numpy/f2py/tests/src/return_character/foo77.f,sha256=tRyQSu9vNWtMRi7gjmMN-IZnS7ogr5YS0n38uax_Eo0,1025 +numpy/f2py/tests/src/return_character/foo90.f90,sha256=WPQZC6CjXLbUYpzy5LItEoHmRDFxW0ABB3emRACsjZU,1296 +numpy/f2py/tests/src/return_complex/foo77.f,sha256=7-iKoamJ-VObPFR-Tslhiw9E-ItIvankWMyxU5HqxII,1018 +numpy/f2py/tests/src/return_complex/foo90.f90,sha256=_GOKOZeooWp3pEaTBrZNmPmkgGodj33pJnJmySnp7aE,1286 +numpy/f2py/tests/src/return_integer/foo77.f,sha256=EKs1KeAOQBkIO99tMCx0H7_lpqvqpjie8zWZ6T_bAR4,1234 +numpy/f2py/tests/src/return_integer/foo90.f90,sha256=0aYWcaAVs7Lw3Qbf8hupfLC8YavRuPZVIwjHecIlMOo,1590 +numpy/f2py/tests/src/return_logical/foo77.f,sha256=Ax3tBVNAlxFtHhV8fziFcsTnoa8YJdapecMr6Qj7fLk,1244 +numpy/f2py/tests/src/return_logical/foo90.f90,sha256=IZXCerFecYT24zTQ_spIoPr6n-fRncaM0tkTs8JqO1E,1590 +numpy/f2py/tests/src/return_real/foo77.f,sha256=3nAY1YtzGk4osR2jZkHMVIUHxFoOtF1OLfWswpcV7kA,978 +numpy/f2py/tests/src/return_real/foo90.f90,sha256=38ZCnBGWb9arlJdnVWvZjVk8uesrQN8wG2GrXGcSIJs,1242 +numpy/f2py/tests/src/routines/funcfortranname.f,sha256=ruyXK6eQSLQnQ_rODT1qm1cJvpHrFhI6NRrnWvEIK0U,128 +numpy/f2py/tests/src/routines/funcfortranname.pyf,sha256=EgRw8ZWGdd2uK4qCZD89r9VQtEXmnKDx59OpB0K58as,451 
+numpy/f2py/tests/src/routines/subrout.f,sha256=35DjHIj85ZLkxRxP4bs-WFTQ5y1AyDqBKAXTzSSTAxE,94 +numpy/f2py/tests/src/routines/subrout.pyf,sha256=xT_WnDpvpyPb0FMRAVTRRgm3nlfALf1Ojg8x3qZNv_4,332 +numpy/f2py/tests/src/size/foo.f90,sha256=nK_767f1TtqVr-dMalNkXmcKbSbLCiabhRkxSDCzLz0,859 +numpy/f2py/tests/src/string/char.f90,sha256=X_soOEV8cKsVZefi3iLT7ilHljjvJJ_i9VEHWOt0T9Y,647 +numpy/f2py/tests/src/string/fixed_string.f90,sha256=tCN5sA6e7M1ViZtBNvTnO7_efk7BHIjyhFKBoLC3US0,729 +numpy/f2py/tests/src/string/gh24008.f,sha256=Z6cq8SFGvmaA72qeH9tu1rP8pYjqm0ONpHn7nGbhoLA,225 +numpy/f2py/tests/src/string/gh24662.f90,sha256=xJkiYvrMT9Ipb9Cq7OXl1Ev6TISl8pq1MGemySzfGd0,204 +numpy/f2py/tests/src/string/gh25286.f90,sha256=lqEl81Iu9GIDTAbOfkkNGcGgDyyGnPB44mJw2iK1kng,318 +numpy/f2py/tests/src/string/gh25286.pyf,sha256=wYkkr5gEN9_RtGjpqh28X1k8KCgh0-Ds9XAt8IC9j4A,393 +numpy/f2py/tests/src/string/gh25286_bc.pyf,sha256=ZRvgSzRlaPEx8GyNt97FrRhtCg-r4ZTEDsHNBfit4m8,396 +numpy/f2py/tests/src/string/scalar_string.f90,sha256=U1QqVgbF1DbxdFekRjchyDlFRPnXwzG72kuE8A44Za8,185 +numpy/f2py/tests/src/string/string.f,sha256=JCwLuH21Ltag5cw_9geIQQJ4Hv_39NqG8Dzbqj1eDKE,260 +numpy/f2py/tests/src/value_attrspec/gh21665.f90,sha256=MbbSUQI5Enzq46KWFHRzQbY7q6ZHJH_9NRL-C9i13Wg,199 +numpy/f2py/tests/test_abstract_interface.py,sha256=2fTmp5-yLaNKtWvP0jQ6_kqkyWI73kgjIl7Ara25cII,837 +numpy/f2py/tests/test_array_from_pyobj.py,sha256=Yy6I46hlJLgSAlQ_RkRbZgZ_vTyH9BVDE5OxQ5TQRvM,24395 +numpy/f2py/tests/test_assumed_shape.py,sha256=WaIBz38eV2AzRwOvTvTaRkNks8c3H_61TGKtAReP6gk,1517 +numpy/f2py/tests/test_block_docstring.py,sha256=DOTSbdInRJCunaEycMGWQUy0b5rIeugPxUKmNg8FA34,604 +numpy/f2py/tests/test_callback.py,sha256=uVRfXR6q4ukZfbRUBCnc1cvKIEiaPFr6gc3mrgMzt1w,7362 +numpy/f2py/tests/test_character.py,sha256=dLj5WhKbP5CYnuQMsB3uWV5pcgoQCEscujuBSCXwkwA,22572 +numpy/f2py/tests/test_common.py,sha256=r_nJN4ZCZ3DstAadAoO_R_igybcb5zxwOJsv_-fRBa8,667 
+numpy/f2py/tests/test_crackfortran.py,sha256=QNZ9VI61XDF5KoO8r9012AaGaVHzM7g1e2UVK0A4uUU,16834 +numpy/f2py/tests/test_data.py,sha256=XBQTj0WqR-XWHuRhT1PWXankrhlNxeqC6S03XA2b7AI,2966 +numpy/f2py/tests/test_docs.py,sha256=YwIFQGu4gwCrnNQ0i4O3sCs_ZCwovgr2fGt13DrfKbU,1996 +numpy/f2py/tests/test_f2cmap.py,sha256=hyzKOv261wPDWAmpuidAVFK3x9WXm64U_r3E_bmxNXg,404 +numpy/f2py/tests/test_f2py2e.py,sha256=QHYd7ghKHeUPB-piK0zvoM-BkmrH1Tvg8-wa4r7wMMI,29552 +numpy/f2py/tests/test_isoc.py,sha256=KK4VeoPhjF658msdnRYCWlzagFEea8h5SzdjB5FaDk0,1490 +numpy/f2py/tests/test_kind.py,sha256=0kASrNopTkYDN_d2j-i9Jf22nBjQof0eqGGcuEfEqI0,1897 +numpy/f2py/tests/test_mixed.py,sha256=3J9eftoqFIXViiqd21Ej02eNztjbIuzaqb_2OZYZUSw,897 +numpy/f2py/tests/test_modules.py,sha256=rbm9cPZilhdIuFA6rxqtRlPtyd0_IxKoFlB8VFA-1Vc,2384 +numpy/f2py/tests/test_parameter.py,sha256=wwyq8vA5FFljYfF55srLYqFvJ-ybdG2vEpr2CRdPqVs,4763 +numpy/f2py/tests/test_pyf_src.py,sha256=Tg8PzypY1P2pda3k2VbuKX97VCD7CtdgyJajP-U7AIc,1177 +numpy/f2py/tests/test_quoted_character.py,sha256=kvjgkp3bIP2lnkZ2MzS2yB5ytEJfHZydz24VAqs3UKM,495 +numpy/f2py/tests/test_regression.py,sha256=TriZy41MAIYRAHweZ-dg9GYyVmIjaIkPL4bAqmiCm50,6369 +numpy/f2py/tests/test_return_character.py,sha256=bdryZo5fXfTUE35M3_8gDqPCa9RKtkxROqBcnTjuwYo,1582 +numpy/f2py/tests/test_return_complex.py,sha256=1Nb6IsRfzHTCCiBTU5nHoY36w--Uh2goTedK-d8Xrc4,2507 +numpy/f2py/tests/test_return_integer.py,sha256=X3hYAJX9QaC0MYsIXg1Po-FRh4KCRN0KW0UFVpdkDww,1868 +numpy/f2py/tests/test_return_logical.py,sha256=ob4-_KkwWohpwF45SWKxiCKtktReLuqg4WQ8YFcMOQ0,2113 +numpy/f2py/tests/test_return_real.py,sha256=e7I27OmdJGCDQ9vX12U29M3WCOcmU1KRsivRb6BkKvo,3382 +numpy/f2py/tests/test_routines.py,sha256=6hOB8Rn4M-MCgDhKMoqI3YIn274wyWBzaLTjrJrTYCw,824 +numpy/f2py/tests/test_semicolon_split.py,sha256=5K_jZ2rJLxvwRDQu0gd_yiOEKiJmngvrH8vLeqsbC0Y,1702 +numpy/f2py/tests/test_size.py,sha256=D-7AOZtUn0DekZr-K753WQIcnt0prEdLigSuzjEicFg,1200 
+numpy/f2py/tests/test_string.py,sha256=X6ECwK-mh-0Dfp8Pcag-jWxidoXtt009QDfjc8sfGT8,3038 +numpy/f2py/tests/test_symbolic.py,sha256=-NUoAJeeHZ0_Uj4NFnjz0awFk7vTZJ2rtHOErY0kkyk,19061 +numpy/f2py/tests/test_value_attrspec.py,sha256=P3ypxCXsakygnP1IdXH1hzw5VaYKGN40z3SOwK61IPU,345 +numpy/f2py/tests/util.py,sha256=YVm0U_jGp_LRO7k4FszcuC5AqoNsnf-cc564vEljABk,12554 +numpy/f2py/use_rules.py,sha256=MX3S-9SkSznXTwWWwaccMBhhyE_ZyoG3MCLLnsXXHEE,3475 +numpy/f2py/use_rules.pyi,sha256=J7S58xd70JkQBKtzl01T3uKmGfskGO6TiosmxUMW62Y,433 +numpy/fft/__init__.py,sha256=G7Nr6rpJggN1e3PgGggExF6Ei_Wt8n57eQIXf5vtzLM,8369 +numpy/fft/__init__.pyi,sha256=a2GtovDgo6O66DqPN7iVMBhl-6v_MjkTi2F5GGnKIAs,531 +numpy/fft/__pycache__/__init__.cpython-314.pyc,, +numpy/fft/__pycache__/_helper.cpython-314.pyc,, +numpy/fft/__pycache__/_pocketfft.cpython-314.pyc,, +numpy/fft/_helper.py,sha256=VpGmqY4O7zZWm0vg72mGu3AsZ-bca_mVG-olxoJYmmI,7022 +numpy/fft/_helper.pyi,sha256=UHKdQi3Rjz73WadYmvDCyRf95MSBNao3l1WZlJJHljs,1420 +numpy/fft/_pocketfft.py,sha256=UVXDxPok9kPHCjqH2XqrSTQ33fSCMIFmx_46Wj6V0TU,64292 +numpy/fft/_pocketfft.pyi,sha256=ksJK_WTkhCPo4ZAfdApGI3-uoK6WGH3NIbrrBx1jg5k,3353 +numpy/fft/_pocketfft_umath.cp314-win_amd64.lib,sha256=X4K-FHj8oYitFJYm4flebpzPbAvwDEOxMjSqLuksn9s,2176 +numpy/fft/_pocketfft_umath.cp314-win_amd64.pyd,sha256=19Tto_PH5PratugNe1ugL-zgRYiwxM-LYPeTr2F_y8Q,276480 +numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/fft/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/fft/tests/__pycache__/test_helper.cpython-314.pyc,, +numpy/fft/tests/__pycache__/test_pocketfft.cpython-314.pyc,, +numpy/fft/tests/test_helper.py,sha256=Yff4EXasyH-nD6dyEcZfX4g61ZjQcAY7XHfQLnXI1EY,6321 +numpy/fft/tests/test_pocketfft.py,sha256=m98HSj9duHLP2IBxOXllDCj-92hNxCHgxFXXuExgqmI,25035 +numpy/lib/__init__.py,sha256=sZL_BFMWHTRmMAUWlbBJ-ngMCdk6UJ9FdU3n0S0SMEg,3101 +numpy/lib/__init__.pyi,sha256=PjSJHE90GngKOVep78JzBhBJFSBVCP5ZRrj0vMtG7G8,1547 
+numpy/lib/__pycache__/__init__.cpython-314.pyc,, +numpy/lib/__pycache__/_array_utils_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_arraypad_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_arraysetops_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_arrayterator_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_datasource.cpython-314.pyc,, +numpy/lib/__pycache__/_format_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_function_base_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_histograms_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_index_tricks_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_iotools.cpython-314.pyc,, +numpy/lib/__pycache__/_nanfunctions_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_npyio_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_polynomial_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_scimath_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_shape_base_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_stride_tricks_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_twodim_base_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_type_check_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_ufunclike_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_user_array_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_utils_impl.cpython-314.pyc,, +numpy/lib/__pycache__/_version.cpython-314.pyc,, +numpy/lib/__pycache__/array_utils.cpython-314.pyc,, +numpy/lib/__pycache__/format.cpython-314.pyc,, +numpy/lib/__pycache__/introspect.cpython-314.pyc,, +numpy/lib/__pycache__/mixins.cpython-314.pyc,, +numpy/lib/__pycache__/npyio.cpython-314.pyc,, +numpy/lib/__pycache__/recfunctions.cpython-314.pyc,, +numpy/lib/__pycache__/scimath.cpython-314.pyc,, +numpy/lib/__pycache__/stride_tricks.cpython-314.pyc,, +numpy/lib/__pycache__/user_array.cpython-314.pyc,, +numpy/lib/_array_utils_impl.py,sha256=JQE9Ul515em350VcRXQaO4BmE0HwIRPdBpFNXu4Hnws,1759 +numpy/lib/_array_utils_impl.pyi,sha256=bymzyS0JT02-btcDJ90mhAVpoxnaZ9qJmIrfAg4I4nY,512 
+numpy/lib/_arraypad_impl.py,sha256=rH_OU6Xvom3l4p2DbrhArQIPjsjM9EONxtSqKNzxd6g,34445 +numpy/lib/_arraypad_impl.pyi,sha256=aT7ma7wLUliNkdRK4j2XHEza25i0WzUJ-ETUPxYjCFU,2017 +numpy/lib/_arraysetops_impl.py,sha256=EVRhEQCqRxzheU68u5kGgS8pkls_sUSM9WXa9Z9Y3ho,38499 +numpy/lib/_arraysetops_impl.pyi,sha256=s-mUcIIa8xVOtYJx8WNnhPRrsnfMNUlBq1Imt66nCI0,13617 +numpy/lib/_arrayterator_impl.py,sha256=HUtCLBXcG7mC5AX3KuJcDjHD9FEPheCcHypT6PtSswY,7442 +numpy/lib/_arrayterator_impl.pyi,sha256=_vhCb927PfSM5E40NbAhIxxmcBxPhcpk9V4ZdzBTOIQ,1920 +numpy/lib/_datasource.py,sha256=csFfOVL00V76KM--z9wROAImHDPB5l5b-8h1AJli53c,23431 +numpy/lib/_datasource.pyi,sha256=g3laWGQw8jdnR8h_Bp9f1Mp71k5PmRxFBNuT1suuNqc,1025 +numpy/lib/_format_impl.py,sha256=_TFgQtrgndkQWxPSjdO95ld_cCgESpjsJ04BLqZPGNk,37920 +numpy/lib/_format_impl.pyi,sha256=OAyT3J74w528YjS9J_TV7rWtC8gXgBtXGn9IQ2Af5f8,2139 +numpy/lib/_function_base_impl.py,sha256=T8U10Vg7R3xpf9B5zvrWtLZmGSeb0OZfCQLIytnyeUk,199498 +numpy/lib/_function_base_impl.pyi,sha256=m0JuSD13oEeESW5jBSFnjJHkgLFsreEg0r-KwtqGNhU,77346 +numpy/lib/_histograms_impl.py,sha256=2dceYr7BOVTkypST4CVgskJzjI-ka_FrL5oxBW7rHEQ,39517 +numpy/lib/_histograms_impl.pyi,sha256=Kg1Q0AgAi6QatGM3paKB2VluGkUCKbtCM1gy4_atEhU,1095 +numpy/lib/_index_tricks_impl.py,sha256=ZzRtFdpbYtDEYox3-uzDtzEOILMZ0LdX67VkWNIyBpY,32567 +numpy/lib/_index_tricks_impl.pyi,sha256=So1jOZ2__eFpN9oS7NXgHDfKHBP9kVmX4IQp11KufK8,8483 +numpy/lib/_iotools.py,sha256=Yh7xIu5OnSNnj2aX-yW8viJNHNC33nhCDZtaep8TszE,31776 +numpy/lib/_iotools.pyi,sha256=epBkUTN7SvJbWiZ63WJOHCR6YUNICzk7UXE6ZqqPgXM,3768 +numpy/lib/_nanfunctions_impl.py,sha256=qmmx5H7q-atgoYQAWcDAHWOQ-WWXCrXyUewwzRygjss,73405 +numpy/lib/_nanfunctions_impl.pyi,sha256=cq5lGiV_WoVNwboSEenA6Zmzi0rBTMfK_NNiTHeZx7w,864 +numpy/lib/_npyio_impl.py,sha256=9mlk6CAckDnBIaOW1KxyePnNNUguRCE2co3lk2Xetjw,101219 +numpy/lib/_npyio_impl.pyi,sha256=fs-QJUwOMh6MTLE2fTfzK_e2Q16B7tszlProdbpcgUI,9793 +numpy/lib/_polynomial_impl.py,sha256=UqyQMpVAJRR6z9Hj9Rq7MK509cI8OncTkM2XLsYsmvU,45590 
+numpy/lib/_polynomial_impl.pyi,sha256=Z92OU7G7sWt4lBdO9LYq_kn_3nA3HxOUEYQkP70MfEk,7887 +numpy/lib/_scimath_impl.py,sha256=VjysRI24mMd4GaaEy5e1npARDc0_TKgQqq6TRoiNdAY,16326 +numpy/lib/_scimath_impl.pyi,sha256=3-C37vHfGAP84SLbuxHrPAO8ZQ-vgTSXTyh5EjNHXh0,2867 +numpy/lib/_shape_base_impl.py,sha256=BbDGec_0jGK-YkAG0hY36eCE86a0tksMCF6ooLgYmVQ,40302 +numpy/lib/_shape_base_impl.pyi,sha256=ENzamOUsGz57nNl3w5juRq2RkWC4k0MpeVg1aWjV9W4,5777 +numpy/lib/_stride_tricks_impl.py,sha256=uuGAiZWcaK-N9dQkohbAtRlrYItoany4KAt4hIQs6N8,19697 +numpy/lib/_stride_tricks_impl.pyi,sha256=7zWJcIwwy9bIu-FjDQFZJA79GO6drAzf8QLkWzGQ7W0,2032 +numpy/lib/_twodim_base_impl.py,sha256=bKyDb9b4wvQADhgsUujOtJci0ztuvFo_5zQtsBHPlhI,35124 +numpy/lib/_twodim_base_impl.pyi,sha256=-vD7xZiLmOrK49OiYNwbF6vzkKXbSAYUnYIhqBrcJrM,13432 +numpy/lib/_type_check_impl.py,sha256=ONqM3mhe3ITonSZJZISpOJ_fk2WA4PrdecW-HUFvRHM,20624 +numpy/lib/_type_check_impl.pyi,sha256=C7guCWlJFMl_w0M2GjYFql8eOoF3kN1hgda1YKSbBU8,10042 +numpy/lib/_ufunclike_impl.py,sha256=DLbbgYDRSbyqmX3XzJTjOBRD5oOjUUEvpCViahyJqoA,6213 +numpy/lib/_ufunclike_impl.pyi,sha256=rS_jOTzvFASpAHFk6cdH8RGUrze2n5_FZnlgQeCRi5I,1774 +numpy/lib/_user_array_impl.py,sha256=NZi7p0dSNoEmUN4KR1aB1_gf9r0lYAe_rT1VNZCQc5w,8336 +numpy/lib/_user_array_impl.pyi,sha256=cbqaYFs4J-WBtMNKnSBviiIR0NDA5_g6_tAJpnuhqWE,9494 +numpy/lib/_utils_impl.py,sha256=QdnCyIkxeZaR2wwn4QXxXBCokxuXi-RG-J_GHG4eL-s,24283 +numpy/lib/_utils_impl.pyi,sha256=KTI0vNECNqvxwzhp3SrutN02KGV-WXna7q56IfDW8Uw,746 +numpy/lib/_version.py,sha256=J1pqujOE4R_kH7R1yxtimwbWVRxDNnBD82XIZ9ly1yY,4936 +numpy/lib/_version.pyi,sha256=zAmfNnFeke7_lHsvR94fafNBcuJHpZ1jaB2PyzEostc,658 +numpy/lib/array_utils.py,sha256=zmrUIVleWEWzl9XjEpUlDUQHt9qbbsytyu_jLj-OgnE,151 +numpy/lib/array_utils.pyi,sha256=YYnx_V4CMdSbJTCnYboN1swcswmlOD2e4ZvQj5WsSak,197 +numpy/lib/format.py,sha256=ii8MRQmPZ1nAaqnMqeYtKgdNRP52iZsHPo5rODl20UM,501 +numpy/lib/format.pyi,sha256=stg21MwwoAp4mfTkx9DD4lxKKXZzfABbqqiKim985Bk,876 
+numpy/lib/introspect.py,sha256=HBySrZfK5neieljp9q29t7BPag8cFfqAuD8njgJDZ3g,2843 +numpy/lib/introspect.pyi,sha256=IsntuFrlFhRBZcGGhRUTAgnONUHEbYw_2ApPmffx8QE,155 +numpy/lib/mixins.py,sha256=uY-4dCmzviGP1kpfksxSLn__EA_oTwmLqyr4Dl_KA04,7375 +numpy/lib/mixins.pyi,sha256=QcVCn2u2NAWm5JiUNNpZZWIT43ZaB-rWBbP7fh3NktU,3260 +numpy/lib/npyio.py,sha256=EY5_tqGplRo-B3cJtqvf9HC34nQKr5JNgyykHWj5q_E,69 +numpy/lib/npyio.pyi,sha256=6xZ6zF-6qKuSOfjjDL4YN43xKPYcD6IpzJiDiLpmSSs,121 +numpy/lib/recfunctions.py,sha256=7to2wo6f8bxxUfVzHrMbo0gR9FNly3zdJK3jRirqrfE,61220 +numpy/lib/recfunctions.pyi,sha256=WuYzU-PEbvywxFThpCRCC9um0boqijX3tW8TJdrIeto,13896 +numpy/lib/scimath.py,sha256=3nsYqFdGoo0danaHnGK8Qrz0AAA31THUX0qnQsp5eu8,182 +numpy/lib/scimath.pyi,sha256=9y5MNnmU1oLBK7zs-tP8zCmh6QiY0gvmNLCIw3WjsNU,245 +numpy/lib/stride_tricks.py,sha256=qXan9_UpXFAoDLBAaJ1wYb0B86DgP48ogp5sUu3s1Lw,89 +numpy/lib/stride_tricks.pyi,sha256=6-K3R7XBw_fcpHaAIs9y4LEc5i4r5gZUG-tg4EOR-ew,128 +numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/lib/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test__datasource.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test__iotools.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test__version.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_array_utils.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_arraypad.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_arraysetops.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_arrayterator.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_format.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_function_base.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_histograms.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_index_tricks.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_io.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_loadtxt.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_mixins.cpython-314.pyc,, 
+numpy/lib/tests/__pycache__/test_nanfunctions.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_packbits.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_polynomial.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_recfunctions.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_regression.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_shape_base.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_stride_tricks.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_twodim_base.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_type_check.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_ufunclike.cpython-314.pyc,, +numpy/lib/tests/__pycache__/test_utils.cpython-314.pyc,, +numpy/lib/tests/data/py2-np0-objarr.npy,sha256=ZLoI7K3iQpXDkuoDF1Ymyc6Jbw4JngbQKC9grauVRsk,258 +numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 +numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 +numpy/lib/tests/data/py3-objarr.npy,sha256=7mtikKlHXp4unZhM8eBot8Cknlx1BofJdd73Np2PW8o,325 +numpy/lib/tests/data/py3-objarr.npz,sha256=vVRl9_NZ7_q-hjduUr8YWnzRy8ESNlmvMPlaSSC69fk,453 +numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 +numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 +numpy/lib/tests/test__datasource.py,sha256=davD8e4HZO8IzcSplB6w-j3G133L0AUBAa2GMAJpgYY,10892 +numpy/lib/tests/test__iotools.py,sha256=-BFgfKSpxhDXtn7OK_puB9fKKikBQaAyS_C8dJvb0KM,14188 +numpy/lib/tests/test__version.py,sha256=I6-cyr_7w1TUvC25hR1gTE3x8S-QRAXRsB4IjPxY3tg,2063 +numpy/lib/tests/test_array_utils.py,sha256=gRsql9I0f7RQk-1b8iPa1jDZsd_auCF05-ulxBCVfN4,1150 +numpy/lib/tests/test_arraypad.py,sha256=mlTByN5tvRlSQg3JGNY6GkROLS_ZkXieSkBst-Ct4Hk,58043 +numpy/lib/tests/test_arraysetops.py,sha256=lqLU-cpVmzWphPp7LfDCmFG-yCzPwuIuIF6ql3n60mU,49048 +numpy/lib/tests/test_arrayterator.py,sha256=4WinyEB5liYMGS__8tCeZJcoODkNMFdSMPFl4W-C_y0,1341 
+numpy/lib/tests/test_format.py,sha256=SV3W7WdY-HSo_ASpICUt6XgN_va6Dngrm26o16aBx2c,43012 +numpy/lib/tests/test_function_base.py,sha256=zgPaj_Ikxstklgi4KFo2c41S47GYsbBaeGCDzOa1wck,182973 +numpy/lib/tests/test_histograms.py,sha256=ueQJepW8kS-_f2IsdXxEKIkXi2hwcaDJw2D3u3WhCCA,34806 +numpy/lib/tests/test_index_tricks.py,sha256=QeDRZu7H4gq8kUgOZX6awGC6uq3PTMV0hKXAlFj6rb0,25100 +numpy/lib/tests/test_io.py,sha256=-QP2cB5tyOwpBZms37WLtbr0-m2SdfrPxtbOl1u7qK4,114257 +numpy/lib/tests/test_loadtxt.py,sha256=-nMbVMrTNE9t-lPc4qDNfUrBGjTeAAsyWzm6fszlBjE,41590 +numpy/lib/tests/test_mixins.py,sha256=eWaFNkjo_IPlP4-7T-sitpZqhAgjUOjqkglUaPIdWXA,7224 +numpy/lib/tests/test_nanfunctions.py,sha256=sPWiCbDHTp4y3CEG7ART_UdFiaEWeSYJYPLfHeG54zQ,55735 +numpy/lib/tests/test_packbits.py,sha256=RAk590EWlPvH9M3trkBKb6MbKNhMjo-otRLEYznT7dM,17919 +numpy/lib/tests/test_polynomial.py,sha256=cWODMJ0jNINUibSTDTpa-DR6V92qFXd0qs9E9Tx8btk,12730 +numpy/lib/tests/test_recfunctions.py,sha256=DbbWdC6KvS3z6zekexcs4o6_drTwtGGq3QLba_hYPrw,45015 +numpy/lib/tests/test_regression.py,sha256=6Us-PZWNVT__IW4vj2-rA77Sdhpaoqo-_JgySVXL3dg,7947 +numpy/lib/tests/test_shape_base.py,sha256=R65ZPBfWnvzIyIrYbyJAUBUQqDbZX3gN0zx8sCydjKc,28219 +numpy/lib/tests/test_stride_tricks.py,sha256=TkLvnfcLK0CGyWu2I-60fYDgkxDskvjgr1de20z4VE4,23667 +numpy/lib/tests/test_twodim_base.py,sha256=j9PmcG03wmHEZjOL6pMp-kKOczqjcYYYh8ptdFSBPnA,19484 +numpy/lib/tests/test_type_check.py,sha256=UzaOYqNOWjSAxiur6FtERAEMIfYxk84nL_lpeDJGzxU,15269 +numpy/lib/tests/test_ufunclike.py,sha256=7oc71qsMf8NSPU-bMOZNw7H5wwksjPlxy9jJHTMK9Bc,3112 +numpy/lib/tests/test_utils.py,sha256=9XtDAa79N5LOqpLUHKY88ajRuY2gLzRMyJ1dry_4dkU,2454 +numpy/lib/user_array.py,sha256=5z7-hfXnWT5Oq4_WPnjNWGPc9RHUWZcyq4L9ZM3kkGQ,64 +numpy/lib/user_array.pyi,sha256=IaCNerLboKjt3Fm-_k_d8IqeyJf7Lc9Pr5ROUr6wleM,54 +numpy/linalg/__init__.py,sha256=N4KqBOUEZURrj_00q3Df_yLwPrmRMKHdm6UR1sibu2Y,2171 +numpy/linalg/__init__.pyi,sha256=5JqTmDBoOCqKUsQrQdMPV_kSfyan8ScHtFqqZRjrEE4,1087 
+numpy/linalg/__pycache__/__init__.cpython-314.pyc,, +numpy/linalg/__pycache__/_linalg.cpython-314.pyc,, +numpy/linalg/_linalg.py,sha256=mYzZDZvLJE8w9TQTaS0QgQOeZaMto-wXb-9CKUi81Rg,118474 +numpy/linalg/_linalg.pyi,sha256=QDmN2W5x-FJRaqC0KdhXVn20skjIgTKkIeWW5crRht8,14024 +numpy/linalg/_umath_linalg.cp314-win_amd64.lib,sha256=IR2HZkUGcNJLZ5wQF7azF_eBvhoxwSrIGKHfagKXBZk,2120 +numpy/linalg/_umath_linalg.cp314-win_amd64.pyd,sha256=lAaBlc8E10OYHi5xJsHIr0OgBhdYPb4dQKRV_pA5Zqc,112128 +numpy/linalg/_umath_linalg.pyi,sha256=Q6Cr6NvaeEiY7GL9Oe57YGLl2Eby87BW6bswPmMUqKc,1451 +numpy/linalg/lapack_lite.cp314-win_amd64.lib,sha256=BTcmx8vLXDIwUeLayHxkdT298jXaWmOAmPwjLM3eFpE,2084 +numpy/linalg/lapack_lite.cp314-win_amd64.pyd,sha256=LwyT5recYWxGLB3F32ZUnND9SlCRGDehhbPN1T152fA,18944 +numpy/linalg/lapack_lite.pyi,sha256=CuG_G6XugM6ZNqvhXqw6JUPIoeGSpQyuCMTEudYIqzU,2850 +numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/linalg/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/linalg/tests/__pycache__/test_deprecations.cpython-314.pyc,, +numpy/linalg/tests/__pycache__/test_linalg.cpython-314.pyc,, +numpy/linalg/tests/__pycache__/test_regression.cpython-314.pyc,, +numpy/linalg/tests/test_deprecations.py,sha256=wEkvYP_0k-iqpVJh-bhgjFg67w4TCUthEe4adSSA8T8,637 +numpy/linalg/tests/test_linalg.py,sha256=mGBNdDIZ0SwUie32e0gbGUWEdgOkodVjoNvZGLkAp0U,87492 +numpy/linalg/tests/test_regression.py,sha256=NBImCcUaMF4lNaLRPB5tYW4LHlKUCZMKkuibML63_90,6978 +numpy/ma/API_CHANGES.txt,sha256=U39zA87aM_OIJhEKvHgL1RY1lhMJZc1Yj3DGLwbPbF0,3540 +numpy/ma/LICENSE,sha256=1427IIuA2StNMz5BpLquUNEkRPRuUxmfp3Jqkd5uLac,1616 +numpy/ma/README.rst,sha256=_MHrqHTE8L4wiJJqvaOh1l-xTxidwdilc_SZkFbgubM,10110 +numpy/ma/__init__.py,sha256=Zh2Hil4sdNNkf-0aJQrnOPmRkRwR4rOAzhN-n3RHsbU,1459 +numpy/ma/__init__.pyi,sha256=IorrWDELrFWTc_WfqWNCtWIcL0PrRE9aEZSzAABKMks,7404 +numpy/ma/__pycache__/__init__.cpython-314.pyc,, +numpy/ma/__pycache__/core.cpython-314.pyc,, 
+numpy/ma/__pycache__/extras.cpython-314.pyc,, +numpy/ma/__pycache__/mrecords.cpython-314.pyc,, +numpy/ma/__pycache__/testutils.cpython-314.pyc,, +numpy/ma/core.py,sha256=07dkfn4y_eOODNrgQBqYECjb8hTZad_bJ-0wU__nWXc,298146 +numpy/ma/core.pyi,sha256=3JZf_2zQ0BwAItjUGj9nXNsdM4igsMBfaqqvZBXWBv4,135211 +numpy/ma/extras.py,sha256=AJdPH7w71TeGQYgesG2iUkyLZBzGrpZzl6yLdT4UKmc,70056 +numpy/ma/extras.pyi,sha256=M3U-bBns8ejf-mZUUdTct4oMCReRmg6gUkN507SN7hA,9233 +numpy/ma/mrecords.py,sha256=wKhzJCGS9tJKoua0OBWeu41us9iWLnCfuXhMjeIKWTg,27248 +numpy/ma/mrecords.pyi,sha256=uKY5QR8liuxaLw8tNfDm9TBXxLdToXZ3tunufVcfPIk,2169 +numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/ma/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/ma/tests/__pycache__/test_arrayobject.cpython-314.pyc,, +numpy/ma/tests/__pycache__/test_core.cpython-314.pyc,, +numpy/ma/tests/__pycache__/test_deprecations.cpython-314.pyc,, +numpy/ma/tests/__pycache__/test_extras.cpython-314.pyc,, +numpy/ma/tests/__pycache__/test_mrecords.cpython-314.pyc,, +numpy/ma/tests/__pycache__/test_old_ma.cpython-314.pyc,, +numpy/ma/tests/__pycache__/test_regression.cpython-314.pyc,, +numpy/ma/tests/__pycache__/test_subclassing.cpython-314.pyc,, +numpy/ma/tests/test_arrayobject.py,sha256=ap06C0a0dGWcOknpctbhLbzHSNd2M9p_JL2jESqBBGk,1139 +numpy/ma/tests/test_core.py,sha256=AOvwXQu7hFU56UBdB8BpM5dYqhlMF-KOzWIk4vy8_CU,230489 +numpy/ma/tests/test_deprecations.py,sha256=K5g3yWbztFEvCHXO70DzI5hSr2QivnNzTOdMv8lipZg,2089 +numpy/ma/tests/test_extras.py,sha256=e-1k5fcukD-9faz-d-TPNMqnEmHX5WAt8LQTYBxOfOM,77205 +numpy/ma/tests/test_mrecords.py,sha256=vQb7I7b4_jqr31YFEJiH7XYOGcMGAmOGKq2sESYHQdk,20332 +numpy/ma/tests/test_old_ma.py,sha256=Urg21tx11ipDOWbXG6MhWVrKLKMx1oGUDyHopcG3u7A,34014 +numpy/ma/tests/test_regression.py,sha256=XExMB46BYWyB7l6Kh9iDIJJuPz-Imbifp1RGgMVgqLk,2802 +numpy/ma/tests/test_subclassing.py,sha256=gvAxc0vLQ5sjiARATy28NUS0McpNkSwhPJfpOSEGvvc,17439 
+numpy/ma/testutils.py,sha256=HY8srt3kH3lIOo9cbh1cQ027EikCsvOLKiJzWJTKs24,10507 +numpy/ma/testutils.pyi,sha256=7DxEhRCp8m6AA0fSvWi9N2GYry2Q1h1x7X1sdxwcIDU,2359 +numpy/matlib.py,sha256=xGJk9kOBs7qqA8IqhqQuwufNMUvq6Af_mErXxmZHZxw,11018 +numpy/matlib.pyi,sha256=KyqhVD9Bd2VSjun8onDfZZ8dPalhvTbHODonNaQVDrQ,10282 +numpy/matrixlib/__init__.py,sha256=aPXbaN4OYDp9TFA8kGzt2gTBHb3o8Nanw-uM_3XoDF4,255 +numpy/matrixlib/__init__.pyi,sha256=HrRbMtTKOizGPMnwXzAi490CiNfiIQewrxYFkUw7dZI,91 +numpy/matrixlib/__pycache__/__init__.cpython-314.pyc,, +numpy/matrixlib/__pycache__/defmatrix.cpython-314.pyc,, +numpy/matrixlib/defmatrix.py,sha256=HaFYtHEIhmi4KKbwzUJw-5uk-Xah8JS9dRVi2bZH20w,31994 +numpy/matrixlib/defmatrix.pyi,sha256=ysdXgQgGzo2xugU7Gt2EVlhTvkbHKqFA58U2qU_xB5k,11284 +numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/matrixlib/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-314.pyc,, +numpy/matrixlib/tests/__pycache__/test_interaction.cpython-314.pyc,, +numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-314.pyc,, +numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-314.pyc,, +numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-314.pyc,, +numpy/matrixlib/tests/__pycache__/test_numeric.cpython-314.pyc,, +numpy/matrixlib/tests/__pycache__/test_regression.cpython-314.pyc,, +numpy/matrixlib/tests/test_defmatrix.py,sha256=i3KYI4VmOn7uhvUQsDzwkKKjAGdeUStamQgkjGoLgNY,15405 +numpy/matrixlib/tests/test_interaction.py,sha256=1nkRPWmfWfzOS9GjwOQV1D-Ke9W6UtkdwuMMVYcRIcg,12234 +numpy/matrixlib/tests/test_masked_matrix.py,sha256=EDUuKmHgap_OJuKbWmOsvOFKChSLasDsjXr20O_OjS0,9062 +numpy/matrixlib/tests/test_matrix_linalg.py,sha256=c7ldJ6xG66FvJH5FD2TSr617E9PfPMYwkAzrjhTxVYY,2341 +numpy/matrixlib/tests/test_multiarray.py,sha256=WgUxpIxXbzpXTTnt8iG0HC6RoAcTUk0PsEiMHT2361E,572 +numpy/matrixlib/tests/test_numeric.py,sha256=4XC1O2mv7NYYP1siT6I0YAz-Cuhlw6ZN9Du2FmCUz8Y,465 
+numpy/matrixlib/tests/test_regression.py,sha256=2-c4B5aKWbySZ95RlG5WUm8OLBO7sLtM2zIzQMwMv0U,965 +numpy/polynomial/__init__.py,sha256=ynOHE1Mc9eBZMNlroaE9meIW7wZkvo7bpGjFjtDB_AU,6913 +numpy/polynomial/__init__.pyi,sha256=PYepBP5jjaCvPS3pQsl1xwNxDlrr1fSr122kM5g-ITk,743 +numpy/polynomial/__pycache__/__init__.cpython-314.pyc,, +numpy/polynomial/__pycache__/_polybase.cpython-314.pyc,, +numpy/polynomial/__pycache__/chebyshev.cpython-314.pyc,, +numpy/polynomial/__pycache__/hermite.cpython-314.pyc,, +numpy/polynomial/__pycache__/hermite_e.cpython-314.pyc,, +numpy/polynomial/__pycache__/laguerre.cpython-314.pyc,, +numpy/polynomial/__pycache__/legendre.cpython-314.pyc,, +numpy/polynomial/__pycache__/polynomial.cpython-314.pyc,, +numpy/polynomial/__pycache__/polyutils.cpython-314.pyc,, +numpy/polynomial/_polybase.py,sha256=8YgTcSVA4nRyITvWfrvuFGkXAdRB8Cc01R3cMCaa2wI,40549 +numpy/polynomial/_polybase.pyi,sha256=cNLZAPgtsUwASh2wryJIYvL_Mb1QxTDg9W7hq1JJBGM,8029 +numpy/polynomial/_polytypes.pyi,sha256=n2k83CrvRgHiu0iXFX0fJi5ZZn6NdMrEiIXBUwfNGpA,16735 +numpy/polynomial/chebyshev.py,sha256=Ynm99PaDojlSocfwoxNPq9UtK5j8OXYewWcf4OmEGY0,64287 +numpy/polynomial/chebyshev.pyi,sha256=lXlO2RgDRAKae9-eXVqQxpvYAMVqVdzHbCbGhQyCZcs,5278 +numpy/polynomial/hermite.py,sha256=N27hINDzF30xIQYtoba5wjTQQfxawYJTFxTWs1rUad8,56309 +numpy/polynomial/hermite.pyi,sha256=0FPL8PWTXACYXPPwd0VYyLMqsInOueZcMLMyUOOZMZY,2744 +numpy/polynomial/hermite_e.py,sha256=55-P3r7rKLh-ow8GkLKnGRkBGCznDaAi5EsMqtTM_vI,53913 +numpy/polynomial/hermite_e.pyi,sha256=1GyXRnzSLmS9c8Mkc65DicVGm6eA7GeTLWJALXp64Qg,2811 +numpy/polynomial/laguerre.py,sha256=Ka3rdMc-r4ORg5I86B0D6d8OjZi_qF81QvZycmOIvN0,54120 +numpy/polynomial/laguerre.pyi,sha256=RdPkWG5ccTjbTQAGrVhR9G02f9kIJHUYhTBNy8n_2wE,2466 +numpy/polynomial/legendre.py,sha256=xaD3gIuBJoEOD1UmpYxKfpxG7IY20zUdx5_mxgmR2Mk,52705 +numpy/polynomial/legendre.pyi,sha256=QK2mezoldWP4o-KeMKZB0SegU9YVqDWICgDvQVFI06Q,2466 
+numpy/polynomial/polynomial.py,sha256=0Gq3tj431sv96iLtzZ_6aWo_Qtl1ltPTgdDeMSVBTp0,54294 +numpy/polynomial/polynomial.pyi,sha256=2oxYaruIMqD7kEAveWpuRdHWL_0gvisBrESYgQQaHpw,3098 +numpy/polynomial/polyutils.py,sha256=VbYrTs-TW2PMEGqNIDkCQg20yrtGvNkeUOoLnhJ7B9U,23394 +numpy/polynomial/polyutils.pyi,sha256=0VlzUyEb2ISaDekQ-QV__RhEeXaxHiBKi4YVV4qaAIE,10801 +numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/polynomial/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_classes.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_laguerre.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_legendre.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_polynomial.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_polyutils.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_printing.cpython-314.pyc,, +numpy/polynomial/tests/__pycache__/test_symbol.cpython-314.pyc,, +numpy/polynomial/tests/test_chebyshev.py,sha256=N8cKoEf-jyLt-8KnMv56eKm72Jeikk2SZoJBjmYAtug,21247 +numpy/polynomial/tests/test_classes.py,sha256=Nwokju4FZgTYZVO1kEzwDOa2oZ09s-33LUGR6t3Fc0g,19144 +numpy/polynomial/tests/test_hermite.py,sha256=D0oETFi8zgQdqKi79LbAOG5ElwpGkpJvKWduG21qPZA,19219 +numpy/polynomial/tests/test_hermite_e.py,sha256=mXIdu3lCRU8DANPKC9kHQMWz7AOkju0l4iLRSMMNSmo,19559 +numpy/polynomial/tests/test_laguerre.py,sha256=QHc3JnCbNsrjB9-0BWeDoj9UZMiVZ6kmvnXqoJi8Gfw,18151 +numpy/polynomial/tests/test_legendre.py,sha256=Q7dUXM6nMlIXlhn11vXJKXRfDtttjSjXdXsYyfI0yu4,19350 +numpy/polynomial/tests/test_polynomial.py,sha256=qd4oOgjOYMUC5aoJvofgxMITaarVN1DxDhWs973V0oc,24255 +numpy/polynomial/tests/test_polyutils.py,sha256=fmZ2xqo6-eas1wM-QelCEjB2lFezOPPuAVhDsu-dpKc,3882 
+numpy/polynomial/tests/test_printing.py,sha256=W0RgyZlRAiKL-3uJcNgcXPU7-TCX1wvtIdWIBvGyepQ,22116 +numpy/polynomial/tests/test_symbol.py,sha256=JNF0yt5xmUERjflLOydG1pMo6wMRLtaPDcYUkvXcD-o,5592 +numpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/LICENSE.md,sha256=tLwvT6HJV3jx7T3Y8UcGvs45lHW5ePnzS1081yUhtIo,3582 +numpy/random/__init__.pxd,sha256=g3EaMi3yfmnqT-KEWj0cp6SWIxVN9ChFjEYXGOfOifE,445 +numpy/random/__init__.py,sha256=8h45GRbXpL10xJzqw_n6Xgnm6SY_JUYCqH6SpeCnqUY,7693 +numpy/random/__init__.pyi,sha256=aqBCk_fpEZeoE94eNpGu37F6ZWfaDaajac9cWNm97So,2233 +numpy/random/__pycache__/__init__.cpython-314.pyc,, +numpy/random/__pycache__/_pickle.cpython-314.pyc,, +numpy/random/_bounded_integers.cp314-win_amd64.lib,sha256=vUkW3b7dvkNCKrlSwuccQSpA9VgExXWlXdWCXxF3sIQ,18000 +numpy/random/_bounded_integers.cp314-win_amd64.pyd,sha256=hhIqJEA5ZJJ5Z81_dlituY9OrQXbXLCOJDrtSmI0b-w,215040 +numpy/random/_bounded_integers.pxd,sha256=EOKKUlF9bh0CLNEP8TzXzX4w_xV5kivr1Putfdf6yvU,1763 +numpy/random/_bounded_integers.pyi,sha256=PFr_V0xYQhWjKk5oc83cYg_JcNZ2FEKTsjXlnxmkyB8,25 +numpy/random/_common.cp314-win_amd64.lib,sha256=qkMqwoieatc9edM6Nl3C4pIjHnbB_Fp_mGxuFs5C7Y8,2012 +numpy/random/_common.cp314-win_amd64.pyd,sha256=MMKxOBbV3ER62VfUnW-qsJYu3xy_ZRo5pMvVmdXCfZs,172544 +numpy/random/_common.pxd,sha256=e1YxzdJoTvmMIyge3O9yOzpVU7aKRQa2q6cR9c9DB3k,5156 +numpy/random/_common.pyi,sha256=UlOkH40kVn6TU0c6OhG3CocvGnuYAC_46fxZJ7B_7y8,437 +numpy/random/_examples/cffi/__pycache__/extending.cpython-314.pyc,, +numpy/random/_examples/cffi/__pycache__/parse.cpython-314.pyc,, +numpy/random/_examples/cffi/extending.py,sha256=jSc3Vc6Uxl3VWHmoaffez8qG0GTfrFMuUxDhuB9Y5z4,928 +numpy/random/_examples/cffi/parse.py,sha256=2hy5736s-oL5uYvlQf_acpo7srBC8WfffLUhMcm218c,1803 +numpy/random/_examples/cython/extending.pyx,sha256=1lkq6zFifnwaMtAkVG0i_9SbMiNqplvqnHaqUpxqNzs,2344 +numpy/random/_examples/cython/extending_distributions.pyx,sha256=coVzQ6tOCHgZLO4tXIelEKcL3Rh5PIji8xJZGX4YuLA,3961 
+numpy/random/_examples/cython/meson.build,sha256=q_IFcVs_qzERJD_-8uaDnjps3QdaW49okZMbFtwkAPo,1747 +numpy/random/_examples/numba/__pycache__/extending.cpython-314.pyc,, +numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-314.pyc,, +numpy/random/_examples/numba/extending.py,sha256=mo0o4VM-K1vUQxNl_Uqr35Acj9UewnkglS7-dFX8yuw,2045 +numpy/random/_examples/numba/extending_distributions.py,sha256=vQdhhOpuGlpG8hk-mKWv7Li3-rwvelv-1c67odurt9o,2103 +numpy/random/_generator.cp314-win_amd64.lib,sha256=rkoo3P4si_fIOszYC5Us4P_a0Mhjfbr0--HF-xyLp4k,18400 +numpy/random/_generator.cp314-win_amd64.pyd,sha256=nzii8IvTTYiElpU0Wyvmmr4f58oYzuKcBPpvyYHXrtE,596480 +numpy/random/_generator.pyi,sha256=89Tnzx4Si2MtC1aqF8MXXs5h66sm1CkBU6uXkec_eCg,25321 +numpy/random/_mt19937.cp314-win_amd64.lib,sha256=_EM6iSX6msUnRxa61m0GKtkwufE5nUHSRaMp1j95OoE,2032 +numpy/random/_mt19937.cp314-win_amd64.pyd,sha256=GaSVm1hu4BKZ3T_pJaVlYuaffgmtwlepvYI7tKw6T5A,84992 +numpy/random/_mt19937.pyi,sha256=50ES_I-RoPvqqAYCqm3gX8NVcu0R7O5Ru5LSLpEuHzg,849 +numpy/random/_pcg64.cp314-win_amd64.lib,sha256=Ryu1yjLYEtFtiry5cKHkPpMVcHffHlFQ_LP3ZGxv554,1996 +numpy/random/_pcg64.cp314-win_amd64.pyd,sha256=PUBXz3TAPlmosGhTqCzdGwhMlc7teVY_h82LaU1bDjg,94720 +numpy/random/_pcg64.pyi,sha256=PyZveXgEMEoHRprjXceyFBCwHkNctxcnSxk-tUWEzXE,1214 +numpy/random/_philox.cp314-win_amd64.lib,sha256=6O7NprFrVz3b4XbGOEhF4Fp3ciQwe1fS2JOoTTlaNhA,2012 +numpy/random/_philox.cp314-win_amd64.pyd,sha256=SLyhydFkufgD5y8GFRxcROcTe-7PPe4_0nwB3HepejI,79872 +numpy/random/_philox.pyi,sha256=qTNeVdTZEZubisdfN5avuGoR6qMElesP5MC4gEMna9A,1049 +numpy/random/_pickle.py,sha256=8fmUcgzHhq_F_eyesNUdFjV07Br1yzLBLsfe-GWyQrE,2830 +numpy/random/_pickle.pyi,sha256=hj1oBasr_ejSeUlT3-q7luwC4DQFqgCPXIL0wxuFjt4,1651 +numpy/random/_sfc64.cp314-win_amd64.lib,sha256=KN8k53d_Bhn_R2Dmrw2-xqysjtkAZRcu05CumMqlGFM,1996 +numpy/random/_sfc64.cp314-win_amd64.pyd,sha256=nSlGq750pIdoVQKMPfDfaSOsUzPg3MIsQR7OiWmGEU0,58880 
+numpy/random/_sfc64.pyi,sha256=qGwQgSmP2_RUa-C2y0r93o9VTMxJnD1vp_LVgWsTAC8,716 +numpy/random/bit_generator.cp314-win_amd64.lib,sha256=6_5-ObjW2kCBrlXCWQjbPhW1pT-cGtrw7q_C5KDwyUE,2120 +numpy/random/bit_generator.cp314-win_amd64.pyd,sha256=PsjANkwNbxXHZTCSJoLrhKJfw-V91gj7zfPkw52AWig,160768 +numpy/random/bit_generator.pxd,sha256=TFR72-UsWpKMmiBsTg7eyiW-FT3hy6miVx2N49PVt-4,1244 +numpy/random/bit_generator.pyi,sha256=y9MsyLg-HH6BgrFRXtSyc4PY_H-pszQw57H8frQS5wQ,3726 +numpy/random/c_distributions.pxd,sha256=02WeqbzQ4heQ1cZ7ShePejxmt5AOI5kTstBZ5w2WxD0,6454 +numpy/random/lib/npyrandom.lib,sha256=QfX9LkrOWthS-aer3RqHi9s0_0B8bcjo6diHGgEqBvY,149648 +numpy/random/mtrand.cp314-win_amd64.lib,sha256=TNwsO5LojsdmXaM4w9jOw-OMQx0aCwPQEqq556Ugu18,17122 +numpy/random/mtrand.cp314-win_amd64.pyd,sha256=glG1Mmqi--mOvoREZGpD3tfjPsyuczwyIa0_FM2PMlo,496128 +numpy/random/mtrand.pyi,sha256=BnWlQNKdxYKPNaGys4YZeAqigQNcMyKNagl6u2-Wzu0,24560 +numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_direct.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_extending.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_random.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_randomstate.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_randomstate_regression.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_regression.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_seed_sequence.cpython-314.pyc,, +numpy/random/tests/__pycache__/test_smoke.cpython-314.pyc,, +numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/data/__pycache__/__init__.cpython-314.pyc,, 
+numpy/random/tests/data/generator_pcg64_np121.pkl.gz,sha256=EfQ-X70KkHgBAFX2pIPcCUl4MNP1ZNROaXOU75vdiqM,203 +numpy/random/tests/data/generator_pcg64_np126.pkl.gz,sha256=fN8deNVxX-HELA1eIZ32kdtYvc4hwKya6wv00GJeH0Y,208 +numpy/random/tests/data/mt19937-testset-1.csv,sha256=bA5uuOXgLpkAwJjfV8oUePg3-eyaH4-gKe8AMcl2Xn0,16845 +numpy/random/tests/data/mt19937-testset-2.csv,sha256=SnOL1nyRbblYlC254PBUSc37NguV5xN-0W_B32IxDGE,16826 +numpy/random/tests/data/pcg64-testset-1.csv,sha256=wHoS7fIR3hMEdta7MtJ8EpIWX-Bw1yfSaVxiC15vxVs,24840 +numpy/random/tests/data/pcg64-testset-2.csv,sha256=6vlnVuW_4i6LEsVn6b40HjcBWWjoX5lboSCBDpDrzFs,24846 +numpy/random/tests/data/pcg64dxsm-testset-1.csv,sha256=Fhha5-jrCmRk__rsvx6CbDFZ7EPc8BOPDTh-myZLkhM,24834 +numpy/random/tests/data/pcg64dxsm-testset-2.csv,sha256=mNYzkCh0NMt1VvTrN08BbkpAbfkFxztNcsofgeW_0ns,24840 +numpy/random/tests/data/philox-testset-1.csv,sha256=QvpTynWHQjqTz3P2MPvtMLdg2VnM6TGTpXgp-_LeJ5g,24853 +numpy/random/tests/data/philox-testset-2.csv,sha256=-BNO1OCYtDIjnN5Q-AsQezBCGmVJUIs3qAMyj8SNtsA,24839 +numpy/random/tests/data/sfc64-testset-1.csv,sha256=sgkemW0lbKJ2wh1sBj6CfmXwFYTqfAk152P0r8emO38,24841 +numpy/random/tests/data/sfc64-testset-2.csv,sha256=mkp21SG8eCqsfNyQZdmiV41-xKcsV8eutT7rVnVEG50,24834 +numpy/random/tests/data/sfc64_np126.pkl.gz,sha256=MVa1ylFy7DUPgUBK-oIeKSdVl4UYEiN3AZ7G3sdzzaw,290 +numpy/random/tests/test_direct.py,sha256=sHeziHsn1iDt9kFJvoQqa4X_EV8RmdJkR2zM7zn1HQI,20618 +numpy/random/tests/test_extending.py,sha256=Kbem3Is-tYB0hiTem8_B6u5jYGv_tZNabrg2WAMTEzY,4820 +numpy/random/tests/test_generator_mt19937.py,sha256=t6iPLJTxdqQKvlrvmSNJn_umqit25A3X1y_nlexd7CQ,121685 +numpy/random/tests/test_generator_mt19937_regressions.py,sha256=y5urqQyDTYBy7xuuYmRMVq7sZFLC2a5HIQV42ZQic4c,8859 +numpy/random/tests/test_random.py,sha256=NHNX5yUlOmhSqUAkgnFuAR5vMk6VpGHFtiUABW9PAGA,73002 +numpy/random/tests/test_randomstate.py,sha256=tjY3PCEP4KkqdxWGHq5w36aLrpeaXjk4QMe97xYJA4Q,89848 
+numpy/random/tests/test_randomstate_regression.py,sha256=yZ_PzRwhYsce-5W2aAPJbiZMNXjOU4bK-WdomdHwes4,8260 +numpy/random/tests/test_regression.py,sha256=i-v9J5LdvqrurawZrE-q657A_85y-ivVB4Bj7bp5z1U,6499 +numpy/random/tests/test_seed_sequence.py,sha256=J_peqBY4MhduYR1fFYAfcN2wvyC7P6xmV4xbu6j9nbQ,3389 +numpy/random/tests/test_smoke.py,sha256=aTjGNBx4HDSe5M7SbmhO1-Fj1CqVhDyE5Ldgjk_a4-Y,30817 +numpy/rec/__init__.py,sha256=cgaZYq6w4qNo81NZGO-E4vkSj9eSO4SgNMOqiLglp4k,85 +numpy/rec/__init__.pyi,sha256=gGrssJCiTrltTcwaCjXB8saZBWiWCHOr2mJmUsFdU50,370 +numpy/rec/__pycache__/__init__.cpython-314.pyc,, +numpy/strings/__init__.py,sha256=JzKUIYVjG4wRzsKVAVg9XWrq2vjjdi679CZc_Txfn4w,85 +numpy/strings/__init__.pyi,sha256=LvbeB_oUcN7O0GUJx3gzfYs-KPPVhHm6EhJIG3hJAHQ,1416 +numpy/strings/__pycache__/__init__.cpython-314.pyc,, +numpy/testing/__init__.py,sha256=0Qkz0ITfPKHDe9kObKTEx3fdDGAb9tfMikpdPuiapyA,603 +numpy/testing/__init__.pyi,sha256=xZNpS-qElFjn5Ueka4pywGO-rI_jf52GKH6uDNCfdXg,2295 +numpy/testing/__pycache__/__init__.cpython-314.pyc,, +numpy/testing/__pycache__/overrides.cpython-314.pyc,, +numpy/testing/__pycache__/print_coercion_tables.cpython-314.pyc,, +numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__pycache__/__init__.cpython-314.pyc,, +numpy/testing/_private/__pycache__/extbuild.cpython-314.pyc,, +numpy/testing/_private/__pycache__/utils.cpython-314.pyc,, +numpy/testing/_private/extbuild.py,sha256=ausYJDf2eOq9jYC3x_tFO_vBrMhjqjruVqHxgHjla8w,7966 +numpy/testing/_private/extbuild.pyi,sha256=f2h7VxBrN2uwqJIwB8pmv3c1G-sH7d3V4HCIL3GHYTA,678 +numpy/testing/_private/utils.py,sha256=uzxy7hWM8XH3MLyYdOOW5rmQRc78VDqDMjpYE2ytASY,101563 +numpy/testing/_private/utils.pyi,sha256=MAJBkXgDX7w49uUn1a8_orw9Gumd2aSzlPtgQ9ZfpAU,13730 +numpy/testing/overrides.py,sha256=rldmRQXc5c9jEs4hghDXvHA4sJD7HuMcpfGMmzSML9I,2218 
+numpy/testing/overrides.pyi,sha256=ceLN7L1s2pQvLZQTIh89A_MoOnkkJoe-abXKT0Sk1bk,406 +numpy/testing/print_coercion_tables.py,sha256=lT8IdI1_lantwFVG0C0JagO0mUxnnXCA5wnPI81czYQ,6493 +numpy/testing/print_coercion_tables.pyi,sha256=ecFs2Qse4_H9AQC93fv5b2jmWMol941uoYlSuxhmNSU,846 +numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/testing/tests/__pycache__/test_utils.cpython-314.pyc,, +numpy/testing/tests/test_utils.py,sha256=BV8Aj5dGqZr32CvSO1Ag-idnHAJdDLnYKFSrJkXHcnQ,81824 +numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/tests/__pycache__/test__all__.cpython-314.pyc,, +numpy/tests/__pycache__/test_configtool.cpython-314.pyc,, +numpy/tests/__pycache__/test_ctypeslib.cpython-314.pyc,, +numpy/tests/__pycache__/test_lazyloading.cpython-314.pyc,, +numpy/tests/__pycache__/test_matlib.cpython-314.pyc,, +numpy/tests/__pycache__/test_numpy_config.cpython-314.pyc,, +numpy/tests/__pycache__/test_numpy_version.cpython-314.pyc,, +numpy/tests/__pycache__/test_public_api.cpython-314.pyc,, +numpy/tests/__pycache__/test_reloading.cpython-314.pyc,, +numpy/tests/__pycache__/test_scripts.cpython-314.pyc,, +numpy/tests/__pycache__/test_warnings.cpython-314.pyc,, +numpy/tests/test__all__.py,sha256=xZkp3RbMNpx4bFTvILKV8KTEMh5lvId7xcrhQS4LTe0,232 +numpy/tests/test_configtool.py,sha256=zieFjnFWqOsyLbtJJYIIdMMdThkDm-glvNGvN3_y7ow,1863 +numpy/tests/test_ctypeslib.py,sha256=7wJt8Im7-BUmTmtZ6rVeuHt__erJRRlK87dMaFZmL8E,13215 +numpy/tests/test_lazyloading.py,sha256=5YyD-WDS6uI_rIQBWmP6z7rCA9jtv4HAQ57NxysQKDs,1304 +numpy/tests/test_matlib.py,sha256=KmBMo3M7IARB8K5NLYk611RtsfW10_LgCQEBjdLEM9g,1913 +numpy/tests/test_numpy_config.py,sha256=8tTLdQi34xV1QTZw5TdaDQ9y25TVncGjvcmcXnQ2PEI,1364 +numpy/tests/test_numpy_version.py,sha256=EhDAFEamNCmRAiJEUSGtPa21IipODWrf6MN2Bem0az8,1798 
+numpy/tests/test_public_api.py,sha256=iH-YBfcSa3Jc7_Mbxg4uWTNnJ1ZD9BDZn40L9jzIZ9g,28813 +numpy/tests/test_reloading.py,sha256=tEcoOR45hGgiQ-ocqrcRaKc7qKZXDwKM3ia6ivYpbm0,2762 +numpy/tests/test_scripts.py,sha256=KdTVn4N22QPEssua-dM83xExBCoVqIccBKaNBcjdVkE,1694 +numpy/tests/test_warnings.py,sha256=9-KePaaWfHgIAn6hVIy1gyio31zwMw8esAUYtvWTfzY,2499 +numpy/typing/__init__.py,sha256=7MEzTbsBZHmZmwuaEt2VsYQZPAjM2EGpa10Mdn35ceU,7217 +numpy/typing/__init__.pyi,sha256=gbeO9KFv0nrMve7BkrerjG92x442mEqEBDQZQuw2maI,130 +numpy/typing/__pycache__/__init__.cpython-314.pyc,, +numpy/typing/__pycache__/mypy_plugin.cpython-314.pyc,, +numpy/typing/mypy_plugin.py,sha256=eI-0xwotV4uhV7Dkpl6uuH15jSEwovgjfukbcLBRr34,7046 +numpy/typing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/typing/tests/__pycache__/__init__.cpython-314.pyc,, +numpy/typing/tests/__pycache__/test_isfile.cpython-314.pyc,, +numpy/typing/tests/__pycache__/test_runtime.cpython-314.pyc,, +numpy/typing/tests/__pycache__/test_typing.cpython-314.pyc,, +numpy/typing/tests/data/fail/arithmetic.pyi,sha256=AMohW1_h2jlX8YPHH_sMm8lGS8ReUC0RkyHAQYe2k8I,3812 +numpy/typing/tests/data/fail/array_constructors.pyi,sha256=oDNMGHZnzHOO5_3zGxv5rQSu6kUlE_tgSuHcLpHvtKk,1234 +numpy/typing/tests/data/fail/array_like.pyi,sha256=9EcZ306eJOZrHSdWlTe0cEpYvmODaQ_nItcbhWaxFy8,511 +numpy/typing/tests/data/fail/array_pad.pyi,sha256=ExxQs_3s8xvgEsvj6O4aGWTIc0ukXtAtUM9JUs5D6pQ,143 +numpy/typing/tests/data/fail/arrayprint.pyi,sha256=TzEdr6oiRnYV1ZOGAYS6zzDWuRHl-aV8e407NnMzlxg,543 +numpy/typing/tests/data/fail/arrayterator.pyi,sha256=L2AC5qFmBrw9klehdV7d7gHvKpPX-7H1Iz9wryDQVQA,477 +numpy/typing/tests/data/fail/bitwise_ops.pyi,sha256=R-h3fWesS5CxenzKEYuyr2IpqSyMlGHQOAlro_smez4,397 +numpy/typing/tests/data/fail/char.pyi,sha256=PsgOrQuNsO2TD2HohOiOt7sYbs2q08hSEWxiEuiks1E,2737 +numpy/typing/tests/data/fail/chararray.pyi,sha256=LrTSnLGGEZrjeW_qpS-TGWz8vLVT0l7K5NdcFFucw24,2310 
+numpy/typing/tests/data/fail/comparisons.pyi,sha256=_T0ZdZ0GYV_BkOqJjIzuWEQpN0lJgCc0fvx250U4x4o,763 +numpy/typing/tests/data/fail/constants.pyi,sha256=MnjvGyU_QKKiSZ-BzsHdojlRKz7GGvqQmu0mxbewcao,81 +numpy/typing/tests/data/fail/datasource.pyi,sha256=JO5en3G84i6LDK5b9JRDMg2GoOn5QBFI5qp_pAvupPY,436 +numpy/typing/tests/data/fail/dtype.pyi,sha256=TeNsugh9LvvX-u08MkROtFM_B080JZheXexyw9Wsq6E,322 +numpy/typing/tests/data/fail/einsumfunc.pyi,sha256=DeLM2jL7ZBirjw9H2dfjXxbHwx2Y9gu3wp2NwDLYuIQ,470 +numpy/typing/tests/data/fail/flatiter.pyi,sha256=eWphjcSJXi_EM1MTT_6kB0MgO_jy_vPJkYtk1CMz6Ec,1169 +numpy/typing/tests/data/fail/fromnumeric.pyi,sha256=hC2Nc8tUVV5-i43ak0WzB2wejwPsn2yIgQ6H13b_Sh8,5836 +numpy/typing/tests/data/fail/histograms.pyi,sha256=vkSk1v1Na4VUnRlwM8sqhxqPjhg2HH_9YtXnYNEHgPM,388 +numpy/typing/tests/data/fail/index_tricks.pyi,sha256=2VDHr1Of7fqh2uu0wf-epw7VZD2Fy0-W9ZPe1Fa87-8,531 +numpy/typing/tests/data/fail/lib_function_base.pyi,sha256=HTiFQTO8plsdCb-F1VvA_wz50GswJhwB3od9mTDkkIE,2723 +numpy/typing/tests/data/fail/lib_polynomial.pyi,sha256=xjpuJ7DVIRxQok6f0-CGsSVg6QQuzZmyiURslKF3ctw,966 +numpy/typing/tests/data/fail/lib_utils.pyi,sha256=iBUetgF7F39F-yV2DBhIqvKywzK8kZoAL9N-kpR3pyk,101 +numpy/typing/tests/data/fail/lib_version.pyi,sha256=EfAZTQpzTJ1UCY3p9envuaJUCqforENP_QP_DVWU7Do,160 +numpy/typing/tests/data/fail/linalg.pyi,sha256=r9prpIwrCPu7pnoS2JuasfnA8TJASyJR6TCUfYaYAG0,1592 +numpy/typing/tests/data/fail/ma.pyi,sha256=bj3awspyo8Cla0q39oY9G6UQGBddG5RGTewTYAO4lcQ,7161 +numpy/typing/tests/data/fail/memmap.pyi,sha256=U8_bCFw8m8x7ZlWSpYmmKpC1BS8oCEwUEdUgCRf2FSg,174 +numpy/typing/tests/data/fail/modules.pyi,sha256=f14qw9HXlwJ7FARKYxNhaPou8OZ8mQXsU4P9cpFxbWI,620 +numpy/typing/tests/data/fail/multiarray.pyi,sha256=qPGgrMMc0-SLuHJk1k_RaiPvs3CO-Ob6B9vFOKaFr44,1718 +numpy/typing/tests/data/fail/ndarray.pyi,sha256=8wZpNNatpxbxNu8G2N_R0P-3UVZLVE_z6ZGmdndSWPM,392 +numpy/typing/tests/data/fail/ndarray_misc.pyi,sha256=S5wtoAY-Y1f7MM7kjW4OOMtJnuF5uLTvCll21nJgyvc,1451 
+numpy/typing/tests/data/fail/nditer.pyi,sha256=Sp3-l4RWopWY5ekm7yoINo4UhVYaKl9vW56aV14QDcc,332 +numpy/typing/tests/data/fail/nested_sequence.pyi,sha256=RZOHSAb-tA5eihm-dsD-r2WJqKaX-JbQ-fek0hw94nY,481 +numpy/typing/tests/data/fail/npyio.pyi,sha256=RrjdSUJE-K_dOF8Or_5bfpT2LYgFgHbS9fwVmWuJCBg,626 +numpy/typing/tests/data/fail/numerictypes.pyi,sha256=igYnLB91EuhrNDihL9IxwlM_xhwphCgMjWimzDaSNrk,129 +numpy/typing/tests/data/fail/random.pyi,sha256=ng4fxmdk1RFQdn3PNF7qSO2c2hVxQ07lwYwQGrSPsKQ,2965 +numpy/typing/tests/data/fail/rec.pyi,sha256=HkJQLYK6u8thA6alMB4pUeMBPXr6mgnqqTYmXSiM2PM,758 +numpy/typing/tests/data/fail/scalars.pyi,sha256=3ryg0rO8QAcUsN1VrXDnVp43AUSGBoEczk2Vm2BAl6Q,2924 +numpy/typing/tests/data/fail/shape.pyi,sha256=0uCGDpXresJPxRTEgNH8bP_FvCT0wV8Ob5Bu2_ZS428,139 +numpy/typing/tests/data/fail/shape_base.pyi,sha256=jfOTNjSqDVIGlAmwbORYfifftjvW-W4ngMRP883oSrU,165 +numpy/typing/tests/data/fail/stride_tricks.pyi,sha256=kjsv-sHn8i-oxtLUcpeIrOzS00Hk6Af8clqnY-vwg6A,339 +numpy/typing/tests/data/fail/strings.pyi,sha256=T7nJ-2UqEa2ld_bZW4king8PnAG9w9LompbD9rtIb7Y,2385 +numpy/typing/tests/data/fail/testing.pyi,sha256=zBTsTLi6xASHSeRK4f9ub1810m33xrb2qFyGvH93iqo,1427 +numpy/typing/tests/data/fail/twodim_base.pyi,sha256=XqAijZdegv0eHZ4pZPTo9ruwkVaMT168nbqyZV0HcHM,1171 +numpy/typing/tests/data/fail/type_check.pyi,sha256=u65AFyNm-J4gQu1brfsGuyJ1pN8tDJQxtRSYDxebUfE,382 +numpy/typing/tests/data/fail/ufunc_config.pyi,sha256=G17kqlgWREHukftCiDnCBRDjum5H1f8UIIifakzNBZc,610 +numpy/typing/tests/data/fail/ufunclike.pyi,sha256=7dpF86m9EeY_69dl_zX0joQvFIpKPXGQRRUJOipWTOw,670 +numpy/typing/tests/data/fail/ufuncs.pyi,sha256=7fKHGG69SFRRkqO5I-QqV-pVkQ3LTCPWgwgzOHGmhwI,522 +numpy/typing/tests/data/fail/warnings_and_errors.pyi,sha256=kKR53mZ3zy1-JmyGgmWjBmJw04ipqnTzqu9B4nU5OlQ,205 +numpy/typing/tests/data/misc/extended_precision.pyi,sha256=g80l5fCRis1PKm55PutBymIhrs-v3vYF2d5ZqiajcbI,331 +numpy/typing/tests/data/mypy.ini,sha256=6Uoh_q6A2Jod1K_65IRTcaDffDktTEIK6FaUHHzCWPA,222 
+numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_like.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/dtype.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/literal.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/ma.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/mod.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/modules.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/nditer.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/numeric.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/random.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-314.pyc,, 
+numpy/typing/tests/data/pass/__pycache__/scalars.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/shape.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/simple.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-314.pyc,, +numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-314.pyc,, +numpy/typing/tests/data/pass/arithmetic.py,sha256=HuUKU7DSCqnO9oCLKQloFSTTgp4JBdhfyg6bp-lZdNU,8380 +numpy/typing/tests/data/pass/array_constructors.py,sha256=NXGxCHOAeh8uxlP46EUnbo_PXl0OX62WnAgBvrDvZno,2586 +numpy/typing/tests/data/pass/array_like.py,sha256=VWGx8wSe5z5XR2uOkNVvYNhMMkaBbhLdQHVlixbl4nM,1075 +numpy/typing/tests/data/pass/arrayprint.py,sha256=NTw1gJ9v3TDVwRov4zsg_27rI-ndKuG4mDidBWEKVyc,803 +numpy/typing/tests/data/pass/arrayterator.py,sha256=qPyDI_38M155brLW25CyVnf6-2Zfn0MEnj8NqadpEgA,422 +numpy/typing/tests/data/pass/bitwise_ops.py,sha256=i5NHgKmg2q27z6mlCjKjO3haydGRPZmUL3Js-5jAt20,1090 +numpy/typing/tests/data/pass/comparisons.py,sha256=gWDQU6bGY09YVG9MEYLjkJH9pdOGql1PFN6h-5pJNEI,3606 +numpy/typing/tests/data/pass/dtype.py,sha256=YRsTwKEQ5iJtdKCEQIybU_nL8z8Wq9hU-BZmEO7HjQE,1127 +numpy/typing/tests/data/pass/einsumfunc.py,sha256=CXdLvQsU2iDqQc7d2TRRCSwguQzJ0SJDFn23SDeOOuY,1406 +numpy/typing/tests/data/pass/flatiter.py,sha256=JWG5gQ9RqSDrg9V2KlSgGtANnB7JDRFJM9e14wJ0Y_g,288 +numpy/typing/tests/data/pass/fromnumeric.py,sha256=bP0hEQYYQJOn7-ce0rAf8cvuxZX3Ja6GSSlCtNhEBUM,4263 +numpy/typing/tests/data/pass/index_tricks.py,sha256=ymUrTbHcpYRgAPVxhwdXF0f_1aFBDBZEp_Ue3BxddZE,1466 +numpy/typing/tests/data/pass/lib_user_array.py,sha256=qvCmq32uZlgGL76u8sygvguBiTphLrt8X-mVaYnex0A,640 +numpy/typing/tests/data/pass/lib_utils.py,sha256=XEc0v7bwES-C5D4GkSJQSSTSAl5ng7tq6tCWj3jxbCM,336 +numpy/typing/tests/data/pass/lib_version.py,sha256=TlLZK8sekCMm__WWo22FZfZc40zpczENf6y_TNjBpCw,317 
+numpy/typing/tests/data/pass/literal.py,sha256=wZ7S1bDJFyTkrQbgYRSXZGobU976J2F_620-lv_hZTk,1561 +numpy/typing/tests/data/pass/ma.py,sha256=_gssQTkMQTtFIec3A9OSSk50o_LHhFp1vp3jBlvkuIs,4121 +numpy/typing/tests/data/pass/mod.py,sha256=IZbpRvH19U4hbLJfm0CKyyihJ9bv-g561cYr74bDlao,1720 +numpy/typing/tests/data/pass/modules.py,sha256=buzLurat4TIGmJuW3mGsGk7dKNmpBDfQOWWQXFfb9Uc,670 +numpy/typing/tests/data/pass/multiarray.py,sha256=OGU_AH571v22-L_IHGNRdd4XcbqlZXtZLxZLYZdiBqs,1456 +numpy/typing/tests/data/pass/ndarray_conversion.py,sha256=5Sy_SGRL_Nkb9j0yhsj0DtN9ba4aBc_KRLsUZK1SyNA,1530 +numpy/typing/tests/data/pass/ndarray_misc.py,sha256=A7PIVuXhVcN06uP_D-ZUdo_I8Xl1m74DZpjS4WLdTrY,3555 +numpy/typing/tests/data/pass/ndarray_shape_manipulation.py,sha256=yaBK3hW5fe2VpvARkn_NMeF-JX-OajI8JiRWOA_Uk7Y,687 +numpy/typing/tests/data/pass/nditer.py,sha256=1wpRitCNZKCC3WJVrFSh22Z1D8jP2VxQAMtzH8NcpV8,67 +numpy/typing/tests/data/pass/numeric.py,sha256=ZWN3MW8Lk_qeqx3VC9cMOekFK21o02xnkpUJrTWYeUI,1582 +numpy/typing/tests/data/pass/numerictypes.py,sha256=JaCjk4zQPOI67XzqGyi3dI-GUMFM2AvDuniwzSQ7_Rk,348 +numpy/typing/tests/data/pass/random.py,sha256=zMhPsSbYUjSou6PggCqp3Q-hYDYZytT7IeF-wBAB5R0,63323 +numpy/typing/tests/data/pass/recfunctions.py,sha256=8nqw-j8pbjUNo6f92fFjAwm57zzDLP2PrY6DehmWv8k,5138 +numpy/typing/tests/data/pass/scalars.py,sha256=IEkuEFLTVcsPQsSqft9K3iRDJNWRoVB6r_y3m4yzGos,3974 +numpy/typing/tests/data/pass/shape.py,sha256=oSkR0akFaIBrXFcX4D14ZiR2Yw1rHsZSoLKFL_Qjav8,458 +numpy/typing/tests/data/pass/simple.py,sha256=F6Qi7OLrbUrcZJiBGGPEjuyvk1M7jpOaIyewTCJVW5E,2925 +numpy/typing/tests/data/pass/ufunc_config.py,sha256=gmMTPrq8gLXJZSBQoOpJcgzIzWgMx-k_etKPV4KSTJk,1269 +numpy/typing/tests/data/pass/ufunclike.py,sha256=9t-6R0b7HmE5jczJp0dcNOP7Iz9zaUE0918AgFfeJfY,1403 +numpy/typing/tests/data/pass/ufuncs.py,sha256=gvdcCNoGUfN0CnQmn6k1j6ghdt8zGkJdcRcgctmU48A,438 +numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=q3c1SmMwhyYLYQsLjK02AXphk3-96YltSTdTfrElJzQ,167 
+numpy/typing/tests/data/reveal/arithmetic.pyi,sha256=JHcs_4SzHUK1uDM91UBlh2WTaXpU8ShW97TWT-RMcPA,27518 +numpy/typing/tests/data/reveal/array_api_info.pyi,sha256=TOvbhGUiNYcKv9KQs0A4-vzM_l9vYWvDXoqBOOLo1d4,3087 +numpy/typing/tests/data/reveal/array_constructors.pyi,sha256=eqe96eYVSybTarmGhyIe3CxsOSB3Pkfah6DQxYWl650,15042 +numpy/typing/tests/data/reveal/arraypad.pyi,sha256=hJeTeX66VR9hDRliaP195klegP9cmBnrZqDsEGLm30A,931 +numpy/typing/tests/data/reveal/arrayprint.pyi,sha256=oQGscSvF8perObX6j1LduWS1eogRYyl7NM8piYuZPxc,797 +numpy/typing/tests/data/reveal/arraysetops.pyi,sha256=-Fhtnyc-uzDT5zUlh7xSLmdkK05Y14MqpgdG38Xliu0,4485 +numpy/typing/tests/data/reveal/arrayterator.pyi,sha256=-bqtQE71AcS2cisOcRAaDRxvp_STs41Dbzu7qUvaGRc,1054 +numpy/typing/tests/data/reveal/bitwise_ops.pyi,sha256=z60Mwun7flBSAxDh_BHksPzPSikxvKnBkde6cRRnNuE,4803 +numpy/typing/tests/data/reveal/char.pyi,sha256=KekJM9d_iNnAa9OU98yiWrR7bJ4ZpxvUr04mVkWOdrA,11777 +numpy/typing/tests/data/reveal/chararray.pyi,sha256=vxQbEhNEOX6SghlIBBtJLNIc5XTiP_msVTRqBHja4gw,5403 +numpy/typing/tests/data/reveal/comparisons.pyi,sha256=h0PYWf2F-mRNAK7e_3H4-hu0msj2KxU3xaHb2aoqFnc,7445 +numpy/typing/tests/data/reveal/constants.pyi,sha256=DHycCQpNsu52JrhZ_Qds7f0F4U0rD4zWaVMOLwWR08o,347 +numpy/typing/tests/data/reveal/ctypeslib.pyi,sha256=TuMyVAki_VnT2jYiCGPNOvb9W3iWGL2sKKkbKul_5P0,4215 +numpy/typing/tests/data/reveal/datasource.pyi,sha256=07PFHAOF4kL5Wqq5pt1IKvx6VPYBZ9IK1_WXo_3II1E,606 +numpy/typing/tests/data/reveal/dtype.pyi,sha256=psnbSgAYVrU3MXJyV470NjQeqbdd9Re6q4_3n334ETg,4865 +numpy/typing/tests/data/reveal/einsumfunc.pyi,sha256=R-ve3Dda6S1ewKjlDoQ1XWx4Adj-JxFe9TFdrPeX2sA,1965 +numpy/typing/tests/data/reveal/emath.pyi,sha256=HGPBuE6CTTSHirCCQMklDGkvamoIILqpREfP9NJh49I,2179 +numpy/typing/tests/data/reveal/fft.pyi,sha256=uNGippaypaPH9ZmwaOXwPdHXDh_TW4BJlEkv9idK08Y,1638 +numpy/typing/tests/data/reveal/flatiter.pyi,sha256=WptTeykBGKmYBKse08jQ2nlP_706LL8qaqhm3IHjLT0,3348 
+numpy/typing/tests/data/reveal/fromnumeric.pyi,sha256=FZNJHn7P-ddjrSSSUr-3qqWShbiGC5qkodDXXfvHp24,15526 +numpy/typing/tests/data/reveal/getlimits.pyi,sha256=DoHEcyug87JBCouEKZrf2pF2vliRF9rcQenn0Nd8MKo,1639 +numpy/typing/tests/data/reveal/histograms.pyi,sha256=2l6Af9c4cru2C6xSuL0iqW78OqEo4nrWFqARC5nA0Xo,1282 +numpy/typing/tests/data/reveal/index_tricks.pyi,sha256=UoYlUdqV2jgImmKFDOkLECoLN1ZuTWs7P0VLczBi8a8,3311 +numpy/typing/tests/data/reveal/lib_function_base.pyi,sha256=mq7dYJPYd98XqBnc_n1V6JgX2HvgoB8Q_weGyUC8TjA,20372 +numpy/typing/tests/data/reveal/lib_polynomial.pyi,sha256=fpINlyW5XcQl5I2sjXbb6REu8qosYsemET_GxjBAOtY,5849 +numpy/typing/tests/data/reveal/lib_utils.pyi,sha256=0mEaIvr9BkYx2Gmmj7M0vvG_o4c0TgD0uU8uJsY7Jl0,453 +numpy/typing/tests/data/reveal/lib_version.pyi,sha256=5-H7IY5M-OT0Wu7d0FhO95s5jYafpMb2s-sYPNEisaQ,592 +numpy/typing/tests/data/reveal/linalg.pyi,sha256=4kzgLlgrDlYjvNlzosyqdJ2AV83AJqJsr08aZcGRl-4,7346 +numpy/typing/tests/data/reveal/ma.pyi,sha256=vfKs_ua565soL5Ik4M_RMNbd3EbNx_vKNXSKh32qhaY,51706 +numpy/typing/tests/data/reveal/matrix.pyi,sha256=xyhYRFmQUL9GdFmZnWNLPJPaRRmjJLdpozP6uUBHnUE,2903 +numpy/typing/tests/data/reveal/memmap.pyi,sha256=KdMkvJgXmWwOAE6H0BKQjB-A3go2keJAem59FUJtmVM,738 +numpy/typing/tests/data/reveal/mod.pyi,sha256=4d8e_54VcJJKaSHy_vVwLzQ25Nt6GJ5qHxFIxkkl0ec,7356 +numpy/typing/tests/data/reveal/modules.pyi,sha256=dgB-VZ5GQpABSsnAsnwfUx781fv7dia3TrDnguVippg,1909 +numpy/typing/tests/data/reveal/multiarray.pyi,sha256=C_u26_2lvuhh3x5zy-qQctGO1ks1gMsC_pQ3DtXhu5o,8250 +numpy/typing/tests/data/reveal/nbit_base_example.pyi,sha256=9eWyNVc8VjggFJ112bPPKx5MOTLr5iurhIaETKjDc0s,694 +numpy/typing/tests/data/reveal/ndarray_assignability.pyi,sha256=F01Uub9Ect05XD8qz18nWpZ3AzhnTt-Ti8wsROZ1HWg,2993 +numpy/typing/tests/data/reveal/ndarray_conversion.pyi,sha256=KQIltfDd9yLcDTg86NCRDzQjOHXn1e6sYEQX3PcYb64,3335 +numpy/typing/tests/data/reveal/ndarray_misc.pyi,sha256=um6vXze3eHUfMB_yWg9TFuIT4lKhB-o-YXgSLThtiEE,8937 
+numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi,sha256=xMPirePw0fwNYuTpnts3Bpt-CxorAUaF0ea0oopzgM8,1745 +numpy/typing/tests/data/reveal/nditer.pyi,sha256=eHIBPj1I75kErxXsTi0CQEjcJ_WvHXIx7QzEeTmxYDk,1947 +numpy/typing/tests/data/reveal/nested_sequence.pyi,sha256=aRkKbnRxI2A-xkg8iQZWOHU7672PlyMNpUHIkONymLw,637 +numpy/typing/tests/data/reveal/npyio.pyi,sha256=KA_lenEDj92tycocaW9y4Bz_jpZ5gsbHPxhYFn5nBDw,3583 +numpy/typing/tests/data/reveal/numeric.pyi,sha256=CivGCAYe2BlStSBMhO2hRUtmqFfRFrhuj97aK0PpQ-0,9242 +numpy/typing/tests/data/reveal/numerictypes.pyi,sha256=5ouW5fRTVciZdx0svYChyTsWcmwPpqZqhmlfpdpa8b0,584 +numpy/typing/tests/data/reveal/polynomial_polybase.pyi,sha256=1zRBBhPgpZXTa2jJtXGDfVQL5L4zxozjSZ0MgQsdgnM,7843 +numpy/typing/tests/data/reveal/polynomial_polyutils.pyi,sha256=vo7CfyWW7vuMhbyI95u53yS9t6ADyc2lGBJOtHbYD6c,10837 +numpy/typing/tests/data/reveal/polynomial_series.pyi,sha256=ppgLW-xn2DpkLOP8vfteKTmtCrAErE5k6_m6IbzNIm4,7018 +numpy/typing/tests/data/reveal/random.pyi,sha256=JlNcrkvVoyI8uezvQ9BBCTtFoVQPRgZ5zPXKiDdCFwo,105842 +numpy/typing/tests/data/reveal/rec.pyi,sha256=2Pr6-v4uSA_t13cYdLr2sKQSb1Ko9YuZQS-_UUAZwFo,3549 +numpy/typing/tests/data/reveal/scalars.pyi,sha256=SZ3zC7GNT-DeRXlLhz9_jvRY-TNSQvFqxXCdH3pL4HE,6569 +numpy/typing/tests/data/reveal/shape.pyi,sha256=9IilbiRez0Lbu7Zv_HvqdEwQhjRLzcbnKm-4wnG6d9c,275 +numpy/typing/tests/data/reveal/shape_base.pyi,sha256=1_G5HGC45IFPnetUyXXnyIhJYjNu0Mf7AEEtSz3QrmI,2058 +numpy/typing/tests/data/reveal/stride_tricks.pyi,sha256=PqrKANjOAnn_CQ2CjERvonBbM89PjhZpgnekiQpT04k,1342 +numpy/typing/tests/data/reveal/strings.pyi,sha256=D2r-lCrWhVM3HQ8X2cjLT2cwujxYLOb1ASrnHs2qPD8,9743 +numpy/typing/tests/data/reveal/testing.pyi,sha256=w2dTmMJnGi_jtBtCMjnUTqnrGLiYAvsksrTIwzk41HE,9031 +numpy/typing/tests/data/reveal/twodim_base.pyi,sha256=4devsums2EaY0qVWJxJG8kopVXHxYHC_uV9cj_ml_4k,7700 +numpy/typing/tests/data/reveal/type_check.pyi,sha256=3on6Yhb-vylW0Etlpl_VP6bevzBipPn8J8W6EVE66eU,2459 
+numpy/typing/tests/data/reveal/ufunc_config.pyi,sha256=fhhSCMGh5L2WU9x8DGHF3dxHugJEkIKiwf_J5mq62Js,1190 +numpy/typing/tests/data/reveal/ufunclike.pyi,sha256=FrHumRFEU0_Ji-0HIIoOx1oHriVaTXKpNzLqiudAIcI,1377 +numpy/typing/tests/data/reveal/ufuncs.pyi,sha256=Bn60mPM7tO4Uv1dgSRY8OgT_NvmW75BhnKTQfvZsey4,6487 +numpy/typing/tests/data/reveal/warnings_and_errors.pyi,sha256=kdpx5u0-zsWOPcsnciaqsACr7OKUuFWmMeMCizZDun8,460 +numpy/typing/tests/test_isfile.py,sha256=US1HhRtoDrAmlY_RuoE7ILspD0ujMifE5dydGhligTM,1085 +numpy/typing/tests/test_runtime.py,sha256=CoPFvSUANjIF7-3kPOxGXmzH6kSDFrqPq6u0__nbxXE,3186 +numpy/typing/tests/test_typing.py,sha256=j6wK6nH2Jx9V8ijLJrr8QdJjDv3NbJKCrEUNF0j7AxE,6494 +numpy/version.py,sha256=uzrYWl3fRxRj6IutUbyiRadLVcmSRIh4u_Ugc_U6D8o,304 +numpy/version.pyi,sha256=-JbleHX_16pnboC4DmzPym2X1EcI-w5cRoH0utivI34,278 diff --git a/blimgui/dist64/numpy-2.2.5.dist-info/WHEEL b/blimgui/dist64/numpy-2.4.2.dist-info/WHEEL similarity index 69% rename from blimgui/dist64/numpy-2.2.5.dist-info/WHEEL rename to blimgui/dist64/numpy-2.4.2.dist-info/WHEEL index 382f468..38d9b67 100644 --- a/blimgui/dist64/numpy-2.2.5.dist-info/WHEEL +++ b/blimgui/dist64/numpy-2.4.2.dist-info/WHEEL @@ -1,4 +1,4 @@ Wheel-Version: 1.0 Generator: meson Root-Is-Purelib: false -Tag: cp313-cp313-win_amd64 \ No newline at end of file +Tag: cp314-cp314-win_amd64 \ No newline at end of file diff --git a/blimgui/dist64/numpy-2.2.5.dist-info/entry_points.txt b/blimgui/dist64/numpy-2.4.2.dist-info/entry_points.txt similarity index 78% rename from blimgui/dist64/numpy-2.2.5.dist-info/entry_points.txt rename to blimgui/dist64/numpy-2.4.2.dist-info/entry_points.txt index 963c00f..48c4f64 100644 --- a/blimgui/dist64/numpy-2.2.5.dist-info/entry_points.txt +++ b/blimgui/dist64/numpy-2.4.2.dist-info/entry_points.txt @@ -1,3 +1,6 @@ +[pkg_config] +numpy = numpy._core.lib.pkgconfig + [array_api] numpy = numpy diff --git a/blimgui/dist64/numpy-2.2.5.dist-info/LICENSE.txt 
b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/LICENSE.txt similarity index 95% rename from blimgui/dist64/numpy-2.2.5.dist-info/LICENSE.txt rename to blimgui/dist64/numpy-2.4.2.dist-info/licenses/LICENSE.txt index 80edcc2..a394e46 100644 --- a/blimgui/dist64/numpy-2.2.5.dist-info/LICENSE.txt +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/LICENSE.txt @@ -1,950 +1,914 @@ -Copyright (c) 2005-2024, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + ---- -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. 
- -Name: lapack-lite -Files: numpy/linalg/lapack_lite/* -License: BSD-3-Clause - For details, see numpy/linalg/lapack_lite/LICENSE.txt - -Name: dragon4 -Files: numpy/_core/src/multiarray/dragon4.c -License: MIT - For license text, see numpy/_core/src/multiarray/dragon4.c - -Name: libdivide -Files: numpy/_core/include/numpy/libdivide/* -License: Zlib - For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt - - -Note that the following files are vendored in the repository and sdist but not -installed in built numpy packages: - -Name: Meson -Files: vendored-meson/meson/* -License: Apache 2.0 - For license text, see vendored-meson/meson/COPYING - -Name: spin -Files: .spin/cmds.py -License: BSD-3 - For license text, see .spin/LICENSE - -Name: tempita -Files: numpy/_build_utils/tempita/* -License: MIT - For details, see numpy/_build_utils/tempita/LICENCE.txt - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy.libs\libscipy_openblas*.dll -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy.libs\libscipy_openblas*.dll -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. 
- - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: GCC runtime library -Files: numpy.libs\libscipy_openblas*.dll -Description: statically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. 
- - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. - -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. 
- -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. - -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. 
- -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. 
- - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. 
-States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. 
- - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. 
However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. 
- - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. 
- - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. 
- - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. 
If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. 
If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. 
- - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. 
For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
- +This binary distribution of NumPy also bundles the following software: + + +Name: OpenBLAS +Files: numpy.libs\libscipy_openblas*.dll +Description: bundled as a dynamically linked library +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +Name: LAPACK +Files: numpy.libs\libscipy_openblas*.dll +Description: bundled in OpenBLAS +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause-Open-MPI + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: numpy.libs\libscipy_openblas*.dll +Description: statically linked to files compiled with gcc +Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran +License: GPL-3.0-or-later WITH GCC-exception-3.1 + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
+ +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. 
Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. 
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
+ diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt new file mode 100644 index 0000000..d72a7c3 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt @@ -0,0 +1,21 @@ + zlib License + ------------ + + Copyright (C) 2010 - 2019 ridiculous_fish, + Copyright (C) 2016 - 2019 Kim Walisch, + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING new file mode 100644 index 0000000..59a39fb --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING @@ -0,0 +1,14 @@ +BSD Zero Clause License + +Copyright Contributors to the pythoncapi_compat project. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/highway/LICENSE b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/highway/LICENSE new file mode 100644 index 0000000..1af4f15 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/highway/LICENSE @@ -0,0 +1,371 @@ +This project is primarily dual-licensed under your choice of either the Apache +License 2.0 or the BSD 3-Clause License. + +The following files are licensed under different terms: +* hwy/contrib/random/random-inl.h: CC0 1.0 Universal + +The full texts of all applicable licenses are included below, separated by +'---'. + +-------------------------------------------------------------------------------- +Apache License 2.0 +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +BSD 3-Clause License +-------------------------------------------------------------------------------- + +Copyright (c) The Highway Project Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +CC0 1.0 Universal +-------------------------------------------------------------------------------- + +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. 
+These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. 
To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. 
In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. 
Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. \ No newline at end of file diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt new file mode 100644 index 0000000..7bd49e7 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2014 Ryan Juckett + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + +dragon4.c|h h contains a modified version of Ryan Juckett's Dragon4 +implementation, obtained from https://www.ryanjuckett.com, +which has been ported from C++ to C and which has +modifications specific to printing floats in numpy. + +Ryan Juckett's original code was under the Zlib license; he gave numpy +permission to include it under the MIT license instead. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md new file mode 100644 index 0000000..3e32165 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md @@ -0,0 +1,28 @@ +BSD 3-Clause License + +Copyright (c) 2022, Intel. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE new file mode 100644 index 0000000..4723d4e --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE @@ -0,0 +1,30 @@ +Copyright (c) 2005-2021, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md new file mode 100644 index 0000000..c3a4c06 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md @@ -0,0 +1,25 @@ +Copyright (C) 2010-2018 Max-Planck-Society +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of the copyright holder nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt new file mode 100644 index 0000000..9b379c9 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt @@ -0,0 +1,48 @@ +Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. +Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. +Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + +$COPYRIGHT$ + +Additional copyrights may follow + +$HEADER$ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. 
+ +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +The copyright holders provide no reassurances that the source code +provided does not infringe any patent, copyright, or any other +intellectual property rights of third parties. The copyright holders +disclaim any liability to any recipient for claims brought against +recipient by any third party for infringement of that parties +intellectual property rights. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/ma/LICENSE b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/ma/LICENSE new file mode 100644 index 0000000..b41aae0 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/ma/LICENSE @@ -0,0 +1,24 @@ +* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant +* All rights reserved. 
+* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* * Neither the name of the University of Georgia nor the +* names of its contributors may be used to endorse or promote products +* derived from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY +* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/LICENSE.md new file mode 100644 index 0000000..a6cf1b1 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/LICENSE.md @@ -0,0 +1,71 @@ +**This software is dual-licensed under the The University of Illinois/NCSA +Open Source License (NCSA) and The 3-Clause BSD License** + +# NCSA Open Source License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Developed by: Kevin Sheppard (, +) +[http://www.kevinsheppard.com](http://www.kevinsheppard.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimers. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimers in the documentation and/or +other materials provided with the distribution. + +Neither the names of Kevin Sheppard, nor the names of any contributors may be +used to endorse or promote products derived from this Software without specific +prior written permission. + +**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH +THE SOFTWARE.** + + +# 3-Clause BSD License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.** + +# Components + +Many parts of this module have been derived from original sources, +often the algorithm's designer. 
Component licenses are located with +the component code. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/distributions/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/distributions/LICENSE.md new file mode 100644 index 0000000..31576ba --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/distributions/LICENSE.md @@ -0,0 +1,61 @@ +## NumPy + +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +* Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +## Julia + +The ziggurat methods were derived from Julia. + +Copyright (c) 2009-2019: Jeff Bezanson, Stefan Karpinski, Viral B. Shah, +and other contributors: + +https://github.com/JuliaLang/julia/contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md new file mode 100644 index 0000000..f65c3d4 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md @@ -0,0 +1,61 @@ +# MT19937 + +Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) + +The rk_random and rk_seed functions algorithms and the original design of +the Mersenne Twister RNG: + + Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. The names of its contributors may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Original algorithm for the implementation of rk_interval function from +Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by +Magnus Jonsson. + +Constants used in the rk_double implementation by Isaku Wada. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md new file mode 100644 index 0000000..7aac7a5 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md @@ -0,0 +1,22 @@ +# PCG64 + +## The MIT License + +PCG Random Number Generation for C. 
+ +Copyright 2014 Melissa O'Neill + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/philox/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/philox/LICENSE.md new file mode 100644 index 0000000..9738e44 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/philox/LICENSE.md @@ -0,0 +1,31 @@ +# PHILOX + +Copyright 2010-2012, D. E. Shaw Research. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +* Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md new file mode 100644 index 0000000..21dd604 --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md @@ -0,0 +1,27 @@ +# SFC64 + +## The MIT License + +Adapted from a C++ implementation of Chris Doty-Humphrey's SFC PRNG. + +https://gist.github.com/imneme/f1f7821f07cf76504a97f6537c818083 + +Copyright (c) 2018 Melissa E. 
O'Neill + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md new file mode 100644 index 0000000..3c4d73b --- /dev/null +++ b/blimgui/dist64/numpy-2.4.2.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md @@ -0,0 +1,9 @@ +# SPLITMIX64 + +Written in 2015 by Sebastiano Vigna (vigna@acm.org) + +To the extent possible under law, the author has dedicated all copyright +and related and neighboring rights to this software to the public domain +worldwide. This software is distributed without any warranty. + +See . 
\ No newline at end of file diff --git a/blimgui/dist64/numpy.libs/libscipy_openblas64_-43e11ff0749b8cbe0a615c9cf6737e0e.dll b/blimgui/dist64/numpy.libs/libscipy_openblas64_-74a408729250596b0973e69fdd954eea.dll similarity index 76% rename from blimgui/dist64/numpy.libs/libscipy_openblas64_-43e11ff0749b8cbe0a615c9cf6737e0e.dll rename to blimgui/dist64/numpy.libs/libscipy_openblas64_-74a408729250596b0973e69fdd954eea.dll index c53e43b..089e989 100644 Binary files a/blimgui/dist64/numpy.libs/libscipy_openblas64_-43e11ff0749b8cbe0a615c9cf6737e0e.dll and b/blimgui/dist64/numpy.libs/libscipy_openblas64_-74a408729250596b0973e69fdd954eea.dll differ diff --git a/blimgui/dist64/numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll b/blimgui/dist64/numpy.libs/msvcp140-a4c2229bdc2a2a630acdc095b4d86008.dll similarity index 100% rename from blimgui/dist64/numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll rename to blimgui/dist64/numpy.libs/msvcp140-a4c2229bdc2a2a630acdc095b4d86008.dll diff --git a/blimgui/dist64/numpy/__config__.py b/blimgui/dist64/numpy/__config__.py index 93b6f72..4c9c24d 100644 --- a/blimgui/dist64/numpy/__config__.py +++ b/blimgui/dist64/numpy/__config__.py @@ -33,7 +33,7 @@ def _cleanup(d): "c": { "name": "msvc", "linker": r"link", - "version": "19.29.30159", + "version": "19.44.35222", "commands": r"cl", "args": r"", "linker args": r"", @@ -41,7 +41,7 @@ def _cleanup(d): "cython": { "name": "cython", "linker": r"cython", - "version": "3.0.12", + "version": "3.2.4", "commands": r"cython", "args": r"", "linker args": r"", @@ -49,7 +49,7 @@ def _cleanup(d): "c++": { "name": "msvc", "linker": r"link", - "version": "19.29.30159", + "version": "19.44.35222", "commands": r"cl", "args": r"", "linker args": r"", @@ -74,27 +74,27 @@ def _cleanup(d): "blas": { "name": "scipy-openblas", "found": bool("True".lower().replace("false", "")), - "version": "0.3.28", + "version": "0.3.31.dev", "detection method": "pkgconfig", - "include directory": 
r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-065080rt/cp313-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include", - "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-065080rt/cp313-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib", - "openblas configuration": r"OpenBLAS 0.3.28 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=24", - "pc file directory": r"D:/a/numpy/numpy/.openblas", + "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-awnw0lvp/cp314-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include", + "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-awnw0lvp/cp314-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.31.dev USE64BITINT DYNAMIC_ARCH NO_AFFINITY SkylakeX MAX_THREADS=24", + "pc file directory": r"D:/a/numpy-release/numpy-release/.openblas", }, "lapack": { "name": "scipy-openblas", "found": bool("True".lower().replace("false", "")), - "version": "0.3.28", + "version": "0.3.31.dev", "detection method": "pkgconfig", - "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-065080rt/cp313-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include", - "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-065080rt/cp313-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib", - "openblas configuration": r"OpenBLAS 0.3.28 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=24", - "pc file directory": r"D:/a/numpy/numpy/.openblas", + "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-awnw0lvp/cp314-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include", + "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-awnw0lvp/cp314-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.31.dev USE64BITINT DYNAMIC_ARCH NO_AFFINITY SkylakeX MAX_THREADS=24", + "pc file 
directory": r"D:/a/numpy-release/numpy-release/.openblas", }, }, "Python Information": { - "path": r"C:\Users\runneradmin\AppData\Local\Temp\build-env-19lia66t\Scripts\python.exe", - "version": "3.13", + "path": r"C:\Users\runneradmin\AppData\Local\Temp\build-env-f3axtnip\Scripts\python.exe", + "version": "3.14", }, "SIMD Extensions": { "baseline": __cpu_baseline__, diff --git a/blimgui/dist64/numpy/__config__.pyi b/blimgui/dist64/numpy/__config__.pyi index 1b5bf94..85366b4 100644 --- a/blimgui/dist64/numpy/__config__.pyi +++ b/blimgui/dist64/numpy/__config__.pyi @@ -1,7 +1,13 @@ from enum import Enum from types import ModuleType -from typing import Final, Literal as L, TypedDict, overload, type_check_only -from typing_extensions import NotRequired +from typing import ( + Final, + Literal as L, + NotRequired, + TypedDict, + overload, + type_check_only, +) _CompilerConfigDictValue = TypedDict( "_CompilerConfigDictValue", @@ -25,7 +31,7 @@ _CompilerConfigDict = TypedDict( _MachineInformationDict = TypedDict( "_MachineInformationDict", { - "host":_MachineInformationDictValue, + "host": _MachineInformationDictValue, "build": _MachineInformationDictValue, "cross-compiled": NotRequired[L[True]], }, diff --git a/blimgui/dist64/numpy/__init__.cython-30.pxd b/blimgui/dist64/numpy/__init__.cython-30.pxd index 8c89049..2b51695 100644 --- a/blimgui/dist64/numpy/__init__.cython-30.pxd +++ b/blimgui/dist64/numpy/__init__.cython-30.pxd @@ -51,15 +51,11 @@ cdef extern from "numpy/arrayobject.h": ctypedef signed short npy_int16 ctypedef signed int npy_int32 ctypedef signed long long npy_int64 - ctypedef signed long long npy_int96 - ctypedef signed long long npy_int128 ctypedef unsigned char npy_uint8 ctypedef unsigned short npy_uint16 ctypedef unsigned int npy_uint32 ctypedef unsigned long long npy_uint64 - ctypedef unsigned long long npy_uint96 - ctypedef unsigned long long npy_uint128 ctypedef float npy_float32 ctypedef double npy_float64 @@ -117,6 +113,7 @@ cdef extern from 
"numpy/arrayobject.h": NPY_OBJECT NPY_STRING NPY_UNICODE + NPY_VSTRING NPY_VOID NPY_DATETIME NPY_TIMEDELTA @@ -127,28 +124,21 @@ cdef extern from "numpy/arrayobject.h": NPY_INT16 NPY_INT32 NPY_INT64 - NPY_INT128 - NPY_INT256 NPY_UINT8 NPY_UINT16 NPY_UINT32 NPY_UINT64 - NPY_UINT128 - NPY_UINT256 NPY_FLOAT16 NPY_FLOAT32 NPY_FLOAT64 NPY_FLOAT80 NPY_FLOAT96 NPY_FLOAT128 - NPY_FLOAT256 - NPY_COMPLEX32 NPY_COMPLEX64 NPY_COMPLEX128 NPY_COMPLEX160 NPY_COMPLEX192 NPY_COMPLEX256 - NPY_COMPLEX512 NPY_INTP NPY_UINTP @@ -166,6 +156,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP @@ -191,40 +182,6 @@ cdef extern from "numpy/arrayobject.h": NPY_SEARCHRIGHT enum: - # DEPRECATED since NumPy 1.7 ! Do not use in new code! - NPY_C_CONTIGUOUS - NPY_F_CONTIGUOUS - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - enum: - # Added in NumPy 1.7 to replace the deprecated enums above. 
NPY_ARRAY_C_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS NPY_ARRAY_OWNDATA @@ -788,15 +745,11 @@ ctypedef npy_int8 int8_t ctypedef npy_int16 int16_t ctypedef npy_int32 int32_t ctypedef npy_int64 int64_t -#ctypedef npy_int96 int96_t -#ctypedef npy_int128 int128_t ctypedef npy_uint8 uint8_t ctypedef npy_uint16 uint16_t ctypedef npy_uint32 uint32_t ctypedef npy_uint64 uint64_t -#ctypedef npy_uint96 uint96_t -#ctypedef npy_uint128 uint128_t ctypedef npy_float32 float32_t ctypedef npy_float64 float64_t @@ -971,10 +924,17 @@ cdef extern from "numpy/ufuncobject.h": PyUFunc_Zero PyUFunc_One PyUFunc_None + # deprecated UFUNC_FPE_DIVIDEBYZERO UFUNC_FPE_OVERFLOW UFUNC_FPE_UNDERFLOW UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int) @@ -1248,3 +1208,35 @@ cdef extern from "numpy/arrayobject.h": void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void 
NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size) diff --git a/blimgui/dist64/numpy/__init__.pxd b/blimgui/dist64/numpy/__init__.pxd index 8589e53..24d4637 100644 --- a/blimgui/dist64/numpy/__init__.pxd +++ b/blimgui/dist64/numpy/__init__.pxd @@ -60,15 +60,11 @@ cdef extern from "numpy/arrayobject.h": ctypedef signed short npy_int16 ctypedef signed int npy_int32 ctypedef signed long long npy_int64 - ctypedef signed long long npy_int96 - ctypedef signed long long npy_int128 ctypedef unsigned char npy_uint8 ctypedef unsigned short npy_uint16 ctypedef unsigned int npy_uint32 ctypedef unsigned long long npy_uint64 - ctypedef unsigned long long npy_uint96 - ctypedef unsigned long long npy_uint128 ctypedef float npy_float32 ctypedef double npy_float64 @@ -126,6 +122,7 @@ cdef extern from "numpy/arrayobject.h": NPY_OBJECT NPY_STRING NPY_UNICODE + NPY_VSTRING NPY_VOID NPY_DATETIME NPY_TIMEDELTA @@ -136,28 +133,21 @@ cdef extern from "numpy/arrayobject.h": NPY_INT16 NPY_INT32 NPY_INT64 - NPY_INT128 - NPY_INT256 NPY_UINT8 NPY_UINT16 NPY_UINT32 NPY_UINT64 - NPY_UINT128 - NPY_UINT256 NPY_FLOAT16 NPY_FLOAT32 NPY_FLOAT64 NPY_FLOAT80 NPY_FLOAT96 NPY_FLOAT128 - NPY_FLOAT256 - NPY_COMPLEX32 NPY_COMPLEX64 NPY_COMPLEX128 NPY_COMPLEX160 NPY_COMPLEX192 NPY_COMPLEX256 - NPY_COMPLEX512 NPY_INTP NPY_UINTP @@ -175,6 +165,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP @@ -200,40 +191,6 @@ cdef extern from "numpy/arrayobject.h": NPY_SEARCHRIGHT enum: - # DEPRECATED since NumPy 1.7 ! 
Do not use in new code! - NPY_C_CONTIGUOUS - NPY_F_CONTIGUOUS - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - enum: - # Added in NumPy 1.7 to replace the deprecated enums above. NPY_ARRAY_C_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS NPY_ARRAY_OWNDATA @@ -703,15 +660,11 @@ ctypedef npy_int8 int8_t ctypedef npy_int16 int16_t ctypedef npy_int32 int32_t ctypedef npy_int64 int64_t -#ctypedef npy_int96 int96_t -#ctypedef npy_int128 int128_t ctypedef npy_uint8 uint8_t ctypedef npy_uint16 uint16_t ctypedef npy_uint32 uint32_t ctypedef npy_uint64 uint64_t -#ctypedef npy_uint96 uint96_t -#ctypedef npy_uint128 uint128_t ctypedef npy_float32 float32_t ctypedef npy_float64 float64_t @@ -885,10 +838,16 @@ cdef extern from "numpy/ufuncobject.h": PyUFunc_Zero PyUFunc_One PyUFunc_None + # deprecated UFUNC_FPE_DIVIDEBYZERO UFUNC_FPE_OVERFLOW UFUNC_FPE_UNDERFLOW UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int) @@ -1162,3 +1121,35 @@ cdef extern from "numpy/arrayobject.h": void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject 
*na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size) diff --git a/blimgui/dist64/numpy/__init__.py b/blimgui/dist64/numpy/__init__.py index 6e45aa1..4310912 100644 --- a/blimgui/dist64/numpy/__init__.py +++ b/blimgui/dist64/numpy/__init__.py @@ -88,32 +88,30 @@ # start delvewheel patch -def _delvewheel_patch_1_10_0(): +def _delvewheel_patch_1_11_2(): import os if os.path.isdir(libs_dir := os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'numpy.libs'))): os.add_dll_directory(libs_dir) -_delvewheel_patch_1_10_0() -del _delvewheel_patch_1_10_0 +_delvewheel_patch_1_11_2() +del _delvewheel_patch_1_11_2 # end delvewheel patch import os import sys import warnings -from ._globals import _NoValue, _CopyMode -from ._expired_attrs_2_0 import __expired_attributes__ - - # If a version with git hash was stored, use that instead from . 
import version +from ._expired_attrs_2_0 import __expired_attributes__ +from ._globals import _CopyMode, _NoValue from .version import __version__ # We first need to detect if we're being called as part of the numpy setup # procedure itself in a reliable manner. try: - __NUMPY_SETUP__ + __NUMPY_SETUP__ # noqa: B018 except NameError: __NUMPY_SETUP__ = False @@ -126,60 +124,338 @@ def _delvewheel_patch_1_10_0(): try: from numpy.__config__ import show_config except ImportError as e: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) from e + if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__": + # The __config__ module itself was not found, so add this info: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + raise from . 
import _core from ._core import ( - False_, ScalarType, True_, - abs, absolute, acos, acosh, add, all, allclose, - amax, amin, any, arange, arccos, arccosh, arcsin, arcsinh, - arctan, arctan2, arctanh, argmax, argmin, argpartition, argsort, - argwhere, around, array, array2string, array_equal, array_equiv, - array_repr, array_str, asanyarray, asarray, ascontiguousarray, - asfortranarray, asin, asinh, atan, atanh, atan2, astype, atleast_1d, - atleast_2d, atleast_3d, base_repr, binary_repr, bitwise_and, - bitwise_count, bitwise_invert, bitwise_left_shift, bitwise_not, - bitwise_or, bitwise_right_shift, bitwise_xor, block, bool, bool_, - broadcast, busday_count, busday_offset, busdaycalendar, byte, bytes_, - can_cast, cbrt, cdouble, ceil, character, choose, clip, clongdouble, - complex128, complex64, complexfloating, compress, concat, concatenate, - conj, conjugate, convolve, copysign, copyto, correlate, cos, cosh, - count_nonzero, cross, csingle, cumprod, cumsum, cumulative_prod, - cumulative_sum, datetime64, datetime_as_string, datetime_data, - deg2rad, degrees, diagonal, divide, divmod, dot, double, dtype, e, - einsum, einsum_path, empty, empty_like, equal, errstate, euler_gamma, - exp, exp2, expm1, fabs, finfo, flatiter, flatnonzero, flexible, - float16, float32, float64, float_power, floating, floor, floor_divide, - fmax, fmin, fmod, format_float_positional, format_float_scientific, - frexp, from_dlpack, frombuffer, fromfile, fromfunction, fromiter, - frompyfunc, fromstring, full, full_like, gcd, generic, geomspace, - get_printoptions, getbufsize, geterr, geterrcall, greater, - greater_equal, half, heaviside, hstack, hypot, identity, iinfo, - indices, inexact, inf, inner, int16, int32, int64, int8, int_, intc, - integer, intp, invert, is_busday, isclose, isdtype, isfinite, - isfortran, isinf, isnan, isnat, isscalar, issubdtype, lcm, ldexp, - left_shift, less, less_equal, lexsort, linspace, little_endian, log, - log10, log1p, log2, logaddexp, logaddexp2, 
logical_and, logical_not, - logical_or, logical_xor, logspace, long, longdouble, longlong, matmul, - matvec, matrix_transpose, max, maximum, may_share_memory, mean, memmap, - min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, - ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter, - nonzero, not_equal, number, object_, ones, ones_like, outer, partition, - permute_dims, pi, positive, pow, power, printoptions, prod, - promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray, - reciprocal, record, remainder, repeat, require, reshape, resize, - result_type, right_shift, rint, roll, rollaxis, round, sctypeDict, - searchsorted, set_printoptions, setbufsize, seterr, seterrcall, shape, - shares_memory, short, sign, signbit, signedinteger, sin, single, sinh, - size, sort, spacing, sqrt, square, squeeze, stack, std, - str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, - timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, - ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, - ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, - vecmat, void, vstack, where, zeros, zeros_like + False_, + ScalarType, + True_, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + any, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_str, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + asin, + asinh, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + base_repr, + binary_repr, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + block, + bool, + bool_, + broadcast, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + can_cast, + cbrt, + cdouble, + ceil, + 
character, + choose, + clip, + clongdouble, + complex64, + complex128, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copysign, + copyto, + correlate, + cos, + cosh, + count_nonzero, + cross, + csingle, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + diagonal, + divide, + divmod, + dot, + double, + dtype, + e, + einsum, + einsum_path, + empty, + empty_like, + equal, + errstate, + euler_gamma, + exp, + exp2, + expm1, + fabs, + finfo, + flatiter, + flatnonzero, + flexible, + float16, + float32, + float64, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromstring, + full, + full_like, + gcd, + generic, + geomspace, + get_printoptions, + getbufsize, + geterr, + geterrcall, + greater, + greater_equal, + half, + heaviside, + hstack, + hypot, + identity, + iinfo, + indices, + inexact, + inf, + inner, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + invert, + is_busday, + isclose, + isdtype, + isfinite, + isfortran, + isinf, + isnan, + isnat, + isscalar, + issubdtype, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + linspace, + little_endian, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + matmul, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + memmap, + min, + min_scalar_type, + minimum, + mod, + modf, + moveaxis, + multiply, + nan, + ndarray, + ndim, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ones, + ones_like, + outer, + partition, + permute_dims, + pi, + positive, + pow, + power, + printoptions, + prod, + 
promote_types, + ptp, + put, + putmask, + rad2deg, + radians, + ravel, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + round, + sctypeDict, + searchsorted, + set_printoptions, + setbufsize, + seterr, + seterrcall, + shape, + shares_memory, + short, + sign, + signbit, + signedinteger, + sin, + single, + sinh, + size, + sort, + spacing, + sqrt, + square, + squeeze, + stack, + std, + str_, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + tensordot, + timedelta64, + trace, + transpose, + true_divide, + trunc, + typecodes, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + unstack, + ushort, + var, + vdot, + vecdot, + vecmat, + void, + vstack, + where, + zeros, + zeros_like, ) # NOTE: It's still under discussion whether these aliases @@ -191,68 +467,174 @@ def _delvewheel_patch_1_10_0(): pass del ta - from . import lib + from . 
import lib, matrixlib as _mat from .lib import scimath as emath - from .lib._histograms_impl import ( - histogram, histogram_bin_edges, histogramdd - ) - from .lib._nanfunctions_impl import ( - nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean, - nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd, - nansum, nanvar + from .lib._arraypad_impl import pad + from .lib._arraysetops_impl import ( + ediff1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, ) from .lib._function_base_impl import ( - select, piecewise, trim_zeros, copy, iterable, percentile, diff, - gradient, angle, unwrap, sort_complex, flip, rot90, extract, place, - vectorize, asarray_chkfinite, average, bincount, digitize, cov, - corrcoef, median, sinc, hamming, hanning, bartlett, blackman, - kaiser, trapezoid, trapz, i0, meshgrid, delete, insert, append, - interp, quantile + angle, + append, + asarray_chkfinite, + average, + bartlett, + bincount, + blackman, + copy, + corrcoef, + cov, + delete, + diff, + digitize, + extract, + flip, + gradient, + hamming, + hanning, + i0, + insert, + interp, + iterable, + kaiser, + median, + meshgrid, + percentile, + piecewise, + place, + quantile, + rot90, + select, + sinc, + sort_complex, + trapezoid, + trim_zeros, + unwrap, + vectorize, ) - from .lib._twodim_base_impl import ( - diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander, - histogram2d, mask_indices, tril_indices, tril_indices_from, - triu_indices, triu_indices_from + from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd + from .lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + ravel_multi_index, + s_, + unravel_index, ) - from .lib._shape_base_impl import ( - apply_over_axes, apply_along_axis, array_split, column_stack, dsplit, - dstack, expand_dims, 
hsplit, kron, put_along_axis, row_stack, split, - take_along_axis, tile, vsplit + from .lib._nanfunctions_impl import ( + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, ) - from .lib._type_check_impl import ( - iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real, - real_if_close, typename, mintypecode, common_type + from .lib._npyio_impl import ( + fromregex, + genfromtxt, + load, + loadtxt, + packbits, + save, + savetxt, + savez, + savez_compressed, + unpackbits, ) - from .lib._arraysetops_impl import ( - ediff1d, in1d, intersect1d, isin, setdiff1d, setxor1d, union1d, - unique, unique_all, unique_counts, unique_inverse, unique_values + from .lib._polynomial_impl import ( + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polysub, + polyval, + roots, ) - from .lib._ufunclike_impl import fix, isneginf, isposinf - from .lib._arraypad_impl import pad - from .lib._utils_impl import ( - show_runtime, get_include, info + from .lib._shape_base_impl import ( + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + row_stack, + split, + take_along_axis, + tile, + vsplit, ) from .lib._stride_tricks_impl import ( - broadcast_arrays, broadcast_shapes, broadcast_to - ) - from .lib._polynomial_impl import ( - poly, polyint, polyder, polyadd, polysub, polymul, polydiv, polyval, - polyfit, poly1d, roots - ) - from .lib._npyio_impl import ( - savetxt, loadtxt, genfromtxt, load, save, savez, packbits, - savez_compressed, unpackbits, fromregex + broadcast_arrays, + broadcast_shapes, + broadcast_to, ) - from .lib._index_tricks_impl import ( - diag_indices_from, diag_indices, fill_diagonal, ndindex, ndenumerate, - ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, - index_exp + from .lib._twodim_base_impl import ( + 
diag, + diagflat, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + tri, + tril, + tril_indices, + tril_indices_from, + triu, + triu_indices, + triu_indices_from, + vander, ) - - from . import matrixlib as _mat - from .matrixlib import ( - asmatrix, bmat, matrix + from .lib._type_check_impl import ( + common_type, + imag, + iscomplex, + iscomplexobj, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real, + real_if_close, + typename, ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from .lib._utils_impl import get_include, info, show_runtime + from .matrixlib import asmatrix, bmat, matrix # public submodules are imported lazily, therefore are accessible from # __getattr__. Note that `distutils` (deprecated) and `array_api` @@ -295,7 +677,6 @@ def _delvewheel_patch_1_10_0(): for n, extended_msg in _type_info } - # Some of these could be defined right away, but most were aliases to # the Python objects and only removed in NumPy 1.24. Defining them should # probably wait for NumPy 1.26 or 2.0. @@ -303,13 +684,10 @@ def _delvewheel_patch_1_10_0(): # import with `from numpy import *`. 
__future_scalars__ = {"str", "bytes", "object"} - __array_api_version__ = "2023.12" + __array_api_version__ = "2024.12" from ._array_api_info import __array_namespace_info__ - # now that numpy core module is imported, can initialize limits - _core.getlimits._register_known_types() - __all__ = list( __numpy_submodules__ | set(_core.__all__) | @@ -424,8 +802,7 @@ def __getattr__(attr): import numpy.char as char return char.chararray - raise AttributeError("module {!r} has no attribute " - "{!r}".format(__name__, attr)) + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") def __dir__(): public_symbols = ( @@ -433,7 +810,7 @@ def __dir__(): ) public_symbols -= { "matrixlib", "matlib", "tests", "conftest", "version", - "compat", "distutils", "array_api" + "distutils", "array_api" } return list(public_symbols) @@ -485,26 +862,44 @@ def _mac_os_check(): from . import exceptions with warnings.catch_warnings(record=True) as w: _mac_os_check() - # Throw runtime error, if the test failed Check for warning and error_message + # Throw runtime error, if the test failed + # Check for warning and report the error_message if len(w) > 0: for _wn in w: if _wn.category is exceptions.RankWarning: - # Ignore other warnings, they may not be relevant (see gh-25433). + # Ignore other warnings, they may not be relevant (see gh-25433) error_message = ( f"{_wn.category.__name__}: {_wn.message}" ) msg = ( "Polyfit sanity test emitted a warning, most likely due " "to using a buggy Accelerate backend." 
- "\nIf you compiled yourself, more information is available at:" + "\nIf you compiled yourself, more information is available at:" # noqa: E501 "\nhttps://numpy.org/devdocs/building/index.html" "\nOtherwise report this to the vendor " - "that provided NumPy.\n\n{}\n".format(error_message)) + f"that provided NumPy.\n\n{error_message}\n") raise RuntimeError(msg) del _wn del w del _mac_os_check + def blas_fpe_check(): + # Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate. + with errstate(all='raise'): + x = ones((20, 20)) + try: + x @ x + except FloatingPointError: + res = _core._multiarray_umath._blas_supports_fpe(False) + if res: # res was not modified (hardcoded to True for now) + warnings.warn( + "Spurious warnings given by blas but suppression not " + "set up on this platform. Please open a NumPy issue.", + UserWarning, stacklevel=2) + + blas_fpe_check() + del blas_fpe_check + def hugepage_setup(): """ We usually use madvise hugepages support, but on some old kernels it @@ -557,4 +952,4 @@ def _pyinstaller_hooks_dir(): # Remove symbols imported for internal use -del os, sys, warnings \ No newline at end of file +del os, sys, warnings diff --git a/blimgui/dist64/numpy/__init__.pyi b/blimgui/dist64/numpy/__init__.pyi index 87e0b2a..2833f3c 100644 --- a/blimgui/dist64/numpy/__init__.pyi +++ b/blimgui/dist64/numpy/__init__.pyi @@ -5,6 +5,7 @@ import mmap import ctypes as ct import array as _array import datetime as dt +import inspect from abc import abstractmethod from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal @@ -16,7 +17,7 @@ from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes -from numpy._typing import ( +from numpy._typing import ( # type: ignore[deprecated] # Arrays ArrayLike, NDArray, @@ -44,6 +45,7 @@ from numpy._typing import ( _DTypeLikeVoid, _VoidDTypeLike, # Shapes + _AnyShape, 
_Shape, _ShapeLike, # Scalars @@ -57,10 +59,8 @@ from numpy._typing import ( NBitBase, # NOTE: Do not remove the extended precision bit-types even if seemingly unused; # they're used by the mypy plugin - _256Bit, _128Bit, _96Bit, - _80Bit, _64Bit, _32Bit, _16Bit, @@ -122,10 +122,7 @@ from numpy._typing import ( _FloatingCodes, _ComplexFloatingCodes, _InexactCodes, - _NumberCodes, _CharacterCodes, - _FlexibleCodes, - _GenericCodes, # Ufuncs _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1, @@ -134,47 +131,12 @@ from numpy._typing import ( _GUFunc_Nin2_Nout1, ) -from numpy._typing._callable import ( - _BoolOp, - _BoolBitOp, - _BoolSub, - _BoolTrueDiv, - _BoolMod, - _BoolDivMod, - _IntTrueDiv, - _UnsignedIntOp, - _UnsignedIntBitOp, - _UnsignedIntMod, - _UnsignedIntDivMod, - _SignedIntOp, - _SignedIntBitOp, - _SignedIntMod, - _SignedIntDivMod, - _FloatOp, - _FloatMod, - _FloatDivMod, - _NumberOp, - _ComparisonOpLT, - _ComparisonOpLE, - _ComparisonOpGT, - _ComparisonOpGE, -) - -# NOTE: Numpy's mypy plugin is used for removing the types unavailable -# to the specific platform +# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform from numpy._typing._extended_precision import ( - uint128, - uint256, - int128, - int256, - float80, float96, float128, - float256, - complex160, complex192, complex256, - complex512, ) from numpy._array_api_info import __array_namespace_info__ @@ -206,7 +168,11 @@ from typing import ( Final, Generic, Literal as L, + LiteralString, + Never, NoReturn, + Protocol, + Self, SupportsComplex, SupportsFloat, SupportsInt, @@ -214,6 +180,7 @@ from typing import ( TypeAlias, TypedDict, final, + overload, type_check_only, ) @@ -221,8 +188,8 @@ from typing import ( # if not available at runtime. 
This is because the `typeshed` stubs for the standard # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi -from _typeshed import StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, LiteralString, Never, Protocol, Self, TypeVar, Unpack, deprecated, overload +from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite +from typing_extensions import CapsuleType, TypeVar, deprecated, override from numpy import ( char, @@ -326,8 +293,7 @@ from numpy._core._ufunc_config import ( getbufsize, seterrcall, geterrcall, - _ErrKind, - _ErrCall, + errstate, ) from numpy._core.arrayprint import ( @@ -345,6 +311,10 @@ from numpy._core.einsumfunc import ( einsum, einsum_path, ) +from numpy._core.getlimits import ( + finfo, + iinfo, +) from numpy._core.multiarray import ( array, @@ -437,6 +407,8 @@ from numpy._core.shape_base import ( ) from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue, _NoValueType from numpy.lib import ( scimath as emath, @@ -448,7 +420,6 @@ from numpy.lib._arraypad_impl import ( from numpy.lib._arraysetops_impl import ( ediff1d, - in1d, intersect1d, isin, setdiff1d, @@ -461,7 +432,7 @@ from numpy.lib._arraysetops_impl import ( unique_values, ) -from numpy.lib._function_base_impl import ( +from numpy.lib._function_base_impl import ( # type: ignore[deprecated] select, piecewise, trim_zeros, @@ -479,7 +450,6 @@ from numpy.lib._function_base_impl import ( place, asarray_chkfinite, average, - bincount, digitize, cov, corrcoef, @@ -491,7 +461,6 @@ from numpy.lib._function_base_impl import ( blackman, kaiser, trapezoid, - trapz, i0, meshgrid, delete, @@ -499,10 +468,9 @@ from numpy.lib._function_base_impl import ( append, interp, quantile, + vectorize, ) -from numpy._globals import 
_CopyMode - from numpy.lib._histograms_impl import ( histogram_bin_edges, histogram, @@ -551,8 +519,6 @@ from numpy.lib._npyio_impl import ( save, savez, savez_compressed, - packbits, - unpackbits, fromregex, ) @@ -569,7 +535,7 @@ from numpy.lib._polynomial_impl import ( polyfit, ) -from numpy.lib._shape_base_impl import ( +from numpy.lib._shape_base_impl import ( # type: ignore[deprecated] column_stack, row_stack, dstack, @@ -640,6 +606,7 @@ from numpy.lib._utils_impl import ( from numpy.matrixlib import ( asmatrix, bmat, + matrix, ) __all__ = [ # noqa: RUF022 @@ -693,8 +660,7 @@ __all__ = [ # noqa: RUF022 "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", - "uint128", "uint256", "int128", "int256", "float80", "float96", "float128", - "float256", "complex160", "complex192", "complex256", "complex512", + "float96", "float128", "complex192", "complex256", "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", "printoptions", "format_float_positional", "format_float_scientific", "require", "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", @@ -721,7 +687,7 @@ __all__ = [ # noqa: RUF022 "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", - "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "kaiser", "trapezoid", "i0", "meshgrid", "delete", "insert", "append", "interp", "quantile", # lib._twodim_base_impl.__all__ "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", @@ -735,7 +701,7 @@ __all__ = [ # noqa: RUF022 "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", 
"real", "real_if_close", "typename", "mintypecode", "common_type", # lib._arraysetops_impl.__all__ - "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", # lib._ufunclike_impl.__all__ "fix", "isneginf", "isposinf", @@ -764,8 +730,8 @@ __all__ = [ # noqa: RUF022 # Only use these for functions; never as generic type parameter. _AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes) -_AnyShapeType = TypeVar( - "_AnyShapeType", +_AnyShapeT = TypeVar( + "_AnyShapeT", tuple[()], # 0-d tuple[int], # 1-d tuple[int, int], # 2-d @@ -777,7 +743,6 @@ _AnyShapeType = TypeVar( tuple[int, int, int, int, int, int, int, int], # 8-d tuple[int, ...], # N-d ) -_AnyNBitInexact = TypeVar("_AnyNBitInexact", _NBitHalf, _NBitSingle, _NBitDouble, _NBitLongDouble) _AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) _AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) _AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) @@ -792,41 +757,53 @@ _T_contra = TypeVar("_T_contra", contravariant=True) _RealT_co = TypeVar("_RealT_co", covariant=True) _ImagT_co = TypeVar("_ImagT_co", covariant=True) -_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) - -_DType = TypeVar("_DType", bound=dtype[Any]) -_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) -_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible]) - -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) -_ArrayT_co = TypeVar("_ArrayT_co", bound=NDArray[Any], covariant=True) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer[Any] | np.bool | object_]) -_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating[Any] | integer[Any] | timedelta64 | np.bool | object_]) -_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number[Any] | 
timedelta64 | object_]) +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) +_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) + +_ArrayT = TypeVar("_ArrayT", bound=ndarray) +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) +_BoolArrayT = TypeVar("_BoolArrayT", bound=NDArray[np.bool]) +_IntegerArrayT = TypeVar("_IntegerArrayT", bound=NDArray[integer]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[np.bool | integer | object_]) +_FloatingArrayT = TypeVar("_FloatingArrayT", bound=NDArray[floating]) +_FloatingTimedeltaArrayT = TypeVar("_FloatingTimedeltaArrayT", bound=NDArray[floating | timedelta64]) +_ComplexFloatingArrayT = TypeVar("_ComplexFloatingArrayT", bound=NDArray[complexfloating]) +_InexactArrayT = TypeVar("_InexactArrayT", bound=NDArray[inexact]) +_InexactTimedeltaArrayT = TypeVar("_InexactTimedeltaArrayT", bound=NDArray[inexact | timedelta64]) +_NumberArrayT = TypeVar("_NumberArrayT", bound=NDArray[number]) +_NumberCharacterArrayT = TypeVar("_NumberCharacterArrayT", bound=ndarray[Any, dtype[number | character] | dtypes.StringDType]) +_TimedeltaArrayT = TypeVar("_TimedeltaArrayT", bound=NDArray[timedelta64]) +_TimeArrayT = TypeVar("_TimeArrayT", bound=NDArray[datetime64 | timedelta64]) +_ObjectArrayT = TypeVar("_ObjectArrayT", bound=NDArray[object_]) +_BytesArrayT = TypeVar("_BytesArrayT", bound=NDArray[bytes_]) +_StringArrayT = TypeVar("_StringArrayT", bound=ndarray[Any, dtype[str_] | dtypes.StringDType]) +_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) +_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) -_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, covariant=True) -_1DShapeT = TypeVar("_1DShapeT", bound=_1D) -_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, covariant=True) -_1NShapeT = 
TypeVar("_1NShapeT", bound=tuple[L[1], Unpack[tuple[L[1], ...]]]) # (1,) | (1, 1) | (1, 1, 1) | ... - -_SCT = TypeVar("_SCT", bound=generic) -_SCT_co = TypeVar("_SCT_co", bound=generic, covariant=True) -_NumberT = TypeVar("_NumberT", bound=number[Any]) +_Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) +_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ... + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) +_InexactT = TypeVar("_InexactT", bound=inexact) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_FloatingT_co = TypeVar("_FloatingT_co", bound=floating[Any], default=floating[Any], covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) -_IntegerT_co = TypeVar("_IntegerT_co", bound=integer[Any], default=integer[Any], covariant=True) +_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) -_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) -_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) -_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) +_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) _BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) _BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) -_NumberItemT_co = TypeVar("_NumberItemT_co", bound=int | float | complex, 
default=int | float | complex, covariant=True) -_InexactItemT_co = TypeVar("_InexactItemT_co", bound=float | complex, default=float | complex, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) _FlexibleItemT_co = TypeVar( "_FlexibleItemT_co", bound=_CharLike_co | tuple[Any, ...], @@ -837,6 +814,7 @@ _CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_ _TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) _DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) _TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) ### Type Aliases (for internal use only) @@ -875,8 +853,6 @@ _SignedIntegerCType: TypeAlias = type[ ] # fmt: skip _FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] _IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType -_NumberCType: TypeAlias = _IntegerCType | _IntegerCType -_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]] # some commonly used builtin types that are known to result in a # `dtype[object_]`, when their *type* is passed to the `dtype` constructor @@ -888,16 +864,16 @@ _BuiltinObjectLike: TypeAlias = ( ) # fmt: skip # Introduce an alias for `dtype` to avoid naming conflicts. 
-_dtype: TypeAlias = dtype[_SCT] +_dtype: TypeAlias = dtype[_ScalarT] _ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] # can be anything, is case-insensitive, and only the first character matters _ByteOrder: TypeAlias = L[ - "S", # swap the current order (default) - "<", "L", "little", # little-endian - ">", "B", "big", # big endian - "=", "N", "native", # native order - "|", "I", # ignore + "S", # swap the current order (default) + "<", "L", "little", # little-endian + ">", "B", "big", # big endian + "=", "N", "native", # native order + "|", "I", # ignore ] # fmt: skip _DTypeKind: TypeAlias = L[ "b", # boolean @@ -973,13 +949,13 @@ _DTypeNum: TypeAlias = L[ ] _DTypeBuiltinKind: TypeAlias = L[0, 1, 2] -_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12"] +_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] -_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"] +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] -_OrderKACF: TypeAlias = L[None, "K", "A", "C", "F"] -_OrderACF: TypeAlias = L[None, "A", "C", "F"] -_OrderCF: TypeAlias = L[None, "C", "F"] +_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None +_OrderACF: TypeAlias = L["A", "C", "F"] | None +_OrderCF: TypeAlias = L["C", "F"] | None # noqa: PYI047 _ModeKind: TypeAlias = L["raise", "wrap", "clip"] _PartitionKind: TypeAlias = L["introselect"] @@ -994,11 +970,8 @@ _SortKind: TypeAlias = L[ _SortSide: TypeAlias = L["left", "right"] _ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co -_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co -if sys.version_info >= (3, 11): - _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co -else: - _ConvertibleToComplex: TypeAlias = complex | SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co 
+_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None @@ -1032,7 +1005,7 @@ _NDIterFlagsOp: TypeAlias = L[ "updateifcopy", "virtual", "writeonly", - "writemasked" + "writemasked", ] _MemMapModeKind: TypeAlias = L[ @@ -1044,7 +1017,7 @@ _MemMapModeKind: TypeAlias = L[ _DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] _DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"] -_NaTValue: TypeAlias = L["NAT","NaT", "nat",b"NAT", b"NaT", b"nat"] +_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] _MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"] _DayUnit: TypeAlias = L["W", "D", b"W", b"D"] @@ -1069,6 +1042,26 @@ class _FormerAttrsDict(TypedDict): ### Protocols (for internal use only) +@final +@type_check_only +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... + @type_check_only class _SupportsFileMethods(SupportsFlush, Protocol): # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile` @@ -1077,12 +1070,7 @@ class _SupportsFileMethods(SupportsFlush, Protocol): def seek(self, offset: int, whence: int, /) -> object: ... @type_check_only -class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): - pass - -@type_check_only -class _SupportsItem(Protocol[_T_co]): - def item(self, /) -> _T_co: ... 
+class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... @type_check_only class _SupportsDLPack(Protocol[_T_contra]): @@ -1121,7 +1109,6 @@ class _HasDateAttributes(Protocol): @property def year(self) -> int: ... - ### Mixins (for internal use only) @type_check_only @@ -1166,7 +1153,7 @@ __NUMPY_SETUP__: Final[L[False]] = False __numpy_submodules__: Final[set[LiteralString]] = ... __former_attrs__: Final[_FormerAttrsDict] = ... __future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... -__array_api_version__: Final[L["2023.12"]] = "2023.12" +__array_api_version__: Final[L["2024.12"]] = "2024.12" test: Final[PytestTester] = ... @type_check_only @@ -1183,30 +1170,32 @@ class _DTypeMeta(type): def _legacy(cls, /) -> bool: ... @final -class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): - names: None | tuple[builtins.str, ...] +class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): + names: tuple[builtins.str, ...] | None def __hash__(self) -> int: ... # `None` results in the default dtype @overload def __new__( cls, - dtype: None | type[float64], - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ... ) -> dtype[float64]: ... # Overload for `dtype` instances, scalar types, and instances that have a - # `dtype: dtype[_SCT]` attribute + # `dtype: dtype[_ScalarT]` attribute @overload def __new__( cls, - dtype: _DTypeLike[_SCT], - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: _DTypeLike[_ScalarT], + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_SCT]: ... + ) -> dtype[_ScalarT]: ... 
# Builtin types # @@ -1221,54 +1210,55 @@ class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[builtins.bool | np.bool], - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[np.bool]: ... - # NOTE: `_: type[int]` also accepts `type[int | bool]` @overload def __new__( cls, - dtype: type[int | int_ | np.bool], - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[int], # also accepts `type[builtins.bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[int_ | np.bool]: ... - # NOTE: `_: type[float]` also accepts `type[float | int | bool]` - # NOTE: `float64` inherits from `float` at runtime; but this isn't - # reflected in these stubs. So an explicit `float64` is required here. @overload def __new__( cls, - dtype: None | type[float | float64 | int_ | np.bool], - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[float], # also accepts `type[int | bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[float64 | int_ | np.bool]: ... - # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]` @overload def __new__( cls, - dtype: type[complex | complex128 | float64 | int_ | np.bool], - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[complex], # also accepts `type[float | int | bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[complex128 | float64 | int_ | np.bool]: ... 
@overload def __new__( cls, - dtype: type[bytes], # also includes `type[bytes_]` - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[bytes | ct.c_char] | _BytesCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[bytes_]: ... @overload def __new__( cls, - dtype: type[str], # also includes `type[str_]` - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[str] | _StrCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[str_]: ... # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to @@ -1280,9 +1270,10 @@ class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[memoryview | void], - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[void]: ... # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`, @@ -1290,136 +1281,213 @@ class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[_BuiltinObjectLike | object_], - align: builtins.bool = ..., - copy: builtins.bool = ..., + dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... - # Unions of builtins. + # `unsignedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: type[bytes | str], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[character]: ... 
+ dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint8]: ... @overload def __new__( cls, - dtype: type[bytes | str | memoryview], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[flexible]: ... + dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint16]: ... @overload def __new__( cls, - dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ... - - # `unsignedinteger` string-based representations and ctypes - @overload - def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... - @overload - def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... - @overload - def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... - @overload - def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... - @overload - def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... 
- @overload - def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... - @overload - def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... - # NOTE: We're assuming here that `uint_ptr_t == size_t`, - # an assumption that does not hold in rare cases (same for `ssize_t`) + dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint32]: ... @overload - def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + def __new__( + cls, + dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint64]: ... @overload - def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + def __new__( + cls, + dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uintp]: ... @overload - def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + def __new__( + cls, + dtype: _ULongCodes | type[ct.c_ulong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[ulong]: ... 
# `signedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... - @overload - def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... - @overload - def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... - @overload - def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... - @overload - def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + def __new__( + cls, + dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int8]: ... @overload - def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + def __new__( + cls, + dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int16]: ... @overload - def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... 
+ def __new__( + cls, + dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int32]: ... @overload - def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... + def __new__( + cls, + dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int64]: ... @overload - def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... + def __new__( + cls, + dtype: _IntPCodes | type[intp | ct.c_ssize_t], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[intp]: ... @overload - def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + def __new__( + cls, + dtype: _LongCodes | type[ct.c_long], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[long]: ... # `floating` string-based representations and ctypes @overload - def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... - @overload - def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... - @overload - def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... 
- @overload - def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... - @overload - def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... + def __new__( + cls, + dtype: _Float16Codes | _HalfCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float16]: ... @overload - def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + def __new__( + cls, + dtype: _Float32Codes | _SingleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float32]: ... + # float64 codes are covered by overload 1 @overload - def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + def __new__( + cls, + dtype: _LongDoubleCodes | type[ct.c_longdouble], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[longdouble]: ... # `complexfloating` string-based representations @overload - def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... - @overload - def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... - @overload - def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... 
+ def __new__( + cls, + dtype: _Complex64Codes | _CSingleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... @overload - def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... + def __new__( + cls, + dtype: _Complex128Codes | _CDoubleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... @overload - def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... # Miscellaneous string-based representations and ctypes @overload - def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... - @overload - def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... - @overload - def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... - @overload - def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... - @overload - def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... 
- @overload - def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + def __new__( + cls, + dtype: _TD64Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[timedelta64]: ... @overload - def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + def __new__( + cls, + dtype: _DT64Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[datetime64]: ... # `StringDType` requires special treatment because it has no scalar type @overload def __new__( cls, dtype: dtypes.StringDType | _StringCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ... + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., ) -> dtypes.StringDType: ... # Combined char-codes and ctypes, analogous to the scalar-type hierarchy @@ -1427,104 +1495,90 @@ class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[unsignedinteger[Any]]: ... + ) -> dtype[unsignedinteger]: ... @overload def __new__( cls, dtype: _SignedIntegerCodes | _SignedIntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[signedinteger[Any]]: ... + ) -> dtype[signedinteger]: ... 
@overload def __new__( cls, dtype: _IntegerCodes | _IntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[integer[Any]]: ... + ) -> dtype[integer]: ... @overload def __new__( cls, dtype: _FloatingCodes | _FloatingCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[floating[Any]]: ... + ) -> dtype[floating]: ... @overload def __new__( cls, dtype: _ComplexFloatingCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[complexfloating[Any, Any]]: ... + ) -> dtype[complexfloating]: ... @overload def __new__( cls, dtype: _InexactCodes | _FloatingCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[inexact[Any]]: ... - @overload - def __new__( - cls, - dtype: _NumberCodes | _NumberCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[number[Any]]: ... + ) -> dtype[inexact]: ... @overload def __new__( cls, - dtype: _CharacterCodes | type[ct.c_char], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., + dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., ) -> dtype[character]: ... - @overload - def __new__( - cls, - dtype: _FlexibleCodes | type[ct.c_char], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[flexible]: ... 
- @overload - def __new__( - cls, - dtype: _GenericCodes | _GenericCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[generic]: ... # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... @overload def __new__( cls, dtype: builtins.str, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[Any]: ... + ) -> dtype: ... # Catch-all overload for object-likes - # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some - # (static) type `T` s.t. `object_ <: T <: builtins.object` (`<:` denotes - # the subtyping relation, the (gradual) typing analogue of `issubclass()`). - # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types + # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave + # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union). + # So the union of a type and `Any` is not the same "union type" that all other + # unions are (by definition). + # https://typing.python.org/en/latest/spec/concepts.html#union-types @overload def __new__( cls, dtype: type[object], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[object_ | Any]: ... @@ -1533,28 +1587,28 @@ class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): @overload def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... @overload - def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype[Any]: ... + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ... # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload - def __mul__(self: _DType, value: L[1], /) -> _DType: ... 
+ def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ... @overload - def __mul__(self: _FlexDType, value: SupportsIndex, /) -> _FlexDType: ... + def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... @overload def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... # NOTE: `__rmul__` seems to be broken when used in combination with - # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for + # literals as of mypy 0.902. Set the return-type to `dtype` for # now for non-flexible dtypes. @overload - def __rmul__(self: _FlexDType, value: SupportsIndex, /) -> _FlexDType: ... + def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... @overload - def __rmul__(self, value: SupportsIndex, /) -> dtype[Any]: ... + def __rmul__(self, value: SupportsIndex, /) -> dtype: ... - def __gt__(self, other: DTypeLike, /) -> builtins.bool: ... - def __ge__(self, other: DTypeLike, /) -> builtins.bool: ... - def __lt__(self, other: DTypeLike, /) -> builtins.bool: ... - def __le__(self, other: DTypeLike, /) -> builtins.bool: ... + def __gt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __ge__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __lt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __le__(self, other: DTypeLike | None, /) -> builtins.bool: ... # Explicitly defined `__eq__` and `__ne__` to get around mypy's # `strict_equality` option; even though their signatures are @@ -1565,7 +1619,7 @@ class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): @property def alignment(self) -> int: ... @property - def base(self) -> dtype[Any]: ... + def base(self) -> dtype: ... @property def byteorder(self) -> _ByteOrderChar: ... @property @@ -1573,7 +1627,7 @@ class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): @property def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ... 
@property - def fields(self,) -> None | MappingProxyType[LiteralString, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... + def fields(self,) -> MappingProxyType[LiteralString, tuple[dtype, int] | tuple[dtype, int, Any]] | None: ... @property def flags(self) -> int: ... @property @@ -1589,64 +1643,80 @@ class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): @property def kind(self) -> _DTypeKind: ... @property - def metadata(self) -> None | MappingProxyType[builtins.str, Any]: ... + def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ... @property def name(self) -> LiteralString: ... @property def num(self) -> _DTypeNum: ... @property - def shape(self) -> tuple[()] | _Shape: ... + def shape(self) -> _AnyShape: ... @property def ndim(self) -> int: ... @property - def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ... + def subdtype(self) -> tuple[dtype, _AnyShape] | None: ... def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ... @property def str(self) -> LiteralString: ... @property - def type(self) -> type[_SCT_co]: ... - + def type(self) -> type[_ScalarT_co]: ... @final class flatiter(Generic[_ArrayT_co]): - __hash__: ClassVar[None] + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + @property - def base(self) -> _ArrayT_co: ... + def base(self, /) -> _ArrayT_co: ... @property - def coords(self) -> _Shape: ... + def coords(self: flatiter[ndarray[_ShapeT]], /) -> _ShapeT: ... @property - def index(self) -> int: ... - def copy(self) -> _ArrayT_co: ... - def __iter__(self) -> Self: ... - def __next__(self: flatiter[NDArray[_SCT]]) -> _SCT: ... - def __len__(self) -> int: ... - @overload + def index(self, /) -> int: ... + + # iteration + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Self: ... + def __next__(self: flatiter[NDArray[_ScalarT]], /) -> _ScalarT: ... 
+ + # indexing + @overload # nd: _[()] + def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... + @overload # 0d; _[] + def __getitem__(self: flatiter[NDArray[_ScalarT]], key: int | integer, /) -> _ScalarT: ... + @overload # 1d; _[[*]], _[:], _[...] def __getitem__( - self: flatiter[NDArray[_SCT]], - key: int | integer[Any] | tuple[int | integer[Any]], - ) -> _SCT: ... - @overload + self: flatiter[ndarray[Any, _DTypeT]], + key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], + /, + ) -> ndarray[tuple[int], _DTypeT]: ... + @overload # 2d; _[[*[*]]] def __getitem__( - self, - key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], - ) -> _ArrayT_co: ... - # TODO: `__setitem__` operates via `unsafe` casting rules, and can - # thus accept any type accepted by the relevant underlying `np.generic` - # constructor. - # This means that `value` must in reality be a supertype of `npt.ArrayLike`. - def __setitem__( - self, - key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], - value: Any, - ) -> None: ... - @overload - def __array__(self: flatiter[ndarray[_1DShapeT, _DType]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DType]: ... - @overload - def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DType, /) -> ndarray[_1DShapeT, _DType]: ... - @overload - def __array__(self: flatiter[ndarray[_Shape, _DType]], dtype: None = ..., /) -> ndarray[_Shape, _DType]: ... - @overload - def __array__(self, dtype: _DType, /) -> ndarray[_Shape, _DType]: ... + self: flatiter[ndarray[Any, _DTypeT]], + key: list[list[int]], + /, + ) -> ndarray[tuple[int, int], _DTypeT]: ... + @overload # ?d + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: NDArray[integer] | _NestedSequence[int], + /, + ) -> ndarray[_AnyShape, _DTypeT]: ... 
+ + # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any + # type accepted by the relevant underlying `np.generic` constructor, which isn't + # known statically. So we cannot meaningfully annotate the value parameter. + def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... + + # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to + # avoid confusion + def __array__( + self: flatiter[ndarray[Any, _DTypeT]], + dtype: None = None, + /, + *, + copy: None = None, + ) -> ndarray[tuple[int], _DTypeT]: ... + + # This returns a flat copy of the underlying array, not of the iterator itself + def copy(self: flatiter[ndarray[Any, _DTypeT]], /) -> ndarray[tuple[int], _DTypeT]: ... @type_check_only class _ArrayOrScalarCommon: @@ -1673,7 +1743,7 @@ class _ArrayOrScalarCommon: def __int__(self, /) -> int: ... def __float__(self, /) -> float: ... def __copy__(self) -> Self: ... - def __deepcopy__(self, memo: None | dict[int, Any], /) -> Self: ... + def __deepcopy__(self, memo: dict[int, Any] | None, /) -> Self: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? # xref numpy/numpy#17368 @@ -1684,12 +1754,24 @@ class _ArrayOrScalarCommon: def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ... def dumps(self) -> bytes: ... def tobytes(self, order: _OrderKACF = ...) -> bytes: ... - # NOTE: `tostring()` is deprecated and therefore excluded - # def tostring(self, order=...): ... - def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, sep: str = ..., format: str = ...) -> None: ... + def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, /, sep: str = "", format: str = "%s") -> None: ... # generics and 0d arrays return builtin scalars def tolist(self) -> Any: ... - def to_device(self, device: L["cpu"], /, *, stream: None | int | Any = ...) -> Self: ... + def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) 
-> Self: ... + + # NOTE: for `generic`, these two methods don't do anything + def fill(self, /, value: Incomplete) -> None: ... + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, /, mode: _ModeKind = "raise") -> None: ... + + # NOTE: even on `generic` this seems to work + def setflags( + self, + /, + *, + write: builtins.bool | None = None, + align: builtins.bool | None = None, + uic: builtins.bool | None = None, + ) -> None: ... @property def __array_interface__(self) -> dict[str, Any]: ... @@ -1701,7 +1783,7 @@ class _ArrayOrScalarCommon: def __setstate__(self, state: tuple[ SupportsIndex, # version _ShapeLike, # Shape - _DType_co, # DType + _DTypeT_co, # DType np.bool, # F-continuous bytes | list[Any], # Data ], /) -> None: ... @@ -1711,31 +1793,32 @@ class _ArrayOrScalarCommon: def argsort( self, - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., *, - stable: None | bool = ..., - ) -> NDArray[Any]: ... + stable: builtins.bool | None = ..., + ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... @overload # axis=index, out=ndarray - def argmax(self, /, axis: SupportsIndex | None, out: _ArrayT, *, keepdims: builtins.bool = False) -> _ArrayT: ... + def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... @overload - def argmax(self, /, axis: SupportsIndex | None = None, *, out: _ArrayT, keepdims: builtins.bool = False) -> _ArrayT: ... 
+ def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... @overload # axis=index, out=ndarray - def argmin(self, /, axis: SupportsIndex | None, out: _ArrayT, *, keepdims: builtins.bool = False) -> _ArrayT: ... + def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... @overload - def argmin(self, /, axis: SupportsIndex | None = None, *, out: _ArrayT, keepdims: builtins.bool = False) -> _ArrayT: ... + def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + # Keep in sync with `MaskedArray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @overload # out=ndarray @@ -1771,6 +1854,7 @@ class _ArrayOrScalarCommon: @overload def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `MaskedArray.cumprod` @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray @@ -1778,6 +1862,7 @@ class _ArrayOrScalarCommon: @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `MaskedArray.cumsum` @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... 
@overload # out: ndarray @@ -1791,9 +1876,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def max( @@ -1801,9 +1887,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def max( @@ -1812,9 +1899,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1823,9 +1910,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def min( @@ -1833,9 +1921,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
@overload def min( @@ -1844,9 +1933,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1856,9 +1945,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def sum( @@ -1867,9 +1957,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def sum( @@ -1879,9 +1970,9 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1891,9 +1982,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@overload def prod( @@ -1902,9 +1994,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def prod( @@ -1914,9 +2007,9 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1925,9 +2018,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def mean( @@ -1936,9 +2029,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def mean( @@ -1948,8 +2041,8 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
@overload @@ -1959,11 +2052,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def std( @@ -1972,11 +2065,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... @overload def std( @@ -1986,10 +2079,10 @@ class _ArrayOrScalarCommon: *, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1999,11 +2092,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... 
@overload def var( @@ -2012,11 +2105,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... @overload def var( @@ -2026,36 +2119,36 @@ class _ArrayOrScalarCommon: *, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... -class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @property - def base(self) -> None | NDArray[Any]: ... + def base(self) -> NDArray[Any] | None: ... @property def ndim(self) -> int: ... @property def size(self) -> int: ... @property - def real(self: _HasDTypeWithRealAndImag[_SCT, object], /) -> ndarray[_ShapeT_co, dtype[_SCT]]: ... + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... @real.setter def real(self, value: ArrayLike, /) -> None: ... @property - def imag(self: _HasDTypeWithRealAndImag[object, _SCT], /) -> ndarray[_ShapeT_co, dtype[_SCT]]: ... + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... @imag.setter def imag(self, value: ArrayLike, /) -> None: ... 
def __new__( cls, shape: _ShapeLike, - dtype: DTypeLike = ..., - buffer: None | _SupportsBuffer = ..., + dtype: DTypeLike | None = ..., + buffer: _SupportsBuffer | None = ..., offset: SupportsIndex = ..., - strides: None | _ShapeLike = ..., + strides: _ShapeLike | None = ..., order: _OrderKACF = ..., ) -> Self: ... @@ -2065,13 +2158,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__( - self, dtype: None = ..., /, *, copy: None | bool = ... - ) -> ndarray[_ShapeT_co, _DType_co]: ... + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__( - self, dtype: _DType, /, *, copy: None | bool = ... - ) -> ndarray[_ShapeT_co, _DType]: ... + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... def __array_ufunc__( self, @@ -2092,26 +2181,27 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__` # is a pseudo-abstract method the type has been narrowed down in order to # grant subclasses a bit more flexibility - def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ... + def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... def __array_wrap__( self, - array: ndarray[_ShapeT, _DType], - context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., return_scalar: builtins.bool = ..., /, - ) -> ndarray[_ShapeT, _DType]: ... + ) -> ndarray[_ShapeT, _DTypeT]: ... + # Keep in sync with `MaskedArray.__getitem__` @overload - def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_Shape, _DType_co]: ... 
+ def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... @overload - def __getitem__(self, key: _ToIndices, /) -> ndarray[_Shape, _DType_co]: ... - @overload - def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype[Any]]: ... + def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # can be of any shape + def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ... @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, dtype[void]]: ... @overload # flexible | object_ | bool def __setitem__( @@ -2162,16 +2252,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @property def ctypes(self) -> _ctypes[int]: ... + + # @property def shape(self) -> _ShapeT_co: ... @shape.setter + @deprecated("In-place shape modification will be deprecated in NumPy 2.5.", category=PendingDeprecationWarning) def shape(self, value: _ShapeLike) -> None: ... + + # @property def strides(self) -> _Shape: ... @strides.setter + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... + + # def byteswap(self, inplace: builtins.bool = ...) -> Self: ... - def fill(self, value: Any) -> None: ... @property def flat(self) -> flatiter[Self]: ... @@ -2185,6 +2282,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): *args: SupportsIndex, ) -> str: ... + # keep in sync with `ma.MaskedArray.tolist` + @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` + def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... 
@overload def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... @overload @@ -2197,27 +2297,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def tolist(self, /) -> Any: ... @overload - def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... + def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = True) -> None: ... @overload - def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... - - def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... + # keep in sync with `ma.MaskedArray.squeeze` def squeeze( self, - axis: None | SupportsIndex | tuple[SupportsIndex, ...] = ..., - ) -> ndarray[_Shape, _DType_co]: ... + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... - def swapaxes( - self, - axis1: SupportsIndex, - axis2: SupportsIndex, - ) -> ndarray[_Shape, _DType_co]: ... + def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... @overload - def transpose(self, axes: None | _ShapeLike, /) -> Self: ... + def transpose(self, axes: _ShapeLike | None, /) -> Self: ... @overload - def transpose(self, *axes: SupportsIndex) -> Self: ... + def transpose(self, /, *axes: SupportsIndex) -> Self: ... @overload def all( @@ -2231,7 +2327,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def all( self, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: SupportsIndex = False, *, @@ -2240,7 +2336,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def all( self, - axis: None | int | tuple[int, ...], + axis: int | tuple[int, ...] 
| None, out: _ArrayT, keepdims: SupportsIndex = False, *, @@ -2249,7 +2345,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def all( self, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, keepdims: SupportsIndex = False, @@ -2268,7 +2364,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def any( self, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: SupportsIndex = False, *, @@ -2277,7 +2373,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def any( self, - axis: None | int | tuple[int, ...], + axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: SupportsIndex = False, *, @@ -2286,7 +2382,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def any( self, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] 
| None = None, *, out: _ArrayT, keepdims: SupportsIndex = False, @@ -2297,8 +2393,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def partition( self, - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: None = None, @@ -2306,8 +2402,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def partition( self: NDArray[void], - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, @@ -2317,8 +2413,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def argpartition( self, - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: None = None, @@ -2326,114 +2422,142 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def argpartition( self: NDArray[void], - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, ) -> NDArray[intp]: ... - # + # keep in sync with `ma.MaskedArray.diagonal` def diagonal( self, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - ) -> ndarray[_Shape, _DType_co]: ... + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... # 1D + 1D returns a scalar; # all other with at least 1 non-0D array return an ndarray. @overload - def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ... + def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... 
- - # `nonzero()` is deprecated for 0d arrays/generics - def nonzero(self) -> tuple[NDArray[intp], ...]: ... + def dot(self, b: ArrayLike, /, out: _ArrayT) -> _ArrayT: ... - # `put` is technically available to `generic`, - # but is pointless as `generic`s are immutable - def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + # `nonzero()` raises for 0d arrays/generics + def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... @overload - def searchsorted( # type: ignore[misc] + def searchsorted( self, # >= 1D array v: _ScalarLike_co, # 0D array-like - side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> intp: ... @overload def searchsorted( self, # >= 1D array v: ArrayLike, - side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> NDArray[intp]: ... def sort( self, - axis: SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., - *, - stable: None | bool = ..., + /, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: builtins.bool | None = None, ) -> None: ... + # Keep in sync with `MaskedArray.trace` @overload def trace( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, ) -> Any: ... 
@overload def trace( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: _ArrayT = ..., + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D array + /, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, ) -> _ArrayT: ... @overload - def take( # type: ignore[misc] - self: NDArray[_SCT], + def take( + self: NDArray[_ScalarT], indices: _IntLike_co, - axis: None | SupportsIndex = ..., - out: None = ..., + /, + axis: SupportsIndex | None = ..., + out: None = None, mode: _ModeKind = ..., - ) -> _SCT: ... + ) -> _ScalarT: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: None = ..., + /, + axis: SupportsIndex | None = ..., + out: None = None, mode: _ModeKind = ..., - ) -> ndarray[_Shape, _DType_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload def take( self, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: _ArrayT = ..., + /, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... - - def repeat( + @overload + def take( self, - repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - ) -> ndarray[_Shape, _DType_co]: ... + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + + # keep in sync with `ma.MaskedArray.repeat` + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ... 
- def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DType_co]: ... - def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DType_co]: ... + # keep in sync with `ma.MaskedArray.flatten` and `ma.MaskedArray.ravel` + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + # Keep in sync with `MaskedArray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... @@ -2445,16 +2569,16 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[()], _DType_co]: ... + ) -> ndarray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d def reshape( self, - shape: _AnyShapeType, + shape: _AnyShapeT, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_AnyShapeType, _DType_co]: ... + ) -> ndarray[_AnyShapeT, _DTypeT_co]: ... @overload # (index) def reshape( self, @@ -2463,7 +2587,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[int], _DType_co]: ... + ) -> ndarray[tuple[int], _DTypeT_co]: ... @overload # (index, index) def reshape( self, @@ -2473,7 +2597,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[int, int], _DType_co]: ... + ) -> ndarray[tuple[int, int], _DTypeT_co]: ... 
@overload # (index, index, index) def reshape( self, @@ -2484,7 +2608,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[int, int, int], _DType_co]: ... + ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... @overload # (index, index, index, index) def reshape( self, @@ -2496,7 +2620,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[int, int, int, int], _DType_co]: ... + ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... @overload # (int, *(index, ...)) def reshape( self, @@ -2505,7 +2629,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): *shape: SupportsIndex, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_Shape, _DType_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload # (sequence[index]) def reshape( self, @@ -2514,48 +2638,48 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_Shape, _DType_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload def astype( self, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype[_SCT]]: ... + ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... @overload def astype( self, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype[Any]]: ... + ) -> ndarray[_ShapeT_co, dtype]: ... # @overload # () def view(self, /) -> Self: ... @overload # (dtype: T) - def view(self, /, dtype: _DType | _HasDType[_DType]) -> ndarray[_ShapeT_co, _DType]: ... 
+ def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... @overload # (dtype: dtype[T]) - def view(self, /, dtype: _DTypeLike[_SCT]) -> NDArray[_SCT]: ... + def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... @overload # (type: T) def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... @overload # (_: T) def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... @overload # (dtype: ?) - def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype[Any]]: ... - @overload # (dtype: ?, type: type[T]) + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... + @overload # (dtype: ?, type: T) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... - def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: CanIndex = 0) -> None: ... + def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload - def getfield(self, dtype: _DTypeLike[_SCT], offset: SupportsIndex = 0) -> NDArray[_SCT]: ... + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... @overload - def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... @@ -2563,12 +2687,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... - @overload # == 1-d & object_ - def __iter__(self: ndarray[tuple[int], dtype[object_]], /) -> Iterator[Any]: ... - @overload # == 1-d - def __iter__(self: ndarray[tuple[int], dtype[_SCT]], /) -> Iterator[_SCT]: ... 
+ # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. + @overload + def __iter__( + self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / + ) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + @overload # == 1-d & StringDType + def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__(self: ndarray[tuple[int, int, Unpack[tuple[int, ...]]], dtype[_SCT]], /) -> Iterator[NDArray[_SCT]]: ... + def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... @overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... @@ -2648,13 +2779,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed # @overload - # def __abs__(self: ndarray[_ShapeType, dtypes.Complex64DType], /) -> ndarray[_ShapeType, dtypes.Float32DType]: ... + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeType, dtypes.Complex128DType], /) -> ndarray[_ShapeType, dtypes.Float64DType]: ... + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeType, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeType, dtypes.LongDoubleDType]: ... + # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... 
# @overload - # def __abs__(self: ndarray[_ShapeType, dtype[complex128]], /) -> ndarray[_ShapeType, dtype[float64]]: ... + # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... @overload def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... @overload @@ -2711,15 +2842,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ... 
@overload @@ -2734,7 +2865,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __mod__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -2759,7 +2890,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rmod__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -2784,7 +2915,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] @overload - def __divmod__(self: NDArray[floating[_64Bit]], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload @@ -2805,7 +2936,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... 
# type: ignore[overload-overlap] @overload - def __rdivmod__(self: NDArray[floating[_64Bit]], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload @@ -2817,6 +2948,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + # Keep in sync with `MaskedArray.__add__` @overload def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2826,11 +2958,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __add__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __add__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2850,10 +2982,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... 
@overload + def __add__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __add__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __add__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload def __add__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2863,11 +3006,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __radd__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2887,10 +3030,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload + def __radd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... 
+ @overload + def __radd__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __radd__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload def __radd__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__sub__` @overload def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2900,11 +3054,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __sub__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2928,6 +3082,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rsub__` @overload def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
@overload @@ -2937,11 +3092,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rsub__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2965,6 +3120,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__mul__` @overload def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2974,11 +3130,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... 
+ def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2988,7 +3144,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload @@ -2996,10 +3152,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload + def __mul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3009,11 +3172,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -3023,7 +3186,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload @@ -3031,16 +3194,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload + def __rmul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__truediv__` @overload - def __truediv__(self: _ArrayInt_co | NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __truediv__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -3066,12 +3236,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rtruediv__` @overload - def __rtruediv__(self: _ArrayInt_co | NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -3095,6 +3266,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__floordiv__` @overload def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... 
@overload @@ -3104,7 +3276,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __floordiv__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -3124,6 +3296,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rfloordiv__` @overload def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload @@ -3133,7 +3306,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rfloordiv__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -3141,7 +3314,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... 
# type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... @overload @@ -3151,173 +3324,179 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__pow__` @overload - def __pow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload - def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
+ def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + def __pow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload - def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... 
@overload - def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... @overload - def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + # Keep in sync with `MaskedArray.__rpow__` @overload - def __rpow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload - def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... 
@overload - def __rpow__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + def __rpow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload - def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... @overload - def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... 
@overload - def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
@overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
@overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __and__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rand__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] @overload - def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __xor__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __or__(self: NDArray[object_], other: Any, /) -> Any: ... 
@overload def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __ror__(self: NDArray[object_], other: Any, /) -> Any: ... @overload @@ -3331,239 +3510,143 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # object and its value is >= 0 # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't # work, as this will lead to `false negatives` when using these inplace ops. - @overload - def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iadd__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iadd__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iadd__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iadd__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iadd__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
- @overload - def __iadd__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # += + @overload # type: ignore[misc] + def __iadd__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __isub__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __isub__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __isub__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iadd__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __isub__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iadd__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __isub__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iadd__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __isub__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iadd__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... @overload - def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iadd__(self: _BytesArrayT, other: _ArrayLikeBytes_co, /) -> _BytesArrayT: ... 
@overload - def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iadd__(self: _StringArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> _StringArrayT: ... @overload - def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iadd__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # -= + @overload # type: ignore[misc] + def __isub__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __isub__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __imul__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imul__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imul__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imul__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __isub__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __imul__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __isub__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... @overload - def __imul__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
+ def __isub__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # *= + @overload # type: ignore[misc] + def __imul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __itruediv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __itruediv__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __itruediv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __itruediv__( - self: NDArray[complexfloating[Any]], - other: _ArrayLikeComplex_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imul__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imul__(self: _NumberCharacterArrayT, other: _ArrayLikeInt_co, /) -> _NumberCharacterArrayT: ... @overload - def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # @= + @overload # type: ignore[misc] + def __imatmul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __ifloordiv__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ifloordiv__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ifloordiv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
+ def __imatmul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __ifloordiv__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imatmul__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __ifloordiv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imatmul__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __ifloordiv__( - self: NDArray[complexfloating[Any]], - other: _ArrayLikeComplex_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imatmul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # **= + @overload # type: ignore[misc] + def __ipow__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __ipow__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ipow__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ipow__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ipow__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ipow__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... 
@overload - def __ipow__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # /= + @overload # type: ignore[misc] + def __itruediv__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __imod__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imod__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imod__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imod__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... @overload - def __imod__( - self: NDArray[timedelta64], - other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # //= + # keep in sync with `__imod__` + @overload # type: ignore[misc] + def __ifloordiv__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __ilshift__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... 
- @overload - def __ilshift__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: _FloatingTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingTimedeltaArrayT: ... @overload - def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # %= + # keep in sync with `__ifloordiv__` + @overload # type: ignore[misc] + def __imod__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __irshift__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imod__(self: _FloatingArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingArrayT: ... @overload - def __irshift__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imod__(self: _TimedeltaArrayT, other: _ArrayLike[timedelta64], /) -> _TimedeltaArrayT: ... @overload - def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __imod__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # <<= + # keep in sync with `__irshift__` + @overload # type: ignore[misc] + def __ilshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iand__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iand__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
+ def __ilshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # >>= + # keep in sync with `__ilshift__` + @overload # type: ignore[misc] + def __irshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ixor__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ixor__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __irshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # &= + # keep in sync with `__ixor__` and `__ior__` + @overload # type: ignore[misc] + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ior__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ior__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iand__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __iand__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # ^= + # keep in sync with `__iand__` and `__ior__` + @overload # type: ignore[misc] + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
- @overload - def __imatmul__(self: NDArray[unsignedinteger[Any]], other: _ArrayLikeUInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imatmul__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imatmul__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ixor__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __imatmul__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __imatmul__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ixor__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # |= + # keep in sync with `__iand__` and `__ixor__` + @overload # type: ignore[misc] + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __imatmul__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ior__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ior__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # def __dlpack__( - self: NDArray[number[Any]], + self: NDArray[number], /, *, stream: int | Any | None = None, @@ -3575,7 +3658,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property - def dtype(self) -> _DType_co: ... + def dtype(self) -> _DTypeT_co: ... 
# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, # the `@abstractmethod` decorator is herein used to (forcefully) deny @@ -3585,14 +3668,54 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __init__(self, *args: Any, **kwargs: Any) -> None: ... - def __hash__(self) -> int: ... + def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... + + # NOTE: Technically this doesn't exist at runtime, but it is unlikely to lead to + # type-unsafe situations (the abstract scalar types cannot be instantiated + # themselves) and is convenient to have, so we include it regardless. See + # https://github.com/numpy/numpy/issues/30445 for use-cases and discussion. + def __hash__(self, /) -> int: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @overload - def __array__(self, dtype: _DType, /) -> ndarray[tuple[()], _DType]: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + + @overload + def __array_wrap__( + self, + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None, + return_scalar: L[False], + /, + ) -> ndarray[_ShapeT, _DTypeT]: ... + @overload + def __array_wrap__( + self, + array: ndarray[tuple[()], dtype[_ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> _ScalarT: ... + @overload + def __array_wrap__( + self, + array: ndarray[_Shape1T, _DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ndarray[_Shape1T, _DTypeT]: ... 
+ @overload + def __array_wrap__( + self, + array: ndarray[_ShapeT, dtype[_ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> _ScalarT | ndarray[_ShapeT, dtype[_ScalarT]]: ... @property def base(self) -> None: ... @@ -3611,85 +3734,110 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def item(self, /) -> _ItemT_co: ... @overload def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + @override def tolist(self, /) -> _ItemT_co: ... - def byteswap(self, inplace: L[False] = ...) -> Self: ... + # NOTE: these technically exist, but will always raise when called + def trace( # type: ignore[misc] + self: Never, + /, + offset: L[0] = 0, + axis1: L[0] = 0, + axis2: L[1] = 1, + dtype: None = None, + out: None = None, + ) -> Never: ... + def diagonal(self: Never, /, offset: L[0] = 0, axis1: L[0] = 0, axis2: L[1] = 1) -> Never: ... # type: ignore[misc] + def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] + def sort(self: Never, /, axis: L[-1] = -1, kind: None = None, order: None = None, *, stable: None = None) -> Never: ... # type: ignore[misc] + def nonzero(self: Never, /) -> Never: ... # type: ignore[misc] + def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] + def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] + + # NOTE: this wont't raise, but won't do anything either + @overload + def resize(self, /, *, refcheck: builtins.bool = True) -> None: ... + @overload + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = True) -> None: ... + + # + def byteswap(self, /, inplace: L[False] = False) -> Self: ... 
+ # @overload def astype( self, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> _SCT: ... + /, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, + ) -> _ScalarT: ... @overload def astype( self, - dtype: DTypeLike, - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> Any: ... + /, + dtype: DTypeLike | None, + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, + ) -> Incomplete: ... # NOTE: `view` will perform a 0D->scalar cast, # thus the array `type` is irrelevant to the output type @overload - def view(self, type: type[NDArray[Any]] = ...) -> Self: ... + def view(self, type: type[ndarray] = ...) -> Self: ... @overload - def view( - self, - dtype: _DTypeLike[_SCT], - type: type[NDArray[Any]] = ..., - ) -> _SCT: ... + def view(self, /, dtype: _DTypeLike[_ScalarT], type: type[ndarray] = ...) -> _ScalarT: ... @overload - def view( - self, - dtype: DTypeLike, - type: type[NDArray[Any]] = ..., - ) -> Any: ... + def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... @overload - def getfield( - self, - dtype: _DTypeLike[_SCT], - offset: SupportsIndex = ... - ) -> _SCT: ... + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> _ScalarT: ... @overload - def getfield( - self, - dtype: DTypeLike, - offset: SupportsIndex = ... - ) -> Any: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... 
@overload - def take( # type: ignore[misc] + def take( self, indices: _IntLike_co, - axis: None | SupportsIndex = ..., - out: None = ..., - mode: _ModeKind = ..., + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> Self: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: None = ..., - mode: _ModeKind = ..., + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Self]: ... @overload def take( self, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: _ArrayT = ..., - mode: _ModeKind = ..., + /, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = "raise", ) -> _ArrayT: ... - def repeat(self, repeats: _ArrayLikeInt_co, axis: None | SupportsIndex = ...) -> NDArray[Self]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... @@ -3702,7 +3850,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): order: _OrderACF = "C", copy: builtins.bool | None = None, ) -> Self: ... - @overload # ((1, *(1, ...))@_ShapeType) + @overload # ((1, *(1, ...))@_ShapeT) def reshape( self, shape: _1NShapeT, @@ -3774,10 +3922,10 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *sizes6_: SupportsIndex, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], Unpack[tuple[L[1], ...]]], dtype[Self]]: ... + ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... 
- def squeeze(self, axis: None | L[0] | tuple[()] = ...) -> Self: ... - def transpose(self, axes: None | tuple[()] = ..., /) -> Self: ... + def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... + def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... @overload def all( @@ -3794,21 +3942,21 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_SCT]], + out: ndarray[tuple[()], dtype[_ScalarT]], keepdims: SupportsIndex = False, *, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _SCT: ... + ) -> _ScalarT: ... @overload def all( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_SCT]], + out: ndarray[tuple[()], dtype[_ScalarT]], keepdims: SupportsIndex = False, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _SCT: ... + ) -> _ScalarT: ... @overload def any( @@ -3825,52 +3973,73 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_SCT]], + out: ndarray[tuple[()], dtype[_ScalarT]], keepdims: SupportsIndex = False, *, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _SCT: ... + ) -> _ScalarT: ... @overload def any( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_SCT]], + out: ndarray[tuple[()], dtype[_ScalarT]], keepdims: SupportsIndex = False, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _SCT: ... + ) -> _ScalarT: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property def dtype(self) -> _dtype[Self]: ... class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): - @abstractmethod - def __init__(self, value: _NumberItemT_co, /) -> None: ... 
+ @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` + def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... def __neg__(self) -> Self: ... def __pos__(self) -> Self: ... def __abs__(self) -> Self: ... - __add__: _NumberOp - __radd__: _NumberOp - __sub__: _NumberOp - __rsub__: _NumberOp - __mul__: _NumberOp - __rmul__: _NumberOp - __floordiv__: _NumberOp - __rfloordiv__: _NumberOp - __pow__: _NumberOp - __rpow__: _NumberOp - __truediv__: _NumberOp - __rtruediv__: _NumberOp - - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + def __add__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __radd__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __sub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... 
+ @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property @@ -3882,24 +4051,26 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def imag(self) -> np.bool[L[False]]: ... + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... @overload - def __init__(self: np.bool[L[False]], /) -> None: ... + def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... @overload - def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... + def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... @overload - def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... - @overload - def __init__(self, value: object, /) -> None: ... + def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... def __bool__(self, /) -> _BoolItemT_co: ... + @overload def __int__(self: np.bool[L[False]], /) -> L[0]: ... @overload def __int__(self: np.bool[L[True]], /) -> L[1]: ... @overload def __int__(self, /) -> L[0, 1]: ... - @deprecated("In future, it will be an error for 'np.bool' scalars to be interpreted as an index") - def __index__(self, /) -> L[0, 1]: ... 
+ def __abs__(self) -> Self: ... @overload @@ -3909,98 +4080,284 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __invert__(self, /) -> np.bool: ... - __add__: _BoolOp[np.bool] - __radd__: _BoolOp[np.bool] - __sub__: _BoolSub - __rsub__: _BoolSub - __mul__: _BoolOp[np.bool] - __rmul__: _BoolOp[np.bool] - __truediv__: _BoolTrueDiv - __rtruediv__: _BoolTrueDiv - __floordiv__: _BoolOp[int8] - __rfloordiv__: _BoolOp[int8] - __pow__: _BoolOp[int8] - __rpow__: _BoolOp[int8] - - __lshift__: _BoolBitOp[int8] - __rlshift__: _BoolBitOp[int8] - __rshift__: _BoolBitOp[int8] - __rrshift__: _BoolBitOp[int8] - @overload - def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... - @overload - def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + def __add__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload - def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __add__(self, other: int, /) -> int_: ... @overload - def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + def __add__(self, other: float, /) -> float64: ... @overload - def __and__(self, other: int, /) -> np.bool | intp: ... - __rand__ = __and__ + def __add__(self, other: complex, /) -> complex128: ... @overload - def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... - @overload - def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __radd__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __radd__(self, other: builtins.bool, /) -> bool_: ... @overload - def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... 
+ def __radd__(self, other: int, /) -> int_: ... @overload - def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + def __radd__(self, other: float, /) -> float64: ... @overload - def __xor__(self, other: int, /) -> np.bool | intp: ... - __rxor__ = __xor__ + def __radd__(self, other: complex, /) -> complex128: ... @overload - def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + def __sub__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __sub__(self, other: int, /) -> int_: ... @overload - def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + def __sub__(self, other: float, /) -> float64: ... @overload - def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __sub__(self, other: complex, /) -> complex128: ... + @overload - def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rsub__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __or__(self, other: int, /) -> np.bool | intp: ... - __ror__ = __or__ - - __mod__: _BoolMod - __rmod__: _BoolMod - __divmod__: _BoolDivMod - __rdivmod__: _BoolDivMod - - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + def __rsub__(self, other: int, /) -> int_: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... -# NOTE: This should _not_ be `Final` or a `TypeAlias` -bool_ = bool + @overload + def __mul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __mul__(self, other: int, /) -> int_: ... + @overload + def __mul__(self, other: float, /) -> float64: ... 
+ @overload + def __mul__(self, other: complex, /) -> complex128: ... -# NOTE: The `object_` constructor returns the passed object, so instances with type -# `object_` cannot exists (at runtime). -# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't -# be made generic. -@final -class object_(_RealMixin, generic[Any]): @overload - def __new__(cls, nothing_to_see_here: None = None, /) -> None: ... # type: ignore[misc] + def __rmul__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __new__(cls, stringy: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + def __rmul__(self, other: builtins.bool, /) -> bool_: ... @overload - def __new__(cls, array: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + def __rmul__(self, other: int, /) -> int_: ... @overload - def __new__(cls, sequence: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + + @overload + def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + @overload + def __pow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + @overload + def __rpow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... 
+ + @overload + def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __truediv__(self, other: float | integer | bool_, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @overload + def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + @overload + def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __floordiv__(self, other: int, /) -> int_: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + + @overload + def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rfloordiv__(self, other: int, /) -> int_: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + + # keep in sync with __floordiv__ + @overload + def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __mod__(self, other: int, /) -> int_: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + + # keep in sync with __rfloordiv__ + @overload + def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rmod__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rmod__(self, other: int, /) -> int_: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + @overload + def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... 
+ @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + @overload + def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + + @overload + def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __lshift__(self, other: int, /) -> int_: ... + + @overload + def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rlshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rlshift__(self, other: int, /) -> int_: ... + + # keep in sync with __lshift__ + @overload + def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __rshift__(self, other: int, /) -> int_: ... + + # keep in sync with __rlshift__ + @overload + def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rrshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rrshift__(self, other: int, /) -> int_: ... + + @overload + def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + @overload + def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __and__(self, other: int, /) -> np.bool | intp: ... 
+ __rand__ = __and__ + + @overload + def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + @overload + def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __xor__(self, other: int, /) -> np.bool | intp: ... + __rxor__ = __xor__ + + @overload + def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __or__(self, other: int, /) -> np.bool | intp: ... + __ror__ = __or__ + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... 
+ @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + +# NOTE: This should _not_ be `Final` or a `TypeAlias` +bool_ = bool + +# NOTE: The `object_` constructor returns the passed object, so instances with type +# `object_` cannot exists (at runtime). +# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't +# be made generic. +@final +class object_(_RealMixin, generic): + @overload + def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] + @overload + def __new__(cls, value: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + @overload + def __new__(cls, value: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + @overload + def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] @overload def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] - def __init__(self, value: object = ..., /) -> None: ... + def __hash__(self, /) -> int: ... def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ def __call__(self, /, *args: object, **kwargs: object) -> Any: ... @@ -4010,56 +4367,268 @@ class object_(_RealMixin, generic[Any]): class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): @abstractmethod - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes def bit_count(self, /) -> int: ... def __index__(self, /) -> int: ... def __invert__(self, /) -> Self: ... - __truediv__: _IntTrueDiv[_NBit] - __rtruediv__: _IntTrueDiv[_NBit] - def __mod__(self, value: _IntLike_co, /) -> integer[Any]: ... 
- def __rmod__(self, value: _IntLike_co, /) -> integer[Any]: ... + @override # type: ignore[override] + @overload + def __truediv__(self, other: float | integer, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + def __floordiv__(self, value: _IntLike_co, /) -> integer: ... + def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... + def __mod__(self, value: _IntLike_co, /) -> integer: ... + def __rmod__(self, value: _IntLike_co, /) -> integer: ... + def __divmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + def __rdivmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + # Ensure that objects annotated as `integer` support bit-wise operations - def __lshift__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __rlshift__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __rshift__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __rrshift__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __and__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __rand__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __or__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __ror__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __xor__(self, other: _IntLike_co, /) -> integer[Any]: ... - def __rxor__(self, other: _IntLike_co, /) -> integer[Any]: ... - -class signedinteger(integer[_NBit1]): - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... 
- - __add__: _SignedIntOp[_NBit1] - __radd__: _SignedIntOp[_NBit1] - __sub__: _SignedIntOp[_NBit1] - __rsub__: _SignedIntOp[_NBit1] - __mul__: _SignedIntOp[_NBit1] - __rmul__: _SignedIntOp[_NBit1] - __floordiv__: _SignedIntOp[_NBit1] - __rfloordiv__: _SignedIntOp[_NBit1] - __pow__: _SignedIntOp[_NBit1] - __rpow__: _SignedIntOp[_NBit1] - __lshift__: _SignedIntBitOp[_NBit1] - __rlshift__: _SignedIntBitOp[_NBit1] - __rshift__: _SignedIntBitOp[_NBit1] - __rrshift__: _SignedIntBitOp[_NBit1] - __and__: _SignedIntBitOp[_NBit1] - __rand__: _SignedIntBitOp[_NBit1] - __xor__: _SignedIntBitOp[_NBit1] - __rxor__: _SignedIntBitOp[_NBit1] - __or__: _SignedIntBitOp[_NBit1] - __ror__: _SignedIntBitOp[_NBit1] - __mod__: _SignedIntMod[_NBit1] - __rmod__: _SignedIntMod[_NBit1] - __divmod__: _SignedIntDivMod[_NBit1] - __rdivmod__: _SignedIntDivMod[_NBit1] + def __lshift__(self, other: _IntLike_co, /) -> integer: ... + def __rlshift__(self, other: _IntLike_co, /) -> integer: ... + def __rshift__(self, other: _IntLike_co, /) -> integer: ... + def __rrshift__(self, other: _IntLike_co, /) -> integer: ... + def __and__(self, other: _IntLike_co, /) -> integer: ... + def __rand__(self, other: _IntLike_co, /) -> integer: ... + def __or__(self, other: _IntLike_co, /) -> integer: ... + def __ror__(self, other: _IntLike_co, /) -> integer: ... + def __xor__(self, other: _IntLike_co, /) -> integer: ... + def __rxor__(self, other: _IntLike_co, /) -> integer: ... + +class signedinteger(integer[_NBit]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... 
+ + @override # type: ignore[override] + @overload + def __radd__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: signedinteger, /) -> signedinteger: ... 
+ @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... 
+ @overload + def __mod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... 
+ @overload + def __rrshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: integer, /) -> signedinteger: ... int8 = signedinteger[_8Bit] int16 = signedinteger[_16Bit] @@ -4075,33 +4644,249 @@ long = signedinteger[_NBitLong] longlong = signedinteger[_NBitLongLong] class unsignedinteger(integer[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... 
- - __add__: _UnsignedIntOp[_NBit1] - __radd__: _UnsignedIntOp[_NBit1] - __sub__: _UnsignedIntOp[_NBit1] - __rsub__: _UnsignedIntOp[_NBit1] - __mul__: _UnsignedIntOp[_NBit1] - __rmul__: _UnsignedIntOp[_NBit1] - __floordiv__: _UnsignedIntOp[_NBit1] - __rfloordiv__: _UnsignedIntOp[_NBit1] - __pow__: _UnsignedIntOp[_NBit1] - __rpow__: _UnsignedIntOp[_NBit1] - __lshift__: _UnsignedIntBitOp[_NBit1] - __rlshift__: _UnsignedIntBitOp[_NBit1] - __rshift__: _UnsignedIntBitOp[_NBit1] - __rrshift__: _UnsignedIntBitOp[_NBit1] - __and__: _UnsignedIntBitOp[_NBit1] - __rand__: _UnsignedIntBitOp[_NBit1] - __xor__: _UnsignedIntBitOp[_NBit1] - __rxor__: _UnsignedIntBitOp[_NBit1] - __or__: _UnsignedIntBitOp[_NBit1] - __ror__: _UnsignedIntBitOp[_NBit1] - __mod__: _UnsignedIntMod[_NBit1] - __rmod__: _UnsignedIntMod[_NBit1] - __divmod__: _UnsignedIntDivMod[_NBit1] - __rdivmod__: _UnsignedIntDivMod[_NBit1] + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... 
+ @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | uint8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... 
+ + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | uint8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... 
+ + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | uint8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | uint8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __lshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rlshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rrshift__(self, other: signedinteger, /) -> signedinteger: ... 
+ + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __and__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rand__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __xor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rxor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __or__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __ror__(self, other: signedinteger, /) -> signedinteger: ... 
uint8: TypeAlias = unsignedinteger[_8Bit] uint16: TypeAlias = unsignedinteger[_16Bit] @@ -4116,29 +4901,158 @@ uint: TypeAlias = uintp ulong: TypeAlias = unsignedinteger[_NBitLong] ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] -class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): - @abstractmethod - def __init__(self, value: _InexactItemT_co | None = ..., /) -> None: ... +class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): + @abstractmethod + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + +class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: integer | floating, /) -> floating: ... + @overload + def __add__(self, other: float, /) -> Self: ... + @overload + def __add__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: integer | floating, /) -> floating: ... + @overload + def __radd__(self, other: float, /) -> Self: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: integer | floating, /) -> floating: ... + @overload + def __sub__(self, other: float, /) -> Self: ... + @overload + def __sub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... 
+ @overload + def __rsub__(self, other: integer | floating, /) -> floating: ... + @overload + def __rsub__(self, other: float, /) -> Self: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: integer | floating, /) -> floating: ... + @overload + def __mul__(self, other: float, /) -> Self: ... + @overload + def __mul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmul__(self, other: float, /) -> Self: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | float16 | uint8 | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | float16 | uint8 | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... 
+ @overload + def __truediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __truediv__(self, other: float, /) -> Self: ... + @overload + def __truediv__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rtruediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rtruediv__(self, other: float, /) -> Self: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating: ... + + # modular division ops + + @overload + def __floordiv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __floordiv__(self, other: float, /) -> Self: ... + + @overload + def __rfloordiv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rfloordiv__(self, other: float, /) -> Self: ... + + @overload + def __mod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: integer | floating, /) -> floating: ... + @overload + def __mod__(self, other: float, /) -> Self: ... + + @overload + def __rmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmod__(self, other: float, /) -> Self: ... + + @overload + def __divmod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[Self]: ... 
-class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): - def __init__(self, value: _ConvertibleToFloat | None = ..., /) -> None: ... - - __add__: _FloatOp[_NBit1] - __radd__: _FloatOp[_NBit1] - __sub__: _FloatOp[_NBit1] - __rsub__: _FloatOp[_NBit1] - __mul__: _FloatOp[_NBit1] - __rmul__: _FloatOp[_NBit1] - __truediv__: _FloatOp[_NBit1] - __rtruediv__: _FloatOp[_NBit1] - __floordiv__: _FloatOp[_NBit1] - __rfloordiv__: _FloatOp[_NBit1] - __pow__: _FloatOp[_NBit1] - __rpow__: _FloatOp[_NBit1] - __mod__: _FloatMod[_NBit1] - __rmod__: _FloatMod[_NBit1] - __divmod__: _FloatDivMod[_NBit1] - __rdivmod__: _FloatDivMod[_NBit1] + @overload + def __rdivmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes def is_integer(self, /) -> builtins.bool: ... @@ -4149,9 +5063,6 @@ float32: TypeAlias = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] - def __new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ... - - # @property def itemsize(self) -> L[8]: ... @property @@ -4163,11 +5074,17 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @property def imag(self) -> Self: ... def conjugate(self) -> Self: ... - def __getformat__(self, typestr: L["double", "float"], /) -> str: ... def __getnewargs__(self, /) -> tuple[float]: ... + @classmethod + def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented + # float64-specific operator overrides - @overload + # NOTE: Mypy reports [misc] errors about "unsafely overlapping signatures" for the + # reflected methods. But since they are identical to the non-reflected versions, + # these errors appear to be false positives. 
+ + @overload # type: ignore[override] def __add__(self, other: _Float64_co, /) -> float64: ... @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -4175,16 +5092,17 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __radd__(self, other: _Float64_co, /) -> float64: ... - @overload - def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __sub__(self, other: _Float64_co, /) -> float64: ... @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -4192,16 +5110,17 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rsub__(self, other: _Float64_co, /) -> float64: ... - @overload - def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
# type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __mul__(self, other: _Float64_co, /) -> float64: ... @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -4209,16 +5128,17 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rmul__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rmul__(self, other: _Float64_co, /) -> float64: ... - @overload - def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Float64_co, /) -> float64: ... @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -4226,16 +5146,17 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: _Float64_co, /) -> float64: ... 
- @overload - def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __floordiv__(self, other: _Float64_co, /) -> float64: ... @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -4243,8 +5164,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload @@ -4252,33 +5174,37 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... + @overload # type: ignore[override] + def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... @overload - def __pow__(self, other: _Float64_co, /) -> float64: ... - @overload - def __pow__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... - @overload - def __pow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __pow__(self, other: complex, /) -> float64 | complex128: ... 
+ def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload - def __rpow__(self, other: _Float64_co, /) -> float64: ... + def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] @overload - def __rpow__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload - def __rpow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload - def __rpow__(self, other: complex, /) -> float64 | complex128: ... - - def __mod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] - def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] + def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... - def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] - def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] + def __mod__(self, other: _Float64_co, /) -> float64: ... + def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... + def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... 
# type: ignore[misc] -half: TypeAlias = floating[_NBitHalf] -single: TypeAlias = floating[_NBitSingle] -double: TypeAlias = floating[_NBitDouble] +half: TypeAlias = float16 +single: TypeAlias = float32 +double: TypeAlias = float64 longdouble: TypeAlias = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. @@ -4287,108 +5213,103 @@ longdouble: TypeAlias = floating[_NBitLongDouble] class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): @overload - def __init__( - self, - real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., - imag: complex | SupportsFloat | SupportsIndex = ..., + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = 0, + imag: complex | SupportsFloat | SupportsIndex = 0, /, - ) -> None: ... + ) -> Self: ... @overload - def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ... + def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... @property - def real(self) -> floating[_NBit1]: ... # type: ignore[override] + def real(self) -> floating[_NBit1]: ... @property - def imag(self) -> floating[_NBit2]: ... # type: ignore[override] + def imag(self) -> floating[_NBit2]: ... # NOTE: `__complex__` is technically defined in the concrete subtypes def __complex__(self, /) -> complex: ... def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override] - @deprecated( - "The Python built-in `round` is deprecated for complex scalars, and will raise a `TypeError` in a future release. " - "Use `np.round` or `scalar.round` instead." - ) - def __round__(self, /, ndigits: SupportsIndex | None = None) -> Self: ... - @overload + @overload # type: ignore[override] def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... 
@overload def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... 
- @overload + @overload # type: ignore[override] def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + @overload # type: ignore[override] + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload - def __pow__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... - @overload - def __pow__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... - @overload - def __pow__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload - def __rpow__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __pow__( + self, other: complex | float64 | complex128, mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload - def __rpow__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... - @overload - def __rpow__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - -complex64: TypeAlias = complexfloating[_32Bit, _32Bit] + def __pow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... 
-class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] + @overload # type: ignore[override] + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload - def __new__( - cls, - real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., - imag: complex | SupportsFloat | SupportsIndex = ..., - /, - ) -> Self: ... + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload - def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ... + def __rpow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - # +complex64: TypeAlias = complexfloating[_32Bit] + +class complex128(complexfloating[_64Bit, _64Bit], complex): @property def itemsize(self) -> L[16]: ... @property @@ -4404,39 +5325,41 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc def __getnewargs__(self, /) -> tuple[float, float]: ... # complex128-specific operator overrides - @overload + @overload # type: ignore[override] def __add__(self, other: _Complex128_co, /) -> complex128: ... @overload def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __radd__(self, other: _Complex128_co, /) -> complex128: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __sub__(self, other: _Complex128_co, /) -> complex128: ... @overload def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rsub__(self, other: _Complex128_co, /) -> complex128: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... 
# type: ignore[override] - @overload + @overload # type: ignore[override] def __mul__(self, other: _Complex128_co, /) -> complex128: ... @overload def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rmul__(self, other: _Complex128_co, /) -> complex128: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Complex128_co, /) -> complex128: ... @overload def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + @overload # type: ignore[override] + def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload - def __pow__(self, other: _Complex128_co, /) -> complex128: ... - @overload - def __pow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rpow__(self, other: _Complex128_co, /) -> complex128: ... + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... 
# type: ignore[override] -csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] -cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] -clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] +csingle: TypeAlias = complex64 +cdouble: TypeAlias = complex128 +clongdouble: TypeAlias = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property @@ -4445,26 +5368,26 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def nbytes(self) -> L[8]: ... @overload - def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ... + def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... @overload - def __init__(self: timedelta64[L[0]], /) -> None: ... + def __new__(cls, /) -> timedelta64[L[0]]: ... @overload - def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ... + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... @overload - def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... @overload - def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... @overload - def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... @overload - def __init__( - self: timedelta64[dt.timedelta], + def __new__( + cls, value: dt.timedelta | _IntLike_co, format: _TimeUnitSpec[_NativeTD64Unit] = ..., /, - ) -> None: ... 
+ ) -> timedelta64[dt.timedelta]: ... @overload - def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -4489,19 +5412,19 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... @overload - def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] __radd__ = __add__ @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer[Any] | np.bool, /) -> timedelta64[_AnyTD64Item]: ... + def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating[Any], /) -> timedelta64[_AnyTD64Item | None]: ... + def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... @overload - def __mul__(self, x: float | np.floating[Any] | np.integer[Any] | np.bool, /) -> timedelta64: ... + def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... __rmul__ = __mul__ @overload - def __mod__(self, x: timedelta64[None | L[0]], /) -> timedelta64[None]: ... + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... @overload def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload @@ -4515,31 +5438,34 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __mod__(self, x: timedelta64, /) -> timedelta64: ... 
- # the L[0] makes __mod__ non-commutative, which the first two overloads reflect + # NOTE: The L[0] makes __mod__ non-commutative, which the first two overloads + # reflect. However, mypy does not seem to like this, so we ignore the errors. @overload - def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[misc] @overload - def __rmod__(self: timedelta64[None | L[0]], x: timedelta64, /) -> timedelta64[None]: ... + def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... # type: ignore[misc] @overload - def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... # type: ignore[misc] @overload def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload - def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... # type: ignore[misc] @overload - def __rmod__(self, x: timedelta64, /) -> timedelta64: ... + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # type: ignore[misc] # keep in sync with __mod__ @overload - def __divmod__(self, x: timedelta64[None | L[0]], /) -> tuple[int64, timedelta64[None]]: ... + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... @overload def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... 
@overload def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... @overload def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload @@ -4549,19 +5475,21 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] # keep in sync with __rmod__ @overload - def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[None | L[0]], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __rdivmod__( # type: ignore[misc] + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... @overload def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload - def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... 
+ def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... # type: ignore[misc] @overload def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... @@ -4574,20 +5502,22 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... @overload - def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. @overload def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... @overload def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... + def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload - def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload - def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload - def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... 
+ def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] @overload def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @@ -4599,6 +5529,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... @overload def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... + @overload def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... @overload @@ -4612,15 +5543,41 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... @overload def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... @overload def __rfloordiv__(self, a: timedelta64, /) -> int64: ... - __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co] - __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co] - __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co] - __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co] + # comparison ops + + @overload + def __lt__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __gt__(self, other: _TD64Like_co, /) -> bool_: ... 
+ @overload + def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __ge__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsGT, /) -> bool_: ... class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @property @@ -4629,28 +5586,30 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): def nbytes(self) -> L[8]: ... @overload - def __init__(self, value: datetime64[_DT64ItemT_co], /) -> None: ... + def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __init__(self: datetime64[_AnyDT64Arg], value: _AnyDT64Arg, /) -> None: ... + def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ... @overload - def __init__(self: datetime64[None], value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ... @overload - def __init__(self: datetime64[dt.datetime], value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> None: ... + def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... @overload - def __init__(self: datetime64[dt.date], value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> None: ... + def __new__(cls, value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> datetime64[dt.date]: ... @overload - def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... 
@overload - def __init__( - self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / - ) -> None: ... + def __new__( + cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> datetime64[dt.datetime]: ... @overload - def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... @overload - def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @overload - def __add__(self: datetime64[_AnyDT64Item], x: int | integer[Any] | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... @overload def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... @overload @@ -4668,7 +5627,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): __radd__ = __add__ @overload - def __sub__(self: datetime64[_AnyDT64Item], x: int | integer[Any] | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... @overload def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... @overload @@ -4700,8 +5659,9 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __sub__(self, x: datetime64, /) -> timedelta64: ... + # NOTE: mypy gets confused by the non-commutativity of subtraction here @overload - def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer[Any] | np.bool, /) -> datetime64[_AnyDT64Item]: ... 
+ def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... @overload def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... @overload @@ -4709,26 +5669,50 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... # type: ignore[misc] @overload - def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc] @overload - def __rsub__(self, x: datetime64, /) -> timedelta64: ... + def __rsub__(self, x: datetime64, /) -> timedelta64: ... # type: ignore[misc] - __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co] - __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co] - __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co] - __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co] + @overload + def __lt__(self, other: datetime64, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: datetime64, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __gt__(self, other: datetime64, /) -> bool_: ... 
+ @overload + def __gt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __ge__(self, other: datetime64, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsGT, /) -> bool_: ... -class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... +@final # cannot be subclassed at runtime +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... # type: ignore[misc] -class void(flexible[bytes | tuple[Any, ...]]): +class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload - def __init__(self, value: _IntLike_co | bytes, /, dtype: None = None) -> None: ... + def __new__(cls, length_or_data: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... @overload - def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ... + def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ... @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @@ -4738,48 +5722,46 @@ class void(flexible[bytes | tuple[Any, ...]]): def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... -class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): +class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc] @abstractmethod - def __init__(self, value: _CharacterItemT_co = ..., /) -> None: ... + def __new__(cls, value: object = ..., /) -> Self: ... # NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart -class bytes_(character[bytes], bytes): +class bytes_(character[bytes], bytes): # type: ignore[misc] @overload - def __new__(cls, o: object = ..., /) -> Self: ... 
+ def __new__(cls, value: object = b"", /) -> Self: ... @overload - def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ... + def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ... # - @overload - def __init__(self, o: object = ..., /) -> None: ... - @overload - def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ... + @override + def __hash__(self, /) -> int: ... # def __bytes__(self, /) -> bytes: ... -class str_(character[str], str): +class str_(character[str], str): # type: ignore[misc] @overload - def __new__(cls, value: object = ..., /) -> Self: ... + def __new__(cls, value: object = "", /) -> Self: ... @overload - def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... + def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ... # - @overload - def __init__(self, value: object = ..., /) -> None: ... - @overload - def __init__(self, value: bytes, /, encoding: str = ..., errors: str = ...) -> None: ... + @override + def __hash__(self, /) -> int: ... # See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final class ufunc: + __signature__: Final[inspect.Signature] + @property def __name__(self) -> LiteralString: ... @property - def __qualname__(self) -> LiteralString: ... + def __qualname__(self) -> LiteralString: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def __doc__(self) -> str: ... + def __doc__(self) -> str: ... # type: ignore[override] @property def nin(self) -> int: ... @property @@ -4804,127 +5786,148 @@ class ufunc: def identity(self) -> Any: ... # This is None for ufuncs and a string for gufuncs. @property - def signature(self) -> None | LiteralString: ... + def signature(self) -> LiteralString | None: ... + + def __call__(self, /, *args: Any, **kwargs: Any) -> Any: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... 
# The next four methods will always exist, but they will just # raise a ValueError ufuncs with that don't accept two input # arguments and return one output argument. Because of that we # can't type them very precisely. - def reduce(self, /, *args: Any, **kwargs: Any) -> Any: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ... - def outer(self, *args: Any, **kwargs: Any) -> Any: ... - # Similarly at won't be defined for ufuncs that return multiple + def accumulate( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + **kwargs: Incomplete, + ) -> Incomplete: ... + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def outer(self, A: ArrayLike, B: ArrayLike, /, **kwargs: Incomplete) -> NDArray[Incomplete]: ... + + # Similarly `at` won't be defined for ufuncs that return multiple # outputs, so we can't type it very precisely. - def at(self, /, *args: Any, **kwargs: Any) -> None: ... + def at(self, a: ndarray, indices: _ArrayLikeInt_co, b: ArrayLike | None = None, /) -> None: ... # def resolve_dtypes( self, /, - dtypes: tuple[dtype[Any] | type | None, ...], + dtypes: tuple[dtype | type | None, ...], *, - signature: tuple[dtype[Any] | None, ...] | None = None, + signature: tuple[dtype | None, ...] | None = None, casting: _CastingKind | None = None, reduction: builtins.bool = False, - ) -> tuple[dtype[Any], ...]: ... + ) -> tuple[dtype, ...]: ... 
# Parameters: `__name__`, `ntypes` and `identity` -absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] -add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] -arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] -arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] -arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] -arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] -arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] -arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] -arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] -bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] -bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] -bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] -bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] -cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] -ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] -conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] -copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] -cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] -cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] -deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] -degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] -divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] -equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] -exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] -exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] -expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] -fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] -float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] -floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] -floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] -fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] -fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None] -fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] -frexp: 
_UFunc_Nin1_Nout2[L['frexp'], L[4], None] -gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]] -greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] -greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] -heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] -hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] -invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] -isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] -isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] -isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] -isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] -lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] -ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] -left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] -less: _UFunc_Nin2_Nout1[L['less'], L[23], None] -less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] -log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] -log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] -log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] -log: _UFunc_Nin1_Nout1[L['log'], L[10], None] -logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] -logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] -logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] -logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] -logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] -logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] -matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] -matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] -maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] -minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] -mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] -modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] -multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] -negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] -nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] -not_equal: 
_UFunc_Nin2_Nout1[L['not_equal'], L[23], None] -positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] -power: _UFunc_Nin2_Nout1[L['power'], L[18], None] -rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] -radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] -reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] -remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] -right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] -rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] -sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] -signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] -sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] -sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] -spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] -sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] -square: _UFunc_Nin1_Nout1[L['square'], L[18], None] -subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] -tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] -tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] -trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] -vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] -vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] +absolute: _UFunc_Nin1_Nout1[L["absolute"], L[20], None] +add: _UFunc_Nin2_Nout1[L["add"], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L["arccos"], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L["arccosh"], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L["arcsin"], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L["arcsinh"], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L["arctan2"], L[5], None] +arctan: _UFunc_Nin1_Nout1[L["arctan"], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L["arctanh"], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L["bitwise_and"], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L["bitwise_count"], L[11], None] +bitwise_or: _UFunc_Nin2_Nout1[L["bitwise_or"], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L["bitwise_xor"], L[12], L[0]] +cbrt: 
_UFunc_Nin1_Nout1[L["cbrt"], L[5], None] +ceil: _UFunc_Nin1_Nout1[L["ceil"], L[7], None] +conjugate: _UFunc_Nin1_Nout1[L["conjugate"], L[18], None] +copysign: _UFunc_Nin2_Nout1[L["copysign"], L[4], None] +cos: _UFunc_Nin1_Nout1[L["cos"], L[9], None] +cosh: _UFunc_Nin1_Nout1[L["cosh"], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L["deg2rad"], L[5], None] +degrees: _UFunc_Nin1_Nout1[L["degrees"], L[5], None] +divide: _UFunc_Nin2_Nout1[L["divide"], L[11], None] +divmod: _UFunc_Nin2_Nout2[L["divmod"], L[15], None] +equal: _UFunc_Nin2_Nout1[L["equal"], L[23], None] +exp2: _UFunc_Nin1_Nout1[L["exp2"], L[8], None] +exp: _UFunc_Nin1_Nout1[L["exp"], L[10], None] +expm1: _UFunc_Nin1_Nout1[L["expm1"], L[8], None] +fabs: _UFunc_Nin1_Nout1[L["fabs"], L[5], None] +float_power: _UFunc_Nin2_Nout1[L["float_power"], L[4], None] +floor: _UFunc_Nin1_Nout1[L["floor"], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L["floor_divide"], L[21], None] +fmax: _UFunc_Nin2_Nout1[L["fmax"], L[21], None] +fmin: _UFunc_Nin2_Nout1[L["fmin"], L[21], None] +fmod: _UFunc_Nin2_Nout1[L["fmod"], L[15], None] +frexp: _UFunc_Nin1_Nout2[L["frexp"], L[4], None] +gcd: _UFunc_Nin2_Nout1[L["gcd"], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L["greater"], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L["greater_equal"], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L["heaviside"], L[4], None] +hypot: _UFunc_Nin2_Nout1[L["hypot"], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L["invert"], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L["isfinite"], L[20], None] +isinf: _UFunc_Nin1_Nout1[L["isinf"], L[20], None] +isnan: _UFunc_Nin1_Nout1[L["isnan"], L[20], None] +isnat: _UFunc_Nin1_Nout1[L["isnat"], L[2], None] +lcm: _UFunc_Nin2_Nout1[L["lcm"], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L["ldexp"], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L["left_shift"], L[11], None] +less: _UFunc_Nin2_Nout1[L["less"], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L["less_equal"], L[23], None] +log10: _UFunc_Nin1_Nout1[L["log10"], L[8], None] +log1p: 
_UFunc_Nin1_Nout1[L["log1p"], L[8], None] +log2: _UFunc_Nin1_Nout1[L["log2"], L[8], None] +log: _UFunc_Nin1_Nout1[L["log"], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L["logaddexp2"], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L["logaddexp"], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L["logical_and"], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L["logical_not"], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L["logical_or"], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L["logical_xor"], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L["matmul"], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L["matvec"], L[19], None, L["(m,n),(n)->(m)"]] +maximum: _UFunc_Nin2_Nout1[L["maximum"], L[21], None] +minimum: _UFunc_Nin2_Nout1[L["minimum"], L[21], None] +modf: _UFunc_Nin1_Nout2[L["modf"], L[4], None] +multiply: _UFunc_Nin2_Nout1[L["multiply"], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L["negative"], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L["nextafter"], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L["not_equal"], L[23], None] +positive: _UFunc_Nin1_Nout1[L["positive"], L[19], None] +power: _UFunc_Nin2_Nout1[L["power"], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L["rad2deg"], L[5], None] +radians: _UFunc_Nin1_Nout1[L["radians"], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L["reciprocal"], L[18], None] +remainder: _UFunc_Nin2_Nout1[L["remainder"], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L["right_shift"], L[11], None] +rint: _UFunc_Nin1_Nout1[L["rint"], L[10], None] +sign: _UFunc_Nin1_Nout1[L["sign"], L[19], None] +signbit: _UFunc_Nin1_Nout1[L["signbit"], L[4], None] +sin: _UFunc_Nin1_Nout1[L["sin"], L[9], None] +sinh: _UFunc_Nin1_Nout1[L["sinh"], L[8], None] +spacing: _UFunc_Nin1_Nout1[L["spacing"], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L["sqrt"], L[10], None] +square: _UFunc_Nin1_Nout1[L["square"], L[18], None] +subtract: _UFunc_Nin2_Nout1[L["subtract"], L[21], None] +tan: _UFunc_Nin1_Nout1[L["tan"], L[8], None] +tanh: 
_UFunc_Nin1_Nout1[L["tanh"], L[8], None] +trunc: _UFunc_Nin1_Nout1[L["trunc"], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L["vecdot"], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L["vecmat"], L[19], None, L["(n),(n,m)->(m)"]] abs = absolute acos = arccos @@ -4936,31 +5939,14 @@ atanh = arctanh atan2 = arctan2 concat = concatenate bitwise_left_shift = left_shift +bitwise_not = invert bitwise_invert = invert bitwise_right_shift = right_shift +conj = conjugate +mod = remainder permute_dims = transpose pow = power - -class errstate: - def __init__( - self, - *, - call: _ErrCall = ..., - all: None | _ErrKind = ..., - divide: None | _ErrKind = ..., - over: None | _ErrKind = ..., - under: None | _ErrKind = ..., - invalid: None | _ErrKind = ..., - ) -> None: ... - def __enter__(self) -> None: ... - def __exit__( - self, - exc_type: None | type[BaseException], - exc_value: None | BaseException, - traceback: None | TracebackType, - /, - ) -> None: ... - def __call__(self, func: _CallableT) -> _CallableT: ... +true_divide = divide # TODO: The type of each `__next__` and `iters` return-type depends # on the length and dtype of `args`; we can't describe this behavior yet @@ -4979,7 +5965,7 @@ class broadcast: @property def numiter(self) -> int: ... @property - def shape(self) -> _Shape: ... + def shape(self) -> _AnyShape: ... @property def size(self) -> int: ... def __next__(self) -> tuple[Any, ...]: ... @@ -4988,90 +5974,54 @@ class broadcast: @final class busdaycalendar: - def __new__( - cls, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - ) -> busdaycalendar: ... - @property - def weekmask(self) -> NDArray[np.bool]: ... - @property - def holidays(self) -> NDArray[datetime64]: ... 
- -class finfo(Generic[_FloatingT_co]): - dtype: Final[dtype[_FloatingT_co]] - bits: Final[int] - eps: Final[_FloatingT_co] - epsneg: Final[_FloatingT_co] - iexp: Final[int] - machep: Final[int] - max: Final[_FloatingT_co] - maxexp: Final[int] - min: Final[_FloatingT_co] - minexp: Final[int] - negep: Final[int] - nexp: Final[int] - nmant: Final[int] - precision: Final[int] - resolution: Final[_FloatingT_co] - smallest_subnormal: Final[_FloatingT_co] - @property - def smallest_normal(self) -> _FloatingT_co: ... - @property - def tiny(self) -> _FloatingT_co: ... - @overload - def __new__( - cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]] - ) -> finfo[floating[_NBit1]]: ... - @overload - def __new__( - cls, dtype: complex | float | type[complex] | type[float] - ) -> finfo[float64]: ... - @overload - def __new__( - cls, dtype: str - ) -> finfo[floating[Any]]: ... - - -class iinfo(Generic[_IntegerT_co]): - dtype: Final[dtype[_IntegerT_co]] - kind: Final[LiteralString] - bits: Final[int] - key: Final[LiteralString] + def __init__( + self, + /, + weekmask: str | Sequence[int | bool_ | integer] | _SupportsArray[dtype[bool_ | integer]] = "1111100", + holidays: Sequence[dt.date | datetime64] | _SupportsArray[dtype[datetime64]] | None = None, + ) -> None: ... @property - def min(self) -> int: ... + def weekmask(self) -> ndarray[tuple[int], dtype[bool_]]: ... @property - def max(self) -> int: ... + def holidays(self) -> ndarray[tuple[int], dtype[datetime64[dt.date]]]: ... +@final +class nditer: @overload - def __new__( - cls, dtype: _IntegerT_co | _DTypeLike[_IntegerT_co] - ) -> iinfo[_IntegerT_co]: ... - @overload - def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... 
+ def __init__( + self, + /, + op: ArrayLike, + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[_NDIterFlagsOp] | None = None, + op_dtypes: DTypeLike | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[SupportsIndex] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... @overload - def __new__(cls, dtype: str) -> iinfo[Any]: ... + def __init__( + self, + /, + op: Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, + op_dtypes: Sequence[DTypeLike | None] | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[Sequence[SupportsIndex]] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... -@final -class nditer: - def __new__( - cls, - op: ArrayLike | Sequence[ArrayLike | None], - flags: None | Sequence[_NDIterFlagsKind] = ..., - op_flags: None | Sequence[Sequence[_NDIterFlagsOp]] = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., - order: _OrderKACF = ..., - casting: _CastingKind = ..., - op_axes: None | Sequence[Sequence[SupportsIndex]] = ..., - itershape: None | _ShapeLike = ..., - buffersize: SupportsIndex = ..., - ) -> nditer: ... def __enter__(self) -> nditer: ... def __exit__( self, - exc_type: None | type[BaseException], - exc_value: None | BaseException, - traceback: None | TracebackType, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, ) -> None: ... def __iter__(self) -> nditer: ... def __next__(self) -> tuple[NDArray[Any], ...]: ... @@ -5091,7 +6041,7 @@ class nditer: def remove_multi_index(self) -> None: ... def reset(self) -> None: ... @property - def dtypes(self) -> tuple[dtype[Any], ...]: ... + def dtypes(self) -> tuple[dtype, ...]: ... @property def finished(self) -> builtins.bool: ... 
@property @@ -5125,7 +6075,7 @@ class nditer: @property def value(self) -> tuple[NDArray[Any], ...]: ... -class memmap(ndarray[_ShapeT_co, _DType_co]): +class memmap(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: ClassVar[float] filename: str | None offset: int @@ -5135,60 +6085,40 @@ class memmap(ndarray[_ShapeT_co, _DType_co]): subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: type[uint8] = ..., - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: None | int | tuple[int, ...] = ..., - order: _OrderKACF = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", ) -> memmap[Any, dtype[uint8]]: ... @overload def __new__( subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: _DTypeLike[_SCT], - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: None | int | tuple[int, ...] = ..., - order: _OrderKACF = ..., - ) -> memmap[Any, dtype[_SCT]]: ... + dtype: _DTypeLike[_ScalarT], + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[Any, dtype[_ScalarT]]: ... @overload def __new__( subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: DTypeLike, - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: None | int | tuple[int, ...] = ..., - order: _OrderKACF = ..., - ) -> memmap[Any, dtype[Any]]: ... + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[Any, dtype]: ... def __array_finalize__(self, obj: object) -> None: ... def __array_wrap__( self, - array: memmap[_ShapeT_co, _DType_co], - context: None | tuple[ufunc, tuple[Any, ...], int] = ..., - return_scalar: builtins.bool = ..., + array: memmap[_ShapeT_co, _DTypeT_co], # type: ignore[override] + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: builtins.bool = False, ) -> Any: ... 
def flush(self) -> None: ... -# TODO: Add a mypy plugin for managing functions whose output type is dependent -# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`) -class vectorize: - pyfunc: Callable[..., Any] - cache: builtins.bool - signature: None | LiteralString - otypes: None | LiteralString - excluded: set[int | str] - __doc__: None | str - def __init__( - self, - pyfunc: Callable[..., Any], - otypes: None | str | Iterable[DTypeLike] = ..., - doc: None | str = ..., - excluded: None | Iterable[int | str] = ..., - cache: builtins.bool = ..., - signature: None | str = ..., - ) -> None: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - class poly1d: @property def variable(self) -> LiteralString: ... @@ -5224,9 +6154,9 @@ class poly1d: __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @overload - def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype[Any]]: ... + def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... @overload - def __array__(self, /, t: _DType, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DType]: ... + def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... @overload def __call__(self, val: _ScalarLike_co) -> Any: ... @@ -5238,8 +6168,8 @@ class poly1d: def __init__( self, c_or_r: ArrayLike, - r: builtins.bool = ..., - variable: None | str = ..., + r: builtins.bool = False, + variable: str | None = None, ) -> None: ... def __len__(self) -> int: ... def __neg__(self) -> poly1d: ... @@ -5251,175 +6181,22 @@ class poly1d: def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted def __sub__(self, other: ArrayLike, /) -> poly1d: ... def __rsub__(self, other: ArrayLike, /) -> poly1d: ... - def __div__(self, other: ArrayLike, /) -> poly1d: ... 
def __truediv__(self, other: ArrayLike, /) -> poly1d: ... - def __rdiv__(self, other: ArrayLike, /) -> poly1d: ... def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... def __getitem__(self, val: int, /) -> Any: ... def __setitem__(self, key: int, val: Any, /) -> None: ... def __iter__(self) -> Iterator[Any]: ... - def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... + def deriv(self, m: SupportsInt | SupportsIndex = 1) -> poly1d: ... def integ( self, - m: SupportsInt | SupportsIndex = ..., - k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0, ) -> poly1d: ... - -class matrix(ndarray[_2DShapeT_co, _DType_co]): - __array_priority__: ClassVar[float] - def __new__( - subtype, - data: ArrayLike, - dtype: DTypeLike = ..., - copy: builtins.bool = ..., - ) -> matrix[_2D, Any]: ... - def __array_finalize__(self, obj: object) -> None: ... - - @overload - def __getitem__( - self, - key: ( - SupportsIndex - | _ArrayLikeInt_co - | tuple[SupportsIndex | _ArrayLikeInt_co, ...] - ), - /, - ) -> Any: ... - @overload - def __getitem__( - self, - key: ( - None - | slice - | EllipsisType - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...] - ), - /, - ) -> matrix[_2D, _DType_co]: ... - @overload - def __getitem__(self: NDArray[void], key: str, /) -> matrix[_2D, dtype[Any]]: ... - @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_2DShapeT_co, dtype[void]]: ... - - def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... - def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... - def __imul__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DType_co]: ... - def __pow__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... - def __ipow__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DType_co]: ... 
- - @overload - def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... - @overload - def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... - @overload - def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... - @overload - def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... - @overload - def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... - @overload - def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[_2D, Any]: ... - @overload - def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ..., ddof: float = ...) -> _ArrayT: ... - - @overload - def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... - @overload - def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[_2D, Any]: ... - @overload - def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ..., ddof: float = ...) -> _ArrayT: ... - - @overload - def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... - @overload - def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... - @overload - def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def any(self, axis: None = ..., out: None = ...) -> np.bool: ... - @overload - def any(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[np.bool]]: ... 
- @overload - def any(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def all(self, axis: None = ..., out: None = ...) -> np.bool: ... - @overload - def all(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[np.bool]]: ... - @overload - def all(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def max(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> _SCT: ... - @overload - def max(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DType_co]: ... - @overload - def max(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def min(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> _SCT: ... - @overload - def min(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DType_co]: ... - @overload - def min(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def argmax(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> intp: ... - @overload - def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[intp]]: ... - @overload - def argmax(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def argmin(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> intp: ... - @overload - def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[intp]]: ... - @overload - def argmin(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - @overload - def ptp(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> _SCT: ... - @overload - def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DType_co]: ... - @overload - def ptp(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - - def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[_2D, _DType_co]: ... - def tolist(self: matrix[Any, dtype[generic[_T]]]) -> list[list[_T]]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] - def ravel(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DType_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] - def flatten(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DType_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] - - @property - def T(self) -> matrix[_2D, _DType_co]: ... - @property - def I(self) -> matrix[_2D, Any]: ... - @property - def A(self) -> ndarray[_2DShapeT_co, _DType_co]: ... - @property - def A1(self) -> ndarray[_Shape, _DType_co]: ... - @property - def H(self) -> matrix[_2D, _DType_co]: ... - def getT(self) -> matrix[_2D, _DType_co]: ... - def getI(self) -> matrix[_2D, Any]: ... - def getA(self) -> ndarray[_2DShapeT_co, _DType_co]: ... - def getA1(self) -> ndarray[_Shape, _DType_co]: ... - def getH(self) -> matrix[_2D, _DType_co]: ... - - def from_dlpack( x: _SupportsDLPack[None], /, *, device: L["cpu"] | None = None, copy: builtins.bool | None = None, -) -> NDArray[number[Any] | np.bool]: ... +) -> NDArray[number | np.bool]: ... diff --git a/blimgui/dist64/numpy/_array_api_info.py b/blimgui/dist64/numpy/_array_api_info.py index a9b18eb..394c4d7 100644 --- a/blimgui/dist64/numpy/_array_api_info.py +++ b/blimgui/dist64/numpy/_array_api_info.py @@ -8,24 +8,26 @@ """ from numpy._core import ( - dtype, bool, - intp, + complex64, + complex128, + dtype, + float32, + float64, int8, int16, int32, int64, + intp, uint8, uint16, uint32, uint64, - float32, - float64, - complex64, - complex128, ) +from numpy._utils import set_module +@set_module('numpy') class __array_namespace_info__: """ Get the array API inspection namespace for NumPy. @@ -58,8 +60,6 @@ class __array_namespace_info__: """ - __module__ = 'numpy' - def capabilities(self): """ Return a dictionary of array API library capabilities. 
@@ -94,14 +94,14 @@ def capabilities(self): >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, - 'data-dependent shapes': True} + 'data-dependent shapes': True, + 'max dimensions': 64} """ return { "boolean indexing": True, "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, + "max dimensions": 64, } def default_device(self): diff --git a/blimgui/dist64/numpy/_array_api_info.pyi b/blimgui/dist64/numpy/_array_api_info.pyi index 4eca38b..742f4ae 100644 --- a/blimgui/dist64/numpy/_array_api_info.pyi +++ b/blimgui/dist64/numpy/_array_api_info.pyi @@ -1,6 +1,6 @@ from typing import ( - ClassVar, Literal, + Never, TypeAlias, TypedDict, TypeVar, @@ -8,13 +8,11 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import Never import numpy as np - _Device: TypeAlias = Literal["cpu"] -_DeviceLike: TypeAlias = None | _Device +_DeviceLike: TypeAlias = _Device | None _Capabilities = TypedDict( "_Capabilities", @@ -34,7 +32,6 @@ _DefaultDTypes = TypedDict( }, ) - _KindBool: TypeAlias = Literal["bool"] _KindInt: TypeAlias = Literal["signed integer"] _KindUInt: TypeAlias = Literal["unsigned integer"] @@ -52,7 +49,6 @@ _Kind: TypeAlias = ( | _KindNumber ) - _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") _T3 = TypeVar("_T3") @@ -121,14 +117,14 @@ _EmptyDict: TypeAlias = dict[Never, Never] @final class __array_namespace_info__: - __module__: ClassVar[Literal['numpy']] + __module__: Literal["numpy"] = "numpy" def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... def default_dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, ) -> _DefaultDTypes: ... def devices(self) -> list[_Device]: ... @@ -136,49 +132,49 @@ class __array_namespace_info__: def dtypes( self, *, - device: _DeviceLike = ..., - kind: None = ..., + device: _DeviceLike = None, + kind: None = None, ) -> _DTypes: ... 
@overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindBool], ) -> _DTypesBool: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindInt], ) -> _DTypesInt: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindUInt], ) -> _DTypesUInt: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindFloat], ) -> _DTypesFloat: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindComplex], ) -> _DTypesComplex: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: ( _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt] @@ -188,7 +184,7 @@ class __array_namespace_info__: def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: ( _Permute1[_KindNumber] | _Permute3[_KindInteger, _KindFloat, _KindComplex] @@ -198,13 +194,13 @@ class __array_namespace_info__: def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: tuple[()], ) -> _EmptyDict: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: tuple[_Kind, ...], ) -> _DTypesUnion: ... 
diff --git a/blimgui/dist64/numpy/_configtool.py b/blimgui/dist64/numpy/_configtool.py index 1be660d..4a0181c 100644 --- a/blimgui/dist64/numpy/_configtool.py +++ b/blimgui/dist64/numpy/_configtool.py @@ -1,9 +1,9 @@ import argparse -from pathlib import Path import sys +from pathlib import Path -from .version import __version__ from .lib._utils_impl import get_include +from .version import __version__ def main() -> None: diff --git a/blimgui/dist64/numpy/_core/__init__.py b/blimgui/dist64/numpy/_core/__init__.py index 5dc01a2..625cf5e 100644 --- a/blimgui/dist64/numpy/_core/__init__.py +++ b/blimgui/dist64/numpy/_core/__init__.py @@ -10,46 +10,82 @@ from numpy.version import version as __version__ - # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core env_added = [] -for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: +for envkey in ['OPENBLAS_MAIN_FREE']: if envkey not in os.environ: - os.environ[envkey] = '1' + # Note: using `putenv` (and `unsetenv` further down) instead of updating + # `os.environ` on purpose to avoid a race condition, see gh-30627. + os.putenv(envkey, '1') env_added.append(envkey) try: from . import multiarray except ImportError as exc: import sys - msg = """ + + # Bypass for the module re-initialization opt-out + if exc.msg == "cannot load module more than once per process": + raise + + # Basically always, the problem should be that the C module is wrong/missing... 
+ if ( + isinstance(exc, ModuleNotFoundError) + and exc.name == "numpy._core._multiarray_umath" + ): + import sys + candidates = [] + for path in __path__: + candidates.extend( + f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + if len(candidates) == 0: + bad_c_module_info = ( + "We found no compiled module, did NumPy build successfully?\n") + else: + candidate_str = '\n * '.join(candidates) + # cache_tag is documented to be possibly None, so just use name if it is + # this guesses at cache_tag being the same as the extension module scheme + tag = sys.implementation.cache_tag or sys.implementation.name + bad_c_module_info = ( + f"The following compiled module files exist, but seem incompatible\n" + f"with with either python '{tag}' or the " + f"platform '{sys.platform}':\n\n * {candidate_str}\n" + ) + else: + bad_c_module_info = "" + + major, minor, *_ = sys.version_info + msg = f""" IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! Importing the numpy C-extensions failed. This error can happen for many reasons, often due to issues with your setup or how NumPy was installed. - +{bad_c_module_info} We have compiled some common reasons and troubleshooting tips at: https://numpy.org/devdocs/user/troubleshooting-importerror.html Please note and check the following: - * The Python version is: Python%d.%d from "%s" - * The NumPy version is: "%s" + * The Python version is: Python {major}.{minor} from "{sys.executable}" + * The NumPy version is: "{__version__}" and make sure that they are the versions you expect. -Please carefully study the documentation linked above for further help. -Original error was: %s -""" % (sys.version_info[0], sys.version_info[1], sys.executable, - __version__, exc) - raise ImportError(msg) +Please carefully study the information and documentation linked above. +This is unlikely to be a NumPy issue but will be caused by a bad install +or environment on your machine. 
+ +Original error was: {exc} +""" + + raise ImportError(msg) from exc finally: for envkey in env_added: - del os.environ[envkey] + os.unsetenv(envkey) del envkey del env_added del os @@ -69,37 +105,35 @@ raise ImportError(msg.format(path)) from . import numerictypes as nt -from .numerictypes import sctypes, sctypeDict +from .numerictypes import sctypeDict, sctypes + multiarray.set_typeDict(nt.sctypeDict) -from . import numeric -from .numeric import * -from . import fromnumeric +from . import einsumfunc, fromnumeric, function_base, getlimits, numeric, shape_base +from .einsumfunc import * from .fromnumeric import * -from .records import record, recarray -# Note: module name memmap is overwritten by a class with same name -from .memmap import * -from . import function_base from .function_base import * -from . import _machar -from . import getlimits from .getlimits import * -from . import shape_base + +# Note: module name memmap is overwritten by a class with same name +from .memmap import * +from .numeric import * +from .records import recarray, record from .shape_base import * -from . import einsumfunc -from .einsumfunc import * -del nt -from .numeric import absolute as abs +del nt # do this after everything else, to minimize the chance of this misleadingly # appearing in an import-time traceback -from . import _add_newdocs -from . import _add_newdocs_scalars # add these for module-freeze analysis (like PyInstaller) -from . import _dtype_ctypes -from . import _internal -from . import _dtype -from . import _methods +from . import ( + _add_newdocs, + _add_newdocs_scalars, + _dtype, + _dtype_ctypes, + _internal, + _methods, +) +from .numeric import absolute as abs acos = numeric.arccos acosh = numeric.arccosh @@ -155,18 +189,6 @@ def _DType_reduce(DType): return _DType_reconstruct, (scalar_type,) -def __getattr__(name): - # Deprecated 2022-11-22, NumPy 1.25. 
- if name == "MachAr": - import warnings - warnings.warn( - "The `np._core.MachAr` is considered private API (NumPy 1.24)", - DeprecationWarning, stacklevel=2, - ) - return _machar.MachAr - raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") - - import copyreg copyreg.pickle(ufunc, _ufunc_reduce) @@ -176,5 +198,6 @@ def __getattr__(name): del copyreg, _ufunc_reduce, _DType_reduce from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/blimgui/dist64/numpy/_core/__init__.pyi b/blimgui/dist64/numpy/_core/__init__.pyi index b46d5e3..0a9b283 100644 --- a/blimgui/dist64/numpy/_core/__init__.pyi +++ b/blimgui/dist64/numpy/_core/__init__.pyi @@ -1,2 +1,666 @@ -# NOTE: The `np._core` namespace is deliberately kept empty due to it -# being private +# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .einsumfunc import einsum, einsum_path +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + transpose as permute_dims, + var, +) +from .function_base import geomspace, linspace, logspace +from .getlimits import finfo, iinfo +from .memmap import memmap +from .numeric import ( + False_, + True_, + allclose, + arange, + 
argwhere, + array, + array_equal, + array_equiv, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + astype, + base_repr, + binary_repr, + bitwise_not, + broadcast, + can_cast, + concatenate, + concatenate as concat, + convolve, + copyto, + correlate, + count_nonzero, + cross, + dot, + dtype, + empty, + empty_like, + flatiter, + flatnonzero, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + fromstring, + full, + full_like, + identity, + indices, + inf, + inner, + isclose, + isfortran, + isscalar, + lexsort, + little_endian, + matmul, + may_share_memory, + min_scalar_type, + moveaxis, + nan, + ndarray, + nditer, + nested_iters, + newaxis, + ones, + ones_like, + outer, + promote_types, + putmask, + result_type, + roll, + rollaxis, + shares_memory, + tensordot, + ufunc, + vdot, + vecdot, + where, + zeros, + zeros_like, +) +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + sctypeDict, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .records import recarray, record +from .shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + unstack, + vstack, +) +from .umath import ( + absolute, + absolute as abs, + add, + arccos, + arccos as acos, + arccosh, + arccosh as acosh, + arcsin, + arcsin as asin, + arcsinh, + 
arcsinh as asinh, + arctan, + arctan as atan, + arctan2, + arctan2 as atan2, + arctanh, + arctanh as atanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + invert as bitwise_invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + left_shift as bitwise_left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + power as pow, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + right_shift as bitwise_right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecmat, +) + +__all__ = [ + "False_", + "ScalarType", + "True_", + "abs", + "absolute", + "acos", + "acosh", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "asin", + "asinh", + "astype", + "atan", + "atan2", + "atanh", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_invert", + "bitwise_left_shift", + "bitwise_not", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "block", + "bool", + "bool_", + "broadcast", + 
"busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concat", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "einsum", + "einsum_path", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "finfo", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "geomspace", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hstack", + "hypot", + "identity", + "iinfo", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "linspace", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logspace", + "long", + 
"longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "memmap", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", + "ones", + "ones_like", + "outer", + "partition", + "permute_dims", + "pi", + "positive", + "pow", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "recarray", + "reciprocal", + "record", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "sctypeDict", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "stack", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "unstack", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "vstack", + "where", + "zeros", + "zeros_like", +] diff --git a/blimgui/dist64/numpy/_core/_add_newdocs.py b/blimgui/dist64/numpy/_core/_add_newdocs.py index fc362d4..1de668e 100644 --- a/blimgui/dist64/numpy/_core/_add_newdocs.py +++ b/blimgui/dist64/numpy/_core/_add_newdocs.py @@ -9,9 +9,10 @@ """ -from numpy._core.function_base import add_newdoc -from numpy._core.overrides import get_array_function_like_doc +import textwrap +from numpy._core.function_base import add_newdoc +from 
numpy._core.overrides import get_array_function_like_doc # noqa: F401 ############################################################################### # @@ -81,7 +82,6 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('coords', """ An N-dimensional tuple of current coordinates. @@ -100,7 +100,6 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('index', """ Current flat index into the array. @@ -119,17 +118,25 @@ """)) -# flatiter functions +# flatiter methods add_newdoc('numpy._core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator + """ + __array__($self, dtype=None, /, *, copy=None) + -- - """)) + flat.__array__([dtype], *, copy=None) + Get array from iterator + + """)) add_newdoc('numpy._core', 'flatiter', ('copy', """ - copy() + copy($self, /) + -- + + flat.copy() Get a copy of the iterator as a 1-D array. @@ -155,6 +162,19 @@ add_newdoc('numpy._core', 'nditer', """ + nditer( + op, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + op_axes=None, + itershape=None, + buffersize=0, + ) + -- + nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0) @@ -166,63 +186,62 @@ ---------- op : ndarray or sequence of array_like The array(s) to iterate over. - flags : sequence of str, optional - Flags to control the behavior of the iterator. - - * ``buffered`` enables buffering when required. - * ``c_index`` causes a C-order index to be tracked. - * ``f_index`` causes a Fortran-order index to be tracked. - * ``multi_index`` causes a multi-index, or a tuple of indices - with one per iteration dimension, to be tracked. - * ``common_dtype`` causes all the operands to be converted to - a common data type, with copying or buffering as necessary. - * ``copy_if_overlap`` causes the iterator to determine if read - operands have overlap with write operands, and make temporary - copies as necessary to avoid overlap. 
False positives (needless - copying) are possible in some cases. - * ``delay_bufalloc`` delays allocation of the buffers until - a reset() call is made. Allows ``allocate`` operands to - be initialized before their values are copied into the buffers. - * ``external_loop`` causes the ``values`` given to be - one-dimensional arrays with multiple values instead of - zero-dimensional arrays. - * ``grow_inner`` allows the ``value`` array sizes to be made - larger than the buffer size when both ``buffered`` and - ``external_loop`` is used. - * ``ranged`` allows the iterator to be restricted to a sub-range - of the iterindex values. - * ``refs_ok`` enables iteration of reference types, such as - object arrays. - * ``reduce_ok`` enables iteration of ``readwrite`` operands - which are broadcasted, also known as reduction operands. - * ``zerosize_ok`` allows `itersize` to be zero. + Flags to control the behavior of the iterator. + + * ``buffered`` enables buffering when required. + * ``c_index`` causes a C-order index to be tracked. + * ``f_index`` causes a Fortran-order index to be tracked. + * ``multi_index`` causes a multi-index, or a tuple of indices + with one per iteration dimension, to be tracked. + * ``common_dtype`` causes all the operands to be converted to + a common data type, with copying or buffering as necessary. + * ``copy_if_overlap`` causes the iterator to determine if read + operands have overlap with write operands, and make temporary + copies as necessary to avoid overlap. False positives (needless + copying) are possible in some cases. + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to + be initialized before their values are copied into the buffers. + * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. 
+ * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. + * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - ``readonly``, ``readwrite``, or ``writeonly`` must be specified. - - * ``readonly`` indicates the operand will only be read from. - * ``readwrite`` indicates the operand will be read from and written to. - * ``writeonly`` indicates the operand will only be written to. - * ``no_broadcast`` prevents the operand from being broadcasted. - * ``contig`` forces the operand data to be contiguous. - * ``aligned`` forces the operand data to be aligned. - * ``nbo`` forces the operand data to be in native byte order. - * ``copy`` allows a temporary read-only copy if required. - * ``updateifcopy`` allows a temporary read-write copy if required. - * ``allocate`` causes the array to be allocated if it is None - in the ``op`` parameter. - * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. - * ``arraymask`` indicates that this operand is the mask to use - for selecting elements when writing to operands with the - 'writemasked' flag set. The iterator does not enforce this, - but when writing from a buffer back to the array, it only - copies those elements indicated by this mask. - * ``writemasked`` indicates that only elements where the chosen - ``arraymask`` operand is True will be written to. 
- * ``overlap_assume_elementwise`` can be used to mark operands that are - accessed only in the iterator order, to allow less conservative - copying when ``copy_if_overlap`` is present. + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. 
@@ -423,10 +442,22 @@ """) +# nditer attributes + +add_newdoc('numpy._core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + # nditer methods add_newdoc('numpy._core', 'nditer', ('copy', """ + copy($self, /) + -- + copy() Get a copy of the iterator in its current state. @@ -445,15 +476,11 @@ """)) -add_newdoc('numpy._core', 'nditer', ('operands', - """ - operands[`Slice`] - - The array(s) to be iterated over. Valid only before the iterator is closed. - """)) - add_newdoc('numpy._core', 'nditer', ('debug_print', """ + debug_print($self, /) + -- + debug_print() Print the current state of the `nditer` instance and debug info to stdout. @@ -462,6 +489,9 @@ add_newdoc('numpy._core', 'nditer', ('enable_external_loop', """ + enable_external_loop($self, /) + -- + enable_external_loop() When the "external_loop" was not used during construction, but @@ -472,6 +502,9 @@ add_newdoc('numpy._core', 'nditer', ('iternext', """ + iternext($self, /) + -- + iternext() Check whether iterations are left, and perform a single internal iteration @@ -487,6 +520,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_axis', """ + remove_axis($self, i, /) + -- + remove_axis(i, /) Removes axis `i` from the iterator. Requires that the flag "multi_index" @@ -496,6 +532,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_multi_index', """ + remove_multi_index($self, /) + -- + remove_multi_index() When the "multi_index" flag was specified, this removes it, allowing @@ -505,32 +544,62 @@ add_newdoc('numpy._core', 'nditer', ('reset', """ + reset($self, /) + -- + reset() Reset the iterator to its initial state. """)) +add_newdoc('numpy._core', 'nditer', ('close', + """ + close($self, /) + -- + + close() + + Resolve all writeback semantics in writeable operands. 
+ + See Also + -------- + :ref:`nditer-context-manager` + + """)) + +# nested_iters + add_newdoc('numpy._core', 'nested_iters', """ - nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \ - order="K", casting="safe", buffersize=0) + nested_iters( + op, + axes, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + buffersize=0, + ) + -- + + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, + order='K', casting='safe', buffersize=0) Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over different axes of the op argument. The first iterator is used in the - outermost loop, the last in the innermost loop. Advancing one will change - the subsequent iterators to point at its new element. + outermost loop, the last in the innermost loop. Advancing one will + change the subsequent iterators to point at its new element. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. - axes : list of list of int Each item is used as an "op_axes" argument to an nditer - flags, op_flags, op_dtypes, order, casting, buffersize (optional) See `nditer` parameters of the same name @@ -575,20 +644,6 @@ """) -add_newdoc('numpy._core', 'nditer', ('close', - """ - close() - - Resolve all writeback semantics in writeable operands. - - See Also - -------- - - :ref:`nditer-context-manager` - - """)) - - ############################################################################### # # broadcast @@ -597,6 +652,9 @@ add_newdoc('numpy._core', 'broadcast', """ + broadcast(*arrays) + -- + Produce an object that mimics broadcasting. Parameters @@ -766,8 +824,13 @@ """)) +# methods + add_newdoc('numpy._core', 'broadcast', ('reset', """ + reset($self, /) + -- + reset() Reset the broadcasted result's iterator(s). 
@@ -806,8 +869,21 @@ add_newdoc('numpy._core.multiarray', 'array', """ + array( + object, + dtype=None, + *, + copy=True, + order='K', + subok=False, + ndmin=0, + ndmax=0, + like=None, + ) + -- + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, - like=None) + ndmax=0, like=None) Create an array. @@ -856,6 +932,16 @@ Specifies the minimum number of dimensions that the resulting array should have. Ones will be prepended to the shape as needed to meet this requirement. + ndmax : int, optional + Specifies the maximum number of dimensions to create when inferring + shape from nested sequences. By default (ndmax=0), NumPy recurses + through all nesting levels (up to the compile-time constant + ``NPY_MAXDIMS``). + Setting ``ndmax`` stops recursion at the specified depth, preserving + deeper nested structures as objects instead of promoting them to + higher-dimensional arrays. In this case, ``dtype=object`` is required. + + .. versionadded:: 2.4.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -875,7 +961,7 @@ ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. - copy: Return an array copy of the given object. + copy : Return an array copy of the given object. Notes @@ -927,10 +1013,28 @@ matrix([[1, 2], [3, 4]]) + Limiting the maximum dimensions with ``ndmax``: + + >>> a = np.array([[1, 2], [3, 4]], dtype=object, ndmax=2) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) + """) add_newdoc('numpy._core.multiarray', 'asarray', """ + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) Convert the input to an array. 
@@ -944,12 +1048,13 @@ dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'K'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) is the default and preserves the input order for the output. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -979,12 +1084,10 @@ -------- asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. + asfortranarray : Convert input to an ndarray with column-major memory order. asarray_chkfinite : Similar function which checks input for NaNs and Infs. fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. + fromfunction : Construct an array by executing a function on grid positions. Examples -------- @@ -1023,6 +1126,9 @@ add_newdoc('numpy._core.multiarray', 'asanyarray', """ + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) Convert the input to an ndarray, but pass ndarray subclasses through. 
@@ -1036,12 +1142,14 @@ dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -1072,13 +1180,10 @@ -------- asarray : Similar function which always returns ndarrays. ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and - Infs. + asfortranarray : Convert input to an ndarray with column-major memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. + fromfunction : Construct an array by executing a function on grid positions. Examples -------- @@ -1099,6 +1204,9 @@ add_newdoc('numpy._core.multiarray', 'ascontiguousarray', """ + ascontiguousarray(a, dtype=None, *, like=None) + -- + ascontiguousarray(a, dtype=None, *, like=None) Return a contiguous array (ndim >= 1) in memory (C order). 
@@ -1121,8 +1229,7 @@ See Also -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. + asfortranarray : Convert input to an ndarray with column-major memory order. require : Return an ndarray that satisfies requirements. ndarray.flags : Information about the memory layout of the array. @@ -1162,6 +1269,9 @@ add_newdoc('numpy._core.multiarray', 'asfortranarray', """ + asfortranarray(a, dtype=None, *, like=None) + -- + asfortranarray(a, dtype=None, *, like=None) Return an array (ndim >= 1) laid out in Fortran order in memory. @@ -1225,7 +1335,10 @@ add_newdoc('numpy._core.multiarray', 'empty', """ - empty(shape, dtype=float, order='C', *, device=None, like=None) + empty(shape, dtype=None, order='C', *, device=None, like=None) + -- + + empty(shape, dtype=None, order='C', *, device=None, like=None) Return a new array of given shape and type, without initializing entries. @@ -1238,8 +1351,7 @@ `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. + (C-style) or column-major (Fortran-style) order in memory. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -1295,11 +1407,14 @@ string. If `obj` is not given, it will be interpreted as None for object type and as zeros for all other types. - """) + """) # sufficient null bytes for all number dtypes add_newdoc('numpy._core.multiarray', 'zeros', """ - zeros(shape, dtype=float, order='C', *, like=None) + zeros(shape, dtype=None, order='C', *, device=None, like=None) + -- + + zeros(shape, dtype=None, order='C', *, device=None, like=None) Return a new array of given shape and type, filled with zeros. @@ -1312,8 +1427,12 @@ `numpy.float64`. 
order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. + (C-style) or column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -1355,13 +1474,15 @@ """) add_newdoc('numpy._core.multiarray', 'set_typeDict', - """set_typeDict(dict) + """ + set_typeDict(dict) Set the internal dictionary that can look up an array type using a registered code. """) +# Signature can be updated for 2.5.0 release, see gh-30235 for details add_newdoc('numpy._core.multiarray', 'fromstring', """ fromstring(string, dtype=float, count=-1, *, sep, like=None) @@ -1373,7 +1494,7 @@ string : str A string containing the data. dtype : data-type, optional - The data type of the array; default: float. For binary input data, + The data type of the array; default: `numpy.float64`. For binary input data, the data must be in exactly this format. Most builtin numeric types are supported and extension types may be supported. count : int, optional @@ -1424,6 +1545,9 @@ add_newdoc('numpy._core.multiarray', 'compare_chararrays', """ + compare_chararrays(a1, a2, cmp, rstrip) + -- + compare_chararrays(a1, a2, cmp, rstrip) Performs element-wise comparison of two string arrays using the @@ -1435,20 +1559,20 @@ Arrays to be compared. cmp : {"<", "<=", "==", ">=", ">", "!="} Type of comparison. - rstrip : Boolean - If True, the spaces at the end of Strings are removed before the comparison. + rstrip : bool + If True, the spaces at the end of strings are removed before the comparison. Returns ------- out : ndarray - The output array of type Boolean with the same shape as a and b. + The output array of type `numpy.bool` with the same shape as `a1` and `a2`. 
Raises ------ ValueError If `cmp` is not valid. TypeError - If at least one of `a` or `b` is a non-string array + If at least one of `a1` or `a2` is a non-string array Examples -------- @@ -1462,6 +1586,9 @@ add_newdoc('numpy._core.multiarray', 'fromiter', """ + fromiter(iter, dtype, count=-1, *, like=None) + -- + fromiter(iter, dtype, count=-1, *, like=None) Create a new 1-dimensional array from an iterable object. @@ -1517,6 +1644,9 @@ add_newdoc('numpy._core.multiarray', 'fromfile', """ + fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) + -- + fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) Construct an array from data in a text or binary file. @@ -1528,7 +1658,9 @@ Parameters ---------- file : file or str or Path - Open file object or filename. + An open file object, a string containing the filename, or a Path object. + When reading from a file object it must support random access + (i.e. it must have tell and seek methods). dtype : data-type Data type of the returned array. For binary files, it is used to determine the size and byte-order @@ -1600,6 +1732,9 @@ add_newdoc('numpy._core.multiarray', 'frombuffer', """ + frombuffer(buffer, dtype=None, count=-1, offset=0, *, like=None) + -- + frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) Interpret a buffer as a 1-dimensional array. @@ -1609,7 +1744,7 @@ buffer : buffer_like An object that exposes the buffer interface. dtype : data-type, optional - Data-type of the returned array; default: float. + Data-type of the returned array. Default is `numpy.float64`. count : int, optional Number of items to read. ``-1`` means all data in the buffer. 
offset : int, optional @@ -1660,6 +1795,9 @@ add_newdoc('numpy._core.multiarray', 'from_dlpack', """ + from_dlpack(x, /, *, device=None, copy=None) + -- + from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` @@ -1710,6 +1848,9 @@ add_newdoc('numpy._core.multiarray', 'arange', """ + arange(start_or_stop, /, stop=None, step=1, *, dtype=None, device=None, like=None) + -- + arange([start,] stop[, step,], dtype=None, *, device=None, like=None) Return evenly spaced values within a given interval. @@ -1834,13 +1975,16 @@ add_newdoc('numpy._core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) - Construct an empty array. Used by Pickles. + Construct an empty array. Used by Pickle. """) add_newdoc('numpy._core.multiarray', 'promote_types', """ - promote_types(type1, type2) + promote_types(type1, type2, /) + -- + + promote_types(type1, type2, /) Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be safely cast. @@ -2227,8 +2371,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) + ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) + -- + + ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the @@ -2251,6 +2397,7 @@ Shape of created array. dtype : data-type, optional Any object that can be interpreted as a numpy data type. + Default is `numpy.float64`. buffer : object exposing buffer interface, optional Used to fill the array with data. 
offset : int, optional @@ -2368,21 +2515,6 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__', """Array protocol: C-struct side.""")) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', - """ - a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) - - DLPack Protocol: Part of the Array API. - - """)) - -add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', - """ - a.__dlpack_device__() - - DLPack Protocol: Part of the Array API. - - """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('base', """ @@ -2514,13 +2646,15 @@ Examples -------- + >>> import numpy as np + >>> x = np.arange(4).reshape((2, 2)) >>> x array([[0, 1], [2, 3]]) >>> x.dtype - dtype('int32') - >>> type(x.dtype) - + dtype('int64') # may vary (OS, bitness) + >>> isinstance(x.dtype, np.dtype) + True """)) @@ -2769,7 +2903,7 @@ >>> y.shape = (3, 6) Traceback (most recent call last): File "", line 1, in - ValueError: total size of new array must be unchanged + ValueError: cannot reshape array of size 24 into shape (3,6) >>> np.zeros((4,2))[::2].shape = (-1,) Traceback (most recent call last): File "", line 1, in @@ -2852,31 +2986,32 @@ Examples -------- >>> import numpy as np - >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) + >>> y = np.reshape(np.arange(2 * 3 * 4, dtype=np.int32), (2, 3, 4)) >>> y array([[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], - [20, 21, 22, 23]]]) + [20, 21, 22, 23]]], dtype=np.int32) >>> y.strides (48, 16, 4) - >>> y[1,1,1] - 17 - >>> offset=sum(y.strides * np.array((1,1,1))) - >>> offset/y.itemsize - 17 - - >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) + >>> y[1, 1, 1] + np.int32(17) + >>> offset = sum(y.strides * np.array((1, 1, 1))) + >>> offset // y.itemsize + np.int64(17) + + >>> x = np.reshape(np.arange(5*6*7*8, dtype=np.int32), (5, 6, 7, 8)) + >>> x = x.transpose(2, 3, 1, 0) >>> x.strides (32, 4, 224, 1344) - >>> i = np.array([3,5,2,2]) + 
>>> i = np.array([3, 5, 2, 2], dtype=np.int32) >>> offset = sum(i * x.strides) - >>> x[3,5,2,2] - 813 - >>> offset / x.itemsize - 813 + >>> x[3, 5, 2, 2] + np.int32(813) + >>> offset // x.itemsize + np.int64(813) """)) @@ -2951,6 +3086,7 @@ [5, 7]]]) """)) + ############################################################################## # # ndarray methods @@ -2960,6 +3096,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', """ + __array__($self, dtype=None, /, *, copy=None) + -- + a.__array__([dtype], *, copy=None) For ``dtype`` parameter it returns a new reference to self if @@ -2979,6 +3118,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__', """ + __array_finalize__($self, obj, /) + -- + a.__array_finalize__(obj, /) Present so subclasses can call super. Does nothing. @@ -2986,29 +3128,48 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_function__', """ - a.__array_wrap__(array[, context], /) + __array_function__($self, /, func, types, args, kwargs) + -- - Returns a view of `array` with the same type as self. + a.__array_function__(func, types, args, kwargs) + + See :ref:`NEP 18 ` and :ref:`NEP 35 ` for details. """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_ufunc__', """ - a.__copy__() + __array_ufunc__($self, ufunc, method, /, *inputs, **kwargs) + -- - Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + a.__array_ufunc__(ufunc, method, /, *inputs, **kwargs) - Equivalent to ``a.copy(order='K')``. + See :ref:`NEP 13 ` for details. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', + """ + __array_wrap__($self, array, context=None, return_scalar=True, /) + -- + + a.__array_wrap__(array[, context[, return_scalar]], /) + + Returns a view of `array` with the same type as self. 
""")) add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__', """ - a.__class_getitem__(item, /) + __class_getitem__($cls, item, /) + -- + + ndarray[shape, dtype] Return a parametrized wrapper around the `~numpy.ndarray` type. @@ -3021,11 +3182,10 @@ Examples -------- - >>> from typing import Any >>> import numpy as np - >>> np.ndarray[Any, np.dtype[Any]] - numpy.ndarray[typing.Any, numpy.dtype[typing.Any]] + >>> np.ndarray[tuple[int], np.dtype[np.uint8]] + numpy.ndarray[tuple[int], numpy.dtype[numpy.uint8]] See Also -------- @@ -3036,17 +3196,36 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', """ - a.__deepcopy__(memo, /) + __dlpack__($self, /, *, stream=None, max_version=None, dl_device=None, copy=None) + -- - Used if :func:`copy.deepcopy` is called on an array. + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) + + Exports the array for consumption by ``from_dlpack()`` as a DLPack capsule. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', + """ + __dlpack_device__($self, /) + -- + + a.__dlpack_device__() + + Returns device type (``1``) and device ID (``0``) in DLPack format. + Meant for use within ``from_dlpack()``. """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__', """ + __reduce__($self, /) + -- + a.__reduce__() For pickling. @@ -3054,8 +3233,23 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce_ex__', + """ + __reduce_ex__($self, protocol, /) + -- + + a.__reduce_ex__(protocol, /) + + For pickling. + + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__', """ + __setstate__($self, state, /) + -- + a.__setstate__(state, /) For unpickling. 
@@ -3076,97 +3270,242 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('all', +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', """ - a.all(axis=None, out=None, keepdims=False, *, where=True) + dot($self, other, /, out=None) + -- - Returns True if all elements evaluate to True. + a.dot(other, /, out=None) - Refer to `numpy.all` for full documentation. + Refer to :func:`numpy.dot` for full documentation. See Also -------- - numpy.all : equivalent function + numpy.dot : equivalent function """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('any', +add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', """ - a.any(axis=None, out=None, keepdims=False, *, where=True) + argpartition($self, kth, /, axis=-1, kind='introselect', order=None) + -- - Returns True if any of the elements of `a` evaluate to True. + a.argpartition(kth, axis=-1, kind='introselect', order=None) - Refer to `numpy.any` for full documentation. + Returns the indices that would partition this array. + + Refer to `numpy.argpartition` for full documentation. See Also -------- - numpy.any : equivalent function + numpy.argpartition : equivalent function """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax', +add_newdoc('numpy._core.multiarray', 'ndarray', ('partition', """ - a.argmax(axis=None, out=None, *, keepdims=False) + partition($self, kth, /, axis=-1, kind='introselect', order=None) + -- - Return indices of the maximum values along the given axis. + a.partition(kth, axis=-1, kind='introselect', order=None) - Refer to `numpy.argmax` for full documentation. + Partially sorts the elements in the array in such a way that the value of + the element in k-th position is in the position it would be in a sorted + array. In the output array, all elements smaller than the k-th element + are located to the left of this element and all equal or greater are + located to its right. 
The ordering of the elements in the two partitions + on the either side of the k-th element in the output array is undefined. + + Parameters + ---------- + kth : int or sequence of ints + Element index to partition by. The kth element value will be in its + final sorted position and all smaller elements will be moved before it + and all equal or greater elements behind it. + The order of all elements in the partitions is undefined. + If provided with a sequence of kth it will partition all elements + indexed by kth of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need to be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. See Also -------- - numpy.argmax : equivalent function + numpy.partition : Return a partitioned copy of an array. + argpartition : Indirect partition. + sort : Full sort. - """)) + Notes + ----- + See ``np.partition`` for notes on the different algorithms. + Examples + -------- + >>> import numpy as np + >>> a = np.array([3, 4, 2, 1]) + >>> a.partition(3) + >>> a + array([2, 1, 3, 4]) # may vary -add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin', - """ - a.argmin(axis=None, out=None, *, keepdims=False) + >>> a.partition((1, 3)) + >>> a + array([1, 2, 3, 4]) - Return indices of the minimum values along the given axis. + """)) - Refer to `numpy.argmin` for detailed documentation. 
-    See Also
-    --------
-    numpy.argmin : equivalent function
+
+##############################################################################
+#
+# methods from both `ndarray` and `generic`
+#
+##############################################################################
 
-    """))
+_METHOD_DOC_TEMPLATE = """{name}({params})
+--
+{doc}"""
 
 
-add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort',
+def _array_method_doc(name: str, params: str, doc: str) -> None:
     """
-    a.argsort(axis=-1, kind=None, order=None)
+    Internal helper function for adding docstrings to a common method of
+    `numpy.ndarray` and `numpy.generic`.
 
-    Returns the indices that would sort this array.
+    The provided docstring will be added to the given `numpy.ndarray` method.
+    For the `numpy.generic` method, a shorter docstring indicating that it is
+    identical to the `ndarray` method will be created.
+    Both methods will have a proper and identical `__text_signature__`.
 
-    Refer to `numpy.argsort` for full documentation.
+    Parameters
+    ----------
+    name : str
+        Name of the method.
+    params : str
+        Parameter signature for the method without parentheses, for example,
+        ``"a, /, dtype=None, *, copy=False"``.
+        Parameter defaults must be understood by `ast.literal_eval`, i.e. strings,
+        bytes, numbers, tuples, lists, dicts, sets, booleans, or None.
+    doc : str
+        The full docstring for the `ndarray` method.
 
-    See Also
-    --------
-    numpy.argsort : equivalent function
+    """
 
-    """))
+    # prepend the pos-only `$self` parameter to the method signature
+    if "/" not in params:
+        params = f"/, {params}" if params else "/"
+    params = f"$self, {params}"
 
+    # add docstring to `np.ndarray.{name}`
+    doc = textwrap.dedent(doc).strip()
+    doc_array = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc)
+    add_newdoc("numpy._core.multiarray", "ndarray", (name, doc_array))
 
+    # add docstring to `np.generic.{name}`
+    doc_scalar = f"Scalar method identical to `ndarray.{name}`."
+ doc_scalar = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc_scalar) + add_newdoc("numpy._core.numerictypes", "generic", (name, doc_scalar)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', + +_array_method_doc('__array_namespace__', "*, api_version=None", """ - a.argpartition(kth, axis=-1, kind='introselect', order=None) + a.__array_namespace__(*, api_version=None) - Returns the indices that would partition this array. + For Array API compatibility. + """) - Refer to `numpy.argpartition` for full documentation. +_array_method_doc('__copy__', "", + """ + a.__copy__() + + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + """) + +_array_method_doc('__deepcopy__', "memo, /", + """ + a.__deepcopy__(memo, /) + + Used if :func:`copy.deepcopy` is called on an array. + """) + +_array_method_doc('all', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.all(axis=None, out=None, *, keepdims=, where=) + + Returns True if all elements evaluate to True. + + Refer to `numpy.all` for full documentation. See Also -------- - numpy.argpartition : equivalent function + numpy.all : equivalent function + """) - """)) +_array_method_doc('any', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.any(axis=None, out=None, *, keepdims=, where=) + Returns True if any of the elements of `a` evaluate to True. -add_newdoc('numpy._core.multiarray', 'ndarray', ('astype', + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.any : equivalent function + """) + +_array_method_doc('argmax', "axis=None, out=None, *, keepdims=False", + """ + a.argmax(axis=None, out=None, *, keepdims=False) + + Return indices of the maximum values along the given axis. + + Refer to `numpy.argmax` for full documentation. 
+ + See Also + -------- + numpy.argmax : equivalent function + """) + +_array_method_doc('argmin', "axis=None, out=None, *, keepdims=False", + """ + a.argmin(axis=None, out=None, *, keepdims=False) + + Return indices of the minimum values along the given axis. + + Refer to `numpy.argmin` for detailed documentation. + + See Also + -------- + numpy.argmin : equivalent function + """) + +_array_method_doc('argsort', "axis=-1, kind=None, order=None, *, stable=None", + """ + a.argsort(axis=-1, kind=None, order=None, *, stable=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. + + See Also + -------- + numpy.argsort : equivalent function + """) + +_array_method_doc('astype', "dtype, order='K', casting='unsafe', subok=True, copy=True", """ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) @@ -3183,7 +3522,7 @@ 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + casting : {'no', 'equiv', 'safe', 'same_kind', 'same_value', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. @@ -3193,6 +3532,12 @@ * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. + * 'same_value' means any data conversions may be done, but the values + must not change, including rounding of floats or overflow of ints + + .. versionadded:: 2.4 + Support for ``'same_value'`` was added. + subok : bool, optional If True, then sub-classes will be passed-through (default), otherwise the returned array will be forced to be a base-class array. @@ -3215,6 +3560,9 @@ ComplexWarning When casting from complex to float or int. To avoid this, one should use ``a.real.astype(t)``. 
+ ValueError + When casting using ``'same_value'`` and the values change or would + overflow Examples -------- @@ -3226,10 +3574,16 @@ >>> x.astype(int) array([1, 2, 2]) - """)) + >>> x.astype(int, casting="same_value") + Traceback (most recent call last): + ... + ValueError: could not cast 'same_value' double to long + >>> x[:2].astype(int, casting="same_value") + array([1, 2]) + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap', +_array_method_doc('byteswap', "inplace=False", """ a.byteswap(inplace=False) @@ -3280,11 +3634,9 @@ >>> A.view(np.uint8) array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3], dtype=uint8) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('choose', +_array_method_doc('choose', "choices, out=None, mode='raise'", """ a.choose(choices, out=None, mode='raise') @@ -3295,13 +3647,11 @@ See Also -------- numpy.choose : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', +_array_method_doc('clip', "min=None, max=None, out=None, **kwargs", """ - a.clip(min=None, max=None, out=None, **kwargs) + a.clip(min=, max=, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. 
@@ -3311,11 +3661,9 @@ See Also -------- numpy.clip : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('compress', +_array_method_doc('compress', "condition, axis=None, out=None", """ a.compress(condition, axis=None, out=None) @@ -3326,11 +3674,9 @@ See Also -------- numpy.compress : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('conj', +_array_method_doc('conj', "", """ a.conj() @@ -3341,11 +3687,9 @@ See Also -------- numpy.conjugate : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate', +_array_method_doc('conjugate', "", """ a.conjugate() @@ -3356,11 +3700,9 @@ See Also -------- numpy.conjugate : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('copy', +_array_method_doc('copy', "order='C'", """ a.copy(order='C') @@ -3429,11 +3771,9 @@ array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', +_array_method_doc('cumprod', "axis=None, dtype=None, out=None", """ a.cumprod(axis=None, dtype=None, out=None) @@ -3444,11 +3784,9 @@ See Also -------- numpy.cumprod : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', +_array_method_doc('cumsum', "axis=None, dtype=None, out=None", """ a.cumsum(axis=None, dtype=None, out=None) @@ -3459,11 +3797,9 @@ See Also -------- numpy.cumsum : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', +_array_method_doc('diagonal', "offset=0, axis1=0, axis2=1", """ a.diagonal(offset=0, axis1=0, axis2=1) @@ -3476,14 +3812,9 @@ See Also -------- numpy.diagonal : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dot')) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', +_array_method_doc('dump', "file", """ 
a.dump(file) @@ -3494,25 +3825,21 @@ ---------- file : str or Path A string naming the dump file. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', +_array_method_doc('dumps', "", """ a.dumps() Returns the pickle of the array as a string. - pickle.loads will convert the string back to an array. + ``pickle.loads`` will convert the string back to an array. Parameters ---------- None + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', +_array_method_doc('fill', "value", """ a.fill(value) @@ -3552,11 +3879,9 @@ >>> a[...] = np.array(3) >>> a array([3, 3], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', +_array_method_doc('flatten', "order='C'", """ a.flatten(order='C') @@ -3591,11 +3916,9 @@ array([1, 2, 3, 4]) >>> a.flatten('F') array([1, 3, 2, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', +_array_method_doc('getfield', "dtype, offset=0", """ a.getfield(dtype, offset=0) @@ -3634,11 +3957,9 @@ >>> x.getfield(np.float64, offset=8) array([[1., 0.], [0., 4.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('item', +_array_method_doc('item', "*args", """ a.item(*args) @@ -3700,13 +4021,13 @@ >>> a = np.array([np.int64(1)], dtype=object) >>> a.item() #return np.int64 np.int64(1) + """) - """)) +_KWARGS_REDUCE = "keepdims=, initial=, where=" - -add_newdoc('numpy._core.multiarray', 'ndarray', ('max', - """ - a.max(axis=None, out=None, keepdims=False, initial=, where=True) +_array_method_doc('max', "axis=None, out=None, **kwargs", + f""" + a.max(axis=None, out=None, *, {_KWARGS_REDUCE}) Return the maximum along a given axis. 
@@ -3715,89 +4036,89 @@ See Also -------- numpy.amax : equivalent function + """) - """)) - +_array_method_doc('min', "axis=None, out=None, **kwargs", + f""" + a.min(axis=None, out=None, *, {_KWARGS_REDUCE}) -add_newdoc('numpy._core.multiarray', 'ndarray', ('mean', - """ - a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True) - - Returns the average of the array elements along given axis. + Return the minimum along a given axis. - Refer to `numpy.mean` for full documentation. + Refer to `numpy.amin` for full documentation. See Also -------- - numpy.mean : equivalent function - - """)) - + numpy.amin : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('min', - """ - a.min(axis=None, out=None, keepdims=False, initial=, where=True) +_array_method_doc('prod', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.prod(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) - Return the minimum along a given axis. + Return the product of the array elements over the given axis - Refer to `numpy.amin` for full documentation. + Refer to `numpy.prod` for full documentation. See Also -------- - numpy.amin : equivalent function + numpy.prod : equivalent function + """) - """)) +_array_method_doc('sum', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.sum(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) + + Return the sum of the array elements over the given axis. + Refer to `numpy.sum` for full documentation. -add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero', + See Also + -------- + numpy.sum : equivalent function + """) + +_array_method_doc('mean', "axis=None, dtype=None, out=None, **kwargs", """ - a.nonzero() + a.mean(axis=None, dtype=None, out=None, *, keepdims=, where=) - Return the indices of the elements that are non-zero. + Returns the average of the array elements along given axis. - Refer to `numpy.nonzero` for full documentation. + Refer to `numpy.mean` for full documentation. 
See Also -------- - numpy.nonzero : equivalent function - - """)) - + numpy.mean : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('prod', +_array_method_doc('nonzero', "", """ - a.prod(axis=None, dtype=None, out=None, keepdims=False, - initial=1, where=True) + a.nonzero() - Return the product of the array elements over the given axis + Return the indices of the elements that are non-zero. - Refer to `numpy.prod` for full documentation. + Refer to `numpy.nonzero` for full documentation. See Also -------- - numpy.prod : equivalent function - - """)) - + numpy.nonzero : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('put', +_array_method_doc('put', "indices, values, /, mode='raise'", """ a.put(indices, values, mode='raise') - Set ``a.flat[n] = values[n]`` for all `n` in indices. + Set ``a.flat[n] = values[n]`` for all ``n`` in indices. Refer to `numpy.put` for full documentation. See Also -------- numpy.put : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel', +_array_method_doc('ravel', "order='C'", """ - a.ravel([order]) + a.ravel(order='C') Return a flattened array. @@ -3806,13 +4127,10 @@ See Also -------- numpy.ravel : equivalent function - ndarray.flat : a flat iterator on the array. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat', +_array_method_doc('repeat', "repeats, /, axis=None", """ a.repeat(repeats, axis=None) @@ -3823,13 +4141,12 @@ See Also -------- numpy.repeat : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape', +_array_method_doc('reshape', "*shape, order='C', copy=None", """ a.reshape(shape, /, *, order='C', copy=None) + a.reshape(*shape, order='C', copy=None) Returns an array containing the same data with a new shape. 
@@ -3843,15 +4160,13 @@ ----- Unlike the free function `numpy.reshape`, this method on `ndarray` allows the elements of the shape parameter to be passed in as separate arguments. - For example, ``a.reshape(10, 11)`` is equivalent to - ``a.reshape((10, 11))``. - - """)) - + For example, ``a.reshape(4, 2)`` is equivalent to ``a.reshape((4, 2))``. + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('resize', +_array_method_doc('resize', "*new_shape, refcheck=True", """ - a.resize(new_shape, refcheck=True) + a.resize(new_shape, /, *, refcheck=True) + a.resize(*new_shape, refcheck=True) Change shape and size of array in-place. @@ -3939,11 +4254,9 @@ array([[0]]) >>> c array([[0]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('round', +_array_method_doc('round', "decimals=0, out=None", """ a.round(decimals=0, out=None) @@ -3954,26 +4267,22 @@ See Also -------- numpy.around : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted', +_array_method_doc('searchsorted', "v, /, side='left', sorter=None", """ a.searchsorted(v, side='left', sorter=None) - Find indices where elements of v should be inserted in a to maintain order. + Find indices where elements of `v` should be inserted in `a` to maintain order. - For full documentation, see `numpy.searchsorted` + For full documentation, see `numpy.searchsorted`. 
See Also -------- numpy.searchsorted : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield', +_array_method_doc('setfield', "val, /, dtype, offset=0", """ a.setfield(val, dtype, offset=0) @@ -4021,11 +4330,9 @@ array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags', +_array_method_doc('setflags', "*, write=None, align=None, uic=None", """ a.setflags(write=None, align=None, uic=None) @@ -4098,13 +4405,11 @@ Traceback (most recent call last): File "", line 1, in ValueError: cannot set WRITEBACKIFCOPY flag to True + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('sort', +_array_method_doc('sort', "axis=-1, kind=None, order=None, *, stable=None", """ - a.sort(axis=-1, kind=None, order=None) + a.sort(axis=-1, kind=None, order=None, *, stable=None) Sort an array in-place. Refer to `numpy.sort` for full documentation. @@ -4120,108 +4425,54 @@ is retained for backwards compatibility. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. A single field can - be specified as a string, and not all fields need be specified, - but unspecified fields will still be used, in the order in which - they come up in the dtype, to break ties. - - See Also - -------- - numpy.sort : Return a sorted copy of an array. - numpy.argsort : Indirect sort. - numpy.lexsort : Indirect stable sort on multiple keys. - numpy.searchsorted : Find elements in sorted array. - numpy.partition: Partial sort. - - Notes - ----- - See `numpy.sort` for notes on the different sorting algorithms. 
- - Examples - -------- - >>> import numpy as np - >>> a = np.array([[1,4], [3,1]]) - >>> a.sort(axis=1) - >>> a - array([[1, 4], - [1, 3]]) - >>> a.sort(axis=0) - >>> a - array([[1, 3], - [1, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) - >>> a.sort(order='y') - >>> a - array([(b'c', 1), (b'a', 2)], - dtype=[('x', 'S1'), ('y', '>> import numpy as np - >>> a = np.array([3, 4, 2, 1]) - >>> a.partition(3) + >>> a = np.array([[1,4], [3,1]]) + >>> a.sort(axis=1) >>> a - array([2, 1, 3, 4]) # may vary - - >>> a.partition((1, 3)) + array([[1, 4], + [1, 3]]) + >>> a.sort(axis=0) >>> a - array([1, 2, 3, 4]) - """)) + array([[1, 3], + [1, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) + >>> a.sort(order='y') + >>> a + array([(b'c', 1), (b'a', 2)], + dtype=[('x', 'S1'), ('y', '>> a.tolist() 1 - """)) - + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', """ +_array_method_doc('tobytes', "order='C'", + """ a.tobytes(order='C') Construct Python bytes containing the raw data bytes in the array. @@ -4438,23 +4699,9 @@ True >>> x.tobytes('F') b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('tostring', r""" - a.tostring(order='C') - - A compatibility alias for `~ndarray.tobytes`, with exactly the same - behavior. - - Despite its name, it returns :class:`bytes` not :class:`str`\ s. - - .. 
deprecated:: 1.19.0 - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('trace', +_array_method_doc('trace', "offset=0, axis1=0, axis2=1, dtype=None, out=None", """ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) @@ -4465,11 +4712,9 @@ See Also -------- numpy.trace : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose', +_array_method_doc('transpose', "*axes", """ a.transpose(*axes) @@ -4522,26 +4767,9 @@ array([1, 2, 3, 4]) >>> a.transpose() array([1, 2, 3, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('var', - """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('view', +_array_method_doc('view', "*args, **kwargs", """ a.view([dtype][, type]) @@ -4658,8 +4886,7 @@ [[2312, 2826], [5396, 5910]]], dtype=int16) - - """)) + """) ############################################################################## @@ -4670,6 +4897,9 @@ add_newdoc('numpy._core.umath', 'frompyfunc', """ + frompyfunc(func, /, nin, nout, **kwargs) + -- + frompyfunc(func, /, nin, nout, *[, identity]) Takes an arbitrary Python function and returns a NumPy ufunc. @@ -4764,7 +4994,7 @@ add_newdoc('numpy._core.multiarray', 'get_handler_name', """ - get_handler_name(a: ndarray) -> str,None + get_handler_name(a: ndarray) -> str | None Return the name of the memory handler used by `a`. If not provided, return the name of the memory handler that will be used to allocate data for the @@ -4915,12 +5145,17 @@ ---------- *x : array_like Input arrays. - out : ndarray, None, or tuple of ndarray and None, optional - Alternate array object(s) in which to put the result; if provided, it - must have a shape that the inputs broadcast to. 
A tuple of arrays - (possible only as a keyword argument) must have length equal to the - number of outputs; use None for uninitialized outputs to be - allocated by the ufunc. + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location(s) into which the result(s) are stored. + If not provided or None, new array(s) are created by the ufunc. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional, + or a tuple with length equal to the number of outputs (where None + can be used for allocation by the ufunc). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + where : array_like, optional This condition is broadcast over the input. At locations where the condition is True, the `out` array will be set to the ufunc result. @@ -4962,8 +5197,8 @@ 0 >>> np.multiply.identity 1 - >>> np.power.identity - 1 + >>> print(np.power.identity) + None >>> print(np.exp.identity) None """)) @@ -5051,15 +5286,15 @@ -------- >>> import numpy as np >>> np.add.ntypes - 18 + 22 >>> np.multiply.ntypes - 18 + 23 >>> np.power.ntypes - 17 + 21 >>> np.exp.ntypes - 7 + 10 >>> np.remainder.ntypes - 14 + 16 """)) @@ -5078,26 +5313,16 @@ -------- >>> import numpy as np >>> np.add.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.multiply.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', ... >>> np.power.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', - 'OO->O'] + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ... 
>>> np.exp.types - ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] + ['e->e', 'f->f', 'd->d', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] >>> np.remainder.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ... """)) @@ -5141,6 +5366,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduce', """ + reduce($self, array, /, axis=0, dtype=None, out=None, **kwargs) + -- + reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=, where=True) Reduces `array`'s dimension by one, by applying ufunc along one axis. @@ -5182,11 +5410,17 @@ ``out`` if given, and the data type of ``array`` otherwise (though upcast to conserve precision for some cases, such as ``numpy.add.reduce`` for integer or boolean input). - out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional + (which is useful especially for object dtype), or a 1-element tuple + (latter for consistency with ``ufunc.__call__``). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. 
With this option, @@ -5261,6 +5495,9 @@ add_newdoc('numpy._core', 'ufunc', ('accumulate', """ + accumulate($self, array, /, axis=0, dtype=None, out=None) + -- + accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. @@ -5291,10 +5528,11 @@ to the data-type of the output array if such is provided, or the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + For consistency with ``ufunc.__call__``, if passed as a keyword + argument, can be Ellipses (``out=...``, which has the same effect + as None as an array is always returned), or a 1-element tuple. Returns ------- @@ -5338,6 +5576,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduceat', """ + reduceat($self, array, /, indices, axis=0, dtype=None, out=None) + -- + reduceat(array, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis. @@ -5372,10 +5613,11 @@ upcast to conserve precision for some cases, such as ``numpy.add.reduce`` for integer or boolean input). out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. 
+ For consistency with ``ufunc.__call__``, if passed as a keyword + argument, can be Ellipses (``out=...``, which has the same effect + as None as an array is always returned), or a 1-element tuple. Returns ------- @@ -5445,6 +5687,9 @@ add_newdoc('numpy._core', 'ufunc', ('outer', r""" + outer($self, A, B, /, **kwargs) + -- + outer(A, B, /, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. @@ -5516,6 +5761,9 @@ add_newdoc('numpy._core', 'ufunc', ('at', """ + at($self, a, indices, b=None, /) + -- + at(a, indices, b=None, /) Performs unbuffered in place operation on operand 'a' for elements @@ -5567,6 +5815,9 @@ add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes', """ + resolve_dtypes($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False) Find the dtypes NumPy will use for the operation. Both input and @@ -5639,6 +5890,9 @@ add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context', """ + _resolve_dtypes_and_context($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False) See `numpy.ufunc.resolve_dtypes` for parameter information. This @@ -5662,6 +5916,9 @@ add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop', """ + _get_strided_loop($self, call_info, /, *, fixed_strides=None) + -- + _get_strided_loop(call_info, /, *, fixed_strides=None) This function fills in the ``call_info`` capsule to include all @@ -5717,7 +5974,6 @@ """)) - ############################################################################## # # Documentation for dtype attributes and methods @@ -5732,6 +5988,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', """ + dtype(dtype, align=False, copy=False, **kwargs) + -- + dtype(dtype, align=False, copy=False, [metadata]) Create a data type object. 
@@ -5952,7 +6211,7 @@ >>> import numpy as np >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) - {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} + {'name': (dtype('>> (arr + arr).dtype.metadata mappingproxy({'key': 'value'}) - But if the arrays have different dtype metadata, the metadata may be - dropped: + If the arrays have different dtype metadata, the first one wins: >>> dt2 = np.dtype(float, metadata={"key2": "value2"}) >>> arr2 = np.array([3, 2, 1], dtype=dt2) - >>> (arr + arr2).dtype.metadata is None - True # The metadata field is cleared so None is returned + >>> print((arr + arr2).dtype.metadata) + {'key': 'value'} """)) add_newdoc('numpy._core.multiarray', 'dtype', ('name', @@ -6255,11 +6514,11 @@ Examples -------- >>> import numpy as np - >>> x = numpy.dtype('8f') + >>> x = np.dtype('8f') >>> x.subdtype (dtype('float32'), (8,)) - >>> x = numpy.dtype('i2') + >>> x = np.dtype('i2') >>> x.subdtype >>> @@ -6277,11 +6536,11 @@ Examples -------- >>> import numpy as np - >>> x = numpy.dtype('8f') + >>> x = np.dtype('8f') >>> x.base dtype('float32') - >>> x = numpy.dtype('i2') + >>> x = np.dtype('i2') >>> x.base dtype('int16') @@ -6298,6 +6557,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder', """ + newbyteorder($self, new_order='S', /) + -- + newbyteorder(new_order='S', /) Return a new dtype with a different byte order. @@ -6416,7 +6678,7 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__', """ - __ge__(value, /) + __gt__(value, /) Return ``self > value``. 
@@ -6454,6 +6716,9 @@ add_newdoc('numpy._core.multiarray', 'busdaycalendar', """ + busdaycalendar(weekmask='1111100', holidays=None) + -- + busdaycalendar(weekmask='1111100', holidays=None) A business day calendar object that efficiently stores information @@ -6577,6 +6842,9 @@ add_newdoc('numpy._core.multiarray', 'datetime_data', """ + datetime_data(dtype, /) + -- + datetime_data(dtype, /) Get information about the step size of a date or time type. @@ -6634,21 +6902,11 @@ # Attributes -def refer_to_array_attribute(attr, method=True): - docstring = """ - Scalar {} identical to the corresponding array attribute. - - Please see `ndarray.{}`. - """ - - return attr, docstring.format("method" if method else "attribute", attr) +add_newdoc('numpy._core.numerictypes', 'generic', ('T', + """Scalar attribute identical to `ndarray.T`.""")) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('T', method=False)) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('base', method=False)) +add_newdoc('numpy._core.numerictypes', 'generic', ('base', + """Scalar attribute identical to `ndarray.base`.""")) add_newdoc('numpy._core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) @@ -6685,153 +6943,12 @@ def refer_to_array_attribute(attr, method=True): # Methods -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('all')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('any')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmax')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmin')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argsort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('astype')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('byteswap')) - 
-add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('choose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('clip')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('compress')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('conjugate')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('copy')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumprod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumsum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('diagonal')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dump')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dumps')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('fill')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('flatten')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('getfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('item')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('max')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('mean')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('min')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('nonzero')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('prod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('put')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('ravel')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('repeat')) - 
-add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('reshape')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('resize')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('round')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('searchsorted')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setflags')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('squeeze')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('std')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('swapaxes')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('take')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tofile')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tolist')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tostring')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('trace')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('transpose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('var')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('view')) - add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__', """ - __class_getitem__(item, /) + __class_getitem__($cls, item, /) + -- + + number.__class_getitem__(item, /) Return a parametrized wrapper around the `~numpy.number` type. 
@@ -6922,8 +7039,67 @@ def refer_to_array_attribute(attr, method=True): """) +############################################################################## +# +# Documentation for `dtypes.*` classes +# +############################################################################## + +for _dtype_name, _signature, _sctype_name in ( + ("BoolDType", "()", "bool"), + ("Int8DType", "()", "int8"), + ("UInt8DType", "()", "uint8"), + ("Int16DType", "()", "int16"), + ("UInt16DType", "()", "uint16"), + ("Int32DType", "()", "int32"), + ("IntDType", "()", "intc"), + ("UInt32DType", "()", "uint32"), + ("UIntDType", "()", "uintc"), + ("Int64DType", "()", "int64"), + ("UInt64DType", "()", "uint64"), + ("LongLongDType", "()", "longlong"), + ("ULongLongDType", "()", "ulonglong"), + ("Float16DType", "()", "float16"), + ("Float32DType", "()", "float32"), + ("Float64DType", "()", "float64"), + ("LongDoubleDType", "()", "longdouble"), + ("Complex64DType", "()", "complex64"), + ("Complex128DType", "()", "complex128"), + ("CLongDoubleDType", "()", "clongdouble"), + ("ObjectDType", "()", "object"), + ("BytesDType", "(size, /)", "bytes_"), + ("StrDType", "(size, /)", "str_"), + ("VoidDType", "(length, /)", "void"), + ("DateTime64DType", "(unit, /)", "datetime64"), + ("TimeDelta64DType", "(unit, /)", "timedelta64"), +): + _extra_docs = "" + if _dtype_name in {"VoidDType", "DateTime64DType", "TimeDelta64DType"}: + _extra_docs = f""" + .. warning:: + ``np.dtypes.{_dtype_name}`` cannot be instantiated directly. + Use ``np.dtype("{_sctype_name}[{{unit}}]")`` instead. + """ + + add_newdoc('numpy.dtypes', _dtype_name, + f""" + {_dtype_name}{_signature} + -- + + DType class corresponding to the `numpy.{_sctype_name}` scalar type. + {_extra_docs} + See `numpy.dtype` for the typical way to create dtype instances + and :ref:`arrays.dtypes` for additional information. 
+ """) + + del _dtype_name, _signature, _sctype_name, _extra_docs # avoid namespace pollution + + add_newdoc('numpy._core.multiarray', 'StringDType', """ + StringDType(*, coerce=True, **kwargs) + -- + StringDType(*, na_object=np._NoValue, coerce=True) Create a StringDType instance. @@ -6965,9 +7141,10 @@ def refer_to_array_attribute(attr, method=True): array([False, True, False]) >>> np.array([1.2, object(), "hello world"], - ... dtype=StringDType(coerce=True)) - ValueError: StringDType only allows string data when string coercion - is disabled. + ... dtype=StringDType(coerce=False)) + Traceback (most recent call last): + ... + ValueError: StringDType only allows string data when string coercion is disabled. >>> np.array(["hello", "world"], dtype=StringDType(coerce=True)) array(["hello", "world"], dtype=StringDType(coerce=True)) diff --git a/blimgui/dist64/numpy/_core/_add_newdocs.pyi b/blimgui/dist64/numpy/_core/_add_newdocs.pyi index 0a34ac4..83db789 100644 --- a/blimgui/dist64/numpy/_core/_add_newdocs.pyi +++ b/blimgui/dist64/numpy/_core/_add_newdocs.pyi @@ -1,3 +1,2 @@ +from .function_base import add_newdoc as add_newdoc from .overrides import get_array_function_like_doc as get_array_function_like_doc - -def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ... diff --git a/blimgui/dist64/numpy/_core/_add_newdocs_scalars.py b/blimgui/dist64/numpy/_core/_add_newdocs_scalars.py index 5239268..375d217 100644 --- a/blimgui/dist64/numpy/_core/_add_newdocs_scalars.py +++ b/blimgui/dist64/numpy/_core/_add_newdocs_scalars.py @@ -3,10 +3,10 @@ our sphinx ``conf.py`` during doc builds, where we want to avoid showing platform-dependent information. 
""" -import sys import os -from numpy._core import dtype -from numpy._core import numerictypes as _numerictypes +import sys + +from numpy._core import dtype, numerictypes as _numerictypes from numpy._core.function_base import add_newdoc ############################################################################## @@ -48,7 +48,7 @@ def type_aliases_gen(): ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), - ]) +]) def _get_platform_and_machine(): @@ -67,258 +67,240 @@ def _get_platform_and_machine(): _system, _machine = _get_platform_and_machine() _doc_alias_string = f":Alias on this platform ({_system} {_machine}):" +# docstring prefix that cpython uses to populate `__text_signature__` +_ARGUMENT_CLINIC_TEMPLATE = """{name}{signature} +-- -def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): - # note: `:field: value` is rST syntax which renders as field lists. - o = getattr(_numerictypes, obj) - - character_code = dtype(o).char - canonical_name_doc = "" if obj == o.__name__ else \ - f":Canonical name: `numpy.{obj}`\n " - if fixed_aliases: - alias_doc = ''.join(f":Alias: `numpy.{alias}`\n " - for alias in fixed_aliases) - else: - alias_doc = '' - alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n " - for (alias_type, alias, doc) in possible_aliases if alias_type is o) - - docstring = f""" - {doc.strip()} - - :Character code: ``'{character_code}'`` - {canonical_name_doc}{alias_doc} - """ - - add_newdoc('numpy._core.numerictypes', obj, docstring) - - -_bool_docstring = ( - """ - Boolean type (True or False), stored as a byte. - - .. warning:: - - The :class:`bool` type is not a subclass of the :class:`int_` type - (the :class:`bool` is not even a number type). 
This is different - than Python's default implementation of :class:`bool` as a - sub-class of :class:`int`. - """ -) - -add_newdoc_for_scalar_type('bool', [], _bool_docstring) - -add_newdoc_for_scalar_type('bool_', [], _bool_docstring) - -add_newdoc_for_scalar_type('byte', [], - """ - Signed integer type, compatible with C ``char``. - """) - -add_newdoc_for_scalar_type('short', [], - """ - Signed integer type, compatible with C ``short``. - """) - -add_newdoc_for_scalar_type('intc', [], - """ - Signed integer type, compatible with C ``int``. - """) - -# TODO: These docs probably need an if to highlight the default rather than -# the C-types (and be correct). -add_newdoc_for_scalar_type('int_', [], - """ - Default signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) - -add_newdoc_for_scalar_type('longlong', [], - """ - Signed integer type, compatible with C ``long long``. - """) - -add_newdoc_for_scalar_type('ubyte', [], - """ - Unsigned integer type, compatible with C ``unsigned char``. - """) - -add_newdoc_for_scalar_type('ushort', [], - """ - Unsigned integer type, compatible with C ``unsigned short``. - """) - -add_newdoc_for_scalar_type('uintc', [], - """ - Unsigned integer type, compatible with C ``unsigned int``. - """) +{docstring}""" -add_newdoc_for_scalar_type('uint', [], - """ - Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: + # note: `:field: value` is rST syntax which renders as field lists. + cls = getattr(_numerictypes, name) + module = cls.__module__ -add_newdoc_for_scalar_type('ulonglong', [], - """ - Signed integer type, compatible with C ``unsigned long long``. - """) + lines_extra = [ + "", # blank line after main doc + f":Character code: ``{dtype(cls).char!r}``", + ] -add_newdoc_for_scalar_type('half', [], - """ - Half-precision floating-point number type. 
- """) + if name != cls.__name__: + lines_extra.append(f":Canonical name: `{module}.{name}`") -add_newdoc_for_scalar_type('single', [], - """ - Single-precision floating-point number type, compatible with C ``float``. - """) + lines_extra.extend( + f"{_doc_alias_string} `{module}.{alias}`: {doc}." + for alias_type, alias, doc in possible_aliases + if alias_type is cls + ) -add_newdoc_for_scalar_type('double', [], - """ - Double-precision floating-point number type, compatible with Python - :class:`float` and C ``double``. - """) + docstring = _ARGUMENT_CLINIC_TEMPLATE.format( + name=cls.__name__, # must match the class name + signature=text_signature, + docstring="\n".join([doc.strip(), *lines_extra]), + ) + add_newdoc('numpy._core.numerictypes', name, docstring) -add_newdoc_for_scalar_type('longdouble', [], - """ - Extended-precision floating-point number type, compatible with C - ``long double`` but not necessarily with IEEE 754 quadruple-precision. - """) -add_newdoc_for_scalar_type('csingle', [], - """ - Complex number type composed of two single-precision floating-point - numbers. - """) +for bool_name in ('bool', 'bool_'): + add_newdoc_for_scalar_type(bool_name, '(value=False, /)', """ +Boolean type (True or False), stored as a byte. -add_newdoc_for_scalar_type('cdouble', [], - """ - Complex number type composed of two double-precision floating-point - numbers, compatible with Python :class:`complex`. - """) +.. warning:: -add_newdoc_for_scalar_type('clongdouble', [], - """ - Complex number type composed of two extended-precision floating-point - numbers. - """) + The :class:`bool` type is not a subclass of the :class:`int_` type + (the :class:`bool` is not even a number type). This is different + than Python's default implementation of :class:`bool` as a + sub-class of :class:`int`. +""") -add_newdoc_for_scalar_type('object_', [], - """ - Any Python object. - """) - -add_newdoc_for_scalar_type('str_', [], - r""" - A unicode string. 
- - This type strips trailing null codepoints. - - >>> s = np.str_("abc\x00") - >>> s - 'abc' - - Unlike the builtin :class:`str`, this supports the - :ref:`python:bufferobjects`, exposing its contents as UCS4: - - >>> m = memoryview(np.str_("abc")) - >>> m.format - '3w' - >>> m.tobytes() - b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' - """) - -add_newdoc_for_scalar_type('bytes_', [], - r""" - A byte string. - - When used in arrays, this type strips trailing null bytes. - """) - -add_newdoc_for_scalar_type('void', [], - r""" - np.void(length_or_data, /, dtype=None) - - Create a new structured or unstructured void scalar. - - Parameters - ---------- - length_or_data : int, array-like, bytes-like, object - One of multiple meanings (see notes). The length or - bytes data of an unstructured void. Or alternatively, - the data to be stored in the new scalar when `dtype` - is provided. - This can be an array-like, in which case an array may - be returned. - dtype : dtype, optional - If provided the dtype of the new scalar. This dtype must - be "void" dtype (i.e. a structured or unstructured void, - see also :ref:`defining-structured-types`). - - .. versionadded:: 1.24 - - Notes - ----- - For historical reasons and because void scalars can represent both - arbitrary byte data and structured dtypes, the void constructor - has three calling conventions: - - 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five - ``\0`` bytes. The 5 can be a Python or NumPy integer. - 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. - The dtype itemsize will match the byte string length, here ``"V10"``. - 3. When a ``dtype=`` is passed the call is roughly the same as an - array creation. However, a void scalar rather than array is returned. - - Please see the examples which show all three different conventions. +add_newdoc_for_scalar_type('byte', '(value=0, /)', """ +Signed integer type, compatible with C ``char``. 
+""") - Examples - -------- - >>> np.void(5) - np.void(b'\x00\x00\x00\x00\x00') - >>> np.void(b'abcd') - np.void(b'\x61\x62\x63\x64') - >>> np.void((3.2, b'eggs'), dtype="d,S5") - np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) - np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) - - """) - -add_newdoc_for_scalar_type('datetime64', [], - """ - If created from a 64-bit integer, it represents an offset from - ``1970-01-01T00:00:00``. - If created from string, the string can be in ISO 8601 date - or datetime format. - - When parsing a string to create a datetime object, if the string contains - a trailing timezone (A 'Z' or a timezone offset), the timezone will be - dropped and a User Warning is given. - - Datetime64 objects should be considered to be UTC and therefore have an - offset of +0000. +add_newdoc_for_scalar_type('short', '(value=0, /)', """ +Signed integer type, compatible with C ``short``. +""") - >>> np.datetime64(10, 'Y') - np.datetime64('1980') - >>> np.datetime64('1980', 'Y') - np.datetime64('1980') - >>> np.datetime64(10, 'D') - np.datetime64('1970-01-11') +add_newdoc_for_scalar_type('intc', '(value=0, /)', """ +Signed integer type, compatible with C ``int``. +""") - See :ref:`arrays.datetime` for more information. - """) +add_newdoc_for_scalar_type('long', '(value=0, /)', """ +Signed integer type, compatible with C ``long``. +""") -add_newdoc_for_scalar_type('timedelta64', [], - """ - A timedelta stored as a 64-bit integer. - - See :ref:`arrays.datetime` for more information. - """) +# TODO: These docs probably need an if to highlight the default rather than +# the C-types (and be correct). +add_newdoc_for_scalar_type('int_', '(value=0, /)', """ +Default signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. +""") + +add_newdoc_for_scalar_type('longlong', '(value=0, /)', """ +Signed integer type, compatible with C ``long long``. 
+""") + +add_newdoc_for_scalar_type('ubyte', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned char``. +""") + +add_newdoc_for_scalar_type('ushort', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned short``. +""") + +add_newdoc_for_scalar_type('uintc', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned int``. +""") + +add_newdoc_for_scalar_type('uint', '(value=0, /)', """ +Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. +""") + +add_newdoc_for_scalar_type('ulong', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned long``. +""") + +add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned long long``. +""") + +add_newdoc_for_scalar_type('half', '(value=0, /)', """ +Half-precision floating-point number type. +""") + +add_newdoc_for_scalar_type('single', '(value=0, /)', """ +Single-precision floating-point number type, compatible with C ``float``. +""") + +add_newdoc_for_scalar_type('double', '(value=0, /)', """ +Double-precision floating-point number type, compatible with Python :class:`float` and C ``double``. +""") + +add_newdoc_for_scalar_type('longdouble', '(value=0, /)', """ +Extended-precision floating-point number type, compatible with C ``long double`` +but not necessarily with IEEE 754 quadruple-precision. +""") + +add_newdoc_for_scalar_type('csingle', '(real=0, imag=0, /)', """ +Complex number type composed of two single-precision floating-point numbers. +""") + +add_newdoc_for_scalar_type('cdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two double-precision floating-point numbers, +compatible with Python :class:`complex`. +""") + +add_newdoc_for_scalar_type('clongdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two extended-precision floating-point numbers. 
+""") + +add_newdoc_for_scalar_type('object_', '(value=None, /)', """ +Any Python object. +""") + +add_newdoc_for_scalar_type('str_', '(value="", /, *args, **kwargs)', r""" +A unicode string. + +This type strips trailing null codepoints. + +>>> s = np.str_("abc\x00") +>>> s +'abc' + +Unlike the builtin :class:`str`, this supports the +:ref:`python:bufferobjects`, exposing its contents as UCS4: + +>>> m = memoryview(np.str_("abc")) +>>> m.format +'3w' +>>> m.tobytes() +b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' +""") + +add_newdoc_for_scalar_type('bytes_', '(value="", /, *args, **kwargs)', r""" +A byte string. + +When used in arrays, this type strips trailing null bytes. +""") + +add_newdoc_for_scalar_type('void', '(length_or_data, /, dtype=None)', r""" +np.void(length_or_data, /, dtype=None) + +Create a new structured or unstructured void scalar. + +Parameters +---------- +length_or_data : int, array-like, bytes-like, object + One of multiple meanings (see notes). The length or + bytes data of an unstructured void. Or alternatively, + the data to be stored in the new scalar when `dtype` + is provided. + This can be an array-like, in which case an array may + be returned. +dtype : dtype, optional + If provided the dtype of the new scalar. This dtype must + be "void" dtype (i.e. a structured or unstructured void, + see also :ref:`defining-structured-types`). + + .. versionadded:: 1.24 + +Notes +----- +For historical reasons and because void scalars can represent both +arbitrary byte data and structured dtypes, the void constructor +has three calling conventions: + +1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five + ``\0`` bytes. The 5 can be a Python or NumPy integer. +2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. + The dtype itemsize will match the byte string length, here ``"V10"``. +3. When a ``dtype=`` is passed the call is roughly the same as an + array creation. However, a void scalar rather than array is returned. 
+ +Please see the examples which show all three different conventions. + +Examples +-------- +>>> np.void(5) +np.void(b'\x00\x00\x00\x00\x00') +>>> np.void(b'abcd') +np.void(b'\x61\x62\x63\x64') +>>> np.void((3.2, b'eggs'), dtype="d,S5") +np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) +np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) +""") + +add_newdoc_for_scalar_type('datetime64', '(value=None, /, *args)', """ +If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``. +If created from string, the string can be in ISO 8601 date or datetime format. + +When parsing a string to create a datetime object, if the string contains +a trailing timezone (A 'Z' or a timezone offset), the timezone will be +dropped and a User Warning is given. + +Datetime64 objects should be considered to be UTC and therefore have an +offset of +0000. + +>>> np.datetime64(10, 'Y') +np.datetime64('1980') +>>> np.datetime64('1980', 'Y') +np.datetime64('1980') +>>> np.datetime64(10, 'D') +np.datetime64('1970-01-11') + +See :ref:`arrays.datetime` for more information. +""") + +add_newdoc_for_scalar_type('timedelta64', '(value=0, /, *args)', """ +A timedelta stored as a 64-bit integer. + +See :ref:`arrays.datetime` for more information. +""") add_newdoc('numpy._core.numerictypes', "integer", ('is_integer', """ + is_integer($self, /) + -- + integer.is_integer() -> bool Return ``True`` if the number is finite with integral value. 
@@ -337,23 +319,29 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): # TODO: work out how to put this on the base class, np.floating for float_name in ('half', 'single', 'double', 'longdouble'): add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio', - """ - {ftype}.as_integer_ratio() -> (int, int) + f""" + as_integer_ratio($self, /) + -- + + {float_name}.as_integer_ratio() -> (int, int) Return a pair of integers, whose ratio is exactly equal to the original floating point number, and with a positive denominator. Raise `OverflowError` on infinities and a `ValueError` on NaNs. - >>> np.{ftype}(10.0).as_integer_ratio() + >>> np.{float_name}(10.0).as_integer_ratio() (10, 1) - >>> np.{ftype}(0.0).as_integer_ratio() + >>> np.{float_name}(0.0).as_integer_ratio() (0, 1) - >>> np.{ftype}(-.25).as_integer_ratio() + >>> np.{float_name}(-.25).as_integer_ratio() (-1, 4) - """.format(ftype=float_name))) + """)) add_newdoc('numpy._core.numerictypes', float_name, ('is_integer', f""" + is_integer($self, /) + -- + {float_name}.is_integer() -> bool Return ``True`` if the floating point number is finite with integral @@ -370,10 +358,14 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): """)) for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', - 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64', + 'longlong', 'ulonglong'): # Add negative examples for signed cases by checking typecode add_newdoc('numpy._core.numerictypes', int_name, ('bit_count', f""" + bit_count($self, /) + -- + {int_name}.bit_count() -> int Computes the number of 1-bits in the absolute value of the input. 
diff --git a/blimgui/dist64/numpy/_core/_add_newdocs_scalars.pyi b/blimgui/dist64/numpy/_core/_add_newdocs_scalars.pyi index 3c256a0..7a84de8 100644 --- a/blimgui/dist64/numpy/_core/_add_newdocs_scalars.pyi +++ b/blimgui/dist64/numpy/_core/_add_newdocs_scalars.pyi @@ -1,4 +1,3 @@ -from collections.abc import Iterable from typing import Final import numpy as np @@ -8,9 +7,10 @@ _system: Final[str] = ... _machine: Final[str] = ... _doc_alias_string: Final[str] = ... _bool_docstring: Final[str] = ... +bool_name: str = ... int_name: str = ... float_name: str = ... def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ... -def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ... +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: ... def _get_platform_and_machine() -> tuple[str, str]: ... diff --git a/blimgui/dist64/numpy/_core/_asarray.py b/blimgui/dist64/numpy/_core/_asarray.py index 86b8d14..cfbe1e6 100644 --- a/blimgui/dist64/numpy/_core/_asarray.py +++ b/blimgui/dist64/numpy/_core/_asarray.py @@ -3,13 +3,8 @@ `require` fits this category despite its name not matching this pattern. 
""" -from .overrides import ( - array_function_dispatch, - finalize_array_function_like, - set_module, -) from .multiarray import array, asanyarray - +from .overrides import array_function_dispatch, finalize_array_function_like, set_module __all__ = ["require"] diff --git a/blimgui/dist64/numpy/_core/_asarray.pyi b/blimgui/dist64/numpy/_core/_asarray.pyi index a89a278..79dfbec 100644 --- a/blimgui/dist64/numpy/_core/_asarray.pyi +++ b/blimgui/dist64/numpy/_core/_asarray.pyi @@ -1,9 +1,11 @@ from collections.abc import Iterable -from typing import Any, TypeAlias, TypeVar, overload, Literal +from typing import Any, Literal, TypeAlias, TypeVar, overload -from numpy._typing import NDArray, DTypeLike, _SupportsArrayFunc +from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +__all__ = ["require"] + +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) _Requirements: TypeAlias = Literal[ "C", "C_CONTIGUOUS", "CONTIGUOUS", @@ -17,25 +19,25 @@ _RequirementsWithE: TypeAlias = _Requirements | _E @overload def require( - a: _ArrayType, - dtype: None = ..., - requirements: None | _Requirements | Iterable[_Requirements] = ..., + a: _ArrayT, + dtype: None = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, *, - like: _SupportsArrayFunc = ... -) -> _ArrayType: ... + like: _SupportsArrayFunc | None = None +) -> _ArrayT: ... @overload def require( a: object, - dtype: DTypeLike = ..., - requirements: _E | Iterable[_RequirementsWithE] = ..., + dtype: DTypeLike | None = None, + requirements: _E | Iterable[_RequirementsWithE] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... 
@overload def require( a: object, - dtype: DTypeLike = ..., - requirements: None | _Requirements | Iterable[_Requirements] = ..., + dtype: DTypeLike | None = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... diff --git a/blimgui/dist64/numpy/_core/_dtype.py b/blimgui/dist64/numpy/_core/_dtype.py index 0f360da..eedb63a 100644 --- a/blimgui/dist64/numpy/_core/_dtype.py +++ b/blimgui/dist64/numpy/_core/_dtype.py @@ -5,7 +5,6 @@ """ import numpy as np - _kind_to_stem = { 'u': 'uint', 'i': 'int', @@ -26,8 +25,7 @@ def _kind_name(dtype): return _kind_to_stem[dtype.kind] except KeyError as e: raise RuntimeError( - "internal dtype error, unknown kind {!r}" - .format(dtype.kind) + f"internal dtype error, unknown kind {dtype.kind!r}" ) from None @@ -46,7 +44,7 @@ def __repr__(dtype): arg_str = _construction_repr(dtype, include_align=False) if dtype.isalignedstruct: arg_str = arg_str + ", align=True" - return "dtype({})".format(arg_str) + return f"dtype({arg_str})" def _unpack_field(dtype, offset, title=None): @@ -122,7 +120,7 @@ def _scalar_str(dtype, short): elif dtype.type == np.str_: if _isunsized(dtype): - return "'%sU'" % byteorder + return f"'{byteorder}U'" else: return "'%sU%d'" % (byteorder, dtype.itemsize / 4) @@ -141,10 +139,13 @@ def _scalar_str(dtype, short): return "'V%d'" % dtype.itemsize elif dtype.type == np.datetime64: - return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype)) + return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'" elif dtype.type == np.timedelta64: - return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype)) + return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'" + + elif dtype.isbuiltin == 2: + return dtype.type.__name__ elif np.issubdtype(dtype, np.number): # Short repr with endianness, like ' _Name: ... -def __str__(dtype: np.dtype[Any]) -> str: ... -def __repr__(dtype: np.dtype[Any]) -> str: ... 
+def _kind_name(dtype: np.dtype) -> _Name: ... +def __str__(dtype: np.dtype) -> str: ... +def __repr__(dtype: np.dtype) -> str: ... # -def _isunsized(dtype: np.dtype[Any]) -> bool: ... -def _is_packed(dtype: np.dtype[Any]) -> bool: ... -def _name_includes_bit_suffix(dtype: np.dtype[Any]) -> bool: ... +def _isunsized(dtype: np.dtype) -> bool: ... +def _is_packed(dtype: np.dtype) -> bool: ... +def _name_includes_bit_suffix(dtype: np.dtype) -> bool: ... # -def _construction_repr(dtype: np.dtype[Any], include_align: bool = False, short: bool = False) -> str: ... -def _scalar_str(dtype: np.dtype[Any], short: bool) -> str: ... -def _byte_order_str(dtype: np.dtype[Any]) -> str: ... -def _datetime_metadata_str(dtype: np.dtype[Any]) -> str: ... -def _struct_dict_str(dtype: np.dtype[Any], includealignedflag: bool) -> str: ... -def _struct_list_str(dtype: np.dtype[Any]) -> str: ... -def _struct_str(dtype: np.dtype[Any], include_align: bool) -> str: ... -def _subarray_str(dtype: np.dtype[Any]) -> str: ... -def _name_get(dtype: np.dtype[Any]) -> str: ... +def _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ... +def _scalar_str(dtype: np.dtype, short: bool) -> str: ... +def _byte_order_str(dtype: np.dtype) -> str: ... +def _datetime_metadata_str(dtype: np.dtype) -> str: ... +def _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ... +def _struct_list_str(dtype: np.dtype) -> str: ... +def _struct_str(dtype: np.dtype, include_align: bool) -> str: ... +def _subarray_str(dtype: np.dtype) -> str: ... +def _name_get(dtype: np.dtype) -> str: ... # @overload -def _unpack_field(dtype: np.dtype[Any], offset: int, title: _T) -> tuple[np.dtype[Any], int, _T]: ... +def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ... @overload -def _unpack_field(dtype: np.dtype[Any], offset: int, title: None = None) -> tuple[np.dtype[Any], int, None]: ... 
+def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/blimgui/dist64/numpy/_core/_dtype_ctypes.py b/blimgui/dist64/numpy/_core/_dtype_ctypes.py index 7ccf513..4777417 100644 --- a/blimgui/dist64/numpy/_core/_dtype_ctypes.py +++ b/blimgui/dist64/numpy/_core/_dtype_ctypes.py @@ -57,11 +57,11 @@ def _from_ctypes_structure(t): offsets.append(current_offset) current_offset += ctypes.sizeof(ftyp) - return np.dtype(dict( - formats=formats, - offsets=offsets, - names=names, - itemsize=ctypes.sizeof(t))) + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) else: fields = [] for fname, ftyp in t._fields_: @@ -93,11 +93,11 @@ def _from_ctypes_union(t): formats.append(dtype_from_ctypes_type(ftyp)) offsets.append(0) # Union fields are offset to 0 - return np.dtype(dict( - formats=formats, - offsets=offsets, - names=names, - itemsize=ctypes.sizeof(t))) + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) def dtype_from_ctypes_type(t): @@ -117,4 +117,4 @@ def dtype_from_ctypes_type(t): return _from_ctypes_scalar(t) else: raise NotImplementedError( - "Unknown ctypes type {}".format(t.__name__)) + f"Unknown ctypes type {t.__name__}") diff --git a/blimgui/dist64/numpy/_core/_exceptions.py b/blimgui/dist64/numpy/_core/_exceptions.py index 240c2dd..2d8e535 100644 --- a/blimgui/dist64/numpy/_core/_exceptions.py +++ b/blimgui/dist64/numpy/_core/_exceptions.py @@ -5,7 +5,6 @@ By putting the formatting in `__str__`, we also avoid paying the cost for users who silence the exceptions. 
""" -from .._utils import set_module def _unpack_tuple(tup): if len(tup) == 1: @@ -44,12 +43,9 @@ def __init__(self, ufunc, dtypes): def __str__(self): return ( - "ufunc {!r} did not contain a loop with signature matching types " - "{!r} -> {!r}" - ).format( - self.ufunc.__name__, - _unpack_tuple(self.dtypes[:self.ufunc.nin]), - _unpack_tuple(self.dtypes[self.ufunc.nin:]) + f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature " + f"matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} " + f"-> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}" ) @@ -86,12 +82,10 @@ def __init__(self, ufunc, casting, from_, to, i): def __str__(self): # only show the number if more than one input exists - i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else "" + i_str = f"{self.in_i} " if self.ufunc.nin != 1 else "" return ( - "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting " - "rule {!r}" - ).format( - self.ufunc.__name__, i_str, self.from_, self.to, self.casting + f"Cannot cast ufunc {self.ufunc.__name__!r} input {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" ) @@ -104,12 +98,10 @@ def __init__(self, ufunc, casting, from_, to, i): def __str__(self): # only show the number if more than one output exists - i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else "" + i_str = f"{self.out_i} " if self.ufunc.nout != 1 else "" return ( - "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting " - "rule {!r}" - ).format( - self.ufunc.__name__, i_str, self.from_, self.to, self.casting + f"Cannot cast ufunc {self.ufunc.__name__!r} output {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" ) @@ -156,17 +148,15 @@ def _size_to_string(num_bytes): # format with a sensible number of digits if unit_i == 0: # no decimal point on bytes - return '{:.0f} {}'.format(n_units, unit_name) + return f'{n_units:.0f} {unit_name}' elif round(n_units) < 1000: # 3 significant figures, 
if none are dropped to the left of the . - return '{:#.3g} {}'.format(n_units, unit_name) + return f'{n_units:#.3g} {unit_name}' else: # just give all the digits otherwise - return '{:#.0f} {}'.format(n_units, unit_name) + return f'{n_units:#.0f} {unit_name}' def __str__(self): size_str = self._size_to_string(self._total_size) - return ( - "Unable to allocate {} for an array with shape {} and data type {}" - .format(size_str, self.shape, self.dtype) - ) + return (f"Unable to allocate {size_str} for an array with shape " + f"{self.shape} and data type {self.dtype}") diff --git a/blimgui/dist64/numpy/_core/_exceptions.pyi b/blimgui/dist64/numpy/_core/_exceptions.pyi index e192b2b..71e3908 100644 --- a/blimgui/dist64/numpy/_core/_exceptions.pyi +++ b/blimgui/dist64/numpy/_core/_exceptions.pyi @@ -1,16 +1,13 @@ from collections.abc import Iterable -from typing import Any, Final, overload - -from typing_extensions import TypeVar, Unpack +from typing import Any, Final, TypeVar, overload import numpy as np from numpy import _CastingKind -from numpy._utils import set_module as set_module ### _T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, Unpack[tuple[Any, ...]]]) +_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) _ExceptionT = TypeVar("_ExceptionT", bound=Exception) ### @@ -20,47 +17,31 @@ class UFuncTypeError(TypeError): def __init__(self, /, ufunc: np.ufunc) -> None: ... class _UFuncNoLoopError(UFuncTypeError): - dtypes: tuple[np.dtype[Any], ...] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... + dtypes: tuple[np.dtype, ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... class _UFuncBinaryResolutionError(_UFuncNoLoopError): - dtypes: tuple[np.dtype[Any], np.dtype[Any]] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... 
+ dtypes: tuple[np.dtype, np.dtype] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... class _UFuncCastingError(UFuncTypeError): casting: Final[_CastingKind] - from_: Final[np.dtype[Any]] - to: Final[np.dtype[Any]] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype[Any], to: np.dtype[Any]) -> None: ... + from_: Final[np.dtype] + to: Final[np.dtype] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ... class _UFuncInputCastingError(_UFuncCastingError): in_i: Final[int] - def __init__( - self, - /, - ufunc: np.ufunc, - casting: _CastingKind, - from_: np.dtype[Any], - to: np.dtype[Any], - i: int, - ) -> None: ... + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... class _UFuncOutputCastingError(_UFuncCastingError): out_i: Final[int] - def __init__( - self, - /, - ufunc: np.ufunc, - casting: _CastingKind, - from_: np.dtype[Any], - to: np.dtype[Any], - i: int, - ) -> None: ... + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... class _ArrayMemoryError(MemoryError): shape: tuple[int, ...] - dtype: np.dtype[Any] - def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype[Any]) -> None: ... + dtype: np.dtype + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ... @property def _total_size(self) -> int: ... 
@staticmethod diff --git a/blimgui/dist64/numpy/_core/_internal.py b/blimgui/dist64/numpy/_core/_internal.py index dbdc78d..7aa9051 100644 --- a/blimgui/dist64/numpy/_core/_internal.py +++ b/blimgui/dist64/numpy/_core/_internal.py @@ -10,9 +10,11 @@ import sys import warnings -from ..exceptions import DTypePromotionError -from .multiarray import dtype, array, ndarray, promote_types, StringDType from numpy import _NoValue +from numpy.exceptions import DTypePromotionError + +from .multiarray import StringDType, array, dtype, promote_types + try: import ctypes except ImportError: @@ -158,7 +160,7 @@ def _commastring(astr): (order1, repeats, order2, dtype) = mo.groups() except (TypeError, AttributeError): raise ValueError( - f'format number {len(result)+1} of "{astr}" is not recognized' + f'format number {len(result) + 1} of "{astr}" is not recognized' ) from None startindex = mo.end() # Separator or ending padding @@ -170,7 +172,7 @@ def _commastring(astr): if not mo: raise ValueError( 'format number %d of "%s" is not recognized' % - (len(result)+1, astr)) + (len(result) + 1, astr)) startindex = mo.end() islist = True @@ -183,8 +185,7 @@ def _commastring(astr): order2 = _convorder.get(order2, order2) if (order1 != order2): raise ValueError( - 'inconsistent byte-order specification %s and %s' % - (order1, order2)) + f'inconsistent byte-order specification {order1} and {order2}') order = order1 if order in ('|', '=', _nbo): @@ -302,7 +303,7 @@ def shape_as(self, obj): """ if self._zerod: return None - return (obj*self._arr.ndim)(*self._arr.shape) + return (obj * self._arr.ndim)(*self._arr.shape) def strides_as(self, obj): """ @@ -311,7 +312,7 @@ def strides_as(self, obj): """ if self._zerod: return None - return (obj*self._arr.ndim)(*self._arr.strides) + return (obj * self._arr.ndim)(*self._arr.strides) @property def data(self): @@ -364,46 +365,6 @@ def _as_parameter_(self): """ return self.data_as(ctypes.c_void_p) - # Numpy 1.21.0, 2021-05-18 - - def get_data(self): - 
"""Deprecated getter for the `_ctypes.data` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_data" is deprecated. Use "data" instead', - DeprecationWarning, stacklevel=2) - return self.data - - def get_shape(self): - """Deprecated getter for the `_ctypes.shape` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_shape" is deprecated. Use "shape" instead', - DeprecationWarning, stacklevel=2) - return self.shape - - def get_strides(self): - """Deprecated getter for the `_ctypes.strides` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_strides" is deprecated. Use "strides" instead', - DeprecationWarning, stacklevel=2) - return self.strides - - def get_as_parameter(self): - """Deprecated getter for the `_ctypes._as_parameter_` property. - - .. deprecated:: 1.21 - """ - warnings.warn( - '"get_as_parameter" is deprecated. Use "_as_parameter_" instead', - DeprecationWarning, stacklevel=2, - ) - return self._as_parameter_ - def _newnames(datatype, order): """ @@ -669,12 +630,12 @@ def _dtype_from_pep3118(spec): return dtype def __dtype_from_pep3118(stream, is_subdtype): - field_spec = dict( - names=[], - formats=[], - offsets=[], - itemsize=0 - ) + field_spec = { + 'names': [], + 'formats': [], + 'offsets': [], + 'itemsize': 0 + } offset = 0 common_alignment = 1 is_padding = False @@ -739,11 +700,10 @@ def __dtype_from_pep3118(stream, is_subdtype): elif stream.next in _pep3118_unsupported_map: desc = _pep3118_unsupported_map[stream.next] raise NotImplementedError( - "Unrepresentable PEP 3118 data type {!r} ({})" - .format(stream.next, desc)) + f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})") else: raise ValueError( - "Unknown PEP 3118 data type specifier %r" % stream.s + f"Unknown PEP 3118 data type specifier {stream.s!r}" ) # @@ -834,21 +794,21 @@ def _fix_names(field_spec): def _add_trailing_padding(value, padding): """Inject the specified number of padding bytes at the end of a dtype""" if value.fields is None: - 
field_spec = dict( - names=['f0'], - formats=[value], - offsets=[0], - itemsize=value.itemsize - ) + field_spec = { + 'names': ['f0'], + 'formats': [value], + 'offsets': [0], + 'itemsize': value.itemsize + } else: fields = value.fields names = value.names - field_spec = dict( - names=names, - formats=[fields[name][0] for name in names], - offsets=[fields[name][1] for name in names], - itemsize=value.itemsize - ) + field_spec = { + 'names': names, + 'formats': [fields[name][0] for name in names], + 'offsets': [fields[name][1] for name in names], + 'itemsize': value.itemsize + } field_spec['itemsize'] += padding return dtype(field_spec) @@ -873,21 +833,21 @@ def _lcm(a, b): def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): """ Format the error message for when __array_ufunc__ gives up. """ - args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] + - ['{}={!r}'.format(k, v) + args_string = ', '.join([f'{arg!r}' for arg in inputs] + + [f'{k}={v!r}' for k, v in kwargs.items()]) args = inputs + kwargs.get('out', ()) types_string = ', '.join(repr(type(arg).__name__) for arg in args) return ('operand type(s) all returned NotImplemented from ' - '__array_ufunc__({!r}, {!r}, {}): {}' - .format(ufunc, method, args_string, types_string)) + f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}' + ) def array_function_errmsg_formatter(public_api, types): """ Format the error message for when __array_ufunc__ gives up. 
""" - func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) - return ("no implementation found for '{}' on types that implement " - '__array_function__: {}'.format(func_name, list(types))) + func_name = f'{public_api.__module__}.{public_api.__name__}' + return (f"no implementation found for '{func_name}' on types that implement " + f'__array_function__: {list(types)}') def _ufunc_doc_signature_formatter(ufunc): @@ -895,13 +855,15 @@ def _ufunc_doc_signature_formatter(ufunc): Builds a signature string which resembles PEP 457 This is used to construct the first line of the docstring + + Keep in sync with `_ufunc_inspect_signature_builder`. """ # input arguments are simple if ufunc.nin == 1: in_args = 'x' else: - in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin)) + in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin)) # output arguments are both keyword or positional if ufunc.nout == 0: @@ -911,8 +873,8 @@ def _ufunc_doc_signature_formatter(ufunc): else: out_args = '[, {positional}], / [, out={default}]'.format( positional=', '.join( - 'out{}'.format(i+1) for i in range(ufunc.nout)), - default=repr((None,)*ufunc.nout) + f'out{i + 1}' for i in range(ufunc.nout)), + default=repr((None,) * ufunc.nout) ) # keyword only args depend on whether this is a gufunc @@ -930,13 +892,56 @@ def _ufunc_doc_signature_formatter(ufunc): kwargs += "[, signature, axes, axis]" # join all the parts together - return '{name}({in_args}{out_args}, *{kwargs})'.format( - name=ufunc.__name__, - in_args=in_args, - out_args=out_args, - kwargs=kwargs + return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})' + + +def _ufunc_inspect_signature_builder(ufunc): + """ + Builds a ``__signature__`` string. + + Should be kept in sync with `_ufunc_doc_signature_formatter`. 
+ """ + + from inspect import Parameter, Signature + + params = [] + + # positional-only input parameters + if ufunc.nin == 1: + params.append(Parameter("x", Parameter.POSITIONAL_ONLY)) + else: + params.extend( + Parameter(f"x{i}", Parameter.POSITIONAL_ONLY) + for i in range(1, ufunc.nin + 1) + ) + + # for the sake of simplicity, we only consider a single output parameter + if ufunc.nout == 1: + out_default = None + else: + out_default = (None,) * ufunc.nout + params.append( + Parameter("out", Parameter.POSITIONAL_OR_KEYWORD, default=out_default), ) + if ufunc.signature is None: + params.append(Parameter("where", Parameter.KEYWORD_ONLY, default=True)) + else: + # NOTE: not all gufuncs support the `axis` parameters + params.append(Parameter("axes", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("axis", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("keepdims", Parameter.KEYWORD_ONLY, default=False)) + + params.extend(( + Parameter("casting", Parameter.KEYWORD_ONLY, default='same_kind'), + Parameter("order", Parameter.KEYWORD_ONLY, default='K'), + Parameter("dtype", Parameter.KEYWORD_ONLY, default=None), + Parameter("subok", Parameter.KEYWORD_ONLY, default=True), + Parameter("signature", Parameter.KEYWORD_ONLY, default=None), + )) + + return Signature(params) + def npy_ctypes_check(cls): # determine if a class comes from ctypes, in order to work around diff --git a/blimgui/dist64/numpy/_core/_internal.pyi b/blimgui/dist64/numpy/_core/_internal.pyi index 7d06421..8100bad 100644 --- a/blimgui/dist64/numpy/_core/_internal.pyi +++ b/blimgui/dist64/numpy/_core/_internal.pyi @@ -1,9 +1,8 @@ import ctypes as ct import re from collections.abc import Callable, Iterable -from typing import Any, Final, Generic, overload - -from typing_extensions import Self, TypeVar, deprecated +from typing import Any, Final, Generic, Self, overload +from typing_extensions import TypeVar import numpy as np import numpy.typing as npt @@ -48,16 +47,6 @@ 
class _ctypes(Generic[_PT_co]): def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... - # - @deprecated('"get_data" is deprecated. Use "data" instead') - def get_data(self, /) -> _PT_co: ... - @deprecated('"get_shape" is deprecated. Use "shape" instead') - def get_shape(self, /) -> ct.Array[c_intp]: ... - @deprecated('"get_strides" is deprecated. Use "strides" instead') - def get_strides(self, /) -> ct.Array[c_intp]: ... - @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead') - def get_as_parameter(self, /) -> ct.c_void_p: ... - class dummy_ctype(Generic[_T_co]): _cls: type[_T_co] diff --git a/blimgui/dist64/numpy/_core/_machar.py b/blimgui/dist64/numpy/_core/_machar.py deleted file mode 100644 index efec0c3..0000000 --- a/blimgui/dist64/numpy/_core/_machar.py +++ /dev/null @@ -1,356 +0,0 @@ -""" -Machine arithmetic - determine the parameters of the -floating-point arithmetic system - -Author: Pearu Peterson, September 2003 - -""" -__all__ = ['MachAr'] - -from .fromnumeric import any -from ._ufunc_config import errstate -from .._utils import set_module - -# Need to speed this up...especially for longdouble - -# Deprecated 2021-10-20, NumPy 1.22 -class MachAr: - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. - machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, subtracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. - iexp : int - Number of bits in the exponent (including its sign and bias). 
- minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating-point number ``beta**minexp`` (the smallest [in - magnitude] positive floating point number with full precision). - maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). - irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - An alias for `smallest_normal`, kept for backwards compatibility. - huge : float - Same as `xmax`. - precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - smallest_normal : float - The smallest positive floating point number with 1 as leading bit in - the mantissa following IEEE-754. Same as `xmin`. - smallest_subnormal : float - The smallest positive floating point number with 0 as leading bit in - the mantissa following IEEE-754. - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. - float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. 
- iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. - - """ - - def __init__(self, float_conv=float,int_conv=int, - float_to_float=float, - float_to_str=lambda v:'%24.16e' % v, - title='Python floating point number'): - """ - - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the running arch. - with errstate(under='ignore'): - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? Aren't they 2 and 2.0? 
- # Determine ibeta and beta - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - b = one - for _ in range(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp-a) - if any(itemp != 0): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in range(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - - betah = beta / two - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - temp = a + betah - irnd = 0 - if any(temp-a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd == 0 and any(temp-tempa != zero): - irnd = 2 - - # Determine negep and epsneg - negep = it + 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in range(max_iterN): - temp = one - a - if any(temp-one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError("could not determine machine tolerance " - "for 'negep', locals() -> %s" % (locals())) - else: - raise RuntimeError(msg % (_, one.dtype)) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in range(max_iterN): - temp = one + a - if any(temp-one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError(msg % (_, one.dtype)) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd == 0 and any(temp*one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in range(max_iterN): - y = z - z = y*y - a 
= z*one # Check here for underflow - temp = z*t - if any(a+a == zero) or any(abs(z) >= y): - break - temp1 = temp * betain - if any(temp1*beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError(msg % (_, one.dtype)) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in range(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any((a + a) != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1*beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError(msg % (_, one.dtype)) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = maxexp - 1 - if any(a != y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax*one != xmax): - xmax = one - beta*epsneg - xmax = xmax / (xmin*beta*beta*beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta == 2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - smallest_subnormal = abs(xmin / beta ** (it)) - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - self.smallest_normal = self.xmin - 
self._str_smallest_normal = float_to_str(self.xmin) - self.smallest_subnormal = float_to_float(smallest_subnormal) - self._str_smallest_subnormal = float_to_str(smallest_subnormal) - - import math - self.precision = int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - fmt = ( - 'Machine parameters for %(title)s\n' - '---------------------------------------------------------------------\n' - 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' - 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' - 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' - 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' - 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' - 'smallest_normal=%(smallest_normal)s ' - 'smallest_subnormal=%(smallest_subnormal)s\n' - '---------------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - -if __name__ == '__main__': - print(MachAr()) diff --git a/blimgui/dist64/numpy/_core/_machar.pyi b/blimgui/dist64/numpy/_core/_machar.pyi deleted file mode 100644 index e192b2b..0000000 --- a/blimgui/dist64/numpy/_core/_machar.pyi +++ /dev/null @@ -1,73 +0,0 @@ -from collections.abc import Iterable -from typing import Any, Final, overload - -from typing_extensions import TypeVar, Unpack - -import numpy as np -from numpy import _CastingKind -from numpy._utils import set_module as set_module - -### - -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, Unpack[tuple[Any, ...]]]) -_ExceptionT = TypeVar("_ExceptionT", bound=Exception) - -### - -class UFuncTypeError(TypeError): - ufunc: Final[np.ufunc] - def __init__(self, /, ufunc: np.ufunc) -> None: ... - -class _UFuncNoLoopError(UFuncTypeError): - dtypes: tuple[np.dtype[Any], ...] 
- def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... - -class _UFuncBinaryResolutionError(_UFuncNoLoopError): - dtypes: tuple[np.dtype[Any], np.dtype[Any]] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... - -class _UFuncCastingError(UFuncTypeError): - casting: Final[_CastingKind] - from_: Final[np.dtype[Any]] - to: Final[np.dtype[Any]] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype[Any], to: np.dtype[Any]) -> None: ... - -class _UFuncInputCastingError(_UFuncCastingError): - in_i: Final[int] - def __init__( - self, - /, - ufunc: np.ufunc, - casting: _CastingKind, - from_: np.dtype[Any], - to: np.dtype[Any], - i: int, - ) -> None: ... - -class _UFuncOutputCastingError(_UFuncCastingError): - out_i: Final[int] - def __init__( - self, - /, - ufunc: np.ufunc, - casting: _CastingKind, - from_: np.dtype[Any], - to: np.dtype[Any], - i: int, - ) -> None: ... - -class _ArrayMemoryError(MemoryError): - shape: tuple[int, ...] - dtype: np.dtype[Any] - def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype[Any]) -> None: ... - @property - def _total_size(self) -> int: ... - @staticmethod - def _size_to_string(num_bytes: int) -> str: ... - -@overload -def _unpack_tuple(tup: tuple[_T]) -> _T: ... -@overload -def _unpack_tuple(tup: _TupleT) -> _TupleT: ... -def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... 
diff --git a/blimgui/dist64/numpy/_core/_methods.py b/blimgui/dist64/numpy/_core/_methods.py index 5def148..99a9ad8 100644 --- a/blimgui/dist64/numpy/_core/_methods.py +++ b/blimgui/dist64/numpy/_core/_methods.py @@ -9,11 +9,8 @@ from contextlib import nullcontext import numpy as np -from numpy._core import multiarray as mu -from numpy._core import umath as um +from numpy._core import multiarray as mu, numerictypes as nt, umath as um from numpy._core.multiarray import asanyarray -from numpy._core import numerictypes as nt -from numpy._core import _exceptions from numpy._globals import _NoValue # save those O(100) nanoseconds! @@ -28,13 +25,13 @@ # Complex types to -> (2,)float view for fast-path computation in _var() _complex_to_float = { - nt.dtype(nt.csingle) : nt.dtype(nt.single), - nt.dtype(nt.cdouble) : nt.dtype(nt.double), + nt.dtype(nt.csingle): nt.dtype(nt.single), + nt.dtype(nt.cdouble): nt.dtype(nt.double), } # Special case for windows: ensure double takes precedence if nt.dtype(nt.longdouble) != nt.dtype(nt.double): _complex_to_float.update({ - nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble), + nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble), }) # avoid keyword arguments to speed up parsing, saves about 15%-20% for very @@ -122,7 +119,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): - warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) # Cast bool, unsigned int, and int to float64 by default if dtype is None: @@ -188,15 +185,14 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # Compute sum of squared deviations from mean # Note that x may not be inexact and that we need it to be an array, # not a scalar. 
- x = asanyarray(arr - arrmean) - + x = um.subtract(arr, arrmean, out=...) if issubclass(arr.dtype.type, (nt.floating, nt.integer)): - x = um.multiply(x, x, out=x) + x = um.square(x, out=x) # Fast-paths for built-in complex types - elif x.dtype in _complex_to_float: - xv = x.view(dtype=(_complex_to_float[x.dtype], (2,))) - um.multiply(xv, xv, out=xv) - x = um.add(xv[..., 0], xv[..., 1], out=x.real).real + elif (_float_dtype := _complex_to_float.get(x.dtype)) is not None: + xv = x.view(dtype=(_float_dtype, (2,))) + um.square(xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real) # Most general case; includes handling object arrays containing imaginary # numbers and complex types with non-native byteorder else: diff --git a/blimgui/dist64/numpy/_core/_methods.pyi b/blimgui/dist64/numpy/_core/_methods.pyi index 44a2530..4571b01 100644 --- a/blimgui/dist64/numpy/_core/_methods.pyi +++ b/blimgui/dist64/numpy/_core/_methods.pyi @@ -1,7 +1,5 @@ from collections.abc import Callable -from typing import Any, TypeAlias - -from typing_extensions import Concatenate +from typing import Any, Concatenate, TypeAlias import numpy as np diff --git a/blimgui/dist64/numpy/_core/_multiarray_tests.cp313-win_amd64.lib b/blimgui/dist64/numpy/_core/_multiarray_tests.cp313-win_amd64.lib deleted file mode 100644 index 65e820a..0000000 Binary files a/blimgui/dist64/numpy/_core/_multiarray_tests.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/_core/_multiarray_tests.cp314-win_amd64.lib b/blimgui/dist64/numpy/_core/_multiarray_tests.cp314-win_amd64.lib new file mode 100644 index 0000000..3fc0f18 Binary files /dev/null and b/blimgui/dist64/numpy/_core/_multiarray_tests.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/_core/_multiarray_umath.cp313-win_amd64.lib b/blimgui/dist64/numpy/_core/_multiarray_umath.cp313-win_amd64.lib deleted file mode 100644 index 2a246e2..0000000 Binary files a/blimgui/dist64/numpy/_core/_multiarray_umath.cp313-win_amd64.lib 
and /dev/null differ diff --git a/blimgui/dist64/numpy/_core/_multiarray_umath.cp314-win_amd64.lib b/blimgui/dist64/numpy/_core/_multiarray_umath.cp314-win_amd64.lib new file mode 100644 index 0000000..1596cc7 Binary files /dev/null and b/blimgui/dist64/numpy/_core/_multiarray_umath.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/_core/_operand_flag_tests.cp313-win_amd64.lib b/blimgui/dist64/numpy/_core/_operand_flag_tests.cp313-win_amd64.lib deleted file mode 100644 index 02ccd41..0000000 Binary files a/blimgui/dist64/numpy/_core/_operand_flag_tests.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/_core/_operand_flag_tests.cp314-win_amd64.lib b/blimgui/dist64/numpy/_core/_operand_flag_tests.cp314-win_amd64.lib new file mode 100644 index 0000000..59c1f63 Binary files /dev/null and b/blimgui/dist64/numpy/_core/_operand_flag_tests.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/_core/_rational_tests.cp313-win_amd64.lib b/blimgui/dist64/numpy/_core/_rational_tests.cp313-win_amd64.lib deleted file mode 100644 index a0f8cec..0000000 Binary files a/blimgui/dist64/numpy/_core/_rational_tests.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/_core/_rational_tests.cp314-win_amd64.lib b/blimgui/dist64/numpy/_core/_rational_tests.cp314-win_amd64.lib new file mode 100644 index 0000000..e859b95 Binary files /dev/null and b/blimgui/dist64/numpy/_core/_rational_tests.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/_core/_simd.cp313-win_amd64.lib b/blimgui/dist64/numpy/_core/_simd.cp313-win_amd64.lib deleted file mode 100644 index df95afc..0000000 Binary files a/blimgui/dist64/numpy/_core/_simd.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/_core/_simd.cp314-win_amd64.lib b/blimgui/dist64/numpy/_core/_simd.cp314-win_amd64.lib new file mode 100644 index 0000000..2d693ed Binary files /dev/null and b/blimgui/dist64/numpy/_core/_simd.cp314-win_amd64.lib differ diff 
--git a/blimgui/dist64/numpy/_core/_simd.pyi b/blimgui/dist64/numpy/_core/_simd.pyi index 630c163..2dcb46d 100644 --- a/blimgui/dist64/numpy/_core/_simd.pyi +++ b/blimgui/dist64/numpy/_core/_simd.pyi @@ -8,6 +8,13 @@ AVX2: ModuleType | None = ... AVX512F: ModuleType | None = ... AVX512_SKX: ModuleType | None = ... +# NOTE: these 2 are only defined on systems with an arm processor +ASIMD: ModuleType | None = ... +NEON: ModuleType | None = ... + +# NOTE: This is only defined on systems with an riscv64 processor. +RVV: ModuleType | None = ... + baseline: ModuleType | None = ... @type_check_only @@ -17,6 +24,9 @@ class SimdTargets(TypedDict): FMA3: ModuleType | None AVX512F: ModuleType | None AVX512_SKX: ModuleType | None + ASIMD: ModuleType | None + NEON: ModuleType | None + RVV: ModuleType | None baseline: ModuleType | None targets: SimdTargets = ... diff --git a/blimgui/dist64/numpy/_core/_string_helpers.py b/blimgui/dist64/numpy/_core/_string_helpers.py index c4b69af..2790a35 100644 --- a/blimgui/dist64/numpy/_core/_string_helpers.py +++ b/blimgui/dist64/numpy/_core/_string_helpers.py @@ -7,10 +7,10 @@ # Construct the translation tables directly # "A" = chr(65), "a" = chr(97) _all_chars = tuple(map(chr, range(256))) -_ascii_upper = _all_chars[65:65+26] -_ascii_lower = _all_chars[97:97+26] -LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65+26:] -UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97+26:] +_ascii_upper = _all_chars[65:65 + 26] +_ascii_lower = _all_chars[97:97 + 26] +LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:] +UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:] def english_lower(s): diff --git a/blimgui/dist64/numpy/_core/_struct_ufunc_tests.cp313-win_amd64.lib b/blimgui/dist64/numpy/_core/_struct_ufunc_tests.cp313-win_amd64.lib deleted file mode 100644 index 7f850a0..0000000 Binary files a/blimgui/dist64/numpy/_core/_struct_ufunc_tests.cp313-win_amd64.lib and /dev/null differ diff --git 
a/blimgui/dist64/numpy/_core/_struct_ufunc_tests.cp314-win_amd64.lib b/blimgui/dist64/numpy/_core/_struct_ufunc_tests.cp314-win_amd64.lib new file mode 100644 index 0000000..7611c8b Binary files /dev/null and b/blimgui/dist64/numpy/_core/_struct_ufunc_tests.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/_core/_type_aliases.py b/blimgui/dist64/numpy/_core/_type_aliases.py index f193118..85f9a6f 100644 --- a/blimgui/dist64/numpy/_core/_type_aliases.py +++ b/blimgui/dist64/numpy/_core/_type_aliases.py @@ -18,7 +18,7 @@ """ import numpy._core.multiarray as ma -from numpy._core.multiarray import typeinfo, dtype +from numpy._core.multiarray import dtype, typeinfo ###################################### # Building `sctypeDict` and `allTypes` @@ -36,6 +36,7 @@ for _abstract_type_name in _abstract_type_names: allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name) + del _abstract_type_name for k, v in typeinfo.items(): if k.startswith("NPY_") and v not in c_names_dict: @@ -44,6 +45,8 @@ concrete_type = v.type allTypes[k] = concrete_type sctypeDict[k] = concrete_type + del concrete_type + del k, v _aliases = { "double": "float64", @@ -60,6 +63,7 @@ for k, v in _aliases.items(): sctypeDict[k] = allTypes[v] allTypes[k] = allTypes[v] + del k, v # extra aliases are added only to `sctypeDict` # to support dtype name access, such as`np.dtype("float")` @@ -76,18 +80,21 @@ for k, v in _extra_aliases.items(): sctypeDict[k] = allTypes[v] + del k, v # include extended precision sized aliases for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]: - longdouble_type: type = allTypes[full_name] + longdouble_type = allTypes[full_name] - bits: int = dtype(longdouble_type).itemsize * 8 - base_name: str = "complex" if is_complex else "float" - extended_prec_name: str = f"{base_name}{bits}" + bits = dtype(longdouble_type).itemsize * 8 + base_name = "complex" if is_complex else "float" + extended_prec_name = f"{base_name}{bits}" if extended_prec_name 
not in allTypes: sctypeDict[extended_prec_name] = longdouble_type allTypes[extended_prec_name] = longdouble_type + del is_complex, full_name, longdouble_type, bits, base_name, extended_prec_name + #################### # Building `sctypes` @@ -110,10 +117,15 @@ ]: if issubclass(concrete_type, abstract_type): sctypes[type_group].add(concrete_type) + del type_group, abstract_type break + del type_info, concrete_type + # sort sctype groups by bitsize for sctype_key in sctypes.keys(): sctype_list = list(sctypes[sctype_key]) sctype_list.sort(key=lambda x: dtype(x).itemsize) sctypes[sctype_key] = sctype_list + + del sctype_key, sctype_list diff --git a/blimgui/dist64/numpy/_core/_type_aliases.pyi b/blimgui/dist64/numpy/_core/_type_aliases.pyi index 6d28b11..f1c252b 100644 --- a/blimgui/dist64/numpy/_core/_type_aliases.pyi +++ b/blimgui/dist64/numpy/_core/_type_aliases.pyi @@ -1,18 +1,8 @@ from collections.abc import Collection -from typing import Any, Final, Literal as L, TypeAlias, TypedDict, type_check_only +from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only import numpy as np -__all__ = ( - "_abstract_type_names", - "_aliases", - "_extra_aliases", - "allTypes", - "c_names_dict", - "sctypeDict", - "sctypes", -) - sctypeDict: Final[dict[str, type[np.generic]]] allTypes: Final[dict[str, type[np.generic]]] @@ -87,10 +77,10 @@ _extra_aliases: Final[_ExtraAliasesType] @type_check_only class _SCTypes(TypedDict): - int: Collection[type[np.signedinteger[Any]]] - uint: Collection[type[np.unsignedinteger[Any]]] - float: Collection[type[np.floating[Any]]] - complex: Collection[type[np.complexfloating[Any, Any]]] + int: Collection[type[np.signedinteger]] + uint: Collection[type[np.unsignedinteger]] + float: Collection[type[np.floating]] + complex: Collection[type[np.complexfloating]] others: Collection[type[np.flexible | np.bool | np.object_]] sctypes: Final[_SCTypes] diff --git a/blimgui/dist64/numpy/_core/_ufunc_config.py 
b/blimgui/dist64/numpy/_core/_ufunc_config.py index 6383a92..989cf5c 100644 --- a/blimgui/dist64/numpy/_core/_ufunc_config.py +++ b/blimgui/dist64/numpy/_core/_ufunc_config.py @@ -4,12 +4,11 @@ This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and `_extobj_contextvar` from umath. """ -import contextlib -import contextvars import functools -from .._utils import set_module -from .umath import _make_extobj, _get_extobj_dict, _extobj_contextvar +from numpy._utils import set_module + +from .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj __all__ = [ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", @@ -58,6 +57,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): seterrcall : Set a callback function for the 'call' mode. geterr, geterrcall, errstate + Notes ----- The floating-point exceptions are defined in the IEEE 754 standard [1]_: @@ -69,6 +69,8 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): - Invalid operation: result is not an expressible number, typically indicates that a NaN was produced. + **Concurrency note:** see :ref:`fp_error_handling` + .. [1] https://en.wikipedia.org/wiki/IEEE_754 Examples @@ -128,6 +130,8 @@ def geterr(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- >>> import numpy as np @@ -173,6 +177,10 @@ def setbufsize(size): bufsize : int Previous size of ufunc buffer in bytes. 
+ Notes + ----- + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- When exiting a `numpy.errstate` context manager the bufsize is restored: @@ -188,6 +196,8 @@ def setbufsize(size): 8192 """ + if size < 0: + raise ValueError("buffer size must be non-negative") old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) _extobj_contextvar.set(extobj) @@ -204,6 +214,12 @@ def getbufsize(): getbufsize : int Size of ufunc buffer in bytes. + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples -------- >>> import numpy as np @@ -255,6 +271,11 @@ def seterrcall(func): -------- seterr, geterr, geterrcall + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- Callback upon error: @@ -330,6 +351,8 @@ def geterrcall(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np @@ -398,6 +421,8 @@ class errstate: For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. 
+ **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np @@ -425,7 +450,14 @@ class errstate: """ __slots__ = ( - "_call", "_all", "_divide", "_over", "_under", "_invalid", "_token") + "_all", + "_call", + "_divide", + "_invalid", + "_over", + "_token", + "_under", + ) def __init__(self, *, call=_Unspecified, all=None, divide=None, over=None, under=None, invalid=None): diff --git a/blimgui/dist64/numpy/_core/_ufunc_config.pyi b/blimgui/dist64/numpy/_core/_ufunc_config.pyi index 283b7bf..e0d51cb 100644 --- a/blimgui/dist64/numpy/_core/_ufunc_config.pyi +++ b/blimgui/dist64/numpy/_core/_ufunc_config.pyi @@ -1,12 +1,22 @@ from _typeshed import SupportsWrite from collections.abc import Callable -from typing import Any, Literal, TypeAlias, TypedDict, type_check_only +from types import TracebackType +from typing import Any, Final, Literal, TypeAlias, TypedDict, TypeVar, type_check_only -from numpy import errstate as errstate +__all__ = [ + "seterr", + "geterr", + "setbufsize", + "getbufsize", + "seterrcall", + "geterrcall", + "errstate", +] _ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrFunc: TypeAlias = Callable[[str, int], Any] -_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str] +_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] + +_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) @type_check_only class _ErrDict(TypedDict): @@ -15,25 +25,45 @@ class _ErrDict(TypedDict): under: _ErrKind invalid: _ErrKind -@type_check_only -class _ErrDictOptional(TypedDict, total=False): - all: None | _ErrKind - divide: None | _ErrKind - over: None | _ErrKind - under: None | _ErrKind - invalid: None | _ErrKind +### + +class _unspecified: ... 
+ +_Unspecified: Final[_unspecified] + +class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + + def __init__( + self, + /, + *, + call: _ErrCall | _unspecified = ..., # = _Unspecified + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, + ) -> None: ... + def __call__(self, /, func: _CallableT) -> _CallableT: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + /, + ) -> None: ... def seterr( - all: None | _ErrKind = ..., - divide: None | _ErrKind = ..., - over: None | _ErrKind = ..., - under: None | _ErrKind = ..., - invalid: None | _ErrKind = ..., + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, ) -> _ErrDict: ... def geterr() -> _ErrDict: ... def setbufsize(size: int) -> int: ... def getbufsize() -> int: ... def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... def geterrcall() -> _ErrCall | None: ... 
- -# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings` diff --git a/blimgui/dist64/numpy/_core/_umath_tests.cp313-win_amd64.lib b/blimgui/dist64/numpy/_core/_umath_tests.cp313-win_amd64.lib deleted file mode 100644 index 887e034..0000000 Binary files a/blimgui/dist64/numpy/_core/_umath_tests.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/_core/_umath_tests.cp314-win_amd64.lib b/blimgui/dist64/numpy/_core/_umath_tests.cp314-win_amd64.lib new file mode 100644 index 0000000..080f099 Binary files /dev/null and b/blimgui/dist64/numpy/_core/_umath_tests.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/_core/_umath_tests.pyi b/blimgui/dist64/numpy/_core/_umath_tests.pyi new file mode 100644 index 0000000..696cec3 --- /dev/null +++ b/blimgui/dist64/numpy/_core/_umath_tests.pyi @@ -0,0 +1,47 @@ +# undocumented internal testing module for ufunc features, defined in +# numpy/_core/src/umath/_umath_tests.c.src + +from typing import Final, Literal as L, TypedDict, type_check_only + +import numpy as np +from numpy._typing import _GUFunc_Nin2_Nout1, _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1 + +@type_check_only +class _TestDispatchResult(TypedDict): + func: str # e.g. 'func_AVX2' + var: str # e.g. 'var_AVX2' + func_xb: str # e.g. 'func_AVX2' + var_xb: str # e.g. 'var_AVX2' + all: list[str] # e.g. ['func_AVX2', 'func_SSE41', 'func'] + +### + +# undocumented +def test_signature( + nin: int, nout: int, signature: str, / +) -> tuple[ + L[0, 1], # core_enabled (0 for scalar ufunc; 1 for generalized ufunc) + tuple[int, ...] | None, # core_num_dims + tuple[int, ...] | None, # core_dim_ixs + tuple[int, ...] | None, # core_dim_flags + tuple[int, ...] | None, # core_dim_sizes +]: ... + +# undocumented +def test_dispatch() -> _TestDispatchResult: ... + +# undocumented ufuncs and gufuncs +always_error: Final[_UFunc_Nin2_Nout1[L["always_error"], L[1], None]] = ... 
+always_error_unary: Final[_UFunc_Nin1_Nout1[L["always_error_unary"], L[1], None]] = ... +always_error_gufunc: Final[_GUFunc_Nin2_Nout1[L["always_error_gufunc"], L[1], None, L["(i),()->()"]]] = ... +inner1d: Final[_GUFunc_Nin2_Nout1[L["inner1d"], L[2], None, L["(i),(i)->()"]]] = ... +innerwt: Final[np.ufunc] = ... # we have no specialized type for 3->1 gufuncs +matrix_multiply: Final[_GUFunc_Nin2_Nout1[L["matrix_multiply"], L[3], None, L["(m,n),(n,p)->(m,p)"]]] = ... +matmul: Final[_GUFunc_Nin2_Nout1[L["matmul"], L[3], None, L["(m?,n),(n,p?)->(m?,p?)"]]] = ... +euclidean_pdist: Final[_GUFunc_Nin2_Nout1[L["euclidean_pdist"], L[2], None, L["(n,d)->(p)"]]] = ... +cumsum: Final[np.ufunc] = ... # we have no specialized type for 1->1 gufuncs +inner1d_no_doc: Final[_GUFunc_Nin2_Nout1[L["inner1d_no_doc"], L[2], None, L["(i),(i)->()"]]] = ... +cross1d: Final[_GUFunc_Nin2_Nout1[L["cross1d"], L[2], None, L["(3),(3)->(3)"]]] = ... +_pickleable_module_global_ufunc: Final[np.ufunc] = ... # 0->0 ufunc; segfaults if called +indexed_negative: Final[_UFunc_Nin1_Nout1[L["indexed_negative"], L[0], L[0]]] = ... # ntypes=0; can't be called +conv1d_full: Final[_GUFunc_Nin2_Nout1[L["conv1d_full"], L[1], None, L["(m),(n)->(p)"]]] = ... diff --git a/blimgui/dist64/numpy/_core/arrayprint.py b/blimgui/dist64/numpy/_core/arrayprint.py index 0b91dc1..9856786 100644 --- a/blimgui/dist64/numpy/_core/arrayprint.py +++ b/blimgui/dist64/numpy/_core/arrayprint.py @@ -25,26 +25,33 @@ import functools import numbers import sys + try: from _thread import get_ident except ImportError: from _dummy_thread import get_ident +import contextlib +import operator +import warnings + import numpy as np + from . import numerictypes as _nt -from .umath import absolute, isinf, isfinite, isnat -from . 
import multiarray -from .multiarray import (array, dragon4_positional, dragon4_scientific, - datetime_as_string, datetime_data, ndarray) from .fromnumeric import any -from .numeric import concatenate, asarray, errstate -from .numerictypes import (longlong, intc, int_, float64, complex128, - flexible) +from .multiarray import ( + array, + datetime_as_string, + datetime_data, + dragon4_positional, + dragon4_scientific, + ndarray, +) +from .numeric import asarray, concatenate, errstate +from .numerictypes import complex128, flexible, float64, int_ from .overrides import array_function_dispatch, set_module from .printoptions import format_options -import operator -import warnings -import contextlib +from .umath import absolute, isfinite, isinf, isnat def _make_options_dict(precision=None, threshold=None, edgeitems=None, @@ -64,7 +71,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] if floatmode not in modes + [None]: raise ValueError("floatmode option must be one of " + - ", ".join('"{}"'.format(m) for m in modes)) + ", ".join(f'"{m}"' for m in modes)) if sign not in [None, '-', '+', ' ']: raise ValueError("sign option must be one of ' ', '+', or '-'") @@ -85,12 +92,14 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, options['legacy'] = 125 elif legacy == '2.1': options['legacy'] = 201 + elif legacy == '2.2': + options['legacy'] = 202 elif legacy is None: pass # OK, do nothing. else: warnings.warn( "legacy printing option can currently only be '1.13', '1.21', " - "'1.25', '2.1, or `False`", stacklevel=3) + "'1.25', '2.1', '2.2' or `False`", stacklevel=3) if threshold is not None: # forbid the bad threshold arg suggested by stack overflow, gh-12351 @@ -219,6 +228,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, If set to ``'2.1'``, shape information is not given when arrays are summarized (i.e., multiple elements replaced with ``...``). 
+ If set to ``'2.2'``, the transition to use scientific notation for + printing ``np.float16`` and ``np.float32`` types may happen later or + not at all for larger values. + If set to `False`, disables legacy mode. Unrecognized strings will be ignored with a warning for forward @@ -235,11 +248,15 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, -------- get_printoptions, printoptions, array2string + Notes ----- - `formatter` is always reset with a call to `set_printoptions`. - Use `printoptions` as a context manager to set the values temporarily. + * ``formatter`` is always reset with a call to `set_printoptions`. + * Use `printoptions` as a context manager to set the values temporarily. + * These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` Examples -------- @@ -339,6 +356,12 @@ def get_printoptions(): For a full description of these options, see `set_printoptions`. + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` + See Also -------- set_printoptions, printoptions @@ -359,7 +382,8 @@ def get_printoptions(): """ opts = format_options.get().copy() opts['legacy'] = { - 113: '1.13', 121: '1.21', 125: '1.25', sys.maxsize: False, + 113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1', + 202: '2.2', sys.maxsize: False, }[opts['legacy']] return opts @@ -396,6 +420,12 @@ def printoptions(*args, **kwargs): -------- set_printoptions, get_printoptions + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. 
+ + **Concurrency note:** see :ref:`text_formatting_options` + """ token = _set_printoptions(*args, **kwargs) @@ -416,7 +446,7 @@ def _leading_trailing(a, edgeitems, index=()): if axis == a.ndim: return a[index] - if a.shape[axis] > 2*edgeitems: + if a.shape[axis] > 2 * edgeitems: return concatenate(( _leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]), _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) @@ -585,7 +615,7 @@ def _array2string(a, options, separator=' ', prefix=""): # skip over "[" next_line_prefix = " " # skip over array( - next_line_prefix += " "*len(prefix) + next_line_prefix += " " * len(prefix) lst = _formatArray(a, format_function, options['linewidth'], next_line_prefix, separator, options['edgeitems'], @@ -596,18 +626,18 @@ def _array2string(a, options, separator=' ', prefix=""): def _array2string_dispatcher( a, max_line_width=None, precision=None, suppress_small=None, separator=None, prefix=None, - style=None, formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix=None, - *, legacy=None): + legacy=None): return (a,) @array_function_dispatch(_array2string_dispatcher, module='numpy') def array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", - style=np._NoValue, formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix="", - *, legacy=None): + legacy=None): """ Return a string representation of an array. @@ -640,10 +670,6 @@ def array2string(a, max_line_width=None, precision=None, wrapping is forced at the column ``max_line_width - len(suffix)``. It should be noted that the content of prefix and suffix strings are not included in the output. - style : _NoValue, optional - Has no effect, do not use. - - .. 
deprecated:: 1.14.0 formatter : dict of callables, optional If not None, the keys should indicate the type(s) that the respective formatting function applies to. Callables should return a string. @@ -763,16 +789,8 @@ def array2string(a, max_line_width=None, precision=None, options.update(overrides) if options['legacy'] <= 113: - if style is np._NoValue: - style = repr - if a.shape == () and a.dtype.names is None: - return style(a.item()) - elif style is not np._NoValue: - # Deprecation 11-9-2017 v1.14 - warnings.warn("'style' argument is deprecated and no longer functional" - " except in 1.13 'legacy' mode", - DeprecationWarning, stacklevel=2) + return repr(a.item()) if options['legacy'] > 113: options['linewidth'] -= len(suffix) @@ -813,7 +831,7 @@ def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): line = next_line_prefix + words[0] indent = next_line_prefix else: - indent = len(line)*' ' + indent = len(line) * ' ' line += words[0] for word in words[1::]: @@ -821,7 +839,7 @@ def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): line = indent + word suffix_length = max_word_length - len(words[-1]) - line += suffix_length*' ' + line += suffix_length * ' ' return s, line @@ -855,7 +873,7 @@ def recurser(index, hanging_indent, curr_width): next_width = curr_width - len(']') a_len = a.shape[axis] - show_summary = summary_insert and 2*edge_items < a_len + show_summary = summary_insert and 2 * edge_items < a_len if show_summary: leading_items = edge_items trailing_items = edge_items @@ -910,7 +928,7 @@ def recurser(index, hanging_indent, curr_width): # other axes - insert newlines between rows else: s = '' - line_sep = separator.rstrip() + '\n'*(axes_left - 1) + line_sep = separator.rstrip() + '\n' * (axes_left - 1) for i in range(leading_items): nested = recurser( @@ -953,7 +971,7 @@ def _none_or_positive_arg(x, name): if x is None: return -1 if x < 0: - raise ValueError("{} must be >= 0".format(name)) + raise 
ValueError(f"{name} must be >= 0") return x class FloatingFormat: @@ -993,9 +1011,14 @@ def fillFormat(self, data): if len(abs_non_zero) != 0: max_val = np.max(abs_non_zero) min_val = np.min(abs_non_zero) + if self._legacy <= 202: + exp_cutoff_max = 1.e8 + else: + # consider data type while deciding the max cutoff for exp format + exp_cutoff_max = 10.**min(8, np.finfo(data.dtype).precision) with errstate(over='ignore'): # division can overflow - if max_val >= 1.e8 or (not self.suppress_small and - (min_val < 0.0001 or max_val/min_val > 1000.)): + if max_val >= exp_cutoff_max or (not self.suppress_small and + (min_val < 0.0001 or max_val / min_val > 1000.)): self.exp_format = True # do a first pass of printing all the numbers, to determine sizes @@ -1080,7 +1103,7 @@ def __call__(self, x): else: # isinf sign = '-' if x < 0 else '+' if self.sign == '+' else '' ret = sign + current_options['infstr'] - return ' '*( + return ' ' * ( self.pad_left + self.pad_right + 1 - len(ret) ) + ret @@ -1352,7 +1375,7 @@ def __init__(self, data): if len(non_nat) < data.size: # data contains a NaT max_str_len = max(max_str_len, 5) - self._format = '%{}s'.format(max_str_len) + self._format = f'%{max_str_len}s' self._nat = "'NaT'".rjust(max_str_len) def _format_non_nat(self, x): @@ -1417,7 +1440,7 @@ def format_array(self, a): if np.ndim(a) == 0: return self.format_function(a) - if self.summary_insert and a.shape[0] > 2*self.edge_items: + if self.summary_insert and a.shape[0] > 2 * self.edge_items: formatted = ( [self.format_array(a_) for a_ in a[:self.edge_items]] + [self.summary_insert] @@ -1461,9 +1484,9 @@ def __call__(self, x): for field, format_function in zip(x, self.format_functions) ] if len(str_fields) == 1: - return "({},)".format(str_fields[0]) + return f"({str_fields[0]},)" else: - return "({})".format(", ".join(str_fields)) + return f"({', '.join(str_fields)})" def _void_scalar_to_string(x, is_repr=True): @@ -1552,14 +1575,14 @@ def dtype_short_repr(dtype): return 
str(dtype) elif issubclass(dtype.type, flexible): # handle these separately so they don't give garbage like str256 - return "'%s'" % str(dtype) + return f"'{str(dtype)}'" typename = dtype.name if not dtype.isnative: # deal with cases like dtype(' 210 - and arr.size > current_options['threshold']): + if ((arr.size == 0 and arr.shape != (0,)) + or (current_options['legacy'] > 210 + and arr.size > current_options['threshold'])): extras.append(f"shape={arr.shape}") if not dtype_is_implied(arr.dtype) or arr.size == 0: extras.append(f"dtype={dtype_short_repr(arr.dtype)}") @@ -1613,9 +1636,9 @@ def _array_repr_implementation( spacer = " " if current_options['legacy'] <= 113: if issubclass(arr.dtype.type, flexible): - spacer = '\n' + ' '*len(prefix) + spacer = '\n' + ' ' * len(prefix) elif last_line_len + len(extra_str) + 1 > max_line_width: - spacer = '\n' + ' '*len(prefix) + spacer = '\n' + ' ' * len(prefix) return arr_str + spacer + extra_str diff --git a/blimgui/dist64/numpy/_core/arrayprint.pyi b/blimgui/dist64/numpy/_core/arrayprint.pyi index 142d77f..498154a 100644 --- a/blimgui/dist64/numpy/_core/arrayprint.pyi +++ b/blimgui/dist64/numpy/_core/arrayprint.pyi @@ -3,12 +3,17 @@ from collections.abc import Callable # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager -from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypedDict, overload, type_check_only - -from typing_extensions import deprecated +from typing import ( + Any, + Final, + Literal, + SupportsIndex, + TypeAlias, + TypedDict, + type_check_only, +) import numpy as np -from numpy._globals import _NoValueType from numpy._typing import NDArray, _CharLike_co, _FloatLike_co __all__ = [ @@ -69,14 +74,14 @@ class _FormatOptions(TypedDict): __docformat__: Final = "restructuredtext" # undocumented def set_printoptions( - precision: None | SupportsIndex = 
..., - threshold: None | int = ..., - edgeitems: None | int = ..., - linewidth: None | int = ..., - suppress: None | bool = ..., - nanstr: None | str = ..., - infstr: None | str = ..., - formatter: None | _FormatDict = ..., + precision: SupportsIndex | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + linewidth: int | None = None, + suppress: bool | None = None, + nanstr: str | None = None, + infstr: str | None = None, + formatter: _FormatDict | None = None, sign: _Sign | None = None, floatmode: _FloatMode | None = None, *, @@ -86,81 +91,6 @@ def set_printoptions( def get_printoptions() -> _FormatOptions: ... # public numpy export -@overload # no style -def array2string( - a: NDArray[Any], - max_line_width: int | None = None, - precision: SupportsIndex | None = None, - suppress_small: bool | None = None, - separator: str = " ", - prefix: str = "", - style: _NoValueType = ..., - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - *, - legacy: _Legacy | None = None, -) -> str: ... -@overload # style= (positional), legacy="1.13" -def array2string( - a: NDArray[Any], - max_line_width: int | None, - precision: SupportsIndex | None, - suppress_small: bool | None, - separator: str, - prefix: str, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - *, - legacy: Literal["1.13"], -) -> str: ... 
-@overload # style= (keyword), legacy="1.13" -def array2string( - a: NDArray[Any], - max_line_width: int | None = None, - precision: SupportsIndex | None = None, - suppress_small: bool | None = None, - separator: str = " ", - prefix: str = "", - *, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - legacy: Literal["1.13"], -) -> str: ... -@overload # style= (positional), legacy!="1.13" -@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") -def array2string( - a: NDArray[Any], - max_line_width: int | None, - precision: SupportsIndex | None, - suppress_small: bool | None, - separator: str, - prefix: str, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - *, - legacy: _LegacyNoStyle | None = None, -) -> str: ... -@overload # style= (keyword), legacy="1.13" -@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") def array2string( a: NDArray[Any], max_line_width: int | None = None, @@ -169,59 +99,58 @@ def array2string( separator: str = " ", prefix: str = "", *, - style: _ReprFunc, formatter: _FormatDict | None = None, threshold: int | None = None, edgeitems: int | None = None, sign: _Sign | None = None, floatmode: _FloatMode | None = None, suffix: str = "", - legacy: _LegacyNoStyle | None = None, + legacy: _Legacy | None = None, ) -> str: ... 
def format_float_scientific( x: _FloatLike_co, - precision: None | int = ..., - unique: bool = ..., + precision: int | None = None, + unique: bool = True, trim: _Trim = "k", - sign: bool = ..., - pad_left: None | int = ..., - exp_digits: None | int = ..., - min_digits: None | int = ..., + sign: bool = False, + pad_left: int | None = None, + exp_digits: int | None = None, + min_digits: int | None = None, ) -> str: ... def format_float_positional( x: _FloatLike_co, - precision: None | int = ..., - unique: bool = ..., - fractional: bool = ..., + precision: int | None = None, + unique: bool = True, + fractional: bool = True, trim: _Trim = "k", - sign: bool = ..., - pad_left: None | int = ..., - pad_right: None | int = ..., - min_digits: None | int = ..., + sign: bool = False, + pad_left: int | None = None, + pad_right: int | None = None, + min_digits: int | None = None, ) -> str: ... def array_repr( arr: NDArray[Any], - max_line_width: None | int = ..., - precision: None | SupportsIndex = ..., - suppress_small: None | bool = ..., + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, ) -> str: ... def array_str( a: NDArray[Any], - max_line_width: None | int = ..., - precision: None | SupportsIndex = ..., - suppress_small: None | bool = ..., + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, ) -> str: ... 
def printoptions( - precision: None | SupportsIndex = ..., - threshold: None | int = ..., - edgeitems: None | int = ..., - linewidth: None | int = ..., - suppress: None | bool = ..., - nanstr: None | str = ..., - infstr: None | str = ..., - formatter: None | _FormatDict = ..., - sign: None | _Sign = None, + precision: SupportsIndex | None = ..., + threshold: int | None = ..., + edgeitems: int | None = ..., + linewidth: int | None = ..., + suppress: bool | None = ..., + nanstr: str | None = ..., + infstr: str | None = ..., + formatter: _FormatDict | None = ..., + sign: _Sign | None = None, floatmode: _FloatMode | None = None, *, legacy: _Legacy | None = None, diff --git a/blimgui/dist64/numpy/_core/defchararray.py b/blimgui/dist64/numpy/_core/defchararray.py index 8d89654..1ed6b8c 100644 --- a/blimgui/dist64/numpy/_core/defchararray.py +++ b/blimgui/dist64/numpy/_core/defchararray.py @@ -18,23 +18,24 @@ import functools import numpy as np -from .._utils import set_module -from .numerictypes import bytes_, str_, character -from .numeric import ndarray, array as narray, asarray as asnarray -from numpy._core.multiarray import compare_chararrays from numpy._core import overrides +from numpy._core.multiarray import compare_chararrays +from numpy._core.strings import ( + _join as join, + _rsplit as rsplit, + _split as split, + _splitlines as splitlines, +) +from numpy._utils import set_module from numpy.strings import * from numpy.strings import ( multiply as strings_multiply, partition as strings_partition, rpartition as strings_rpartition, ) -from numpy._core.strings import ( - _split as split, - _rsplit as rsplit, - _splitlines as splitlines, - _join as join, -) + +from .numeric import array as narray, asarray as asnarray, ndarray +from .numerictypes import bytes_, character, str_ __all__ = [ 'equal', 'not_equal', 'greater_equal', 'less_equal', @@ -495,7 +496,6 @@ class adds the following functionality: title tofile tolist - tostring translate transpose upper @@ -718,7 
+718,7 @@ def __mod__(self, i): def __rmod__(self, other): return NotImplemented - def argsort(self, axis=-1, kind=None, order=None): + def argsort(self, axis=-1, kind=None, order=None, *, stable=None): """ Return the indices that sort the array lexicographically. @@ -736,7 +736,7 @@ def argsort(self, axis=-1, kind=None, order=None): dtype='|S5') """ - return self.__array__().argsort(axis, kind, order) + return self.__array__().argsort(axis, kind, order, stable=stable) argsort.__doc__ = ndarray.argsort.__doc__ def capitalize(self): @@ -1272,7 +1272,7 @@ class adds the following functionality: fastest). If order is 'A', then the returned array may be in any order (either C-, Fortran-contiguous, or even discontiguous). - + Examples -------- diff --git a/blimgui/dist64/numpy/_core/defchararray.pyi b/blimgui/dist64/numpy/_core/defchararray.pyi index f41247b..81ca564 100644 --- a/blimgui/dist64/numpy/_core/defchararray.pyi +++ b/blimgui/dist64/numpy/_core/defchararray.pyi @@ -1,37 +1,30 @@ from typing import ( - Literal as L, - overload, - TypeAlias, - TypeVar, Any, + Literal as L, + Self, SupportsIndex, SupportsInt, + TypeAlias, + overload, ) +from typing_extensions import Buffer, TypeVar import numpy as np -from numpy import ( - ndarray, - dtype, - str_, - bytes_, - int_, - object_, - _OrderKACF, - _SupportsBuffer, - _SupportsArray -) +from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ +from numpy._core.multiarray import compare_chararrays from numpy._typing import ( NDArray, - _Shape, - _ShapeLike, - _ArrayLikeStr_co as U_co, - _ArrayLikeBytes_co as S_co, - _ArrayLikeString_co as T_co, + _AnyShape, _ArrayLikeAnyString_co as UST_co, - _ArrayLikeInt_co as i_co, _ArrayLikeBool_co as b_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _Shape, + _ShapeLike, + _SupportsArray, ) -from numpy._core.multiarray import compare_chararrays __all__ = [ "equal", @@ -89,45 +82,58 @@ 
__all__ = [ "chararray", ] -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) -_SCT = TypeVar("_SCT", bound=np.character) -_CharDType_co = TypeVar("_CharDType_co", bound=dtype[np.character], covariant=True) -_CharArray: TypeAlias = chararray[tuple[int, ...], dtype[_SCT]] +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_CharacterT = TypeVar("_CharacterT", bound=np.character) +_CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True) + +_CharArray: TypeAlias = chararray[_AnyShape, dtype[_CharacterT]] -_StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType] +_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] +_StringDTypeOrUnicodeArray: TypeAlias = _StringDTypeArray | NDArray[np.str_] _StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | np.ndarray[_Shape, np.dtypes.StringDType] -class chararray(ndarray[_ShapeT_co, _CharDType_co]): +class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt = 1, + unicode: L[False] = False, + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", + ) -> _CharArray[bytes_]: ... @overload def __new__( subtype, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[False] = ..., - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., - ) -> chararray[_Shape, dtype[bytes_]]: ... + itemsize: SupportsIndex | SupportsInt, + unicode: L[True], + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", + ) -> _CharArray[str_]: ... 
@overload def __new__( subtype, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[True] = ..., - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., - ) -> chararray[_Shape, dtype[str_]]: ... + itemsize: SupportsIndex | SupportsInt = 1, + *, + unicode: L[True], + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", + ) -> _CharArray[str_]: ... def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[_Shape, _CharDType_co]: ... - def __rmul__(self, other: i_co) -> chararray[_Shape, _CharDType_co]: ... - def __mod__(self, i: Any) -> chararray[_Shape, _CharDType_co]: ... + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __eq__( self: _CharArray[str_], other: U_co, @@ -138,7 +144,7 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __ne__( self: _CharArray[str_], other: U_co, @@ -149,7 +155,7 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __ge__( self: _CharArray[str_], other: U_co, @@ -160,7 +166,7 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __le__( self: _CharArray[str_], other: U_co, @@ -171,7 +177,7 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): other: S_co, ) -> NDArray[np.bool]: ... 
- @overload + @overload # type: ignore[override] def __gt__( self: _CharArray[str_], other: U_co, @@ -182,7 +188,7 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __lt__( self: _CharArray[str_], other: U_co, @@ -193,7 +199,7 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __add__( self: _CharArray[str_], other: U_co, @@ -204,7 +210,7 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): other: S_co, ) -> _CharArray[bytes_]: ... - @overload + @overload # type: ignore[override] def __radd__( self: _CharArray[str_], other: U_co, @@ -219,90 +225,90 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): def center( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def center( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload def count( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... def decode( self: _CharArray[bytes_], - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> _CharArray[str_]: ... def encode( self: _CharArray[str_], - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> _CharArray[bytes_]: ... @overload def endswith( self: _CharArray[str_], suffix: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... 
@overload def endswith( self: _CharArray[bytes_], suffix: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def expandtabs( self, - tabsize: i_co = ..., - ) -> chararray[_Shape, _CharDType_co]: ... + tabsize: i_co = 8, + ) -> Self: ... @overload def find( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def find( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload @@ -320,27 +326,27 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): def ljust( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def ljust( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload def lstrip( self: _CharArray[str_], - chars: None | U_co = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def lstrip( self: _CharArray[bytes_], - chars: None | S_co = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... - @overload + @overload # type: ignore[override] def partition( self: _CharArray[str_], sep: U_co, @@ -356,57 +362,57 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): self: _CharArray[str_], old: U_co, new: U_co, - count: None | i_co = ..., + count: i_co | None = None, ) -> _CharArray[str_]: ... 
@overload def replace( self: _CharArray[bytes_], old: S_co, new: S_co, - count: None | i_co = ..., + count: i_co | None = None, ) -> _CharArray[bytes_]: ... @overload def rfind( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rjust( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def rjust( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload @@ -423,87 +429,87 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): @overload def rsplit( self: _CharArray[str_], - sep: None | U_co = ..., - maxsplit: None | i_co = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( self: _CharArray[bytes_], - sep: None | S_co = ..., - maxsplit: None | i_co = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rstrip( self: _CharArray[str_], - chars: None | U_co = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def rstrip( self: _CharArray[bytes_], - chars: None | S_co = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... 
@overload def split( self: _CharArray[str_], - sep: None | U_co = ..., - maxsplit: None | i_co = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( self: _CharArray[bytes_], - sep: None | S_co = ..., - maxsplit: None | i_co = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... - def splitlines(self, keepends: None | b_co = ...) -> NDArray[object_]: ... + def splitlines(self, keepends: b_co | None = None) -> NDArray[object_]: ... @overload def startswith( self: _CharArray[str_], prefix: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( self: _CharArray[bytes_], prefix: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def strip( self: _CharArray[str_], - chars: None | U_co = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def strip( self: _CharArray[bytes_], - chars: None | S_co = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... @overload def translate( self: _CharArray[str_], table: U_co, - deletechars: None | U_co = ..., + deletechars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def translate( self: _CharArray[bytes_], table: S_co, - deletechars: None | S_co = ..., + deletechars: S_co | None = None, ) -> _CharArray[bytes_]: ... - def zfill(self, width: i_co) -> chararray[_Shape, _CharDType_co]: ... - def capitalize(self) -> chararray[_ShapeT_co, _CharDType_co]: ... - def title(self) -> chararray[_ShapeT_co, _CharDType_co]: ... - def swapcase(self) -> chararray[_ShapeT_co, _CharDType_co]: ... - def lower(self) -> chararray[_ShapeT_co, _CharDType_co]: ... - def upper(self) -> chararray[_ShapeT_co, _CharDType_co]: ... + def zfill(self, width: i_co) -> Self: ... + def capitalize(self) -> Self: ... + def title(self) -> Self: ... 
+ def swapcase(self) -> Self: ... + def lower(self) -> Self: ... + def upper(self) -> Self: ... def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... @@ -514,7 +520,6 @@ class chararray(ndarray[_ShapeT_co, _CharDType_co]): def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... - # Comparison @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @@ -565,7 +570,7 @@ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ... +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @@ -576,7 +581,6 @@ def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... @overload def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... - @overload def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... @overload @@ -596,33 +600,33 @@ def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +def center(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... @overload -def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +def center(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... @overload -def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... 
@overload -def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def center(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... def decode( a: S_co, - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[str_]: ... def encode( a: U_co | T_co, - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[str_]: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[bytes_]: ... @overload -def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... @overload -def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... @@ -634,13 +638,13 @@ def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _Str def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +def ljust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... @overload -def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +def ljust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... @overload -def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... 
+def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... @overload -def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def ljust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload def lower(a: U_co) -> NDArray[str_]: ... @@ -652,13 +656,13 @@ def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def lstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... @overload -def lstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... @@ -674,53 +678,53 @@ def replace( a: U_co, old: U_co, new: U_co, - count: None | i_co = ..., + count: i_co | None = -1, ) -> NDArray[str_]: ... @overload def replace( a: S_co, old: S_co, new: S_co, - count: None | i_co = ..., + count: i_co | None = -1, ) -> NDArray[bytes_]: ... @overload def replace( a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeArray: ... @overload def replace( a: T_co, old: T_co, new: T_co, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeOrUnicodeArray: ... 
@overload def rjust( a: U_co, width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> NDArray[str_]: ... @overload def rjust( a: S_co, width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> NDArray[bytes_]: ... @overload def rjust( a: _StringDTypeSupportsArray, width: i_co, - fillchar: _StringDTypeSupportsArray = ..., + fillchar: str | _StringDTypeSupportsArray = " ", ) -> _StringDTypeArray: ... @overload def rjust( a: T_co, width: i_co, - fillchar: T_co = ..., + fillchar: T_co = " ", ) -> _StringDTypeOrUnicodeArray: ... @overload @@ -735,72 +739,72 @@ def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def rsplit( a: U_co, - sep: None | U_co = ..., - maxsplit: None | i_co = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: S_co, - sep: None | S_co = ..., - maxsplit: None | i_co = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: _StringDTypeSupportsArray, - sep: None | _StringDTypeSupportsArray = ..., - maxsplit: None | i_co = ..., + sep: _StringDTypeSupportsArray | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: T_co, - sep: None | T_co = ..., - maxsplit: None | i_co = ..., + sep: T_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload -def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def rstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... 
@overload -def rstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def split( a: U_co, - sep: None | U_co = ..., - maxsplit: None | i_co = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: S_co, - sep: None | S_co = ..., - maxsplit: None | i_co = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: _StringDTypeSupportsArray, - sep: None | _StringDTypeSupportsArray = ..., - maxsplit: None | i_co = ..., + sep: _StringDTypeSupportsArray | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: T_co, - sep: None | T_co = ..., - maxsplit: None | i_co = ..., + sep: T_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... -def splitlines(a: UST_co, keepends: None | b_co = ...) -> NDArray[np.object_]: ... +def splitlines(a: UST_co, keepends: b_co | None = None) -> NDArray[np.object_]: ... @overload -def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +def strip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +def strip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def strip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... @overload -def strip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def swapcase(a: U_co) -> NDArray[str_]: ... @@ -824,25 +828,25 @@ def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... 
def translate( a: U_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> NDArray[str_]: ... @overload def translate( a: S_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> NDArray[bytes_]: ... @overload def translate( a: _StringDTypeSupportsArray, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> _StringDTypeArray: ... @overload def translate( a: T_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... @overload @@ -868,88 +872,88 @@ def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... def count( a: U_co, sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( a: S_co, sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def endswith( a: U_co, suffix: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: S_co, suffix: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: T_co, suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def find( a: U_co, sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def find( a: S_co, sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... 
@overload def find( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: U_co, sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( a: S_co, sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... def isalpha(a: UST_co) -> NDArray[np.bool]: ... @@ -966,131 +970,181 @@ def isupper(a: UST_co) -> NDArray[np.bool]: ... def rfind( a: U_co, sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( a: S_co, sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: U_co, sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( a: S_co, sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def startswith( a: U_co, prefix: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... 
@overload def startswith( a: S_co, prefix: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: T_co, - suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + prefix: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def str_len(A: UST_co) -> NDArray[int_]: ... # Overload 1 and 2: str- or bytes-based array-likes -# overload 3: arbitrary object with unicode=False (-> bytes_) -# overload 4: arbitrary object with unicode=True (-> str_) +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) @overload def array( obj: U_co, - itemsize: None | int = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: L[True] | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload def array( obj: S_co, - itemsize: None | int = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: L[False] | None = None, + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def array( obj: object, - itemsize: None | int = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + *, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... 
@overload def array( obj: object, - itemsize: None | int = ..., - copy: bool = ..., - unicode: L[True] = ..., - order: _OrderKACF = ..., + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = None, ) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + *, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + unicode: bool | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload def asarray( obj: U_co, - itemsize: None | int = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: L[True] | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload def asarray( obj: S_co, - itemsize: None | int = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: L[False] | None = None, + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, - itemsize: None | int = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + *, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, - itemsize: None | int = ..., - unicode: L[True] = ..., - order: _OrderKACF = ..., + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = None, ) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = None, + *, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... 
+@overload +def asarray( + obj: object, + itemsize: int | None = None, + unicode: bool | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_] | _CharArray[bytes_]: ... diff --git a/blimgui/dist64/numpy/_core/einsumfunc.py b/blimgui/dist64/numpy/_core/einsumfunc.py index 108a136..8aaaecc 100644 --- a/blimgui/dist64/numpy/_core/einsumfunc.py +++ b/blimgui/dist64/numpy/_core/einsumfunc.py @@ -2,18 +2,21 @@ Implementation of optimized einsum. """ +import functools import itertools import operator -from numpy._core.multiarray import c_einsum -from numpy._core.numeric import asanyarray, tensordot +from numpy._core.multiarray import c_einsum, matmul +from numpy._core.numeric import asanyarray, reshape from numpy._core.overrides import array_function_dispatch +from numpy._core.umath import multiply __all__ = ['einsum', 'einsum_path'] # importing string for string.ascii_letters would be too slow # the first import before caching has been measured to take 800 µs (#23777) -einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' +# imports begin with uppercase to mimic ASCII values to avoid sorting issues +einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' einsum_symbols_set = set(einsum_symbols) @@ -439,116 +442,6 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): return path -def _can_dot(inputs, result, idx_removed): - """ - Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. - - Parameters - ---------- - inputs : list of str - Specifies the subscripts for summation. - result : str - Resulting summation. - idx_removed : set - Indices that are removed in the summation - - - Returns - ------- - type : bool - Returns true if BLAS should and can be used, else False - - Notes - ----- - If the operations is BLAS level 1 or 2 and is not already aligned - we default back to einsum as the memory movement to copy is more - costly than the operation itself. 
- - - Examples - -------- - - # Standard GEMM operation - >>> _can_dot(['ij', 'jk'], 'ik', set('j')) - True - - # Can use the standard BLAS, but requires odd data movement - >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) - False - - # DDOT where the memory is not aligned - >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) - False - - """ - - # All `dot` calls remove indices - if len(idx_removed) == 0: - return False - - # BLAS can only handle two operands - if len(inputs) != 2: - return False - - input_left, input_right = inputs - - for c in set(input_left + input_right): - # can't deal with repeated indices on same input or more than 2 total - nl, nr = input_left.count(c), input_right.count(c) - if (nl > 1) or (nr > 1) or (nl + nr > 2): - return False - - # can't do implicit summation or dimension collapse e.g. - # "ab,bc->c" (implicitly sum over 'a') - # "ab,ca->ca" (take diagonal of 'a') - if nl + nr - 1 == int(c in result): - return False - - # Build a few temporaries - set_left = set(input_left) - set_right = set(input_right) - keep_left = set_left - idx_removed - keep_right = set_right - idx_removed - rs = len(idx_removed) - - # At this point we are a DOT, GEMV, or GEMM operation - - # Handle inner products - - # DDOT with aligned data - if input_left == input_right: - return True - - # DDOT without aligned data (better to use einsum) - if set_left == set_right: - return False - - # Handle the 4 possible (aligned) GEMV or GEMM cases - - # GEMM or GEMV no transpose - if input_left[-rs:] == input_right[:rs]: - return True - - # GEMM or GEMV transpose both - if input_left[:rs] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose right - if input_left[-rs:] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose left - if input_left[:rs] == input_right[:rs]: - return True - - # Einsum is faster than GEMV if we have to copy data - if not keep_left or not keep_right: - return False - - # We are a matrix-matrix product, but we need to copy data - return 
True - - def _parse_einsum_input(operands): """ A reproduction of einsum c side einsum parsing in python. @@ -588,7 +481,7 @@ def _parse_einsum_input(operands): if s in '.,->': continue if s not in einsum_symbols: - raise ValueError("Character %s is not a valid symbol." % s) + raise ValueError(f"Character {s} is not a valid symbol.") else: tmp_operands = list(operands) @@ -690,7 +583,7 @@ def _parse_einsum_input(operands): tmp_subscripts = subscripts.replace(",", "") for s in sorted(set(tmp_subscripts)): if s not in (einsum_symbols): - raise ValueError("Character %s is not a valid symbol." % s) + raise ValueError(f"Character {s} is not a valid symbol.") if tmp_subscripts.count(s) == 1: output_subscript += s normal_inds = ''.join(sorted(set(output_subscript) - @@ -708,7 +601,7 @@ def _parse_einsum_input(operands): output_subscript = "" for s in sorted(set(tmp_subscripts)): if s not in einsum_symbols: - raise ValueError("Character %s is not a valid symbol." % s) + raise ValueError(f"Character {s} is not a valid symbol.") if tmp_subscripts.count(s) == 1: output_subscript += s @@ -718,8 +611,7 @@ def _parse_einsum_input(operands): raise ValueError("Output character %s appeared more than once in " "the output." 
% char) if char not in input_subscripts: - raise ValueError("Output character %s did not appear in the input" - % char) + raise ValueError(f"Output character {char} did not appear in the input") # Make sure number operands is equivalent to the number of terms if len(input_subscripts.split(',')) != len(operands): @@ -875,7 +767,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): path_type = path_type[0] else: - raise TypeError("Did not understand the path: %s" % str(path_type)) + raise TypeError(f"Did not understand the path: {str(path_type)}") # Hidden option, only einsum should call this einsum_call_arg = einsum_call @@ -887,13 +779,14 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Build a few useful list and sets input_list = input_subscripts.split(',') + num_inputs = len(input_list) input_sets = [set(x) for x in input_list] output_set = set(output_subscript) indices = set(input_subscripts.replace(',', '')) + num_indices = len(indices) # Get length of each unique dimension and ensure all dimensions are correct dimension_dict = {} - broadcast_indices = [[] for x in range(len(input_list))] for tnum, term in enumerate(input_list): sh = operands[tnum].shape if len(sh) != len(term): @@ -903,10 +796,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): for cnum, char in enumerate(term): dim = sh[cnum] - # Build out broadcast indices - if dim == 1: - broadcast_indices[tnum].append(char) - if char in dimension_dict.keys(): # For broadcasting cases we always want the largest dim size if dimension_dict[char] == 1: @@ -918,9 +807,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: dimension_dict[char] = dim - # Convert broadcast inds to sets - broadcast_indices = [set(x) for x in broadcast_indices] - # Compute size of each input array plus the output array size_list = [_compute_size_by_dict(term, dimension_dict) for term in input_list + [output_subscript]] @@ -931,23 +817,16 @@ def 
einsum_path(*operands, optimize='greedy', einsum_call=False): else: memory_arg = memory_limit - # Compute naive cost - # This isn't quite right, need to look into exactly how einsum does this - inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 - naive_cost = _flop_count( - indices, inner_product, len(input_list), dimension_dict - ) - # Compute the path if explicit_einsum_path: path = path_type[1:] elif ( (path_type is False) - or (len(input_list) in [1, 2]) + or (num_inputs in [1, 2]) or (indices == output_set) ): # Nothing to be optimized, leave it to einsum - path = [tuple(range(len(input_list)))] + path = [tuple(range(num_inputs))] elif path_type == "greedy": path = _greedy_path( input_sets, output_set, dimension_dict, memory_arg @@ -969,26 +848,18 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): contract = _find_contraction(contract_inds, input_sets, output_set) out_inds, input_sets, idx_removed, idx_contract = contract - cost = _flop_count( - idx_contract, idx_removed, len(contract_inds), dimension_dict - ) - cost_list.append(cost) - scale_list.append(len(idx_contract)) - size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) + if not einsum_call_arg: + # these are only needed for printing info + cost = _flop_count( + idx_contract, idx_removed, len(contract_inds), dimension_dict + ) + cost_list.append(cost) + scale_list.append(len(idx_contract)) + size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) - bcast = set() tmp_inputs = [] for x in contract_inds: tmp_inputs.append(input_list.pop(x)) - bcast |= broadcast_indices.pop(x) - - new_bcast_inds = bcast - idx_removed - - # If we're broadcasting, nix blas - if not len(idx_removed & bcast): - do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) - else: - do_blas = False # Last contraction if (cnum - len(path)) == -1: @@ -998,22 +869,17 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): idx_result = "".join([x[1] for x in 
sorted(sort_result)]) input_list.append(idx_result) - broadcast_indices.append(new_bcast_inds) einsum_str = ",".join(tmp_inputs) + "->" + idx_result - contraction = ( - contract_inds, idx_removed, einsum_str, input_list[:], do_blas - ) + contraction = (contract_inds, einsum_str, input_list[:]) contraction_list.append(contraction) - opt_cost = sum(cost_list) + 1 - if len(input_list) != 1: # Explicit "einsum_path" is usually trusted, but we detect this kind of # mistake in order to prevent from returning an intermediate value. raise RuntimeError( - "Invalid einsum_path is specified: {} more operands has to be " - "contracted.".format(len(input_list) - 1)) + f"Invalid einsum_path is specified: {len(input_list) - 1} more " + "operands has to be contracted.") if einsum_call_arg: return (operands, contraction_list) @@ -1022,22 +888,32 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): overall_contraction = input_subscripts + "->" + output_subscript header = ("scaling", "current", "remaining") + # Compute naive cost + # This isn't quite right, need to look into exactly how einsum does this + inner_product = ( + sum(len(set(x)) for x in input_subscripts.split(',')) - num_indices + ) > 0 + naive_cost = _flop_count( + indices, inner_product, num_inputs, dimension_dict + ) + + opt_cost = sum(cost_list) + 1 speedup = naive_cost / opt_cost max_i = max(size_list) - path_print = " Complete contraction: %s\n" % overall_contraction - path_print += " Naive scaling: %d\n" % len(indices) + path_print = f" Complete contraction: {overall_contraction}\n" + path_print += f" Naive scaling: {num_indices}\n" path_print += " Optimized scaling: %d\n" % max(scale_list) - path_print += " Naive FLOP count: %.3e\n" % naive_cost - path_print += " Optimized FLOP count: %.3e\n" % opt_cost - path_print += " Theoretical speedup: %3.3f\n" % speedup - path_print += " Largest intermediate: %.3e elements\n" % max_i + path_print += f" Naive FLOP count: {naive_cost:.3e}\n" + path_print += 
f" Optimized FLOP count: {opt_cost:.3e}\n" + path_print += f" Theoretical speedup: {speedup:3.3f}\n" + path_print += f" Largest intermediate: {max_i:.3e} elements\n" path_print += "-" * 74 + "\n" path_print += "%6s %24s %40s\n" % header path_print += "-" * 74 for n, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction + _, einsum_str, remaining = contraction remaining_str = ",".join(remaining) + "->" + output_subscript path_run = (scale_list[n], einsum_str, remaining_str) path_print += "\n%4d %24s %40s" % path_run @@ -1046,6 +922,317 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): return (path, path_print) +def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out): + """If there are no contracted indices, then we can directly transpose and + insert singleton dimensions into ``a`` and ``b`` such that (broadcast) + elementwise multiplication performs the einsum. + + No need to cache this as it is within the cached + ``_parse_eq_to_batch_matmul``. + + """ + desired_a = "" + desired_b = "" + new_shape_a = [] + new_shape_b = [] + for ix in out: + if ix in a_term: + desired_a += ix + new_shape_a.append(shape_a[a_term.index(ix)]) + else: + new_shape_a.append(1) + if ix in b_term: + desired_b += ix + new_shape_b.append(shape_b[b_term.index(ix)]) + else: + new_shape_b.append(1) + + if desired_a != a_term: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + if desired_b != b_term: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + None, # new_shape_ab, not needed since not fusing + None, # perm_ab, not needed as we transpose a and b first + True, # pure_multiplication=True + ) + + +@functools.lru_cache(2**12) +def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): + """Cached parsing of a two term einsum equation into the necessary + sequence of arguments for contracttion via batched matrix multiplication. 
+ The steps we need to specify are: + + 1. Remove repeated and trivial indices from the left and right terms, + and transpose them, done as a single einsum. + 2. Fuse the remaining indices so we have two 3D tensors. + 3. Perform the batched matrix multiplication. + 4. Unfuse the output to get the desired final index order. + + """ + lhs, out = eq.split("->") + a_term, b_term = lhs.split(",") + + if len(a_term) != len(shape_a): + raise ValueError(f"Term '{a_term}' does not match shape {shape_a}.") + if len(b_term) != len(shape_b): + raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.") + + sizes = {} + singletons = set() + + # parse left term to unique indices with size > 1 + left = {} + for ix, d in zip(a_term, shape_a): + if d == 1: + # everything (including broadcasting) works nicely if simply ignore + # such dimensions, but we do need to track if they appear in output + # and thus should be reintroduced later + singletons.add(ix) + continue + if sizes.setdefault(ix, d) != d: + # set and check size + raise ValueError( + f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." + ) + left[ix] = True + + # parse right term to unique indices with size > 1 + right = {} + for ix, d in zip(b_term, shape_b): + # broadcast indices (size 1 on one input and size != 1 + # on the other) should not be treated as singletons + if d == 1: + if ix not in left: + singletons.add(ix) + continue + singletons.discard(ix) + + if sizes.setdefault(ix, d) != d: + # set and check size + raise ValueError( + f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." + ) + right[ix] = True + + # now we classify the unique size > 1 indices only + bat_inds = [] # appears on A, B, O + con_inds = [] # appears on A, B, . 
+ a_keep = [] # appears on A, ., O + b_keep = [] # appears on ., B, O + # other indices (appearing on A or B only) will + # be summed or traced out prior to the matmul + for ix in left: + if right.pop(ix, False): + if ix in out: + bat_inds.append(ix) + else: + con_inds.append(ix) + elif ix in out: + a_keep.append(ix) + # now only indices unique to right remain + for ix in right: + if ix in out: + b_keep.append(ix) + + if not con_inds: + # contraction is pure multiplication, prepare inputs differently + return _parse_eq_to_pure_multiplication( + a_term, shape_a, b_term, shape_b, out + ) + + # only need the size one indices that appear in the output + singletons = [ix for ix in out if ix in singletons] + + # take diagonal, remove any trivial axes and transpose left + desired_a = "".join((*bat_inds, *a_keep, *con_inds)) + if a_term != desired_a: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + + # take diagonal, remove any trivial axes and transpose right + desired_b = "".join((*bat_inds, *con_inds, *b_keep)) + if b_term != desired_b: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + # then we want to reshape + if bat_inds: + lgroups = (bat_inds, a_keep, con_inds) + rgroups = (bat_inds, con_inds, b_keep) + ogroups = (bat_inds, a_keep, b_keep) + else: + # avoid size 1 batch dimension if no batch indices + lgroups = (a_keep, con_inds) + rgroups = (con_inds, b_keep) + ogroups = (a_keep, b_keep) + + if any(len(group) != 1 for group in lgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_a = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in lgroups + ) + else: + new_shape_a = None + + if any(len(group) != 1 for group in rgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_b = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in 
rgroups + ) + else: + new_shape_b = None + + if any(len(group) != 1 for group in ogroups) or singletons: + new_shape_ab = (1,) * len(singletons) + tuple( + sizes[ix] for ix_group in ogroups for ix in ix_group + ) + else: + new_shape_ab = None + + # then we might need to permute the matmul produced output: + out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep)) + if out_produced != out: + perm_ab = tuple(out_produced.index(ix) for ix in out) + else: + perm_ab = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + False, # pure_multiplication=False + ) + + +@functools.lru_cache(maxsize=64) +def _parse_output_order(order, a_is_fcontig, b_is_fcontig): + order = order.upper() + if order == "K": + return None + elif order in "CF": + return order + elif order == "A": + if a_is_fcontig and b_is_fcontig: + return "F" + else: + return "C" + else: + raise ValueError( + "ValueError: order must be one of " + f"'C', 'F', 'A', or 'K' (got '{order}')" + ) + + +def bmm_einsum(eq, a, b, out=None, **kwargs): + """Perform arbitrary pairwise einsums using only ``matmul``, or + ``multiply`` if no contracted indices are involved (plus maybe single term + ``einsum`` to prepare the terms individually). The logic for each is cached + based on the equation and array shape, and each step is only performed if + necessary. + + Parameters + ---------- + eq : str + The einsum equation. + a : array_like + The first array to contract. + b : array_like + The second array to contract. + + Returns + ------- + array_like + + Notes + ----- + A fuller description of this algorithm, and original source for this + implementation, can be found at https://github.com/jcmgray/einsum_bmm. + """ + ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + pure_multiplication, + ) = _parse_eq_to_batch_matmul(eq, a.shape, b.shape) + + # n.b. 
one could special case various cases to call c_einsum directly here + + # need to handle `order` a little manually, since we do transpose + # operations before and potentially after the ufunc calls + output_order = _parse_output_order( + kwargs.pop("order", "K"), a.flags.f_contiguous, b.flags.f_contiguous + ) + + # prepare left + if eq_a is not None: + # diagonals, sums, and tranpose + a = c_einsum(eq_a, a) + if new_shape_a is not None: + a = reshape(a, new_shape_a) + + # prepare right + if eq_b is not None: + # diagonals, sums, and tranpose + b = c_einsum(eq_b, b) + if new_shape_b is not None: + b = reshape(b, new_shape_b) + + if pure_multiplication: + # no contracted indices + if output_order is not None: + kwargs["order"] = output_order + + # do the 'contraction' via multiplication! + return multiply(a, b, out=out, **kwargs) + + # can only supply out here if no other reshaping / transposing + matmul_out_compatible = (new_shape_ab is None) and (perm_ab is None) + if matmul_out_compatible: + kwargs["out"] = out + + # do the contraction! + ab = matmul(a, b, **kwargs) + + # prepare the output + if new_shape_ab is not None: + ab = reshape(ab, new_shape_ab) + if perm_ab is not None: + ab = ab.transpose(perm_ab) + + if (out is not None) and (not matmul_out_compatible): + # handle case where out is specified, but we also needed + # to reshape / transpose ``ab`` after the matmul + out[:] = ab + ab = out + elif output_order is not None: + ab = asanyarray(ab, order=output_order) + + return ab + + def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs): # Arguably we dispatch on more arguments than we really should; see note in # _einsum_path_dispatcher for why. 
@@ -1428,65 +1615,29 @@ def einsum(*operands, out=None, optimize=False, **kwargs): unknown_kwargs = [k for (k, v) in kwargs.items() if k not in valid_einsum_kwargs] if len(unknown_kwargs): - raise TypeError("Did not understand the following kwargs: %s" - % unknown_kwargs) + raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}") # Build the contraction list and operand operands, contraction_list = einsum_path(*operands, optimize=optimize, einsum_call=True) - # Handle order kwarg for output array, c_einsum allows mixed case - output_order = kwargs.pop('order', 'K') - if output_order.upper() == 'A': - if all(arr.flags.f_contiguous for arr in operands): - output_order = 'F' - else: - output_order = 'C' - # Start contraction loop for num, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction + inds, einsum_str, _ = contraction tmp_operands = [operands.pop(x) for x in inds] # Do we need to deal with the output? handle_out = specified_out and ((num + 1) == len(contraction_list)) - # Call tensordot if still possible - if blas: - # Checks have already been handled - input_str, results_index = einsum_str.split('->') - input_left, input_right = input_str.split(',') - - tensor_result = input_left + input_right - for s in idx_rm: - tensor_result = tensor_result.replace(s, "") - - # Find indices to contract over - left_pos, right_pos = [], [] - for s in sorted(idx_rm): - left_pos.append(input_left.find(s)) - right_pos.append(input_right.find(s)) - - # Contract! 
- new_view = tensordot( - *tmp_operands, axes=(tuple(left_pos), tuple(right_pos)) - ) - - # Build a new view if needed - if (tensor_result != results_index) or handle_out: - if handle_out: - kwargs["out"] = out - new_view = c_einsum( - tensor_result + '->' + results_index, new_view, **kwargs - ) + # If out was specified + if handle_out: + kwargs["out"] = out - # Call einsum + if len(tmp_operands) == 2: + # Call (batched) matrix multiplication if possible + new_view = bmm_einsum(einsum_str, *tmp_operands, **kwargs) else: - # If out was specified - if handle_out: - kwargs["out"] = out - - # Do the contraction + # Call einsum new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) # Append new items and dereference what we can @@ -1496,4 +1647,4 @@ def einsum(*operands, out=None, optimize=False, **kwargs): if specified_out: return out else: - return asanyarray(operands[0], order=output_order) + return operands[0] diff --git a/blimgui/dist64/numpy/_core/einsumfunc.pyi b/blimgui/dist64/numpy/_core/einsumfunc.pyi index 782e7bc..5787b41 100644 --- a/blimgui/dist64/numpy/_core/einsumfunc.pyi +++ b/blimgui/dist64/numpy/_core/einsumfunc.pyi @@ -1,37 +1,36 @@ from collections.abc import Sequence -from typing import TypeAlias, TypeVar, Any, overload, Literal +from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np -from numpy import number, _OrderKACF +from numpy import _OrderKACF, number from numpy._typing import ( NDArray, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _ArrayLikeUInt_co, _DTypeLikeBool, - _DTypeLikeUInt, - _DTypeLikeInt, - _DTypeLikeFloat, _DTypeLikeComplex, _DTypeLikeComplex_co, + _DTypeLikeFloat, + _DTypeLikeInt, _DTypeLikeObject, + _DTypeLikeUInt, ) __all__ = ["einsum", "einsum_path"] -_ArrayType = TypeVar( - "_ArrayType", - bound=NDArray[np.bool | number[Any]], +_ArrayT = TypeVar( + "_ArrayT", + 
bound=NDArray[np.bool | number], ) _OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None _CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"] _CastingUnsafe: TypeAlias = Literal["unsafe"] - # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order # to identify whether or an array or scalar is returned. At a cursory @@ -43,55 +42,55 @@ def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeBool_co, - out: None = ..., - dtype: None | _DTypeLikeBool = ..., + out: None = None, + dtype: _DTypeLikeBool | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeUInt_co, - out: None = ..., - dtype: None | _DTypeLikeUInt = ..., + out: None = None, + dtype: _DTypeLikeUInt | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeInt_co, - out: None = ..., - dtype: None | _DTypeLikeInt = ..., + out: None = None, + dtype: _DTypeLikeInt | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeFloat_co, - out: None = ..., - dtype: None | _DTypeLikeFloat = ..., + out: None = None, + dtype: _DTypeLikeFloat | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... 
@overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: None = ..., - dtype: None | _DTypeLikeComplex = ..., + out: None = None, + dtype: _DTypeLikeComplex | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( @@ -99,44 +98,44 @@ def einsum( /, *operands: Any, casting: _CastingUnsafe, - dtype: None | _DTypeLikeComplex_co = ..., - out: None = ..., + dtype: _DTypeLikeComplex_co | None = ..., + out: None = None, order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: _ArrayType, - dtype: None | _DTypeLikeComplex_co = ..., + out: _ArrayT, + dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayType: ... + optimize: _OptimizeKind = False, +) -> _ArrayT: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayType, + out: _ArrayT, casting: _CastingUnsafe, - dtype: None | _DTypeLikeComplex_co = ..., + dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayType: ... + optimize: _OptimizeKind = False, +) -> _ArrayT: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: None = ..., - dtype: None | _DTypeLikeObject = ..., + out: None = None, + dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... 
@overload def einsum( @@ -144,33 +143,33 @@ def einsum( /, *operands: Any, casting: _CastingUnsafe, - dtype: None | _DTypeLikeObject = ..., - out: None = ..., + dtype: _DTypeLikeObject | None = ..., + out: None = None, order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: _ArrayType, - dtype: None | _DTypeLikeObject = ..., + out: _ArrayT, + dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayType: ... + optimize: _OptimizeKind = False, +) -> _ArrayT: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayType, + out: _ArrayT, casting: _CastingUnsafe, - dtype: None | _DTypeLikeObject = ..., + dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayType: ... + optimize: _OptimizeKind = False, +) -> _ArrayT: ... # NOTE: `einsum_call` is a hidden kwarg unavailable for public use. # It is therefore excluded from the signatures below. diff --git a/blimgui/dist64/numpy/_core/fromnumeric.py b/blimgui/dist64/numpy/_core/fromnumeric.py index 8050835..dc2dffd 100644 --- a/blimgui/dist64/numpy/_core/fromnumeric.py +++ b/blimgui/dist64/numpy/_core/fromnumeric.py @@ -2,18 +2,15 @@ """ import functools +import math import types -import warnings import numpy as np -from .._utils import set_module -from . import multiarray as mu -from . import overrides -from . import umath as um -from . import numerictypes as nt -from .multiarray import asarray, array, asanyarray, concatenate +from numpy._utils import set_module + +from . import _methods, multiarray as mu, numerictypes as nt, overrides, umath as um from ._multiarray_umath import _array_converter -from . 
import _methods +from .multiarray import asanyarray, asarray, concatenate _dt_ = nt.sctype2char @@ -172,7 +169,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): Ni, Nk = a.shape[:axis], a.shape[axis+1:] for ii in ndindex(Ni): - for kk in ndindex(Nj): + for kk in ndindex(Nk): out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] For this reason, it is equivalent to (but faster than) the following use @@ -203,13 +200,12 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, - copy=None): +def _reshape_dispatcher(a, /, shape, order=None, *, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): +def reshape(a, /, shape, order='C', *, copy=None): """ Gives a new shape to an array without changing its data. @@ -235,10 +231,6 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): 'A' means to read / write the elements in Fortran-like index order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. - newshape : int or tuple of ints - .. deprecated:: 2.1 - Replaced by ``shape`` argument. Retained for backward - compatibility. copy : bool, optional If ``True``, then the array data is copied. If ``None``, a copy will only be made if it's required by ``order``. 
For ``False`` it raises @@ -302,23 +294,6 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): [3, 4], [5, 6]]) """ - if newshape is None and shape is None: - raise TypeError( - "reshape() missing 1 required positional argument: 'shape'") - if newshape is not None: - if shape is not None: - raise TypeError( - "You cannot specify 'newshape' and 'shape' arguments " - "at the same time.") - # Deprecated in NumPy 2.1, 2024-04-18 - warnings.warn( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", - DeprecationWarning, - stacklevel=2, - ) - shape = newshape if copy is not None: return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) return _wrapfunc(a, 'reshape', shape, order=order) @@ -565,8 +540,7 @@ def put(a, ind, v, mode='raise'): try: put = a.put except AttributeError as e: - raise TypeError("argument 1 must be numpy.ndarray, " - "not {name}".format(name=type(a).__name__)) from e + raise TypeError(f"argument 1 must be numpy.ndarray, not {type(a)}") from e return put(ind, v, mode=mode) @@ -780,8 +754,6 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): provided with a sequence of k-th it will partition all elements indexed by k-th of them into their sorted position at once. - .. deprecated:: 1.22.0 - Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. @@ -893,8 +865,6 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): sequence of k-th it will partition all of them into their sorted position at once. - .. deprecated:: 1.22.0 - Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. 
@@ -1307,6 +1277,8 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): Indexes of the maximal elements of a N-dimensional array: + >>> a.flat[np.argmax(a)] + 15 >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) >>> ind (1, 2) @@ -1405,6 +1377,8 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): Indices of the minimum elements of a N-dimensional array: + >>> a.flat[np.argmin(a)] + 10 >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) >>> ind (0, 0) @@ -1608,7 +1582,8 @@ def resize(a, new_shape): # First case must zero fill. The second would have repeats == 0. return np.zeros_like(a, shape=new_shape) - repeats = -(-new_size // a.size) # ceil division + # ceiling division without negating new_size + repeats = (new_size + a.size - 1) // a.size a = concatenate((a,) * repeats)[:new_size] return reshape(a, new_shape) @@ -2028,15 +2003,6 @@ def nonzero(a): To group the indices by element, rather than dimension, use `argwhere`, which returns a row for each non-zero element. - .. note:: - - When called on a zero-d array or scalar, ``nonzero(a)`` is treated - as ``nonzero(atleast_1d(a))``. - - .. deprecated:: 1.17.0 - - Use `atleast_1d` explicitly if this behavior is deliberate. - Parameters ---------- a : array_like @@ -2450,19 +2416,12 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, """ if isinstance(a, _gentype): # 2018-02-25, 1.15.0 - warnings.warn( - "Calling np.sum(generator) is deprecated, and in the future will " - "give a different result. Use np.sum(np.fromiter(generator)) or " + raise TypeError( + "Calling np.sum(generator) is deprecated." + "Use np.sum(np.fromiter(generator)) or " "the python sum builtin instead.", - DeprecationWarning, stacklevel=2 ) - res = _sum_(a) - if out is not None: - out[...] 
= res - return out - return res - return _wrapreduction( a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, initial=initial, where=where @@ -3572,10 +3531,13 @@ def size(a, axis=None): ---------- a : array_like Input data. - axis : int, optional - Axis along which the elements are counted. By default, give + axis : None or int or tuple of ints, optional + Axis or axes along which the elements are counted. By default, give the total number of elements. + .. versionchanged:: 2.4 + Extended to accept multiple axes. + Returns ------- element_count : int @@ -3593,10 +3555,12 @@ def size(a, axis=None): >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 - >>> np.size(a,1) + >>> np.size(a,axis=1) 3 - >>> np.size(a,0) + >>> np.size(a,axis=0) 2 + >>> np.size(a,axis=(0,1)) + 6 """ if axis is None: @@ -3605,10 +3569,10 @@ def size(a, axis=None): except AttributeError: return asarray(a).size else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] + _shape = shape(a) + from .numeric import normalize_axis_tuple + axis = normalize_axis_tuple(axis, len(_shape), allow_duplicate=False) + return math.prod(_shape[ax] for ax in axis) def _round_dispatcher(a, decimals=None, out=None): diff --git a/blimgui/dist64/numpy/_core/fromnumeric.pyi b/blimgui/dist64/numpy/_core/fromnumeric.pyi index 31f763c..14ad5ff 100644 --- a/blimgui/dist64/numpy/_core/fromnumeric.pyi +++ b/blimgui/dist64/numpy/_core/fromnumeric.pyi @@ -1,62 +1,63 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import ( Any, Literal, + Never, Protocol, SupportsIndex, TypeAlias, + TypedDict, TypeVar, + Unpack, overload, type_check_only, ) -from _typeshed import Incomplete -from typing_extensions import Never, deprecated - import numpy as np from numpy import ( - number, - uint64, - int_, - int64, - intp, - float16, - floating, - complexfloating, - timedelta64, - object_, - generic, - _AnyShapeType, - _OrderKACF, - _OrderACF, + 
_AnyShapeT, + _CastingKind, _ModeKind, + _OrderACF, + _OrderKACF, _PartitionKind, _SortKind, _SortSide, - _CastingKind, + complexfloating, + float16, + floating, + generic, + int64, + int_, + intp, + object_, + timedelta64, + uint64, ) from numpy._globals import _NoValueType from numpy._typing import ( - DTypeLike, - _DTypeLike, ArrayLike, - _ArrayLike, + DTypeLike, NDArray, - _NestedSequence, - _ShapeLike, + _AnyShape, + _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeUInt_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, _ArrayLikeInt, _ArrayLikeInt_co, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, _ArrayLikeObject_co, - _IntLike_co, + _ArrayLikeUInt_co, _BoolLike_co, _ComplexLike_co, + _DTypeLike, + _IntLike_co, + _NestedSequence, _NumberLike_co, _ScalarLike_co, + _ShapeLike, ) __all__ = [ @@ -106,55 +107,65 @@ __all__ = [ "var", ] -_SCT = TypeVar("_SCT", bound=generic) -_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_) +_ScalarT = TypeVar("_ScalarT", bound=generic) +_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) -_ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) @type_check_only -class _SupportsShape(Protocol[_ShapeType_co]): +class _SupportsShape(Protocol[_ShapeT_co]): # NOTE: it matters that `self` is positional only @property - def shape(self, /) -> _ShapeType_co: ... + def shape(self, /) -> _ShapeT_co: ... + +@type_check_only +class _UFuncKwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + order: _OrderKACF + subok: bool + signature: str | tuple[str | None, ...] 
+ casting: _CastingKind # a "sequence" that isn't a string, bytes, bytearray, or memoryview _T = TypeVar("_T") _PyArray: TypeAlias = list[_T] | tuple[_T, ...] # `int` also covers `bool` -_PyScalar: TypeAlias = float | complex | bytes | str +_PyScalar: TypeAlias = complex | bytes | str +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def take( - a: _ArrayLike[_SCT], + a: _ArrayLike[_ScalarT], indices: _IntLike_co, - axis: None = ..., - out: None = ..., - mode: _ModeKind = ..., -) -> _SCT: ... + axis: None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> _ScalarT: ... @overload def take( a: ArrayLike, indices: _IntLike_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> Any: ... @overload def take( - a: _ArrayLike[_SCT], + a: _ArrayLike[_ScalarT], indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., -) -> NDArray[_SCT]: ... + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[_ScalarT]: ... @overload def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload def take( @@ -162,45 +173,45 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... 
@overload def reshape( # shape: index - a: _ArrayLike[_SCT], + a: _ArrayLike[_ScalarT], /, shape: SupportsIndex, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ... +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... @overload -def reshape( # shape: (int, ...) @ _AnyShapeType - a: _ArrayLike[_SCT], +def reshape( # shape: (int, ...) @ _AnyShapeT + a: _ArrayLike[_ScalarT], /, - shape: _AnyShapeType, + shape: _AnyShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeType, np.dtype[_SCT]]: ... +) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... @overload # shape: Sequence[index] def reshape( - a: _ArrayLike[_SCT], + a: _ArrayLike[_ScalarT], /, shape: Sequence[SupportsIndex], order: _OrderACF = "C", *, copy: bool | None = None, -) -> NDArray[_SCT]: ... +) -> NDArray[_ScalarT]: ... @overload # shape: index def reshape( a: ArrayLike, @@ -209,16 +220,16 @@ def reshape( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +) -> np.ndarray[tuple[int], np.dtype]: ... @overload -def reshape( # shape: (int, ...) @ _AnyShapeType +def reshape( # shape: (int, ...) @ _AnyShapeT a: ArrayLike, /, - shape: _AnyShapeType, + shape: _AnyShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeType, np.dtype[Any]]: ... +) -> np.ndarray[_AnyShapeT, np.dtype]: ... @overload # shape: Sequence[index] def reshape( a: ArrayLike, @@ -228,109 +239,103 @@ def reshape( *, copy: bool | None = None, ) -> NDArray[Any]: ... -@overload -@deprecated( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", -) -def reshape( - a: ArrayLike, - /, - shape: None = None, - order: _OrderACF = "C", - *, - newshape: _ShapeLike, - copy: bool | None = None, -) -> NDArray[Any]: ... 
@overload def choose( a: _IntLike_co, choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> Any: ... @overload def choose( a: _ArrayLikeInt_co, - choices: _ArrayLike[_SCT], - out: None = ..., - mode: _ModeKind = ..., -) -> NDArray[_SCT]: ... + choices: _ArrayLike[_ScalarT], + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[_ScalarT]: ... @overload def choose( a: _ArrayLikeInt_co, choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload def choose( a: _ArrayLikeInt_co, choices: ArrayLike, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... +# keep in sync with `ma.core.repeat` +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> NDArray[_ScalarT]: ... @overload def repeat( - a: _ArrayLike[_SCT], + a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., -) -> NDArray[_SCT]: ... + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[Any]]: ... @overload def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex, ) -> NDArray[Any]: ... +# def put( a: NDArray[Any], ind: _ArrayLikeInt_co, v: ArrayLike, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> None: ... +# keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` @overload -def swapaxes( - a: _ArrayLike[_SCT], - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[_SCT]: ... +def swapaxes(a: _ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArrayT: ... @overload -def swapaxes( - a: ArrayLike, - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[Any]: ... 
+def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... @overload def transpose( - a: _ArrayLike[_SCT], - axes: _ShapeLike | None = ... -) -> NDArray[_SCT]: ... + a: _ArrayLike[_ScalarT], + axes: _ShapeLike | None = None, +) -> NDArray[_ScalarT]: ... @overload def transpose( a: ArrayLike, - axes: _ShapeLike | None = ... + axes: _ShapeLike | None = None, ) -> NDArray[Any]: ... @overload -def matrix_transpose(x: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... @overload def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... # @overload def partition( - a: _ArrayLike[_SCT], + a: _ArrayLike[_ScalarT], kth: _ArrayLikeInt, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: None = None, -) -> NDArray[_SCT]: ... +) -> NDArray[_ScalarT]: ... @overload def partition( a: _ArrayLike[np.void], @@ -360,166 +365,170 @@ def argpartition( # @overload def sort( - a: _ArrayLike[_SCT], - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., -) -> NDArray[_SCT]: ... + stable: bool | None = None, +) -> NDArray[_ScalarT]: ... @overload def sort( a: ArrayLike, - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., + stable: bool | None = None, ) -> NDArray[Any]: ... 
def argsort( a: ArrayLike, - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., + stable: bool | None = None, ) -> NDArray[intp]: ... @overload def argmax( a: ArrayLike, - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmax( a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def argmax( a: ArrayLike, axis: SupportsIndex | None, - out: _ArrayT, + out: _BoolOrIntArrayT, *, - keepdims: bool = ..., -) -> _ArrayT: ... + keepdims: bool | _NoValueType = ..., +) -> _BoolOrIntArrayT: ... @overload def argmax( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - out: _ArrayT, - keepdims: bool = ..., -) -> _ArrayT: ... + out: _BoolOrIntArrayT, + keepdims: bool | _NoValueType = ..., +) -> _BoolOrIntArrayT: ... @overload def argmin( a: ArrayLike, - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmin( a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def argmin( a: ArrayLike, axis: SupportsIndex | None, - out: _ArrayT, + out: _BoolOrIntArrayT, *, - keepdims: bool = ..., -) -> _ArrayT: ... + keepdims: bool | _NoValueType = ..., +) -> _BoolOrIntArrayT: ... 
@overload def argmin( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - out: _ArrayT, - keepdims: bool = ..., -) -> _ArrayT: ... + out: _BoolOrIntArrayT, + keepdims: bool | _NoValueType = ..., +) -> _BoolOrIntArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def searchsorted( a: ArrayLike, v: _ScalarLike_co, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., # 1D int array + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> intp: ... @overload def searchsorted( a: ArrayLike, v: ArrayLike, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., # 1D int array + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... -# +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def resize(a: _ArrayLike[_SCT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ... +def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... @overload -def resize(a: _ArrayLike[_SCT], new_shape: _AnyShapeType) -> np.ndarray[_AnyShapeType, np.dtype[_SCT]]: ... +def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... @overload -def resize(a: _ArrayLike[_SCT], new_shape: _ShapeLike) -> NDArray[_SCT]: ... +def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ... @overload -def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... @overload -def resize(a: ArrayLike, new_shape: _AnyShapeType) -> np.ndarray[_AnyShapeType, np.dtype[Any]]: ... 
+def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ... @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def squeeze( - a: _SCT, - axis: _ShapeLike | None = ..., -) -> _SCT: ... + a: _ScalarT, + axis: _ShapeLike | None = None, +) -> _ScalarT: ... @overload def squeeze( - a: _ArrayLike[_SCT], - axis: _ShapeLike | None = ..., -) -> NDArray[_SCT]: ... + a: _ArrayLike[_ScalarT], + axis: _ShapeLike | None = None, +) -> NDArray[_ScalarT]: ... @overload def squeeze( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, ) -> NDArray[Any]: ... +# keep in sync with `ma.core.diagonal` @overload def diagonal( - a: _ArrayLike[_SCT], - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., # >= 2D array -) -> NDArray[_SCT]: ... + a: _ArrayLike[_ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array +) -> NDArray[_ScalarT]: ... @overload def diagonal( a: ArrayLike, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., # >= 2D array + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array ) -> NDArray[Any]: ... +# keep in sync with `ma.core.trace` @overload def trace( a: ArrayLike, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, ) -> Any: ... @overload def trace( @@ -527,24 +536,24 @@ def trace( offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... 
@overload def trace( a: ArrayLike, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @overload -def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = "C") -> _Array1D[_SCT]: ... +def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ... @overload def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ... @overload @@ -552,24 +561,21 @@ def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np @overload def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ... @overload -def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ... +def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | Any]: ... @overload -def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ... +def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | Any]: ... @overload -def ravel( - a: complex | _NestedSequence[complex], - order: _OrderKACF = "C", -) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ... +def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ... @overload -def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype[Any]]: ... +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... -def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ... 
+def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], ...]: ... # this prevents `Any` from being returned with Pyright @overload -def shape(a: _SupportsShape[Never]) -> tuple[int, ...]: ... +def shape(a: _SupportsShape[Never]) -> _AnyShape: ... @overload -def shape(a: _SupportsShape[_ShapeType]) -> _ShapeType: ... +def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ... @overload def shape(a: _PyScalar) -> tuple[()]: ... # `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are @@ -582,21 +588,21 @@ def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... @overload def shape(a: memoryview | bytearray) -> tuple[int]: ... @overload -def shape(a: ArrayLike) -> tuple[int, ...]: ... +def shape(a: ArrayLike) -> _AnyShape: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array - a: _ArrayLike[_SCT], - axis: SupportsIndex | None = ..., - out: None = ..., -) -> NDArray[_SCT]: ... + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = None, + out: None = None, +) -> NDArray[_ScalarT]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload def compress( @@ -609,74 +615,59 @@ def compress( def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, out: _ArrayT, ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def clip( - a: _SCT, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: None = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., -) -> _SCT: ... 
+ a: _ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ScalarT: ... @overload def clip( a: _ScalarLike_co, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: None = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload def clip( - a: _ArrayLike[_SCT], - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: None = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., -) -> NDArray[_SCT]: ... + a: _ArrayLike[_ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> NDArray[_ScalarT]: ... 
@overload def clip( a: ArrayLike, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: None = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[Any]: ... @overload def clip( @@ -685,129 +676,134 @@ def clip( a_max: ArrayLike | None, out: _ArrayT, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: DTypeLike = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> _ArrayT: ... @overload def clip( a: ArrayLike, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: ArrayLike = ..., - *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: DTypeLike, - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: _ArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ArrayT: ... 
+@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload def sum( - a: _ArrayLike[_SCT], - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + a: _ArrayLike[_ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... @overload def sum( - a: _ArrayLike[_SCT], - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT | NDArray[_SCT]: ... + a: _ArrayLike[_ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... @overload def sum( a: ArrayLike, axis: None, - dtype: _DTypeLike[_SCT], - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... @overload def sum( a: ArrayLike, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... 
+ dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... @overload def sum( a: ArrayLike, axis: _ShapeLike | None, - dtype: _DTypeLike[_SCT], - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT | NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... @overload def sum( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT | NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... @overload def sum( a: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def sum( a: ArrayLike, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
@overload def sum( a: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# keep in sync with `any` @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -816,7 +812,7 @@ def all( ) -> np.bool: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -825,7 +821,7 @@ def all( ) -> Incomplete: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -834,7 +830,7 @@ def all( ) -> _ArrayT: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -842,9 +838,10 @@ def all( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# keep in sync with `all` @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -853,7 +850,7 @@ def any( ) -> np.bool: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -862,7 +859,7 @@ def any( ) -> Incomplete: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -871,7 +868,7 @@ def any( ) -> _ArrayT: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] 
| None = None, *, out: _ArrayT, @@ -879,312 +876,313 @@ def any( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# @overload def cumsum( - a: _ArrayLike[_SCT], - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., -) -> NDArray[_SCT]: ... + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[_ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, axis: SupportsIndex | None, - dtype: _DTypeLike[_SCT], - out: None = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, +) -> NDArray[_ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, +) -> NDArray[_ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, axis: SupportsIndex | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload def cumulative_sum( - x: _ArrayLike[_SCT], + x: _ArrayLike[_ScalarT], /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., -) -> NDArray[_SCT]: ... 
+ axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[_ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: _DTypeLike[_SCT], - out: None = ..., - include_initial: bool = ..., -) -> NDArray[_SCT]: ... + axis: SupportsIndex | None = None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[_ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, out: _ArrayT, - include_initial: bool = ..., + include_initial: bool = False, ) -> _ArrayT: ... @overload def ptp( - a: _ArrayLike[_SCT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., -) -> _SCT: ... + a: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... @overload def ptp( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def ptp( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... 
@overload def ptp( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... @overload def amax( - a: _ArrayLike[_SCT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + a: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... @overload def amax( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def amax( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def amax( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def amin( - a: _ArrayLike[_SCT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... 
+ a: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... @overload def amin( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def amin( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def amin( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. # The only requirement is that it is compatible # with the `.__mul__()` method(s) of the passed array's elements. - # Note that the same situation holds for all wrappers around # `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). 
+# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def prod( a: _ArrayLikeBool_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int_: ... @overload def prod( a: _ArrayLikeUInt_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> uint64: ... @overload def prod( a: _ArrayLikeInt_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int64: ... @overload def prod( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> floating[Any]: ... + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> floating: ... 
@overload def prod( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> complexfloating[Any, Any]: ... + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> complexfloating: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_SCT], - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... 
@overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def prod( @@ -1192,232 +1190,235 @@ def prod( axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumprod( a: _ArrayLikeBool_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int_]: ... @overload def cumprod( a: _ArrayLikeUInt_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[uint64]: ... 
@overload def cumprod( a: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int64]: ... @overload def cumprod( a: _ArrayLikeFloat_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., -) -> NDArray[floating[Any]]: ... + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[floating]: ... @overload def cumprod( a: _ArrayLikeComplex_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[complexfloating]: ... @overload def cumprod( a: _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[object_]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, - dtype: _DTypeLike[_SCT], - out: None = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, +) -> NDArray[_ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + out: None = None, +) -> NDArray[_ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... 
@overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumulative_prod( x: _ArrayLikeBool_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[int_]: ... @overload def cumulative_prod( x: _ArrayLikeUInt_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[uint64]: ... @overload def cumulative_prod( x: _ArrayLikeInt_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[int64]: ... @overload def cumulative_prod( x: _ArrayLikeFloat_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., -) -> NDArray[floating[Any]]: ... + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[floating]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[complexfloating]: ... 
@overload def cumulative_prod( x: _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[object_]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: _DTypeLike[_SCT], - out: None = ..., - include_initial: bool = ..., -) -> NDArray[_SCT]: ... + axis: SupportsIndex | None = None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[_ScalarT]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, out: _ArrayT, - include_initial: bool = ..., + include_initial: bool = False, ) -> _ArrayT: ... def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: int | None = ...) -> int: ... +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = None) -> int: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def around( a: _BoolLike_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> float16: ... @overload def around( - a: _SCT_uifcO, - decimals: SupportsIndex = ..., - out: None = ..., -) -> _SCT_uifcO: ... + a: _NumberOrObjectT, + decimals: SupportsIndex = 0, + out: None = None, +) -> _NumberOrObjectT: ... 
@overload def around( a: _ComplexLike_co | object_, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> Any: ... @overload def around( a: _ArrayLikeBool_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[float16]: ... @overload def around( - a: _ArrayLike[_SCT_uifcO], - decimals: SupportsIndex = ..., - out: None = ..., -) -> NDArray[_SCT_uifcO]: ... + a: _ArrayLike[_NumberOrObjectT], + decimals: SupportsIndex = 0, + out: None = None, +) -> NDArray[_NumberOrObjectT]: ... @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[Any]: ... @overload def around( @@ -1428,37 +1429,38 @@ def around( @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., + decimals: SupportsIndex = 0, *, out: _ArrayT, ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def mean( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> floating[Any]: ... +) -> floating: ... @overload def mean( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> complexfloating[Any]: ... +) -> complexfloating: ... 
@overload def mean( a: _ArrayLike[np.timedelta64], - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1467,7 +1469,7 @@ def mean( def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ..., *, @@ -1476,8 +1478,8 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ..., @@ -1487,58 +1489,58 @@ def mean( def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_SCT], - out: None = ..., + dtype: _DTypeLike[_ScalarT], + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _SCT: ... +) -> _ScalarT: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., + dtype: _DTypeLike[_ScalarT], + out: None = None, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _SCT: ... +) -> _ScalarT: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], out: None, keepdims: Literal[True, 1], *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> NDArray[_SCT]: ... +) -> NDArray[_ScalarT]: ... 
@overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: _DTypeLike[_SCT], - out: None = ..., + dtype: _DTypeLike[_ScalarT], + out: None = None, *, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _SCT | NDArray[_SCT]: ... +) -> _ScalarT | NDArray[_ScalarT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., + dtype: _DTypeLike[_ScalarT], + out: None = None, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _SCT | NDArray[_SCT]: ... +) -> _ScalarT | NDArray[_ScalarT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1547,24 +1549,24 @@ def mean( @overload def std( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> floating[Any]: ... +) -> floating: ... 
@overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1574,36 +1576,36 @@ def std( def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_SCT], - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + dtype: _DTypeLike[_ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _SCT: ... +) -> _ScalarT: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + dtype: _DTypeLike[_ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _SCT: ... +) -> _ScalarT: ... 
@overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1613,10 +1615,10 @@ def std( def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, - ddof: float = ..., - keepdims: bool = ..., + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1625,12 +1627,12 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, - ddof: float = ..., - keepdims: bool = ..., + ddof: float = 0, + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -1639,24 +1641,24 @@ def std( @overload def var( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> floating[Any]: ... +) -> floating: ... 
@overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1666,36 +1668,36 @@ def var( def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_SCT], - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + dtype: _DTypeLike[_ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _SCT: ... +) -> _ScalarT: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, - dtype: _DTypeLike[_SCT], - out: None = ..., - ddof: float = ..., - keepdims: Literal[False] = ..., + dtype: _DTypeLike[_ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _SCT: ... +) -> _ScalarT: ... 
@overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1705,10 +1707,10 @@ def var( def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, - ddof: float = ..., - keepdims: bool = ..., + ddof: float = 0, + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1717,12 +1719,12 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, - ddof: float = ..., - keepdims: bool = ..., + ddof: float = 0, + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., diff --git a/blimgui/dist64/numpy/_core/function_base.py b/blimgui/dist64/numpy/_core/function_base.py index 25724a4..9574ee4 100644 --- a/blimgui/dist64/numpy/_core/function_base.py +++ b/blimgui/dist64/numpy/_core/function_base.py @@ -1,14 +1,16 @@ import functools -import warnings +import inspect import operator import types +import warnings import numpy as np -from . 
import numeric as _nx -from .numeric import result_type, nan, asanyarray, ndim -from numpy._core.multiarray import add_docstring -from numpy._core._multiarray_umath import _array_converter from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.multiarray import add_docstring + +from . import numeric as _nx +from .numeric import asanyarray, nan, ndim, result_type __all__ = ['logspace', 'linspace', 'geomspace'] @@ -121,7 +123,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, num = operator.index(num) if num < 0: raise ValueError( - "Number of samples, %s, must be non-negative." % num + f"Number of samples, {num}, must be non-negative." ) div = (num - 1) if endpoint else num @@ -157,11 +159,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, y *= delta else: y = y * delta + elif _mult_inplace: + y *= step else: - if _mult_inplace: - y *= step - else: - y = y * step + y = y * step else: # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0) # have an undefined step @@ -473,11 +474,13 @@ def _needs_add_docstring(obj): def _add_docstring(obj, doc, warn_on_python): if warn_on_python and not _needs_add_docstring(obj): warnings.warn( - "add_newdoc was used on a pure-python object {}. " - "Prefer to attach it directly to the source." - .format(obj), + f"add_newdoc was used on a pure-python object {obj}. " + "Prefer to attach it directly to the source.", UserWarning, stacklevel=3) + + doc = inspect.cleandoc(doc) + try: add_docstring(obj, doc) except Exception: @@ -495,10 +498,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): ---------- place : str The absolute name of the module to import from - obj : str or None + obj : str | None The name of the object to add documentation to, typically a class or function name. 
- doc : {str, Tuple[str, str], List[Tuple[str, str]]} + doc : str | tuple[str, str] | list[tuple[str, str]] If a string, the documentation to apply to `obj` If a tuple, then the first element is interpreted as an attribute @@ -535,12 +538,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): if isinstance(doc, str): if "${ARRAY_FUNCTION_LIKE}" in doc: doc = overrides.get_array_function_like_doc(new, doc) - _add_docstring(new, doc.strip(), warn_on_python) + _add_docstring(new, doc, warn_on_python) elif isinstance(doc, tuple): attr, docstring = doc - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + _add_docstring(getattr(new, attr), docstring, warn_on_python) elif isinstance(doc, list): for attr, docstring in doc: - _add_docstring( - getattr(new, attr), docstring.strip(), warn_on_python - ) + _add_docstring(getattr(new, attr), docstring, warn_on_python) diff --git a/blimgui/dist64/numpy/_core/function_base.pyi b/blimgui/dist64/numpy/_core/function_base.pyi index fc8f1d0..3e62318 100644 --- a/blimgui/dist64/numpy/_core/function_base.pyi +++ b/blimgui/dist64/numpy/_core/function_base.pyi @@ -1,48 +1,58 @@ -from typing import ( - Literal as L, - overload, - Any, - SupportsIndex, - TypeVar, -) +from _typeshed import Incomplete +from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload -from numpy import floating, complexfloating, generic +import numpy as np from numpy._typing import ( - NDArray, DTypeLike, - _DTypeLike, - _ArrayLikeFloat_co, + NDArray, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _DTypeLike, ) +from numpy._typing._array_like import _DualArrayLike + +__all__ = ["geomspace", "linspace", "logspace"] -__all__ = ["logspace", "linspace", "geomspace"] +_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_SCT = TypeVar("_SCT", bound=generic) +_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + 
num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., -) -> NDArray[floating]: ... + device: L["cpu"] | None = None, +) -> NDArray[np.floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., -) -> NDArray[complexfloating]: ... + device: L["cpu"] | None = None, +) -> NDArray[np.complexfloating]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -50,104 +60,126 @@ def linspace( num: SupportsIndex, endpoint: bool, retstep: L[False], - dtype: _DTypeLike[_SCT], - axis: SupportsIndex = ..., + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, *, - dtype: _DTypeLike[_SCT], - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., -) -> NDArray[_SCT]: ... 
+ dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.float64], np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: None = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., -) -> tuple[NDArray[floating], floating]: ... + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.floating], np.floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: None = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., -) -> tuple[NDArray[complexfloating], complexfloating]: ... + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... 
@overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: _DTypeLike[_SCT], - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., -) -> tuple[NDArray[_SCT], _SCT]: ... + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[_ScalarT], _ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., -) -> tuple[NDArray[Any], Any]: ... + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[Incomplete], Incomplete]: ... +@overload +def logspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToArrayFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... @overload def logspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeFloat_co = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[floating]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeFloat_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[complexfloating]: ... 
+ num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -155,81 +187,90 @@ def logspace( num: SupportsIndex, endpoint: bool, base: _ArrayLikeComplex_co, - dtype: _DTypeLike[_SCT], - axis: SupportsIndex = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, *, - dtype: _DTypeLike[_SCT], - axis: SupportsIndex = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., -) -> NDArray[Any]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... +@overload +def geomspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... @overload def geomspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[floating]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... 
@overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[complexfloating]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, - dtype: _DTypeLike[_SCT], - axis: SupportsIndex = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, - dtype: _DTypeLike[_SCT], - axis: SupportsIndex = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., -) -> NDArray[Any]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... def add_newdoc( place: str, obj: str, doc: str | tuple[str, str] | list[tuple[str, str]], - warn_on_python: bool = ..., + warn_on_python: bool = True, ) -> None: ... diff --git a/blimgui/dist64/numpy/_core/getlimits.py b/blimgui/dist64/numpy/_core/getlimits.py index 7e8778e..fa381c6 100644 --- a/blimgui/dist64/numpy/_core/getlimits.py +++ b/blimgui/dist64/numpy/_core/getlimits.py @@ -3,15 +3,15 @@ """ __all__ = ['finfo', 'iinfo'] +import math import types import warnings +from functools import cached_property -from .._utils import set_module -from ._machar import MachAr -from . import numeric -from . 
import numerictypes as ntypes -from .numeric import array, inf, nan -from .umath import log10, exp2, nextafter, isnan +from numpy._utils import set_module + +from . import numeric, numerictypes as ntypes +from ._multiarray_umath import _populate_finfo_constants def _fr0(a): @@ -30,96 +30,6 @@ def _fr1(a): return a -class MachArLike: - """ Object to simulate MachAr instance """ - def __init__(self, ftype, *, eps, epsneg, huge, tiny, - ibeta, smallest_subnormal=None, **kwargs): - self.params = _MACHAR_PARAMS[ftype] - self.ftype = ftype - self.title = self.params['title'] - # Parameter types same as for discovered MachAr object. - if not smallest_subnormal: - self._smallest_subnormal = nextafter( - self.ftype(0), self.ftype(1), dtype=self.ftype) - else: - self._smallest_subnormal = smallest_subnormal - self.epsilon = self.eps = self._float_to_float(eps) - self.epsneg = self._float_to_float(epsneg) - self.xmax = self.huge = self._float_to_float(huge) - self.xmin = self._float_to_float(tiny) - self.smallest_normal = self.tiny = self._float_to_float(tiny) - self.ibeta = self.params['itype'](ibeta) - self.__dict__.update(kwargs) - self.precision = int(-log10(self.eps)) - self.resolution = self._float_to_float( - self._float_conv(10) ** (-self.precision)) - self._str_eps = self._float_to_str(self.eps) - self._str_epsneg = self._float_to_str(self.epsneg) - self._str_xmin = self._float_to_str(self.xmin) - self._str_xmax = self._float_to_str(self.xmax) - self._str_resolution = self._float_to_str(self.resolution) - self._str_smallest_normal = self._float_to_str(self.xmin) - - @property - def smallest_subnormal(self): - """Return the value for the smallest subnormal. - - Returns - ------- - smallest_subnormal : float - value for the smallest subnormal. - - Warns - ----- - UserWarning - If the calculated value for the smallest subnormal is zero. - """ - # Check that the calculated value is not zero, in case it raises a - # warning. 
- value = self._smallest_subnormal - if self.ftype(0) == value: - warnings.warn( - 'The value of the smallest subnormal for {} type ' - 'is zero.'.format(self.ftype), UserWarning, stacklevel=2) - - return self._float_to_float(value) - - @property - def _str_smallest_subnormal(self): - """Return the string representation of the smallest subnormal.""" - return self._float_to_str(self.smallest_subnormal) - - def _float_to_float(self, value): - """Converts float to float. - - Parameters - ---------- - value : float - value to be converted. - """ - return _fr1(self._float_conv(value)) - - def _float_conv(self, value): - """Converts float to conv. - - Parameters - ---------- - value : float - value to be converted. - """ - return array([value], self.ftype) - - def _float_to_str(self, value): - """Converts float to str. - - Parameters - ---------- - value : float - value to be converted. - """ - return self.params['fmt'] % array(_fr0(value)[0], self.ftype) - - _convert_to_float = { ntypes.csingle: ntypes.single, ntypes.complex128: ntypes.float64, @@ -129,256 +39,22 @@ def _float_to_str(self, value): # Parameters for creating MachAr / MachAr-like objects _title_fmt = 'numpy {} precision floating point number' _MACHAR_PARAMS = { - ntypes.double: dict( - itype = ntypes.int64, - fmt = '%24.16e', - title = _title_fmt.format('double')), - ntypes.single: dict( - itype = ntypes.int32, - fmt = '%15.7e', - title = _title_fmt.format('single')), - ntypes.longdouble: dict( - itype = ntypes.longlong, - fmt = '%s', - title = _title_fmt.format('long double')), - ntypes.half: dict( - itype = ntypes.int16, - fmt = '%12.5e', - title = _title_fmt.format('half'))} - -# Key to identify the floating point type. Key is result of -# -# ftype = np.longdouble # or float64, float32, etc. -# v = (ftype(-1.0) / ftype(10.0)) -# v.view(v.dtype.newbyteorder('<')).tobytes() -# -# Uses division to work around deficiencies in strtold on some platforms. 
-# See: -# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure - -_KNOWN_TYPES = {} -def _register_type(machar, bytepat): - _KNOWN_TYPES[bytepat] = machar - - -_float_ma = {} - - -def _register_known_types(): - # Known parameters for float16 - # See docstring of MachAr class for description of parameters. - f16 = ntypes.float16 - float16_ma = MachArLike(f16, - machep=-10, - negep=-11, - minexp=-14, - maxexp=16, - it=10, - iexp=5, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f16(-10)), - epsneg=exp2(f16(-11)), - huge=f16(65504), - tiny=f16(2 ** -14)) - _register_type(float16_ma, b'f\xae') - _float_ma[16] = float16_ma - - # Known parameters for float32 - f32 = ntypes.float32 - float32_ma = MachArLike(f32, - machep=-23, - negep=-24, - minexp=-126, - maxexp=128, - it=23, - iexp=8, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f32(-23)), - epsneg=exp2(f32(-24)), - huge=f32((1 - 2 ** -24) * 2**128), - tiny=exp2(f32(-126))) - _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') - _float_ma[32] = float32_ma - - # Known parameters for float64 - f64 = ntypes.float64 - epsneg_f64 = 2.0 ** -53.0 - tiny_f64 = 2.0 ** -1022.0 - float64_ma = MachArLike(f64, - machep=-52, - negep=-53, - minexp=-1022, - maxexp=1024, - it=52, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=2.0 ** -52.0, - epsneg=epsneg_f64, - huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), - tiny=tiny_f64) - _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') - _float_ma[64] = float64_ma - - # Known parameters for IEEE 754 128-bit binary float - ld = ntypes.longdouble - epsneg_f128 = exp2(ld(-113)) - tiny_f128 = exp2(ld(-16382)) - # Ignore runtime error when this is not f128 - with numeric.errstate(all='ignore'): - huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) - float128_ma = MachArLike(ld, - machep=-112, - negep=-113, - minexp=-16382, - maxexp=16384, - it=112, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-112)), - epsneg=epsneg_f128, - huge=huge_f128, - 
tiny=tiny_f128) - # IEEE 754 128-bit binary float - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _float_ma[128] = float128_ma - - # Known parameters for float80 (Intel 80-bit extended precision) - epsneg_f80 = exp2(ld(-64)) - tiny_f80 = exp2(ld(-16382)) - # Ignore runtime error when this is not f80 - with numeric.errstate(all='ignore'): - huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) - float80_ma = MachArLike(ld, - machep=-63, - negep=-64, - minexp=-16382, - maxexp=16384, - it=63, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-63)), - epsneg=epsneg_f80, - huge=huge_f80, - tiny=tiny_f80) - # float80, first 10 bytes containing actual storage - _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') - _float_ma[80] = float80_ma - - # Guessed / known parameters for double double; see: - # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic - # These numbers have the same exponent range as float64, but extended - # number of digits in the significand. - huge_dd = nextafter(ld(inf), ld(0), dtype=ld) - # As the smallest_normal in double double is so hard to calculate we set - # it to NaN. - smallest_normal_dd = nan - # Leave the same value for the smallest subnormal as double - smallest_subnormal_dd = ld(nextafter(0., 1.)) - float_dd_ma = MachArLike(ld, - machep=-105, - negep=-106, - minexp=-1022, - maxexp=1024, - it=105, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-105)), - epsneg=exp2(ld(-106)), - huge=huge_dd, - tiny=smallest_normal_dd, - smallest_subnormal=smallest_subnormal_dd) - # double double; low, high order (e.g. PPC 64) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') - # double double; high, low order (e.g. 
PPC 64 le) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') - _float_ma['dd'] = float_dd_ma - - -def _get_machar(ftype): - """ Get MachAr instance or MachAr-like instance - - Get parameters for floating point type, by first trying signatures of - various known floating point types, then, if none match, attempting to - identify parameters by analysis. - - Parameters - ---------- - ftype : class - Numpy floating point type class (e.g. ``np.float64``) - - Returns - ------- - ma_like : instance of :class:`MachAr` or :class:`MachArLike` - Object giving floating point parameters for `ftype`. - - Warns - ----- - UserWarning - If the binary signature of the float type is not in the dictionary of - known float types. - """ - params = _MACHAR_PARAMS.get(ftype) - if params is None: - raise ValueError(repr(ftype)) - # Detect known / suspected types - # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold - # may be deficient - key = (ftype(-1.0) / ftype(10.)) - key = key.view(key.dtype.newbyteorder("<")).tobytes() - ma_like = None - if ftype == ntypes.longdouble: - # Could be 80 bit == 10 byte extended precision, where last bytes can - # be random garbage. - # Comparing first 10 bytes to pattern first to avoid branching on the - # random garbage. - ma_like = _KNOWN_TYPES.get(key[:10]) - if ma_like is None: - # see if the full key is known. 
- ma_like = _KNOWN_TYPES.get(key) - if ma_like is None and len(key) == 16: - # machine limits could be f80 masquerading as np.float128, - # find all keys with length 16 and make new dict, but make the keys - # only 10 bytes long, the last bytes can be random garbage - _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16} - ma_like = _kt.get(key[:10]) - if ma_like is not None: - return ma_like - # Fall back to parameter discovery - warnings.warn( - f'Signature {key} for {ftype} does not match any known type: ' - 'falling back to type probe function.\n' - 'This warnings indicates broken support for the dtype!', - UserWarning, stacklevel=2) - return _discovered_machar(ftype) - - -def _discovered_machar(ftype): - """ Create MachAr instance with found information on float types - - TODO: MachAr should be retired completely ideally. We currently only - ever use it system with broken longdouble (valgrind, WSL). - """ - params = _MACHAR_PARAMS[ftype] - return MachAr(lambda v: array([v], ftype), - lambda v: _fr0(v.astype(params['itype']))[0], - lambda v: array(_fr0(v)[0], ftype), - lambda v: params['fmt'] % array(_fr0(v)[0], ftype), - params['title']) + ntypes.double: { + 'itype': ntypes.int64, + 'fmt': '%24.16e', + 'title': _title_fmt.format('double')}, + ntypes.single: { + 'itype': ntypes.int32, + 'fmt': '%15.7e', + 'title': _title_fmt.format('single')}, + ntypes.longdouble: { + 'itype': ntypes.longlong, + 'fmt': '%s', + 'title': _title_fmt.format('long double')}, + ntypes.half: { + 'itype': ntypes.int16, + 'fmt': '%12.5e', + 'title': _title_fmt.format('half')}} @set_module('numpy') @@ -413,17 +89,20 @@ class finfo: The largest representable number. maxexp : int The smallest positive power of the base (2) that causes overflow. + Corresponds to the C standard MAX_EXP. min : floating point number of the appropriate type The smallest representable number, typically ``-max``. 
minexp : int The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. + being no leading 0's in the mantissa. Corresponds to the C + standard MIN_EXP - 1. negep : int The exponent that yields `epsneg`. nexp : int The number of bits in the exponent including its sign and bias. nmant : int - The number of bits in the mantissa. + The number of explicit bits in the mantissa (excluding the implicit + leading bit for normalized numbers). precision : int The approximate number of decimal digits to which this kind of float is precise. @@ -464,6 +143,12 @@ class finfo: fill the gap between 0 and ``smallest_normal``. However, subnormal numbers may have significantly reduced precision [2]_. + For ``longdouble``, the representation varies across platforms. On most + platforms it is IEEE 754 binary128 (quad precision) or binary64-extended + (80-bit extended precision). On PowerPC systems, it may use the IBM + double-double format (a pair of float64 values), which has special + characteristics for precision and range. + This function can also be used for complex data types as well. If used, the output will be the same as the corresponding real float type (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). 
@@ -522,7 +207,7 @@ def __new__(cls, dtype): dtypes.append(newdtype) dtype = newdtype if not issubclass(dtype, numeric.inexact): - raise ValueError("data type %r not inexact" % (dtype)) + raise ValueError(f"data type {dtype!r} not inexact") obj = cls._finfo_cache.get(dtype) if obj is not None: return obj @@ -548,77 +233,107 @@ def __new__(cls, dtype): def _init(self, dtype): self.dtype = numeric.dtype(dtype) - machar = _get_machar(dtype) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['resolution', 'epsneg', 'smallest_subnormal']: - setattr(self, word, getattr(machar, word).flat[0]) self.bits = self.dtype.itemsize * 8 - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self._machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - self._str_smallest_normal = machar._str_smallest_normal.strip() - self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() + self._fmt = None + self._repr = None + _populate_finfo_constants(self, self.dtype) return self + @cached_property + def epsneg(self): + # Assume typical floating point logic. Could also use nextafter. 
+ return self.eps / self._radix + + @cached_property + def resolution(self): + return self.dtype.type(10)**-self.precision + + @cached_property + def machep(self): + return int(math.log2(self.eps)) + + @cached_property + def negep(self): + return int(math.log2(self.epsneg)) + + @cached_property + def nexp(self): + # considering all ones (inf/nan) and all zeros (subnormal/zero) + return math.ceil(math.log2(self.maxexp - self.minexp + 2)) + + @cached_property + def iexp(self): + # Calculate exponent bits from it's range: + return math.ceil(math.log2(self.maxexp - self.minexp)) + def __str__(self): + if (fmt := getattr(self, "_fmt", None)) is not None: + return fmt + + def get_str(name, pad=None): + if (val := getattr(self, name, None)) is None: + return "" + if pad is not None: + s = str(val).ljust(pad) + return str(val) + + precision = get_str("precision", 3) + machep = get_str("machep", 6) + negep = get_str("negep", 6) + minexp = get_str("minexp", 6) + maxexp = get_str("maxexp", 6) + resolution = get_str("resolution") + eps = get_str("eps") + epsneg = get_str("epsneg") + tiny = get_str("tiny") + smallest_normal = get_str("smallest_normal") + smallest_subnormal = get_str("smallest_subnormal") + nexp = get_str("nexp", 6) + max_ = get_str("max") + if hasattr(self, "min") and hasattr(self, "max") and -self.min == self.max: + min_ = "-max" + else: + min_ = get_str("min") + fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'precision = %(precision)3s resolution = %(_str_resolution)s\n' - 'machep = %(machep)6s eps = %(_str_eps)s\n' - 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' - 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' - 'maxexp = %(maxexp)6s max = %(_str_max)s\n' - 'nexp = %(nexp)6s min = -max\n' - 'smallest_normal = %(_str_smallest_normal)s ' - 'smallest_subnormal = %(_str_smallest_subnormal)s\n' - '---------------------------------------------------------------\n' - ) - return fmt % 
self.__dict__ + f'Machine parameters for {self.dtype}\n' + f'---------------------------------------------------------------\n' + f'precision = {precision} resolution = {resolution}\n' + f'machep = {machep} eps = {eps}\n' + f'negep = {negep} epsneg = {epsneg}\n' + f'minexp = {minexp} tiny = {tiny}\n' + f'maxexp = {maxexp} max = {max_}\n' + f'nexp = {nexp} min = {min_}\n' + f'smallest_normal = {smallest_normal} ' + f'smallest_subnormal = {smallest_subnormal}\n' + f'---------------------------------------------------------------\n' + ) + self._fmt = fmt + return fmt def __repr__(self): + if (repr_str := getattr(self, "_repr", None)) is not None: + return repr_str + c = self.__class__.__name__ - d = self.__dict__.copy() - d['klass'] = c - return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," - " max=%(_str_max)s, dtype=%(dtype)s)") % d) - @property - def smallest_normal(self): - """Return the value for the smallest normal. + # Use precision+1 digits in exponential notation + fmt_str = _MACHAR_PARAMS.get(self.dtype.type, {}).get('fmt', '%s') + if fmt_str != '%s' and hasattr(self, 'max') and hasattr(self, 'min'): + max_str = (fmt_str % self.max).strip() + min_str = (fmt_str % self.min).strip() + else: + max_str = str(self.max) + min_str = str(self.min) - Returns - ------- - smallest_normal : float - Value for the smallest normal. + resolution_str = str(self.resolution) - Warns - ----- - UserWarning - If the calculated value for the smallest normal is requested for - double-double. - """ - # This check is necessary because the value for smallest_normal is - # platform dependent for longdouble types. 
- if isnan(self._machar.smallest_normal.flat[0]): - warnings.warn( - 'The value of smallest normal is undefined for double double', - UserWarning, stacklevel=2) - return self._machar.smallest_normal.flat[0] + repr_str = (f"{c}(resolution={resolution_str}, min={min_str}," + f" max={max_str}, dtype={self.dtype})") + self._repr = repr_str + return repr_str - @property + @cached_property def tiny(self): """Return the value for tiny, alias of smallest_normal. @@ -703,7 +418,7 @@ def __init__(self, int_type): self.bits = self.dtype.itemsize * 8 self.key = "%s%d" % (self.kind, self.bits) if self.kind not in 'iu': - raise ValueError("Invalid integer data type %r." % (self.kind,)) + raise ValueError(f"Invalid integer data type {self.kind!r}.") @property def min(self): @@ -714,7 +429,7 @@ def min(self): try: val = iinfo._min_vals[self.key] except KeyError: - val = int(-(1 << (self.bits-1))) + val = int(-(1 << (self.bits - 1))) iinfo._min_vals[self.key] = val return val @@ -727,7 +442,7 @@ def max(self): if self.kind == 'u': val = int((1 << self.bits) - 1) else: - val = int((1 << (self.bits-1)) - 1) + val = int((1 << (self.bits - 1)) - 1) iinfo._max_vals[self.key] = val return val diff --git a/blimgui/dist64/numpy/_core/getlimits.pyi b/blimgui/dist64/numpy/_core/getlimits.pyi index 6da4954..babfc44 100644 --- a/blimgui/dist64/numpy/_core/getlimits.pyi +++ b/blimgui/dist64/numpy/_core/getlimits.pyi @@ -1,3 +1,124 @@ -from numpy import finfo, iinfo +from functools import cached_property +from types import GenericAlias +from typing import Final, Generic, Self, overload +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + _CLongDoubleCodes, + _Complex64Codes, + _Complex128Codes, + _DTypeLike, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntPCodes, + _LongDoubleCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, +) __all__ = ["finfo", "iinfo"] + +### + 
+_IntegerT_co = TypeVar("_IntegerT_co", bound=np.integer, default=np.integer, covariant=True) +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=np.floating, covariant=True) + +### + +class iinfo(Generic[_IntegerT_co]): + dtype: np.dtype[_IntegerT_co] + bits: Final[int] + kind: Final[str] + key: Final[str] + + @property + def min(self, /) -> int: ... + @property + def max(self, /) -> int: ... + + # + @overload + def __init__(self, /, int_type: _IntegerT_co | _DTypeLike[_IntegerT_co]) -> None: ... + @overload + def __init__(self: iinfo[np.int_], /, int_type: _IntPCodes | type[int] | int) -> None: ... + @overload + def __init__(self: iinfo[np.int8], /, int_type: _Int8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint8], /, int_type: _UInt8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int16], /, int_type: _Int16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint16], /, int_type: _UInt16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int32], /, int_type: _Int32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint32], /, int_type: _UInt32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int64], /, int_type: _Int64Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint64], /, int_type: _UInt64Codes) -> None: ... + @overload + def __init__(self, /, int_type: str) -> None: ... + + # + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... 
+ +class finfo(Generic[_FloatingT_co]): + dtype: np.dtype[_FloatingT_co] # readonly + eps: _FloatingT_co # readonly + _radix: _FloatingT_co # readonly + smallest_normal: _FloatingT_co # readonly + smallest_subnormal: _FloatingT_co # readonly + max: _FloatingT_co # readonly + min: _FloatingT_co # readonly + + _fmt: str | None # `__str__` cache + _repr: str | None # `__repr__` cache + + bits: Final[int] + maxexp: Final[int] + minexp: Final[int] + nmant: Final[int] + precision: Final[int] + + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + + # + @overload + def __new__(cls, dtype: _FloatingT_co | _DTypeLike[_FloatingT_co]) -> Self: ... + @overload + def __new__(cls, dtype: _Float16Codes) -> finfo[np.float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes | _Complex64Codes | _DTypeLike[np.complex64]) -> finfo[np.float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes | _Complex128Codes | type[complex] | complex) -> finfo[np.float64]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | _CLongDoubleCodes | _DTypeLike[np.clongdouble]) -> finfo[np.longdouble]: ... + @overload + def __new__(cls, dtype: str) -> finfo: ... + + # + @cached_property + def epsneg(self, /) -> _FloatingT_co: ... + @cached_property + def resolution(self, /) -> _FloatingT_co: ... + @cached_property + def machep(self, /) -> int: ... + @cached_property + def negep(self, /) -> int: ... + @cached_property + def nexp(self, /) -> int: ... + @cached_property + def iexp(self, /) -> int: ... + @cached_property + def tiny(self, /) -> _FloatingT_co: ... 
diff --git a/blimgui/dist64/numpy/_core/include/numpy/__multiarray_api.c b/blimgui/dist64/numpy/_core/include/numpy/__multiarray_api.c index 5a46a92..5296adf 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/__multiarray_api.c +++ b/blimgui/dist64/numpy/_core/include/numpy/__multiarray_api.c @@ -227,7 +227,7 @@ void *PyArray_API[] = { NULL, NULL, NULL, - NULL, + (void *) NpyIter_GetTransferFlags, (void *) NpyIter_New, (void *) NpyIter_MultiNew, (void *) NpyIter_AdvancedNew, diff --git a/blimgui/dist64/numpy/_core/include/numpy/__multiarray_api.h b/blimgui/dist64/numpy/_core/include/numpy/__multiarray_api.h index af3e702..5bd89a3 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/__multiarray_api.h +++ b/blimgui/dist64/numpy/_core/include/numpy/__multiarray_api.h @@ -404,6 +404,8 @@ extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; +NPY_NO_EXPORT NPY_ARRAYMETHOD_FLAGS NpyIter_GetTransferFlags \ + (NpyIter *); NPY_NO_EXPORT NpyIter * NpyIter_New \ (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ @@ -1134,6 +1136,12 @@ static int PyArray_RUNTIME_VERSION = 0; #define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) #define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) #define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) + +#if NPY_FEATURE_VERSION >= NPY_2_3_API_VERSION +#define NpyIter_GetTransferFlags \ + (*(NPY_ARRAYMETHOD_FLAGS (*)(NpyIter *)) \ + PyArray_API[223]) +#endif #define NpyIter_New \ (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ PyArray_API[224]) @@ -1485,6 +1493,7 @@ _import_array(void) { int st; PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); + PyObject *c_api; if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { PyErr_Clear(); numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); @@ -1494,7 +1503,7 @@ 
_import_array(void) return -1; } - PyObject *c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); Py_DECREF(numpy); if (c_api == NULL) { return -1; @@ -1578,6 +1587,12 @@ _import_array(void) return 0; } +#if (SWIG_VERSION < 0x040400) +#define _RETURN_VALUE NULL +#else +#define _RETURN_VALUE 0 +#endif + #define import_array() { \ if (_import_array() < 0) { \ PyErr_Print(); \ @@ -1585,7 +1600,7 @@ _import_array(void) PyExc_ImportError, \ "numpy._core.multiarray failed to import" \ ); \ - return NULL; \ + return _RETURN_VALUE; \ } \ } diff --git a/blimgui/dist64/numpy/_core/include/numpy/__ufunc_api.c b/blimgui/dist64/numpy/_core/include/numpy/__ufunc_api.c index 47cd03e..b56b62c 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/__ufunc_api.c +++ b/blimgui/dist64/numpy/_core/include/numpy/__ufunc_api.c @@ -50,5 +50,6 @@ void *PyUFunc_API[] = { (void *) PyUFunc_AddLoopFromSpec, (void *) PyUFunc_AddPromoter, (void *) PyUFunc_AddWrappingLoop, - (void *) PyUFunc_GiveFloatingpointErrors + (void *) PyUFunc_GiveFloatingpointErrors, + (void *) PyUFunc_AddLoopsFromSpecs }; diff --git a/blimgui/dist64/numpy/_core/include/numpy/__ufunc_api.h b/blimgui/dist64/numpy/_core/include/numpy/__ufunc_api.h index 8df9a17..03ad251 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/__ufunc_api.h +++ b/blimgui/dist64/numpy/_core/include/numpy/__ufunc_api.h @@ -87,6 +87,8 @@ NPY_NO_EXPORT int PyUFunc_AddWrappingLoop \ (PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *); NPY_NO_EXPORT int PyUFunc_GiveFloatingpointErrors \ (const char *, int); +NPY_NO_EXPORT int PyUFunc_AddLoopsFromSpecs \ + (PyUFunc_LoopSlot *); #else @@ -249,9 +251,16 @@ static void **PyUFunc_API=NULL; PyUFunc_API[46]) #endif +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION +#define PyUFunc_AddLoopsFromSpecs \ + (*(int (*)(PyUFunc_LoopSlot *)) \ + 
PyUFunc_API[47]) +#endif + static inline int _import_umath(void) { + PyObject *c_api; PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { PyErr_Clear(); @@ -264,7 +273,7 @@ _import_umath(void) return -1; } - PyObject *c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); Py_DECREF(numpy); if (c_api == NULL) { PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); diff --git a/blimgui/dist64/numpy/_core/include/numpy/_numpyconfig.h b/blimgui/dist64/numpy/_core/include/numpy/_numpyconfig.h index 36fb06c..56cb52a 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/_numpyconfig.h +++ b/blimgui/dist64/numpy/_core/include/numpy/_numpyconfig.h @@ -26,7 +26,7 @@ #define NPY_VISIBILITY_HIDDEN #define NPY_ABI_VERSION 0x02000000 -#define NPY_API_VERSION 0x00000013 +#define NPY_API_VERSION 0x00000015 #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS 1 diff --git a/blimgui/dist64/numpy/_core/include/numpy/arrayscalars.h b/blimgui/dist64/numpy/_core/include/numpy/arrayscalars.h index 35ae1ab..61297f8 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/arrayscalars.h +++ b/blimgui/dist64/numpy/_core/include/numpy/arrayscalars.h @@ -173,9 +173,11 @@ typedef struct { #define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) #define PyArrayScalar_FromLong(i) \ ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) do { \ + PyObject *obj = PyArrayScalar_FromLong(i); \ + Py_INCREF(obj); \ + return obj; \ +} while (0) #define PyArrayScalar_RETURN_FALSE \ return Py_INCREF(PyArrayScalar_False), \ PyArrayScalar_False diff --git a/blimgui/dist64/numpy/_core/include/numpy/dtype_api.h 
b/blimgui/dist64/numpy/_core/include/numpy/dtype_api.h index 2beefb5..08a9c9a 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/dtype_api.h +++ b/blimgui/dist64/numpy/_core/include/numpy/dtype_api.h @@ -99,6 +99,11 @@ typedef enum { } NPY_ARRAYMETHOD_FLAGS; +typedef enum { + /* Casting via same_value logic */ + NPY_SAME_VALUE_CONTEXT_FLAG=1, +} NPY_ARRAYMETHOD_CONTEXT_FLAGS; + typedef struct PyArrayMethod_Context_tag { /* The caller, which is typically the original ufunc. May be NULL */ PyObject *caller; @@ -107,7 +112,22 @@ typedef struct PyArrayMethod_Context_tag { /* Operand descriptors, filled in by resolve_descriptors */ PyArray_Descr *const *descriptors; + #if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION + void * _reserved; + /* + * Optional flag to pass information into the inner loop + * NPY_ARRAYMETHOD_CONTEXT_FLAGS + */ + uint64_t flags; + + /* + * Optional run-time parameters to pass to the loop (currently used in sorting). + * Fixed parameters are expected to be passed via auxdata. + */ + void *parameters; + /* Structure may grow (this is harmless for DType authors) */ + #endif } PyArrayMethod_Context; @@ -125,6 +145,13 @@ typedef struct { } PyArrayMethod_Spec; +// This is used for the convenience function `PyUFunc_AddLoopsFromSpecs` +typedef struct { + const char *name; + PyArrayMethod_Spec *spec; +} PyUFunc_LoopSlot; + + /* * ArrayMethod slots * ----------------- @@ -144,7 +171,6 @@ typedef struct { #define NPY_METH_contiguous_indexed_loop 9 #define _NPY_METH_static_data 10 - /* * The resolve descriptors function, must be able to handle NULL values for * all output (but not input) `given_descrs` and fill `loop_descrs`. 
@@ -367,6 +393,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, #define NPY_DT_get_clear_loop 9 #define NPY_DT_get_fill_zero_loop 10 #define NPY_DT_finalize_descr 11 +#define NPY_DT_get_constant 12 // These PyArray_ArrFunc slots will be deprecated and replaced eventually // getitem and setitem can be defined as a performance optimization; @@ -377,7 +404,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, // used to separate dtype slots from arrfuncs slots // intended only for internal use but defined here for clarity -#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10) +#define _NPY_DT_ARRFUNCS_OFFSET (1 << 11) // Cast is disabled // #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET @@ -467,6 +494,42 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype); */ typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype); +/* + * Constants that can be queried and used e.g. by reduce identies defaults. + * These are also used to expose .finfo and .iinfo for example. + */ +/* Numerical constants */ +#define NPY_CONSTANT_zero 1 +#define NPY_CONSTANT_one 2 +#define NPY_CONSTANT_all_bits_set 3 +#define NPY_CONSTANT_maximum_finite 4 +#define NPY_CONSTANT_minimum_finite 5 +#define NPY_CONSTANT_inf 6 +#define NPY_CONSTANT_ninf 7 +#define NPY_CONSTANT_nan 8 +#define NPY_CONSTANT_finfo_radix 9 +#define NPY_CONSTANT_finfo_eps 10 +#define NPY_CONSTANT_finfo_smallest_normal 11 +#define NPY_CONSTANT_finfo_smallest_subnormal 12 +/* Constants that are always of integer type, value is `npy_intp/Py_ssize_t` */ +#define NPY_CONSTANT_finfo_nmant (1 << 16) + 0 +#define NPY_CONSTANT_finfo_min_exp (1 << 16) + 1 +#define NPY_CONSTANT_finfo_max_exp (1 << 16) + 2 +#define NPY_CONSTANT_finfo_decimal_digits (1 << 16) + 3 + +/* It may make sense to continue with other constants here, e.g. pi, etc? */ + +/* + * Function to get a constant value for the dtype. 
Data may be unaligned, the + * function is always called with the GIL held. + * + * @param descr The dtype instance (i.e. self) + * @param ID The ID of the constant to get. + * @param data Pointer to the data to be written too, may be unaligned. + * @returns 1 on success, 0 if the constant is not available, or -1 with an error set. + */ +typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *data); + /* * TODO: These two functions are currently only used for experimental DType * API support. Their relation should be "reversed": NumPy should @@ -477,4 +540,8 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtyp typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *); typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *); +typedef struct { + NPY_SORTKIND flags; +} PyArrayMethod_SortParameters; + #endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */ diff --git a/blimgui/dist64/numpy/_core/include/numpy/ndarrayobject.h b/blimgui/dist64/numpy/_core/include/numpy/ndarrayobject.h index 40f2130..accc67e 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/ndarrayobject.h +++ b/blimgui/dist64/numpy/_core/include/numpy/ndarrayobject.h @@ -32,7 +32,7 @@ extern "C" { #define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) #define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) -#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) +#define PyArray_CheckExact(op) (Py_TYPE((PyObject*)(op)) == &PyArray_Type) #define PyArray_HasArrayInterfaceType(op, type, context, out) \ ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ diff --git a/blimgui/dist64/numpy/_core/include/numpy/ndarraytypes.h b/blimgui/dist64/numpy/_core/include/numpy/ndarraytypes.h index 0a67689..5dfb746 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/ndarraytypes.h +++ b/blimgui/dist64/numpy/_core/include/numpy/ndarraytypes.h @@ -162,18 +162,37 @@ enum 
NPY_TYPECHAR { }; /* - * Changing this may break Numpy API compatibility - * due to changing offsets in PyArray_ArrFuncs, so be - * careful. Here we have reused the mergesort slot for - * any kind of stable sort, the actual implementation will - * depend on the data type. + * Changing this may break Numpy API compatibility due to changing offsets in + * PyArray_ArrFuncs, so be careful. Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will depend on the data + * type. + * + * Updated in NumPy 2.4 + * + * Updated with new names denoting requirements rather than specifying a + * particular algorithm. All the previous values are reused in a way that + * should be downstream compatible, but the actual algorithms used may be + * different than before. The new approach should be more flexible and easier + * to update. + * + * Names with a leading underscore are private, and should only be used + * internally by NumPy. + * + * NPY_NSORTS remains the same for backwards compatibility, it should not be + * changed. */ + typedef enum { - _NPY_SORT_UNDEFINED=-1, - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2, - NPY_STABLESORT=2, + _NPY_SORT_UNDEFINED = -1, + NPY_QUICKSORT = 0, + NPY_HEAPSORT = 1, + NPY_MERGESORT = 2, + NPY_STABLESORT = 2, + // new style names + _NPY_SORT_HEAPSORT = 1, + NPY_SORT_DEFAULT = 0, + NPY_SORT_STABLE = 2, + NPY_SORT_DESCENDING = 4, } NPY_SORTKIND; #define NPY_NSORTS (NPY_STABLESORT + 1) @@ -214,6 +233,16 @@ typedef enum { NPY_KEEPORDER=2 } NPY_ORDER; +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION +/* + * check that no values overflow/change during casting + * Used explicitly only in the ArrayMethod creation or resolve_dtypes functions to + * indicate that a same-value cast is supported. 
In external APIs, use only + * NPY_SAME_VALUE_CASTING + */ +#define NPY_SAME_VALUE_CASTING_FLAG 64 +#endif + /* For specifying allowed casting in operations which support it */ typedef enum { _NPY_ERROR_OCCURRED_IN_CAST = -1, @@ -227,6 +256,9 @@ typedef enum { NPY_SAME_KIND_CASTING=3, /* Allow any casts */ NPY_UNSAFE_CASTING=4, +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION + NPY_SAME_VALUE_CASTING=NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG, +#endif } NPY_CASTING; typedef enum { @@ -1679,7 +1711,7 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) /* * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED, * SUBARRAY, NAMES, FIELDS, C_METADATA, and METADATA require version specific - * lookup and are defined in npy_2_compat.h. + * lookup and are defined in npy_2_compat.h. */ @@ -1908,10 +1940,6 @@ typedef struct { #error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." #endif #define NPY_DEPRECATED_INCLUDES -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -#include "npy_1_7_deprecated_api.h" -#endif /* * There is no file npy_1_8_deprecated_api.h since there are no additional * deprecated API features in NumPy 1.8. @@ -1923,6 +1951,27 @@ typedef struct { * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) * #include "npy_1_9_deprecated_api.h" * #endif + * Then in the npy_1_9_deprecated_api.h header add something like this + * -------------------- + * #ifndef NPY_DEPRECATED_INCLUDES + * #error "Should never include npy_*_*_deprecated_api directly." 
+ * #endif + * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * + * #ifndef NPY_NO_DEPRECATED_API + * #if defined(_WIN32) + * #define _WARN___STR2__(x) #x + * #define _WARN___STR1__(x) _WARN___STR2__(x) + * #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + * #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") + * #else + * #warning "Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" + * #endif + * #endif + * -------------------- */ #undef NPY_DEPRECATED_INCLUDES diff --git a/blimgui/dist64/numpy/_core/include/numpy/npy_1_7_deprecated_api.h b/blimgui/dist64/numpy/_core/include/numpy/npy_1_7_deprecated_api.h deleted file mode 100644 index b8249e6..0000000 --- a/blimgui/dist64/numpy/_core/include/numpy/npy_1_7_deprecated_api.h +++ /dev/null @@ -1,112 +0,0 @@ -#ifndef NPY_DEPRECATED_INCLUDES -#error "Should never include npy_*_*_deprecated_api directly." -#endif - -#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ -#define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ - -/* Emit a warning if the user did not specifically request the old API */ -#ifndef NPY_NO_DEPRECATED_API -#if defined(_WIN32) -#define _WARN___STR2__(x) #x -#define _WARN___STR1__(x) _WARN___STR2__(x) -#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " -#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ - "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") -#else -#warning "Using deprecated NumPy API, disable it with " \ - "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" -#endif -#endif - -/* - * This header exists to collect all dangerous/deprecated NumPy API - * as of NumPy 1.7. 
- * - * This is an attempt to remove bad API, the proliferation of macros, - * and namespace pollution currently produced by the NumPy headers. - */ - -/* These array flags are deprecated as of NumPy 1.7 */ -#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS - -/* - * The consistent NPY_ARRAY_* names which don't pollute the NPY_* - * namespace were added in NumPy 1.7. - * - * These versions of the carray flags are deprecated, but - * probably should only be removed after two releases instead of one. - */ -#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS -#define NPY_OWNDATA NPY_ARRAY_OWNDATA -#define NPY_FORCECAST NPY_ARRAY_FORCECAST -#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY -#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY -#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES -#define NPY_ALIGNED NPY_ARRAY_ALIGNED -#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED -#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE -#define NPY_BEHAVED NPY_ARRAY_BEHAVED -#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS -#define NPY_CARRAY NPY_ARRAY_CARRAY -#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO -#define NPY_DEFAULT NPY_ARRAY_DEFAULT -#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY -#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY -#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY -#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY -#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY -#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY -#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL - -/* This way of accessing the default type is deprecated as of NumPy 1.7 */ -#define PyArray_DEFAULT NPY_DEFAULT_TYPE - -/* - * Deprecated as of NumPy 1.7, this kind of shortcut doesn't - * belong in the public API. - */ -#define NPY_AO PyArrayObject - -/* - * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't - * belong in the public API. 
- */ -#define fortran fortran_ - -/* - * Deprecated as of NumPy 1.7, as it is a namespace-polluting - * macro. - */ -#define FORTRAN_IF PyArray_FORTRAN_IF - -/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */ -#define NPY_METADATA_DTSTR "__timeunit__" - -/* - * Deprecated as of NumPy 1.7. - * The reasoning: - * - These are for datetime, but there's no datetime "namespace". - * - They just turn NPY_STR_ into "", which is just - * making something simple be indirected. - */ -#define NPY_STR_Y "Y" -#define NPY_STR_M "M" -#define NPY_STR_W "W" -#define NPY_STR_D "D" -#define NPY_STR_h "h" -#define NPY_STR_m "m" -#define NPY_STR_s "s" -#define NPY_STR_ms "ms" -#define NPY_STR_us "us" -#define NPY_STR_ns "ns" -#define NPY_STR_ps "ps" -#define NPY_STR_fs "fs" -#define NPY_STR_as "as" - - -#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ */ diff --git a/blimgui/dist64/numpy/_core/include/numpy/npy_3kcompat.h b/blimgui/dist64/numpy/_core/include/numpy/npy_3kcompat.h index fb26e7a..1c4ba37 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/npy_3kcompat.h +++ b/blimgui/dist64/numpy/_core/include/numpy/npy_3kcompat.h @@ -242,7 +242,7 @@ static inline PyObject* npy_PyFile_OpenFile(PyObject *filename, const char *mode) { PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); // noqa: borrowed-ref OK if (open == NULL) { return NULL; } diff --git a/blimgui/dist64/numpy/_core/include/numpy/npy_common.h b/blimgui/dist64/numpy/_core/include/numpy/npy_common.h index d5c55ae..7b2bd29 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/npy_common.h +++ b/blimgui/dist64/numpy/_core/include/numpy/npy_common.h @@ -98,11 +98,23 @@ #endif #ifdef _MSC_VER - #define NPY_FINLINE static __forceinline + #ifdef __cplusplus + #define NPY_FINLINE __forceinline + #else + #define NPY_FINLINE static __forceinline + #endif #elif defined(__GNUC__) - #define NPY_FINLINE static inline 
__attribute__((always_inline)) + #ifdef __cplusplus + #define NPY_FINLINE inline __attribute__((always_inline)) + #else + #define NPY_FINLINE static inline __attribute__((always_inline)) + #endif #else - #define NPY_FINLINE static + #ifdef __cplusplus + #define NPY_FINLINE inline + #else + #define NPY_FINLINE static NPY_INLINE + #endif #endif #if defined(_MSC_VER) @@ -410,9 +422,6 @@ typedef longdouble_t _Complex npy_clongdouble; #define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) #define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) #define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) #define NPY_MIN_DATETIME NPY_MIN_INT64 #define NPY_MAX_DATETIME NPY_MAX_INT64 #define NPY_MIN_TIMEDELTA NPY_MIN_INT64 @@ -515,17 +524,6 @@ typedef longdouble_t _Complex npy_clongdouble; #define NPY_UINT64_FMT NPY_ULONG_FMT #define MyPyLong_FromInt64 PyLong_FromLong #define MyPyLong_AsInt64 PyLong_AsLong -#elif NPY_BITSOF_LONG == 128 -#define NPY_INT128 NPY_LONG -#define NPY_UINT128 NPY_ULONG - typedef long npy_int128; - typedef unsigned long npy_uint128; -#define PyInt128ScalarObject PyLongScalarObject -#define PyInt128ArrType_Type PyLongArrType_Type -#define PyUInt128ScalarObject PyULongScalarObject -#define PyUInt128ArrType_Type PyULongArrType_Type -#define NPY_INT128_FMT NPY_LONG_FMT -#define NPY_UINT128_FMT NPY_ULONG_FMT #endif #if NPY_BITSOF_LONGLONG == 8 @@ -595,36 +593,6 @@ typedef longdouble_t _Complex npy_clongdouble; # define NPY_MAX_LONGLONG NPY_MAX_INT64 # define NPY_MIN_LONGLONG NPY_MIN_INT64 # define NPY_MAX_ULONGLONG NPY_MAX_UINT64 -#elif NPY_BITSOF_LONGLONG == 128 -# ifndef 
NPY_INT128 -# define NPY_INT128 NPY_LONGLONG -# define NPY_UINT128 NPY_ULONGLONG - typedef npy_longlong npy_int128; - typedef npy_ulonglong npy_uint128; -# define PyInt128ScalarObject PyLongLongScalarObject -# define PyInt128ArrType_Type PyLongLongArrType_Type -# define PyUInt128ScalarObject PyULongLongScalarObject -# define PyUInt128ArrType_Type PyULongLongArrType_Type -#define NPY_INT128_FMT NPY_LONGLONG_FMT -#define NPY_UINT128_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT128 -# define NPY_MIN_LONGLONG NPY_MIN_INT128 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 -#elif NPY_BITSOF_LONGLONG == 256 -# define NPY_INT256 NPY_LONGLONG -# define NPY_UINT256 NPY_ULONGLONG - typedef npy_longlong npy_int256; - typedef npy_ulonglong npy_uint256; -# define PyInt256ScalarObject PyLongLongScalarObject -# define PyInt256ArrType_Type PyLongLongArrType_Type -# define PyUInt256ScalarObject PyULongLongScalarObject -# define PyUInt256ArrType_Type PyULongLongArrType_Type -#define NPY_INT256_FMT NPY_LONGLONG_FMT -#define NPY_UINT256_FMT NPY_ULONGLONG_FMT -# define NPY_MAX_LONGLONG NPY_MAX_INT256 -# define NPY_MIN_LONGLONG NPY_MIN_INT256 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 #endif #if NPY_BITSOF_INT == 8 @@ -682,19 +650,6 @@ typedef longdouble_t _Complex npy_clongdouble; # define MyPyLong_FromInt64 PyLong_FromLong # define MyPyLong_AsInt64 PyLong_AsLong #endif -#elif NPY_BITSOF_INT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_INT -#define NPY_UINT128 NPY_UINT - typedef int npy_int128; - typedef unsigned int npy_uint128; -# define PyInt128ScalarObject PyIntScalarObject -# define PyInt128ArrType_Type PyIntArrType_Type -# define PyUInt128ScalarObject PyUIntScalarObject -# define PyUInt128ArrType_Type PyUIntArrType_Type -#define NPY_INT128_FMT NPY_INT_FMT -#define NPY_UINT128_FMT NPY_UINT_FMT -#endif #endif #if NPY_BITSOF_SHORT == 8 @@ -752,19 +707,6 @@ typedef longdouble_t _Complex npy_clongdouble; # define MyPyLong_FromInt64 PyLong_FromLong # define 
MyPyLong_AsInt64 PyLong_AsLong #endif -#elif NPY_BITSOF_SHORT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_SHORT -#define NPY_UINT128 NPY_USHORT - typedef short npy_int128; - typedef unsigned short npy_uint128; -# define PyInt128ScalarObject PyShortScalarObject -# define PyInt128ArrType_Type PyShortArrType_Type -# define PyUInt128ScalarObject PyUShortScalarObject -# define PyUInt128ArrType_Type PyUShortArrType_Type -#define NPY_INT128_FMT NPY_SHORT_FMT -#define NPY_UINT128_FMT NPY_USHORT_FMT -#endif #endif @@ -824,18 +766,6 @@ typedef longdouble_t _Complex npy_clongdouble; # define MyPyLong_AsInt64 PyLong_AsLong #endif #elif NPY_BITSOF_CHAR == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_BYTE -#define NPY_UINT128 NPY_UBYTE - typedef signed char npy_int128; - typedef unsigned char npy_uint128; -# define PyInt128ScalarObject PyByteScalarObject -# define PyInt128ArrType_Type PyByteArrType_Type -# define PyUInt128ScalarObject PyUByteScalarObject -# define PyUInt128ArrType_Type PyUByteArrType_Type -#define NPY_INT128_FMT NPY_BYTE_FMT -#define NPY_UINT128_FMT NPY_UBYTE_FMT -#endif #endif @@ -1046,17 +976,6 @@ typedef npy_half npy_float16; #define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT #define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT #endif -#elif NPY_BITSOF_LONGDOUBLE == 256 -#define NPY_FLOAT256 NPY_LONGDOUBLE -#define NPY_COMPLEX512 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float256; - typedef npy_clongdouble npy_complex512; -# define PyFloat256ScalarObject PyLongDoubleScalarObject -# define PyComplex512ScalarObject PyCLongDoubleScalarObject -# define PyFloat256ArrType_Type PyLongDoubleArrType_Type -# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT #endif /* datetime typedefs */ diff --git a/blimgui/dist64/numpy/_core/include/numpy/npy_cpu.h b/blimgui/dist64/numpy/_core/include/numpy/npy_cpu.h index 5428871..8fe62bd 100644 --- 
a/blimgui/dist64/numpy/_core/include/numpy/npy_cpu.h +++ b/blimgui/dist64/numpy/_core/include/numpy/npy_cpu.h @@ -20,6 +20,7 @@ * NPY_CPU_RISCV64 * NPY_CPU_RISCV32 * NPY_CPU_LOONGARCH + * NPY_CPU_SW_64 * NPY_CPU_WASM */ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ @@ -109,26 +110,17 @@ #elif __riscv_xlen == 32 #define NPY_CPU_RISCV32 #endif -#elif defined(__loongarch__) - #define NPY_CPU_LOONGARCH -#elif defined(__EMSCRIPTEN__) +#elif defined(__loongarch_lp64) + #define NPY_CPU_LOONGARCH64 +#elif defined(__sw_64__) + #define NPY_CPU_SW_64 +#elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + /* __wasm__ is defined by clang when targeting wasm */ #define NPY_CPU_WASM #else #error Unknown CPU, please report this to numpy maintainers with \ information about your platform (OS, CPU and compiler) #endif -/* - * Except for the following architectures, memory access is limited to the natural - * alignment of data types otherwise it may lead to bus error or performance regression. - * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. 
-*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) - #define NPY_ALIGNMENT_REQUIRED 0 -#endif -#ifndef NPY_ALIGNMENT_REQUIRED - #define NPY_ALIGNMENT_REQUIRED 1 -#endif - #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/blimgui/dist64/numpy/_core/include/numpy/npy_endian.h b/blimgui/dist64/numpy/_core/include/numpy/npy_endian.h index 9a01132..6fdfa64 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/npy_endian.h +++ b/blimgui/dist64/numpy/_core/include/numpy/npy_endian.h @@ -51,6 +51,7 @@ || defined(NPY_CPU_RISCV64) \ || defined(NPY_CPU_RISCV32) \ || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_SW_64) \ || defined(NPY_CPU_WASM) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN diff --git a/blimgui/dist64/numpy/_core/include/numpy/npy_math.h b/blimgui/dist64/numpy/_core/include/numpy/npy_math.h index a6bc48d..a69ffd0 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/npy_math.h +++ b/blimgui/dist64/numpy/_core/include/numpy/npy_math.h @@ -363,7 +363,7 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); static inline double npy_creal(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[0]; + return z._Val[0]; #else return creal(z); #endif @@ -377,7 +377,7 @@ static inline void npy_csetreal(npy_cdouble *z, const double r) static inline double npy_cimag(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[1]; + return z._Val[1]; #else return cimag(z); #endif @@ -391,7 +391,7 @@ static inline void npy_csetimag(npy_cdouble *z, const double i) static inline float npy_crealf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[0]; + return z._Val[0]; #else return crealf(z); #endif @@ -405,7 +405,7 @@ static inline void npy_csetrealf(npy_cfloat *z, const float r) static inline float npy_cimagf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[1]; + return z._Val[1]; #else return cimagf(z); #endif 
@@ -419,7 +419,7 @@ static inline void npy_csetimagf(npy_cfloat *z, const float i) static inline npy_longdouble npy_creall(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[0]; + return (npy_longdouble)z._Val[0]; #else return creall(z); #endif @@ -433,7 +433,7 @@ static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) static inline npy_longdouble npy_cimagl(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[1]; + return (npy_longdouble)z._Val[1]; #else return cimagl(z); #endif diff --git a/blimgui/dist64/numpy/_core/include/numpy/numpyconfig.h b/blimgui/dist64/numpy/_core/include/numpy/numpyconfig.h index f86c44f..3489fac 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/numpyconfig.h +++ b/blimgui/dist64/numpy/_core/include/numpy/numpyconfig.h @@ -82,6 +82,9 @@ #define NPY_1_25_API_VERSION 0x00000011 #define NPY_2_0_API_VERSION 0x00000012 #define NPY_2_1_API_VERSION 0x00000013 +#define NPY_2_2_API_VERSION 0x00000013 +#define NPY_2_3_API_VERSION 0x00000014 +#define NPY_2_4_API_VERSION 0x00000015 /* @@ -121,8 +124,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.10 support) */ - #define NPY_FEATURE_VERSION NPY_1_21_API_VERSION + /* Use the default (increase when dropping Python 3.11 support) */ + #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -170,6 +173,10 @@ #define NPY_FEATURE_VERSION_STRING "2.0" #elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.1" +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.3" +#elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.4" #else #error "Missing version string define for new NumPy version." 
#endif diff --git a/blimgui/dist64/numpy/_core/include/numpy/ufuncobject.h b/blimgui/dist64/numpy/_core/include/numpy/ufuncobject.h index 549fd4a..0169676 100644 --- a/blimgui/dist64/numpy/_core/include/numpy/ufuncobject.h +++ b/blimgui/dist64/numpy/_core/include/numpy/ufuncobject.h @@ -316,8 +316,7 @@ typedef struct _loop1d_info { #define UFUNC_PYVALS_NAME "UFUNC_PYVALS" -/* - * THESE MACROS ARE DEPRECATED. +/* THESE MACROS ARE DEPRECATED. * Use npy_set_floatstatus_* in the npymath library. */ #define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO @@ -325,10 +324,7 @@ typedef struct _loop1d_info { #define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW #define UFUNC_FPE_INVALID NPY_FPE_INVALID -#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() -#define generate_overflow_error() npy_set_floatstatus_overflow() - - /* Make sure it gets defined if it isn't already */ +/* Make sure it gets defined if it isn't already */ #ifndef UFUNC_NOFPE /* Clear the floating point exception default of Borland C++ */ #if defined(__BORLANDC__) diff --git a/blimgui/dist64/numpy/_core/memmap.py b/blimgui/dist64/numpy/_core/memmap.py index 1c4b778..8826e1a 100644 --- a/blimgui/dist64/numpy/_core/memmap.py +++ b/blimgui/dist64/numpy/_core/memmap.py @@ -1,8 +1,10 @@ -from contextlib import nullcontext import operator +from contextlib import nullcontext + import numpy as np -from .._utils import set_module -from .numeric import uint8, ndarray, dtype +from numpy._utils import set_module + +from .numeric import dtype, ndarray, uint8 __all__ = ['memmap'] @@ -11,10 +13,10 @@ writeable_filemodes = ["r+", "w+"] mode_equivalents = { - "readonly":"r", - "copyonwrite":"c", - "readwrite":"r+", - "write":"w+" + "readonly": "r", + "copyonwrite": "c", + "readwrite": "r+", + "write": "w+" } @@ -220,9 +222,9 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, mode = mode_equivalents[mode] except KeyError as e: if mode not in valid_filemodes: + all_modes = valid_filemodes + 
list(mode_equivalents.keys()) raise ValueError( - "mode must be one of {!r} (got {!r})" - .format(valid_filemodes + list(mode_equivalents.keys()), mode) + f"mode must be one of {all_modes!r} (got {mode!r})" ) from None if mode == 'w+' and shape is None: @@ -233,7 +235,7 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, else: f_ctx = open( os.fspath(filename), - ('r' if mode == 'c' else mode)+'b' + ('r' if mode == 'c' else mode) + 'b' ) with f_ctx as fid: @@ -250,17 +252,17 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, size = bytes // _dbytes shape = (size,) else: - if type(shape) not in (tuple, list): + if not isinstance(shape, (tuple, list)): try: shape = [operator.index(shape)] except TypeError: pass shape = tuple(shape) - size = np.intp(1) # avoid default choice of np.int_, which might overflow + size = np.intp(1) # avoid overflows for k in shape: size *= k - bytes = int(offset + size*_dbytes) + bytes = int(offset + size * _dbytes) if mode in ('w+', 'r+'): # gh-27723 diff --git a/blimgui/dist64/numpy/_core/multiarray.py b/blimgui/dist64/numpy/_core/multiarray.py index dad1a23..81f2929 100644 --- a/blimgui/dist64/numpy/_core/multiarray.py +++ b/blimgui/dist64/numpy/_core/multiarray.py @@ -7,17 +7,25 @@ """ import functools -from . import overrides -from . import _multiarray_umath + +from . import _multiarray_umath, overrides from ._multiarray_umath import * # noqa: F403 + # These imports are needed for backward compatibility, # do not change them. 
issue gh-15518 # _get_ndarray_c_version is semi-public, on purpose not added to __all__ -from ._multiarray_umath import ( - _flagdict, from_dlpack, _place, _reconstruct, - _vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version, - _get_madvise_hugepage, _set_madvise_hugepage, - ) +from ._multiarray_umath import ( # noqa: F401 + _ARRAY_API, + _flagdict, + _get_madvise_hugepage, + _get_ndarray_c_version, + _monotonicity, + _place, + _reconstruct, + _set_madvise_hugepage, + _vec_string, + from_dlpack, +) __all__ = [ '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', @@ -106,11 +114,20 @@ def _override___module__(): @array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) def empty_like( - prototype, dtype=None, order=None, subok=None, shape=None, *, device=None + prototype, dtype=None, order="K", subok=True, shape=None, *, device=None ): """ - empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *, - device=None) + empty_like( + prototype, + /, + dtype=None, + order='K', + subok=True, + shape=None, + *, + device=None, + ) + -- Return a new array with the same shape and type as a given array. @@ -173,20 +190,23 @@ def empty_like( array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) - """ # NOQA + """ return (prototype,) @array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) -def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): +def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): """ concatenate( - (a1, a2, ...), + arrays, + /, axis=0, out=None, + *, dtype=None, - casting="same_kind" + casting="same_kind", ) + -- Join a sequence of arrays along an existing axis. 
@@ -287,7 +307,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) -def inner(a, b): +def inner(a, b, /): """ inner(a, b, /) @@ -381,7 +401,7 @@ def inner(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.where) -def where(condition, x=None, y=None): +def where(condition, x=None, y=None, /): """ where(condition, [x, y], /) @@ -457,7 +477,7 @@ def where(condition, x=None, y=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) -def lexsort(keys, axis=None): +def lexsort(keys, axis=-1): """ lexsort(keys, axis=-1) @@ -578,7 +598,7 @@ def lexsort(keys, axis=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) -def can_cast(from_, to, casting=None): +def can_cast(from_, to, casting="safe"): """ can_cast(from_, to, casting='safe') @@ -640,7 +660,7 @@ def can_cast(from_, to, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) -def min_scalar_type(a): +def min_scalar_type(a, /): """ min_scalar_type(a, /) @@ -693,19 +713,7 @@ def result_type(*arrays_and_dtypes): result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. 
+ :ref:`type promotion ` rules to the arguments. Parameters ---------- @@ -721,27 +729,6 @@ def result_type(*arrays_and_dtypes): -------- dtype, promote_types, min_scalar_type, can_cast - Notes - ----- - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. - - Otherwise, `min_scalar_type` is called on each scalar, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. - - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. 
- Examples -------- >>> import numpy as np @@ -854,7 +841,7 @@ def dot(a, b, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) -def vdot(a, b): +def vdot(a, b, /): r""" vdot(a, b, /) @@ -917,7 +904,7 @@ def vdot(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) -def bincount(x, weights=None, minlength=None): +def bincount(x, /, weights=None, minlength=0): """ bincount(x, /, weights=None, minlength=0) @@ -993,7 +980,7 @@ def bincount(x, weights=None, minlength=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) -def ravel_multi_index(multi_index, dims, mode=None, order=None): +def ravel_multi_index(multi_index, dims, mode="raise", order="C"): """ ravel_multi_index(multi_index, dims, mode='raise', order='C') @@ -1051,7 +1038,7 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) -def unravel_index(indices, shape=None, order=None): +def unravel_index(indices, shape, order="C"): """ unravel_index(indices, shape, order='C') @@ -1096,7 +1083,7 @@ def unravel_index(indices, shape=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) -def copyto(dst, src, casting=None, where=None): +def copyto(dst, src, casting="same_kind", where=True): """ copyto(dst, src, casting='same_kind', where=True) @@ -1148,7 +1135,7 @@ def copyto(dst, src, casting=None, where=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) def putmask(a, /, mask, values): """ - putmask(a, mask, values) + putmask(a, /, mask, values) Changes elements of an array based on conditional and input values. 
@@ -1192,7 +1179,7 @@ def putmask(a, /, mask, values): @array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) -def packbits(a, axis=None, bitorder='big'): +def packbits(a, /, axis=None, bitorder="big"): """ packbits(a, /, axis=None, bitorder='big') @@ -1249,7 +1236,7 @@ def packbits(a, axis=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) -def unpackbits(a, axis=None, count=None, bitorder='big'): +def unpackbits(a, /, axis=None, count=None, bitorder="big"): """ unpackbits(a, /, axis=None, count=None, bitorder='big') @@ -1329,9 +1316,9 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) -def shares_memory(a, b, max_work=None): +def shares_memory(a, b, /, max_work=-1): """ - shares_memory(a, b, /, max_work=None) + shares_memory(a, b, /, max_work=-1) Determine if two arrays share memory. @@ -1408,9 +1395,9 @@ def shares_memory(a, b, max_work=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) -def may_share_memory(a, b, max_work=None): +def may_share_memory(a, b, /, max_work=0): """ - may_share_memory(a, b, /, max_work=None) + may_share_memory(a, b, /, max_work=0) Determine if two arrays might share memory @@ -1450,14 +1437,14 @@ def may_share_memory(a, b, max_work=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) -def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): +def is_busday(dates, weekmask="1111100", holidays=None, busdaycal=None, out=None): """ is_busday( dates, weekmask='1111100', holidays=None, busdaycal=None, - out=None + out=None, ) Calculates which of the given dates are valid days, and which are not. 
@@ -1509,7 +1496,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) -def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, +def busday_offset(dates, offsets, roll="raise", weekmask="1111100", holidays=None, busdaycal=None, out=None): """ busday_offset( @@ -1519,7 +1506,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, weekmask='1111100', holidays=None, busdaycal=None, - out=None + out=None, ) First adjusts the date to fall on a valid day according to @@ -1611,7 +1598,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) -def busday_count(begindates, enddates, weekmask=None, holidays=None, +def busday_count(begindates, enddates, weekmask="1111100", holidays=(), busdaycal=None, out=None): """ busday_count( @@ -1684,9 +1671,8 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, return (begindates, enddates, weekmask, holidays, out) -@array_function_from_c_func_and_dispatcher( - _multiarray_umath.datetime_as_string) -def datetime_as_string(arr, unit=None, timezone=None, casting=None): +@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string) +def datetime_as_string(arr, unit=None, timezone="naive", casting="same_kind"): """ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') @@ -1715,7 +1701,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- >>> import numpy as np - >>> import pytz + >>> from zoneinfo import ZoneInfo >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', @@ -1728,9 +1714,9 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, 
timezone=pytz.timezone('US/Eastern')) + >>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern')) array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='=3.12 + _AnyShapeT, + _CastingKind, + _CopyMode, + _ModeKind, + _NDIterFlagsKind, + _NDIterFlagsOp, + _OrderCF, + _OrderKACF, + _SupportsBuffer, + _SupportsFileMethods, broadcast, + busdaycalendar, + complexfloating, correlate, count_nonzero, + datetime64, dtype, einsum as c_einsum, flatiter, + float64, + floating, from_dlpack, + generic, + int_, interp, + intp, matmul, ndarray, nditer, - vecdot, - - # The rest - ufunc, + signedinteger, str_, - uint8, - intp, - int_, - float64, timedelta64, - datetime64, - generic, + ufunc, + uint8, unsignedinteger, - signedinteger, - floating, - complexfloating, - _AnyShapeType, - _OrderKACF, - _OrderCF, - _CastingKind, - _ModeKind, - _SupportsBuffer, - _SupportsFileMethods, - _CopyMode, - _NDIterFlagsKind, - _NDIterFlagsOp, + vecdot, ) -from numpy.lib._array_utils_impl import normalize_axis_index - from numpy._typing import ( - # Shapes - _ShapeLike, - - # DTypes + ArrayLike, DTypeLike, - _DTypeLike, - _SupportsDType, - - # Arrays NDArray, - ArrayLike, + _AnyShape, _ArrayLike, - _SupportsArrayFunc, - _NestedSequence, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, + _ArrayLikeBytes_co, _ArrayLikeComplex_co, - _ArrayLikeTD64_co, _ArrayLikeDT64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeStr_co, - _ArrayLikeBytes_co, - _ScalarLike_co, - _IntLike_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _DT64Codes, + _DTypeLike, _FloatLike_co, + _IntLike_co, + _NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, _TD64Like_co, ) from numpy._typing._ufunc import ( _2PTuple, _PyFunc_Nin1_Nout1, + _PyFunc_Nin1P_Nout2P, _PyFunc_Nin2_Nout1, _PyFunc_Nin3P_Nout1, - _PyFunc_Nin1P_Nout2P, ) __all__ = [ @@ -192,22 +184,19 @@ 
__all__ = [ "zeros", ] -_SCT = TypeVar("_SCT", bound=generic) -_DType = TypeVar("_DType", bound=np.dtype[Any]) -_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) -_ArrayType_co = TypeVar( - "_ArrayType_co", - bound=ndarray[Any, Any], - covariant=True, -) +_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=ndarray) +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, covariant=True) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +# TODO: fix the names of these typevars _ReturnType = TypeVar("_ReturnType") _IDType = TypeVar("_IDType") _Nin = TypeVar("_Nin", bound=int) _Nout = TypeVar("_Nout", bound=int) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_Array: TypeAlias = ndarray[_ShapeT, dtype[_SCT]] -_Array1D: TypeAlias = ndarray[tuple[int], dtype[_SCT]] +_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] +_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] # Valid time units _UnitKind: TypeAlias = L[ @@ -235,13 +224,8 @@ _RollKind: TypeAlias = L[ # `raise` is deliberately excluded ] @type_check_only -class _SupportsArray(Protocol[_ArrayType_co]): - def __array__(self, /) -> _ArrayType_co: ... - -@type_check_only -class _KwargsEmpty(TypedDict, total=False): - device: None | L["cpu"] - like: None | _SupportsArrayFunc +class _SupportsArray(Protocol[_ArrayT_co]): + def __array__(self, /) -> _ArrayT_co: ... @type_check_only class _ConstructorEmpty(Protocol): @@ -251,640 +235,580 @@ class _ConstructorEmpty(Protocol): self, /, shape: SupportsIndex, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array1D[float64]: ... @overload def __call__( self, /, shape: SupportsIndex, - dtype: _DType | _SupportsDType[_DType], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[tuple[int], _DType]: ... 
+ dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[tuple[int], _DTypeT]: ... @overload def __call__( self, /, shape: SupportsIndex, - dtype: type[_SCT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array1D[_SCT]: ... + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[_ScalarT]: ... @overload def __call__( self, /, shape: SupportsIndex, - dtype: DTypeLike, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array1D[Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[Incomplete]: ... # known shape @overload def __call__( self, /, - shape: _AnyShapeType, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_AnyShapeType, float64]: ... + shape: _AnyShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[_AnyShapeT, float64]: ... @overload def __call__( self, /, - shape: _AnyShapeType, - dtype: _DType | _SupportsDType[_DType], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[_AnyShapeType, _DType]: ... + shape: _AnyShapeT, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[_AnyShapeT, _DTypeT]: ... @overload def __call__( self, /, - shape: _AnyShapeType, - dtype: type[_SCT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_AnyShapeType, _SCT]: ... 
+ shape: _AnyShapeT, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def __call__( self, /, - shape: _AnyShapeType, - dtype: DTypeLike, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_AnyShapeType, Any]: ... + shape: _AnyShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[_AnyShapeT, Incomplete]: ... # unknown shape @overload def __call__( self, /, shape: _ShapeLike, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload def __call__( self, /, shape: _ShapeLike, - dtype: _DType | _SupportsDType[_DType], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[Any, _DType]: ... + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[_AnyShape, _DTypeT]: ... @overload def __call__( self, /, shape: _ShapeLike, - dtype: type[_SCT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> NDArray[_SCT]: ... + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[_ScalarT]: ... @overload def __call__( - self, /, + self, + /, shape: _ShapeLike, - dtype: DTypeLike, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> NDArray[Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[Incomplete]: ... 
# using `Final` or `TypeAlias` will break stubtest error = Exception # from ._multiarray_umath -ITEM_HASOBJECT: Final[L[1]] -LIST_PICKLE: Final[L[2]] -ITEM_IS_POINTER: Final[L[4]] -NEEDS_INIT: Final[L[8]] -NEEDS_PYAPI: Final[L[16]] -USE_GETITEM: Final[L[32]] -USE_SETITEM: Final[L[64]] -DATETIMEUNITS: Final[CapsuleType] -_ARRAY_API: Final[CapsuleType] -_flagdict: Final[dict[str, int]] -_monotonicity: Final[Callable[..., object]] -_place: Final[Callable[..., object]] -_reconstruct: Final[Callable[..., object]] -_vec_string: Final[Callable[..., object]] -correlate2: Final[Callable[..., object]] -dragon4_positional: Final[Callable[..., object]] -dragon4_scientific: Final[Callable[..., object]] -interp_complex: Final[Callable[..., object]] -set_datetimeparse_function: Final[Callable[..., object]] +ITEM_HASOBJECT: Final = 1 +LIST_PICKLE: Final = 2 +ITEM_IS_POINTER: Final = 4 +NEEDS_INIT: Final = 8 +NEEDS_PYAPI: Final = 16 +USE_GETITEM: Final = 32 +USE_SETITEM: Final = 64 +DATETIMEUNITS: Final[CapsuleType] = ... +_ARRAY_API: Final[CapsuleType] = ... + +_flagdict: Final[dict[str, int]] = ... +_monotonicity: Final[Callable[..., object]] = ... +_place: Final[Callable[..., object]] = ... +_reconstruct: Final[Callable[..., object]] = ... +_vec_string: Final[Callable[..., object]] = ... +correlate2: Final[Callable[..., object]] = ... +dragon4_positional: Final[Callable[..., object]] = ... +dragon4_scientific: Final[Callable[..., object]] = ... +interp_complex: Final[Callable[..., object]] = ... +set_datetimeparse_function: Final[Callable[..., object]] = ... + def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... def format_longfloat(x: np.longdouble, precision: int) -> str: ... -def scalar(dtype: _DType, object: bytes | object = ...) -> ndarray[tuple[()], _DType]: ... -def set_typeDict(dict_: dict[str, np.dtype[Any]], /) -> None: ... 
-typeinfo: Final[dict[str, np.dtype[np.generic]]] +def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... +def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... + +typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) -BUFSIZE: L[8192] -CLIP: L[0] -WRAP: L[1] -RAISE: L[2] -MAXDIMS: L[32] -MAY_SHARE_BOUNDS: L[0] -MAY_SHARE_EXACT: L[-1] -tracemalloc_domain: L[389047] +BUFSIZE: Final = 8_192 +CLIP: Final = 0 +WRAP: Final = 1 +RAISE: Final = 2 +MAXDIMS: Final = 64 +MAY_SHARE_BOUNDS: Final = 0 +MAY_SHARE_EXACT: Final = -1 +tracemalloc_domain: Final = 389_047 -zeros: Final[_ConstructorEmpty] -empty: Final[_ConstructorEmpty] +zeros: Final[_ConstructorEmpty] = ... +empty: Final[_ConstructorEmpty] = ... @overload def empty_like( - prototype: _ArrayType, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., - *, - device: None | L["cpu"] = ..., -) -> _ArrayType: ... -@overload -def empty_like( - prototype: _ArrayLike[_SCT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + prototype: _ArrayT, + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> _ArrayT: ... @overload def empty_like( - prototype: object, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + prototype: _ArrayLike[_ScalarT], + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... 
@overload def empty_like( - prototype: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + prototype: Incomplete, + /, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... @overload def empty_like( - prototype: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + prototype: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... @overload def array( - object: _ArrayType, - dtype: None = ..., + object: _ArrayT, + dtype: None = None, *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", subok: L[True], - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> _ArrayType: ... + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> _ArrayT: ... @overload def array( - object: _SupportsArray[_ArrayType], - dtype: None = ..., + object: _SupportsArray[_ArrayT], + dtype: None = None, *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", subok: L[True], - ndmin: L[0] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _ArrayType: ... + ndmin: L[0] = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> _ArrayT: ... 
@overload def array( - object: _ArrayLike[_SCT], - dtype: None = ..., + object: _ArrayLike[_ScalarT], + dtype: None = None, *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def array( - object: object, - dtype: None = ..., - *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... @overload def array( object: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... @overload def array( object: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = None, *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... +# @overload -def unravel_index( # type: ignore[misc] - indices: _IntLike_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[intp, ...]: ... -@overload -def unravel_index( - indices: _ArrayLikeInt_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[NDArray[intp], ...]: ... 
- -@overload -def ravel_multi_index( # type: ignore[misc] - multi_index: Sequence[_IntLike_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_IntLike_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> intp: ... @overload def ravel_multi_index( - multi_index: Sequence[_ArrayLikeInt_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., + multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> NDArray[intp]: ... +# +@overload +def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ... +@overload +def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... + +# +def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> int: ... + # NOTE: Allow any sequence of array-like objects @overload -def concatenate( # type: ignore[misc] - arrays: _ArrayLike[_SCT], +def concatenate( + arrays: _ArrayLike[_ScalarT], /, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = 0, + out: None = None, *, - dtype: None = ..., - casting: None | _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: None = None, + casting: _CastingKind | None = "same_kind", +) -> NDArray[_ScalarT]: ... @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = 0, + out: None = None, *, - dtype: None = ..., - casting: None | _CastingKind = ... -) -> NDArray[Any]: ... + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind | None = "same_kind", +) -> NDArray[_ScalarT]: ... 
@overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = 0, + out: None = None, *, - dtype: _DTypeLike[_SCT], - casting: None | _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> NDArray[Incomplete]: ... @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = 0, *, - dtype: DTypeLike, - casting: None | _CastingKind = ... -) -> NDArray[Any]: ... + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> _ArrayT: ... @overload def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: _ArrayT, *, - dtype: DTypeLike = ..., - casting: None | _CastingKind = ... -) -> _ArrayType: ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> _ArrayT: ... -def inner( - a: ArrayLike, - b: ArrayLike, - /, -) -> Any: ... +def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... @overload -def where( - condition: ArrayLike, - /, -) -> tuple[NDArray[intp], ...]: ... +def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... @overload -def where( - condition: ArrayLike, - x: ArrayLike, - y: ArrayLike, - /, -) -> NDArray[Any]: ... +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Incomplete]: ... -def lexsort( - keys: ArrayLike, - axis: None | SupportsIndex = ..., -) -> Any: ... +def lexsort(keys: ArrayLike, axis: SupportsIndex = -1) -> NDArray[intp]: ... -def can_cast( - from_: ArrayLike | DTypeLike, - to: DTypeLike, - casting: None | _CastingKind = ..., -) -> bool: ... 
+def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind = "safe") -> bool: ... -def min_scalar_type( - a: ArrayLike, /, -) -> dtype[Any]: ... - -def result_type( - *arrays_and_dtypes: ArrayLike | DTypeLike, -) -> dtype[Any]: ... +def min_scalar_type(a: ArrayLike, /) -> dtype: ... +def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ... +def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @overload -def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc] +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... @overload -def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ... # type: ignore[misc] +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... @overload -def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ... # type: ignore[misc] +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... @overload -def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ... # type: ignore[misc] +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... @overload -def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ... # type: ignore[misc] +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... @overload def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... @overload -def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ... +def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... @overload -def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... +def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... 
-def bincount( - x: ArrayLike, - /, - weights: None | ArrayLike = ..., - minlength: SupportsIndex = ..., -) -> NDArray[intp]: ... +def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> NDArray[intp]: ... -def copyto( - dst: NDArray[Any], - src: ArrayLike, - casting: None | _CastingKind = ..., - where: None | _ArrayLikeBool_co = ..., -) -> None: ... +def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... +def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def putmask( - a: NDArray[Any], - /, - mask: _ArrayLikeBool_co, - values: ArrayLike, -) -> None: ... +_BitOrder: TypeAlias = L["big", "little"] -def packbits( - a: _ArrayLikeInt_co, - /, - axis: None | SupportsIndex = ..., - bitorder: L["big", "little"] = ..., -) -> NDArray[uint8]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: SupportsIndex, bitorder: _BitOrder = "big") -> NDArray[uint8]: ... +@overload +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: None = None, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", +) -> ndarray[tuple[int], dtype[uint8]]: ... +@overload def unpackbits( a: _ArrayLike[uint8], /, - axis: None | SupportsIndex = ..., - count: None | SupportsIndex = ..., - bitorder: L["big", "little"] = ..., + axis: SupportsIndex, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -def shares_memory( - a: object, - b: object, - /, - max_work: None | int = ..., -) -> bool: ... +_MaxWork: TypeAlias = L[-1, 0] -def may_share_memory( - a: object, - b: object, - /, - max_work: None | int = ..., -) -> bool: ... +# any two python objects will be accepted, not just `ndarray`s +def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... 
+def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... @overload def asarray( - a: _ArrayLike[_SCT], - dtype: None = ..., + a: _ArrayLike[_ScalarT], + dtype: None = None, order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def asarray( - a: object, - dtype: None = ..., - order: _OrderKACF = ..., - *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def asarray( a: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def asarray( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload def asanyarray( - a: _ArrayType, # Preserve subclass-information - dtype: None = ..., + a: _ArrayT, # Preserve subclass-information + dtype: None = None, order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> _ArrayType: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _ArrayT: ... 
@overload def asanyarray( - a: _ArrayLike[_SCT], - dtype: None = ..., + a: _ArrayLike[_ScalarT], + dtype: None = None, order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def asanyarray( - a: object, - dtype: None = ..., - order: _OrderKACF = ..., - *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def asanyarray( a: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def asanyarray( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload def ascontiguousarray( - a: _ArrayLike[_SCT], - dtype: None = ..., - *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def ascontiguousarray( - a: object, - dtype: None = ..., + a: _ArrayLike[_ScalarT], + dtype: None = None, *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def ascontiguousarray( a: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... 
@overload def ascontiguousarray( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload def asfortranarray( - a: _ArrayLike[_SCT], - dtype: None = ..., - *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def asfortranarray( - a: object, - dtype: None = ..., + a: _ArrayLike[_ScalarT], + dtype: None = None, *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def asfortranarray( a: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def asfortranarray( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... -def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ... +def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... # `sep` is a de facto mandatory argument, as its default value is deprecated @overload def fromstring( string: str | bytes, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., *, sep: str, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload def fromstring( string: str | bytes, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], count: SupportsIndex = ..., *, sep: str, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... 
@overload def fromstring( string: str | bytes, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., count: SupportsIndex = ..., *, sep: str, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload @@ -893,7 +817,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: L[1], nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... @overload def frompyfunc( # type: ignore[overload-overlap] @@ -909,7 +833,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: L[2], nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... @overload def frompyfunc( # type: ignore[overload-overlap] @@ -925,7 +849,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: _Nin, nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... @overload def frompyfunc( # type: ignore[overload-overlap] @@ -941,7 +865,7 @@ def frompyfunc( nin: _Nin, nout: _Nout, *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... @overload def frompyfunc( @@ -957,321 +881,370 @@ def frompyfunc( nin: SupportsIndex, nout: SupportsIndex, *, - identity: None | object = ..., + identity: object | None = ..., ) -> ufunc: ... @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... 
+ like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload def fromiter( iter: Iterable[Any], - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], count: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def fromiter( iter: Iterable[Any], - dtype: DTypeLike, + dtype: DTypeLike | None, count: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload def frombuffer( buffer: _SupportsBuffer, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload def frombuffer( buffer: _SupportsBuffer, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[_ScalarT], count: SupportsIndex = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... @overload def frombuffer( buffer: _SupportsBuffer, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., count: SupportsIndex = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... -@overload -def arange( # type: ignore[misc] - stop: _IntLike_co, - /, *, - dtype: None = ..., - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[signedinteger]: ... 
-@overload -def arange( # type: ignore[misc] - start: _IntLike_co, - stop: _IntLike_co, - step: _IntLike_co = ..., - dtype: None = ..., +_ArangeScalar: TypeAlias = np.integer | np.floating | np.datetime64 | np.timedelta64 +_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) + +# keep in sync with ma.core.arange +# NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[signedinteger]: ... -@overload -def arange( # type: ignore[misc] - stop: _FloatLike_co, - /, *, - dtype: None = ..., - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[floating]: ... -@overload -def arange( # type: ignore[misc] - start: _FloatLike_co, - stop: _FloatLike_co, - step: _FloatLike_co = ..., - dtype: None = ..., + dtype: _DTypeLike[_ArangeScalarT], + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[_ArangeScalarT]: ... +@overload # (int-like, int-like?, int-like?) +def arange( + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co | None = 1, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[floating]: ... -@overload + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_]: ... +@overload # (float, float-like?, float-like?) def arange( - stop: _TD64Like_co, - /, *, - dtype: None = ..., - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[timedelta64]: ... 
-@overload + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) def arange( - start: _TD64Like_co, - stop: _TD64Like_co, - step: _TD64Like_co = ..., - dtype: None = ..., + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co | None = 1, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[timedelta64]: ... -@overload -def arange( # both start and stop must always be specified for datetime64 - start: datetime64, - stop: datetime64, - step: datetime64 = ..., - dtype: None = ..., + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) +def arange( + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co | None = 1, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[datetime64]: ... -@overload + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) def arange( - stop: Any, - /, *, - dtype: _DTypeLike[_SCT], - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[_SCT]: ... -@overload + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... 
+@overload # (datetime, datetime, timedelta-like) (requires both start and stop) def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: _DTypeLike[_SCT] = ..., + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co | None = 1, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[_SCT]: ... -@overload + dtype: _DTypeLike[np.datetime64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) def arange( - stop: Any, /, + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, *, - dtype: DTypeLike, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[Any]: ... -@overload + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # dtype= def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: DTypeLike = ..., + start_or_stop: _ArangeScalar | float | str, + /, + stop: _ArangeScalar | float | str | None = None, + step: _ArangeScalar | float | None = 1, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> _Array1D[Any]: ... + dtype: DTypeLike | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... -def datetime_data( - dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, -) -> tuple[str, int]: ... +# +def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... 
# The datetime functions perform unsafe casts to `datetime64[D]`, # so a lot of different argument types are allowed here +_ToDates: TypeAlias = dt.date | _NestedSequence[dt.date] +_ToDeltas: TypeAlias = dt.timedelta | _NestedSequence[dt.timedelta] + @overload -def busday_count( # type: ignore[misc] +def busday_count( begindates: _ScalarLike_co | dt.date, enddates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> int_: ... @overload -def busday_count( # type: ignore[misc] - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[int_]: ... @overload def busday_count( - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... 
+@overload +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates, + busdaycal: busdaycalendar | None, + out: _ArrayT, +) -> _ArrayT: ... # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @overload -def busday_offset( # type: ignore[misc] +def busday_offset( dates: datetime64 | dt.date, offsets: _TD64Like_co | dt.timedelta, - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> datetime64: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., +def busday_offset( + dates: _ArrayLike[datetime64] | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... 
-@overload -def busday_offset( # type: ignore[misc] +def busday_offset( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"], + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( dates: _ScalarLike_co | dt.date, offsets: _ScalarLike_co | dt.timedelta, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> datetime64: ... @overload -def busday_offset( # type: ignore[misc] - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset( + dates: ArrayLike | _NestedSequence[dt.date], + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[datetime64]: ... 
@overload def busday_offset( - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, + roll: _RollKind, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, +) -> _ArrayT: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> np.bool: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[np.bool]: ... @overload def is_busday( - dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... 
+ dates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def is_busday( + dates: ArrayLike | _ToDates, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, +) -> _ArrayT: ... + +_TimezoneContext: TypeAlias = L["naive", "UTC", "local"] | dt.tzinfo @overload -def datetime_as_string( # type: ignore[misc] +def datetime_as_string( arr: datetime64 | dt.date, - unit: None | L["auto"] | _UnitKind = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> str_: ... @overload def datetime_as_string( arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], - unit: None | L["auto"] | _UnitKind = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> NDArray[str_]: ... 
@overload @@ -1346,9 +1319,9 @@ class flagsobj: def nested_iters( op: ArrayLike | Sequence[ArrayLike], axes: Sequence[Sequence[SupportsIndex]], - flags: None | Sequence[_NDIterFlagsKind] = ..., - op_flags: None | Sequence[Sequence[_NDIterFlagsOp]] = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + flags: Sequence[_NDIterFlagsKind] | None = ..., + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., buffersize: SupportsIndex = ..., diff --git a/blimgui/dist64/numpy/_core/numeric.py b/blimgui/dist64/numpy/_core/numeric.py index 86c7b7c..c7966e9 100644 --- a/blimgui/dist64/numpy/_core/numeric.py +++ b/blimgui/dist64/numpy/_core/numeric.py @@ -1,33 +1,66 @@ +import builtins import functools import itertools +import math +import numbers import operator import sys import warnings -import numbers -import builtins -import math import numpy as np -from . import multiarray -from . import numerictypes as nt -from .multiarray import ( - ALLOW_THREADS, BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, - RAISE, WRAP, arange, array, asarray, asanyarray, ascontiguousarray, - asfortranarray, broadcast, can_cast, concatenate, copyto, dot, dtype, - empty, empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter, - fromstring, inner, lexsort, matmul, may_share_memory, min_scalar_type, - ndarray, nditer, nested_iters, promote_types, putmask, result_type, - shares_memory, vdot, where, zeros, normalize_axis_index, vecdot -) +from numpy.exceptions import AxisError -from . import overrides -from . import umath -from . import shape_base -from .overrides import finalize_array_function_like, set_module -from .umath import (multiply, invert, sin, PINF, NAN) -from . import numerictypes -from ..exceptions import AxisError +from . 
import multiarray, numerictypes, numerictypes as nt, overrides, shape_base, umath from ._ufunc_config import errstate +from .multiarray import ( # noqa: F401 + ALLOW_THREADS, + BUFSIZE, + CLIP, + MAXDIMS, + MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT, + RAISE, + WRAP, + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + broadcast, + can_cast, + concatenate, + copyto, + dot, + dtype, + empty, + empty_like, + flatiter, + from_dlpack, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + matmul, + may_share_memory, + min_scalar_type, + ndarray, + nditer, + nested_iters, + normalize_axis_index, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + vecdot, + where, + zeros, +) +from .overrides import finalize_array_function_like, set_module +from .umath import NAN, PINF, invert, multiply, sin bitwise_not = invert ufunc = type(sin) @@ -284,7 +317,7 @@ def ones_like( def _full_dispatcher( shape, fill_value, dtype=None, order=None, *, device=None, like=None ): - return(like,) + return (like,) @finalize_array_function_like @@ -490,11 +523,11 @@ def count_nonzero(a, axis=None, *, keepdims=False): -------- >>> import numpy as np >>> np.count_nonzero(np.eye(4)) - 4 + np.int64(4) >>> a = np.array([[0, 1, 7, 0], ... [3, 0, 2, 19]]) >>> np.count_nonzero(a) - 5 + np.int64(5) >>> np.count_nonzero(a, axis=0) array([1, 1, 2, 1]) >>> np.count_nonzero(a, axis=1) @@ -860,12 +893,12 @@ def convolve(a, v, mode='full'): """ a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1) - if (len(v) > len(a)): - a, v = v, a if len(a) == 0: raise ValueError('a cannot be empty') if len(v) == 0: raise ValueError('v cannot be empty') + if len(v) > len(a): + a, v = v, a return multiarray.correlate(a, v[::-1], mode) @@ -989,7 +1022,8 @@ def tensordot(a, b, axes=2): * (2,) array_like Or, a list of axes to be summed over, first sequence applying to `a`, second to `b`. Both elements array_like must be of the same length. 
- + Each axis may appear at most once; repeated axes are not allowed. + For example, ``axes=([1, 1], [0, 0])`` is invalid. Returns ------- output : ndarray @@ -1020,11 +1054,18 @@ def tensordot(a, b, axes=2): first in both sequences, the second axis second, and so forth. The calculation can be referred to ``numpy.einsum``. + For example, if ``a.shape == (2, 3, 4)`` and ``b.shape == (3, 4, 5)``, + then ``axes=([1, 2], [0, 1])`` sums over the ``(3, 4)`` dimensions of + both arrays and produces an output of shape ``(2, 5)``. + + Each summation axis corresponds to a distinct contraction index; repeating + an axis (for example ``axes=([1, 1], [0, 0])``) is invalid. + The shape of the result consists of the non-contracted axes of the first tensor, followed by the non-contracted axes of the second. Examples - -------- + -------- An example on integer_like: >>> a_0 = np.array([[1, 2], [3, 4]]) @@ -1055,9 +1096,9 @@ def tensordot(a, b, axes=2): [4664., 5018.], [4796., 5162.], [4928., 5306.]]) - + A slower but equivalent way of computing the same... - + >>> d = np.zeros((5,2)) >>> for i in range(5): ... 
for j in range(2): @@ -1073,10 +1114,9 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) + >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) + >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], [3, 4]], @@ -1122,7 +1162,7 @@ def tensordot(a, b, axes=2): iter(axes) except Exception: axes_a = list(range(-axes, 0)) - axes_b = list(range(0, axes)) + axes_b = list(range(axes)) else: axes_a, axes_b = axes try: @@ -1138,6 +1178,11 @@ def tensordot(a, b, axes=2): axes_b = [axes_b] nb = 1 + if len(set(axes_a)) != len(axes_a): + raise ValueError("duplicate axes are not allowed in tensordot") + if len(set(axes_b)) != len(axes_b): + raise ValueError("duplicate axes are not allowed in tensordot") + a, b = asarray(a), asarray(b) as_ = a.shape nda = a.ndim @@ -1163,13 +1208,13 @@ def tensordot(a, b, axes=2): notin = [k for k in range(nda) if k not in axes_a] newaxes_a = notin + axes_a N2 = math.prod(as_[axis] for axis in axes_a) - newshape_a = (math.prod([as_[ax] for ax in notin]), N2) + newshape_a = (math.prod(as_[ax] for ax in notin), N2) olda = [as_[axis] for axis in notin] notin = [k for k in range(ndb) if k not in axes_b] newaxes_b = axes_b + notin N2 = math.prod(bs[axis] for axis in axes_b) - newshape_b = (N2, math.prod([bs[ax] for ax in notin])) + newshape_b = (N2, math.prod(bs[ax] for ax in notin)) oldb = [bs[axis] for axis in notin] at = a.transpose(newaxes_a).reshape(newshape_a) @@ -1268,7 +1313,7 @@ def roll(a, shift, axis=None): if broadcasted.ndim > 1: raise ValueError( "'shift' and 'axis' should be scalars or 1D sequences") - shifts = {ax: 0 for ax in range(a.ndim)} + shifts = dict.fromkeys(range(a.ndim), 0) for sh, ax in broadcasted: shifts[ax] += int(sh) @@ -1377,7 +1422,7 @@ def rollaxis(a, axis, start=0): start -= 1 if axis == start: return a[...] 
- axes = list(range(0, n)) + axes = list(range(n)) axes.remove(axis) axes.insert(start, axis) return a.transpose(axes) @@ -1426,16 +1471,16 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): normalize_axis_index : normalizing a single scalar axis """ # Optimization to speed-up the most common cases. - if type(axis) not in (tuple, list): + if not isinstance(axis, (tuple, list)): try: axis = [operator.index(axis)] except TypeError: pass # Going via an iterator directly is slower than via list comprehension. - axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) + axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis) if not allow_duplicate and len(set(axis)) != len(axis): if argname: - raise ValueError('repeated axis in `{}` argument'.format(argname)) + raise ValueError(f'repeated axis in `{argname}` argument') else: raise ValueError('repeated axis') return axis @@ -1717,7 +1762,7 @@ def cross2d(x, y): # cp1 = a2 * b0 - a0 * b2 # cp2 = a0 * b1 - a1 * b0 multiply(a1, b2, out=cp0) - tmp = array(a2 * b1) + tmp = np.multiply(a2, b1, out=...) 
cp0 -= tmp multiply(a2, b0, out=cp1) multiply(a0, b2, out=tmp) @@ -1828,14 +1873,14 @@ def indices(dimensions, dtype=int, sparse=False): """ dimensions = tuple(dimensions) N = len(dimensions) - shape = (1,)*N + shape = (1,) * N if sparse: - res = tuple() + res = () else: - res = empty((N,)+dimensions, dtype=dtype) + res = empty((N,) + dimensions, dtype=dtype) for i, dim in enumerate(dimensions): idx = arange(dim, dtype=dtype).reshape( - shape[:i] + (dim,) + shape[i+1:] + shape[:i] + (dim,) + shape[i + 1:] ) if sparse: res = res + (idx,) @@ -1920,8 +1965,11 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): _fromfunction_with_like = array_function_dispatch()(fromfunction) -def _frombuffer(buf, dtype, shape, order): - return frombuffer(buf, dtype=dtype).reshape(shape, order=order) +def _frombuffer(buf, dtype, shape, order, axis_order=None): + array = frombuffer(buf, dtype=dtype) + if order == 'K' and axis_order is not None: + return array.reshape(shape, order='C').transpose(axis_order) + return array.reshape(shape, order=order) @set_module('numpy') @@ -2091,32 +2139,31 @@ def err_if_insufficient(width, binwidth): return '0' * (width or 1) elif num > 0: - binary = bin(num)[2:] + binary = f'{num:b}' binwidth = len(binary) outwidth = (binwidth if width is None else builtins.max(binwidth, width)) err_if_insufficient(width, binwidth) return binary.zfill(outwidth) - else: - if width is None: - return '-' + bin(-num)[2:] + elif width is None: + return f'-{-num:b}' - else: - poswidth = len(bin(-num)[2:]) + else: + poswidth = len(f'{-num:b}') - # See gh-8679: remove extra digit - # for numbers at boundaries. - if 2**(poswidth - 1) == -num: - poswidth -= 1 + # See gh-8679: remove extra digit + # for numbers at boundaries. 
+ if 2**(poswidth - 1) == -num: + poswidth -= 1 - twocomp = 2**(poswidth + 1) + num - binary = bin(twocomp)[2:] - binwidth = len(binary) + twocomp = 2**(poswidth + 1) + num + binary = f'{twocomp:b}' + binwidth = len(binary) - outwidth = builtins.max(binwidth, width) - err_if_insufficient(width, binwidth) - return '1' * (outwidth - binwidth) + binary + outwidth = builtins.max(binwidth, width) + err_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary @set_module('numpy') @@ -2443,8 +2490,21 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): elif isinstance(y, int): y = float(y) + # atol and rtol can be arrays + if not (np.all(np.isfinite(atol)) and np.all(np.isfinite(rtol))): + err_s = np.geterr()["invalid"] + err_msg = f"One of rtol or atol is not valid, atol: {atol}, rtol: {rtol}" + + if err_s == "warn": + warnings.warn(err_msg, RuntimeWarning, stacklevel=2) + elif err_s == "raise": + raise FloatingPointError(err_msg) + elif err_s == "print": + print(err_msg) + with errstate(invalid='ignore'): - result = (less_equal(abs(x-y), atol + rtol * abs(y)) + + result = (less_equal(abs(x - y), atol + rtol * abs(y)) & isfinite(y) | (x == y)) if equal_nan: @@ -2695,16 +2755,14 @@ def extend_all(module): __all__.append(a) -from .umath import * -from .numerictypes import * -from . import fromnumeric -from .fromnumeric import * -from . import arrayprint -from .arrayprint import * -from . import _asarray +from . import _asarray, _ufunc_config, arrayprint, fromnumeric from ._asarray import * -from . 
import _ufunc_config from ._ufunc_config import * +from .arrayprint import * +from .fromnumeric import * +from .numerictypes import * +from .umath import * + extend_all(fromnumeric) extend_all(umath) extend_all(numerictypes) diff --git a/blimgui/dist64/numpy/_core/numeric.pyi b/blimgui/dist64/numpy/_core/numeric.pyi index b6372ac..4ff9de4 100644 --- a/blimgui/dist64/numpy/_core/numeric.pyi +++ b/blimgui/dist64/numpy/_core/numeric.pyi @@ -1,303 +1,773 @@ -from collections.abc import Callable, Sequence +from _typeshed import Incomplete +from builtins import bool as py_bool +from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Final, - TypeAlias, - overload, - TypeVar, Literal as L, SupportsAbs, SupportsIndex, - NoReturn, + TypeAlias, TypeGuard, + TypeVar, + overload, ) -from typing_extensions import Unpack import numpy as np from numpy import ( - # re-exports - bitwise_not, False_, True_, - broadcast, - dtype, - flatiter, - from_dlpack, + _OrderCF, + _OrderKACF, + bitwise_not, inf, little_endian, - matmul, - vecdot, nan, - ndarray, - nditer, newaxis, ufunc, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeTD64_co, + _CDoubleCodes, + _Complex128Codes, + _DoubleCodes, + _DTypeLike, + _DTypeLikeBool, + _Float64Codes, + _IntCodes, + _NestedSequence, + _NumberLike_co, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArray, + _SupportsArrayFunc, + _SupportsDType, +) - # other - generic, - unsignedinteger, - signedinteger, - floating, - complexfloating, - int_, - intp, - float64, - timedelta64, - object_, - _AnyShapeType, - _OrderKACF, - _OrderCF, +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + 
format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, ) from .fromnumeric import ( - all as all, - any as any, - argpartition as argpartition, - matrix_transpose as matrix_transpose, - mean as mean, + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, ) from .multiarray import ( - # re-exports + ALLOW_THREADS as ALLOW_THREADS, + BUFSIZE as BUFSIZE, + CLIP as CLIP, + MAXDIMS as MAXDIMS, + MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT as MAY_SHARE_EXACT, + RAISE as RAISE, + WRAP as WRAP, + _Array, + _ConstructorEmpty, arange, array, - asarray, asanyarray, + asarray, ascontiguousarray, asfortranarray, + broadcast, can_cast, concatenate, copyto, dot, + dtype, empty, empty_like, + flatiter, + from_dlpack, frombuffer, fromfile, fromiter, fromstring, inner, lexsort, + matmul, may_share_memory, min_scalar_type, + ndarray, + nditer, nested_iters, - putmask, + normalize_axis_index as normalize_axis_index, promote_types, + putmask, result_type, shares_memory, vdot, where, zeros, - - # other - _Array, - _ConstructorEmpty, - _KwargsEmpty, ) - -from numpy._typing import ( - ArrayLike, - NDArray, - DTypeLike, - _SupportsDType, - _ShapeLike, - _DTypeLike, - _ArrayLike, - _SupportsArrayFunc, - _ScalarLike_co, - _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, - _ArrayLikeTD64_co, - _ArrayLikeObject_co, - _ArrayLikeUnknown, - _NestedSequence, +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + 
cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, ) __all__ = [ - "newaxis", - "ndarray", - "flatiter", - "nditer", - "nested_iters", - "ufunc", + "False_", + "ScalarType", + "True_", + "absolute", + "add", + "all", + "allclose", + "amax", + "amin", + "any", "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + 
"arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", "array", - "asarray", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", "asanyarray", + "asarray", "ascontiguousarray", "asfortranarray", - "zeros", - "count_nonzero", - "empty", - "broadcast", - "dtype", - "fromstring", - "fromfile", - "frombuffer", - "from_dlpack", - "where", - "argwhere", - "copyto", - "concatenate", - "lexsort", "astype", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_not", + "bitwise_or", + "bitwise_xor", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", "can_cast", - "promote_types", - "min_scalar_type", - "result_type", - "isfortran", - "empty_like", - "zeros_like", - "ones_like", - "correlate", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concatenate", + "conj", + "conjugate", "convolve", - "inner", - "dot", - "outer", - "vdot", - "roll", - "rollaxis", - "moveaxis", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", "cross", - "tensordot", - "little_endian", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", "fromiter", - 
"array_equal", - "array_equiv", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hypot", + "identity", "indices", - "fromfunction", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", "isscalar", - "binary_repr", - "base_repr", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", "ones", - "identity", - "allclose", + "ones_like", + "outer", + "partition", + "pi", + "positive", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", "putmask", - "flatnonzero", - "inf", - "nan", - "False_", - "True_", - "bitwise_not", - "full", - "full_like", - "matmul", - "vecdot", + "rad2deg", + "radians", + "ravel", + "reciprocal", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", "shares_memory", - "may_share_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + 
"std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "where", + "zeros", + "zeros_like", ] _T = TypeVar("_T") -_SCT = TypeVar("_SCT", bound=generic) -_DType = TypeVar("_DType", bound=np.dtype[Any]) -_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) -_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) +_ScalarT = TypeVar("_ScalarT", bound=generic) +_NumberObjectT = TypeVar("_NumberObjectT", bound=number | object_) +_NumericScalarT = TypeVar("_NumericScalarT", bound=number | timedelta64 | object_) +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) + +_AnyShapeT = TypeVar( + "_AnyShapeT", + tuple[()], + tuple[int], + tuple[int, int], + tuple[int, int, int], + tuple[int, int, int, int], + tuple[int, ...], +) +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, +) _CorrelateMode: TypeAlias = L["valid", "same", "full"] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] +_Array4D: TypeAlias = np.ndarray[tuple[int, int, int, int], np.dtype[_ScalarT]] + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool +_TD64_co: TypeAlias = np.timedelta64 | _Int_co + +_ArrayLike1D: 
TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] +_ArrayLike1DBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +_ArrayLike1DTD64_co: TypeAlias = _ArrayLike1D[_TD64_co] +_ArrayLike1DObject_co: TypeAlias = _ArrayLike1D[np.object_] + +_DTypeLikeInt: TypeAlias = type[int] | _IntCodes +_DTypeLikeFloat64: TypeAlias = type[float] | _Float64Codes | _DoubleCodes +_DTypeLikeComplex128: TypeAlias = type[complex] | _Complex128Codes | _CDoubleCodes + +### + +# keep in sync with `ones_like` @overload def zeros_like( - a: _ArrayType, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + a: _ArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: None | L["cpu"] = ..., -) -> _ArrayType: ... + device: L["cpu"] | None = None, +) -> _ArrayT: ... @overload def zeros_like( - a: _ArrayLike[_SCT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + a: _ArrayLike[_ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... @overload def zeros_like( a: object, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... 
@overload def zeros_like( - a: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., - *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... -@overload -def zeros_like( - a: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... ones: Final[_ConstructorEmpty] +# keep in sync with `zeros_like` @overload def ones_like( - a: _ArrayType, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + a: _ArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: None | L["cpu"] = ..., -) -> _ArrayType: ... + device: L["cpu"] | None = None, +) -> _ArrayT: ... @overload def ones_like( - a: _ArrayLike[_SCT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + a: _ArrayLike[_ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... @overload def ones_like( a: object, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... @overload def ones_like( - a: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., - *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... 
-@overload -def ones_like( - a: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview @@ -305,161 +775,174 @@ def ones_like( @overload def full( shape: SupportsIndex, - fill_value: _SCT, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[int], _SCT]: ... + fill_value: _ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], _ScalarT]: ... @overload def full( shape: SupportsIndex, fill_value: Any, - dtype: _DType | _SupportsDType[_DType], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[tuple[int], _DType]: ... + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[tuple[int], _DTypeT]: ... @overload def full( shape: SupportsIndex, fill_value: Any, - dtype: type[_SCT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[int], _SCT]: ... + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], _ScalarT]: ... @overload def full( shape: SupportsIndex, fill_value: Any, - dtype: None | DTypeLike = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[tuple[int], Any]: ... 
# known shape @overload def full( - shape: _AnyShapeType, - fill_value: _SCT, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_AnyShapeType, _SCT]: ... + shape: _AnyShapeT, + fill_value: _ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def full( - shape: _AnyShapeType, + shape: _AnyShapeT, fill_value: Any, - dtype: _DType | _SupportsDType[_DType], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[_AnyShapeType, _DType]: ... + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[_AnyShapeT, _DTypeT]: ... @overload def full( - shape: _AnyShapeType, + shape: _AnyShapeT, fill_value: Any, - dtype: type[_SCT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_AnyShapeType, _SCT]: ... + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def full( - shape: _AnyShapeType, + shape: _AnyShapeT, fill_value: Any, - dtype: None | DTypeLike = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_AnyShapeType, Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[_AnyShapeT, Any]: ... # unknown shape @overload def full( shape: _ShapeLike, - fill_value: _SCT, - dtype: None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> NDArray[_SCT]: ... + fill_value: _ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... 
@overload def full( shape: _ShapeLike, fill_value: Any, - dtype: _DType | _SupportsDType[_DType], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[Any, _DType]: ... + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[Any, _DTypeT]: ... @overload def full( shape: _ShapeLike, fill_value: Any, - dtype: type[_SCT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], -) -> NDArray[_SCT]: ... + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... @overload def full( shape: _ShapeLike, fill_value: Any, - dtype: None | DTypeLike = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload def full_like( - a: _ArrayType, - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + a: _ArrayT, + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: None | L["cpu"] = ..., -) -> _ArrayType: ... + device: L["cpu"] | None = None, +) -> _ArrayT: ... @overload def full_like( - a: _ArrayLike[_SCT], - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + a: _ArrayLike[_ScalarT], + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... 
@overload def full_like( a: object, - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., - *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... -@overload -def full_like( - a: Any, - fill_value: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + fill_value: object, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... @overload def full_like( - a: Any, - fill_value: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + a: object, + fill_value: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # @overload -def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> int: ... +def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> np.intp: ... @overload def count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ... @overload @@ -467,430 +950,327 @@ def count_nonzero( a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] ) -> NDArray[np.intp]: ... @overload -def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... # -def isfortran(a: NDArray[Any] | generic) -> bool: ... - -def argwhere(a: ArrayLike) -> NDArray[intp]: ... +def isfortran(a: ndarray | generic) -> py_bool: ... -def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... 
+# +def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ... +def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ... +# keep in sync with `convolve` @overload def correlate( - a: _ArrayLikeUnknown, - v: _ArrayLikeUnknown, - mode: _CorrelateMode = ..., -) -> NDArray[Any]: ... + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... @overload -def correlate( - a: _ArrayLikeBool_co, - v: _ArrayLikeBool_co, - mode: _CorrelateMode = ..., -) -> NDArray[np.bool]: ... -@overload -def correlate( - a: _ArrayLikeUInt_co, - v: _ArrayLikeUInt_co, - mode: _CorrelateMode = ..., -) -> NDArray[unsignedinteger[Any]]: ... -@overload -def correlate( - a: _ArrayLikeInt_co, - v: _ArrayLikeInt_co, - mode: _CorrelateMode = ..., -) -> NDArray[signedinteger[Any]]: ... +def correlate(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... @overload -def correlate( - a: _ArrayLikeFloat_co, - v: _ArrayLikeFloat_co, - mode: _CorrelateMode = ..., -) -> NDArray[floating[Any]]: ... +def correlate(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... @overload -def correlate( - a: _ArrayLikeComplex_co, - v: _ArrayLikeComplex_co, - mode: _CorrelateMode = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def correlate(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... @overload def correlate( - a: _ArrayLikeTD64_co, - v: _ArrayLikeTD64_co, - mode: _CorrelateMode = ..., -) -> NDArray[timedelta64]: ... + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... @overload def correlate( - a: _ArrayLikeObject_co, - v: _ArrayLikeObject_co, - mode: _CorrelateMode = ..., -) -> NDArray[object_]: ... 
+ a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... +# keep in sync with `correlate` @overload def convolve( - a: _ArrayLikeUnknown, - v: _ArrayLikeUnknown, - mode: _CorrelateMode = ..., -) -> NDArray[Any]: ... + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... @overload -def convolve( - a: _ArrayLikeBool_co, - v: _ArrayLikeBool_co, - mode: _CorrelateMode = ..., -) -> NDArray[np.bool]: ... +def convolve(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... @overload -def convolve( - a: _ArrayLikeUInt_co, - v: _ArrayLikeUInt_co, - mode: _CorrelateMode = ..., -) -> NDArray[unsignedinteger[Any]]: ... +def convolve(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... @overload -def convolve( - a: _ArrayLikeInt_co, - v: _ArrayLikeInt_co, - mode: _CorrelateMode = ..., -) -> NDArray[signedinteger[Any]]: ... +def convolve(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... @overload def convolve( - a: _ArrayLikeFloat_co, - v: _ArrayLikeFloat_co, - mode: _CorrelateMode = ..., -) -> NDArray[floating[Any]]: ... + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... @overload def convolve( - a: _ArrayLikeComplex_co, - v: _ArrayLikeComplex_co, - mode: _CorrelateMode = ..., -) -> NDArray[complexfloating[Any, Any]]: ... -@overload -def convolve( - a: _ArrayLikeTD64_co, - v: _ArrayLikeTD64_co, - mode: _CorrelateMode = ..., -) -> NDArray[timedelta64]: ... -@overload -def convolve( - a: _ArrayLikeObject_co, - v: _ArrayLikeObject_co, - mode: _CorrelateMode = ..., -) -> NDArray[object_]: ... 
+ a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... +# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload @overload def outer( - a: _ArrayLikeUnknown, - b: _ArrayLikeUnknown, - out: None = ..., -) -> NDArray[Any]: ... + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None +) -> _Array2D[_AnyNumericScalarT]: ... @overload -def outer( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - out: None = ..., -) -> NDArray[np.bool]: ... -@overload -def outer( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - out: None = ..., -) -> NDArray[unsignedinteger[Any]]: ... -@overload -def outer( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - out: None = ..., -) -> NDArray[signedinteger[Any]]: ... +def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, out: None = None) -> _Array2D[np.bool]: ... @overload -def outer( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - out: None = ..., -) -> NDArray[floating[Any]]: ... +def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, out: None = None) -> _Array2D[np.int_ | Any]: ... @overload -def outer( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - out: None = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, out: None = None) -> _Array2D[np.float64 | Any]: ... @overload -def outer( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - out: None = ..., -) -> NDArray[timedelta64]: ... +def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> _Array2D[np.complex128 | Any]: ... @overload -def outer( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - out: None = ..., -) -> NDArray[object_]: ... +def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ... 
@overload -def outer( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - out: _ArrayType, -) -> _ArrayType: ... +def outer(a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: _ArrayT) -> _ArrayT: ... +# keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload def tensordot( - a: _ArrayLikeUnknown, - b: _ArrayLikeUnknown, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[Any]: ... -@overload -def tensordot( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[np.bool]: ... -@overload -def tensordot( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[unsignedinteger[Any]]: ... + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[_AnyNumericScalarT]: ... @overload -def tensordot( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[signedinteger[Any]]: ... -@overload -def tensordot( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[floating[Any]]: ... +def tensordot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2) -> NDArray[np.bool]: ... @overload def tensordot( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.int_ | Any]: ... @overload def tensordot( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[timedelta64]: ... 
+ a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.float64 | Any]: ... @overload def tensordot( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[object_]: ... + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.complex128 | Any]: ... -@overload -def roll( - a: _ArrayLike[_SCT], - shift: _ShapeLike, - axis: None | _ShapeLike = ..., -) -> NDArray[_SCT]: ... -@overload -def roll( - a: ArrayLike, - shift: _ShapeLike, - axis: None | _ShapeLike = ..., -) -> NDArray[Any]: ... - -def rollaxis( - a: NDArray[_SCT], - axis: int, - start: int = ..., -) -> NDArray[_SCT]: ... - -def moveaxis( - a: NDArray[_SCT], - source: _ShapeLike, - destination: _ShapeLike, -) -> NDArray[_SCT]: ... - -@overload -def cross( - a: _ArrayLikeUnknown, - b: _ArrayLikeUnknown, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[Any]: ... -@overload -def cross( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NoReturn: ... +# @overload def cross( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[unsignedinteger[Any]]: ... + a: _ArrayLike[_AnyNumericScalarT], + b: _ArrayLike[_AnyNumericScalarT], + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[_AnyNumericScalarT]: ... @overload def cross( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[signedinteger[Any]]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.int_ | Any]: ... 
@overload def cross( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[floating[Any]]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.float64 | Any]: ... @overload def cross( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[complexfloating[Any, Any]]: ... -@overload -def cross( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[object_]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.complex128 | Any]: ... +# @overload -def indices( - dimensions: Sequence[int], - dtype: type[int] = ..., - sparse: L[False] = ..., -) -> NDArray[int_]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: type[int] = ..., - sparse: L[True] = ..., -) -> tuple[NDArray[int_], ...]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: _DTypeLike[_SCT], - sparse: L[False] = ..., -) -> NDArray[_SCT]: ... +def roll(a: _ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> _ArrayT: ... @overload -def indices( - dimensions: Sequence[int], - dtype: _DTypeLike[_SCT], - sparse: L[True], -) -> tuple[NDArray[_SCT], ...]: ... +def roll(a: _ArrayLike[_ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... @overload +def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ... +def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ... 
+def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int, + argname: str | None = None, + allow_duplicate: py_bool | None = False, +) -> tuple[int, ...]: ... + +# +@overload # 0d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = False) -> _Array1D[np.intp]: ... +@overload # 0d, dtype=, sparse=True +def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices(dimensions: tuple[()], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array1D[_ScalarT]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ... +@overload # 1d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = False) -> _Array2D[np.intp]: ... +@overload # 1d, dtype=int (default), sparse=True +def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array2D[_ScalarT]: ... +@overload # 1d, dtype=, sparse=True +def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[_Array1D[_ScalarT]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ... +@overload # 1d, dtype=, sparse=True +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array1D[Any]]: ... +@overload # 2d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: type[int] = int, sparse: L[False] = False) -> _Array3D[np.intp]: ... 
+@overload # 2d, dtype=int (default), sparse=True def indices( - dimensions: Sequence[int], - dtype: DTypeLike, - sparse: L[False] = ..., -) -> NDArray[Any]: ... -@overload + dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True] +) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array3D[_ScalarT]: ... +@overload # 2d, dtype=, sparse=True def indices( - dimensions: Sequence[int], - dtype: DTypeLike, - sparse: L[True], -) -> tuple[NDArray[Any], ...]: ... + dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[True] +) -> tuple[_Array2D[_ScalarT], _Array2D[_ScalarT]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ... +@overload # 2d, dtype=, sparse=True +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array2D[Any], _Array2D[Any]]: ... +@overload # ?d, dtype=int (default), sparse=False (default) +def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] = False) -> NDArray[np.intp]: ... +@overload # ?d, dtype=int (default), sparse=True +def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> NDArray[_ScalarT]: ... +@overload # ?d, dtype=, sparse=True +def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ... 
+@overload # ?d, dtype=, sparse=True +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ... +# def fromfunction( function: Callable[..., _T], shape: Sequence[int], *, - dtype: DTypeLike = ..., - like: _SupportsArrayFunc = ..., - **kwargs: Any, + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, ) -> _T: ... -def isscalar(element: object) -> TypeGuard[ - generic | bool | int | float | complex | str | bytes | memoryview -]: ... - -def binary_repr(num: SupportsIndex, width: None | int = ...) -> str: ... +# +def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... -def base_repr( - number: SupportsAbs[float], - base: float = ..., - padding: SupportsIndex = ..., -) -> str: ... +# +def binary_repr(num: SupportsIndex, width: int | None = None) -> str: ... +def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsIndex | None = 0) -> str: ... -@overload -def identity( - n: int, - dtype: None = ..., - *, - like: _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... -@overload -def identity( - n: int, - dtype: _DTypeLike[_SCT], - *, - like: _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def identity( - n: int, - dtype: DTypeLike, - *, - like: _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... +# +@overload # dtype: None (default) +def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ... +@overload # dtype: known scalar type +def identity(n: int, dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[_ScalarT]: ... +@overload # dtype: like bool +def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ... +@overload # dtype: like int_ +def identity(n: int, dtype: _DTypeLikeInt, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.int_ | Any]: ... 
+@overload # dtype: like float64 +def identity(n: int, dtype: _DTypeLikeFloat64, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64 | Any]: ... +@overload # dtype: like complex128 +def identity(n: int, dtype: _DTypeLikeComplex128, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.complex128 | Any]: ... +@overload # dtype: unknown +def identity(n: int, dtype: DTypeLike, *, like: _SupportsArrayFunc | None = None) -> _Array2D[Incomplete]: ... +# def allclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., -) -> bool: ... + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> py_bool: ... -@overload +# +@overload # scalar, scalar def isclose( - a: _ScalarLike_co, - b: _ScalarLike_co, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + a: _NumberLike_co, + b: _NumberLike_co, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> np.bool: ... -@overload +@overload # known shape, same shape or scalar +def isclose( + a: np.ndarray[_ShapeT], + b: np.ndarray[_ShapeT] | _NumberLike_co, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +@overload # same shape or scalar, known shape +def isclose( + a: np.ndarray[_ShapeT] | _NumberLike_co, + b: np.ndarray[_ShapeT], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +@overload # 1d sequence, <=1d array-like +def isclose( + a: Sequence[_NumberLike_co], + b: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... 
+@overload # <=1d array-like, 1d sequence +def isclose( + a: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]], + b: Sequence[_NumberLike_co], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # 2d sequence, <=2d array-like +def isclose( + a: Sequence[Sequence[_NumberLike_co]], + b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # <=2d array-like, 2d sequence +def isclose( + b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]], + a: Sequence[Sequence[_NumberLike_co]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # unknown shape, unknown shape def isclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., -) -> NDArray[np.bool]: ... - -def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> NDArray[np.bool] | Any: ... -def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... +# +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ... +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... +# @overload def astype( - x: ndarray[_ShapeType, dtype[Any]], - dtype: _DTypeLike[_SCT], + x: ndarray[_ShapeT], + dtype: _DTypeLike[_ScalarT], /, *, - copy: bool = ..., - device: None | L["cpu"] = ..., -) -> ndarray[_ShapeType, dtype[_SCT]]: ... + copy: py_bool = True, + device: L["cpu"] | None = None, +) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... 
@overload def astype( - x: ndarray[_ShapeType, dtype[Any]], - dtype: DTypeLike, + x: ndarray[_ShapeT], + dtype: DTypeLike | None, /, *, - copy: bool = ..., - device: None | L["cpu"] = ..., -) -> ndarray[_ShapeType, dtype[Any]]: ... + copy: py_bool = True, + device: L["cpu"] | None = None, +) -> ndarray[_ShapeT]: ... diff --git a/blimgui/dist64/numpy/_core/numerictypes.py b/blimgui/dist64/numpy/_core/numerictypes.py index 241233a..857539b 100644 --- a/blimgui/dist64/numpy/_core/numerictypes.py +++ b/blimgui/dist64/numpy/_core/numerictypes.py @@ -12,10 +12,10 @@ Bit-width names - int8 int16 int32 int64 int128 - uint8 uint16 uint32 uint64 uint128 - float16 float32 float64 float96 float128 float256 - complex32 complex64 complex128 complex192 complex256 complex512 + int8 int16 int32 int64 + uint8 uint16 uint32 uint64 + float16 float32 float64 float96 float128 + complex64 complex128 complex192 complex256 datetime64 timedelta64 c-based names @@ -79,12 +79,19 @@ import numbers import warnings +from numpy._utils import set_module + from . import multiarray as ma from .multiarray import ( - ndarray, dtype, datetime_data, datetime_as_string, - busday_offset, busday_count, is_busday, busdaycalendar - ) -from .._utils import set_module + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + dtype, + is_busday, + ndarray, +) # we add more at the bottom __all__ = [ @@ -95,30 +102,28 @@ # we don't need all these imports, but we need to keep them for compatibility # for users using np._core.numerictypes.UPPER_TABLE -from ._string_helpers import ( # noqa: F401 - english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE -) - -from ._type_aliases import ( - sctypeDict, allTypes, sctypes -) -from ._dtype import _kind_name - # we don't export these for import *, but we do want them accessible # as numerictypes.bool, etc. 
-from builtins import bool, int, float, complex, object, str, bytes # noqa: F401, UP029 +from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029 +from ._dtype import _kind_name +from ._string_helpers import ( # noqa: F401 + LOWER_TABLE, + UPPER_TABLE, + english_capitalize, + english_lower, + english_upper, +) +from ._type_aliases import allTypes, sctypeDict, sctypes # We use this later generic = allTypes['generic'] genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', - 'int32', 'uint32', 'int64', 'uint64', 'int128', - 'uint128', 'float16', - 'float32', 'float64', 'float80', 'float96', 'float128', - 'float256', - 'complex32', 'complex64', 'complex128', 'complex160', - 'complex192', 'complex256', 'complex512', 'object'] + 'int32', 'uint32', 'int64', 'uint64', + 'float16', 'float32', 'float64', 'float96', 'float128', + 'complex64', 'complex128', 'complex192', 'complex256', + 'object'] @set_module('numpy') def maximum_sctype(t): @@ -231,7 +236,6 @@ def issctype(rep): return False -@set_module('numpy') def obj2sctype(rep, default=None): """ Return the scalar dtype or NumPy equivalent of Python type of an object. 
@@ -594,7 +598,7 @@ def _scalar_type_key(typ): ScalarType = [int, float, complex, bool, bytes, str, memoryview] -ScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key) +ScalarType += sorted(dict.fromkeys(sctypeDict.values()), key=_scalar_type_key) ScalarType = tuple(ScalarType) diff --git a/blimgui/dist64/numpy/_core/numerictypes.pyi b/blimgui/dist64/numpy/_core/numerictypes.pyi index 6a697cf..7cfde8e 100644 --- a/blimgui/dist64/numpy/_core/numerictypes.pyi +++ b/blimgui/dist64/numpy/_core/numerictypes.pyi @@ -1,68 +1,69 @@ -import builtins -from typing import ( - Any, - Literal as L, - TypedDict, - type_check_only, -) +from builtins import bool as py_bool +from typing import Any, Final, Literal as L, TypedDict, type_check_only import numpy as np from numpy import ( - dtype, - generic, bool, bool_, - uint8, - uint16, - uint32, - uint64, - ubyte, - ushort, - uintc, - ulong, - ulonglong, - uintp, - uint, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + double, + dtype, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, int8, int16, int32, int64, - byte, - short, + int_, intc, - long, - longlong, + integer, intp, - int_, - float16, - float32, - float64, - half, - single, - double, + long, longdouble, - complex64, - complex128, - csingle, - cdouble, - clongdouble, - datetime64, - timedelta64, + longlong, + number, object_, + short, + signedinteger, + single, str_, - bytes_, - void, + timedelta64, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, unsignedinteger, - character, - inexact, - number, - integer, - flexible, - complexfloating, - signedinteger, - floating, + ushort, + void, ) -from ._type_aliases import sctypeDict # noqa: F401 +from numpy._typing import DTypeLike + +from ._type_aliases import sctypeDict as sctypeDict from .multiarray 
import ( busday_count, busday_offset, @@ -72,22 +73,6 @@ from .multiarray import ( is_busday, ) -from numpy._typing import DTypeLike -from numpy._typing._extended_precision import ( - uint128, - uint256, - int128, - int256, - float80, - float96, - float128, - float256, - complex160, - complex192, - complex256, - complex512, -) - __all__ = [ "ScalarType", "typecodes", @@ -151,67 +136,61 @@ __all__ = [ "bool_", "int_", "uint", - "uint128", - "uint256", - "int128", - "int256", - "float80", "float96", "float128", - "float256", - "complex160", "complex192", "complex256", - "complex512", ] @type_check_only class _TypeCodes(TypedDict): - Character: L['c'] - Integer: L['bhilqnp'] - UnsignedInteger: L['BHILQNP'] - Float: L['efdg'] - Complex: L['FDG'] - AllInteger: L['bBhHiIlLqQnNpP'] - AllFloat: L['efdgFDG'] - Datetime: L['Mm'] - All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] - -def isdtype(dtype: dtype[Any] | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... + Character: L["c"] + Integer: L["bhilqnp"] + UnsignedInteger: L["BHILQNP"] + Float: L["efdg"] + Complex: L["FDG"] + AllInteger: L["bBhHiIlLqQnNpP"] + AllFloat: L["efdgFDG"] + Datetime: L["Mm"] + All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... +def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike | None, arg2: DTypeLike | None) -> py_bool: ... 
-typecodes: _TypeCodes -ScalarType: tuple[ - type[int], - type[float], - type[complex], - type[builtins.bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], - type[datetime64], - type[object_], - type[bytes_], - type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], - type[void], -] +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview[Any]], + type[np.bool], + type[complex64], + type[complex128], + type[complex128 | complex192 | complex256], + type[float16], + type[float32], + type[float64], + type[float64 | float96 | float128], + type[int8], + type[int16], + type[int32], + type[int32 | int64], + type[int64], + type[datetime64], + type[timedelta64], + type[object_], + type[bytes_], + type[str_], + type[uint8], + type[uint16], + type[uint32], + type[uint32 | uint64], + type[uint64], + type[void], + ] +] = ... 
+typeDict: Final = sctypeDict diff --git a/blimgui/dist64/numpy/_core/overrides.py b/blimgui/dist64/numpy/_core/overrides.py index 1f5039b..167a954 100644 --- a/blimgui/dist64/numpy/_core/overrides.py +++ b/blimgui/dist64/numpy/_core/overrides.py @@ -1,12 +1,15 @@ """Implementation of __array_function__ overrides from NEP-18.""" import collections import functools +import inspect -from .._utils import set_module -from .._utils._inspect import getargspec from numpy._core._multiarray_umath import ( - add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) - + _ArrayFunctionDispatcher, + _get_implementing_args, + add_docstring, +) +from numpy._utils import set_module # noqa: F401 +from numpy._utils._inspect import getargspec ARRAY_FUNCTIONS = set() @@ -154,11 +157,15 @@ def decorator(implementation): "argument and a keyword-only argument. " f"{implementation} does not seem to comply.") - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) + if docs_from_dispatcher and dispatcher.__doc__ is not None: + doc = inspect.cleandoc(dispatcher.__doc__) + add_docstring(implementation, doc) public_api = _ArrayFunctionDispatcher(dispatcher, implementation) - public_api = functools.wraps(implementation)(public_api) + functools.update_wrapper(public_api, implementation) + + if not verify and not getattr(implementation, "__text_signature__", None): + public_api.__signature__ = inspect.signature(dispatcher) if module is not None: public_api.__module__ = module diff --git a/blimgui/dist64/numpy/_core/overrides.pyi b/blimgui/dist64/numpy/_core/overrides.pyi index 35e4ed3..98edf3b 100644 --- a/blimgui/dist64/numpy/_core/overrides.pyi +++ b/blimgui/dist64/numpy/_core/overrides.pyi @@ -1,13 +1,13 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple +from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar -from typing_extensions import ParamSpec, TypeVar - -from numpy._typing import 
_SupportsArrayFunc +from numpy._utils import set_module as set_module _T = TypeVar("_T") _Tss = ParamSpec("_Tss") -_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) +_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) + +_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] ### @@ -20,14 +20,11 @@ class ArgSpec(NamedTuple): keywords: str | None defaults: tuple[Any, ...] -def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... +def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... # -def verify_matching_signatures( - implementation: Callable[_Tss, object], - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], -) -> None: ... +def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... # NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks @@ -35,11 +32,11 @@ def verify_matching_signatures( # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. def array_function_dispatch( - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + dispatcher: _Dispatcher[_Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncT], _FuncT]: ... +) -> Callable[[_FuncLikeT], _FuncLikeT]: ... # def array_function_from_dispatcher( @@ -47,4 +44,4 @@ def array_function_from_dispatcher( module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... 
+) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... diff --git a/blimgui/dist64/numpy/_core/printoptions.py b/blimgui/dist64/numpy/_core/printoptions.py index 0e232d3..515e7e3 100644 --- a/blimgui/dist64/numpy/_core/printoptions.py +++ b/blimgui/dist64/numpy/_core/printoptions.py @@ -29,4 +29,4 @@ } format_options = ContextVar( - "format_options", default=default_format_options_dict.copy()) + "format_options", default=default_format_options_dict) diff --git a/blimgui/dist64/numpy/_core/records.py b/blimgui/dist64/numpy/_core/records.py index acb9584..fe5b39f 100644 --- a/blimgui/dist64/numpy/_core/records.py +++ b/blimgui/dist64/numpy/_core/records.py @@ -6,9 +6,9 @@ from collections import Counter from contextlib import nullcontext -from .._utils import set_module -from . import numeric as sb -from . import numerictypes as nt +from numpy._utils import set_module + +from . import numeric as sb, numerictypes as nt from .arrayprint import _get_legacy_print_mode # All of the functions allow formats to be a dtype @@ -127,7 +127,7 @@ def _parseFormats(self, formats, aligned=False): if isinstance(formats, list): dtype = sb.dtype( [ - ('f{}'.format(i), format_) + (f'f{i}', format_) for i, format_ in enumerate(formats) ], aligned, @@ -153,7 +153,7 @@ def _setfieldnames(self, names, titles): elif isinstance(names, str): names = names.split(',') else: - raise NameError("illegal input names %s" % repr(names)) + raise NameError(f"illegal input names {repr(names)}") self._names = [n.strip() for n in names[:self._nfields]] else: @@ -168,7 +168,7 @@ def _setfieldnames(self, names, titles): # check for redundant names _dup = find_duplicate(self._names) if _dup: - raise ValueError("Duplicate field names: %s" % _dup) + raise ValueError(f"Duplicate field names: {_dup}") if titles: self._titles = [n.strip() for n in titles[:self._nfields]] @@ -228,28 +228,25 @@ def __getattribute__(self, attr): try: dt = obj.dtype except AttributeError: - #happens if field is Object type + # 
happens if field is Object type return obj if dt.names is not None: return obj.view((self.__class__, obj.dtype)) return obj else: - raise AttributeError("'record' object has no " - "attribute '%s'" % attr) + raise AttributeError(f"'record' object has no attribute '{attr}'") def __setattr__(self, attr, val): if attr in ('setfield', 'getfield', 'dtype'): - raise AttributeError("Cannot set '%s' attribute" % attr) + raise AttributeError(f"Cannot set '{attr}' attribute") fielddict = nt.void.__getattribute__(self, 'dtype').fields res = fielddict.get(attr, None) if res: return self.setfield(val, *res[:2]) + elif getattr(self, attr, None): + return nt.void.__setattr__(self, attr, val) else: - if getattr(self, attr, None): - return nt.void.__setattr__(self, attr, val) - else: - raise AttributeError("'record' object has no " - "attribute '%s'" % attr) + raise AttributeError(f"'record' object has no attribute '{attr}'") def __getitem__(self, indx): obj = nt.void.__getitem__(self, indx) @@ -428,7 +425,7 @@ def __getattribute__(self, attr): try: res = fielddict[attr][:2] except (TypeError, KeyError) as e: - raise AttributeError("recarray has no attribute %s" % attr) from e + raise AttributeError(f"recarray has no attribute {attr}") from e obj = self.getfield(*res) # At this point obj will always be a recarray, since (see @@ -481,7 +478,7 @@ def __setattr__(self, attr, val): res = fielddict[attr][:2] except (TypeError, KeyError) as e: raise AttributeError( - "record array has no attribute %s" % attr + f"record array has no attribute {attr}" ) from e return self.setfield(val, *res) @@ -531,9 +528,9 @@ def __repr__(self): self, separator=', ', prefix=prefix, suffix=',') else: # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(self.shape),) + lst = f"[], shape={repr(self.shape)}" - lf = '\n'+' '*len(prefix) + lf = '\n' + ' ' * len(prefix) if _get_legacy_print_mode() <= 113: lf = ' ' + lf # trailing space return fmt % (lst, lf, repr_dtype) diff --git 
a/blimgui/dist64/numpy/_core/records.pyi b/blimgui/dist64/numpy/_core/records.pyi index 4b26bcf..e87211c 100644 --- a/blimgui/dist64/numpy/_core/records.pyi +++ b/blimgui/dist64/numpy/_core/records.pyi @@ -1,14 +1,31 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false +from _typeshed import Incomplete, StrOrBytesPath from collections.abc import Iterable, Sequence -from typing import Any, ClassVar, Literal, Protocol, SupportsIndex, TypeAlias, overload, type_check_only - -from _typeshed import StrOrBytesPath -from typing_extensions import TypeVar +from typing import ( + Any, + ClassVar, + Literal, + Protocol, + SupportsIndex, + TypeAlias, + overload, + type_check_only, +) +from typing_extensions import Buffer, TypeVar import numpy as np -from numpy import _ByteOrder, _OrderKACF, _SupportsBuffer -from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLikeVoid_co, _NestedSequence, _ShapeLike +from numpy import _ByteOrder, _OrderKACF +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLikeVoid_co, + _NestedSequence, + _Shape, + _ShapeLike, +) __all__ = [ "array", @@ -23,11 +40,11 @@ __all__ = [ ] _T = TypeVar("_T") -_SCT = TypeVar("_SCT", bound=np.generic) -_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], covariant=True) -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_RecArray: TypeAlias = recarray[Any, np.dtype[_SCT]] +_RecArray: TypeAlias = recarray[_AnyShape, np.dtype[_ScalarT]] @type_check_only class _SupportsReadInto(Protocol): @@ -38,29 +55,35 @@ class _SupportsReadInto(Protocol): ### # exported in `numpy.rec` -class record(np.void): - def __getattribute__(self, attr: str) -> Any: ... - def __setattr__(self, attr: str, val: ArrayLike) -> None: ... 
+class record(np.void): # type: ignore[misc] + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" + def pprint(self) -> str: ... + + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + @overload - def __getitem__(self, key: str | SupportsIndex) -> Any: ... + def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... @overload - def __getitem__(self, key: list[str]) -> record: ... + def __getitem__(self, key: list[str], /) -> record: ... # exported in `numpy.rec` class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): - __name__: ClassVar[Literal["record"]] = "record" - __module__: Literal["numpy"] = "numpy" + __name__: ClassVar[Literal["recarray"]] = "recarray" + __module__: Literal["numpy.rec"] = "numpy.rec" + @overload def __new__( subtype, shape: _ShapeLike, dtype: None = None, - buf: _SupportsBuffer | None = None, + buf: Buffer | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, byteorder: _ByteOrder | None = None, @@ -71,8 +94,8 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): def __new__( subtype, shape: _ShapeLike, - dtype: DTypeLike, - buf: _SupportsBuffer | None = None, + dtype: DTypeLike | None, + buf: Buffer | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, formats: None = None, @@ -81,16 +104,18 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): byteorder: None = None, aligned: Literal[False] = False, order: _OrderKACF = "C", - ) -> _RecArray[Any]: ... - def __array_finalize__(self, /, obj: object) -> None: ... + ) -> _RecArray[Incomplete]: ... + def __getattribute__(self, attr: str, /) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + def __array_finalize__(self, /, obj: object) -> None: ... 
+ # @overload def field(self, /, attr: int | str, val: ArrayLike) -> None: ... @overload - def field(self, /, attr: int | str, val: None = None) -> Any: ... + def field(self, /, attr: int | str, val: None = None) -> Incomplete: ... # exported in `numpy.rec` class format_parser: @@ -98,7 +123,7 @@ class format_parser: def __init__( self, /, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None, titles: str | Sequence[str] | None, aligned: bool = False, @@ -123,7 +148,7 @@ def fromarrays( dtype: None = None, shape: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -147,7 +172,7 @@ def fromrecords( dtype: None = None, shape: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -157,8 +182,8 @@ def fromrecords( # exported in `numpy.rec` @overload def fromstring( - datastring: _SupportsBuffer, - dtype: DTypeLike, + datastring: Buffer, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, formats: None = None, @@ -169,12 +194,12 @@ def fromstring( ) -> _RecArray[record]: ... 
@overload def fromstring( - datastring: _SupportsBuffer, + datastring: Buffer, dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -185,7 +210,7 @@ def fromstring( @overload def fromfile( fd: StrOrBytesPath | _SupportsReadInto, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, formats: None = None, @@ -201,7 +226,7 @@ def fromfile( shape: _ShapeLike | None = None, offset: int = 0, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -211,7 +236,7 @@ def fromfile( # exported in `numpy.rec` @overload def array( - obj: _SCT | NDArray[_SCT], + obj: _ScalarT | NDArray[_ScalarT], dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, @@ -222,11 +247,11 @@ def array( aligned: bool = False, byteorder: None = None, copy: bool = True, -) -> _RecArray[_SCT]: ... +) -> _RecArray[_ScalarT]: ... @overload def array( obj: ArrayLike, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -245,7 +270,7 @@ def array( offset: int = 0, strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -255,7 +280,7 @@ def array( @overload def array( obj: None, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -274,7 +299,7 @@ def array( shape: _ShapeLike, offset: int = 0, strides: tuple[int, ...] 
| None = None, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -284,7 +309,7 @@ def array( @overload def array( obj: _SupportsReadInto, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -303,7 +328,7 @@ def array( offset: int = 0, strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, diff --git a/blimgui/dist64/numpy/_core/shape_base.py b/blimgui/dist64/numpy/_core/shape_base.py index 89a2e94..cee26a5 100644 --- a/blimgui/dist64/numpy/_core/shape_base.py +++ b/blimgui/dist64/numpy/_core/shape_base.py @@ -5,10 +5,8 @@ import itertools import operator -from . import numeric as _nx -from . import overrides +from . import fromnumeric as _from_nx, numeric as _nx, overrides from .multiarray import array, asanyarray, normalize_axis_index -from . import fromnumeric as _from_nx array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -538,6 +536,7 @@ def unstack(x, /, *, axis=0): raise ValueError("Input array must be at least 1-d.") return tuple(_nx.moveaxis(x, axis, 0)) + # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. # Use getattr to protect against __array_function__ being disabled. @@ -551,7 +550,7 @@ def _block_format_index(index): """ Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. """ - idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) + idx_str = ''.join(f'[{i}]' for i in index if i is not None) return 'arrays' + idx_str @@ -586,20 +585,18 @@ def _block_check_depths_match(arrays, parent_index=[]): the choice of algorithm used using benchmarking wisdom. 
""" - if type(arrays) is tuple: + if isinstance(arrays, tuple): # not strictly necessary, but saves us from: # - more than one way to do things - no point treating tuples like # lists # - horribly confusing behaviour that results when tuples are # treated like ndarray raise TypeError( - '{} is a tuple. ' + f'{_block_format_index(parent_index)} is a tuple. ' 'Only lists can be used to arrange blocks, and np.block does ' - 'not allow implicit conversion from tuple to ndarray.'.format( - _block_format_index(parent_index) - ) + 'not allow implicit conversion from tuple to ndarray.' ) - elif type(arrays) is list and len(arrays) > 0: + elif isinstance(arrays, list) and len(arrays) > 0: idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) for i, arr in enumerate(arrays)) @@ -610,19 +607,16 @@ def _block_check_depths_match(arrays, parent_index=[]): max_arr_ndim = ndim if len(index) != len(first_index): raise ValueError( - "List depths are mismatched. First element was at depth " - "{}, but there is an element at depth {} ({})".format( - len(first_index), - len(index), - _block_format_index(index) - ) + "List depths are mismatched. 
First element was at " + f"depth {len(first_index)}, but there is an element at " + f"depth {len(index)} ({_block_format_index(index)})" ) # propagate our flag that indicates an empty list at the bottom if index[-1] is None: first_index = index return first_index, max_arr_ndim, final_size - elif type(arrays) is list and len(arrays) == 0: + elif isinstance(arrays, list) and len(arrays) == 0: # We've 'bottomed out' on an empty list return parent_index + [None], 0, 0 else: @@ -682,14 +676,14 @@ def _concatenate_shapes(shapes, axis): # Take a shape, any shape first_shape = shapes[0] first_shape_pre = first_shape[:axis] - first_shape_post = first_shape[axis+1:] + first_shape_post = first_shape[axis + 1:] if any(shape[:axis] != first_shape_pre or - shape[axis+1:] != first_shape_post for shape in shapes): + shape[axis + 1:] != first_shape_post for shape in shapes): raise ValueError( - 'Mismatched array shapes in block along axis {}.'.format(axis)) + f'Mismatched array shapes in block along axis {axis}.') - shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:]) + shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1:]) offsets_at_axis = _accumulate(shape_at_axis) slice_prefixes = [(slice(start, end),) @@ -727,7 +721,7 @@ def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): """ if depth < max_depth: shapes, slices, arrays = zip( - *[_block_info_recursion(arr, max_depth, result_ndim, depth+1) + *[_block_info_recursion(arr, max_depth, result_ndim, depth + 1) for arr in arrays]) axis = result_ndim - max_depth + depth @@ -761,9 +755,9 @@ def _block(arrays, max_depth, result_ndim, depth=0): for details). 
""" if depth < max_depth: - arrs = [_block(arr, max_depth, result_ndim, depth+1) + arrs = [_block(arr, max_depth, result_ndim, depth + 1) for arr in arrays] - return _concatenate(arrs, axis=-(max_depth-depth)) + return _concatenate(arrs, axis=-(max_depth - depth)) else: # We've 'bottomed out' - arrays is either a scalar or an array # type(arrays) is not list @@ -774,7 +768,7 @@ def _block_dispatcher(arrays): # Use type(...) is list to match the behavior of np.block(), which special # cases list specifically rather than allowing for generic iterables or # tuple. Also, we know that list.__array_function__ will never exist. - if type(arrays) is list: + if isinstance(arrays, list): for subarrays in arrays: yield from _block_dispatcher(subarrays) else: @@ -966,9 +960,7 @@ def _block_setup(arrays): list_ndim = len(bottom_index) if bottom_index and bottom_index[-1] is None: raise ValueError( - 'List at {} cannot be empty'.format( - _block_format_index(bottom_index) - ) + f'List at {_block_format_index(bottom_index)} cannot be empty' ) result_ndim = max(arr_ndim, list_ndim) return arrays, list_ndim, result_ndim, final_size diff --git a/blimgui/dist64/numpy/_core/shape_base.pyi b/blimgui/dist64/numpy/_core/shape_base.pyi index 93f1b3f..d153d98 100644 --- a/blimgui/dist64/numpy/_core/shape_base.pyi +++ b/blimgui/dist64/numpy/_core/shape_base.pyi @@ -15,19 +15,21 @@ __all__ = [ "vstack", ] -_SCT = TypeVar("_SCT", bound=generic) -_SCT1 = TypeVar("_SCT1", bound=generic) -_SCT2 = TypeVar("_SCT2", bound=generic) +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=generic) _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) ### +# keep in sync with `numpy.ma.extras.atleast_1d` @overload -def atleast_1d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... 
@overload -def atleast_1d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... +def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... @overload -def atleast_1d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... +def atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... @overload def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -35,13 +37,13 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.atleast_2d` @overload -def atleast_2d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... +def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... @overload -def atleast_2d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... +def atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... @overload def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -49,13 +51,13 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.atleast_3d` @overload -def atleast_3d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... 
@overload -def atleast_3d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... +def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... @overload -def atleast_3d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... +def atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... @overload def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -63,77 +65,82 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# used by numpy.lib._shape_base_impl +def _arrays_for_stack_dispatcher(arrays: Sequence[_T]) -> tuple[_T, ...]: ... + +# keep in sync with `numpy.ma.extras.vstack` @overload def vstack( - tup: Sequence[_ArrayLike[_SCT]], + tup: Sequence[_ArrayLike[_ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_SCT], - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.hstack` @overload def hstack( - tup: Sequence[_ArrayLike[_SCT]], + tup: Sequence[_ArrayLike[_ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... 
@overload def hstack( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_SCT], - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.stack` @overload def stack( - arrays: Sequence[_ArrayLike[_SCT]], - axis: SupportsIndex = ..., - out: None = ..., + arrays: Sequence[_ArrayLike[_ScalarT]], + axis: SupportsIndex = 0, + out: None = None, *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: _DTypeLike[_SCT], - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload def stack( @@ -156,20 +163,20 @@ def stack( @overload def unstack( - array: _ArrayLike[_SCT], + array: _ArrayLike[_ScalarT], /, *, - axis: int = ..., -) -> tuple[NDArray[_SCT], ...]: ... + axis: int = 0, +) -> tuple[NDArray[_ScalarT], ...]: ... @overload def unstack( array: ArrayLike, /, *, - axis: int = ..., + axis: int = 0, ) -> tuple[NDArray[Any], ...]: ... @overload -def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +def block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... 
@overload def block(arrays: ArrayLike) -> NDArray[Any]: ... diff --git a/blimgui/dist64/numpy/_core/strings.py b/blimgui/dist64/numpy/_core/strings.py index e0f4e7b..ed858ac 100644 --- a/blimgui/dist64/numpy/_core/strings.py +++ b/blimgui/dist64/numpy/_core/strings.py @@ -3,49 +3,58 @@ operations. """ +import functools import sys + import numpy as np from numpy import ( - equal, not_equal, less, less_equal, greater, greater_equal, - add, multiply as _multiply_ufunc, + add, + equal, + greater, + greater_equal, + less, + less_equal, + multiply as _multiply_ufunc, + not_equal, ) from numpy._core.multiarray import _vec_string -from numpy._core.overrides import set_module +from numpy._core.overrides import array_function_dispatch, set_module from numpy._core.umath import ( + _center, + _expandtabs, + _expandtabs_length, + _ljust, + _lstrip_chars, + _lstrip_whitespace, + _partition, + _partition_index, + _replace, + _rjust, + _rpartition, + _rpartition_index, + _rstrip_chars, + _rstrip_whitespace, + _slice, + _strip_chars, + _strip_whitespace, + _zfill, + count as _count_ufunc, + endswith as _endswith_ufunc, + find as _find_ufunc, + index as _index_ufunc, + isalnum, isalpha, + isdecimal, isdigit, - isspace, - isalnum, islower, - isupper, - istitle, - isdecimal, isnumeric, - str_len, - find as _find_ufunc, + isspace, + istitle, + isupper, rfind as _rfind_ufunc, - index as _index_ufunc, rindex as _rindex_ufunc, - count as _count_ufunc, startswith as _startswith_ufunc, - endswith as _endswith_ufunc, - _lstrip_whitespace, - _lstrip_chars, - _rstrip_whitespace, - _rstrip_chars, - _strip_whitespace, - _strip_chars, - _replace, - _expandtabs_length, - _expandtabs, - _center, - _ljust, - _rjust, - _zfill, - _partition, - _partition_index, - _rpartition, - _rpartition_index, + str_len, ) @@ -68,7 +77,7 @@ def _override___module__(): "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", "rstrip", 
"strip", "replace", "expandtabs", "center", "ljust", "rjust", - "zfill", "partition", "rpartition", + "zfill", "partition", "rpartition", "slice", # _vec_string - Will gradually become ufuncs as well "upper", "lower", "swapcase", "capitalize", "title", @@ -83,6 +92,9 @@ def _override___module__(): MAX = np.iinfo(np.int64).max +array_function_dispatch = functools.partial( + array_function_dispatch, module='numpy.strings') + def _get_num_chars(a): """ @@ -129,7 +141,12 @@ def _clean_args(*args): return newargs +def _multiply_dispatcher(a, i): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_multiply_dispatcher) def multiply(a, i): """ Return (a * i), that is string multiple concatenation, @@ -185,7 +202,7 @@ def multiply(a, i): # Ensure we can do a_len * i without overflow. if np.any(a_len > sys.maxsize / np.maximum(i, 1)): - raise MemoryError("repeated string is too long") + raise OverflowError("Overflow encountered in string multiply") buffersizes = a_len * i out_dtype = f"{a.dtype.char}{buffersizes.max()}" @@ -193,7 +210,12 @@ def multiply(a, i): return _multiply_ufunc(a, i, out=out) +def _mod_dispatcher(a, values): + return (a, values) + + @set_module("numpy.strings") +@array_function_dispatch(_mod_dispatcher) def mod(a, values): """ Return (a % i), that is pre-Python 2.6 string formatting @@ -506,7 +528,12 @@ def endswith(a, suffix, start=0, end=None): return _endswith_ufunc(a, suffix, start, end) +def _code_dispatcher(a, encoding=None, errors=None): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_code_dispatcher) def decode(a, encoding=None, errors=None): r""" Calls :meth:`bytes.decode` element-wise. @@ -555,6 +582,7 @@ def decode(a, encoding=None, errors=None): @set_module("numpy.strings") +@array_function_dispatch(_code_dispatcher) def encode(a, encoding=None, errors=None): """ Calls :meth:`str.encode` element-wise. 
@@ -599,7 +627,12 @@ def encode(a, encoding=None, errors=None): np.bytes_(b'')) +def _expandtabs_dispatcher(a, tabsize=None): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_expandtabs_dispatcher) def expandtabs(a, tabsize=8): """ Return a copy of each string element where all tab characters are @@ -651,7 +684,12 @@ def expandtabs(a, tabsize=8): return _expandtabs(a, tabsize, out=out) +def _just_dispatcher(a, width, fillchar=None): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_just_dispatcher) def center(a, width, fillchar=' '): """ Return a copy of `a` with its elements centered in a string of @@ -720,6 +758,7 @@ def center(a, width, fillchar=' '): @set_module("numpy.strings") +@array_function_dispatch(_just_dispatcher) def ljust(a, width, fillchar=' '): """ Return an array with the elements of `a` left-justified in a @@ -784,6 +823,7 @@ def ljust(a, width, fillchar=' '): @set_module("numpy.strings") +@array_function_dispatch(_just_dispatcher) def rjust(a, width, fillchar=' '): """ Return an array with the elements of `a` right-justified in a @@ -847,7 +887,12 @@ def rjust(a, width, fillchar=' '): return _rjust(a, width, fillchar, out=out) +def _zfill_dispatcher(a, width): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_zfill_dispatcher) def zfill(a, width): """ Return the numeric string left-filled with zeros. A leading @@ -1032,7 +1077,12 @@ def strip(a, chars=None): return _strip_chars(a, chars) +def _unary_op_dispatcher(a): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def upper(a): """ Return an array with the elements converted to uppercase. @@ -1070,6 +1120,7 @@ def upper(a): @set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def lower(a): """ Return an array with the elements converted to lowercase. 
@@ -1107,6 +1158,7 @@ def lower(a): @set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def swapcase(a): """ Return element-wise a copy of the string with @@ -1147,6 +1199,7 @@ def swapcase(a): @set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def capitalize(a): """ Return a copy of ``a`` with only the first character of each element @@ -1187,6 +1240,7 @@ def capitalize(a): @set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def title(a): """ Return element-wise title cased version of string or unicode. @@ -1228,7 +1282,12 @@ def title(a): return _vec_string(a_arr, a_arr.dtype, 'title') +def _replace_dispatcher(a, old, new, count=None): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_replace_dispatcher) def replace(a, old, new, count=-1): """ For each element in ``a``, return a copy of the string with @@ -1280,8 +1339,8 @@ def replace(a, old, new, count=-1): return _replace(arr, old, new, count) a_dt = arr.dtype - old = old.astype(old_dtype if old_dtype else a_dt, copy=False) - new = new.astype(new_dtype if new_dtype else a_dt, copy=False) + old = old.astype(old_dtype or a_dt, copy=False) + new = new.astype(new_dtype or a_dt, copy=False) max_int64 = np.iinfo(np.int64).max counts = _count_ufunc(arr, old, 0, max_int64) counts = np.where(count < 0, counts, np.minimum(counts, count)) @@ -1292,6 +1351,11 @@ def replace(a, old, new, count=-1): return _replace(arr, old, new, counts, out=out) +def _join_dispatcher(sep, seq): + return (sep, seq) + + +@array_function_dispatch(_join_dispatcher) def _join(sep, seq): """ Return a string which is the concatenation of the strings in the @@ -1328,6 +1392,11 @@ def _join(sep, seq): _vec_string(sep, np.object_, 'join', (seq,)), seq) +def _split_dispatcher(a, sep=None, maxsplit=None): + return (a,) + + +@array_function_dispatch(_split_dispatcher) def _split(a, sep=None, maxsplit=None): """ For each element in `a`, return a list of 
the words in the @@ -1372,6 +1441,7 @@ def _split(a, sep=None, maxsplit=None): a, np.object_, 'split', [sep] + _clean_args(maxsplit)) +@array_function_dispatch(_split_dispatcher) def _rsplit(a, sep=None, maxsplit=None): """ For each element in `a`, return a list of the words in the @@ -1417,6 +1487,11 @@ def _rsplit(a, sep=None, maxsplit=None): a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit)) +def _splitlines_dispatcher(a, keepends=None): + return (a,) + + +@array_function_dispatch(_splitlines_dispatcher) def _splitlines(a, keepends=None): """ For each element in `a`, return a list of the lines in the @@ -1454,7 +1529,12 @@ def _splitlines(a, keepends=None): a, np.object_, 'splitlines', _clean_args(keepends)) +def _partition_dispatcher(a, sep): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_partition_dispatcher) def partition(a, sep): """ Partition each element in ``a`` around ``sep``. @@ -1523,6 +1603,7 @@ def partition(a, sep): @set_module("numpy.strings") +@array_function_dispatch(_partition_dispatcher) def rpartition(a, sep): """ Partition (split) each element around the right-most separator. @@ -1591,7 +1672,12 @@ def rpartition(a, sep): a, sep, pos, out=(out["f0"], out["f1"], out["f2"])) +def _translate_dispatcher(a, table, deletechars=None): + return (a,) + + @set_module("numpy.strings") +@array_function_dispatch(_translate_dispatcher) def translate(a, table, deletechars=None): """ For each element in `a`, return a copy of the string where all @@ -1639,3 +1725,89 @@ def translate(a, table, deletechars=None): 'translate', [table] + _clean_args(deletechars) ) + +@set_module("numpy.strings") +def slice(a, start=None, stop=np._NoValue, step=None, /): + """ + Slice the strings in `a` by slices specified by `start`, `stop`, `step`. + Like in the regular Python `slice` object, if only `start` is + specified then it is interpreted as the `stop`. 
+ + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array + + start : None, an integer or an array of integers + The start of the slice, broadcasted to `a`'s shape + + stop : None, an integer or an array of integers + The end of the slice, broadcasted to `a`'s shape + + step : None, an integer or an array of integers + The step for the slice, broadcasted to `a`'s shape + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type + + Examples + -------- + >>> import numpy as np + >>> a = np.array(['hello', 'world']) + >>> np.strings.slice(a, 2) + array(['he', 'wo'], dtype='>> np.strings.slice(a, 2, None) + array(['llo', 'rld'], dtype='>> np.strings.slice(a, 1, 5, 2) + array(['el', 'ol'], dtype='>> np.strings.slice(a, np.array([1, 2]), np.array([4, 5])) + array(['ell', 'rld'], dtype='>> b = np.array(['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'], + ... dtype=np.dtypes.StringDType()) + >>> np.strings.slice(b, -2) + array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType()) + + >>> np.strings.slice(b, -2, None) + array(['ld', 'με', '世界', ' 🌍'], dtype=StringDType()) + + >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3]) + array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType()) + + >>> np.strings.slice(b, None, None, -1) + array(['dlrow olleh', 'εμσόκ υοσ αιεγ', '界世好你', '🌍 👋'], + dtype=StringDType()) + + """ + # Just like in the construction of a regular slice object, if only start + # is specified then start will become stop, see logic in slice_new. 
+ if stop is np._NoValue: + stop = start + start = None + + # adjust start, stop, step to be integers, see logic in PySlice_Unpack + if step is None: + step = 1 + step = np.asanyarray(step) + if not np.issubdtype(step.dtype, np.integer): + raise TypeError(f"unsupported type {step.dtype} for operand 'step'") + if np.any(step == 0): + raise ValueError("slice step cannot be zero") + + if start is None: + start = np.where(step < 0, np.iinfo(np.intp).max, 0) + + if stop is None: + stop = np.where(step < 0, np.iinfo(np.intp).min, np.iinfo(np.intp).max) + + return _slice(a, start, stop, step) diff --git a/blimgui/dist64/numpy/_core/strings.pyi b/blimgui/dist64/numpy/_core/strings.pyi index 69654e3..c8f8f8a 100644 --- a/blimgui/dist64/numpy/_core/strings.pyi +++ b/blimgui/dist64/numpy/_core/strings.pyi @@ -1,14 +1,15 @@ -from typing import Any, overload, TypeAlias +from typing import TypeAlias, overload import numpy as np +from numpy._globals import _NoValueType from numpy._typing import ( NDArray, - _ArrayLikeStr_co as U_co, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, _ArrayLikeBytes_co as S_co, _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, _ArrayLikeString_co as T_co, - _ArrayLikeAnyString_co as UST_co, - _Shape, _SupportsArray, ) @@ -58,11 +59,12 @@ __all__ = [ "translate", "upper", "zfill", + "slice", ] -_StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType] +_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] _StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | _StringDTypeArray +_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @@ -149,154 +151,154 @@ def str_len(x: UST_co) -> NDArray[np.int_]: ... 
def find( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... 
@overload def count( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def startswith( a: U_co, prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: S_co, prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: T_co, prefix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: U_co, suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: S_co, suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: T_co, suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def decode( @@ -311,13 +313,13 @@ def encode( ) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[np.str_]: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ... +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... 
+def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... @overload -def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @@ -432,28 +434,28 @@ def replace( a: U_co, old: U_co, new: U_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.str_]: ... @overload def replace( a: S_co, old: S_co, new: S_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.bytes_]: ... @overload def replace( a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeArray: ... @overload def replace( a: T_co, old: T_co, new: T_co, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeOrUnicodeArray: ... @overload @@ -498,3 +500,37 @@ def translate( table: str, deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice( + a: U_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.str_]: ... +@overload +def slice( + a: S_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> _StringDTypeOrUnicodeArray: ... 
diff --git a/blimgui/dist64/numpy/_core/tests/_locales.py b/blimgui/dist64/numpy/_core/tests/_locales.py index b6c3bd5..cec669d 100644 --- a/blimgui/dist64/numpy/_core/tests/_locales.py +++ b/blimgui/dist64/numpy/_core/tests/_locales.py @@ -1,8 +1,8 @@ """Provide class for testing in French locale """ -import sys import locale +import sys import pytest diff --git a/blimgui/dist64/numpy/_core/tests/_natype.py b/blimgui/dist64/numpy/_core/tests/_natype.py index d5ab47c..f8d2561 100644 --- a/blimgui/dist64/numpy/_core/tests/_natype.py +++ b/blimgui/dist64/numpy/_core/tests/_natype.py @@ -8,16 +8,15 @@ import numpy as np + def _create_binary_propagating_op(name, is_divmod=False): is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"] def method(self, other): if ( other is pd_NA - or isinstance(other, (str, bytes)) - or isinstance(other, (numbers.Number, np.bool)) - or isinstance(other, np.ndarray) - and not other.shape + or isinstance(other, (str, bytes, numbers.Number, np.bool)) + or (isinstance(other, np.ndarray) and not other.shape) ): # Need the other.shape clause to handle NumPy scalars, # since we do a setitem on `out` below, which @@ -75,8 +74,7 @@ def __bool__(self): raise TypeError("boolean value of NA is ambiguous") def __hash__(self): - exponent = 31 if is_32bit else 61 - return 2**exponent - 1 + return 2**61 - 1 def __reduce__(self): return "pd_NA" @@ -115,33 +113,6 @@ def __reduce__(self): __abs__ = _create_unary_propagating_op("__abs__") __invert__ = _create_unary_propagating_op("__invert__") - # pow has special - def __pow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 0: - # returning positive is correct for +/- 0. 
- return type(other)(1) - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 0, other.dtype.type(1), pd_NA) - - return NotImplemented - - def __rpow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 1: - return other - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 1, other, pd_NA) - return NotImplemented - # Logical ops using Kleene logic def __and__(self, other): @@ -169,30 +140,5 @@ def __xor__(self, other): __rxor__ = __xor__ - __array_priority__ = 1000 - _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - types = self._HANDLED_TYPES + (NAType,) - for x in inputs: - if not isinstance(x, types): - return NotImplemented - - if method != "__call__": - raise ValueError(f"ufunc method '{method}' not supported for NA") - result = maybe_dispatch_ufunc_to_dunder_op( - self, ufunc, method, *inputs, **kwargs - ) - if result is NotImplemented: - # For a NumPy ufunc that's not a binop, like np.logaddexp - index = [i for i, x in enumerate(inputs) if x is pd_NA][0] - result = np.broadcast_arrays(*inputs)[index] - if result.ndim == 0: - result = result.item() - if ufunc.nout > 1: - result = (pd_NA,) * ufunc.nout - - return result - pd_NA = NAType() diff --git a/blimgui/dist64/numpy/_core/tests/examples/cython/checks.pyx b/blimgui/dist64/numpy/_core/tests/examples/cython/checks.pyx index 5ec8d59..dbbfb1c 100644 --- a/blimgui/dist64/numpy/_core/tests/examples/cython/checks.pyx +++ b/blimgui/dist64/numpy/_core/tests/examples/cython/checks.pyx @@ -244,10 +244,11 @@ def npyiter_has_multi_index(it: "nditer"): def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr): cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) - cdef cnp.NpyIter_GetMultiIndexFunc get_multi_index = \ + cdef cnp.NpyIter_GetMultiIndexFunc _get_multi_index = \ 
cnp.NpyIter_GetGetMultiIndex(cit, NULL) - cdef cnp.NpyIter_IterNextFunc iternext = \ + cdef cnp.NpyIter_IterNextFunc _iternext = \ cnp.NpyIter_GetIterNext(cit, NULL) + cnp.NpyIter_Deallocate(cit) return 1 @@ -277,6 +278,96 @@ def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr): arr[1].imag = arr[1].imag + 1 +def npystring_pack(arr): + cdef char *string = "Hello world" + cdef size_t size = 11 + + allocator = cnp.NpyString_acquire_allocator( + cnp.PyArray_DESCR(arr) + ) + + # copy string->packed_string, the pointer to the underlying array buffer + ret = cnp.NpyString_pack( + allocator, cnp.PyArray_DATA(arr), string, size, + ) + + cnp.NpyString_release_allocator(allocator) + return ret + + +def npystring_load(arr): + allocator = cnp.NpyString_acquire_allocator( + cnp.PyArray_DESCR(arr) + ) + + cdef cnp.npy_static_string sdata + sdata.size = 0 + sdata.buf = NULL + + cdef cnp.npy_packed_static_string *packed_string = cnp.PyArray_DATA(arr) + cdef int is_null = cnp.NpyString_load(allocator, packed_string, &sdata) + cnp.NpyString_release_allocator(allocator) + if is_null == -1: + raise ValueError("String unpacking failed.") + elif is_null == 1: + # String in the array buffer is the null string + return "" + else: + # Cython syntax for copying a c string to python bytestring: + # slice the char * by the length of the string + return sdata.buf[:sdata.size].decode('utf-8') + + +def npystring_pack_multiple(arr1, arr2): + cdef cnp.npy_string_allocator *allocators[2] + cdef cnp.PyArray_Descr *descrs[2] + descrs[0] = cnp.PyArray_DESCR(arr1) + descrs[1] = cnp.PyArray_DESCR(arr2) + + cnp.NpyString_acquire_allocators(2, descrs, allocators) + + # Write into the first element of each array + cdef int ret1 = cnp.NpyString_pack( + allocators[0], cnp.PyArray_DATA(arr1), "Hello world", 11, + ) + cdef int ret2 = cnp.NpyString_pack( + allocators[1], cnp.PyArray_DATA(arr2), "test this", 9, + ) + + # Write a null string into the last element + cdef cnp.npy_intp elsize = 
cnp.PyArray_ITEMSIZE(arr1) + cdef int ret3 = cnp.NpyString_pack_null( + allocators[0], + (cnp.PyArray_DATA(arr1) + 2*elsize), + ) + + cnp.NpyString_release_allocators(2, allocators) + if ret1 == -1 or ret2 == -1 or ret3 == -1: + return -1 + + return 0 + + +def npystring_allocators_other_types(arr1, arr2): + cdef cnp.npy_string_allocator *allocators[2] + cdef cnp.PyArray_Descr *descrs[2] + descrs[0] = cnp.PyArray_DESCR(arr1) + descrs[1] = cnp.PyArray_DESCR(arr2) + + cnp.NpyString_acquire_allocators(2, descrs, allocators) + + # None of the dtypes here are StringDType, so every allocator + # should be NULL upon acquisition. + cdef int ret = 0 + for allocator in allocators: + if allocator != NULL: + ret = -1 + break + + cnp.NpyString_release_allocators(2, allocators) + return ret + + def check_npy_uintp_type_enum(): # Regression test for gh-27890: cnp.NPY_UINTP was not defined. # Cython would fail to compile this before gh-27890 was fixed. diff --git a/blimgui/dist64/numpy/_core/tests/examples/cython/setup.py b/blimgui/dist64/numpy/_core/tests/examples/cython/setup.py index f8099d8..4a2c3db 100644 --- a/blimgui/dist64/numpy/_core/tests/examples/cython/setup.py +++ b/blimgui/dist64/numpy/_core/tests/examples/cython/setup.py @@ -3,13 +3,15 @@ for testing. 
""" -import Cython -import numpy as np -from numpy._utils import _pep440 +import os from distutils.core import setup + +import Cython from Cython.Build import cythonize from setuptools.extension import Extension -import os + +import numpy as np +from numpy._utils import _pep440 macros = [ ("NPY_NO_DEPRECATED_API", 0), diff --git a/blimgui/dist64/numpy/_core/tests/examples/limited_api/limited_api1.c b/blimgui/dist64/numpy/_core/tests/examples/limited_api/limited_api1.c index b0af584..c7e96d6 100644 --- a/blimgui/dist64/numpy/_core/tests/examples/limited_api/limited_api1.c +++ b/blimgui/dist64/numpy/_core/tests/examples/limited_api/limited_api1.c @@ -1,5 +1,3 @@ -#define Py_LIMITED_API 0x03060000 - #include #include #include diff --git a/blimgui/dist64/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/blimgui/dist64/numpy/_core/tests/examples/limited_api/limited_api_latest.c index ebf2ff5..3f71a6b 100644 --- a/blimgui/dist64/numpy/_core/tests/examples/limited_api/limited_api_latest.c +++ b/blimgui/dist64/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -1,11 +1,11 @@ -#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 - # error "Py_LIMITED_API not defined to Python major+minor version" -#endif - #include #include #include +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + static PyModuleDef moduledef = { .m_base = PyModuleDef_HEAD_INIT, .m_name = "limited_api_latest" diff --git a/blimgui/dist64/numpy/_core/tests/examples/limited_api/meson.build b/blimgui/dist64/numpy/_core/tests/examples/limited_api/meson.build index 4f66ae4..74eda80 100644 --- a/blimgui/dist64/numpy/_core/tests/examples/limited_api/meson.build +++ b/blimgui/dist64/numpy/_core/tests/examples/limited_api/meson.build @@ -1,4 +1,8 @@ -project('checks', 'c', 'cython') +project( + 'checks', + 'c', 'cython', + meson_version: '>=1.8.3', +) py = import('python').find_installation(pure: false) @@ -31,7 
+35,7 @@ py.extension_module( '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', ], include_directories: [npy_include_path], - limited_api: '3.6', + limited_api: '3.9', ) py.extension_module( @@ -55,5 +59,5 @@ py.extension_module( '-DCYTHON_LIMITED_API=1', ], include_directories: [npy_include_path], - limited_api: '3.7', + limited_api: '3.9', ) diff --git a/blimgui/dist64/numpy/_core/tests/examples/limited_api/setup.py b/blimgui/dist64/numpy/_core/tests/examples/limited_api/setup.py index 1a02a48..d1e4ee3 100644 --- a/blimgui/dist64/numpy/_core/tests/examples/limited_api/setup.py +++ b/blimgui/dist64/numpy/_core/tests/examples/limited_api/setup.py @@ -2,10 +2,12 @@ Build an example package using the limited Python C API. """ -import numpy as np -from setuptools import setup, Extension import os +from setuptools import Extension, setup + +import numpy as np + macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")] limited_api = Extension( diff --git a/blimgui/dist64/numpy/_core/tests/test__exceptions.py b/blimgui/dist64/numpy/_core/tests/test__exceptions.py index 7da56a7..0f83b11 100644 --- a/blimgui/dist64/numpy/_core/tests/test__exceptions.py +++ b/blimgui/dist64/numpy/_core/tests/test__exceptions.py @@ -5,6 +5,7 @@ import pickle import pytest + import numpy as np from numpy.exceptions import AxisError @@ -31,19 +32,19 @@ def test__size_to_string(self): assert f(1) == '1 bytes' assert f(1023) == '1023 bytes' assert f(Ki) == '1.00 KiB' - assert f(Ki+1) == '1.00 KiB' - assert f(10*Ki) == '10.0 KiB' - assert f(int(999.4*Ki)) == '999. KiB' - assert f(int(1023.4*Ki)) == '1023. KiB' - assert f(int(1023.5*Ki)) == '1.00 MiB' - assert f(Ki*Ki) == '1.00 MiB' + assert f(Ki + 1) == '1.00 KiB' + assert f(10 * Ki) == '10.0 KiB' + assert f(int(999.4 * Ki)) == '999. KiB' + assert f(int(1023.4 * Ki)) == '1023. 
KiB' + assert f(int(1023.5 * Ki)) == '1.00 MiB' + assert f(Ki * Ki) == '1.00 MiB' # 1023.9999 Mib should round to 1 GiB - assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB' - assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB' + assert f(int(Ki * Ki * Ki * 0.9999)) == '1.00 GiB' + assert f(Ki * Ki * Ki * Ki * Ki * Ki) == '1.00 EiB' # larger than sys.maxsize, adding larger prefixes isn't going to help # anyway. - assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB' + assert f(Ki * Ki * Ki * Ki * Ki * Ki * 123456) == '123456. EiB' def test__total_size(self): """ Test e._total_size """ diff --git a/blimgui/dist64/numpy/_core/tests/test_abc.py b/blimgui/dist64/numpy/_core/tests/test_abc.py index 85cba6d..a61036f 100644 --- a/blimgui/dist64/numpy/_core/tests/test_abc.py +++ b/blimgui/dist64/numpy/_core/tests/test_abc.py @@ -1,9 +1,9 @@ -from numpy.testing import assert_ - import numbers import numpy as np from numpy._core.numerictypes import sctypes +from numpy.testing import assert_ + class TestABC: def test_abstract(self): diff --git a/blimgui/dist64/numpy/_core/tests/test_api.py b/blimgui/dist64/numpy/_core/tests/test_api.py index 51e80a0..ac2f03f 100644 --- a/blimgui/dist64/numpy/_core/tests/test_api.py +++ b/blimgui/dist64/numpy/_core/tests/test_api.py @@ -1,13 +1,18 @@ import sys +import pytest + import numpy as np import numpy._core.umath as ncu from numpy._core._rational_tests import rational -import pytest +from numpy.lib import stride_tricks from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, assert_warns, - HAS_REFCOUNT - ) + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) def test_array_array(): @@ -56,7 +61,7 @@ def test_array_array(): np.ones((), dtype=np.float64)) assert_equal(np.array("1.0").dtype, U3) assert_equal(np.array("1.0", dtype=str).dtype, U3) - assert_equal(np.array("1.0", dtype=U2), np.array(str("1."))) + assert_equal(np.array("1.0", dtype=U2), np.array("1.")) assert_equal(np.array("1", 
dtype=U5), np.ones((), dtype=U5)) builtins = getattr(__builtins__, '__dict__', __builtins__) @@ -74,23 +79,23 @@ def test_array_array(): # test array interface a = np.array(100.0, dtype=np.float64) o = type("o", (object,), - dict(__array_interface__=a.__array_interface__)) + {"__array_interface__": a.__array_interface__}) assert_equal(np.array(o, dtype=np.float64), a) # test array_struct interface a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], dtype=[('f0', int), ('f1', float), ('f2', str)]) o = type("o", (object,), - dict(__array_struct__=a.__array_struct__)) - ## wasn't what I expected... is np.array(o) supposed to equal a ? - ## instead we get a array([...], dtype=">V18") + {"__array_struct__": a.__array_struct__}) + # wasn't what I expected... is np.array(o) supposed to equal a ? + # instead we get an array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) - # test array + # test __array__ def custom__array__(self, dtype=None, copy=None): return np.array(100.0, dtype=dtype, copy=copy) - o = type("o", (object,), dict(__array__=custom__array__))() + o = type("o", (object,), {"__array__": custom__array__})() assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) # test recursion @@ -152,6 +157,39 @@ def custom__array__(self, dtype=None, copy=None): assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), np.ones((10, 10), dtype=np.float64)) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test___array___refcount(): + class MyArray: + def __init__(self, dtype): + self.val = np.array(-1, dtype=dtype) + + def __array__(self, dtype=None, copy=None): + return self.val.__array__(dtype=dtype, copy=copy) + + # test all possible scenarios: + # dtype(none | same | different) x copy(true | false | none) + dt = np.dtype(np.int32) + old_refcount = sys.getrefcount(dt) + np.array(MyArray(dt)) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt) + 
assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt, copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + dt2 = np.dtype(np.int16) + old_refcount2 = sys.getrefcount(dt2) + np.array(MyArray(dt), dtype=dt2) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + np.array(MyArray(dt), dtype=dt2, copy=None) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + with pytest.raises(ValueError): + np.array(MyArray(dt), dtype=dt2, copy=False) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + + @pytest.mark.parametrize("array", [True, False]) def test_array_impossible_casts(array): # All builtin types can be forcibly cast, at least theoretically, @@ -228,21 +266,21 @@ class MyNDArray(np.ndarray): # Make sure converting from string object to fixed length string # does not truncate. - a = np.array([b'a'*100], dtype='O') + a = np.array([b'a' * 100], dtype='O') b = a.astype('S') assert_equal(a, b) assert_equal(b.dtype, np.dtype('S100')) - a = np.array(['a'*100], dtype='O') + a = np.array(['a' * 100], dtype='O') b = a.astype('U') assert_equal(a, b) assert_equal(b.dtype, np.dtype('U100')) # Same test as above but for strings shorter than 64 characters - a = np.array([b'a'*10], dtype='O') + a = np.array([b'a' * 10], dtype='O') b = a.astype('S') assert_equal(a, b) assert_equal(b.dtype, np.dtype('S10')) - a = np.array(['a'*10], dtype='O') + a = np.array(['a' * 10], dtype='O') b = a.astype('U') assert_equal(a, b) assert_equal(b.dtype, np.dtype('U10')) @@ -309,7 +347,7 @@ def test_object_array_astype_to_void(): def test_array_astype_warning(t): # test ComplexWarning when casting from complex to float or int a = np.array(10, dtype=np.complex128) - assert_warns(np.exceptions.ComplexWarning, a.astype, t) + pytest.warns(np.exceptions.ComplexWarning, a.astype, t) @pytest.mark.parametrize(["dtype", "out_dtype"], [(np.bytes_, np.bool), @@ -335,12 +373,12 @@ def 
test_string_to_boolean_cast(dtype, out_dtype): [np.complex64, np.complex128, np.clongdouble]) def test_string_to_complex_cast(str_type, scalar_type): value = scalar_type(b"1+3j") - assert scalar_type(value) == 1+3j - assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j - assert np.array(value).astype(scalar_type)[()] == 1+3j + assert scalar_type(value) == 1 + 3j + assert np.array([value], dtype=object).astype(scalar_type)[()] == 1 + 3j + assert np.array(value).astype(scalar_type)[()] == 1 + 3j arr = np.zeros(1, dtype=scalar_type) arr[0] = value - assert arr[0] == 1+3j + assert arr[0] == 1 + 3j @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) def test_none_to_nan_cast(dtype): @@ -441,8 +479,8 @@ def test_copyto_permut(): # test explicit overflow case pad = 500 l = [True] * pad + [True, True, True, True] - r = np.zeros(len(l)-pad) - d = np.ones(len(l)-pad) + r = np.zeros(len(l) - pad) + d = np.ones(len(l) - pad) mask = np.array(l)[pad:] np.copyto(r, d, where=mask[::-1]) @@ -552,8 +590,8 @@ def check_copy_result(x, y, ccontig, fcontig, strides=False): check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) def test_contiguous_flags(): - a = np.ones((4, 4, 1))[::2,:,:] - a.strides = a.strides[:2] + (-123,) + a = np.ones((4, 4, 1))[::2, :, :] + a = stride_tricks.as_strided(a, strides=a.strides[:2] + (-123,)) b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) def check_contig(a, ccontig, fcontig): @@ -585,11 +623,12 @@ def check_contig(a, ccontig, fcontig): def test_broadcast_arrays(): # Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + dtype = 'u4,u4,u4' + a = np.array([(1, 2, 3)], dtype=dtype) + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype) result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 
9)], dtype='u4,u4,u4')) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype=dtype)) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype)) @pytest.mark.parametrize(["shape", "fill_value", "expected_output"], [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), diff --git a/blimgui/dist64/numpy/_core/tests/test_array_api_info.py b/blimgui/dist64/numpy/_core/tests/test_array_api_info.py index 48abca0..401a12f 100644 --- a/blimgui/dist64/numpy/_core/tests/test_array_api_info.py +++ b/blimgui/dist64/numpy/_core/tests/test_array_api_info.py @@ -1,6 +1,7 @@ -import numpy as np import pytest +import numpy as np + info = np.__array_namespace_info__() diff --git a/blimgui/dist64/numpy/_core/tests/test_array_coercion.py b/blimgui/dist64/numpy/_core/tests/test_array_coercion.py index 100fa0f..6491229 100644 --- a/blimgui/dist64/numpy/_core/tests/test_array_coercion.py +++ b/blimgui/dist64/numpy/_core/tests/test_array_coercion.py @@ -12,10 +12,7 @@ import numpy as np import numpy._core._multiarray_umath as ncu from numpy._core._rational_tests import rational - -from numpy.testing import ( - assert_array_equal, assert_warns, IS_PYPY, IS_64BIT -) +from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal def arraylikes(): @@ -46,7 +43,7 @@ class _SequenceLike: def __len__(self): raise TypeError - def __getitem__(self): + def __getitem__(self, _, /): raise TypeError # Array-interface @@ -91,10 +88,10 @@ def scalar_instances(times=True, extended_precision=True, user_dtype=True): yield param(np.sqrt(np.longdouble(5)), id="longdouble") # Complex: - yield param(np.sqrt(np.complex64(2+3j)), id="complex64") - yield param(np.sqrt(np.complex128(2+3j)), id="complex128") + yield param(np.sqrt(np.complex64(2 + 3j)), id="complex64") + yield param(np.sqrt(np.complex128(2 + 3j)), id="complex128") if extended_precision: - yield param(np.sqrt(np.clongdouble(2+3j)), id="clongdouble") + yield param(np.sqrt(np.clongdouble(2 + 3j)), 
id="clongdouble") # Bool: # XFAIL: Bool should be added, but has some bad properties when it @@ -269,11 +266,6 @@ def test_scalar_coercion(self, scalar): # Ensure we have a full-precision number if available scalar = type(scalar)((scalar * 2)**0.5) - if type(scalar) is rational: - # Rational generally fails due to a missing cast. In the future - # object casts should automatically be defined based on `setitem`. - pytest.xfail("Rational to object cast is undefined currently.") - # Use casting from object: arr = np.array(scalar, dtype=object).astype(scalar.dtype) @@ -308,7 +300,7 @@ def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): scalar = scalar.values[0] if dtype.type == np.void: - if scalar.dtype.fields is not None and dtype.fields is None: + if scalar.dtype.fields is not None and dtype.fields is None: # Here, coercion to "V6" works, but the cast fails. # Since the types are identical, SETITEM takes care of # this, but has different rules than the cast. @@ -325,18 +317,18 @@ def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): cast = np.array(scalar).astype(dtype) except (TypeError, ValueError, RuntimeError): # coercion should also raise (error type may change) - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 np.array(scalar, dtype=dtype) if (isinstance(scalar, rational) and np.issubdtype(dtype, np.signedinteger)): return - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 np.array([scalar], dtype=dtype) # assignment should also raise res = np.zeros((), dtype=dtype) - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 res[()] = scalar return @@ -470,7 +462,6 @@ def test_coercion_assignment_datetime(self, val, unit, dtype): # the explicit cast fails: np.array(scalar).astype(dtype) - @pytest.mark.parametrize(["val", "unit"], [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) def test_coercion_assignment_timedelta(self, val, unit): @@ -599,6 
+590,7 @@ class TestBadSequences: def test_growing_list(self): # List to coerce, `mylist` will append to it during coercion obj = [] + class mylist(list): def __len__(self): obj.append([1, 2]) @@ -616,6 +608,7 @@ def __len__(self): def test_mutated_list(self): # List to coerce, `mylist` will mutate the first element obj = [] + class mylist(list): def __len__(self): obj[0] = [2, 3] # replace with a different list. @@ -629,12 +622,13 @@ def __len__(self): def test_replace_0d_array(self): # List to coerce, `mylist` will mutate the first element obj = [] + class baditem: def __len__(self): obj[0][0] = 2 # replace with a different list. raise ValueError("not actually a sequence!") - def __getitem__(self): + def __getitem__(self, _, /): pass # Runs into a corner case in the new code, the `array(2)` is cached @@ -718,6 +712,7 @@ def __array__(self, dtype=None, copy=None): assert arr[0] is ArrayLike @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_too_large_array_error_paths(self): """Test the error paths, including for memory leaks""" arr = np.array(0, dtype="uint8") @@ -755,7 +750,8 @@ def test_bad_array_like_bad_length(self, error): class BadSequence: def __len__(self): raise error - def __getitem__(self): + + def __getitem__(self, _, /): # must have getitem to be a Sequence return 1 @@ -845,7 +841,7 @@ class TestSpecialAttributeLookupFailure: class WeirdArrayLike: @property - def __array__(self, dtype=None, copy=None): + def __array__(self, dtype=None, copy=None): # noqa: PLR0206 raise RuntimeError("oops!") class WeirdArrayInterface: @@ -909,3 +905,24 @@ def test_empty_string(): assert_array_equal(res, b"") assert res.shape == (2, 10) assert res.dtype == "S1" + + +@pytest.mark.parametrize("dtype", ["S", "U", object]) +@pytest.mark.parametrize("res_dt,hug_val", + [("float16", "1e30"), ("float32", "1e200")]) +def test_string_to_float_coercion_errors(dtype, res_dt, hug_val): + # 
This test primarly tests setitem + val = np.array(["3M"], dtype=dtype)[0] # use the scalar + + with pytest.raises(ValueError): + np.array(val, dtype=res_dt) + + val = np.array([hug_val], dtype=dtype)[0] # use the scalar + + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): + np.array(val, dtype=res_dt) + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + np.array(val, dtype=res_dt) diff --git a/blimgui/dist64/numpy/_core/tests/test_array_interface.py b/blimgui/dist64/numpy/_core/tests/test_array_interface.py index 30b6666..c8cf748 100644 --- a/blimgui/dist64/numpy/_core/tests/test_array_interface.py +++ b/blimgui/dist64/numpy/_core/tests/test_array_interface.py @@ -1,7 +1,10 @@ import sys +import sysconfig + import pytest + import numpy as np -from numpy.testing import extbuild, IS_WASM, IS_EDITABLE +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture @@ -123,6 +126,8 @@ def get_module(tmp_path): pass # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") return extbuild.build_and_import_extension('array_interface_testing', functions, prologue=prologue, @@ -167,9 +172,8 @@ def __array_struct__(self): # share the data stderr.write(' ---- share data via the array interface protocol ---- \n') arr = np.array(buf, copy=False) - stderr.write('arr.__array_interface___ = %s\n' % ( - str(arr.__array_interface__))) - stderr.write('arr.base = %s\n' % (str(arr.base))) + stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n') + stderr.write(f'arr.base = {str(arr.base)}\n') stderr.write(' ---- OK!\n\n') # release the source of the shared data. 
this will not release the data @@ -188,7 +192,7 @@ def __array_struct__(self): # called then reading the values here may cause a SEGV and will be reported # as invalid reads by valgrind stderr.write(' ---- read shared data ---- \n') - stderr.write('arr = %s\n' % (str(arr))) + stderr.write(f'arr = {str(arr)}\n') stderr.write(' ---- OK!\n\n') # write to the shared buffer. If the shared data was prematurely deleted @@ -196,15 +200,14 @@ def __array_struct__(self): stderr.write(' ---- modify shared data ---- \n') arr *= multiplier expected_value *= multiplier - stderr.write('arr.__array_interface___ = %s\n' % ( - str(arr.__array_interface__))) - stderr.write('arr.base = %s\n' % (str(arr.base))) + stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n') + stderr.write(f'arr.base = {str(arr.base)}\n') stderr.write(' ---- OK!\n\n') # read the data. If the shared data was prematurely deleted this # will may cause a SEGV and valgrind will report invalid reads stderr.write(' ---- read modified shared data ---- \n') - stderr.write('arr = %s\n' % (str(arr))) + stderr.write(f'arr = {str(arr)}\n') stderr.write(' ---- OK!\n\n') # check that we got the expected data. If the PyCapsule destructor we diff --git a/blimgui/dist64/numpy/_core/tests/test_arraymethod.py b/blimgui/dist64/numpy/_core/tests/test_arraymethod.py index b64e3f5..5b78396 100644 --- a/blimgui/dist64/numpy/_core/tests/test_arraymethod.py +++ b/blimgui/dist64/numpy/_core/tests/test_arraymethod.py @@ -3,8 +3,6 @@ this is private API, but when added, public API may be added here. 
""" -from __future__ import annotations - import types from typing import Any @@ -53,8 +51,8 @@ class TestSimpleStridedCall: ValueError), # not 1-D (((np.ones(3, dtype="d"), np.ones(4, dtype="f")),), ValueError), # different length - (((np.frombuffer(b"\0x00"*3*2, dtype="d"), - np.frombuffer(b"\0x00"*3, dtype="f")),), + (((np.frombuffer(b"\0x00" * 3 * 2, dtype="d"), + np.frombuffer(b"\0x00" * 3, dtype="f")),), ValueError), # output not writeable ]) def test_invalid_arguments(self, args, error): diff --git a/blimgui/dist64/numpy/_core/tests/test_arrayobject.py b/blimgui/dist64/numpy/_core/tests/test_arrayobject.py index 2728941..ece842b 100644 --- a/blimgui/dist64/numpy/_core/tests/test_arrayobject.py +++ b/blimgui/dist64/numpy/_core/tests/test_arrayobject.py @@ -1,7 +1,9 @@ +import sys + import pytest import numpy as np -from numpy.testing import assert_array_equal +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_matrix_transpose_raises_error_for_1d(): @@ -73,3 +75,21 @@ def test_array_wrap(subclass_self, subclass_arr): # Non 0-D array can't be converted to scalar, so we ignore that arr1d = np.array([3], dtype=np.int8).view(subclass_arr) assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_cleanup_with_refs_non_contig(): + # Regression test, leaked the dtype (but also good for rest) + dtype = np.dtype("O,i") + obj = object() + expected_ref_dtype = sys.getrefcount(dtype) + expected_ref_obj = sys.getrefcount(obj) + proto = np.full((3, 4, 5, 6, 7), np.array((obj, 2), dtype=dtype)) + # Give array a non-trivial order to exercise more cleanup paths. 
+ arr = proto.transpose((2, 0, 3, 1, 4)).copy("K") + del proto, arr + + actual_ref_dtype = sys.getrefcount(dtype) + actual_ref_obj = sys.getrefcount(obj) + assert actual_ref_dtype == expected_ref_dtype + assert actual_ref_obj == actual_ref_dtype diff --git a/blimgui/dist64/numpy/_core/tests/test_arrayprint.py b/blimgui/dist64/numpy/_core/tests/test_arrayprint.py index dd19e42..adde407 100644 --- a/blimgui/dist64/numpy/_core/tests/test_arrayprint.py +++ b/blimgui/dist64/numpy/_core/tests/test_arrayprint.py @@ -1,17 +1,23 @@ -import sys import gc +import sys +import textwrap + +import pytest from hypothesis import given from hypothesis.extra import numpy as hynp -import pytest import numpy as np +from numpy._core.arrayprint import _typelessdata from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, - assert_raises_regex, IS_WASM - ) + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_equal, + assert_raises, + assert_raises_regex, +) from numpy.testing._private.utils import run_threaded -from numpy._core.arrayprint import _typelessdata -import textwrap + class TestArrayRepr: def test_nan_inf(self): @@ -33,7 +39,7 @@ class sub(np.ndarray): ' [3, 4]])') # two dimensional with flexible dtype - xstruct = np.ones((2,2), dtype=[('a', ' top1, since the test will also do the + # reverse dtype matching. Catch then warning if the comparison warns, + # i.e. 
np.int16(65535) < np.float16(6.55e4) + with warnings.catch_warnings(record=True): + warnings.simplefilter("always", RuntimeWarning) + if top2 >= top1: + # will be tested when the dtypes are reversed + return + # Happy path + arr1 = np.array([0] * 10, dtype=from_dtype) + arr2 = np.array([0] * 10, dtype=to_dtype) + arr1_astype = arr1.astype(to_dtype, casting='same_value') + assert_equal(arr1_astype, arr2, strict=True) + # Make it overflow, both aligned and unaligned + arr1[0] = top1 + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + with pytest.raises(ValueError): + # Casting float to float with overflow should raise + # RuntimeWarning (fperror) + # Casting float to int with overflow sometimes raises + # RuntimeWarning (fperror) + # Casting with overflow and 'same_value', should raise ValueError + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + arr1.astype(to_dtype, casting='same_value') + assert len(w) < 2 + with pytest.raises(ValueError): + # again, unaligned + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + unaligned.astype(to_dtype, casting='same_value') + assert len(w) < 2 + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + arr1 = np.arange(10, dtype=from_dtype) + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + arr2 = np.arange(10, dtype=to_dtype) + assert_array_equal(arr1.astype(to_dtype, casting='same_value'), arr2) + assert_array_equal(unaligned.astype(to_dtype, casting='same_value'), arr2) + + # Should raise, since values cannot round trip. 
Might warn too about + # FPE errors + arr1_66 = arr1 + 0.666 + unaligned_66 = unaligned + 0.66 + with pytest.raises(ValueError): + arr1_66.astype(to_dtype, casting='same_value') + with pytest.raises(ValueError): + unaligned_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int_scalar(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + s1 = np.array(10, dtype=from_dtype) + assert s1.astype(to_dtype, casting='same_value') == 10 + + # Should raise, since values cannot round trip + s1_66 = s1 + 0.666 + with pytest.raises(ValueError): + s1_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_naninf(self, value): + # These work, but may trigger FPE warnings on macOS + np.array([value], dtype=np.half).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.half).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.float32).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.half, casting='same_value') + np.array([value], dtype=np.complex64).astype(np.half, casting='same_value') + # These fail + with pytest.raises(ValueError): + np.array([value], dtype=np.half).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.complex64).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.float32).astype(np.int64, casting='same_value') + + 
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_complex(self): + arr = np.array([complex(1, 1)], dtype=np.cdouble) + # This works + arr.astype(np.complex64, casting='same_value') + # Casting with a non-zero imag part fails + with pytest.raises(ValueError): + arr.astype(np.float32, casting='same_value') + + def test_same_value_scalar(self): + i = np.array(123, dtype=np.int64) + f = np.array(123, dtype=np.float64) + assert i.astype(np.float64, casting='same_value') == f + assert f.astype(np.int64, casting='same_value') == f diff --git a/blimgui/dist64/numpy/_core/tests/test_conversion_utils.py b/blimgui/dist64/numpy/_core/tests/test_conversion_utils.py index 79807b5..a1e61f1 100644 --- a/blimgui/dist64/numpy/_core/tests/test_conversion_utils.py +++ b/blimgui/dist64/numpy/_core/tests/test_conversion_utils.py @@ -2,14 +2,12 @@ Tests for numpy/_core/src/multiarray/conversion_utils.c """ import re -import sys import pytest -import numpy as np import numpy._core._multiarray_tests as mt -from numpy._core.multiarray import CLIP, WRAP, RAISE -from numpy.testing import assert_warns, IS_PYPY +from numpy._core.multiarray import CLIP, RAISE, WRAP +from numpy.testing import assert_raises class StringConverterTestCase: @@ -19,13 +17,13 @@ class StringConverterTestCase: warn = True def _check_value_error(self, val): - pattern = r'\(got {}\)'.format(re.escape(repr(val))) + pattern = fr'\(got {re.escape(repr(val))}\)' with pytest.raises(ValueError, match=pattern) as exc: self.conv(val) def _check_conv_assert_warn(self, val, expected): if self.warn: - with assert_warns(DeprecationWarning) as exc: + with assert_raises(ValueError) as exc: assert self.conv(val) == expected else: assert self.conv(val) == expected @@ -123,6 +121,7 @@ def test_valid(self): class TestSearchsideConverter(StringConverterTestCase): """ Tests of PyArray_SearchsideConverter """ conv = mt.run_searchside_converter + def test_valid(self): self._check('left', 
'NPY_SEARCHLEFT') self._check('right', 'NPY_SEARCHRIGHT') @@ -151,6 +150,7 @@ def test_flatten_invalid_order(self): class TestClipmodeConverter(StringConverterTestCase): """ Tests of PyArray_ClipmodeConverter """ conv = mt.run_clipmode_converter + def test_valid(self): self._check('clip', 'NPY_CLIP') self._check('wrap', 'NPY_WRAP') @@ -172,9 +172,12 @@ def test_valid(self): self._check("no", "NPY_NO_CASTING") self._check("equiv", "NPY_EQUIV_CASTING") self._check("safe", "NPY_SAFE_CASTING") - self._check("same_kind", "NPY_SAME_KIND_CASTING") self._check("unsafe", "NPY_UNSAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + def test_invalid(self): + # Currently, 'same_value' is supported only in ndarray.astype + self._check_value_error("same_value") class TestIntpConverter: """ Tests of PyArray_IntpConverter """ @@ -187,12 +190,9 @@ def test_basic(self): assert self.conv(()) == () def test_none(self): - # once the warning expires, this will raise TypeError - with pytest.warns(DeprecationWarning): + with pytest.raises(TypeError): assert self.conv(None) == () - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_float(self): with pytest.raises(TypeError): self.conv(1.0) @@ -204,6 +204,6 @@ def test_too_large(self): self.conv(2**64) def test_too_many_dims(self): - assert self.conv([1]*64) == (1,)*64 + assert self.conv([1] * 64) == (1,) * 64 with pytest.raises(ValueError): - self.conv([1]*65) + self.conv([1] * 65) diff --git a/blimgui/dist64/numpy/_core/tests/test_cpu_dispatcher.py b/blimgui/dist64/numpy/_core/tests/test_cpu_dispatcher.py index b10e528..ce96d45 100644 --- a/blimgui/dist64/numpy/_core/tests/test_cpu_dispatcher.py +++ b/blimgui/dist64/numpy/_core/tests/test_cpu_dispatcher.py @@ -1,20 +1,23 @@ +from numpy._core import _umath_tests from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + __cpu_baseline__, + __cpu_dispatch__, + 
__cpu_features__, ) -from numpy._core import _umath_tests from numpy.testing import assert_equal + def test_dispatcher(): """ Testing the utilities of the CPU dispatcher """ targets = ( - "SSE2", "SSE41", "AVX2", + "X86_V2", "X86_V3", "VSX", "VSX2", "VSX3", "NEON", "ASIMD", "ASIMDHP", - "VX", "VXE" + "VX", "VXE", "LSX", "RVV" ) - highest_sfx = "" # no suffix for the baseline + highest_sfx = "" # no suffix for the baseline all_sfx = [] for feature in reversed(targets): # skip baseline features, by the default `CCompilerOpt` do not generate separated objects @@ -32,14 +35,14 @@ def test_dispatcher(): test = _umath_tests.test_dispatch() assert_equal(test["func"], "func" + highest_sfx) - assert_equal(test["var"], "var" + highest_sfx) + assert_equal(test["var"], "var" + highest_sfx) if highest_sfx: assert_equal(test["func_xb"], "func" + highest_sfx) - assert_equal(test["var_xb"], "var" + highest_sfx) + assert_equal(test["var_xb"], "var" + highest_sfx) else: assert_equal(test["func_xb"], "nobase") assert_equal(test["var_xb"], "nobase") - all_sfx.append("func") # add the baseline + all_sfx.append("func") # add the baseline assert_equal(test["all"], all_sfx) diff --git a/blimgui/dist64/numpy/_core/tests/test_cpu_features.py b/blimgui/dist64/numpy/_core/tests/test_cpu_features.py index ffc542c..f5ecab5 100644 --- a/blimgui/dist64/numpy/_core/tests/test_cpu_features.py +++ b/blimgui/dist64/numpy/_core/tests/test_cpu_features.py @@ -1,16 +1,18 @@ import os -import re -import sys import pathlib import platform +import re import subprocess +import sys + import pytest + from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__, + __cpu_features__, ) -import numpy as np + def assert_features_equal(actual, desired, fname): __tracebackhide__ = True # Hide traceback for py.test @@ -26,30 +28,30 @@ def assert_features_equal(actual, desired, fname): try: import subprocess - auxv = subprocess.check_output(['/bin/true'], 
env=dict(LD_SHOW_AUXV="1")) + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) auxv = auxv.decode() except Exception as err: auxv = str(err) import textwrap error_report = textwrap.indent( -""" +f""" ########################################### ### Extra debugging information ########################################### ------------------------------------------- --- NumPy Detections ------------------------------------------- -%s +{detected} ------------------------------------------- --- SYS / CPUINFO ------------------------------------------- -%s.... +{cpuinfo}.... ------------------------------------------- --- SYS / AUXV ------------------------------------------- -%s -""" % (detected, cpuinfo, auxv), prefix='\r') +{auxv} +""", prefix='\r') raise AssertionError(( "Failure Detection\n" @@ -72,6 +74,7 @@ class AbstractTest: def load_flags(self): # a hook pass + def test_features(self): self.load_flags() for gname, features in self.features_groups.items(): @@ -104,7 +107,7 @@ def get_cpuinfo_item(self, magic_key): return values def load_flags_auxv(self): - auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1")) + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) for at in auxv.split(b'\n'): if not at.startswith(b"AT_HWCAP"): continue @@ -116,17 +119,18 @@ def load_flags_auxv(self): @pytest.mark.skipif( sys.platform == 'emscripten', - reason= ( + reason=( "The subprocess module is not available on WASM platforms and" " therefore this test class cannot be properly executed." 
), ) +@pytest.mark.thread_unsafe(reason="setup & tmp_path_factory threads-unsafe, modifies environment variables") class TestEnvPrivation: cwd = pathlib.Path(__file__).parent.resolve() env = os.environ.copy() _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None) _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None) - SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True) + SUBPROCESS_ARGS = {"cwd": cwd, "capture_output": True, "text": True, "check": True} unavailable_feats = [ feat for feat in __cpu_dispatch__ if not __cpu_features__[feat] ] @@ -155,7 +159,6 @@ def setup_class(self, tmp_path_factory): file /= "_runtime_detect.py" file.write_text(self.SCRIPT) self.file = file - return def _run(self): return subprocess.run( @@ -188,7 +191,6 @@ def _expect_error( def setup_method(self): """Ensure that the environment is reset""" self.env = os.environ.copy() - return def test_runtime_feature_selection(self): """ @@ -227,7 +229,6 @@ def test_runtime_feature_selection(self): # Ensure that both features are enabled, and they are exactly the ones # specified by `NPY_ENABLE_CPU_FEATURES` assert set(enabled_features) == set(non_baseline_features) - return @pytest.mark.parametrize("enabled, disabled", [ @@ -329,56 +330,61 @@ def test_impossible_feature_enable(self): ) self._expect_error(msg, err_type) + is_linux = sys.platform.startswith('linux') is_cygwin = sys.platform.startswith('cygwin') -machine = platform.machine() -is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE) +machine = platform.machine() +is_x86 = re.match(r"^(amd64|x86|i386|i686)", machine, re.IGNORECASE) @pytest.mark.skipif( not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" ) class Test_X86_Features(AbstractTest): - features = [ - "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42", - "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD", - "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", 
"AVX512VPOPCNTDQ", - "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA", - "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16", + features = [] + + features_groups = { + "X86_V2": [ + "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "SSE42", + "POPCNT", "LAHF", "CX16" + ], + } + features_groups["X86_V3"] = features_groups["X86_V2"] + [ + "AVX", "AVX2", "FMA3", "BMI", "BMI2", + "LZCNT", "F16C", "MOVBE" ] - features_groups = dict( - AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"], - AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS", - "AVX5124VNNIW", "AVX512VPOPCNTDQ"], - AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"], - AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"], - AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", - "AVX512VBMI"], - AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", - "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"], - AVX512_SPR = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", - "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", - "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", - "AVX512FP16"], - ) - features_map = dict( - SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA", - AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2", - AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ", - AVX512FP16="AVX512_FP16", - ) + features_groups["X86_V4"] = features_groups["X86_V3"] + [ + "AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL" + ] + features_groups["AVX512_ICL"] = features_groups["X86_V4"] + [ + "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "VAES", "VPCLMULQDQ", "GFNI" + ] + features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16", "AVX512BF16"] + + features_map = { + "SSE3": 
"PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", + "BMI": "BMI1", "LZCNT": "ABM", "LAHF": "LAHF_LM", + "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", + "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", + "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", + "AVX512FP16": "AVX512_FP16", "AVX512BF16": "AVX512_BF16" + } + def load_flags(self): self.load_flags_cpuinfo("flags") -is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE) + +is_power = re.match(r"^(powerpc|ppc)64", machine, re.IGNORECASE) @pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") class Test_POWER_Features(AbstractTest): features = ["VSX", "VSX2", "VSX3", "VSX4"] - features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1") + features_map = {"VSX2": "ARCH_2_07", "VSX3": "ARCH_3_00", "VSX4": "ARCH_3_1"} def load_flags(self): self.load_flags_auxv() -is_zarch = re.match("^(s390x)", machine, re.IGNORECASE) +is_zarch = re.match(r"^(s390x)", machine, re.IGNORECASE) @pytest.mark.skipif(not is_linux or not is_zarch, reason="Only for Linux and IBM Z") class Test_ZARCH_Features(AbstractTest): @@ -388,32 +394,57 @@ def load_flags(self): self.load_flags_auxv() -is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE) +is_arm = re.match(r"^(arm|aarch64)", machine, re.IGNORECASE) @pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM") class Test_ARM_Features(AbstractTest): features = [ "SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM" ] - features_groups = dict( - NEON_FP16 = ["NEON", "HALF"], - NEON_VFPV4 = ["NEON", "VFPV4"], - ) + features_groups = { + "NEON_FP16": ["NEON", "HALF"], + "NEON_VFPV4": ["NEON", "VFPV4"], + } + def load_flags(self): self.load_flags_cpuinfo("Features") arch = self.get_cpuinfo_item("CPU architecture") # in case of mounting virtual filesystem of aarch64 kernel without linux32 is_rootfs_v8 = ( - not re.match("^armv[0-9]+l$", 
machine) and + not re.match(r"^armv[0-9]+l$", machine) and (int('0' + next(iter(arch))) > 7 if arch else 0) ) - if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8: + if re.match(r"^(aarch64|AARCH64)", machine) or is_rootfs_v8: self.features_map = { "NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD" } else: - self.features_map = dict( + self.features_map = { # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) # doesn't provide information about ASIMD, so we assume that ASIMD is supported # if the kernel reports any one of the following ARM8 features. - ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32") - ) + "ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32") + } + + +is_loongarch = re.match(r"^(loongarch)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_loongarch, reason="Only for Linux and LoongArch") +class Test_LOONGARCH_Features(AbstractTest): + features = ["LSX"] + + def load_flags(self): + self.load_flags_cpuinfo("Features") + + +is_riscv = re.match(r"^(riscv)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_riscv, reason="Only for Linux and RISC-V") +class Test_RISCV_Features(AbstractTest): + features = ["RVV"] + + def load_flags(self): + self.load_flags_auxv() + if not self.features_flags: + # Let the test fail and dump if we cannot read HWCAP. 
+ return + hwcap = int(next(iter(self.features_flags)), 16) + if hwcap & (1 << 21): # HWCAP_RISCV_V + self.features_flags.add("RVV") diff --git a/blimgui/dist64/numpy/_core/tests/test_custom_dtypes.py b/blimgui/dist64/numpy/_core/tests/test_custom_dtypes.py index 1a40094..9fe370c 100644 --- a/blimgui/dist64/numpy/_core/tests/test_custom_dtypes.py +++ b/blimgui/dist64/numpy/_core/tests/test_custom_dtypes.py @@ -3,10 +3,11 @@ import pytest import numpy as np -from numpy.testing import assert_array_equal from numpy._core._multiarray_umath import ( - _discover_array_parameters as discover_array_params, _get_sfloat_dtype) - + _discover_array_parameters as discover_array_params, + _get_sfloat_dtype, +) +from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() @@ -14,13 +15,13 @@ class TestSFloat: def _get_array(self, scaling, aligned=True): if not aligned: - a = np.empty(3*8 + 1, dtype=np.uint8)[1:] + a = np.empty(3 * 8 + 1, dtype=np.uint8)[1:] a = a.view(np.float64) a[:] = [1., 2., 3.] else: a = np.array([1., 2., 3.]) - a *= 1./scaling # the casting code also uses the reciprocal. + a *= 1. / scaling # the casting code also uses the reciprocal. return a.view(SF(scaling)) def test_sfloat_rescaled(self): @@ -47,6 +48,9 @@ def test_repr(self): # Check the repr, mainly to cover the code paths: assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)" + def test_dtype_str(self): + assert SF(1.).str == "_ScaledFloatTestDType(scaling=1.0)" + def test_dtype_name(self): assert SF(1.).name == "_ScaledFloatTestDType64" @@ -227,6 +231,78 @@ def test_wrapped_and_wrapped_reductions(self): expected = np.hypot.reduce(float_equiv, keepdims=True) assert res.view(np.float64) * 2 == expected + def test_sort(self): + a = self._get_array(1.) + a = a[::-1] # reverse it + + a.sort() + assert_array_equal(a.view(np.float64), [1., 2., 3.]) + + a = self._get_array(1.) 
+ a = a[::-1] # reverse it + + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [1., 2., 3.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) # different factor + a = a[::2][::-1] # non-contiguous + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=True) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=False) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + def test_argsort(self): + a = self._get_array(1.) 
+ a = a[::-1] # reverse it + + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) + a = a[::2][::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=True) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=False) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + def test_astype_class(self): # Very simple test that we accept `.astype()` also on the class. # ScaledFloat always returns the default descriptor, but it does @@ -251,6 +327,9 @@ def test_creation_class(self): assert np.zeros(3, dtype=SF).dtype == SF(1.) assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.) 
+ @pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" + ) def test_np_save_load(self): # this monkeypatch is needed because pickle # uses the repr of a type to reconstruct it @@ -294,6 +373,9 @@ def test_flatiter_index(self, index): np.testing.assert_array_equal( arr.view(np.float64), arr2.view(np.float64)) +@pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" +) def test_type_pickle(): # can't actually unpickle, but we can pickle (if in namespace) import pickle diff --git a/blimgui/dist64/numpy/_core/tests/test_cython.py b/blimgui/dist64/numpy/_core/tests/test_cython.py index eebbfcd..0b8bbfd 100644 --- a/blimgui/dist64/numpy/_core/tests/test_cython.py +++ b/blimgui/dist64/numpy/_core/tests/test_cython.py @@ -1,11 +1,13 @@ -from datetime import datetime import os import subprocess import sys +import sysconfig +from datetime import datetime + import pytest import numpy as np -from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE +from numpy.testing import IS_EDITABLE, IS_WASM, assert_array_equal # This import is copied from random.tests.test_extending try: @@ -53,6 +55,8 @@ def install_temp(tmpdir_factory): subprocess.check_call(["meson", "--version"]) except FileNotFoundError: pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") if sys.platform == "win32": subprocess.check_call(["meson", "setup", "--buildtype=release", @@ -293,12 +297,56 @@ def test_fillwithbytes(install_temp): def test_complex(install_temp): from checks import inc2_cfloat_struct - arr = np.array([0, 10+10j], dtype="F") + arr = np.array([0, 10 + 10j], dtype="F") inc2_cfloat_struct(arr) assert arr[1] == (12 + 12j) -def test_npy_uintp_type_enum(): +def test_npystring_pack(install_temp): + """Check that the cython API can write to a vstring array.""" + import checks + + arr = np.array(['a', 'b', 'c'], 
dtype='T') + assert checks.npystring_pack(arr) == 0 + + # checks.npystring_pack writes to the beginning of the array + assert arr[0] == "Hello world" + +def test_npystring_load(install_temp): + """Check that the cython API can load strings from a vstring array.""" + import checks + + arr = np.array(['abcd', 'b', 'c'], dtype='T') + result = checks.npystring_load(arr) + assert result == 'abcd' + + +def test_npystring_multiple_allocators(install_temp): + """Check that the cython API can acquire/release multiple vstring allocators.""" + import checks + + dt = np.dtypes.StringDType(na_object=None) + arr1 = np.array(['abcd', 'b', 'c'], dtype=dt) + arr2 = np.array(['a', 'b', 'c'], dtype=dt) + + assert checks.npystring_pack_multiple(arr1, arr2) == 0 + assert arr1[0] == "Hello world" + assert arr1[-1] is None + assert arr2[0] == "test this" + + +def test_npystring_allocators_other_dtype(install_temp): + """Check that allocators for non-StringDType arrays is NULL.""" import checks - assert checks.check_npy_uintp_type_enum() + arr1 = np.array([1, 2, 3], dtype='i') + arr2 = np.array([4, 5, 6], dtype='i') + + assert checks.npystring_allocators_other_types(arr1, arr2) == 0 + + +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64') +def test_npy_uintp_type_enum(install_temp): + import checks + assert checks.check_npy_uintp_type_enum() diff --git a/blimgui/dist64/numpy/_core/tests/test_datetime.py b/blimgui/dist64/numpy/_core/tests/test_datetime.py index ace2bae..cc9b49b 100644 --- a/blimgui/dist64/numpy/_core/tests/test_datetime.py +++ b/blimgui/dist64/numpy/_core/tests/test_datetime.py @@ -1,5 +1,7 @@ import datetime import pickle +import warnings +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -7,22 +9,23 @@ import numpy as np from numpy.testing import ( IS_WASM, - assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, - assert_raises_regex, assert_array_equal, - ) - -# Use pytz to test out 
various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) try: RecursionError except NameError: RecursionError = RuntimeError # python < 3.5 +try: + ZoneInfo("US/Central") + _has_tz = True +except ZoneInfoNotFoundError: + _has_tz = False def _assert_equal_hash(v1, v2): assert v1 == v2 @@ -51,10 +54,10 @@ def test_datetime_dtype_creation(self): 'h', 'm', 's', 'ms', 'us', 'μs', # alias for us 'ns', 'ps', 'fs', 'as']: - dt1 = np.dtype('M8[750%s]' % unit) - assert_(dt1 == np.dtype('datetime64[750%s]' % unit)) - dt2 = np.dtype('m8[%s]' % unit) - assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) + dt1 = np.dtype(f'M8[750{unit}]') + assert_(dt1 == np.dtype(f'datetime64[750{unit}]')) + dt2 = np.dtype(f'm8[{unit}]') + assert_(dt2 == np.dtype(f'timedelta64[{unit}]')) # Generic units shouldn't add [] to the end assert_equal(str(np.dtype("M8")), "datetime64") @@ -261,10 +264,12 @@ def test_datetime_scalar_construction(self): # Some basic strings and repr assert_equal(str(np.datetime64('NaT')), 'NaT') assert_equal(repr(np.datetime64('NaT')), - "np.datetime64('NaT')") + "np.datetime64('NaT','generic')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "np.datetime64('2011-02')") + assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))), + "np.datetime64('NaT','ns')") # None gets constructed as NaT assert_equal(np.datetime64(None), np.datetime64('NaT')) @@ -377,7 +382,7 @@ def test_datetime_array_find_type(self): # "generic" to select generic unit ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"), ("s"), ("ms"), ("us"), ("ns"), ("ps"), - ("fs"), ("as"), ("generic") ]) + ("fs"), ("as"), ("generic")]) def test_timedelta_np_int_construction(self, unit): # regression test for gh-7617 if unit != "generic": @@ -494,7 +499,7 @@ def 
test_timedelta_0_dim_object_array_conversion(self): def test_timedelta_nat_format(self): # gh-17552 - assert_equal('NaT', '{0}'.format(np.timedelta64('nat'))) + assert_equal('NaT', f'{np.timedelta64("nat")}') def test_timedelta_scalar_construction_units(self): # String construction detecting units @@ -630,42 +635,42 @@ def test_datetime_nat_casting(self): def test_days_creation(self): assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 - 365) + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 - 365) assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3) + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3) assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 + 366) + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 + 366) assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4) + (1900 - 1970) * 365 - (1970 - 1900) // 4) assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4 + 365) - assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) - assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) - assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365) - assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) - assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) - assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) - assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) - assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) + (1900 - 1970) * 365 - (1970 - 1900) // 4 + 365) + assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3 * 365 - 1) + assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2 * 365 - 1) + assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1 * 365) + assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0 * 365) + 
assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1 * 365) + assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2 * 365) + assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3 * 365 + 1) + assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4 * 365 + 1) assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4) + (2000 - 1970) * 365 + (2000 - 1972) // 4) assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366) + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366) assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3) + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3) assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3 + 366) assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 28) assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 29) assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 28) assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 29) assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366 + 31 + 28 + 21) def test_days_to_pydate(self): assert_equal(np.array('1599', dtype='M8[D]').astype('O'), @@ -815,7 +820,7 @@ def test_datetime_array_str(self): a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') 
assert_equal(np.array2string(a, separator=', ', formatter={'datetime': lambda x: - "'%s'" % np.datetime_as_string(x, timezone='UTC')}), + f"'{np.datetime_as_string(x, timezone='UTC')}'"}), "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") # Check that one NaT doesn't corrupt subsequent entries @@ -839,6 +844,21 @@ def test_timedelta_array_str(self): a = np.array([-1, 'NaT', 1234567], dtype=''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + with pytest.warns(np.exceptions.VisibleDeprecationWarning, + match=r".*align should be passed"): + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + + def test_gh_29555(self): + # check that dtype metadata round-trips when none + dt = np.dtype('>M8[us]') + assert dt.metadata is None + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + res = pickle.loads(pickle.dumps(dt, protocol=proto)) + assert_equal(res, dt) + assert res.metadata is None def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_raises(ValueError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, 1)) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_raises(TypeError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) def test_dtype_promotion(self): @@ -880,23 +913,23 @@ def test_dtype_promotion(self): # timedelta timedelta computes 
the metadata gcd for mM in ['m', 'M']: assert_equal( - np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), - np.dtype(mM+'8[2Y]')) + np.promote_types(np.dtype(mM + '8[2Y]'), np.dtype(mM + '8[2Y]')), + np.dtype(mM + '8[2Y]')) assert_equal( - np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), - np.dtype(mM+'8[3Y]')) + np.promote_types(np.dtype(mM + '8[12Y]'), np.dtype(mM + '8[15Y]')), + np.dtype(mM + '8[3Y]')) assert_equal( - np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), - np.dtype(mM+'8[2M]')) + np.promote_types(np.dtype(mM + '8[62M]'), np.dtype(mM + '8[24M]')), + np.dtype(mM + '8[2M]')) assert_equal( - np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), - np.dtype(mM+'8[1D]')) + np.promote_types(np.dtype(mM + '8[1W]'), np.dtype(mM + '8[2D]')), + np.dtype(mM + '8[1D]')) assert_equal( - np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), - np.dtype(mM+'8[s]')) + np.promote_types(np.dtype(mM + '8[W]'), np.dtype(mM + '8[13s]')), + np.dtype(mM + '8[s]')) assert_equal( - np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), - np.dtype(mM+'8[7s]')) + np.promote_types(np.dtype(mM + '8[13W]'), np.dtype(mM + '8[49s]')), + np.dtype(mM + '8[7s]')) # timedelta timedelta raises when there is no reasonable gcd assert_raises(TypeError, np.promote_types, np.dtype('m8[Y]'), np.dtype('m8[D]')) @@ -943,7 +976,7 @@ def test_pyobject_roundtrip(self): b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, - "Error roundtripping unit %s" % unit) + f"Error roundtripping unit {unit}") # With time units for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]', 'M8[300as]', 'M8[20us]']: @@ -959,7 +992,7 @@ def test_pyobject_roundtrip(self): b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, - "Error roundtripping unit %s" % unit) + f"Error roundtripping unit {unit}") def test_month_truncation(self): # Make sure that months are truncating correctly @@ -977,9 +1010,9 @@ def test_month_truncation(self): def 
test_different_unit_comparison(self): # Check some years with date units for unit1 in ['Y', 'M', 'D']: - dt1 = np.dtype('M8[%s]' % unit1) + dt1 = np.dtype(f'M8[{unit1}]') for unit2 in ['Y', 'M', 'D']: - dt2 = np.dtype('M8[%s]' % unit2) + dt2 = np.dtype(f'M8[{unit2}]') assert_equal(np.array('1945', dtype=dt1), np.array('1945', dtype=dt2)) assert_equal(np.array('1970', dtype=dt1), @@ -998,9 +1031,9 @@ def test_different_unit_comparison(self): np.datetime64('10000-01-01', unit2)) # Check some datetimes with time units for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']: - dt1 = np.dtype('M8[%s]' % unit1) + dt1 = np.dtype(f'M8[{unit1}]') for unit2 in ['h', 'm', 's', 'ms', 'us']: - dt2 = np.dtype('M8[%s]' % unit2) + dt2 = np.dtype(f'M8[{unit2}]') assert_equal(np.array('1945-03-12T18', dtype=dt1), np.array('1945-03-12T18', dtype=dt2)) assert_equal(np.array('1970-03-12T18', dtype=dt1), @@ -1019,9 +1052,9 @@ def test_different_unit_comparison(self): np.datetime64('10000-01-01T00', unit2)) # Check some days with units that won't overflow for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']: - dt1 = np.dtype('M8[%s]' % unit1) + dt1 = np.dtype(f'M8[{unit1}]') for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']: - dt2 = np.dtype('M8[%s]' % unit2) + dt2 = np.dtype(f'M8[{unit2}]') assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1), np.array('1932-02-17T00:00:00', dtype='M').astype(dt2), casting='unsafe')) @@ -1097,7 +1130,7 @@ def test_datetime_add(self): np.array(['NaT'], dtype='M8[D]'), np.array([3], dtype='m8[D]'), np.array([11], dtype='m8[h]'), - np.array([3*24 + 11], dtype='m8[h]')), + np.array([3 * 24 + 11], dtype='m8[h]')), # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.datetime64('2012-12-24', '[D]'), @@ -1105,7 +1138,7 @@ def test_datetime_add(self): np.datetime64('NaT', '[D]'), np.timedelta64(3, '[D]'), np.timedelta64(11, '[h]'), - np.timedelta64(3*24 + 11, '[h]'))]: + np.timedelta64(3 * 24 + 11, '[h]'))]: # m8 + m8 assert_equal(tda + tdb, tdc) 
assert_equal((tda + tdb).dtype, np.dtype('m8[h]')) @@ -1113,14 +1146,14 @@ def test_datetime_add(self): assert_equal(tdb + True, tdb + 1) assert_equal((tdb + True).dtype, np.dtype('m8[h]')) # m8 + int - assert_equal(tdb + 3*24, tdc) - assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]')) + assert_equal(tdb + 3 * 24, tdc) + assert_equal((tdb + 3 * 24).dtype, np.dtype('m8[h]')) # bool + m8 assert_equal(False + tdb, tdb) assert_equal((False + tdb).dtype, np.dtype('m8[h]')) # int + m8 - assert_equal(3*24 + tdb, tdc) - assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]')) + assert_equal(3 * 24 + tdb, tdc) + assert_equal((3 * 24 + tdb).dtype, np.dtype('m8[h]')) # M8 + bool assert_equal(dta + True, dta + 1) assert_equal(dtnat + True, dtnat) @@ -1169,7 +1202,7 @@ def test_datetime_subtract(self): np.array(['NaT'], dtype='M8[D]'), np.array([3], dtype='m8[D]'), np.array([11], dtype='m8[h]'), - np.array([3*24 - 11], dtype='m8[h]')), + np.array([3 * 24 - 11], dtype='m8[h]')), # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.datetime64('2012-12-24', '[D]'), @@ -1179,7 +1212,7 @@ def test_datetime_subtract(self): np.datetime64('NaT', '[D]'), np.timedelta64(3, '[D]'), np.timedelta64(11, '[h]'), - np.timedelta64(3*24 - 11, '[h]'))]: + np.timedelta64(3 * 24 - 11, '[h]'))]: # m8 - m8 assert_equal(tda - tdb, tdc) assert_equal((tda - tdb).dtype, np.dtype('m8[h]')) @@ -1189,14 +1222,14 @@ def test_datetime_subtract(self): assert_equal(tdc - True, tdc - 1) assert_equal((tdc - True).dtype, np.dtype('m8[h]')) # m8 - int - assert_equal(tdc - 3*24, -tdb) - assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]')) + assert_equal(tdc - 3 * 24, -tdb) + assert_equal((tdc - 3 * 24).dtype, np.dtype('m8[h]')) # int - m8 assert_equal(False - tdb, -tdb) assert_equal((False - tdb).dtype, np.dtype('m8[h]')) # int - m8 - assert_equal(3*24 - tdb, tdc) - assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]')) + assert_equal(3 * 24 - tdb, tdc) + assert_equal((3 * 24 - tdb).dtype, np.dtype('m8[h]')) # M8 - bool 
assert_equal(dtb - True, dtb - 1) assert_equal(dtnat - True, dtnat) @@ -1274,9 +1307,11 @@ def test_datetime_multiply(self): assert_raises(TypeError, np.multiply, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in multiply") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in multiply", RuntimeWarning) nat = np.timedelta64('NaT') + def check(a, b, res): assert_equal(a * b, res) assert_equal(b * a, res) @@ -1335,7 +1370,7 @@ def test_timedelta_floor_divide(self, op1, op2, exp): np.timedelta64(-1)), ]) def test_timedelta_floor_div_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = op1 // op2 assert_equal(actual, 0) assert_equal(actual.dtype, np.int64) @@ -1399,7 +1434,7 @@ def test_timedelta_divmod(self, op1, op2): @pytest.mark.parametrize("op1, op2", [ # Y and M are incompatible with all units except Y and M (np.timedelta64(1, 'Y'), np.timedelta64(1, 's')), - (np.timedelta64(1, 'D'), np.timedelta64(1, 'M')), + (np.timedelta64(1, 'D'), np.timedelta64(1, 'M')), ]) def test_timedelta_divmod_typeerror(self, op1, op2): assert_raises(TypeError, np.divmod, op1, op2) @@ -1419,9 +1454,9 @@ def test_timedelta_divmod_typeerror(self, op1, op2): np.timedelta64(-1)), ]) def test_timedelta_divmod_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): expected = (op1 // op2, op1 % op2) - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = divmod(op1, op2) assert_equal(actual, expected) @@ -1473,8 +1508,9 @@ def test_datetime_divide(self): assert_raises(TypeError, np.divide, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, r".*encountered in divide") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', r".*encountered in divide", RuntimeWarning) nat = np.timedelta64('NaT') for tp in (int, 
float): assert_equal(np.timedelta64(1) / tp(0), nat) @@ -1561,7 +1597,7 @@ def test_datetime_minmax(self): # Also do timedelta a = np.array(3, dtype='m8[h]') - b = np.array(3*3600 - 3, dtype='m8[s]') + b = np.array(3 * 3600 - 3, dtype='m8[s]') assert_equal(np.minimum(a, b), b) assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]')) assert_equal(np.fmin(a, b), b) @@ -1591,7 +1627,7 @@ def test_datetime_minmax(self): def test_hours(self): t = np.ones(3, dtype='M8[s]') - t[0] = 60*60*24 + 60*60*10 + t[0] = 60 * 60 * 24 + 60 * 60 * 10 assert_(t[0].item().hour == 10) def test_divisor_conversion_year(self): @@ -1761,10 +1797,10 @@ def test_creation_overflow(self): timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64) for unit in ['ms', 'us', 'ns']: timesteps *= 1000 - x = np.array([date], dtype='datetime64[%s]' % unit) + x = np.array([date], dtype=f'datetime64[{unit}]') assert_equal(timesteps, x[0].astype(np.int64), - err_msg='Datetime conversion error for unit %s' % unit) + err_msg=f'Datetime conversion error for unit {unit}') assert_equal(x[0].astype(np.int64), 322689600000000000) @@ -1834,6 +1870,10 @@ def test_datetime_as_string(self): '2032-07-18') assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'), '2032-07-18') + + with pytest.raises(ValueError): + np.datetime_as_string(a, unit='Y', casting='same_value') + assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12') assert_equal(np.datetime_as_string(a, unit='m'), '2032-07-18T12:23') @@ -1880,7 +1920,7 @@ def test_datetime_as_string(self): np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') - @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") + @pytest.mark.skipif(not _has_tz, reason="The tzdata module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' a = np.datetime64('2010-03-15T06:30', 'm') @@ -1895,29 +1935,29 @@ def test_datetime_as_string_timezone(self): b = 
np.datetime64('2010-02-15T06:30', 'm') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Central')), '2010-03-15T01:30-0500') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Eastern')), '2010-03-15T02:30-0400') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Pacific')), '2010-03-14T23:30-0700') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Central')), '2010-02-15T00:30-0600') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Eastern')), '2010-02-15T01:30-0500') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Pacific')), '2010-02-14T22:30-0800') # Dates to strings with a timezone attached is disabled by default assert_raises(TypeError, np.datetime_as_string, a, unit='D', - timezone=tz('US/Pacific')) + timezone=ZoneInfo('US/Pacific')) # Check that we can print out the date in the specified time zone assert_equal(np.datetime_as_string(a, unit='D', - timezone=tz('US/Pacific'), casting='unsafe'), + timezone=ZoneInfo('US/Pacific'), casting='unsafe'), '2010-03-14') assert_equal(np.datetime_as_string(b, unit='D', - timezone=tz('US/Central'), casting='unsafe'), + timezone=ZoneInfo('US/Central'), casting='unsafe'), '2010-02-15') def test_datetime_arange(self): @@ -2036,7 +2076,7 @@ def test_timedelta_modulus_error(self, val1, val2): @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_timedelta_modulus_div_by_zero(self): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') assert_equal(actual, 
np.timedelta64('NaT')) @@ -2382,7 +2422,7 @@ def test_datetime_busday_holidays_count(self): assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd), expected) # Returns negative value when reversed - expected = -np.arange(366)+1 + expected = -np.arange(366) + 1 expected[0] = 0 assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd), expected) @@ -2408,7 +2448,6 @@ def test_datetime_busday_holidays_count(self): assert_equal(np.busday_count(friday, saturday), 1) assert_equal(np.busday_count(saturday, friday), 0) - def test_datetime_is_busday(self): holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24', '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17', @@ -2460,13 +2499,13 @@ def test_isnat(self): for unit in ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as']: - arr = np.array([123, -321, "NaT"], dtype='datetime64[{unit}]') assert_equal(np.isnat(arr), res) - arr = np.array([123, -321, "NaT"], dtype='timedelta64[{unit}]') assert_equal(np.isnat(arr), res) def test_isnat_error(self): @@ -2492,10 +2531,10 @@ def test_isfinite_isinf_isnan_units(self, unit, dstr): '''check isfinite, isinf, isnan for all units of M, m dtypes ''' arr_val = [123, -321, "NaT"] - arr = np.array(arr_val, dtype= dstr % unit) - pos = np.array([True, True, False]) - neg = np.array([False, False, True]) - false = np.array([False, False, False]) + arr = np.array(arr_val, dtype=(dstr % unit)) + pos = np.array([True, True, False]) + neg = np.array([False, False, True]) + false = np.array([False, False, False]) assert_equal(np.isfinite(arr), pos) assert_equal(np.isinf(arr), false) assert_equal(np.isnan(arr), neg) @@ -2661,6 +2700,54 @@ def test_timedelta_hash_big_positive(self, wk, unit): td2 = np.timedelta64(td, unit) _assert_equal_hash(td, td2) + @pytest.mark.parametrize( + "inputs, divisor, expected", + [ + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.int64(2), + np.array( + 
[datetime.timedelta(seconds=10), datetime.timedelta(days=1)], + dtype="object", + ), + ), + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.timedelta64(2, "s"), + np.array( + [10.0, 24.0 * 60.0 * 60.0], + dtype="object", + ), + ), + ( + datetime.timedelta(seconds=2), + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.array( + [1.0 / 10.0, 1.0 / (24.0 * 60.0 * 60.0)], + dtype="object", + ), + ), + ], + ) + def test_true_divide_object_by_timedelta( + self, + inputs: np.ndarray | type[np.generic], + divisor: np.ndarray | type[np.generic], + expected: np.ndarray, + ): + # gh-30025 + results = inputs / divisor + assert_array_equal(results, expected) + class TestDateTimeData: diff --git a/blimgui/dist64/numpy/_core/tests/test_defchararray.py b/blimgui/dist64/numpy/_core/tests/test_defchararray.py index 5e7e97e..e45b6ff 100644 --- a/blimgui/dist64/numpy/_core/tests/test_defchararray.py +++ b/blimgui/dist64/numpy/_core/tests/test_defchararray.py @@ -3,9 +3,12 @@ import numpy as np from numpy._core.multiarray import _vec_string from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - assert_raises_regex - ) + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) kw_unicode_true = {'unicode': True} # make 2to3 work properly kw_unicode_false = {'unicode': False} @@ -131,63 +134,67 @@ def fail(): assert_raises(ValueError, fail) - class TestWhitespace: - def setup_method(self): - self.A = np.array([['abc ', '123 '], - ['789 ', 'xyz ']]).view(np.char.chararray) - self.B = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - def test1(self): - assert_(np.all(self.A == self.B)) - assert_(np.all(self.A >= self.B)) - assert_(np.all(self.A <= self.B)) - assert_(not np.any(self.A > self.B)) - assert_(not np.any(self.A < self.B)) - assert_(not np.any(self.A != self.B)) + A = np.array([['abc 
', '123 '], + ['789 ', 'xyz ']]).view(np.char.chararray) + B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + assert_(np.all(A == B)) + assert_(np.all(A >= B)) + assert_(np.all(A <= B)) + assert_(not np.any(A > B)) + assert_(not np.any(A < B)) + assert_(not np.any(A != B)) class TestChar: - def setup_method(self): - self.A = np.array('abc1', dtype='c').view(np.char.chararray) - def test_it(self): - assert_equal(self.A.shape, (4,)) - assert_equal(self.A.upper()[:2].tobytes(), b'AB') + A = np.array('abc1', dtype='c').view(np.char.chararray) + assert_equal(A.shape, (4,)) + assert_equal(A.upper()[:2].tobytes(), b'AB') class TestComparisons: - def setup_method(self): - self.A = np.array([['abc', 'abcc', '123'], - ['789', 'abc', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', 'efg', '123 '], - ['051', 'efgg', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']]).view(np.char.chararray) def test_not_equal(self): - assert_array_equal((self.A != self.B), + A, B = self.A(), self.B() + assert_array_equal((A != B), [[True, True, False], [True, True, True]]) def test_equal(self): - assert_array_equal((self.A == self.B), + A, B = self.A(), self.B() + assert_array_equal((A == B), [[False, False, True], [False, False, False]]) def test_greater_equal(self): - assert_array_equal((self.A >= self.B), + A, B = self.A(), self.B() + assert_array_equal((A >= B), [[False, False, True], [True, False, True]]) def test_less_equal(self): - assert_array_equal((self.A <= self.B), + A, B = self.A(), self.B() + assert_array_equal((A <= B), [[True, True, True], [False, True, False]]) def test_greater(self): - assert_array_equal((self.A > self.B), + A, B = self.A(), self.B() + assert_array_equal((A > B), [[False, False, False], [True, False, True]]) def test_less(self): - 
assert_array_equal((self.A < self.B), + A, B = self.A(), self.B() + assert_array_equal((A < B), [[True, True, False], [False, True, False]]) def test_type(self): - out1 = np.char.equal(self.A, self.B) + A, B = self.A(), self.B() + out1 = np.char.equal(A, B) out2 = np.char.equal('a', 'a') assert_(isinstance(out1, np.ndarray)) assert_(isinstance(out2, np.ndarray)) @@ -195,59 +202,56 @@ def test_type(self): class TestComparisonsMixed1(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.B = np.array( + def B(self): + return np.array( [['efg', 'efg', '123 '], ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) class TestComparisonsMixed2(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.A = np.array( + def A(self): + return np.array( [['abc', 'abcc', '123'], ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) class TestInformation: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - # Array with longer strings, > MEMCHR_CUT_OFF in code. 
- self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', - '01234567890123456789012345']) - .view(np.char.chararray)) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_len(self): - assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + A, B = self.A(), self.B() + assert_(issubclass(np.char.str_len(A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(B), [[3, 0], [5, 9], [12, 5]]) def test_count(self): - assert_(issubclass(self.A.count('').dtype.type, np.integer)) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + A, B = self.A(), self.B() + assert_(issubclass(A.count('').dtype.type, np.integer)) + assert_array_equal(A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(A.count('123'), [[0, 0], [1, 0], [1, 0]]) # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(B.count('123'), [[0, 0], [1, 0], [1, 0]]) def test_endswith(self): - assert_(issubclass(self.A.endswith('').dtype.type, np.bool)) - 
assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.endswith('').dtype.type, np.bool)) + assert_array_equal(A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.endswith('3', 'fdjk') + A.endswith('3', 'fdjk') assert_raises(TypeError, fail) @@ -257,7 +261,7 @@ def fail(): ("S", lambda x: x.encode('ascii')), ]) def test_find(self, dtype, encode): - A = self.A.astype(dtype) + A = self.A().astype(dtype) assert_(issubclass(A.find(encode('a')).dtype.type, np.integer)) assert_array_equal(A.find(encode('a')), [[1, -1], [-1, 6], [-1, -1]]) @@ -267,103 +271,119 @@ def test_find(self, dtype, encode): [[1, -1], [-1, -1], [-1, -1]]) assert_array_equal(A.find([encode('1'), encode('P')]), [[-1, -1], [0, -1], [0, 1]]) - C = self.C.astype(dtype) + C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', + '01234567890123456789012345']) + .view(np.char.chararray)).astype(dtype) assert_array_equal(C.find(encode('M')), [12, -1]) def test_index(self): + A = self.A() def fail(): - self.A.index('a') + A.index('a') assert_raises(ValueError, fail) assert_(np.char.index('abcba', 'b') == 1) assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) def test_isalnum(self): - assert_(issubclass(self.A.isalnum().dtype.type, np.bool)) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalnum().dtype.type, np.bool)) + assert_array_equal(A.isalnum(), [[False, False], [True, True], [False, True]]) def test_isalpha(self): - assert_(issubclass(self.A.isalpha().dtype.type, np.bool)) - assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalpha().dtype.type, np.bool)) + assert_array_equal(A.isalpha(), [[False, False], [False, True], [False, True]]) def 
test_isdigit(self): - assert_(issubclass(self.A.isdigit().dtype.type, np.bool)) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isdigit().dtype.type, np.bool)) + assert_array_equal(A.isdigit(), [[False, False], [True, False], [False, False]]) def test_islower(self): - assert_(issubclass(self.A.islower().dtype.type, np.bool)) - assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.islower().dtype.type, np.bool)) + assert_array_equal(A.islower(), [[True, False], [False, False], [False, False]]) def test_isspace(self): - assert_(issubclass(self.A.isspace().dtype.type, np.bool)) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isspace().dtype.type, np.bool)) + assert_array_equal(A.isspace(), [[False, False], [False, False], [False, False]]) def test_istitle(self): - assert_(issubclass(self.A.istitle().dtype.type, np.bool)) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.istitle().dtype.type, np.bool)) + assert_array_equal(A.istitle(), [[False, False], [False, False], [False, False]]) def test_isupper(self): - assert_(issubclass(self.A.isupper().dtype.type, np.bool)) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + A = self.A() + assert_(issubclass(A.isupper().dtype.type, np.bool)) + assert_array_equal(A.isupper(), [[False, False], [False, False], [False, True]]) def test_rfind(self): - assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 
2]]) + A = self.A() + assert_(issubclass(A.rfind('a').dtype.type, np.integer)) + assert_array_equal(A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) def test_rindex(self): + A = self.A() def fail(): - self.A.rindex('a') + A.rindex('a') assert_raises(ValueError, fail) assert_(np.char.rindex('abcba', 'b') == 3) assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) def test_startswith(self): - assert_(issubclass(self.A.startswith('').dtype.type, np.bool)) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.startswith('').dtype.type, np.bool)) + assert_array_equal(A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.startswith('3', 'fdjk') + A.startswith('3', 'fdjk') assert_raises(TypeError, fail) - class TestMethods: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view( - np.char.chararray) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_capitalize(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_)) - assert_array_equal(self.A.capitalize(), tgt) + 
assert_(issubclass(A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(A.capitalize(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.capitalize().dtype.type, np.str_)) - assert_array_equal(self.B.capitalize(), tgt) + assert_(issubclass(B.capitalize().dtype.type, np.str_)) + assert_array_equal(B.capitalize(), tgt) def test_center(self): - assert_(issubclass(self.A.center(10).dtype.type, np.bytes_)) - C = self.A.center([10, 20]) + A = self.A() + assert_(issubclass(A.center(10).dtype.type, np.bytes_)) + C = A.center([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.center(20, b'#') + C = A.center(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_(np.all(C.endswith(b'#'))) @@ -378,17 +398,17 @@ def test_decode(self): assert_(A.decode('unicode-escape')[0] == '\u03a3') def test_encode(self): - B = self.B.encode('unicode_escape') - assert_(B[0][0] == str(' \\u03a3 ').encode('latin1')) + B = self.B().encode('unicode_escape') + assert_(B[0][0] == ' \\u03a3 '.encode('latin1')) def test_expandtabs(self): - T = self.A.expandtabs() + T = self.A().expandtabs() assert_(T[2, 0] == b'123 345 \0') def test_join(self): # NOTE: list(b'123') == [49, 50, 51] # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') + A0 = self.A().decode('ascii') A = np.char.join([',', '#'], A0) assert_(issubclass(A.dtype.type, np.str_)) @@ -398,12 +418,13 @@ def test_join(self): assert_array_equal(np.char.join([',', '#'], A0), tgt) def test_ljust(self): - assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.ljust(10).dtype.type, np.bytes_)) - C = self.A.ljust([10, 20]) + C = A.ljust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.ljust(20, b'#') + C = A.ljust(20, b'#') assert_array_equal(C.startswith(b'#'), [ [False, True], [False, False], [False, False]]) 
assert_(np.all(C.endswith(b'#'))) @@ -415,38 +436,41 @@ def test_ljust(self): assert_array_equal(C, tgt) def test_lower(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'mixedcase'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.lower().dtype.type, np.bytes_)) - assert_array_equal(self.A.lower(), tgt) + assert_(issubclass(A.lower().dtype.type, np.bytes_)) + assert_array_equal(A.lower(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mixedcase'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.lower().dtype.type, np.str_)) - assert_array_equal(self.B.lower(), tgt) + assert_(issubclass(B.lower().dtype.type, np.str_)) + assert_array_equal(B.lower(), tgt) def test_lstrip(self): + A, B = self.A(), self.B() tgt = [[b'abc ', b''], [b'12345', b'MixedCase'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_)) - assert_array_equal(self.A.lstrip(), tgt) + assert_(issubclass(A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(A.lstrip(), tgt) tgt = [[b' abc', b''], [b'2345', b'ixedCase'], [b'23 \t 345 \x00', b'UPPER']] - assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + assert_array_equal(A.lstrip([b'1', b'M']), tgt) tgt = [['\u03a3 ', ''], ['12345', 'MixedCase'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.lstrip().dtype.type, np.str_)) - assert_array_equal(self.B.lstrip(), tgt) + assert_(issubclass(B.lstrip().dtype.type, np.str_)) + assert_array_equal(B.lstrip(), tgt) def test_partition(self): - P = self.A.partition([b'3', b'M']) + A = self.A() + P = A.partition([b'3', b'M']) tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] @@ -454,7 +478,8 @@ def test_partition(self): assert_array_equal(P, tgt) def test_replace(self): - R = self.A.replace([b'3', b'a'], + A = self.A() + R = A.replace([b'3', b'a'], [b'##########', b'@']) tgt = [[b' abc ', b''], [b'12##########45', 
b'MixedC@se'], @@ -463,34 +488,34 @@ def test_replace(self): assert_array_equal(R, tgt) # Test special cases that should just return the input array, # since replacements are not possible or do nothing. - S1 = self.A.replace(b'A very long byte string, longer than A', b'') - assert_array_equal(S1, self.A) - S2 = self.A.replace(b'', b'') - assert_array_equal(S2, self.A) - S3 = self.A.replace(b'3', b'3') - assert_array_equal(S3, self.A) - S4 = self.A.replace(b'3', b'', count=0) - assert_array_equal(S4, self.A) + S1 = A.replace(b'A very long byte string, longer than A', b'') + assert_array_equal(S1, A) + S2 = A.replace(b'', b'') + assert_array_equal(S2, A) + S3 = A.replace(b'3', b'3') + assert_array_equal(S3, A) + S4 = A.replace(b'3', b'', count=0) + assert_array_equal(S4, A) def test_replace_count_and_size(self): a = np.array(['0123456789' * i for i in range(4)] ).view(np.char.chararray) r1 = a.replace('5', 'ABCDE') - assert r1.dtype.itemsize == (3*10 + 3*4) * 4 + assert r1.dtype.itemsize == (3 * 10 + 3 * 4) * 4 assert_array_equal(r1, np.array(['01234ABCDE6789' * i for i in range(4)])) r2 = a.replace('5', 'ABCDE', count=1) - assert r2.dtype.itemsize == (3*10 + 4) * 4 + assert r2.dtype.itemsize == (3 * 10 + 4) * 4 r3 = a.replace('5', 'ABCDE', count=0) assert r3.dtype.itemsize == a.dtype.itemsize assert_array_equal(r3, a) # Negative values mean to replace all. r4 = a.replace('5', 'ABCDE', count=-1) - assert r4.dtype.itemsize == (3*10 + 3*4) * 4 + assert r4.dtype.itemsize == (3 * 10 + 3 * 4) * 4 assert_array_equal(r4, r1) # We can do count on an element-by-element basis. 
r5 = a.replace('5', 'ABCDE', count=[-1, -1, -1, 1]) - assert r5.dtype.itemsize == (3*10 + 4) * 4 + assert r5.dtype.itemsize == (3 * 10 + 4) * 4 assert_array_equal(r5, np.array( ['01234ABCDE6789' * i for i in range(3)] + ['01234ABCDE6789' + '0123456789' * 2])) @@ -507,12 +532,13 @@ def test_replace_broadcasting(self): assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X'])) def test_rjust(self): - assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.rjust(10).dtype.type, np.bytes_)) - C = self.A.rjust([10, 20]) + C = A.rjust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.rjust(20, b'#') + C = A.rjust(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_array_equal(C.endswith(b'#'), [[False, True], [False, False], [False, False]]) @@ -524,7 +550,8 @@ def test_rjust(self): assert_array_equal(C, tgt) def test_rpartition(self): - P = self.A.rpartition([b'3', b'M']) + A = self.A() + P = A.rpartition([b'3', b'M']) tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] @@ -532,7 +559,7 @@ def test_rpartition(self): assert_array_equal(P, tgt) def test_rsplit(self): - A = self.A.rsplit(b'3') + A = self.A().rsplit(b'3') tgt = [[[b' abc '], [b'']], [[b'12', b'45'], [b'MixedCase']], [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] @@ -540,45 +567,47 @@ def test_rsplit(self): assert_equal(A.tolist(), tgt) def test_rstrip(self): - assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_)) + A, B = self.A(), self.B() + assert_(issubclass(A.rstrip().dtype.type, np.bytes_)) tgt = [[b' abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_array_equal(self.A.rstrip(), tgt) + assert_array_equal(A.rstrip(), tgt) tgt = [[b' abc ', b''], [b'1234', b'MixedCase'], [b'123 \t 345 \x00', b'UPP'] ] - assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + assert_array_equal(A.rstrip([b'5', 
b'ER']), tgt) tgt = [[' \u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.rstrip().dtype.type, np.str_)) - assert_array_equal(self.B.rstrip(), tgt) + assert_(issubclass(B.rstrip().dtype.type, np.str_)) + assert_array_equal(B.rstrip(), tgt) def test_strip(self): + A, B = self.A(), self.B() tgt = [[b'abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_(issubclass(self.A.strip().dtype.type, np.bytes_)) - assert_array_equal(self.A.strip(), tgt) + assert_(issubclass(A.strip().dtype.type, np.bytes_)) + assert_array_equal(A.strip(), tgt) tgt = [[b' abc ', b''], [b'234', b'ixedCas'], [b'23 \t 345 \x00', b'UPP']] - assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + assert_array_equal(A.strip([b'15', b'EReM']), tgt) tgt = [['\u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.strip().dtype.type, np.str_)) - assert_array_equal(self.B.strip(), tgt) + assert_(issubclass(B.strip().dtype.type, np.str_)) + assert_array_equal(B.strip(), tgt) def test_split(self): - A = self.A.split(b'3') + A = self.A().split(b'3') tgt = [ [[b' abc '], [b'']], [[b'12', b'45'], [b'MixedCase']], @@ -593,102 +622,115 @@ def test_splitlines(self): assert_(len(A[0]) == 3) def test_swapcase(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'mIXEDcASE'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_)) - assert_array_equal(self.A.swapcase(), tgt) + assert_(issubclass(A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(A.swapcase(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mIXEDcASE'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.swapcase().dtype.type, np.str_)) - assert_array_equal(self.B.swapcase(), tgt) + assert_(issubclass(B.swapcase().dtype.type, np.str_)) + assert_array_equal(B.swapcase(), tgt) def test_title(self): + A, B = self.A(), self.B() tgt = [[b' Abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', 
b'Upper']] - assert_(issubclass(self.A.title().dtype.type, np.bytes_)) - assert_array_equal(self.A.title(), tgt) + assert_(issubclass(A.title().dtype.type, np.bytes_)) + assert_array_equal(A.title(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.title().dtype.type, np.str_)) - assert_array_equal(self.B.title(), tgt) + assert_(issubclass(B.title().dtype.type, np.str_)) + assert_array_equal(B.title(), tgt) def test_upper(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'MIXEDCASE'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.upper().dtype.type, np.bytes_)) - assert_array_equal(self.A.upper(), tgt) + assert_(issubclass(A.upper().dtype.type, np.bytes_)) + assert_array_equal(A.upper(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'MIXEDCASE'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.upper().dtype.type, np.str_)) - assert_array_equal(self.B.upper(), tgt) + assert_(issubclass(B.upper().dtype.type, np.str_)) + assert_array_equal(B.upper(), tgt) def test_isnumeric(self): + A, B = self.A(), self.B() def fail(): - self.A.isnumeric() + A.isnumeric() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isnumeric().dtype.type, np.bool)) - assert_array_equal(self.B.isnumeric(), [ + assert_(issubclass(B.isnumeric().dtype.type, np.bool)) + assert_array_equal(B.isnumeric(), [ [False, False], [True, False], [False, False]]) def test_isdecimal(self): + A, B = self.A(), self.B() def fail(): - self.A.isdecimal() + A.isdecimal() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isdecimal().dtype.type, np.bool)) - assert_array_equal(self.B.isdecimal(), [ + assert_(issubclass(B.isdecimal().dtype.type, np.bool)) + assert_array_equal(B.isdecimal(), [ [False, False], [True, False], [False, False]]) - class TestOperations: - def setup_method(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', '456'], - 
['051', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', '456'], + ['051', 'tuv']]).view(np.char.chararray) + + def test_argsort(self): + arr = np.array(['abc'] * 4).view(np.char.chararray) + actual = arr.argsort(stable=True) + assert_array_equal(actual, [0, 1, 2, 3]) def test_add(self): + A, B = self.A(), self.B() AB = np.array([['abcefg', '123456'], ['789051', 'xyztuv']]).view(np.char.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert_(len((self.A + self.B)[0][0]) == 6) + assert_array_equal(AB, (A + B)) + assert_(len((A + B)[0][0]) == 6) def test_radd(self): + A = self.A() QA = np.array([['qabc', 'q123'], ['q789', 'qxyz']]).view(np.char.chararray) - assert_array_equal(QA, ('q' + self.A)) + assert_array_equal(QA, ('q' + A)) def test_mul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.char.chararray) + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) - assert_array_equal(Ar, (self.A * r)) + assert_array_equal(Ar, (A * r)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, 'Can only multiply by integers'): - A*ob + A * ob def test_rmul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.char.chararray) - assert_array_equal(Ar, (r * self.A)) + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) + assert_array_equal(Ar, (r * A)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, @@ -713,13 +755,14 @@ def test_mod(self): assert_array_equal(A2, (A % [[1, 2], [3, 4]])) def test_rmod(self): - assert_(("%s" % self.A) == str(self.A)) - assert_(("%r" % self.A) == repr(self.A)) + A = self.A() + assert_(f"{A}" == str(A)) + assert_(f"{A!r}" == 
repr(A)) for ob in [42, object()]: with assert_raises_regex( TypeError, "unsupported operand type.* and 'chararray'"): - ob % self.A + ob % A def test_slice(self): """Regression test for https://github.com/numpy/numpy/issues/5982""" @@ -748,27 +791,21 @@ def test_getitem_length_zero_item(self, data): # or does not have length 0. assert_equal(a[1], a.dtype.type()) - class TestMethodsEmptyArray: - def setup_method(self): - self.U = np.array([], dtype='U') - self.S = np.array([], dtype='S') - def test_encode(self): - res = np.char.encode(self.U) + res = np.char.encode(np.array([], dtype='U')) assert_array_equal(res, []) assert_(res.dtype.char == 'S') def test_decode(self): - res = np.char.decode(self.S) + res = np.char.decode(np.array([], dtype='S')) assert_array_equal(res, []) assert_(res.dtype.char == 'U') def test_decode_with_reshape(self): - res = np.char.decode(self.S.reshape((1, 0, 1))) + res = np.char.decode(np.array([], dtype='S').reshape((1, 0, 1))) assert_(res.shape == (1, 0, 1)) - class TestMethodsScalarValues: def test_mod(self): A = np.array([[' abc ', ''], @@ -813,7 +850,6 @@ def test_replace(self): assert_equal(np.char.replace('Python is good', 'good', 'great'), 'Python is great') - def test_empty_indexing(): """Regression test for ticket 1948.""" # Check that indexing a chararray with an empty list/array returns an diff --git a/blimgui/dist64/numpy/_core/tests/test_deprecations.py b/blimgui/dist64/numpy/_core/tests/test_deprecations.py index fcbbf1e..0d4ad03 100644 --- a/blimgui/dist64/numpy/_core/tests/test_deprecations.py +++ b/blimgui/dist64/numpy/_core/tests/test_deprecations.py @@ -3,25 +3,15 @@ to document how deprecations should eventually be turned into errors. 
""" +import contextlib import warnings + import pytest -import tempfile -import re import numpy as np -from numpy.testing import ( - assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, - KnownFailureException, break_cycles, temppath - ) - -from numpy._core._multiarray_tests import fromstring_null_term_c_api import numpy._core._struct_ufunc_tests as struct_ufunc - -try: - import pytz - _has_pytz = True -except ImportError: - _has_pytz = False +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 +from numpy.testing import assert_raises class _DeprecationTestCase: @@ -30,22 +20,20 @@ class _DeprecationTestCase: message = '' warning_cls = DeprecationWarning - def setup_method(self): - self.warn_ctx = warnings.catch_warnings(record=True) - self.log = self.warn_ctx.__enter__() - - # Do *not* ignore other DeprecationWarnings. Ignoring warnings - # can give very confusing results because of - # https://bugs.python.org/issue4180 and it is probably simplest to - # try to keep the tests cleanly giving only the right warning type. - # (While checking them set to "error" those are ignored anyway) - # We still have them show up, because otherwise they would be raised - warnings.filterwarnings("always", category=self.warning_cls) - warnings.filterwarnings("always", message=self.message, - category=self.warning_cls) - - def teardown_method(self): - self.warn_ctx.__exit__() + @contextlib.contextmanager + def filter_warnings(self): + with warnings.catch_warnings(record=True) as w: + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. 
+ # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + yield w + return def assert_deprecated(self, function, num=1, ignore_others=False, function_fails=False, @@ -82,20 +70,20 @@ def assert_deprecated(self, function, num=1, ignore_others=False, """ __tracebackhide__ = True # Hide traceback for py.test - # reset the log - self.log[:] = [] - if exceptions is np._NoValue: exceptions = (self.warning_cls,) - try: - function(*args, **kwargs) - except (Exception if function_fails else tuple()): - pass + if function_fails: + context_manager = contextlib.suppress(Exception) + else: + context_manager = contextlib.nullcontext() + with context_manager: + with self.filter_warnings() as w_context: + function(*args, **kwargs) # just in case, clear the registry num_found = 0 - for warning in self.log: + for warning in w_context: if warning.category is self.warning_cls: num_found += 1 elif not ignore_others: @@ -103,8 +91,8 @@ def assert_deprecated(self, function, num=1, ignore_others=False, "expected %s but got: %s" % (self.warning_cls.__name__, warning.category)) if num is not None and num_found != num: - msg = "%i warnings found but %i expected." % (len(self.log), num) - lst = [str(w) for w in self.log] + msg = f"{len(w_context)} warnings found but {num} expected." 
+ lst = [str(w) for w in w_context] raise AssertionError("\n".join([msg] + lst)) with warnings.catch_warnings(): @@ -112,11 +100,11 @@ def assert_deprecated(self, function, num=1, ignore_others=False, category=self.warning_cls) try: function(*args, **kwargs) - if exceptions != tuple(): + if exceptions != (): raise AssertionError( "No error raised during function call") except exceptions: - if exceptions == tuple(): + if exceptions == (): raise AssertionError( "Error raised during function call") @@ -129,34 +117,16 @@ def assert_not_deprecated(self, function, args=(), kwargs={}): exceptions=tuple(), args=args, kwargs=kwargs) """ self.assert_deprecated(function, num=0, ignore_others=True, - exceptions=tuple(), args=args, kwargs=kwargs) + exceptions=(), args=args, kwargs=kwargs) class _VisibleDeprecationTestCase(_DeprecationTestCase): warning_cls = np.exceptions.VisibleDeprecationWarning -class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase): - # Deprecated 2021-01-05, NumPy 1.21 - message = r".*`.dtype` attribute" - - def test_deprecation_dtype_attribute_is_dtype(self): - class dt: - dtype = "f8" - - class vdt(np.void): - dtype = "f,f" - - self.assert_deprecated(lambda: np.dtype(dt)) - self.assert_deprecated(lambda: np.dtype(dt())) - self.assert_deprecated(lambda: np.dtype(vdt)) - self.assert_deprecated(lambda: np.dtype(vdt(1))) - - class TestTestDeprecated: def test_assert_deprecated(self): test_case_instance = _DeprecationTestCase() - test_case_instance.setup_method() assert_raises(AssertionError, test_case_instance.assert_deprecated, lambda: None) @@ -165,43 +135,9 @@ def foo(): warnings.warn("foo", category=DeprecationWarning, stacklevel=2) test_case_instance.assert_deprecated(foo) - test_case_instance.teardown_method() - - -class TestNonNumericConjugate(_DeprecationTestCase): - """ - Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes, - which conflicts with the error behavior of np.conjugate. 
- """ - def test_conjugate(self): - for a in np.array(5), np.array(5j): - self.assert_not_deprecated(a.conjugate) - for a in (np.array('s'), np.array('2016', 'M'), - np.array((1, 2), [('a', int), ('b', int)])): - self.assert_deprecated(a.conjugate) - - -class TestDatetimeEvent(_DeprecationTestCase): - # 2017-08-11, 1.14.0 - def test_3_tuple(self): - for cls in (np.datetime64, np.timedelta64): - # two valid uses - (unit, num) and (unit, num, den, None) - self.assert_not_deprecated(cls, args=(1, ('ms', 2))) - self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None))) - - # trying to use the event argument, removed in 1.7.0, is deprecated - # it used to be a uint8 - self.assert_deprecated(cls, args=(1, ('ms', 2, 'event'))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 63))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event'))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) class TestBincount(_DeprecationTestCase): - # 2017-06-01, 1.14.0 - def test_bincount_minlength(self): - self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) - # 2024-07-29, 2.1.0 @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], ['0', '1', '1']]) @@ -209,117 +145,6 @@ def test_bincount_bad_list(self, badlist): self.assert_deprecated(lambda: np.bincount(badlist)) -class TestGeneratorSum(_DeprecationTestCase): - # 2018-02-25, 1.15.0 - def test_generator_sum(self): - self.assert_deprecated(np.sum, args=((i for i in range(5)),)) - - -class TestFromstring(_DeprecationTestCase): - # 2017-10-19, 1.14 - def test_fromstring(self): - self.assert_deprecated(np.fromstring, args=('\x00'*80,)) - - -class TestFromStringAndFileInvalidData(_DeprecationTestCase): - # 2019-06-08, 1.17.0 - # Tests should be moved to real tests when deprecation is done. 
- message = "string or file could not be read to its end" - - @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) - def test_deprecate_unparsable_data_file(self, invalid_str): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - - with tempfile.TemporaryFile(mode="w") as f: - x.tofile(f, sep=',', format='%.2f') - f.write(invalid_str) - - f.seek(0) - self.assert_deprecated(lambda: np.fromfile(f, sep=",")) - f.seek(0) - self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5)) - # Should not raise: - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - f.seek(0) - res = np.fromfile(f, sep=",", count=4) - assert_array_equal(res, x) - - @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) - def test_deprecate_unparsable_string(self, invalid_str): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - x_str = "1.51,2,3.51,4{}".format(invalid_str) - - self.assert_deprecated(lambda: np.fromstring(x_str, sep=",")) - self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5)) - - # The C-level API can use not fixed size, but 0 terminated strings, - # so test that as well: - bytestr = x_str.encode("ascii") - self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr)) - - with assert_warns(DeprecationWarning): - # this is slightly strange, in that fromstring leaves data - # potentially uninitialized (would be good to error when all is - # read, but count is larger then actual data maybe). - res = np.fromstring(x_str, sep=",", count=5) - assert_array_equal(res[:-1], x) - - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - - # Should not raise: - res = np.fromstring(x_str, sep=",", count=4) - assert_array_equal(res, x) - - -class TestToString(_DeprecationTestCase): - # 2020-03-06 1.19.0 - message = re.escape("tostring() is deprecated. 
Use tobytes() instead.") - - def test_tostring(self): - arr = np.array(list(b"test\xFF"), dtype=np.uint8) - self.assert_deprecated(arr.tostring) - - def test_tostring_matches_tobytes(self): - arr = np.array(list(b"test\xFF"), dtype=np.uint8) - b = arr.tobytes() - with assert_warns(DeprecationWarning): - s = arr.tostring() - assert s == b - - -class TestDTypeCoercion(_DeprecationTestCase): - # 2020-02-06 1.19.0 - message = "Converting .* to a dtype .*is deprecated" - deprecated_types = [ - # The builtin scalar super types: - np.generic, np.flexible, np.number, - np.inexact, np.floating, np.complexfloating, - np.integer, np.unsignedinteger, np.signedinteger, - # character is a deprecated S1 special case: - np.character, - ] - - def test_dtype_coercion(self): - for scalar_type in self.deprecated_types: - self.assert_deprecated(np.dtype, args=(scalar_type,)) - - def test_array_construction(self): - for scalar_type in self.deprecated_types: - self.assert_deprecated(np.array, args=([], scalar_type,)) - - def test_not_deprecated(self): - # All specific types are not deprecated: - for group in np._core.sctypes.values(): - for scalar_type in group: - self.assert_not_deprecated(np.dtype, args=(scalar_type,)) - - for scalar_type in [type, dict, list, tuple]: - # Typical python types are coerced to object currently: - self.assert_not_deprecated(np.dtype, args=(scalar_type,)) - - class BuiltInRoundComplexDType(_DeprecationTestCase): # 2020-03-31 1.19.0 deprecated_types = [np.csingle, np.cdouble, np.clongdouble] @@ -344,57 +169,6 @@ def test_not_deprecated(self): self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) -class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase): - # 2020-05-27, NumPy 1.20.0 - message = "Out of bound index found. This was previously ignored.*" - - @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])]) - def test_empty_subspace(self, index): - # Test for both a single and two/multiple advanced indices. 
These - # This will raise an IndexError in the future. - arr = np.ones((2, 2, 0)) - self.assert_deprecated(arr.__getitem__, args=(index,)) - self.assert_deprecated(arr.__setitem__, args=(index, 0.)) - - # for this array, the subspace is only empty after applying the slice - arr2 = np.ones((2, 2, 1)) - index2 = (slice(0, 0),) + index - self.assert_deprecated(arr2.__getitem__, args=(index2,)) - self.assert_deprecated(arr2.__setitem__, args=(index2, 0.)) - - def test_empty_index_broadcast_not_deprecated(self): - arr = np.ones((2, 2, 2)) - - index = ([[3], [2]], []) # broadcast to an empty result. - self.assert_not_deprecated(arr.__getitem__, args=(index,)) - self.assert_not_deprecated(arr.__setitem__, - args=(index, np.empty((2, 0, 2)))) - - -class TestNonExactMatchDeprecation(_DeprecationTestCase): - # 2020-04-22 - def test_non_exact_match(self): - arr = np.array([[3, 6, 6], [4, 5, 1]]) - # misspelt mode check - self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp')) - # using completely different word with first character as R - self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random')) - - -class TestMatrixInOuter(_DeprecationTestCase): - # 2020-05-13 NumPy 1.20.0 - message = (r"add.outer\(\) was passed a numpy matrix as " - r"(first|second) argument.") - - def test_deprecated(self): - arr = np.array([1, 2, 3]) - m = np.array([1, 2, 3]).view(np.matrix) - self.assert_deprecated(np.add.outer, args=(m, m), num=2) - self.assert_deprecated(np.add.outer, args=(arr, m)) - self.assert_deprecated(np.add.outer, args=(m, arr)) - self.assert_not_deprecated(np.add.outer, args=(arr, arr)) - - class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): # NumPy 1.20, 2020-09-03 message = "concatenate with `axis=None` will use same-kind casting" @@ -402,7 +176,7 @@ class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): def test_deprecated(self): self.assert_deprecated(np.concatenate, args=(([0.], [1.]),), - kwargs=dict(axis=None, 
out=np.empty(2, dtype=np.int64))) + kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64)}) def test_not_deprecated(self): self.assert_not_deprecated(np.concatenate, @@ -416,147 +190,14 @@ def test_not_deprecated(self): casting="same_kind") -class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase): - # Deprecated 2020-11-24, NumPy 1.20 - """ - Technically, it should be impossible to create numpy object scalars, - but there was an unpickle path that would in theory allow it. That - path is invalid and must lead to the warning. - """ - message = "Unpickling a scalar with object dtype is deprecated." - - def test_deprecated(self): - ctor = np._core.multiarray.scalar - self.assert_deprecated(lambda: ctor(np.dtype("O"), 1)) - - -class TestSingleElementSignature(_DeprecationTestCase): - # Deprecated 2021-04-01, NumPy 1.21 - message = r"The use of a length 1" - - def test_deprecated(self): - self.assert_deprecated(lambda: np.add(1, 2, signature="d")) - self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),))) - - class TestCtypesGetter(_DeprecationTestCase): - # Deprecated 2021-05-18, Numpy 1.21.0 - warning_cls = DeprecationWarning ctypes = np.array([1]).ctypes - @pytest.mark.parametrize( - "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] - ) - def test_deprecated(self, name: str) -> None: - func = getattr(self.ctypes, name) - self.assert_deprecated(lambda: func()) - - @pytest.mark.parametrize( - "name", ["data", "shape", "strides", "_as_parameter_"] - ) + @pytest.mark.parametrize("name", ["data", "shape", "strides", "_as_parameter_"]) def test_not_deprecated(self, name: str) -> None: self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) -PARTITION_DICT = { - "partition method": np.arange(10).partition, - "argpartition method": np.arange(10).argpartition, - "partition function": lambda kth: np.partition(np.arange(10), kth), - "argpartition function": lambda kth: np.argpartition(np.arange(10), kth), -} - - 
-@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT) -class TestPartitionBoolIndex(_DeprecationTestCase): - # Deprecated 2021-09-29, NumPy 1.22 - warning_cls = DeprecationWarning - message = "Passing booleans as partition index is deprecated" - - def test_deprecated(self, func): - self.assert_deprecated(lambda: func(True)) - self.assert_deprecated(lambda: func([False, True])) - - def test_not_deprecated(self, func): - self.assert_not_deprecated(lambda: func(1)) - self.assert_not_deprecated(lambda: func([0, 1])) - - -class TestMachAr(_DeprecationTestCase): - # Deprecated 2022-11-22, NumPy 1.25 - warning_cls = DeprecationWarning - - def test_deprecated_module(self): - self.assert_deprecated(lambda: np._core.MachAr) - - -class TestQuantileInterpolationDeprecation(_DeprecationTestCase): - # Deprecated 2021-11-08, NumPy 1.22 - @pytest.mark.parametrize("func", - [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) - def test_deprecated(self, func): - self.assert_deprecated( - lambda: func([0., 1.], 0., interpolation="linear")) - self.assert_deprecated( - lambda: func([0., 1.], 0., interpolation="nearest")) - - @pytest.mark.parametrize("func", - [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) - def test_both_passed(self, func): - with warnings.catch_warnings(): - # catch the DeprecationWarning so that it does not raise: - warnings.simplefilter("always", DeprecationWarning) - with pytest.raises(TypeError): - func([0., 1.], 0., interpolation="nearest", method="nearest") - - -class TestArrayFinalizeNone(_DeprecationTestCase): - message = "Setting __array_finalize__ = None" - - def test_use_none_is_deprecated(self): - # Deprecated way that ndarray itself showed nothing needs finalizing. 
- class NoFinalize(np.ndarray): - __array_finalize__ = None - - self.assert_deprecated(lambda: np.array(1).view(NoFinalize)) - - -class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase): - # Deprecated 2022-07-03, NumPy 1.23 - # This test can be removed without replacement after the deprecation. - # The tests: - # * numpy/lib/tests/test_loadtxt.py::test_integer_signs - # * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails - # Have a warning filter that needs to be removed. - message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*" - - @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) - def test_deprecated_warning(self, dtype): - with pytest.warns(DeprecationWarning, match=self.message): - np.loadtxt(["10.5"], dtype=dtype) - - @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) - def test_deprecated_raised(self, dtype): - # The DeprecationWarning is chained when raised, so test manually: - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - try: - np.loadtxt(["10.5"], dtype=dtype) - except ValueError as e: - assert isinstance(e.__cause__, DeprecationWarning) - - -class TestScalarConversion(_DeprecationTestCase): - # 2023-01-02, 1.25.0 - def test_float_conversion(self): - self.assert_deprecated(float, args=(np.array([3.14]),)) - - def test_behaviour(self): - b = np.array([[3.14]]) - c = np.zeros(5) - with pytest.warns(DeprecationWarning): - c[0] = b - - class TestPyIntConversion(_DeprecationTestCase): message = r".*stop allowing conversion of out-of-bound.*" @@ -633,40 +274,34 @@ def test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - from numpy.lib._utils_impl import safe_eval + from numpy import row_stack + from numpy._core.numerictypes import maximum_sctype from numpy.lib._npyio_impl import recfromcsv, recfromtxt - from numpy.lib._function_base_impl import disp 
from numpy.lib._shape_base_impl import get_array_wrap - from numpy._core.numerictypes import maximum_sctype + from numpy.lib._utils_impl import safe_eval from numpy.lib.tests.test_io import TextIO - from numpy import in1d, row_stack, trapz self.assert_deprecated(lambda: safe_eval("None")) data_gen = lambda: TextIO('A,B\n0,1\n2,3') - kwargs = dict(delimiter=",", missing_values="N/A", names=True) + kwargs = {'delimiter': ",", 'missing_values': "N/A", 'names': True} self.assert_deprecated(lambda: recfromcsv(data_gen())) self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) - self.assert_deprecated(lambda: disp("test")) - self.assert_deprecated(lambda: get_array_wrap()) + self.assert_deprecated(get_array_wrap) self.assert_deprecated(lambda: maximum_sctype(int)) - self.assert_deprecated(lambda: in1d([1], [1])) self.assert_deprecated(lambda: row_stack([[]])) - self.assert_deprecated(lambda: trapz([1], [1])) self.assert_deprecated(lambda: np.chararray) class TestDeprecatedDTypeAliases(_DeprecationTestCase): def _check_for_warning(self, func): - with warnings.catch_warnings(record=True) as caught_warnings: + with pytest.warns(DeprecationWarning, + match="alias 'a' was deprecated in NumPy 2.0") as w: func() - assert len(caught_warnings) == 1 - w = caught_warnings[0] - assert w.category is DeprecationWarning - assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) + assert len(w) == 1 def test_a_dtype_alias(self): for dtype in ["a", "a10"]: @@ -702,6 +337,12 @@ def __array_wrap__(self, arr): self.assert_deprecated(lambda: np.negative(test2)) assert test2.called +class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): + message = "Setting the .*on a NumPy array has been deprecated.*" + + def test_deprecated_strides_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): @@ -712,34 +353,108 @@ def test_parenthesized_repeat_count(self, 
string): self.assert_deprecated(np.dtype, args=(string,)) -class TestDeprecatedSaveFixImports(_DeprecationTestCase): - # Deprecated in Numpy 2.1, 2024-05 - message = "The 'fix_imports' flag is deprecated and has no effect." - - def test_deprecated(self): - with temppath(suffix='.npy') as path: - sample_args = (path, np.array(np.zeros((1024, 10)))) - self.assert_not_deprecated(np.save, args=sample_args) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': False}) - for allow_pickle in [True, False]: - self.assert_not_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': False}) - - class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 + @pytest.mark.thread_unsafe( + reason="modifies and checks docstring which is global state" + ) def test_deprecated(self): + doc = struct_ufunc.add_triplet.__doc__ + # gh-26718 + # This test mutates the C-level docstring pointer for add_triplet, + # which is permanent once set. Skip when re-running tests. + if doc is not None and "new docs" in doc: + pytest.skip("Cannot retest deprecation, otherwise ValueError: " + "Cannot change docstring of ufunc with non-NULL docstring") self.assert_deprecated( lambda: np._core.umath._add_newdoc_ufunc( struct_ufunc.add_triplet, "new docs" ) ) + + +class TestDTypeAlignBool(_VisibleDeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + # NOTE: As you can see, finalizing this deprecation breaks some (very) old + # pickle files. This may be fine, but needs to be done with some care since + # it breaks all of them and not just some. + # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) 
+ message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " + + def test_deprecated(self): + # in particular integers should be rejected because one may think they mean + # alignment, or pass them accidentally as a subarray shape (meaning to pass + # a tuple). + self.assert_deprecated(lambda: np.dtype("f8", align=3)) + + @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) + def test_not_deprecated(self, align): + # if the user passes a bool, it is accepted. + self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + + +class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + message = r"Indexing flat iterators with a 0-dimensional boolean index" + + def test_0d_boolean_index_deprecated(self): + arr = np.arange(3) + # 0d boolean indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[True]) + + def test_0d_boolean_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[True] = 10 + + self.assert_deprecated(assign_to_index) + + +class TestFlatiterIndexingFloatIndex(_DeprecationTestCase): + # Deprecated in NumPy 2.4, 2025-07 + message = r"Invalid non-array indices for iterator objects" + + def test_float_index_deprecated(self): + arr = np.arange(3) + # float indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[[1.]]) + + def test_float_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[[1.]] = 10 + + self.assert_deprecated(assign_to_index) + + +@pytest.mark.thread_unsafe( + reason="warning control utilities are deprecated due to being thread-unsafe" +) +class TestWarningUtilityDeprecations(_DeprecationTestCase): + # Deprecation in NumPy 2.4, 2025-08 + message = r"NumPy warning suppression and assertion utilities are deprecated." 
+ + def test_assert_warns_deprecated(self): + def use_assert_warns(): + with np.testing.assert_warns(RuntimeWarning): + warnings.warn("foo", RuntimeWarning, stacklevel=1) + + self.assert_deprecated(use_assert_warns) + + def test_suppress_warnings_deprecated(self): + def use_suppress_warnings(): + with np.testing.suppress_warnings() as sup: + sup.filter(RuntimeWarning, 'invalid value encountered in divide') + + self.assert_deprecated(use_suppress_warnings) + + +class TestTooManyArgsExtremum(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-08, gh-27639 + message = "Passing more than 2 positional arguments to np.maximum and np.minimum " + + @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum]) + def test_extremem_3_args(self, ufunc): + self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1))) diff --git a/blimgui/dist64/numpy/_core/tests/test_dlpack.py b/blimgui/dist64/numpy/_core/tests/test_dlpack.py index 6967caa..a5d6cbf 100644 --- a/blimgui/dist64/numpy/_core/tests/test_dlpack.py +++ b/blimgui/dist64/numpy/_core/tests/test_dlpack.py @@ -1,8 +1,9 @@ import sys + import pytest import numpy as np -from numpy.testing import assert_array_equal, IS_PYPY +from numpy.testing import IS_PYPY, assert_array_equal def new_and_old_dlpack(): @@ -22,9 +23,9 @@ class TestDLPack: def test_dunder_dlpack_refcount(self, max_version): x = np.arange(5) y = x.__dlpack__(max_version=max_version) - assert sys.getrefcount(x) == 3 + startcount = sys.getrefcount(x) del y - assert sys.getrefcount(x) == 2 + assert startcount - sys.getrefcount(x) == 1 def test_dunder_dlpack_stream(self): x = np.arange(5) @@ -58,9 +59,9 @@ def test_strides_not_multiple_of_itemsize(self): def test_from_dlpack_refcount(self, arr): arr = arr.copy() y = np.from_dlpack(arr) - assert sys.getrefcount(arr) == 3 + startcount = sys.getrefcount(arr) del y - assert sys.getrefcount(arr) == 2 + assert startcount - sys.getrefcount(arr) == 1 @pytest.mark.parametrize("dtype", [ np.bool, @@ -183,7 
+184,7 @@ def test_device(self): np.from_dlpack(x, device="cpu") np.from_dlpack(x, device=None) - with pytest.raises(ValueError): + with pytest.raises(BufferError): x.__dlpack__(dl_device=(10, 0)) with pytest.raises(ValueError): np.from_dlpack(x, device="gpu") diff --git a/blimgui/dist64/numpy/_core/tests/test_dtype.py b/blimgui/dist64/numpy/_core/tests/test_dtype.py index d24df27..e255863 100644 --- a/blimgui/dist64/numpy/_core/tests/test_dtype.py +++ b/blimgui/dist64/numpy/_core/tests/test_dtype.py @@ -1,25 +1,33 @@ -import sys -import operator -import pytest +import contextlib import ctypes import gc +import inspect +import operator +import pickle +import sys import types +from itertools import permutations from typing import Any -import pickle + +import hypothesis +import pytest +from hypothesis.extra import numpy as hynp import numpy as np import numpy.dtypes -from numpy._core._rational_tests import rational from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT, - IS_PYSTON) -from itertools import permutations -import random - -import hypothesis -from hypothesis.extra import numpy as hynp - + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_PYSTON, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) def assert_dtype_equal(a, b): @@ -181,21 +189,21 @@ def test_dtype_from_bytes(self): def test_bad_param(self): # Can't give a size that's too small assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i4', 'i1'], - 'offsets':[0, 4], - 'itemsize':4}) + {'names': ['f0', 'f1'], + 'formats': ['i4', 'i1'], + 'offsets': [0, 4], + 'itemsize': 4}) # If alignment is enabled, the alignment (4) must divide the itemsize assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i4', 'i1'], - 'offsets':[0, 4], - 'itemsize':9}, align=True) + {'names': ['f0', 
'f1'], + 'formats': ['i4', 'i1'], + 'offsets': [0, 4], + 'itemsize': 9}, align=True) # If alignment is enabled, the individual fields must be aligned assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i1', 'f4'], - 'offsets':[0, 2]}, align=True) + {'names': ['f0', 'f1'], + 'formats': ['i1', 'f4'], + 'offsets': [0, 2]}, align=True) def test_field_order_equality(self): x = np.dtype({'names': ['A', 'B'], @@ -205,7 +213,7 @@ def test_field_order_equality(self): 'formats': ['i4', 'f4'], 'offsets': [4, 0]}) assert_equal(x == y, False) - # This is an safe cast (not equiv) due to the different names: + # This is a safe cast (not equiv) due to the different names: assert np.can_cast(x, y, casting="safe") @pytest.mark.parametrize( @@ -218,7 +226,7 @@ def test_create_string_dtypes_directly( dtype = dtype_class(8) assert dtype.type is scalar_type - assert dtype.itemsize == 8*char_size + assert dtype.itemsize == 8 * char_size def test_create_invalid_string_errors(self): one_too_big = np.iinfo(np.intc).max + 1 @@ -282,7 +290,7 @@ def test_refcount_dictionary_setting(self): formats = ["f8"] titles = ["t1"] offsets = [0] - d = dict(names=names, formats=formats, titles=titles, offsets=offsets) + d = {"names": names, "formats": formats, "titles": titles, "offsets": offsets} refcounts = {k: sys.getrefcount(i) for k, i in d.items()} np.dtype(d) refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()} @@ -326,9 +334,9 @@ def test_not_lists(self): the dtype constructor. 
""" assert_raises(TypeError, np.dtype, - dict(names={'A', 'B'}, formats=['f8', 'i4'])) + {"names": {'A', 'B'}, "formats": ['f8', 'i4']}) assert_raises(TypeError, np.dtype, - dict(names=['A', 'B'], formats={'f8', 'i4'})) + {"names": ['A', 'B'], "formats": {'f8', 'i4'}}) def test_aligned_size(self): # Check that structured dtypes get padded to an aligned size @@ -336,22 +344,22 @@ def test_aligned_size(self): assert_equal(dt.itemsize, 8) dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True) assert_equal(dt.itemsize, 8) - dt = np.dtype({'names':['f0', 'f1'], - 'formats':['i4', 'u1'], - 'offsets':[0, 4]}, align=True) + dt = np.dtype({'names': ['f0', 'f1'], + 'formats': ['i4', 'u1'], + 'offsets': [0, 4]}, align=True) assert_equal(dt.itemsize, 8) - dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True) + dt = np.dtype({'f0': ('i4', 0), 'f1': ('u1', 4)}, align=True) assert_equal(dt.itemsize, 8) # Nesting should preserve that alignment dt1 = np.dtype([('f0', 'i4'), ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), ('f2', 'i1')], align=True) assert_equal(dt1.itemsize, 20) - dt2 = np.dtype({'names':['f0', 'f1', 'f2'], - 'formats':['i4', + dt2 = np.dtype({'names': ['f0', 'f1', 'f2'], + 'formats': ['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'], - 'offsets':[0, 4, 16]}, align=True) + 'offsets': [0, 4, 16]}, align=True) assert_equal(dt2.itemsize, 20) dt3 = np.dtype({'f0': ('i4', 0), 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), @@ -364,11 +372,11 @@ def test_aligned_size(self): ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), ('f2', 'i1')], align=False) assert_equal(dt1.itemsize, 11) - dt2 = np.dtype({'names':['f0', 'f1', 'f2'], - 'formats':['i4', + dt2 = np.dtype({'names': ['f0', 'f1', 'f2'], + 'formats': ['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'], - 'offsets':[0, 4, 10]}, align=False) + 'offsets': [0, 4, 10]}, align=False) assert_equal(dt2.itemsize, 11) dt3 = np.dtype({'f0': ('i4', 0), 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 
'i1')], 4), @@ -401,23 +409,23 @@ def test_empty_struct_alignment(self): def test_union_struct(self): # Should be able to create union dtypes - dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['i4,j', [a, a], casting='same_value') + + def test_einsum_sorting_behavior(self): + # Case 1: 26 dimensions (all lowercase indices) + n1 = 26 + x1 = np.random.random((1,) * n1) + path1 = np.einsum_path(x1, range(n1))[1] # Get einsum path details + output_indices1 = path1.split("->")[-1].strip() # Extract output indices + # Assert indices are only uppercase letters and sorted correctly + assert all(c.isupper() for c in output_indices1), ( + "Output indices for n=26 should use uppercase letters only: " + f"{output_indices1}" + ) + assert_equal( + output_indices1, + ''.join(sorted(output_indices1)), + err_msg=( + "Output indices for n=26 are not lexicographically sorted: " + f"{output_indices1}" + ) + ) + + # Case 2: 27 dimensions (includes uppercase indices) + n2 = 27 + x2 = np.random.random((1,) * n2) + path2 = np.einsum_path(x2, range(n2))[1] + output_indices2 = path2.split("->")[-1].strip() + # Assert indices include both uppercase and lowercase letters + assert any(c.islower() for c in output_indices2), ( + "Output indices for n=27 should include uppercase letters: " + f"{output_indices2}" + ) + # Assert output indices are sorted uppercase before lowercase + assert_equal( + output_indices2, + ''.join(sorted(output_indices2)), + err_msg=( + "Output indices for n=27 are not lexicographically sorted: " + f"{output_indices2}" + ) + ) + + # Additional Check: Ensure dimensions correspond correctly to indices + # Generate expected mapping of dimensions to indices + expected_indices = [ + chr(i + ord('A')) if i < 26 else chr(i - 26 + ord('a')) + for i in range(n2) + ] + assert_equal( + output_indices2, + ''.join(expected_indices), + err_msg=( + "Output indices do not map to the correct dimensions. 
Expected: " + f"{''.join(expected_indices)}, Got: {output_indices2}" + ) + ) + @pytest.mark.parametrize("do_opt", [True, False]) def test_einsum_specific_errors(self, do_opt): # out parameter must be an array @@ -152,7 +218,7 @@ def __rmul__(self, other): assert_raises(CustomException, np.einsum, "ij->i", a) # raised from unbuffered_loop_nop1_ndim3 - b = np.array([DestructoBox(i, 100) for i in range(0, 27)], + b = np.array([DestructoBox(i, 100) for i in range(27)], dtype='object').reshape(3, 3, 3) assert_raises(CustomException, np.einsum, "i...k->...", b) @@ -170,21 +236,20 @@ def __rmul__(self, other): def test_einsum_views(self): # pass-through for do_opt in [True, False]: - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum(a, [Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum("ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) # output is writeable whenever input is writeable @@ -195,115 +260,110 @@ def test_einsum_views(self): assert_(not b.flags['WRITEABLE']) # transpose - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("ji", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) # diagonal - a = np.arange(9) - a.shape = (3, 3) + a = np.arange(9).reshape((3, 3)) b = np.einsum("ii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with 
various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("...ii->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 
0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("iii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) + a = np.arange(24).reshape((2, 3, 4)) b = np.einsum("ijk->jik", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) def check_einsum_sums(self, dtype, do_opt=False): @@ -320,7 +380,7 @@ def check_einsum_sums(self, dtype, do_opt=False): assert_equal(np.einsum(a, [0], [], optimize=do_opt), b) for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) b = np.sum(a, axis=-1) if hasattr(b, 'astype'): b = b.astype(dtype) @@ -329,7 +389,7 @@ def check_einsum_sums(self, dtype, do_opt=False): # sum(a, axis=0) for n in range(1, 17): - a = np.arange(2*n, dtype=dtype).reshape(2, n) + a = np.arange(2 * n, dtype=dtype).reshape(2, n) b = np.sum(a, axis=0) if hasattr(b, 'astype'): b = b.astype(dtype) @@ -337,7 +397,7 @@ def check_einsum_sums(self, dtype, 
do_opt=False): assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) b = np.sum(a, axis=0) if hasattr(b, 'astype'): b = b.astype(dtype) @@ -346,7 +406,7 @@ def check_einsum_sums(self, dtype, do_opt=False): # trace(a) for n in range(1, 17): - a = np.arange(n*n, dtype=dtype).reshape(n, n) + a = np.arange(n * n, dtype=dtype).reshape(n, n) b = np.trace(a) if hasattr(b, 'astype'): b = b.astype(dtype) @@ -386,20 +446,20 @@ def check_einsum_sums(self, dtype, do_opt=False): # outer(a,b) for n in range(1, 17): - a = np.arange(3, dtype=dtype)+1 - b = np.arange(n, dtype=dtype)+1 + a = np.arange(3, dtype=dtype) + 1 + b = np.arange(n, dtype=dtype) + 1 assert_equal(np.einsum("i,j", a, b, optimize=do_opt), np.outer(a, b)) assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), np.outer(a, b)) # Suppress the complex warnings for the 'as f8' tests - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) # matvec(a,b) / a.dot(b) where a is matrix, b is vector for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) + a = np.arange(4 * n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), np.dot(a, b)) @@ -420,7 +480,7 @@ def check_einsum_sums(self, dtype, do_opt=False): b.astype('f8')).astype(dtype)) for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) + a = np.arange(4 * n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), np.dot(b.T, a.T)) @@ -443,16 +503,16 @@ def check_einsum_sums(self, dtype, do_opt=False): # matmat(a,b) / a.dot(b) where a is matrix, b is matrix for n in range(1, 17): if n < 8 or dtype != 'f2': - a = np.arange(4*n, 
dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), np.dot(a, b)) assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), np.dot(a, b)) for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) c = np.arange(24, dtype=dtype).reshape(4, 6) np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', optimize=do_opt) @@ -525,10 +585,10 @@ def check_einsum_sums(self, dtype, do_opt=False): np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) a = np.arange(9, dtype=dtype) - assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) - assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) - assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) - assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) + assert_equal(np.einsum(",i->", 3, a), 3 * np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3 * np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3 * np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3 * np.sum(a)) # Various stride0, contiguous, and SSE aligned variants for n in range(1, 25): @@ -537,21 +597,21 @@ def check_einsum_sums(self, dtype, do_opt=False): assert_equal(np.einsum("...,...", a, a, optimize=do_opt), np.multiply(a, a)) assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) - assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a) - assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a) - assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a)) - assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a)) + assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2 * a) + assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2 * a) + assert_equal(np.einsum("i,->", 
a, 2, optimize=do_opt), 2 * np.sum(a)) + assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2 * np.sum(a)) assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), np.multiply(a[1:], a[:-1])) assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), np.dot(a[1:], a[:-1])) - assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:]) - assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:]) + assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2 * a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2 * a[1:]) assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), - 2*np.sum(a[1:])) + 2 * np.sum(a[1:])) assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), - 2*np.sum(a[1:])) + 2 * np.sum(a[1:])) # An object array, summed as the data type a = np.arange(9, dtype=object) @@ -575,8 +635,8 @@ def check_einsum_sums(self, dtype, do_opt=False): assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) # singleton dimensions broadcast (gh-10343) - p = np.ones((10,2)) - q = np.ones((1,2)) + p = np.ones((10, 2)) + q = np.ones((1, 2)) assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), np.einsum('ij,ij->j', p, q, optimize=False)) assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), @@ -709,7 +769,7 @@ def __mul__(self, other): return 42 objMult = np.array([Mult()]) - objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object) + objNULL = np.ndarray(buffer=b'\0' * np.intp(0).itemsize, shape=1, dtype=object) with pytest.raises(TypeError): np.einsum("i,j", [1], objNULL) @@ -1059,6 +1119,41 @@ def test_output_order(self): tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt) assert_(tmp.flags.c_contiguous) + def test_singleton_broadcasting(self): + eq = "ijp,ipq,ikq->ijk" + shapes = ((3, 1, 1), (3, 1, 3), (1, 3, 3)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "jhcabhijaci,dfijejgh->fgje" + shapes 
= ( + (1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1), + (3, 1, 3, 1, 1, 1, 1, 2), + ) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "baegffahgc,hdggeff->dhg" + shapes = ((2, 1, 4, 1, 1, 1, 1, 2, 1, 1), (1, 1, 1, 1, 4, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cehgbaifff,fhhdegih->cdghbi" + shapes = ((1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 1, 1, 2, 4, 1, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "gah,cdbcghefg->ef" + shapes = ((2, 3, 1), (1, 3, 1, 1, 1, 2, 1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cacc,bcb->" + shapes = ((1, 1, 1, 1), (1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + class TestEinsumPath: def build_operands(self, string, size_dict=global_size_dict): @@ -1185,7 +1280,7 @@ def test_path_type_input(self): assert_almost_equal(noopt, opt) def test_path_type_input_internal_trace(self): - #gh-20962 + # gh-20962 path_test = self.build_operands('cab,cdd->ab') exp_path = ['einsum_path', (1,), (0, 1)] @@ -1211,7 +1306,7 @@ def test_path_type_input_invalid(self): RuntimeError, np.einsum_path, *path_test, optimize=exp_path) def test_spaces(self): - #gh-10794 + # gh-10794 arr = np.array([[1]]) for sp in itertools.product(['', ' '], repeat=4): # no error for any spacing @@ -1224,6 +1319,33 @@ def test_overlap(): # sanity check c = np.einsum('ij,jk->ik', a, b) assert_equal(c, d) - #gh-10080, out overlaps one of the operands + # gh-10080, out overlaps one of the operands c = np.einsum('ij,jk->ik', a, b, out=b) assert_equal(c, d) + +def test_einsum_chunking_precision(): + """Most einsum operations are reductions and until NumPy 2.3 reductions + never (or almost never?) 
used the `GROWINNER` mechanism to increase the + inner loop size when no buffers are needed. + Because einsum reductions work roughly: + + def inner(*inputs, out): + accumulate = 0 + for vals in zip(*inputs): + accumulate += prod(vals) + out[0] += accumulate + + Calling the inner-loop more often actually improves accuracy slightly + (same effect as pairwise summation but much less). + Without adding pairwise summation to the inner-loop it seems best to just + not use GROWINNER, a quick tests suggest that is maybe 1% slowdown for + the simplest `einsum("i,i->i", x, x)` case. + + (It is not clear that we should guarantee precision to this extend.) + """ + num = 1_000_000 + value = 1. + np.finfo(np.float64).eps * 8196 + res = np.einsum("i->", np.broadcast_to(np.array(value), num)) / num + + # At with GROWINNER 11 decimals succeed (larger will be less) + assert_almost_equal(res, value, decimal=15) diff --git a/blimgui/dist64/numpy/_core/tests/test_errstate.py b/blimgui/dist64/numpy/_core/tests/test_errstate.py index 7aa9d78..65fb23f 100644 --- a/blimgui/dist64/numpy/_core/tests/test_errstate.py +++ b/blimgui/dist64/numpy/_core/tests/test_errstate.py @@ -1,8 +1,9 @@ -import pytest import sysconfig +import pytest + import numpy as np -from numpy.testing import assert_, assert_raises, IS_WASM +from numpy.testing import IS_WASM, assert_raises # The floating point emulation on ARM EABI systems lacking a hardware FPU is # known to be buggy. This is an attempt to identify these hosts. It may not @@ -46,6 +47,7 @@ def test_divide(self): reason='platform/cpu issue with FPU (gh-15562)') def test_errcall(self): count = 0 + def foo(*args): nonlocal count count += 1 @@ -85,7 +87,7 @@ def test_errstate_enter_once(self): @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") def test_asyncio_safe(self): - # asyncio may not always work, lets assume its fine if missing + # asyncio may not always work, let's assume its fine if missing # Pyodide/wasm doesn't support it. 
If this test makes problems, # it should just be skipped liberally (or run differently). asyncio = pytest.importorskip("asyncio") diff --git a/blimgui/dist64/numpy/_core/tests/test_extint128.py b/blimgui/dist64/numpy/_core/tests/test_extint128.py index a7fd83c..a634759 100644 --- a/blimgui/dist64/numpy/_core/tests/test_extint128.py +++ b/blimgui/dist64/numpy/_core/tests/test_extint128.py @@ -1,13 +1,12 @@ -import itertools import contextlib +import itertools import operator + import pytest import numpy as np import numpy._core._multiarray_tests as mt - -from numpy.testing import assert_raises, assert_equal - +from numpy.testing import assert_equal, assert_raises INT64_MAX = np.iinfo(np.int64).max INT64_MIN = np.iinfo(np.int64).min @@ -22,8 +21,8 @@ [INT64_MIN + j for j in range(20)] + [INT64_MAX - j for j in range(20)] + [INT64_MID + j for j in range(-20, 20)] + - [2*INT64_MID + j for j in range(-20, 20)] + - [INT64_MID//2 + j for j in range(-20, 20)] + + [2 * INT64_MID + j for j in range(-20, 20)] + + [INT64_MID // 2 + j for j in range(-20, 20)] + list(range(-70, 70)) ) @@ -31,8 +30,8 @@ [INT128_MIN + j for j in range(20)] + [INT128_MAX - j for j in range(20)] + [INT128_MID + j for j in range(-20, 20)] + - [2*INT128_MID + j for j in range(-20, 20)] + - [INT128_MID//2 + j for j in range(-20, 20)] + + [2 * INT128_MID + j for j in range(-20, 20)] + + [INT128_MID // 2 + j for j in range(-20, 20)] + list(range(-70, 70)) + [False] # negative zero ) @@ -58,8 +57,7 @@ def iterate(): yield iterate() except Exception: import traceback - msg = "At: %r\n%s" % (repr(value[0]), - traceback.format_exc()) + msg = f"At: {repr(value[0])!r}\n{traceback.format_exc()}" raise AssertionError(msg) @@ -151,9 +149,9 @@ def test_shl_128(): with exc_iter(INT128_VALUES) as it: for a, in it: if a < 0: - b = -(((-a) << 1) & (2**128-1)) + b = -(((-a) << 1) & (2**128 - 1)) else: - b = (a << 1) & (2**128-1) + b = (a << 1) & (2**128 - 1) c = mt.extint_shl_128(a) if b != c: assert_equal(c, b) @@ 
-193,10 +191,10 @@ def test_divmod_128_64(): d, dr = mt.extint_divmod_128_64(a, b) - if c != d or d != dr or b*d + dr != a: + if c != d or d != dr or b * d + dr != a: assert_equal(d, c) assert_equal(dr, cr) - assert_equal(b*d + dr, a) + assert_equal(b * d + dr, a) def test_floordiv_128_64(): diff --git a/blimgui/dist64/numpy/_core/tests/test_finfo.py b/blimgui/dist64/numpy/_core/tests/test_finfo.py new file mode 100644 index 0000000..5703b8d --- /dev/null +++ b/blimgui/dist64/numpy/_core/tests/test_finfo.py @@ -0,0 +1,86 @@ +import pytest + +import numpy as np +from numpy import exp2, log10 +from numpy._core import numerictypes as ntypes + + +class MachArLike: + """Minimal class to simulate machine arithmetic parameters.""" + def __init__(self, dtype, machep, negep, minexp, maxexp, nmant, iexp): + self.dtype = dtype + self.machep = machep + self.negep = negep + self.minexp = minexp + self.maxexp = maxexp + self.nmant = nmant + self.iexp = iexp + self.eps = exp2(dtype(-nmant)) + self.epsneg = exp2(dtype(negep)) + self.precision = int(-log10(self.eps)) + self.resolution = dtype(10) ** (-self.precision) + + +@pytest.fixture +def float16_ma(): + """Machine arithmetic parameters for float16.""" + f16 = ntypes.float16 + return MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + nmant=10, + iexp=5) + + +@pytest.fixture +def float32_ma(): + """Machine arithmetic parameters for float32.""" + f32 = ntypes.float32 + return MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + nmant=23, + iexp=8) + + +@pytest.fixture +def float64_ma(): + """Machine arithmetic parameters for float64.""" + f64 = ntypes.float64 + return MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + nmant=52, + iexp=11) + + +@pytest.mark.parametrize("dtype,ma_fixture", [ + (np.half, "float16_ma"), + (np.float32, "float32_ma"), + (np.float64, "float64_ma"), +]) +@pytest.mark.parametrize("prop", [ + 'machep', 'negep', 'minexp', 'maxexp', 'nmant', 
'iexp', + 'eps', 'epsneg', 'precision', 'resolution' +]) +@pytest.mark.thread_unsafe( + reason="complex fixture setup is thread-unsafe (pytest-dev/pytest#13768.)" +) +def test_finfo_properties(dtype, ma_fixture, prop, request): + """Test that finfo properties match expected machine arithmetic values.""" + ma = request.getfixturevalue(ma_fixture) + finfo = np.finfo(dtype) + + actual = getattr(finfo, prop) + expected = getattr(ma, prop) + + assert actual == expected, ( + f"finfo({dtype}) property '{prop}' mismatch: " + f"expected {expected}, got {actual}" + ) diff --git a/blimgui/dist64/numpy/_core/tests/test_function_base.py b/blimgui/dist64/numpy/_core/tests/test_function_base.py index 81e7390..bacefd0 100644 --- a/blimgui/dist64/numpy/_core/tests/test_function_base.py +++ b/blimgui/dist64/numpy/_core/tests/test_function_base.py @@ -1,22 +1,39 @@ -import sys import platform +import sys + import pytest import numpy as np from numpy import ( - logspace, linspace, geomspace, dtype, array, arange, isnan, - ndarray, sqrt, nextafter, stack, errstate - ) + arange, + array, + dtype, + errstate, + geomspace, + isnan, + linspace, + logspace, + ndarray, + nextafter, + sqrt, + stack, +) from numpy._core import sctypes from numpy._core.function_base import add_newdoc from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, - IS_PYPY - ) + IS_PYPY, + assert_, + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) + def _is_armhf(): # Check if the current platform is ARMHF (32-bit ARM architecture) - return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' + architecture = platform.architecture() + return platform.machine().startswith('arm') and architecture[0] == '32bit' class PhysicalQuantity(float): def __new__(cls, value): @@ -39,10 +56,10 @@ def __mul__(self, x): return PhysicalQuantity(float(x) * float(self)) __rmul__ = __mul__ - def __div__(self, x): + def __truediv__(self, 
x): return PhysicalQuantity(float(self) / float(x)) - def __rdiv__(self, x): + def __rtruediv__(self, x): return PhysicalQuantity(float(x) / float(self)) @@ -195,29 +212,29 @@ def test_complex(self): assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) assert_array_equal(y.real, 0) - y = geomspace(1+1j, 1000+1000j, num=4) - assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) + y = geomspace(1 + 1j, 1000 + 1000j, num=4) + assert_allclose(y, [1 + 1j, 10 + 10j, 100 + 100j, 1000 + 1000j]) - y = geomspace(-1+1j, -1000+1000j, num=4) - assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) + y = geomspace(-1 + 1j, -1000 + 1000j, num=4) + assert_allclose(y, [-1 + 1j, -10 + 10j, -100 + 100j, -1000 + 1000j]) # Logarithmic spirals y = geomspace(-1, 1, num=3, dtype=complex) assert_allclose(y, [-1, 1j, +1]) - y = geomspace(0+3j, -3+0j, 3) - assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) - y = geomspace(0+3j, 3+0j, 3) - assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) - y = geomspace(-3+0j, 0-3j, 3) - assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) - y = geomspace(0+3j, -3+0j, 3) - assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) - y = geomspace(-2-3j, 5+7j, 7) - assert_allclose(y, [-2-3j, -0.29058977-4.15771027j, - 2.08885354-4.34146838j, 4.58345529-3.16355218j, - 6.41401745-0.55233457j, 6.75707386+3.11795092j, - 5+7j]) + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(0 + 3j, 3 + 0j, 3) + assert_allclose(y, [0 + 3j, 3 / sqrt(2) + 3j / sqrt(2), 3 + 0j]) + y = geomspace(-3 + 0j, 0 - 3j, 3) + assert_allclose(y, [-3 + 0j, -3 / sqrt(2) - 3j / sqrt(2), 0 - 3j]) + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(-2 - 3j, 5 + 7j, 7) + assert_allclose(y, [-2 - 3j, -0.29058977 - 4.15771027j, + 2.08885354 - 4.34146838j, 4.58345529 - 3.16355218j, + 6.41401745 - 0.55233457j, 6.75707386 + 3.11795092j, + 5 + 
7j]) # Type promotion should prevent the -5 from becoming a NaN y = geomspace(3j, -5, 2) @@ -228,14 +245,13 @@ def test_complex(self): def test_complex_shortest_path(self): # test the shortest logarithmic spiral is used, see gh-25644 x = 1.2 + 3.4j - y = np.exp(1j*(np.pi-.1)) * x + y = np.exp(1j * (np.pi - .1)) * x z = np.geomspace(x, y, 5) expected = np.array([1.2 + 3.4j, -1.47384 + 3.2905616j, -3.33577588 + 1.36842949j, -3.36011056 - 1.30753855j, -1.53343861 - 3.26321406j]) np.testing.assert_array_almost_equal(z, expected) - def test_dtype(self): y = geomspace(1, 1e6, dtype='float32') assert_equal(y.dtype, dtype('float32')) @@ -268,8 +284,8 @@ def test_start_stop_array_scalar(self): def test_start_stop_array(self): # Try to use all special cases. - start = array([1.e0, 32., 1j, -4j, 1+1j, -1]) - stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1]) + start = array([1.e0, 32., 1j, -4j, 1 + 1j, -1]) + stop = array([1.e4, 2., 16j, -324j, 10000 + 10000j, 1]) t1 = geomspace(start, stop, 5) t2 = stack([geomspace(_start, _stop, 5) for _start, _stop in zip(start, stop)], axis=1) @@ -363,9 +379,9 @@ def test_start_stop_array(self): def test_complex(self): lim1 = linspace(1 + 2j, 3 + 4j, 5) - t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) + t1 = array([1.0 + 2.j, 1.5 + 2.5j, 2.0 + 3j, 2.5 + 3.5j, 3.0 + 4j]) lim2 = linspace(1j, 10, 5) - t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) + t2 = array([0.0 + 1.j, 2.5 + 0.75j, 5.0 + 0.5j, 7.5 + 0.25j, 10.0 + 0j]) assert_equal(lim1, t1) assert_equal(lim2, t2) @@ -430,8 +446,8 @@ def test_denormal_numbers(self): def test_equivalent_to_arange(self): for j in range(1000): - assert_equal(linspace(0, j, j+1, dtype=int), - arange(j+1, dtype=int)) + assert_equal(linspace(0, j, j + 1, dtype=int), + arange(j + 1, dtype=int)) def test_retstep(self): for num in [0, 1, 2]: @@ -476,7 +492,7 @@ def test_add_doc(self): # test that np.add_newdoc did attach a docstring successfully: tgt = "Current flat index into the 
array." assert_equal(np._core.flatiter.index.__doc__[:len(tgt)], tgt) - assert_(len(np._core.ufunc.identity.__doc__) > 300) + assert_(len(np._core.ufunc.identity.__doc__) > 250) assert_(len(np.lib._index_tricks_impl.mgrid.__doc__) > 300) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") diff --git a/blimgui/dist64/numpy/_core/tests/test_getlimits.py b/blimgui/dist64/numpy/_core/tests/test_getlimits.py index 06282b4..5143945 100644 --- a/blimgui/dist64/numpy/_core/tests/test_getlimits.py +++ b/blimgui/dist64/numpy/_core/tests/test_getlimits.py @@ -3,12 +3,13 @@ """ import types import warnings -import numpy as np + import pytest + +import numpy as np +from numpy import double, half, longdouble, single from numpy._core import finfo, iinfo -from numpy import half, single, double, longdouble -from numpy.testing import assert_equal, assert_, assert_raises -from numpy._core.getlimits import _discovered_machar, _float_ma +from numpy.testing import assert_, assert_equal, assert_raises ################################################## @@ -107,7 +108,7 @@ def test_iinfo_repr(self): assert_equal(repr(np.iinfo(np.int16)), expected) def test_finfo_repr(self): - expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \ + expected = "finfo(resolution=1e-06, min=-3.4028235e+38,"\ " max=3.4028235e+38, dtype=float32)" assert_equal(repr(np.finfo(np.float32)), expected) @@ -137,53 +138,20 @@ def test_instances(): finfo(np.int64(1)) -def assert_ma_equal(discovered, ma_like): - # Check MachAr-like objects same as calculated MachAr instances - for key, value in discovered.__dict__.items(): - assert_equal(value, getattr(ma_like, key)) - if hasattr(value, 'shape'): - assert_equal(value.shape, getattr(ma_like, key).shape) - assert_equal(value.dtype, getattr(ma_like, key).dtype) - - -def test_known_types(): - # Test we are correctly compiling parameters for known types - for ftype, ma_like in ((np.float16, _float_ma[16]), - (np.float32, _float_ma[32]), - 
(np.float64, _float_ma[64])): - assert_ma_equal(_discovered_machar(ftype), ma_like) - # Suppress warning for broken discovery of double double on PPC - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize - if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - assert_ma_equal(ld_ma, _float_ma[80]) - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - assert_ma_equal(ld_ma, _float_ma[128]) - - def test_subnormal_warning(): """Test that the subnormal is zero warning is not being raised.""" - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - ld_ma.smallest_subnormal - assert len(w) == 0 - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - ld_ma.smallest_subnormal - assert len(w) == 0 - else: - # Double double - ld_ma.smallest_subnormal - # This test may fail on some platforms - assert len(w) == 0 + # Test for common float types + for dtype in [np.float16, np.float32, np.float64]: + f = finfo(dtype) + _ = f.smallest_subnormal + # Also test longdouble + with np.errstate(all='ignore'): + fld = finfo(np.longdouble) + _ = fld.smallest_subnormal + # Check no warnings were raised + assert len(w) == 0 def test_plausible_finfo(): diff --git a/blimgui/dist64/numpy/_core/tests/test_half.py b/blimgui/dist64/numpy/_core/tests/test_half.py index f26d48b..4b2339b 100644 --- a/blimgui/dist64/numpy/_core/tests/test_half.py +++ b/blimgui/dist64/numpy/_core/tests/test_half.py @@ -1,9 +1,10 @@ import platform + import pytest import numpy as np -from numpy import uint16, float16, float32, float64 -from numpy.testing import assert_, assert_equal, IS_WASM +from numpy 
import float16, float32, float64, uint16 +from numpy.testing import IS_WASM, assert_, assert_equal def assert_raises_fpe(strmatch, callable, *args, **kwargs): @@ -11,62 +12,69 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs): callable(*args, **kwargs) except FloatingPointError as exc: assert_(str(exc).find(strmatch) >= 0, - "Did not raise floating point %s error" % strmatch) + f"Did not raise floating point {strmatch} error") else: assert_(False, - "Did not raise floating point %s error" % strmatch) + f"Did not raise floating point {strmatch} error") class TestHalf: - def setup_method(self): + def _create_arrays_all(self): # An array of all possible float16 values - self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 + all_f16 = np.arange(0x10000, dtype=uint16) + all_f16 = all_f16.view(float16) # NaN value can cause an invalid FP exception if HW is being used with np.errstate(invalid='ignore'): - self.all_f32 = np.array(self.all_f16, dtype=float32) - self.all_f64 = np.array(self.all_f16, dtype=float64) + all_f32 = np.array(all_f16, dtype=float32) + all_f64 = np.array(all_f16, dtype=float64) + return all_f16, all_f32, all_f64 + def _create_arrays_nonan(self): # An array of all non-NaN float16 values, in sorted order - self.nonan_f16 = np.concatenate( + nonan_f16 = np.concatenate( (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16.dtype = float16 - self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) - self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) - - # An array of all finite float16 values, in sorted order - self.finite_f16 = self.nonan_f16[1:-1] - self.finite_f32 = self.nonan_f32[1:-1] - self.finite_f64 = self.nonan_f64[1:-1] + nonan_f16 = nonan_f16.view(float16) + nonan_f32 = np.array(nonan_f16, dtype=float32) + nonan_f64 = np.array(nonan_f16, dtype=float64) + return nonan_f16, nonan_f32, nonan_f64 + + def _create_arrays_finite(self): + nonan_f16, 
nonan_f32, nonan_f64 = self._create_arrays_nonan() + finite_f16 = nonan_f16[1:-1] + finite_f32 = nonan_f32[1:-1] + finite_f64 = nonan_f64[1:-1] + return finite_f16, finite_f32, finite_f64 def test_half_conversions(self): """Checks that all 16-bit values survive conversion to/from 32-bit and 64-bit float""" # Because the underlying routines preserve the NaN bits, every # value is preserved when converting to/from other floats. + all_f16, all_f32, all_f64 = self._create_arrays_all() + nonan_f16, _, _ = self._create_arrays_nonan() # Convert from float32 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f32, dtype=float16) + b = np.array(all_f32, dtype=float16) # avoid testing NaNs due to differing bit patterns in Q/S NaNs b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert from float64 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f64, dtype=float16) + b = np.array(all_f64, dtype=float16) b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert float16 to longdouble and back # This doesn't necessarily preserve the extra NaN bits, # so exclude NaNs. 
- a_ld = np.array(self.nonan_f16, dtype=np.longdouble) + a_ld = np.array(nonan_f16, dtype=np.longdouble) b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), + assert_equal(nonan_f16.view(dtype=uint16), b.view(dtype=uint16)) # Check the range for which all integers can be represented @@ -85,6 +93,21 @@ def test_half_conversion_to_string(self, string_dt): arr = np.ones(3, dtype=np.float16).astype(string_dt) assert arr.dtype == expected_dt + @pytest.mark.parametrize("dtype", ["S", "U", object]) + def test_to_half_cast_error(self, dtype): + arr = np.array(["3M"], dtype=dtype) + with pytest.raises(ValueError): + arr.astype(np.float16) + + arr = np.array(["23490349034"], dtype=dtype) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): + arr.astype(np.float16) + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + arr.astype(np.float16) + @pytest.mark.parametrize("string_dt", ["S", "U"]) def test_half_conversion_from_string(self, string_dt): string = np.array("3.1416", dtype=string_dt) @@ -99,7 +122,7 @@ def test_half_conversion_rounding(self, float_t, shift, offset): # Test all (positive) finite numbers, denormals are most interesting # however: - f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16) + f16s_patterns = np.arange(0, max_pattern + 1, dtype=np.uint16) f16s_float = f16s_patterns.view(np.float16).astype(float_t) # Shift the values by half a bit up or a down (or do not shift), @@ -170,34 +193,35 @@ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): assert larger_value.astype(np.float16) == smallest_value def test_nans_infs(self): + all_f16, all_f32, _ = self._create_arrays_all() with np.errstate(all='ignore'): # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - 
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.isnan(all_f16), np.isnan(all_f32)) + assert_equal(np.isinf(all_f16), np.isinf(all_f32)) + assert_equal(np.isfinite(all_f16), np.isfinite(all_f32)) + assert_equal(np.signbit(all_f16), np.signbit(all_f32)) assert_equal(np.spacing(float16(65504)), np.inf) # Check comparisons of all values with NaN nan = float16(np.nan) - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) + assert_(not (all_f16 == nan).any()) + assert_(not (nan == all_f16).any()) - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) + assert_((all_f16 != nan).all()) + assert_((nan != all_f16).all()) - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) + assert_(not (all_f16 < nan).any()) + assert_(not (nan < all_f16).any()) - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) + assert_(not (all_f16 <= nan).any()) + assert_(not (nan <= all_f16).any()) - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > self.all_f16).any()) + assert_(not (all_f16 > nan).any()) + assert_(not (nan > all_f16).any()) - assert_(not (self.all_f16 >= nan).any()) - assert_(not (nan >= self.all_f16).any()) + assert_(not (all_f16 >= nan).any()) + assert_(not (nan >= all_f16).any()) def test_half_values(self): """Confirms a small number of known half values""" @@ -207,7 +231,7 @@ def test_half_values(self): 65504, -65504, # Maximum magnitude 2.0**(-14), -2.0**(-14), # Minimum normal 2.0**(-24), -2.0**(-24), # Minimum subnormal - 0, -1/1e1000, # Signed zeros + 0, -1 / 1e1000, # Signed zeros np.inf, -np.inf]) b = np.array([0x3c00, 0xbc00, 0x4000, 0xc000, @@ -217,7 +241,7 @@ def test_half_values(self): 0x0001, 0x8001, 0x0000, 0x8000, 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 + b = b.view(dtype=float16) assert_equal(a, b) def test_half_rounding(self): @@ -225,16 +249,16 @@ def test_half_rounding(self): a = 
np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal 2.0**-25, # Underflows to zero (nearest even mode) 2.0**-26, # Underflows to zero - 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) - 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) - 1.0+2.0**-12, # rounds to 1.0 + 1.0 + 2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) + 1.0 + 2.0**-11, # rounds to 1.0 (nearest even mode) + 1.0 + 2.0**-12, # rounds to 1.0 65519, # rounds to 65504 65520], # rounds to inf dtype=float64) rounded = [2.0**-24, 0.0, 0.0, - 1.0+2.0**(-10), + 1.0 + 2.0**(-10), 1.0, 1.0, 65504, @@ -254,9 +278,10 @@ def test_half_rounding(self): def test_half_correctness(self): """Take every finite float16, and check the casting functions with a manual conversion.""" + finite_f16, finite_f32, finite_f64 = self._create_arrays_finite() # Create an array of all finite float16s - a_bits = self.finite_f16.view(dtype=uint16) + a_bits = finite_f16.view(dtype=uint16) # Convert to 64-bit float manually a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) @@ -269,29 +294,30 @@ def test_half_correctness(self): a_manual = a_sgn * a_man * 2.0**a_exp - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + a32_fail = np.nonzero(finite_f32 != a_manual)[0] if len(a32_fail) != 0: bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, + assert_equal(finite_f32, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f32[bad_index], + finite_f32[bad_index], a_manual[bad_index])) - a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + a64_fail = np.nonzero(finite_f64 != a_manual)[0] if len(a64_fail) != 0: bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, + assert_equal(finite_f64, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f64[bad_index], + finite_f64[bad_index], a_manual[bad_index])) def test_half_ordering(self): """Make sure comparisons are working right""" + nonan_f16, _, _ = self._create_arrays_nonan() # 
All non-NaN float16 values in reverse order - a = self.nonan_f16[::-1].copy() + a = nonan_f16[::-1].copy() # 32-bit float copy b = np.array(a, dtype=float32) @@ -307,8 +333,8 @@ def test_half_ordering(self): assert_((a[1:] >= a[:-1]).all()) assert_(not (a[1:] < a[:-1]).any()) # All != except for +/-0 - assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) - assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) + assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size - 2) + assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size - 2) def test_half_funcs(self): """Test the various ArrFuncs""" @@ -323,7 +349,7 @@ def test_half_funcs(self): assert_equal(a, np.ones((5,), dtype=float16)) # nonzero and copyswap - a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) + a = np.array([0, 0, -1, -1 / 1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) assert_equal(a.nonzero()[0], [2, 5, 6]) a = a.byteswap() @@ -358,7 +384,7 @@ def test_spacing_nextafter(self): hnan = np.array((np.nan,), dtype=float16) a_f16 = a.view(dtype=float16) - assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) + assert_equal(np.spacing(a_f16[:-1]), a_f16[1:] - a_f16[:-1]) assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) @@ -383,7 +409,7 @@ def test_spacing_nextafter(self): a |= 0x8000 assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) - assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) + assert_equal(np.spacing(a_f16[1:]), a_f16[:-1] - a_f16[1:]) assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) @@ -497,40 +523,40 @@ def test_half_fpe(self): by16 = float16(1e4) # Underflow errors - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) - 
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, + assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sx16) + assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b: a / b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b: a / b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b: a / b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b: a / b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b: a / b, float16(2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, + assert_raises_fpe('underflow', lambda a, b: a / b, float16(-2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(-2.**-14-2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-23), float16(4)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14 + 2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(-2.**-14 - 2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14 + 2**-23), float16(4)) # Overflow errors - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) - 
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) - assert_raises_fpe('overflow', lambda a, b:a+b, + assert_raises_fpe('overflow', lambda a, b: a * b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b: a * b, bx16, by16) + assert_raises_fpe('overflow', lambda a, b: a * b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b: a * b, by16, by16) + assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b: a / b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b: a / b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b: a + b, float16(65504), float16(17)) - assert_raises_fpe('overflow', lambda a, b:a-b, + assert_raises_fpe('overflow', lambda a, b: a - b, float16(-65504), float16(17)) assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501 assert_raises_fpe('overflow', np.spacing, float16(65504)) # Invalid value errors @@ -539,9 +565,9 @@ def test_half_fpe(self): assert_raises_fpe('invalid', np.spacing, float16(np.nan)) # These should not raise - float16(65472)+float16(32) - float16(2**-13)/float16(2) - float16(2**-14)/float16(2**10) + float16(65472) + float16(32) + float16(2**-13) / float16(2) + float16(2**-14) / float16(2**10) np.spacing(float16(-65504)) np.nextafter(float16(65504), float16(-np.inf)) np.nextafter(float16(-65504), float16(np.inf)) @@ -549,10 +575,10 @@ def test_half_fpe(self): np.nextafter(float16(-np.inf), float16(0)) np.nextafter(float16(0), float16(np.nan)) np.nextafter(float16(np.nan), float16(0)) - float16(2**-14)/float16(2**10) - float16(-2**-14)/float16(2**10) - float16(2**-14+2**-23)/float16(2) - float16(-2**-14-2**-23)/float16(2) + float16(2**-14) / 
float16(2**10) + float16(-2**-14) / float16(2**10) + float16(2**-14 + 2**-23) / float16(2) + float16(-2**-14 - 2**-23) / float16(2) def test_half_array_interface(self): """Test that half is compatible with __array_interface__""" diff --git a/blimgui/dist64/numpy/_core/tests/test_hashtable.py b/blimgui/dist64/numpy/_core/tests/test_hashtable.py index e9d46c4..f6262e5 100644 --- a/blimgui/dist64/numpy/_core/tests/test_hashtable.py +++ b/blimgui/dist64/numpy/_core/tests/test_hashtable.py @@ -1,6 +1,7 @@ +import random + import pytest -import random from numpy._core._multiarray_tests import identityhash_tester diff --git a/blimgui/dist64/numpy/_core/tests/test_indexerrors.py b/blimgui/dist64/numpy/_core/tests/test_indexerrors.py index 5225253..fb5eb85 100644 --- a/blimgui/dist64/numpy/_core/tests/test_indexerrors.py +++ b/blimgui/dist64/numpy/_core/tests/test_indexerrors.py @@ -1,7 +1,5 @@ import numpy as np -from numpy.testing import ( - assert_raises, assert_raises_regex, - ) +from numpy.testing import assert_raises, assert_raises_regex class TestIndexErrors: diff --git a/blimgui/dist64/numpy/_core/tests/test_indexing.py b/blimgui/dist64/numpy/_core/tests/test_indexing.py index 043e62f..ece6c26 100644 --- a/blimgui/dist64/numpy/_core/tests/test_indexing.py +++ b/blimgui/dist64/numpy/_core/tests/test_indexing.py @@ -1,18 +1,24 @@ -import sys -import warnings import functools +import inspect import operator +import sys +import warnings +from itertools import product import pytest import numpy as np from numpy._core._multiarray_tests import array_indexing -from itertools import product from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_warns, HAS_REFCOUNT, IS_WASM - ) + HAS_REFCOUNT, + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestIndexing: @@ -22,25 +28,25 @@ def 
test_index_no_floats(self): assert_raises(IndexError, lambda: a[0.0]) assert_raises(IndexError, lambda: a[0, 0.0]) assert_raises(IndexError, lambda: a[0.0, 0]) - assert_raises(IndexError, lambda: a[0.0,:]) + assert_raises(IndexError, lambda: a[0.0, :]) assert_raises(IndexError, lambda: a[:, 0.0]) - assert_raises(IndexError, lambda: a[:, 0.0,:]) - assert_raises(IndexError, lambda: a[0.0,:,:]) + assert_raises(IndexError, lambda: a[:, 0.0, :]) + assert_raises(IndexError, lambda: a[0.0, :, :]) assert_raises(IndexError, lambda: a[0, 0, 0.0]) assert_raises(IndexError, lambda: a[0.0, 0, 0]) assert_raises(IndexError, lambda: a[0, 0.0, 0]) assert_raises(IndexError, lambda: a[-1.4]) assert_raises(IndexError, lambda: a[0, -1.4]) assert_raises(IndexError, lambda: a[-1.4, 0]) - assert_raises(IndexError, lambda: a[-1.4,:]) + assert_raises(IndexError, lambda: a[-1.4, :]) assert_raises(IndexError, lambda: a[:, -1.4]) - assert_raises(IndexError, lambda: a[:, -1.4,:]) - assert_raises(IndexError, lambda: a[-1.4,:,:]) + assert_raises(IndexError, lambda: a[:, -1.4, :]) + assert_raises(IndexError, lambda: a[-1.4, :, :]) assert_raises(IndexError, lambda: a[0, 0, -1.4]) assert_raises(IndexError, lambda: a[-1.4, 0, 0]) assert_raises(IndexError, lambda: a[0, -1.4, 0]) assert_raises(IndexError, lambda: a[0.0:, 0.0]) - assert_raises(IndexError, lambda: a[0.0:, 0.0,:]) + assert_raises(IndexError, lambda: a[0.0:, 0.0, :]) def test_slicing_no_floats(self): a = np.array([[5]]) @@ -49,26 +55,26 @@ def test_slicing_no_floats(self): assert_raises(TypeError, lambda: a[0.0:]) assert_raises(TypeError, lambda: a[0:, 0.0:2]) assert_raises(TypeError, lambda: a[0.0::2, :0]) - assert_raises(TypeError, lambda: a[0.0:1:2,:]) + assert_raises(TypeError, lambda: a[0.0:1:2, :]) assert_raises(TypeError, lambda: a[:, 0.0:]) # stop as float. 
assert_raises(TypeError, lambda: a[:0.0]) assert_raises(TypeError, lambda: a[:0, 1:2.0]) assert_raises(TypeError, lambda: a[:0.0:2, :0]) - assert_raises(TypeError, lambda: a[:0.0,:]) + assert_raises(TypeError, lambda: a[:0.0, :]) assert_raises(TypeError, lambda: a[:, 0:4.0:2]) # step as float. assert_raises(TypeError, lambda: a[::1.0]) assert_raises(TypeError, lambda: a[0:, :2:2.0]) assert_raises(TypeError, lambda: a[1::4.0, :0]) - assert_raises(TypeError, lambda: a[::5.0,:]) + assert_raises(TypeError, lambda: a[::5.0, :]) assert_raises(TypeError, lambda: a[:, 0:4:2.0]) # mixed. assert_raises(TypeError, lambda: a[1.0:2:2.0]) assert_raises(TypeError, lambda: a[1.0::2.0]) assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) - assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:]) + assert_raises(TypeError, lambda: a[1.0:5.0:5.0, :]) assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) # should still get the DeprecationWarning if step = 0. assert_raises(TypeError, lambda: a[::0.0]) @@ -113,8 +119,8 @@ def test_same_kind_index_casting(self): arr = np.arange(10).reshape(5, 2) assert_array_equal(arr[index], arr[u_index]) - arr[u_index] = np.arange(5)[:,None] - assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) + arr[u_index] = np.arange(5)[:, None] + assert_array_equal(arr, np.arange(5)[:, None].repeat(2, axis=1)) arr = np.arange(25).reshape(5, 5) assert_array_equal(arr[u_index, u_index], arr[index, index]) @@ -155,6 +161,20 @@ def test_gh_26542_index_overlap(self): actual_vals = arr[10:] assert_equal(actual_vals, expected_vals) + def test_gh_26844(self): + expected = [0, 1, 3, 3, 3] + a = np.arange(5) + a[2:][a[:-2]] = 3 + assert_equal(a, expected) + + def test_gh_26844_segfault(self): + # check for absence of segfault for: + # https://github.com/numpy/numpy/pull/26958/files#r1854589178 + a = np.arange(5) + expected = [0, 1, 3, 3, 3] + a[2:][None, a[:-2]] = 3 + assert_equal(a, expected) + def 
test_ellipsis_index(self): a = np.array([[1, 2, 3], [4, 5, 6], @@ -167,7 +187,7 @@ def test_ellipsis_index(self): # Slicing with ellipsis can skip an # arbitrary number of dimensions assert_equal(a[0, ...], a[0]) - assert_equal(a[0, ...], a[0,:]) + assert_equal(a[0, ...], a[0, :]) assert_equal(a[..., 0], a[:, 0]) # Slicing with ellipsis always results @@ -219,8 +239,8 @@ def test_boolean_shape_mismatch(self): def test_boolean_indexing_onedim(self): # Indexing a 2-dimensional array with # boolean array of length one - a = np.array([[ 0., 0., 0.]]) - b = np.array([ True], dtype=bool) + a = np.array([[0., 0., 0.]]) + b = np.array([True], dtype=bool) assert_equal(a[b], a) # boolean assignment a[b] = 1. @@ -258,9 +278,9 @@ def test_boolean_indexing_twodim(self): a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - b = np.array([[ True, False, True], - [False, True, False], - [ True, False, True]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) assert_equal(a[b], [1, 3, 5, 7, 9]) assert_equal(a[b[1]], [[4, 5, 6]]) assert_equal(a[b[0]], a[b[2]]) @@ -415,7 +435,7 @@ def test_subclass_writeable(self, writeable): dtype=[('target', 'S20'), ('V_mag', '>f4')]) d.flags.writeable = writeable # Advanced indexing results are always writeable: - ind = np.array([False, True, True], dtype=bool) + ind = np.array([False, True, True], dtype=bool) assert d[ind].flags.writeable ind = np.array([0, 1]) assert d[ind].flags.writeable @@ -427,7 +447,7 @@ def test_memory_order(self): # This is not necessary to preserve. Memory layouts for # more complex indices are not as simple. a = np.arange(10) - b = np.arange(10).reshape(5,2).T + b = np.arange(10).reshape(5, 2).T assert_(a[b].flags.f_contiguous) # Takes a different implementation branch: @@ -492,7 +512,7 @@ def test_unaligned(self): x = x.view(np.dtype("S8")) x[...] 
= np.array("b" * 8, dtype="S") b = np.arange(d.size) - #trivial + # trivial assert_equal(d[b], d) d[b] = x # nontrivial @@ -590,31 +610,6 @@ def test_too_many_advanced_indices(self, index, num, original_ndim): with pytest.raises(IndexError): arr[(index,) * num] = 1. - @pytest.mark.skipif(IS_WASM, reason="no threading") - def test_structured_advanced_indexing(self): - # Test that copyswap(n) used by integer array indexing is threadsafe - # for structured datatypes, see gh-15387. This test can behave randomly. - from concurrent.futures import ThreadPoolExecutor - - # Create a deeply nested dtype to make a failure more likely: - dt = np.dtype([("", "f8")]) - dt = np.dtype([("", dt)] * 2) - dt = np.dtype([("", dt)] * 2) - # The array should be large enough to likely run into threading issues - arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0] - - rng = np.random.default_rng() - def func(arr): - indx = rng.integers(0, len(arr), size=6000, dtype=np.intp) - arr[indx] - - tpe = ThreadPoolExecutor(max_workers=8) - futures = [tpe.submit(func, arr) for _ in range(10)] - for f in futures: - f.result() - - assert arr.dtype is dt - def test_nontuple_ndindex(self): a = np.arange(25).reshape((5, 5)) assert_equal(a[[0, 1]], np.array([a[0], a[1]])) @@ -626,7 +621,7 @@ class TestFieldIndexing: def test_scalar_return_type(self): # Field access on an array should return an array, even if it # is 0-d. - a = np.zeros((), [('a','f8')]) + a = np.zeros((), [('a', 'f8')]) assert_(isinstance(a['a'], np.ndarray)) assert_(isinstance(a[['a']], np.ndarray)) @@ -652,9 +647,9 @@ def test_prepend_not_one(self): a = np.zeros(5) # Too large and not only ones. 
- assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) - assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) + assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2, 2, 1))) def test_simple_broadcasting_errors(self): assign = self.assign @@ -692,7 +687,7 @@ def test_index_is_larger(self): def test_broadcast_subspace(self): a = np.zeros((100, 100)) - v = np.arange(100)[:,None] + v = np.arange(100)[:, None] b = np.arange(100)[::-1] a[b] = v assert_((a[::-1] == v).all()) @@ -740,7 +735,6 @@ class SubClass(np.ndarray): s_fancy = s[[0, 1, 2]] assert_(s_fancy.flags.writeable) - def test_finalize_gets_full_info(self): # Array finalize should be called on the filled array. class SubClass(np.ndarray): @@ -753,7 +747,7 @@ def __array_finalize__(self, old): assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) - new_s = s[[0,1,2,3]] + new_s = s[[0, 1, 2, 3]] assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) @@ -775,35 +769,36 @@ def test_boolean_index_cast_assign(self): assert_equal(zero_array[0, 1], 1) # Fancy indexing works, although we get a cast warning. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) assert_equal(zero_array[0, 1], 2) # No complex part # Cast complex to float, throwing away the imaginary portion. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) + class TestFancyIndexingEquivalence: def test_object_assign(self): # Check that the field and object special case using copyto is active. # The right hand side cannot be converted to an array here. 
a = np.arange(5, dtype=object) b = a.copy() - a[:3] = [1, (1,2), 3] - b[[0, 1, 2]] = [1, (1,2), 3] + a[:3] = [1, (1, 2), 3] + b[[0, 1, 2]] = [1, (1, 2), 3] assert_array_equal(a, b) # test same for subspace fancy indexing b = np.arange(5, dtype=object)[None, :] - b[[0], :3] = [[1, (1,2), 3]] + b[[0], :3] = [[1, (1, 2), 3]] assert_array_equal(a, b[0]) # Check that swapping of axes works. # There was a bug that made the later assignment throw a ValueError # do to an incorrectly transposed temporary right hand side (gh-5714) b = b.T - b[:3, [0]] = [[1], [(1,2)], [3]] + b[:3, [0]] = [[1], [(1, 2)], [3]] assert_array_equal(a, b[:, 0]) # Another test for the memory order of the subspace @@ -854,10 +849,11 @@ class TestMultiIndexingAutomated: """ - def setup_method(self): - self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) - self.b = np.empty((3, 0, 5, 6)) - self.complex_indices = ['skip', Ellipsis, + def _create_array(self): + return np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + + def _create_complex_indices(self): + return ['skip', Ellipsis, 0, # Boolean indices, up to 3-d for some special cases of eating up # dimensions, also need to test all False @@ -875,13 +871,8 @@ def setup_method(self): np.array([[2], [0], [1]]), np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), np.array([2, -1], dtype=np.int8), - np.zeros([1]*31, dtype=int), # trigger too large array. + np.zeros([1] * 31, dtype=int), # trigger too large array. np.array([0., 1.])] # invalid datatype - # Some simpler indices that still cover a bit more - self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), - 'skip'] - # Very simple ones to fill the rest: - self.fill_indices = [slice(None, None), 0] def _get_multi_index(self, arr, indices): """Mimic multi dimensional indexing. 
@@ -955,7 +946,7 @@ def _get_multi_index(self, arr, indices): except ValueError: raise IndexError in_indices[i] = indx - elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': + elif indx.dtype.kind not in 'bi': raise IndexError('arrays used as indices must be of ' 'integer (or boolean) type') if indx.ndim != 0: @@ -975,7 +966,7 @@ def _get_multi_index(self, arr, indices): return arr.copy(), no_copy if ellipsis_pos is not None: - in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] * + in_indices[ellipsis_pos:ellipsis_pos + 1] = ([slice(None, None)] * (arr.ndim - ndim)) for ax, indx in enumerate(in_indices): @@ -990,21 +981,21 @@ def _get_multi_index(self, arr, indices): arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:]) continue if isinstance(indx, np.ndarray) and indx.dtype == bool: - if indx.shape != arr.shape[ax:ax+indx.ndim]: + if indx.shape != arr.shape[ax:ax + indx.ndim]: raise IndexError try: flat_indx = np.ravel_multi_index(np.nonzero(indx), - arr.shape[ax:ax+indx.ndim], mode='raise') + arr.shape[ax:ax + indx.ndim], mode='raise') except Exception: error_unless_broadcast_to_empty = True # fill with 0s instead, and raise error later - flat_indx = np.array([0]*indx.sum(), dtype=np.intp) + flat_indx = np.array([0] * indx.sum(), dtype=np.intp) # concatenate axis into a single one: if indx.ndim != 0: arr = arr.reshape(arr.shape[:ax] - + (np.prod(arr.shape[ax:ax+indx.ndim]),) - + arr.shape[ax+indx.ndim:]) + + (np.prod(arr.shape[ax:ax + indx.ndim]),) + + arr.shape[ax + indx.ndim:]) indx = flat_indx else: # This could be changed, a 0-d boolean index can @@ -1012,12 +1003,12 @@ def _get_multi_index(self, arr, indices): # Note that originally this is could be interpreted as # integer in the full integer special case. raise IndexError - else: - # If the index is a singleton, the bounds check is done - # before the broadcasting. 
This used to be different in <1.9 - if indx.ndim == 0: - if indx >= arr.shape[ax] or indx < -arr.shape[ax]: - raise IndexError + # If the index is a singleton, the bounds check is done + # before the broadcasting. This used to be different in <1.9 + elif indx.ndim == 0 and not ( + -arr.shape[ax] <= indx < arr.shape[ax] + ): + raise IndexError if indx.ndim == 0: # The index is a scalar. This used to be two fold, but if # fancy indexing was active, the check was done later, @@ -1087,7 +1078,7 @@ def _get_multi_index(self, arr, indices): if _indx.size == 0: continue if np.any(_indx >= _size) or np.any(_indx < -_size): - raise IndexError + raise IndexError if len(indx[1:]) == len(orig_slice): if np.prod(orig_slice) == 0: # Work around for a crash or IndexError with 'wrap' @@ -1109,7 +1100,7 @@ def _get_multi_index(self, arr, indices): try: arr = arr.reshape(arr.shape[:ax] + mi.shape - + arr.shape[ax+1:]) + + arr.shape[ax + 1:]) except ValueError: # too many dimensions, probably raise IndexError @@ -1174,6 +1165,8 @@ def _compare_index_result(self, arr, index, mimic_get, no_copy): """Compare mimicked result to indexing result. """ arr = arr.copy() + if HAS_REFCOUNT: + startcount = sys.getrefcount(arr) indexed_arr = arr[index] assert_array_equal(indexed_arr, mimic_get) # Check if we got a view, unless its a 0-sized or 0-d array. @@ -1184,9 +1177,9 @@ def _compare_index_result(self, arr, index, mimic_get, no_copy): if HAS_REFCOUNT: if no_copy: # refcount increases by one: - assert_equal(sys.getrefcount(arr), 3) + assert_equal(sys.getrefcount(arr), startcount + 1) else: - assert_equal(sys.getrefcount(arr), 2) + assert_equal(sys.getrefcount(arr), startcount) # Test non-broadcast setitem: b = arr.copy() @@ -1212,16 +1205,23 @@ def test_boolean(self): # it is aligned to the left. 
This is probably correct for # consistency with arr[boolean_array,] also no broadcasting # is done at all + a = self._create_array() self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool),)) + a, (np.zeros_like(a, dtype=bool),)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + a, (np.zeros_like(a, dtype=bool)[..., 0],)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + a, (np.zeros_like(a, dtype=bool)[None, ...],)) def test_multidim(self): # Automatically test combinations with complex indexes on 2nd (or 1st) # spot and the simple ones in one other spot. + a = self._create_array() + b = np.empty((3, 0, 5, 6)) + complex_indices = self._create_complex_indices() + simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip'] + fill_indices = [slice(None, None), 0] + with warnings.catch_warnings(): # This is so that np.array(True) is not accepted in a full integer # index, when running the file separately. 
@@ -1232,28 +1232,30 @@ def isskip(idx): return isinstance(idx, str) and idx == "skip" for simple_pos in [0, 2, 3]: - tocheck = [self.fill_indices, self.complex_indices, - self.fill_indices, self.fill_indices] - tocheck[simple_pos] = self.simple_indices + tocheck = [fill_indices, complex_indices, + fill_indices, fill_indices] + tocheck[simple_pos] = simple_indices for index in product(*tocheck): index = tuple(i for i in index if not isskip(i)) - self._check_multi_index(self.a, index) - self._check_multi_index(self.b, index) + self._check_multi_index(a, index) + self._check_multi_index(b, index) # Check very simple item getting: - self._check_multi_index(self.a, (0, 0, 0, 0)) - self._check_multi_index(self.b, (0, 0, 0, 0)) + self._check_multi_index(a, (0, 0, 0, 0)) + self._check_multi_index(b, (0, 0, 0, 0)) # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) - assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, [1], 0, 0), 0) def test_1d(self): a = np.arange(10) - for index in self.complex_indices: + complex_indices = self._create_complex_indices() + for index in complex_indices: self._check_single_index(a, index) + class TestFloatNonIntegerArgument: """ These test that ``TypeError`` is raised when you try to use @@ -1268,8 +1270,8 @@ def test_valid_indexing(self): a[np.array([0])] a[[0, 0]] a[:, [0, 0]] - a[:, 0,:] - a[:,:,:] + a[:, 0, :] + a[:, :, :] def test_valid_slicing(self): # These should raise no errors. 
@@ -1302,7 +1304,7 @@ def mult(a, b): mult([1], np.int_(3)) def test_reduce_axis_float_index(self): - d = np.zeros((3,3,3)) + d = np.zeros((3, 3, 3)) assert_raises(TypeError, np.min, d, 0.5) assert_raises(TypeError, np.min, d, (0.5, 1)) assert_raises(TypeError, np.min, d, (1, 2.2)) @@ -1319,7 +1321,7 @@ def test_bool_as_int_argument_errors(self): # Note that operator.index(np.array(True)) does not work, a boolean # array is thus also deprecated, but not with the same message: assert_raises(TypeError, operator.index, np.array(True)) - assert_warns(DeprecationWarning, operator.index, np.True_) + assert_raises(TypeError, operator.index, np.True_) assert_raises(TypeError, np.take, args=(a, [0], False)) def test_boolean_indexing_weirdness(self): @@ -1335,21 +1337,22 @@ def test_boolean_indexing_fast_path(self): a = np.ones((3, 3)) # This used to incorrectly work (and give an array of shape (0,)) - idx1 = np.array([[False]*9]) + idx1 = np.array([[False] * 9]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together - idx2 = np.array([[False]*8 + [True]]) + # This used to incorrectly give a ValueError: operands could not be + # broadcast together + idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx2]) # This is the same as it used to be. The above two should work like this. 
- idx3 = np.array([[False]*10]) + idx3 = np.array([[False] * 10]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " "size of axis is 3 but size of corresponding boolean axis is 1", @@ -1442,3 +1445,248 @@ def test_setitem(self): a = a.reshape(5, 2) assign(a, 4, 10) assert_array_equal(a[-1], [10, 10]) + + +class TestFlatiterIndexing: + def test_flatiter_indexing_single_integer(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[0], 0) + assert_array_equal(a.flat[4], 4) + assert_array_equal(a.flat[-1], 8) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] + + def test_flatiter_indexing_slice(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[:], np.arange(9)) + assert_array_equal(a.flat[:5], np.arange(5)) + assert_array_equal(a.flat[5:10], np.arange(5, 9)) + assert_array_equal(a.flat[::2], np.arange(0, 9, 2)) + assert_array_equal(a.flat[::-1], np.arange(8, -1, -1)) + assert_array_equal(a.flat[10:5], np.array([])) + + assert_array_equal(a.flat[()], np.arange(9)) + assert_array_equal(a.flat[...], np.arange(9)) + + def test_flatiter_indexing_boolean(self): + a = np.arange(9).reshape((3, 3)) + + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[True], 0) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[False], np.array([])) + + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + assert_array_equal(a.flat[mask], np.arange(0, 9, 2)) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] + + def test_flatiter_indexing_fancy(self): + a = np.arange(9).reshape((3, 3)) + + indices = np.array([1, 3, 5]) + assert_array_equal(a.flat[indices], indices) + + assert_array_equal(a.flat[[-1, -2]], np.array([8, 7])) + + indices_2d = np.array([[1, 
2], [3, 4]]) + assert_array_equal(a.flat[indices_2d], indices_2d) + + assert_array_equal(a.flat[[True, 1]], np.array([1, 1])) + + assert_array_equal(a.flat[[]], np.array([], dtype=a.dtype)) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] + assert_array_equal(a.flat[np.asarray([True, False, True])], np.array([0, 2])) + + def test_flatiter_indexing_not_supported_newaxis_mutlidimensional_float(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] + + with pytest.raises(IndexError, + match=r"too many indices for flat iterator: flat iterator " + r"is 1-dimensional, but 2 were indexed"): + a.flat[1, 2] + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + assert_array_equal(a.flat[[1.0, 2.0]], np.array([1, 2])) + + def test_flatiter_assign_single_integer(self): + a = np.arange(9).reshape((3, 3)) + + a.flat[0] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a.flat[4] = 20 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 8]])) + + a.flat[-1] = 30 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 30]])) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] = 40 + + def test_flatiter_indexing_slice_assign(self): + a = np.arange(9).reshape((3, 3)) + a.flat[:] = 10 + assert_array_equal(a, np.full((3, 3), 10)) + + a = np.arange(9).reshape((3, 3)) + a.flat[:5] = 20 + assert_array_equal(a, np.array([[20, 20, 20], [20, 20, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[5:10] = 30 + assert_array_equal(a, np.array([[0, 1, 2], [3, 4, 30], [30, 
30, 30]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::2] = 40 + assert_array_equal(a, np.array([[40, 1, 40], [3, 40, 5], [40, 7, 40]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::-1] = 50 + assert_array_equal(a, np.full((3, 3), 50)) + + a = np.arange(9).reshape((3, 3)) + a.flat[10:5] = 60 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match="Assigning to a flat iterator with a 0-D index"): + a.flat[()] = 70 + + a = np.arange(9).reshape((3, 3)) + a.flat[...] = 80 + assert_array_equal(a, np.full((3, 3), 80)) + + def test_flatiter_indexing_boolean_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[True] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[False] = 20 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + a.flat[mask] = 30 + assert_array_equal(a, np.array([[30, 1, 30], [3, 30, 5], [30, 7, 30]])) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] = 40 + + def test_flatiter_indexing_fancy_assign(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5]) + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + a.flat[[-1, -2]] = 20 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 20, 20]])) + + a = np.arange(9).reshape((3, 3)) + indices_2d = np.array([[1, 2], [3, 4]]) + a.flat[indices_2d] = 30 + assert_array_equal(a, np.array([[0, 30, 30], [30, 30, 5], [6, 7, 8]])) + + a.flat[[True, 1]] = 40 + assert_array_equal(a, np.array([[0, 40, 30], [30, 30, 5], [6, 
7, 8]])) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] = 50 + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] = 20 + a.flat[np.asarray([True, False, True])] = 20 + assert_array_equal(a, np.array([20, 1, 20])) + + def test_flatiter_indexing_fancy_int16_dtype(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5], dtype=np.int16) + assert_array_equal(a.flat[indices], np.array([1, 3, 5])) + + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + def test_flatiter_indexing_not_supported_newaxis_mutlid_float_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] = 10 + + a.flat[[1, 2]] = 10 + assert_array_equal(a, np.array([[0, 10, 10], [3, 4, 5], [6, 7, 8]])) + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + a.flat[[1.0, 2.0]] = 20 + assert_array_equal(a, np.array([[0, 20, 20], [3, 4, 5], [6, 7, 8]])) + + def test_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([0, 5, 6]) + assert_equal(a.flat[b.flat], np.array([0, 5, 6])) + + def test_empty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([], dtype="S") + # This is arguably incorrect, and should be removed (ideally with + # deprecation). But it matches the array path and comes from not + # distinguishing `arr[np.array([]).flat]` and `arr[[]]` and the latter + # must pass. 
+ assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " + r"and integer or boolean arrays are valid indices"): + a.flat[b.flat] + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize("methodname", ["__array__", "copy"]) +def test_flatiter_method_signatures(methodname: str): + method = getattr(np.flatiter, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.flatiter.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY diff --git a/blimgui/dist64/numpy/_core/tests/test_item_selection.py b/blimgui/dist64/numpy/_core/tests/test_item_selection.py index 7debd92..3dfc814 100644 --- a/blimgui/dist64/numpy/_core/tests/test_item_selection.py +++ b/blimgui/dist64/numpy/_core/tests/test_item_selection.py @@ -3,9 +3,7 @@ import pytest import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_array_equal, HAS_REFCOUNT - ) +from numpy.testing import HAS_REFCOUNT, assert_, assert_array_equal, assert_raises class TestTake: @@ -15,7 +13,7 @@ def test_simple(self): modes = ['raise', 'wrap', 'clip'] indices = [-1, 4] index_arrays = [np.empty(0, dtype=np.intp), - np.empty(tuple(), dtype=np.intp), + np.empty((), dtype=np.intp), np.empty((1, 1), dtype=np.intp)] real_indices = {'raise': {-1: 1, 4: IndexError}, 'wrap': {-1: 1, 4: 0}, @@ -50,19 +48,23 @@ def test_simple(self): def test_refcounting(self): objects = [object() for i in range(10)] + if HAS_REFCOUNT: + orig_rcs = [sys.getrefcount(o) for o in objects] for mode in ('raise', 'clip', 'wrap'): a = 
np.array(objects) b = np.array([2, 2, 4, 5, 3, 5]) a.take(b, out=a[:6], mode=mode) del a if HAS_REFCOUNT: - assert_(all(sys.getrefcount(o) == 3 for o in objects)) + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) # not contiguous, example: a = np.array(objects * 2)[::2] a.take(b, out=a[:6], mode=mode) del a if HAS_REFCOUNT: - assert_(all(sys.getrefcount(o) == 3 for o in objects)) + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) def test_unicode_mode(self): d = np.arange(10) diff --git a/blimgui/dist64/numpy/_core/tests/test_limited_api.py b/blimgui/dist64/numpy/_core/tests/test_limited_api.py index b99cccb..f088f4f 100644 --- a/blimgui/dist64/numpy/_core/tests/test_limited_api.py +++ b/blimgui/dist64/numpy/_core/tests/test_limited_api.py @@ -2,9 +2,10 @@ import subprocess import sys import sysconfig + import pytest -from numpy.testing import IS_WASM, IS_PYPY, NOGIL_BUILD, IS_EDITABLE +from numpy.testing import IS_EDITABLE, IS_PYPY, IS_WASM, NOGIL_BUILD # This import is copied from random.tests.test_extending try: @@ -52,6 +53,8 @@ def install_temp(tmpdir_factory): subprocess.check_call(["meson", "--version"]) except FileNotFoundError: pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") if sys.platform == "win32": subprocess.check_call(["meson", "setup", "--werror", @@ -76,7 +79,6 @@ def install_temp(tmpdir_factory): sys.path.append(str(build_dir)) - @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") @pytest.mark.xfail( sysconfig.get_config_var("Py_DEBUG"), @@ -95,6 +97,6 @@ def test_limited_api(install_temp): and building a cython extension with the limited API """ - import limited_api1 # Earliest (3.6) - import limited_api_latest # Latest version (current Python) - import limited_api2 # cython + import limited_api1 # Earliest (3.6) # noqa: F401 + import limited_api2 # cython # noqa: F401 + 
import limited_api_latest # Latest version (current Python) # noqa: F401 diff --git a/blimgui/dist64/numpy/_core/tests/test_longdouble.py b/blimgui/dist64/numpy/_core/tests/test_longdouble.py index 7e6be3e..87eb751 100644 --- a/blimgui/dist64/numpy/_core/tests/test_longdouble.py +++ b/blimgui/dist64/numpy/_core/tests/test_longdouble.py @@ -1,14 +1,18 @@ -import warnings import platform +import warnings + import pytest import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, assert_array_equal, - temppath, IS_MUSL - ) from numpy._core.tests._locales import CommaDecimalPointLocale - +from numpy.testing import ( + IS_MUSL, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) LD_INFO = np.finfo(np.longdouble) longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) @@ -40,7 +44,7 @@ def test_scalar_extraction(): def test_str_roundtrip(): # We will only see eps in repr if within printing precision. o = 1 + LD_INFO.eps - assert_equal(np.longdouble(str(o)), o, "str was %s" % str(o)) + assert_equal(np.longdouble(str(o)), o, f"str was {str(o)}") @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") @@ -83,10 +87,10 @@ def test_bogus_string(): @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_fromstring(): o = 1 + LD_INFO.eps - s = (" " + str(o))*5 - a = np.array([o]*5) + s = (" " + str(o)) * 5 + a = np.array([o] * 5) assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, - err_msg="reading '%s'" % s) + err_msg=f"reading '{s}'") def test_fromstring_complex(): @@ -101,48 +105,39 @@ def test_fromstring_complex(): assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. 
+ 4j])) # Spaces at wrong places - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+j", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","), - np.array([1j])) + with assert_raises(ValueError): + np.fromstring("1+2 j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+ 2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1 +2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+j", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1j+1", dtype=ctype, sep=",") def test_fromstring_bogus(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "), - np.array([1., 2., 3.])) + with assert_raises(ValueError): + np.fromstring("1. 2. 3. 
flop 4.", dtype=float, sep=" ") def test_fromstring_empty(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("xxxxx", sep="x"), - np.array([])) + with assert_raises(ValueError): + np.fromstring("xxxxx", sep="x") def test_fromstring_missing(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), - np.array([1])) + with assert_raises(ValueError): + np.fromstring("1xx3x4x5x6", sep="x") class TestFileBased: ldbl = 1 + LD_INFO.eps - tgt = np.array([ldbl]*5) + tgt = np.array([ldbl] * 5) out = ''.join([str(t) + '\n' for t in tgt]) def test_fromfile_bogus(self): @@ -150,9 +145,8 @@ def test_fromfile_bogus(self): with open(path, 'w') as f: f.write("1. 2. 3. flop 4.\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=float, sep=" ") - assert_equal(res, np.array([1., 2., 3.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=float, sep=" ") def test_fromfile_complex(self): for ctype in ["complex", "cdouble"]: @@ -185,56 +179,48 @@ def test_fromfile_complex(self): with open(path, 'w') as f: f.write("1+2 j,3\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") # Spaces at wrong places with temppath() as path: with open(path, 'w') as f: f.write("1+ 2j,3\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") # Spaces at wrong places with temppath() as path: with open(path, 'w') as f: f.write("1 +2j,3\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") - # Spaces at wrong places + # Wrong sep with temppath() as path: with open(path, 'w') 
as f: f.write("1+j\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") - # Spaces at wrong places + # Wrong sep with temppath() as path: with open(path, 'w') as f: f.write("1+\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") - # Spaces at wrong places + # Wrong sep with temppath() as path: with open(path, 'w') as f: f.write("1j+1\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.j])) - - + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") @@ -284,8 +270,7 @@ def test_str_exact(): @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_format(): - o = 1 + LD_INFO.eps - assert_("{0:.40g}".format(o) != '1') + assert_(f"{1 + LD_INFO.eps:.40g}" != '1') @pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") @@ -293,7 +278,7 @@ def test_format(): reason="Need strtold_l") def test_percent(): o = 1 + LD_INFO.eps - assert_("%.40g" % o != '1') + assert_(f"{o:.40g}" != '1') @pytest.mark.skipif(longdouble_longer_than_double, @@ -306,7 +291,8 @@ def test_array_repr(): b = np.array([1], dtype=np.longdouble) if not np.all(a != b): raise ValueError("precision loss creating arrays") - assert_(repr(a) != repr(b)) + with np.printoptions(precision=LD_INFO.precision + 1): + assert_(repr(a) != repr(b)) # # Locale tests: scalar types formatting should be independent of the locale @@ -323,16 +309,6 @@ def test_fromstring_foreign_repr(self): a = np.fromstring(repr(f), dtype=float, sep=" ") assert_equal(a[0], f) - def test_fromstring_best_effort_float(self): - with 
assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1,234", dtype=float, sep=" "), - np.array([1.])) - - def test_fromstring_best_effort(self): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), - np.array([1.])) - def test_fromstring_foreign(self): s = "1.234" a = np.fromstring(s, dtype=np.longdouble, sep=" ") @@ -344,9 +320,8 @@ def test_fromstring_foreign_sep(self): assert_array_equal(a, b) def test_fromstring_foreign_value(self): - with assert_warns(DeprecationWarning): - b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") - assert_array_equal(b[0], 1) + with assert_raises(ValueError): + np.fromstring("1,234", dtype=np.longdouble, sep=" ") @pytest.mark.parametrize("int_val", [ diff --git a/blimgui/dist64/numpy/_core/tests/test_machar.py b/blimgui/dist64/numpy/_core/tests/test_machar.py deleted file mode 100644 index 5341667..0000000 --- a/blimgui/dist64/numpy/_core/tests/test_machar.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Test machar. Given recent changes to hardcode type data, we might want to get -rid of both MachAr and this test at some point. - -""" -from numpy._core._machar import MachAr -import numpy._core.numerictypes as ntypes -from numpy import errstate, array - - -class TestMachAr: - def _run_machar_highprec(self): - # Instantiate MachAr instance with high enough precision to cause - # underflow - try: - hiprec = ntypes.float96 - MachAr(lambda v: array(v, hiprec)) - except AttributeError: - # Fixme, this needs to raise a 'skip' exception. - "Skipping test: no ntypes.float96 available on this platform." - - def test_underlow(self): - # Regression test for #759: - # instantiating MachAr for dtype = np.float96 raises spurious warning. - with errstate(all='raise'): - try: - self._run_machar_highprec() - except FloatingPointError as e: - msg = "Caught %s exception, should not have been raised." 
% e - raise AssertionError(msg) diff --git a/blimgui/dist64/numpy/_core/tests/test_mem_overlap.py b/blimgui/dist64/numpy/_core/tests/test_mem_overlap.py index 88ccc36..7d5c3e5 100644 --- a/blimgui/dist64/numpy/_core/tests/test_mem_overlap.py +++ b/blimgui/dist64/numpy/_core/tests/test_mem_overlap.py @@ -1,14 +1,12 @@ import itertools + import pytest import numpy as np -from numpy._core._multiarray_tests import solve_diophantine, internal_overlap from numpy._core import _umath_tests +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine from numpy.lib.stride_tricks import as_strided -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_array_equal - ) - +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises ndims = 2 size = 10 @@ -63,7 +61,7 @@ def _check_assignment(srcidx, dstidx): arr[dstidx] = arr[srcidx] assert_(np.all(arr == cpy), - 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)) + f'assigning arr[{dstidx}] = arr[{srcidx}]') def test_overlapping_assignments(): @@ -72,8 +70,8 @@ def test_overlapping_assignments(): inds = _indices(ndims) for ind in inds: - srcidx = tuple([a[0] for a in ind]) - dstidx = tuple([a[1] for a in ind]) + srcidx = tuple(a[0] for a in ind) + dstidx = tuple(a[1] for a in ind) _check_assignment(srcidx, dstidx) @@ -89,7 +87,7 @@ def test_diophantine_fuzz(): feasible_count = 0 infeasible_count = 0 - min_count = 500//(ndim + 1) + min_count = 500 // (ndim + 1) while min(feasible_count, infeasible_count) < min_count: # Ensure big and small integer problems @@ -97,15 +95,15 @@ def test_diophantine_fuzz(): U_max = rng.randint(0, 11, dtype=np.intp)**6 A_max = min(max_int, A_max) - U_max = min(max_int-1, U_max) + U_max = min(max_int - 1, U_max) - A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp)) + A = tuple(int(rng.randint(1, A_max + 1, dtype=np.intp)) for j in range(ndim)) - U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp)) + U = tuple(int(rng.randint(0, U_max + 2, 
dtype=np.intp)) for j in range(ndim)) - b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U))) - b = int(rng.randint(-1, b_ub+2, dtype=np.intp)) + b_ub = min(max_int - 2, sum(a * ub for a, ub in zip(A, U))) + b = int(rng.randint(-1, b_ub + 2, dtype=np.intp)) if ndim == 0 and feasible_count < min_count: b = 0 @@ -120,7 +118,7 @@ def test_diophantine_fuzz(): # Check no solution exists (provided the problem is # small enough so that brute force checking doesn't # take too long) - ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U)) + ranges = tuple(range(0, a * ub + 1, a) for a, ub in zip(A, U)) size = 1 for r in ranges: @@ -134,7 +132,7 @@ def test_diophantine_fuzz(): assert_(X_simplified is not None, (A, U, b, X_simplified)) # Check validity - assert_(sum(a*x for a, x in zip(A, X)) == b) + assert_(sum(a * x for a, x in zip(A, X)) == b) assert_(all(0 <= x <= ub for x, ub in zip(X, U))) feasible_count += 1 @@ -147,9 +145,9 @@ def test_diophantine_overflow(): if max_int64 <= max_intp: # Check that the algorithm works internally in 128-bit; # solving this problem requires large intermediate numbers - A = (max_int64//2, max_int64//2 - 10) - U = (max_int64//2, max_int64//2 - 10) - b = 2*(max_int64//2) - 10 + A = (max_int64 // 2, max_int64 // 2 - 10) + U = (max_int64 // 2, max_int64 // 2 - 10) + b = 2 * (max_int64 // 2) - 10 assert_equal(solve_diophantine(A, U, b), (1, 1)) @@ -167,14 +165,15 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: + base_delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0] err_msg = " " + "\n ".join([ - "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],), - "shape_a = %r" % (a.shape,), - "shape_b = %r" % (b.shape,), - "strides_a = %r" % (a.strides,), - "strides_b = %r" % (b.strides,), - "size_a = %r" % (a.size,), - "size_b = %r" % (b.size,) + f"base_a - base_b = {base_delta!r}", + f"shape_a = {a.shape!r}", + f"shape_b = {b.shape!r}", + f"strides_a = 
{a.strides!r}", + f"strides_b = {b.strides!r}", + f"size_a = {a.size!r}", + f"size_b = {b.size!r}" ]) assert_equal(got, exact, err_msg=err_msg) @@ -186,24 +185,24 @@ def test_may_share_memory_manual(): # Base arrays xs0 = [ np.zeros([13, 21, 23, 22], dtype=np.int8), - np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:] + np.zeros([13, 21, 23 * 2, 22], dtype=np.int8)[:, :, ::2, :] ] # Generate all negative stride combinations xs = [] for x in xs0: - for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)): + for ss in itertools.product(*(([slice(None), slice(None, None, -1)],) * 4)): xp = x[ss] xs.append(xp) for x in xs: # The default is a simple extent check - assert_(np.may_share_memory(x[:,0,:], x[:,1,:])) - assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None)) + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :])) + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :], max_work=None)) # Exact checks - check_may_share_memory_exact(x[:,0,:], x[:,1,:]) - check_may_share_memory_exact(x[:,::7], x[:,3::3]) + check_may_share_memory_exact(x[:, 0, :], x[:, 1, :]) + check_may_share_memory_exact(x[:, ::7], x[:, 3::3]) try: xp = x.ravel() @@ -215,15 +214,15 @@ def test_may_share_memory_manual(): # 0-size arrays cannot overlap check_may_share_memory_exact(x.ravel()[6:6], - xp.reshape(13, 21, 23, 11)[:,::7]) + xp.reshape(13, 21, 23, 11)[:, ::7]) # Test itemsize is dealt with - check_may_share_memory_exact(x[:,::7], + check_may_share_memory_exact(x[:, ::7], xp.reshape(13, 21, 23, 11)) - check_may_share_memory_exact(x[:,::7], - xp.reshape(13, 21, 23, 11)[:,3::3]) + check_may_share_memory_exact(x[:, ::7], + xp.reshape(13, 21, 23, 11)[:, 3::3]) check_may_share_memory_exact(x.ravel()[6:7], - xp.reshape(13, 21, 23, 11)[:,::7]) + xp.reshape(13, 21, 23, 11)[:, ::7]) # Check unit size x = np.zeros([1], dtype=np.int8) @@ -238,18 +237,18 @@ def iter_random_view_pairs(x, same_steps=True, equal_size=False): raise ValueError def random_slice(n, step): - start = 
rng.randint(0, n+1, dtype=np.intp) - stop = rng.randint(start, n+1, dtype=np.intp) + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) if rng.randint(0, 2, dtype=np.intp) == 0: stop, start = start, stop step *= -1 return slice(start, stop, step) def random_slice_fixed_size(n, step, size): - start = rng.randint(0, n+1 - size*step) - stop = start + (size-1)*step + 1 + start = rng.randint(0, n + 1 - size * step) + stop = start + (size - 1) * step + 1 if rng.randint(0, 2) == 0: - stop, start = start-1, stop-1 + stop, start = start - 1, stop - 1 if stop < 0: stop = None step *= -1 @@ -259,7 +258,7 @@ def random_slice_fixed_size(n, step, size): yield x, x for j in range(1, 7, 3): yield x[j:], x[:-j] - yield x[...,j:], x[...,:-j] + yield x[..., j:], x[..., :-j] # An array with zero stride internal overlap strides = list(x.strides) @@ -298,7 +297,7 @@ def random_slice_fixed_size(n, step, size): if a.size == 0: continue - steps2 = tuple(rng.randint(1, max(2, p//(1+pa))) + steps2 = tuple(rng.randint(1, max(2, p // (1 + pa))) if rng.randint(0, 5) == 0 else 1 for p, s, pa in zip(x.shape, s1, a.shape)) s2 = tuple(random_slice_fixed_size(p, s, pa) @@ -322,7 +321,7 @@ def random_slice_fixed_size(n, step, size): def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): # Check that overlap problems with common strides are solved with # little work. - x = np.zeros([17,34,71,97], dtype=np.int16) + x = np.zeros([17, 34, 71, 97], dtype=np.int16) feasible = 0 infeasible = 0 @@ -370,7 +369,7 @@ def test_may_share_memory_harder_fuzz(): # also exist but not be detected here, as the set of problems # comes from RNG. 
- check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2, + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size) // 2, same_steps=False, min_count=2000) @@ -381,8 +380,8 @@ def test_shares_memory_api(): assert_equal(np.shares_memory(x, x), True) assert_equal(np.shares_memory(x, x.copy()), False) - a = x[:,::2,::3] - b = x[:,::3,::2] + a = x[:, ::2, ::3] + b = x[:, ::3, ::2] assert_equal(np.shares_memory(a, b), True) assert_equal(np.shares_memory(a, b, max_work=None), True) assert_raises( @@ -404,9 +403,11 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U))) + sum_ax = sum(a * x for a, x in zip(A, X)) + sum_au_half = sum(a * u // 2 for a, u in zip(A, U)) + assert_(sum_ax == sum_au_half) assert_(all(0 <= x <= u for x, u in zip(X, U))) - assert_(any(x != u//2 for x, u in zip(X, U))) + assert_(any(x != u // 2 for x, u in zip(X, U))) if exists: assert_(X is not None, repr(X)) @@ -414,20 +415,20 @@ def check(A, U, exists=None): assert_(X is None, repr(X)) # Smoke tests - check((3, 2), (2*2, 3*2), exists=True) - check((3*2, 2), (15*2, (3-1)*2), exists=False) + check((3, 2), (2 * 2, 3 * 2), exists=True) + check((3 * 2, 2), (15 * 2, (3 - 1) * 2), exists=False) def test_internal_overlap_slices(): # Slicing an array never generates internal overlap - x = np.zeros([17,34,71,97], dtype=np.int16) + x = np.zeros([17, 34, 71, 97], dtype=np.int16) rng = np.random.RandomState(1234) def random_slice(n, step): - start = rng.randint(0, n+1, dtype=np.intp) - stop = rng.randint(start, n+1, dtype=np.intp) + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) if rng.randint(0, 2, dtype=np.intp) == 0: stop, start = start, stop step *= -1 @@ -456,7 +457,7 @@ def check_internal_overlap(a, manual_expected=None): m = set() ranges = tuple(range(n) for n in a.shape) for v in itertools.product(*ranges): 
- offset = sum(s*w for s, w in zip(a.strides, v)) + offset = sum(s * w for s, w in zip(a.strides, v)) if offset in m: expected = True break @@ -482,8 +483,8 @@ def test_internal_overlap_manual(): # Check low-dimensional special cases - check_internal_overlap(x, False) # 1-dim - check_internal_overlap(x.reshape([]), False) # 0-dim + check_internal_overlap(x, False) # 1-dim + check_internal_overlap(x.reshape([]), False) # 0-dim a = as_strided(x, strides=(3, 4), shape=(4, 4)) check_internal_overlap(a, False) @@ -640,19 +641,18 @@ def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, sl = [slice(None)] * ndim if axis is None: if outsize is None: - sl = [slice(0, 1)] + [0]*(ndim - 1) + sl = [slice(0, 1)] + [0] * (ndim - 1) else: - sl = [slice(0, outsize)] + [0]*(ndim - 1) - else: - if outsize is None: - k = b.shape[axis]//2 - if ndim == 1: - sl[axis] = slice(k, k + 1) - else: - sl[axis] = k + sl = [slice(0, outsize)] + [0] * (ndim - 1) + elif outsize is None: + k = b.shape[axis] // 2 + if ndim == 1: + sl[axis] = slice(k, k + 1) else: - assert b.shape[axis] >= outsize - sl[axis] = slice(0, outsize) + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) b_out = b[tuple(sl)] if scalarize: @@ -706,7 +706,7 @@ def get_out_axis_size(a, b, axis): def do_reduceat(a, out, axis): if axis is None: size = len(a) - step = size//len(out) + step = size // len(out) else: size = a.shape[axis] step = a.shape[axis] // out.shape[axis] @@ -753,19 +753,19 @@ def test_unary_gufunc_fuzz(self): # Ensure the shapes are so that euclidean_pdist is happy if b.shape[-1] > b.shape[-2]: - b = b[...,0,:] + b = b[..., 0, :] else: - b = b[...,:,0] + b = b[..., :, 0] n = a.shape[-2] p = n * (n - 1) // 2 if p <= b.shape[-1] and p > 0: - b = b[...,:p] + b = b[..., :p] else: - n = max(2, int(np.sqrt(b.shape[-1]))//2) + n = max(2, int(np.sqrt(b.shape[-1])) // 2) p = n * (n - 1) // 2 - a = a[...,:n,:] - b = b[...,:p] + a = a[..., :n, :] + b = b[..., :p] # 
Call if np.shares_memory(a, b): @@ -843,17 +843,17 @@ def check(a, b): k = 10 indices = [ np.index_exp[:n], - np.index_exp[k:k+n], - np.index_exp[n-1::-1], - np.index_exp[k+n-1:k-1:-1], - np.index_exp[:2*n:2], - np.index_exp[k:k+2*n:2], - np.index_exp[2*n-1::-2], - np.index_exp[k+2*n-1:k-1:-2], + np.index_exp[k:k + n], + np.index_exp[n - 1::-1], + np.index_exp[k + n - 1:k - 1:-1], + np.index_exp[:2 * n:2], + np.index_exp[k:k + 2 * n:2], + np.index_exp[2 * n - 1::-2], + np.index_exp[k + 2 * n - 1:k - 1:-2], ] for xi, yi in itertools.product(indices, indices): - v = np.arange(1, 1 + n*2 + k, dtype=dtype) + v = np.arange(1, 1 + n * 2 + k, dtype=dtype) x = v[xi] y = v[yi] @@ -901,14 +901,14 @@ def check(a, b, c): indices = [] for p in [1, 2]: indices.extend([ - np.index_exp[:p*n:p], - np.index_exp[k:k+p*n:p], - np.index_exp[p*n-1::-p], - np.index_exp[k+p*n-1:k-1:-p], + np.index_exp[:p * n:p], + np.index_exp[k:k + p * n:p], + np.index_exp[p * n - 1::-p], + np.index_exp[k + p * n - 1:k - 1:-p], ]) for x, y, z in itertools.product(indices, indices, indices): - v = np.arange(6*n).astype(dtype) + v = np.arange(6 * n).astype(dtype) x = v[x] y = v[y] z = v[z] diff --git a/blimgui/dist64/numpy/_core/tests/test_mem_policy.py b/blimgui/dist64/numpy/_core/tests/test_mem_policy.py index 57826c1..6ad042a 100644 --- a/blimgui/dist64/numpy/_core/tests/test_mem_policy.py +++ b/blimgui/dist64/numpy/_core/tests/test_mem_policy.py @@ -2,13 +2,14 @@ import gc import os import sys +import sysconfig import threading import pytest import numpy as np -from numpy.testing import extbuild, assert_warns, IS_WASM, IS_EDITABLE from numpy._core.multiarray import get_handler_name +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture @@ -220,6 +221,8 @@ def get_module(tmp_path): except ImportError: pass # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") return 
extbuild.build_and_import_extension('mem_policy', functions, prologue=prologue, @@ -412,6 +415,7 @@ def test_new_policy(get_module): reason=("bad interaction between getenv and " "os.environ inside pytest")) @pytest.mark.parametrize("policy", ["0", "1", None]) +@pytest.mark.thread_unsafe(reason="modifies environment variables") def test_switch_owner(get_module, policy): a = get_module.get_array() assert np._core.multiarray.get_handler_name(a) is None @@ -429,7 +433,7 @@ def test_switch_owner(get_module, policy): # The policy should be NULL, so we have to assume we can call # "free". A warning is given if the policy == "1" if policy: - with assert_warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning) as w: del a gc.collect() else: diff --git a/blimgui/dist64/numpy/_core/tests/test_memmap.py b/blimgui/dist64/numpy/_core/tests/test_memmap.py index 78b79f6..0b186a9 100644 --- a/blimgui/dist64/numpy/_core/tests/test_memmap.py +++ b/blimgui/dist64/numpy/_core/tests/test_memmap.py @@ -1,19 +1,36 @@ -import sys -import os import mmap -import pytest +import os +import sys +import warnings from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryFile -from numpy import ( - memmap, sum, average, prod, ndarray, isscalar, add, subtract, multiply) +import pytest -from numpy import arange, allclose, asarray +from numpy import ( + add, + allclose, + arange, + asarray, + average, + isscalar, + memmap, + multiply, + ndarray, + prod, + subtract, + sum, +) from numpy.testing import ( - assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY, - break_cycles - ) + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + break_cycles, +) + +@pytest.mark.thread_unsafe(reason="setup & memmap is thread-unsafe (gh-29126)") class TestMemmap: def setup_method(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') @@ -151,8 +168,9 @@ def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data - 
with suppress_warnings() as sup: - sup.filter(FutureWarning, "np.average currently does not preserve") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "np.average currently does not preserve", FutureWarning) for unary_op in [sum, average, prod]: result = unary_op(fp) assert_(isscalar(result)) @@ -167,9 +185,9 @@ def test_ufunc_return_ndarray(self): assert_(binary_op(fp, fp).__class__ is ndarray) fp += 1 - assert(fp.__class__ is memmap) + assert fp.__class__ is memmap add(fp, 1, out=fp) - assert(fp.__class__ is memmap) + assert fp.__class__ is memmap def test_getitem(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) @@ -191,7 +209,7 @@ class MemmapSubClass(memmap): assert_(sum(fp, axis=0).__class__ is MemmapSubClass) assert_(sum(fp).__class__ is MemmapSubClass) assert_(fp[1:, :-1].__class__ is MemmapSubClass) - assert(fp[[0, 1]].__class__ is MemmapSubClass) + assert fp[[0, 1]].__class__ is MemmapSubClass def test_mmap_offset_greater_than_allocation_granularity(self): size = 5 * mmap.ALLOCATIONGRANULARITY @@ -200,14 +218,14 @@ def test_mmap_offset_greater_than_allocation_granularity(self): assert_(fp.offset == offset) def test_empty_array_with_offset_multiple_of_allocation_granularity(self): - self.tmpfp.write(b'a'*mmap.ALLOCATIONGRANULARITY) + self.tmpfp.write(b'a' * mmap.ALLOCATIONGRANULARITY) size = 0 offset = mmap.ALLOCATIONGRANULARITY fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) assert_equal(fp.offset, offset) def test_no_shape(self): - self.tmpfp.write(b'a'*16) + self.tmpfp.write(b'a' * 16) mm = memmap(self.tmpfp, dtype='float64') assert_equal(mm.shape, (2,)) diff --git a/blimgui/dist64/numpy/_core/tests/test_multiarray.py b/blimgui/dist64/numpy/_core/tests/test_multiarray.py index 955144e..36f690d 100644 --- a/blimgui/dist64/numpy/_core/tests/test_multiarray.py +++ b/blimgui/dist64/numpy/_core/tests/test_multiarray.py @@ -1,44 +1,61 @@ -from __future__ import annotations - +import builtins import 
collections.abc -import tempfile -import sys -import warnings -import operator +import ctypes +import functools +import gc +import importlib +import inspect import io import itertools -import functools -import ctypes +import mmap +import operator import os -import gc +import pathlib +import pickle import re +import sys +import tempfile +import warnings import weakref -import pytest from contextlib import contextmanager -import pickle -import pathlib -import builtins + +# Need to test an object that does not fully implement math interface +from datetime import datetime, timedelta from decimal import Decimal -import mmap + +import pytest import numpy as np import numpy._core._multiarray_tests as _multiarray_tests from numpy._core._rational_tests import rational -from numpy.exceptions import AxisError, ComplexWarning -from numpy.testing import ( - assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, - assert_array_equal, assert_raises_regex, assert_array_almost_equal, - assert_allclose, IS_PYPY, IS_WASM, IS_PYSTON, HAS_REFCOUNT, - assert_array_less, runstring, temppath, suppress_warnings, break_cycles, - check_support_sve, assert_array_compare, IS_64BIT - ) -from numpy.testing._private.utils import requires_memory, _no_tracing +from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib import stride_tricks from numpy.lib.recfunctions import repack_fields -from numpy._core.multiarray import _get_ndarray_c_version, dot - -# Need to test an object that does not fully implement math interface -from datetime import timedelta, datetime +from numpy.testing import ( + BLAS_SUPPORTS_FPE, + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_PYSTON, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_equal, + 
assert_raises, + assert_raises_regex, + break_cycles, + check_support_sve, + runstring, + temppath, +) +from numpy.testing._private.utils import _no_tracing, requires_memory def assert_arg_sorted(arr, arg): @@ -72,34 +89,32 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None): if not hasattr(shape, '__len__'): shape = (shape,) size = functools.reduce(operator.mul, shape) * dtype.itemsize - buf = np.empty(size + 2*align + 1, np.uint8) + buf = np.empty(size + 2 * align + 1, np.uint8) ptr = buf.__array_interface__['data'][0] offset = ptr % align if offset != 0: offset = align - offset - if (ptr % (2*align)) == 0: + if (ptr % (2 * align)) == 0: offset += align # Note: slices producing 0-size arrays do not necessarily change # data pointer --- so we use and allocate size+1 - buf = buf[offset:offset+size+1][:-1] + buf = buf[offset:offset + size + 1][:-1] buf.fill(0) data = np.ndarray(shape, dtype, buf, order=order) return data class TestFlags: - def setup_method(self): - self.a = np.arange(10) - def test_writeable(self): + arr = np.arange(10) mydict = locals() - self.a.flags.writeable = False - assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 + arr.flags.writeable = False + assert_raises(ValueError, runstring, 'arr[0] = 3', mydict) + arr.flags.writeable = True + arr[0] = 5 + arr[0] = 0 def test_writeable_any_base(self): # Ensure that any base being writeable is sufficient to change flag; @@ -140,7 +155,7 @@ def test_writeable_from_readonly(self): data = b'\x00' * 100 vals = np.frombuffer(data, 'B') assert_raises(ValueError, vals.setflags, write=True) - types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) values = np._core.records.fromstring(data, types) vals = values['vals'] assert_raises(ValueError, vals.setflags, write=True) @@ -153,7 +168,7 @@ def test_writeable_from_buffer(self): assert_(vals.flags.writeable is False) 
vals.setflags(write=True) assert_(vals.flags.writeable) - types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) values = np._core.records.fromstring(data, types) vals = values['vals'] assert_(vals.flags.writeable) @@ -206,12 +221,7 @@ def test_writeable_from_c_data(self): with assert_raises(ValueError): view.flags.writeable = True - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - with assert_raises(DeprecationWarning): - arr.flags.writeable = True - - with assert_warns(DeprecationWarning): + with assert_raises(ValueError): arr.flags.writeable = True def test_warnonwrite(self): @@ -242,18 +252,19 @@ class MyArr: assert np.asarray(MyArr()).flags.writeable is writeable def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags['C'], True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.writebackifcopy, False) - assert_equal(self.a.flags['X'], False) - assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + arr = np.arange(10) + assert_equal(arr.flags.carray, True) + assert_equal(arr.flags['C'], True) + assert_equal(arr.flags.farray, False) + assert_equal(arr.flags.behaved, True) + assert_equal(arr.flags.fnc, False) + assert_equal(arr.flags.forc, True) + assert_equal(arr.flags.owndata, True) + assert_equal(arr.flags.writeable, True) + assert_equal(arr.flags.aligned, True) + assert_equal(arr.flags.writebackifcopy, False) + assert_equal(arr.flags['X'], False) + assert_equal(arr.flags['WRITEBACKIFCOPY'], False) def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) @@ -301,41 +312,44 @@ def test_int(self): class TestAttributes: - def setup_method(self): - 
self.one = np.arange(10) - self.two = np.arange(20).reshape(4, 5) - self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + def _create_arrays(self): + one = np.arange(10) + two = np.arange(20).reshape(4, 5) + three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + return one, two, three def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4, 5)) - assert_equal(self.three.shape, (2, 5, 6)) - self.three.shape = (10, 3, 2) - assert_equal(self.three.shape, (10, 3, 2)) - self.three.shape = (2, 5, 6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5*num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30*num, 6*num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20*num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, np.arange(20)) + one, two, three = self._create_arrays() + assert_equal(one.shape, (10,)) + assert_equal(two.shape, (4, 5)) + assert_equal(three.shape, (2, 5, 6)) + three.shape = (10, 3, 2) + assert_equal(three.shape, (10, 3, 2)) + three.shape = (2, 5, 6) + assert_equal(one.strides, (one.itemsize,)) + num = two.itemsize + assert_equal(two.strides, (5 * num, num)) + num = three.itemsize + assert_equal(three.strides, (30 * num, 6 * num, num)) + assert_equal(one.ndim, 1) + assert_equal(two.ndim, 2) + assert_equal(three.ndim, 3) + num = two.itemsize + assert_equal(two.size, 20) + assert_equal(two.nbytes, 20 * num) + assert_equal(two.itemsize, two.dtype.itemsize) + assert_equal(two.base, np.arange(20)) def test_dtypeattr(self): - assert_equal(self.one.dtype, np.dtype(np.int_)) - assert_equal(self.three.dtype, np.dtype(np.float64)) - assert_equal(self.one.dtype.char, np.dtype(int).char) - assert self.one.dtype.char in "lq" - 
assert_equal(self.three.dtype.char, 'd') - assert_(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') + one, _, three = self._create_arrays() + assert_equal(one.dtype, np.dtype(np.int_)) + assert_equal(three.dtype, np.dtype(np.float64)) + assert_equal(one.dtype.char, np.dtype(int).char) + assert one.dtype.char in "lq" + assert_equal(three.dtype.char, 'd') + assert_(three.dtype.str[0] in '<>') + assert_equal(one.dtype.str[1], 'i') + assert_equal(three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 @@ -346,32 +360,33 @@ def test_int_subclassing(self): assert_(not isinstance(numpy_int, int)) def test_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, - offset=offset*x.itemsize, - strides=strides*x.itemsize) + offset=offset * x.itemsize, + strides=strides * x.itemsize) assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_raises(ValueError, make_array, 4, 4, -2) assert_raises(ValueError, make_array, 4, 2, -1) assert_raises(ValueError, make_array, 8, 3, 1) - assert_equal(make_array(8, 3, 0), np.array([3]*8)) + assert_equal(make_array(8, 3, 0), np.array([3] * 8)) # Check behavior reported in gh-2503: assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) make_array(0, 0, 10) def test_set_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): try: r = np.ndarray([size], dtype=int, buffer=x, - offset=offset*x.itemsize) + offset=offset * x.itemsize) except Exception as e: raise RuntimeError(e) - r.strides = strides = strides*x.itemsize + with pytest.warns(DeprecationWarning): + r.strides = strides * x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) @@ -381,24 +396,28 @@ def make_array(size, offset, strides): assert_raises(RuntimeError, 
make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + x = stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): - arr.strides = strides + with pytest.warns(DeprecationWarning): + arr.strides = strides - assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) + assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + x = stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] - a.strides = 1 - a[::2].strides = 2 + with pytest.warns(DeprecationWarning): + a.strides = 1 + with pytest.warns(DeprecationWarning): + a[::2].strides = 2 # test 0d arr_0d = np.array(0) - arr_0d.strides = () + with pytest.warns(DeprecationWarning): + arr_0d.strides = () assert_raises(TypeError, set_strides, arr_0d, None) def test_fill(self): @@ -514,7 +533,7 @@ def test_array_copy_if_needed(self): assert_array_equal(d, [1, 4, 7]) def test_array_copy_true(self): - d = np.array([[1,2,3], [1, 2, 3]]) + d = np.array([[1, 2, 3], [1, 2, 3]]) e = np.array(d, copy=True) d[0, 1] = 3 e[0, 2] = -7 @@ -524,7 +543,7 @@ def test_array_copy_true(self): d[0, 1] = 5 e[0, 2] = 7 assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) - assert_array_equal(d, [[1, 5, 3], [1,2,3]]) + assert_array_equal(d, [[1, 5, 3], [1, 2, 3]]) def test_array_copy_str(self): with pytest.raises( @@ -540,7 +559,7 @@ def test_array_cont(self): assert_(np.ascontiguousarray(d).flags.f_contiguous) assert_(np.asfortranarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) - d = np.ones((10, 10))[::2,::2] + d = np.ones((10, 10))[::2, ::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) 
assert_(np.asfortranarray(d).flags.f_contiguous) @@ -572,6 +591,32 @@ def test_array_as_keyword(self, func): else: func(a=3) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_array_signature(self, func): + sig = inspect.signature(func) + + assert len(sig.parameters) >= 3 + + arg0 = "object" if func is np.array else "a" + assert arg0 in sig.parameters + assert sig.parameters[arg0].default is inspect.Parameter.empty + assert sig.parameters[arg0].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].default is None + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "like" in sig.parameters + assert sig.parameters["like"].default is None + assert sig.parameters["like"].kind is inspect.Parameter.KEYWORD_ONLY + class TestAssignment: def test_assignment_broadcasting(self): @@ -643,7 +688,7 @@ def test_stringlike_empty_list(self): b = np.array([b'done']) class bad_sequence: - def __getitem__(self): pass + def __getitem__(self, _, /): pass def __len__(self): raise RuntimeError assert_raises(ValueError, operator.setitem, u, 0, []) @@ -686,8 +731,9 @@ def test_longdouble_assignment(self): def test_cast_to_string(self): # cast to str should do "str(scalar)", not "str(scalar.item())" - # Example: In python2, str(float) is truncated, so we want to avoid - # str(np.float64(...).item()) as this would incorrectly truncate. + # When converting a float to a string via array assignment, we + # want to ensure that the conversion uses str(scalar) to preserve + # the expected precision. 
a = np.zeros(1, dtype='S20') a[:] = np.array(['1.12345678901234567890'], dtype='f8') assert_equal(a[0], b"1.1234567890123457") @@ -716,46 +762,46 @@ def test_structured_non_void(self): class TestZeroRank: - def setup_method(self): - self.d = np.array(0), np.array('x', object) + def _create_arrays(self): + return np.array(0), np.array('x', object) def test_ellipsis_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[...], 0) assert_equal(b[...], 'x') assert_(a[...].base is a) # `a[...] is a` in numpy <1.9. assert_(b[...].base is b) # `b[...] is b` in numpy <1.9. def test_empty_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[()], 0) assert_equal(b[()], 'x') assert_(type(a[()]) is a.dtype.type) assert_(type(b[()]) is str) def test_invalid_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[0], b) assert_raises(IndexError, lambda x: x[np.array([], int)], a) assert_raises(IndexError, lambda x: x[np.array([], int)], b) def test_ellipsis_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[...] = 42 assert_equal(a, 42) b[...] 
= '' assert_equal(b.item(), '') def test_empty_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[()] = 42 assert_equal(a, 42) b[()] = '' assert_equal(b.item(), '') def test_invalid_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() def assign(x, i, v): x[i] = v @@ -765,7 +811,7 @@ def assign(x, i, v): assert_raises(ValueError, assign, a, (), '') def test_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -773,16 +819,16 @@ def test_newaxis(self): assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1)) assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) - assert_equal(a[(np.newaxis,)*10].shape, (1,)*10) + assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() def subscript(x, i): x[i] assert_raises(IndexError, subscript, a, (np.newaxis, 0)) - assert_raises(IndexError, subscript, a, (np.newaxis,)*70) + assert_raises(IndexError, subscript, a, (np.newaxis,) * 70) def test_constructor(self): x = np.ndarray(()) @@ -820,26 +866,26 @@ def test_real_imag(self): class TestScalarIndexing: - def setup_method(self): - self.d = np.array([0, 1])[0] + def _create_array(self): + return np.array([0, 1])[0] def test_ellipsis_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[...], 0) assert_equal(a[...].shape, ()) def test_empty_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[()], 0) assert_equal(a[()].shape, ()) def test_invalid_subscript(self): - a = self.d + a = self._create_array() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[np.array([], int)], a) def test_invalid_subscript_assignment(self): - a = self.d + a = self._create_array() def assign(x, i, v): x[i] = v @@ 
-847,7 +893,7 @@ def assign(x, i, v): assert_raises(TypeError, assign, a, 0, 42) def test_newaxis(self): - a = self.d + a = self._create_array() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -855,16 +901,16 @@ def test_newaxis(self): assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1)) assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) - assert_equal(a[(np.newaxis,)*10].shape, (1,)*10) + assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a = self.d + a = self._create_array() def subscript(x, i): x[i] assert_raises(IndexError, subscript, a, (np.newaxis, 0)) - assert_raises(IndexError, subscript, a, (np.newaxis,)*70) + assert_raises(IndexError, subscript, a, (np.newaxis,) * 70) def test_overlapping_assignment(self): # With positive strides @@ -882,7 +928,7 @@ def test_overlapping_assignment(self): assert_equal(a, [3, 2, 1, 0]) a = np.arange(6).reshape(2, 3) - a[::-1,:] = a[:, ::-1] + a[::-1, :] = a[:, ::-1] assert_equal(a, [[5, 4, 3], [2, 1, 0]]) a = np.arange(6).reshape(2, 3) @@ -931,7 +977,7 @@ def test_from_string(self): nstr = ['123', '123'] result = np.array([123, 123], dtype=int) for type in types: - msg = 'String conversion for %s' % type + msg = f'String conversion for {type}' assert_equal(np.array(nstr, dtype=type), result, err_msg=msg) def test_void(self): @@ -967,7 +1013,6 @@ def test_structured_void_promotion(self, idx): [np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i,i')[idx]], dtype="V") - def test_too_big_error(self): # 45341 is the smallest integer greater than sqrt(2**31 - 1). # 3037000500 is the smallest integer greater than sqrt(2**63 - 1). 
@@ -985,6 +1030,7 @@ def test_too_big_error(self): @pytest.mark.skipif(not IS_64BIT, reason="malloc may not fail on 32 bit systems") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_malloc_fails(self): # This test is guaranteed to fail due to a too large allocation with assert_raises(np._core._exceptions._ArrayMemoryError): @@ -1022,7 +1068,7 @@ def test_zeros_big(self): # This test can fail on 32-bit systems due to insufficient # contiguous memory. Deallocating the previous array increases the # chance of success. - del(d) + del d def test_zeros_obj(self): # test initialization from PyLong(0) @@ -1039,32 +1085,32 @@ def test_zeros_like_like_zeros(self): for c in np.typecodes['All']: if c == 'V': continue - d = np.zeros((3,3), dtype=c) + d = np.zeros((3, 3), dtype=c) assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) # explicitly check some special cases - d = np.zeros((3,3), dtype='S5') + d = np.zeros((3, 3), dtype='S5') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) - d = np.zeros((3,3), dtype='U5') + d = np.zeros((3, 3), dtype='U5') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) - d = np.zeros((3,3), dtype=' ndmax validation + data = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(ValueError, match="object too deep for desired array"): + np.array(data, ndmax=1, dtype=object) + class TestStructured: def test_subarray_field_access(self): @@ -1468,7 +1598,7 @@ def test_objview(self): def test_setfield(self): # https://github.com/numpy/numpy/issues/3126 struct_dt = np.dtype([('elem', 'i4', 5),]) - dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)]) + dt = np.dtype([('field', 'i4', 10), ('struct', struct_dt)]) x = np.zeros(1, dt) x[0]['field'] = np.ones(10, dtype='i4') x[0]['struct'] = np.ones(1, dtype=struct_dt) @@ -1530,7 +1660,7 @@ def test_zero_width_string(self): assert_equal(xx, [[b'', b''], [b'', b'']]) # 
check for no uninitialized memory due to viewing S0 array assert_equal(xx[:].dtype, xx.dtype) - assert_array_equal(eval(repr(xx), dict(np=np, array=np.array)), xx) + assert_array_equal(eval(repr(xx), {"np": np, "array": np.array}), xx) b = io.BytesIO() np.save(b, xx) @@ -1555,51 +1685,51 @@ def test_assignment(self): def testassign(arr, v): c = arr.copy() c[0] = v # assign using setitem - c[1:] = v # assign using "dtype_transfer" code paths + c[1:] = v # assign using "dtype_transfer" code paths return c dt = np.dtype([('foo', 'i8'), ('bar', 'i8')]) arr = np.ones(2, dt) - v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')]) - v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')]) - v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')]) + v1 = np.array([(2, 3)], dtype=[('foo', 'i8'), ('bar', 'i8')]) + v2 = np.array([(2, 3)], dtype=[('bar', 'i8'), ('foo', 'i8')]) + v3 = np.array([(2, 3)], dtype=[('bar', 'i8'), ('baz', 'i8')]) v4 = np.array([(2,)], dtype=[('bar', 'i8')]) - v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')]) + v5 = np.array([(2, 3)], dtype=[('foo', 'f8'), ('bar', 'f8')]) w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]}) - ans = np.array([(2,3),(2,3)], dtype=dt) + ans = np.array([(2, 3), (2, 3)], dtype=dt) assert_equal(testassign(arr, v1), ans) assert_equal(testassign(arr, v2), ans) assert_equal(testassign(arr, v3), ans) assert_raises(TypeError, lambda: testassign(arr, v4)) assert_equal(testassign(arr, v5), ans) w[:] = 4 - assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt)) + assert_equal(arr, np.array([(1, 4), (1, 4)], dtype=dt)) # test field-reordering, assignment by position, and self-assignment - a = np.array([(1,2,3)], + a = np.array([(1, 2, 3)], dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')]) a[['foo', 'bar']] = a[['bar', 'foo']] - assert_equal(a[0].item(), (2,1,3)) + assert_equal(a[0].item(), (2, 1, 3)) # test that this works even for 'simple_unaligned' structs # (ie, that PyArray_EquivTypes 
cares about field order too) - a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2)], dtype=[('a', 'i4'), ('b', 'i4')]) a[['a', 'b']] = a[['b', 'a']] - assert_equal(a[0].item(), (2,1)) + assert_equal(a[0].item(), (2, 1)) def test_structuredscalar_indexing(self): # test gh-7262 x = np.empty(shape=1, dtype="(2,)3S,(2,)3U") - assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]]) + assert_equal(x[["f0", "f1"]][0], x[0][["f0", "f1"]]) assert_equal(x[0], x[0][()]) def test_multiindex_titles(self): a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')]) - assert_raises(KeyError, lambda : a[['a','c']]) - assert_raises(KeyError, lambda : a[['a','a']]) - assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated - a[['b','c']] # no exception + assert_raises(KeyError, lambda: a[['a', 'c']]) + assert_raises(KeyError, lambda: a[['a', 'a']]) + assert_raises(ValueError, lambda: a[['b', 'b']]) # field exists, but repeated + a[['b', 'c']] # no exception def test_structured_cast_promotion_fieldorder(self): # gh-15494 @@ -1649,9 +1779,9 @@ def test_structured_cast_promotion_fieldorder(self): assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')])) @pytest.mark.parametrize("dtype_dict", [ - dict(names=["a", "b"], formats=["i4", "f"], itemsize=100), - dict(names=["a", "b"], formats=["i4", "f"], - offsets=[0, 12])]) + {"names": ["a", "b"], "formats": ["i4", "f"], "itemsize": 100}, + {"names": ["a", "b"], "formats": ["i4", "f"], + "offsets": [0, 12]}]) @pytest.mark.parametrize("align", [True, False]) def test_structured_promotion_packs(self, dtype_dict, align): # Structured dtypes are packed when promoted (we consider the packed @@ -1737,10 +1867,10 @@ def test_count_nonzero_all(self): def test_count_nonzero_unaligned(self): # prevent mistakes as e.g. 
gh-4060 for o in range(7): - a = np.zeros((18,), dtype=bool)[o+1:] + a = np.zeros((18,), dtype=bool)[o + 1:] a[:o] = True assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) - a = np.ones((18,), dtype=bool)[o+1:] + a = np.ones((18,), dtype=bool)[o + 1:] a[:o] = False assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) @@ -1767,11 +1897,9 @@ def _test_cast_from_flexible(self, dtype): def test_cast_from_void(self): self._test_cast_from_flexible(np.void) - @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_unicode(self): self._test_cast_from_flexible(np.str_) - @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_bytes(self): self._test_cast_from_flexible(np.bytes_) @@ -1888,9 +2016,9 @@ def test_all_where(self): [True]]) for _ax in [0, None]: assert_equal(a.all(axis=_ax, where=wh_lower), - np.all(a[wh_lower[:,0],:], axis=_ax)) + np.all(a[wh_lower[:, 0], :], axis=_ax)) assert_equal(np.all(a, axis=_ax, where=wh_lower), - a[wh_lower[:,0],:].all(axis=_ax)) + a[wh_lower[:, 0], :].all(axis=_ax)) assert_equal(a.all(where=wh_full), True) assert_equal(np.all(a, where=wh_full), True) @@ -1909,9 +2037,9 @@ def test_any_where(self): [False]]) for _ax in [0, None]: assert_equal(a.any(axis=_ax, where=wh_middle), - np.any(a[wh_middle[:,0],:], axis=_ax)) + np.any(a[wh_middle[:, 0], :], axis=_ax)) assert_equal(np.any(a, axis=_ax, where=wh_middle), - a[wh_middle[:,0],:].any(axis=_ax)) + a[wh_middle[:, 0], :].any(axis=_ax)) assert_equal(a.any(where=wh_full), False) assert_equal(np.any(a, where=wh_full), False) assert_equal(a.any(where=False), False) @@ -1951,10 +2079,10 @@ def test_compress(self): assert_equal(out, 1) def test_choose(self): - x = 2*np.ones((3,), dtype=int) - y = 3*np.ones((3,), dtype=int) - x2 = 2*np.ones((2, 3), dtype=int) - y2 = 3*np.ones((2, 3), dtype=int) + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) ind = np.array([0, 0, 1]) A = 
ind.choose((x, y)) @@ -1968,7 +2096,7 @@ def test_choose(self): oned = np.ones(1) # gh-12031, caused SEGFAULT - assert_raises(TypeError, oned.choose,np.void(0), [oned]) + assert_raises(TypeError, oned.choose, np.void(0), [oned]) out = np.array(0) ret = np.choose(np.array(1), [10, 20, 30], out=out) @@ -1977,9 +2105,15 @@ def test_choose(self): # gh-6272 check overlap on out x = np.arange(5) - y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') + y = np.choose([0, 0, 0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') assert_equal(y, np.array([0, 1, 2])) + # gh_28206 check fail when out not writeable + x = np.arange(3) + out = np.zeros(3) + out.setflags(write=False) + assert_raises(ValueError, np.choose, [0, 1, 2], [x, x, x], out=out) + def test_prod(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] @@ -2054,6 +2188,7 @@ def check_round(arr, expected, *round_args): assert_equal(out, expected) assert out is res + check_round(np.array([1, 2, 3]), [1, 2, 3]) check_round(np.array([1.2, 1.5]), [1, 2]) check_round(np.array(1.5), 2) check_round(np.array([12.2, 15.5]), [10, 20], -1) @@ -2062,6 +2197,20 @@ def check_round(arr, expected, *round_args): check_round(np.array([4.5 + 1.5j]), [4 + 2j]) check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + @pytest.mark.parametrize('dt', ['uint8', int, float, complex]) + def test_round_copies(self, dt): + a = np.arange(3, dtype=dt) + assert not np.shares_memory(a.round(), a) + assert not np.shares_memory(a.round(decimals=2), a) + + out = np.empty(3, dtype=dt) + assert not np.shares_memory(a.round(out=out), a) + + a = np.arange(12).astype(dt).reshape(3, 4).T + + assert a.flags.f_contiguous + assert np.round(a).flags.f_contiguous + def test_squeeze(self): a = np.array([[[1], [2], [3]]]) assert_equal(a.squeeze(), [1, 2, 3]) @@ -2097,7 +2246,7 @@ def test_sort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` 
and keyword parameters can't be provided at the same time" ): np.sort(a, kind="stable", stable=True) @@ -2114,7 +2263,7 @@ def test_sort_unsigned(self, dtype): a = np.arange(101, dtype=dtype) b = a[::-1].copy() for kind in self.sort_kinds: - msg = "scalar sort, kind=%s" % kind + msg = f"scalar sort, kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2129,7 +2278,7 @@ def test_sort_signed(self, dtype): a = np.arange(-50, 51, dtype=dtype) b = a[::-1].copy() for kind in self.sort_kinds: - msg = "scalar sort, kind=%s" % (kind) + msg = f"scalar sort, kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2149,12 +2298,12 @@ def test_sort_complex(self, part, dtype): }[dtype] a = np.arange(-50, 51, dtype=dtype) b = a[::-1].copy() - ai = (a * (1+1j)).astype(cdtype) - bi = (b * (1+1j)).astype(cdtype) + ai = (a * (1 + 1j)).astype(cdtype) + bi = (b * (1 + 1j)).astype(cdtype) setattr(ai, part, 1) setattr(bi, part, 1) for kind in self.sort_kinds: - msg = "complex sort, %s part == 1, kind=%s" % (part, kind) + msg = f"complex sort, {part} part == 1, kind={kind}" c = ai.copy() c.sort(kind=kind) assert_equal(c, ai, msg) @@ -2166,10 +2315,10 @@ def test_sort_complex_byte_swapping(self): # test sorting of complex arrays requiring byte-swapping, gh-5441 for endianness in '<>': for dt in np.typecodes['Complex']: - arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) c = arr.copy() c.sort() - msg = 'byte-swapped complex sort, dtype={0}'.format(dt) + msg = f'byte-swapped complex sort, dtype={dt}' assert_equal(c, arr, msg) @pytest.mark.parametrize('dtype', [np.bytes_, np.str_]) @@ -2178,7 +2327,7 @@ def test_sort_string(self, dtype): a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype) b = a[::-1].copy() for kind in self.sort_kinds: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2192,7 +2341,7 @@ def 
test_sort_object(self): a[:] = list(range(101)) b = a[::-1] for kind in ['q', 'h', 'm']: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2206,10 +2355,10 @@ def test_sort_object(self): @pytest.mark.parametrize("step", [1, 2]) def test_sort_structured(self, dt, step): # test record array sorts. - a = np.array([(i, i) for i in range(101*step)], dtype=dt) + a = np.array([(i, i) for i in range(101 * step)], dtype=dt) b = a[::-1] for kind in ['q', 'h', 'm']: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy()[::step] indx = c.argsort(kind=kind) c.sort(kind=kind) @@ -2218,8 +2367,8 @@ def test_sort_structured(self, dt, step): c = b.copy()[::step] indx = c.argsort(kind=kind) c.sort(kind=kind) - assert_equal(c, a[step-1::step], msg) - assert_equal(b[::step][indx], a[step-1::step], msg) + assert_equal(c, a[step - 1::step], msg) + assert_equal(b[::step][indx], a[step - 1::step], msg) @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]']) def test_sort_time(self, dtype): @@ -2227,7 +2376,7 @@ def test_sort_time(self, dtype): a = np.arange(0, 101, dtype=dtype) b = a[::-1] for kind in ['q', 'h', 'm']: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2253,10 +2402,9 @@ def test_sort_axis(self): def test_sort_size_0(self): # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): - msg = 'test empty array sort with axis={0}'.format(axis) + msg = f'test empty array sort with axis={axis}' assert_equal(np.sort(a, axis=axis), a, msg) msg = 'test empty array sort with axis=None' assert_equal(np.sort(a, axis=None), a.ravel(), msg) @@ -2270,7 +2418,7 @@ def __lt__(self, other): a = np.array([Boom()] * 100, dtype=object) for kind in self.sort_kinds: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy() c.sort(kind=kind) 
assert_equal(c, a, msg) @@ -2289,11 +2437,12 @@ def test_void_sort(self): arr[::-1].sort() def test_sort_raises(self): - #gh-9404 + # gh-9404 arr = np.array([0, datetime.now(), 1], dtype=object) for kind in self.sort_kinds: assert_raises(TypeError, arr.sort, kind=kind) - #gh-3879 + # gh-3879 + class Raiser: def raises_anything(*args, **kwargs): raise TypeError("SOMETHING ERRORED") @@ -2356,6 +2505,20 @@ def test__deepcopy__(self, dtype): with pytest.raises(AssertionError): assert_array_equal(a, b) + def test__deepcopy___void_scalar(self): + # see comments in gh-29643 + value = np.void('Rex', dtype=[('name', 'U10')]) + value_deepcopy = value.__deepcopy__(None) + value[0] = None + assert value_deepcopy[0] == 'Rex' + + @pytest.mark.parametrize("sctype", [np.int64, np.float32, np.float64]) + def test__deepcopy__scalar(self, sctype): + # test optimization from gh-29656 + value = sctype(1.1) + value_deepcopy = value.__deepcopy__(None) + assert value is value_deepcopy + def test__deepcopy__catches_failure(self): class MyObj: def __deepcopy__(self, *args, **kwargs): @@ -2413,30 +2576,30 @@ def test_argsort(self): a = np.arange(101, dtype=dtype) b = a[::-1].copy() for kind in self.sort_kinds: - msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype) + msg = f"scalar argsort, kind={kind}, dtype={dtype}" assert_equal(a.copy().argsort(kind=kind), a, msg) assert_equal(b.copy().argsort(kind=kind), b, msg) # test complex argsorts. These use the same code as the scalars # but the compare function differs. 
- ai = a*1j + 1 - bi = b*1j + 1 + ai = a * 1j + 1 + bi = b * 1j + 1 for kind in self.sort_kinds: - msg = "complex argsort, kind=%s" % kind + msg = f"complex argsort, kind={kind}" assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) ai = a + 1j bi = b + 1j for kind in self.sort_kinds: - msg = "complex argsort, kind=%s" % kind + msg = f"complex argsort, kind={kind}" assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) # test argsort of complex arrays requiring byte-swapping, gh-5441 for endianness in '<>': for dt in np.typecodes['Complex']: - arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) - msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) + msg = f'byte-swapped complex argsort, dtype={dt}' assert_equal(arr.argsort(), np.arange(len(arr), dtype=np.intp), msg) @@ -2447,7 +2610,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in self.sort_kinds: - msg = "string argsort, kind=%s" % kind + msg = f"string argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2458,7 +2621,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in self.sort_kinds: - msg = "unicode argsort, kind=%s" % kind + msg = f"unicode argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2469,7 +2632,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in self.sort_kinds: - msg = "object argsort, kind=%s" % kind + msg = f"object argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2480,7 +2643,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in self.sort_kinds: - msg = "structured array argsort, kind=%s" % kind + msg = 
f"structured array argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2490,7 +2653,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: - msg = "datetime64 argsort, kind=%s" % kind + msg = f"datetime64 argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2500,7 +2663,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: - msg = "timedelta64 argsort, kind=%s" % kind + msg = f"timedelta64 argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2514,10 +2677,9 @@ def test_argsort(self): assert_equal(a.copy().argsort(), c) # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): - msg = 'test empty array argsort with axis={0}'.format(axis) + msg = f'test empty array argsort with axis={axis}' assert_equal(np.argsort(a, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argsort with axis=None' @@ -2541,7 +2703,7 @@ def test_argsort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` and keyword parameters can't be provided at the same time" ): np.argsort(a, kind="stable", stable=True) @@ -2560,10 +2722,10 @@ def test_searchsorted_floats(self, a): # test for floats arrays containing nans. Explicitly test # half, single, and double precision floats to verify that # the NaN-handling is correct. 
- msg = "Test real (%s) searchsorted with nans, side='l'" % a.dtype + msg = f"Test real ({a.dtype}) searchsorted with nans, side='l'" b = a.searchsorted(a, side='left') assert_equal(b, np.arange(3), msg) - msg = "Test real (%s) searchsorted with nans, side='r'" % a.dtype + msg = f"Test real ({a.dtype}) searchsorted with nans, side='r'" b = a.searchsorted(a, side='right') assert_equal(b, np.arange(1, 4), msg) # check keyword arguments @@ -2714,7 +2876,7 @@ def test_searchsorted_with_sorter(self): k = np.linspace(0, 1, 20) assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s)) - a = np.array([0, 1, 2, 3, 5]*20) + a = np.array([0, 1, 2, 3, 5] * 20) s = a.argsort() k = [0, 1, 2, 3, 5] expected = [0, 20, 40, 60, 80] @@ -2832,10 +2994,9 @@ def test_partition_integer(self): def test_partition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): - msg = 'test empty array partition with axis={0}'.format(axis) + msg = f'test empty array partition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), a, msg) msg = 'test empty array partition with axis=None' assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg) @@ -2844,10 +3005,9 @@ def test_partition_empty_array(self, kth_dtype): def test_argpartition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): - msg = 'test empty array argpartition with axis={0}'.format(axis) + msg = f'test empty array argpartition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argpartition with axis=None' @@ -3080,72 +3240,72 @@ def assert_partitioned(self, d, kth): 
prev = k + 1 def test_partition_iterative(self): - d = np.arange(17) - kth = (0, 1, 2, 429, 231) - assert_raises(ValueError, d.partition, kth) - assert_raises(ValueError, d.argpartition, kth) - d = np.arange(10).reshape((2, 5)) - assert_raises(ValueError, d.partition, kth, axis=0) - assert_raises(ValueError, d.partition, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=None) - - d = np.array([3, 4, 2, 1]) - p = np.partition(d, (0, 3)) - self.assert_partitioned(p, (0, 3)) - self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) - - assert_array_equal(p, np.partition(d, (-3, -1))) - assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) - - d = np.arange(17) - np.random.shuffle(d) - d.partition(range(d.size)) - assert_array_equal(np.arange(17), d) - np.random.shuffle(d) - assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) - - # test unsorted kth - d = np.arange(17) - np.random.shuffle(d) - keys = np.array([1, 3, 8, -2]) - np.random.shuffle(d) - p = np.partition(d, keys) - self.assert_partitioned(p, keys) - p = d[np.argpartition(d, keys)] - self.assert_partitioned(p, keys) - np.random.shuffle(keys) - assert_array_equal(np.partition(d, keys), p) - assert_array_equal(d[np.argpartition(d, keys)], p) - - # equal kth - d = np.arange(20)[::-1] - self.assert_partitioned(np.partition(d, [5]*4), [5]) - self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), - [5]*4 + [6, 13]) - self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) - self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], - [5]*4 + [6, 13]) - - d = np.arange(12) - np.random.shuffle(d) - d1 = np.tile(np.arange(12), (4, 1)) - map(np.random.shuffle, d1) - d0 = np.transpose(d1) - - kth = (1, 6, 7, -1) - p = np.partition(d1, kth, axis=1) - pa = d1[np.arange(d1.shape[0])[:, None], - d1.argpartition(kth, axis=1)] - assert_array_equal(p, pa) - for i in range(d1.shape[0]): - 
self.assert_partitioned(p[i,:], kth) - p = np.partition(d0, kth, axis=0) - pa = d0[np.argpartition(d0, kth, axis=0), - np.arange(d0.shape[1])[None,:]] - assert_array_equal(p, pa) - for i in range(d0.shape[1]): - self.assert_partitioned(p[:, i], kth) + d = np.arange(17) + kth = (0, 1, 2, 429, 231) + assert_raises(ValueError, d.partition, kth) + assert_raises(ValueError, d.argpartition, kth) + d = np.arange(10).reshape((2, 5)) + assert_raises(ValueError, d.partition, kth, axis=0) + assert_raises(ValueError, d.partition, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=None) + + d = np.array([3, 4, 2, 1]) + p = np.partition(d, (0, 3)) + self.assert_partitioned(p, (0, 3)) + self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + assert_array_equal(p, np.partition(d, (-3, -1))) + assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) + + d = np.arange(17) + np.random.shuffle(d) + d.partition(range(d.size)) + assert_array_equal(np.arange(17), d) + np.random.shuffle(d) + assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) + + # test unsorted kth + d = np.arange(17) + np.random.shuffle(d) + keys = np.array([1, 3, 8, -2]) + np.random.shuffle(d) + p = np.partition(d, keys) + self.assert_partitioned(p, keys) + p = d[np.argpartition(d, keys)] + self.assert_partitioned(p, keys) + np.random.shuffle(keys) + assert_array_equal(np.partition(d, keys), p) + assert_array_equal(d[np.argpartition(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + self.assert_partitioned(np.partition(d, [5] * 4), [5]) + self.assert_partitioned(np.partition(d, [5] * 4 + [6, 13]), + [5] * 4 + [6, 13]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4)], [5]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4 + [6, 13])], + [5] * 4 + [6, 13]) + + d = np.arange(12) + np.random.shuffle(d) + d1 = np.tile(np.arange(12), (4, 1)) + map(np.random.shuffle, d1) + d0 = np.transpose(d1) + + kth = (1, 
6, 7, -1) + p = np.partition(d1, kth, axis=1) + pa = d1[np.arange(d1.shape[0])[:, None], + d1.argpartition(kth, axis=1)] + assert_array_equal(p, pa) + for i in range(d1.shape[0]): + self.assert_partitioned(p[i, :], kth) + p = np.partition(d0, kth, axis=0) + pa = d0[np.argpartition(d0, kth, axis=0), + np.arange(d0.shape[1])[None, :]] + assert_array_equal(p, pa) + for i in range(d0.shape[1]): + self.assert_partitioned(p[:, i], kth) def test_partition_cdtype(self): d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), @@ -3189,7 +3349,7 @@ def test_partition_fuzz(self): kth = [0, idx, i, i + 1] tgt = np.sort(d)[kth] assert_array_equal(np.partition(d, kth)[kth], tgt, - err_msg="data: %r\n kth: %r" % (d, kth)) + err_msg=f"data: {d!r}\n kth: {kth!r}") @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"]) def test_argpartition_gh5524(self, kth_dtype): @@ -3197,7 +3357,7 @@ def test_argpartition_gh5524(self, kth_dtype): kth = np.array(1, dtype=kth_dtype)[()] d = [6, 7, 3, 2, 9, 0] p = np.argpartition(d, kth) - self.assert_partitioned(np.array(d)[p],[1]) + self.assert_partitioned(np.array(d)[p], [1]) def test_flatten(self): x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32) @@ -3213,7 +3373,6 @@ def test_flatten(self): assert_equal(x1.flatten('F'), y1f) assert_equal(x1.flatten('F'), x1.T.flatten()) - @pytest.mark.parametrize('func', (np.dot, np.matmul)) def test_arr_mult(self, func): a = np.array([[1, 0], [0, 1]]) @@ -3235,7 +3394,6 @@ def test_arr_mult(self, func): [684, 740, 796, 852, 908, 964]] ) - # gemm vs syrk optimizations for et in [np.float32, np.float64, np.complex64, np.complex128]: eaf = a.astype(et) @@ -3340,9 +3498,38 @@ def test_dot(self): a.dot(b=b, out=c) assert_equal(c, np.dot(a, b)) + @pytest.mark.parametrize("dtype", [np.half, np.double, np.longdouble]) + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_dot_errstate(self, dtype): + # Some dtypes use BLAS for 'dot' operation and + # not all BLAS support 
floating-point errors. + if not BLAS_SUPPORTS_FPE and dtype == np.double: + pytest.skip("BLAS does not support FPE") + + a = np.array([1, 1], dtype=dtype) + b = np.array([-np.inf, np.inf], dtype=dtype) + + with np.errstate(invalid='raise'): + # there are two paths, depending on the number of dimensions - test + # them both + with pytest.raises(FloatingPointError, + match="invalid value encountered in dot"): + np.dot(a, b) + + # test that fp exceptions are properly cleared + np.dot(a, a) + + with pytest.raises(FloatingPointError, + match="invalid value encountered in dot"): + np.dot(a[np.newaxis, np.newaxis, ...], + b[np.newaxis, ..., np.newaxis]) + + np.dot(a[np.newaxis, np.newaxis, ...], + a[np.newaxis, ..., np.newaxis]) + def test_dot_type_mismatch(self): c = 1. - A = np.array((1,1), dtype='i,i') + A = np.array((1, 1), dtype='i,i') assert_raises(TypeError, np.dot, c, A) assert_raises(TypeError, np.dot, A, c) @@ -3513,12 +3700,12 @@ def test_put(self): # test 1-d a = np.zeros(6, dtype=dt) - a.put([1, 3, 5], [True]*3) + a.put([1, 3, 5], [True] * 3) assert_equal(a, tgt) # test 2-d a = np.zeros((2, 3), dtype=dt) - a.put([1, 3, 5], [True]*3) + a.put([1, 3, 5], [True] * 3) assert_equal(a, tgt.reshape(2, 3)) # check must be writeable @@ -3535,7 +3722,7 @@ def test_put(self): # when calling np.put, make sure an # IndexError is raised if the # array is empty - empty_array = np.asarray(list()) + empty_array = np.asarray([]) with pytest.raises(IndexError, match="cannot replace elements of an empty array"): np.put(empty_array, 1, 1, mode="wrap") @@ -3543,7 +3730,6 @@ def test_put(self): match="cannot replace elements of an empty array"): np.put(empty_array, 1, 1, mode="clip") - def test_ravel(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) @@ -3586,7 +3772,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) 
assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) @@ -3595,7 +3781,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel(order='K'), np.arange(2**3)) @@ -3608,7 +3794,7 @@ def test_ravel(self): # 1-element tidy strides test: a = np.array([[1]]) - a.strides = (123, 432) + a = stride_tricks.as_strided(a, strides=(123, 432)) if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) @@ -3649,8 +3835,20 @@ class ArraySubclass(np.ndarray): assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) + @pytest.mark.parametrize("shape", [(3, 224, 224), (8, 512, 512)]) + def test_tobytes_no_copy_fastpath(self, shape): + # Test correctness of non-contiguous paths for `tobytes` + rng = np.random.default_rng(0) + arr = rng.standard_normal(shape, dtype=np.float32) + noncontig = arr.transpose(1, 2, 0) + + # correctness + expected = np.ascontiguousarray(noncontig).tobytes() + got = noncontig.tobytes() + assert got == expected + def test_swapaxes(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) assert_(a.flags['OWNDATA']) b = a.copy() @@ -3670,8 +3868,8 @@ def test_swapaxes(self): shape[j] = src.shape[i] assert_equal(c.shape, shape, str((i, j, k))) # check array contents - i0, i1, i2, i3 = [dim-1 for dim in c.shape] - j0, j1, j2, j3 = [dim-1 for dim in src.shape] + i0, i1, i2, i3 = [dim - 1 for dim in c.shape] + j0, j1, j2, j3 = [dim - 1 for dim in src.shape] assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]], c[idx[i0], idx[i1], idx[i2], idx[i3]], str((i, j, k))) @@ -3682,14 +3880,14 @@ def test_swapaxes(self): b = c def 
test_conjugate(self): - a = np.array([1-1j, 1+1j, 23+23.0j]) + a = np.array([1 - 1j, 1 + 1j, 23 + 23.0j]) ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) - a = np.array([1-1j, 1+1j, 23+23.0j], 'F') + a = np.array([1 - 1j, 1 + 1j, 23 + 23.0j], 'F') ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) @@ -3708,25 +3906,34 @@ def test_conjugate(self): assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) - a = np.array([1-1j, 1+1j, 1, 2.0], object) + a = np.array([1 - 1j, 1 + 1j, 1, 2.0], object) ac = a.conj() assert_equal(ac, [k.conjugate() for k in a]) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) - a = np.array([1-1j, 1, 2.0, 'f'], object) - assert_raises(TypeError, lambda: a.conj()) - assert_raises(TypeError, lambda: a.conjugate()) + a = np.array([1 - 1j, 1, 2.0, 'f'], object) + assert_raises(TypeError, a.conj) + assert_raises(TypeError, a.conjugate) def test_conjugate_out(self): # Minimal test for the out argument being passed on correctly # NOTE: The ability to pass `out` is currently undocumented! 
- a = np.array([1-1j, 1+1j, 23+23.0j]) + a = np.array([1 - 1j, 1 + 1j, 23 + 23.0j]) out = np.empty_like(a) res = a.conjugate(out) assert res is out assert_array_equal(out, a.conjugate()) + def test_conjugate_scalar(self): + for v in 5, 5j: + a = np.array(v) + assert a.conjugate() == v.conjugate() + for a in (np.array('s'), np.array('2016', 'M'), + np.array((1, 2), [('a', int), ('b', int)])): + with pytest.raises(TypeError): + a.conjugate() + def test__complex__(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', @@ -3734,21 +3941,10 @@ def test__complex__(self): '?', 'O'] for dt in dtypes: a = np.array(7, dtype=dt) - b = np.array([7], dtype=dt) - c = np.array([[[[[7]]]]], dtype=dt) - - msg = 'dtype: {0}'.format(dt) + msg = f'dtype: {dt}' ap = complex(a) assert_equal(ap, a, msg) - with assert_warns(DeprecationWarning): - bp = complex(b) - assert_equal(bp, b, msg) - - with assert_warns(DeprecationWarning): - cp = complex(c) - assert_equal(cp, c, msg) - def test__complex__should_not_work(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', @@ -3756,7 +3952,11 @@ def test__complex__should_not_work(self): '?', 'O'] for dt in dtypes: a = np.array([1, 2, 3], dtype=dt) + b = np.array([7], dtype=dt) + c = np.array([[[[[7]]]]], dtype=dt) assert_raises(TypeError, complex, a) + assert_raises(TypeError, complex, b) + assert_raises(TypeError, complex, c) dt = np.dtype([('a', 'f8'), ('b', 'i1')]) b = np.array((1.0, 3), dtype=dt) @@ -3769,13 +3969,12 @@ def test__complex__should_not_work(self): assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - with assert_warns(DeprecationWarning): - assert_raises(TypeError, complex, e) + assert_raises(TypeError, complex, e) class TestCequenceMethods: def test_array_contains(self): - assert_(4.0 in np.arange(16.).reshape(4,4)) - assert_(20.0 not in np.arange(16.).reshape(4,4)) + assert_(4.0 in np.arange(16.).reshape(4, 4)) + assert_(20.0 not in np.arange(16.).reshape(4, 4)) class TestBinop: def 
test_inplace(self): @@ -3869,9 +4068,9 @@ def make_obj(base, array_priority=False, array_ufunc=False, if array_priority is not False: class_namespace["__array_priority__"] = array_priority for op in ops: - class_namespace["__{0}__".format(op)] = op_impl - class_namespace["__r{0}__".format(op)] = rop_impl - class_namespace["__i{0}__".format(op)] = iop_impl + class_namespace[f"__{op}__"] = op_impl + class_namespace[f"__r{op}__"] = rop_impl + class_namespace[f"__i{op}__"] = iop_impl if array_ufunc is not False: class_namespace["__array_ufunc__"] = array_ufunc eval_namespace = {"base": base, @@ -3896,7 +4095,7 @@ def check(obj, binop_override_expected, ufunc_override_expected, if check_scalar: check_objs.append(check_objs[0][0]) for arr in check_objs: - arr_method = getattr(arr, "__{0}__".format(op)) + arr_method = getattr(arr, f"__{op}__") def first_out_arg(result): if op == "divmod": @@ -3911,39 +4110,37 @@ def first_out_arg(result): elif ufunc_override_expected: assert_equal(arr_method(obj)[0], "__array_ufunc__", err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_method(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_method, obj, err_msg=err_msg) + assert_raises((TypeError, Coerced), + arr_method, obj, err_msg=err_msg) # obj __op__ arr - arr_rmethod = getattr(arr, "__r{0}__".format(op)) + arr_rmethod = getattr(arr, f"__r{op}__") if ufunc_override_expected: res = arr_rmethod(obj) assert_equal(res[0], "__array_ufunc__", err_msg=err_msg) assert_equal(res[1], ufunc, err_msg=err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + 
np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_rmethod(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - # __array_ufunc__ = "asdf" creates a TypeError - assert_raises((TypeError, Coerced), - arr_rmethod, obj, err_msg=err_msg) + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, err_msg=err_msg) # arr __iop__ obj # array scalars don't have in-place operators if has_inplace and isinstance(arr, np.ndarray): - arr_imethod = getattr(arr, "__i{0}__".format(op)) + arr_imethod = getattr(arr, f"__i{op}__") if inplace_override_expected: assert_equal(arr_method(obj), NotImplemented, err_msg=err_msg) @@ -3953,16 +4150,15 @@ def first_out_arg(result): assert_equal(res[1], ufunc, err_msg) assert_(type(res[-1]["out"]) is tuple, err_msg) assert_(res[-1]["out"][0] is arr, err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + assert_(arr_imethod(obj) is arr, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - assert_(arr_imethod(obj) is arr, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_imethod, obj, - err_msg=err_msg) + assert_raises((TypeError, Coerced), + arr_imethod, obj, + err_msg=err_msg) op_fn = getattr(operator, op, None) if op_fn is None: @@ -4123,27 +4319,6 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kw): assert_equal(A[0], 30) assert_(isinstance(A, OutClass)) - def test_pow_override_with_errors(self): - # regression test for gh-9112 - class PowerOnly(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - if ufunc is not 
np.power: - raise NotImplementedError - return "POWER!" - # explicit cast to float, to ensure the fast power path is taken. - a = np.array(5., dtype=np.float64).view(PowerOnly) - assert_equal(a ** 2.5, "POWER!") - with assert_raises(NotImplementedError): - a ** 0.5 - with assert_raises(NotImplementedError): - a ** 0 - with assert_raises(NotImplementedError): - a ** 1 - with assert_raises(NotImplementedError): - a ** -1 - with assert_raises(NotImplementedError): - a ** 2 - def test_pow_array_object_dtype(self): # test pow on arrays of object dtype class SomeClass: @@ -4154,8 +4329,8 @@ def __init__(self, num=None): def __mul__(self, other): raise AssertionError('__mul__ should not be called') - def __div__(self, other): - raise AssertionError('__div__ should not be called') + def __truediv__(self, other): + raise AssertionError('__truediv__ should not be called') def __pow__(self, exp): return SomeClass(num=self.num ** exp) @@ -4177,6 +4352,13 @@ def pow_for(exp, arr): assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + def test_pos_array_ufunc_override(self): class A(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): @@ -4334,6 +4516,37 @@ def test_intp_sequence_converters_errors(self, converter): # These converters currently convert overflows to a ValueError converter(2**64) + @pytest.mark.parametrize( + "entry_point", + [ + module + item + for item in ("sin", "strings.str_len", "fft._pocketfft_umath.ifft") + for module in ("", "numpy:") + ] + [ + "numpy.strings:str_len", + "functools:reduce", + "functools:reduce.__doc__" + ] + ) + def test_import_entry_point(self, entry_point): + modname, _, items = entry_point.rpartition(":") + if modname: + module = obj = 
importlib.import_module(modname) + else: + module = np + exp = functools.reduce(getattr, items.split("."), module) + got = _multiarray_tests.npy_import_entry_point(entry_point) + assert got == exp + + @pytest.mark.parametrize( + "entry_point", + ["sin.", "numpy:", "numpy:sin:__call__", "numpy.sin:__call__."] + ) + def test_import_entry_point_errors(self, entry_point): + # Don't really care about precise error. + with pytest.raises((ImportError, AttributeError)): + _multiarray_tests.npy_import_entry_point(entry_point) + class TestSubscripting: def test_test_zero_rank(self): @@ -4388,6 +4601,41 @@ def test_f_contiguous_array(self): assert_equal(f_contiguous_array, depickled_f_contiguous_array) + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + @pytest.mark.parametrize('transposed_contiguous_array', + [np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), + np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2))] + + [np.random.default_rng(42).random(np.arange(2, 7)).transpose(np.random.permutation(5)) for _ in range(3)]) + def test_transposed_contiguous_array(self, transposed_contiguous_array): + buffers = [] + # When using pickle protocol 5, arrays which can be transposed to c_contiguous + # can be serialized using out-of-band buffers + bytes_string = pickle.dumps(transposed_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_transposed_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(transposed_contiguous_array, depickled_transposed_contiguous_array) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + def test_load_legacy_pkl_protocol5(self): + # legacy byte strs are dumped in 2.2.1 + c_contiguous_dumped = 
b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01C\x94t\x94R\x94.' # noqa: E501 + f_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01F\x94t\x94R\x94.' # noqa: E501 + transposed_contiguous_dumped = b'\x80\x05\x95\xa5\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x04K\x03K\x02\x87\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x18\x00\x01\x08\t\x10\x11\x02\x03\n\x0b\x12\x13\x04\x05\x0c\r\x14\x15\x06\x07\x0e\x0f\x16\x17\x94t\x94b.' # noqa: E501 + no_contiguous_dumped = b'\x80\x05\x95\x91\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x03K\x02\x86\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x06\x00\x01\x04\x05\x08\t\x94t\x94b.' 
# noqa: E501 + x = np.arange(24, dtype='uint8').reshape(3, 4, 2) + assert_equal(x, pickle.loads(c_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2, order='F') + assert_equal(x, pickle.loads(f_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2).transpose((1, 0, 2)) + assert_equal(x, pickle.loads(transposed_contiguous_dumped)) + x = np.arange(12, dtype='uint8').reshape(3, 4)[:, :2] + assert_equal(x, pickle.loads(no_contiguous_dumped)) + def test_non_contiguous_array(self): non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] assert not non_contiguous_array.flags.c_contiguous @@ -4395,12 +4643,16 @@ def test_non_contiguous_array(self): # make sure non-contiguous arrays can be pickled-depickled # using any protocol + buffers = [] for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): depickled_non_contiguous_array = pickle.loads( - pickle.dumps(non_contiguous_array, protocol=proto)) + pickle.dumps(non_contiguous_array, protocol=proto, + buffer_callback=buffers.append if proto >= 5 else None)) + assert_equal(len(buffers), 0) assert_equal(non_contiguous_array, depickled_non_contiguous_array) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_roundtrip(self): for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): carray = np.array([[2, 9], [7, 0], [3, 8]]) @@ -4415,7 +4667,7 @@ def test_roundtrip(self): for a in DATA: assert_equal( a, pickle.loads(pickle.dumps(a, protocol=proto)), - err_msg="%r" % a) + err_msg=f"{a!r}") del a, DATA, carray break_cycles() # check for reference leaks (gh-12793) @@ -4427,45 +4679,59 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_int8(self): - s = 
b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." # noqa + s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_float32(self): - s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01 16MB + tmp_filename = normalize_filename(tmp_path, param_filename) d = np.zeros(4 * 1024 ** 2) d.tofile(tmp_filename) assert_equal(os.path.getsize(tmp_filename), d.nbytes) @@ -5528,22 +5819,25 @@ def test_largish_file(self, tmp_filename): d.tofile(f) assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self, x, tmp_filename): + def test_io_open_buffered_fromfile(self, tmp_path, param_filename): # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_file_position_after_fromfile(self, tmp_filename): + def test_file_position_after_fromfile(self, tmp_path, param_filename): # gh-4118 - sizes = [io.DEFAULT_BUFFER_SIZE//8, + sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, - io.DEFAULT_BUFFER_SIZE*8] + io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: with open(tmp_filename, 'wb') as f: - f.seek(size-1) + f.seek(size - 1) 
f.write(b'\0') for mode in ['rb', 'r+b']: @@ -5555,17 +5849,18 @@ def test_file_position_after_fromfile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_file_position_after_tofile(self, tmp_filename): + def test_file_position_after_tofile(self, tmp_path, param_filename): # gh-4118 - sizes = [io.DEFAULT_BUFFER_SIZE//8, + sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, - io.DEFAULT_BUFFER_SIZE*8] + io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: err_msg = "%d" % (size,) with open(tmp_filename, 'wb') as f: - f.seek(size-1) + f.seek(size - 1) f.write(b'\0') f.seek(10) f.write(b'12') @@ -5580,8 +5875,9 @@ def test_file_position_after_tofile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_load_object_array_fromfile(self, tmp_filename): + def test_load_object_array_fromfile(self, tmp_path, param_filename): # gh-12300 + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'w') as f: # Ensure we have a file with consistent contents pass @@ -5593,7 +5889,9 @@ def test_load_object_array_fromfile(self, tmp_filename): assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, tmp_filename, dtype=object) - def test_fromfile_offset(self, x, tmp_filename): + def test_fromfile_offset(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5609,13 +5907,13 @@ def test_fromfile_offset(self, x, tmp_filename): f, dtype=x.dtype, count=count_items, offset=offset_bytes ) assert_array_equal( - y, x.flat[offset_items:offset_items+count_items] + y, x.flat[offset_items:offset_items + count_items] ) # subsequent seeks should stack offset_bytes = x.dtype.itemsize z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes) - assert_array_equal(z, x.flat[offset_items+count_items+1:]) + 
assert_array_equal(z, x.flat[offset_items + count_items + 1:]) with open(tmp_filename, 'wb') as f: x.tofile(f, sep=",") @@ -5628,22 +5926,21 @@ def test_fromfile_offset(self, x, tmp_filename): sep=",", offset=1) @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") - def test_fromfile_bad_dup(self, x, tmp_filename): + def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch): def dup_str(fd): return 'abc' def dup_bigint(fd): return 2**68 - old_dup = os.dup - try: - with open(tmp_filename, 'wb') as f: - x.tofile(f) - for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): - os.dup = dup - assert_raises(exc, np.fromfile, f) - finally: - os.dup = old_dup + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() + + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + monkeypatch.setattr(os, "dup", dup) + assert_raises(exc, np.fromfile, f) def _check_from(self, s, value, filename, **kw): if 'sep' not in kw: @@ -5685,38 +5982,44 @@ def test_decimal_comma_separator(): else: assert False, request.param - def test_nan(self, tmp_filename, decimal_sep_localization): + def test_nan(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], tmp_filename, sep=' ') - def test_inf(self, tmp_filename, decimal_sep_localization): + def test_inf(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], tmp_filename, sep=' ') - def test_numbers(self, tmp_filename, decimal_sep_localization): + def test_numbers(self, tmp_path, param_filename, decimal_sep_localization): + 
tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], tmp_filename, sep=' ') - def test_binary(self, tmp_filename): + def test_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), tmp_filename, dtype=' 0) assert_(issubclass(w[0].category, RuntimeWarning)) @@ -6356,7 +6694,8 @@ def test_empty(self): assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] @@ -6414,7 +6753,8 @@ def test_mean_where(self): assert_equal(np.mean(a, where=False), np.nan) def test_var_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6428,9 +6768,10 @@ def test_var_values(self): ('clongdouble', 7), )) def test_var_complex_values(self, complex_dtype, ndec): + _, cmat, _ = self._create_data() # Test fast-paths for every builtin complex type for axis in [0, 1, None]: - mat = self.cmat.copy().astype(complex_dtype) + mat = cmat.copy().astype(complex_dtype) msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() @@ -6440,7 +6781,8 @@ def test_var_complex_values(self, complex_dtype, ndec): def test_var_dimensions(self): # _var paths for complex number introduce additions on views that # increase dimensions. 
Ensure this generalizes to higher dims - mat = np.stack([self.cmat]*3) + _, cmat, _ = self._create_data() + mat = np.stack([cmat] * 3) for axis in [0, 1, 2, -1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6451,7 +6793,8 @@ def test_var_dimensions(self): def test_var_complex_byteorder(self): # Test that var fast-path does not cause failures for complex arrays # with non-native byteorder - cmat = self.cmat.copy().astype('complex128') + _, cmat, _ = self._create_data() + cmat = cmat.copy().astype('complex128') cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) assert_almost_equal(cmat.var(), cmat_swapped.var()) @@ -6492,21 +6835,22 @@ def test_var_where(self): assert_allclose(np.var(a, axis=1, where=wh_full), np.var(a[wh_full].reshape((5, 3)), axis=1)) assert_allclose(np.var(a, axis=0, where=wh_partial), - np.var(a[wh_partial[:,0]], axis=0)) + np.var(a[wh_partial[:, 0]], axis=0)) with pytest.warns(RuntimeWarning) as w: assert_equal(a.var(where=False), np.nan) with pytest.warns(RuntimeWarning) as w: assert_equal(np.var(a, where=False), np.nan) def test_std_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) assert_almost_equal(res, tgt) def test_std_where(self): - a = np.arange(25).reshape((5,5))[::-1] + a = np.arange(25).reshape((5, 5))[::-1] whf = np.array([[False, True, False, True, True], [True, False, True, False, True], [True, True, False, True, False], @@ -6518,11 +6862,11 @@ def test_std_where(self): [True], [False]]) _cases = [ - (0, True, 7.07106781*np.ones(5)), - (1, True, 1.41421356*np.ones(5)), + (0, True, 7.07106781 * np.ones(5)), + (1, True, 1.41421356 * np.ones(5)), (0, whf, - np.array([4.0824829 , 8.16496581, 5., 7.39509973, 8.49836586])), - (0, whp, 2.5*np.ones(5)) + np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])), + (0, whp, 
2.5 * np.ones(5)) ] for _ax, _wh, _res in _cases: assert_allclose(a.std(axis=_ax, where=_wh), _res) @@ -6537,13 +6881,13 @@ def test_std_where(self): np.array(_res)) assert_allclose(a.std(axis=1, where=whf), - np.std(a[whf].reshape((5,3)), axis=1)) + np.std(a[whf].reshape((5, 3)), axis=1)) assert_allclose(np.std(a, axis=1, where=whf), - (a[whf].reshape((5,3))).std(axis=1)) + (a[whf].reshape((5, 3))).std(axis=1)) assert_allclose(a.std(axis=0, where=whp), - np.std(a[whp[:,0]], axis=0)) + np.std(a[whp[:, 0]], axis=0)) assert_allclose(np.std(a, axis=0, where=whp), - (a[whp[:,0]]).std(axis=0)) + (a[whp[:, 0]]).std(axis=0)) with pytest.warns(RuntimeWarning) as w: assert_equal(a.std(where=False), np.nan) with pytest.warns(RuntimeWarning) as w: @@ -6630,63 +6974,65 @@ def test_vdot_uncontiguous(self): class TestDot: - def setup_method(self): - np.random.seed(128) - self.A = np.random.rand(4, 2) - self.b1 = np.random.rand(2, 1) - self.b2 = np.random.rand(2) - self.b3 = np.random.rand(1, 2) - self.b4 = np.random.rand(4) - self.N = 7 + N = 7 + + def _create_data(self): + rng = np.random.RandomState(128) + A = rng.random((4, 2)) + b1 = rng.random((2, 1)) + b2 = rng.random(2) + b3 = rng.random((1, 2)) + b4 = rng.random(4) + return A, b1, b2, b3, b4 def test_dotmatmat(self): - A = self.A + A, _, _, _, _ = self._create_data() res = np.dot(A.transpose(), A) tgt = np.array([[1.45046013, 0.86323640], [0.86323640, 0.84934569]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec(self): - A, b1 = self.A, self.b1 + A, b1, _, _, _ = self._create_data() res = np.dot(A, b1) tgt = np.array([[0.32114320], [0.04889721], [0.15696029], [0.33612621]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec2(self): - A, b2 = self.A, self.b2 + A, _, b2, _, _ = self._create_data() res = np.dot(A, b2) tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat(self): - A, b4 = self.A, self.b4 + A, _, _, _, 
b4 = self._create_data() res = np.dot(b4, A) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat2(self): - b3, A = self.b3, self.A + A, _, _, b3, _ = self._create_data() res = np.dot(b3, A.transpose()) tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat3(self): - A, b4 = self.A, self.b4 + A, _, _, _, b4 = self._create_data() res = np.dot(A.transpose(), b4) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecouter(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b1, b3) tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecinner(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b3, b1) - tgt = np.array([[ 0.23129668]]) + tgt = np.array([[0.23129668]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotcolumnvect1(self): @@ -6704,19 +7050,19 @@ def test_dotcolumnvect2(self): assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar(self): - np.random.seed(100) - b1 = np.random.rand(1, 1) - b2 = np.random.rand(1, 4) + rng = np.random.RandomState(100) + b1 = rng.random((1, 1)) + b2 = rng.random((1, 4)) res = np.dot(b1, b2) tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar2(self): - np.random.seed(100) - b1 = np.random.rand(4, 1) - b2 = np.random.rand(1, 1) + rng = np.random.RandomState(100) + b1 = rng.random((4, 1)) + b2 = rng.random((1, 1)) res = np.dot(b1, b2) - tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) + tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]]) assert_almost_equal(res, tgt, decimal=self.N) def test_all(self): @@ -6753,7 +7099,7 @@ def __mul__(self, other): # with scalar 
return out def __rmul__(self, other): - return self*other + return self * other U_non_cont = np.transpose([[1., 1.], [1., 2.]]) U_cont = np.ascontiguousarray(U_non_cont) @@ -6779,10 +7125,12 @@ def test_dot_3args(self): v = np.random.random_sample((16, 32)) r = np.empty((1024, 32)) + if HAS_REFCOUNT: + orig_refcount = sys.getrefcount(r) for i in range(12): dot(f, v, r) if HAS_REFCOUNT: - assert_equal(sys.getrefcount(r), 2) + assert_equal(sys.getrefcount(r), orig_refcount) r2 = dot(f, v, out=None) assert_array_equal(r2, r) assert_(r is dot(f, v, out=r)) @@ -6858,7 +7206,7 @@ def aligned_array(shape, align, dtype, order='C'): for offset in range(align): if (address + offset) % align == 0: break - tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) + tmp = tmp[offset:offset + N * d.nbytes].view(dtype=dtype) return tmp.reshape(shape, order=order) def as_aligned(arr, align, dtype, order='C'): @@ -6915,12 +7263,13 @@ def assert_dot_close(A, X, desired): @pytest.mark.slow @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @requires_memory(free_bytes=18e9) # complex case needs 18GiB+ + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_huge_vectordot(self, dtype): # Large vector multiplications are chunked with 32bit BLAS # Test that the chunking does the right thing, see also gh-22262 - data = np.ones(2**30+100, dtype=dtype) + data = np.ones(2**30 + 100, dtype=dtype) res = np.dot(data, data) - assert res == 2**30+100 + assert res == 2**30 + 100 def test_dtype_discovery_fails(self): # See gh-14247, error checking was missing for failed dtype discovery @@ -6982,7 +7331,7 @@ def test_shapes(self): assert_(np.array(c).shape == ()) def test_result_types(self): - mat = np.ones((1,1)) + mat = np.ones((1, 1)) vec = np.ones((1,)) for dt in self.types: m = mat.astype(dt) @@ -7035,9 +7384,9 @@ def test_vector_vector_values(self): def test_vector_matrix_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.stack([mat1]*2, 
axis=0) + mat2 = np.stack([mat1] * 2, axis=0) tgt1 = np.array([7, 10]) - tgt2 = np.stack([tgt1]*2, axis=0) + tgt2 = np.stack([tgt1] * 2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) @@ -7050,9 +7399,9 @@ def test_vector_matrix_values(self): # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) - mat2 = np.stack([mat1]*2, axis=0) + mat2 = np.stack([mat1] * 2, axis=0) tgt1 = np.array([True, False]) - tgt2 = np.stack([tgt1]*2, axis=0) + tgt2 = np.stack([tgt1] * 2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) @@ -7062,9 +7411,9 @@ def test_vector_matrix_values(self): def test_matrix_vector_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.stack([mat1]*2, axis=0) + mat2 = np.stack([mat1] * 2, axis=0) tgt1 = np.array([5, 11]) - tgt2 = np.stack([tgt1]*2, axis=0) + tgt2 = np.stack([tgt1] * 2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) @@ -7077,9 +7426,9 @@ def test_matrix_vector_values(self): # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) - mat2 = np.stack([mat1]*2, axis=0) + mat2 = np.stack([mat1] * 2, axis=0) tgt1 = np.array([True, False]) - tgt2 = np.stack([tgt1]*2, axis=0) + tgt2 = np.stack([tgt1] * 2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) @@ -7181,8 +7530,8 @@ def test_out_arg(self): out = np.zeros((5, 2), dtype=np.complex128) c = self.matmul(a, b, out=out) assert_(c is out) - with suppress_warnings() as sup: - sup.filter(ComplexWarning, '') + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) c = c.astype(tgt.dtype) assert_array_equal(c, tgt) @@ -7229,6 +7578,7 @@ def test_out_contiguous(self): vc = np.arange(10.) vr = np.arange(6.) 
m0 = np.zeros((3, 0)) + @pytest.mark.parametrize('args', ( # matrix-matrix (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), @@ -7256,10 +7606,39 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) + # issue 29164 with extra checks + @pytest.mark.parametrize('dtype', ( + np.float32, np.float64, np.complex64, np.complex128 + )) + def test_dot_equivalent_matrix_matrix_blastypes(self, dtype): + modes = list(itertools.product(['C', 'F'], [True, False])) + + def apply_mode(m, mode): + order, is_contiguous = mode + if is_contiguous: + return m.copy() if order == 'C' else m.T.copy().T + + retval = np.zeros( + (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order + )[::2, ::2] + retval[...] = m + return retval + + is_complex = np.issubdtype(dtype, np.complexfloating) + m1 = self.m1.astype(dtype) + (1j if is_complex else 0) + m2 = self.m2.astype(dtype) + (1j if is_complex else 0) + dot_res = np.dot(m1, m2) + mo = np.zeros_like(dot_res) + + for mode in itertools.product(*[modes] * 3): + m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] + assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) + def test_matmul_object(self): import fractions f = np.vectorize(fractions.Fraction) + def random_ints(): return np.random.randint(1, 1000, size=(10, 3, 3)) M1 = f(random_ints(), random_ints()) @@ -7273,7 +7652,7 @@ def random_ints(): def test_matmul_object_type_scalar(self): from fractions import Fraction as F - v = np.array([F(2,3), F(5,7)]) + v = np.array([F(2, 3), F(5, 7)]) res = self.matmul(v, v) assert_(type(res) is F) @@ -7288,7 +7667,7 @@ def test_matmul_exception_multiply(self): class add_not_multiply: def __add__(self, other): return self - a = np.full((3,3), add_not_multiply()) + a = np.full((3, 3), add_not_multiply()) with assert_raises(TypeError): b = np.matmul(a, a) @@ -7297,20 +7676,20 @@ def test_matmul_exception_add(self): class multiply_not_add: def __mul__(self, other): return 
self - a = np.full((3,3), multiply_not_add()) + a = np.full((3, 3), multiply_not_add()) with assert_raises(TypeError): b = np.matmul(a, a) def test_matmul_bool(self): # gh-14439 - a = np.array([[1, 0],[1, 1]], dtype=bool) + a = np.array([[1, 0], [1, 1]], dtype=bool) assert np.max(a.view(np.uint8)) == 1 b = np.matmul(a, a) # matmul with boolean output should always be 0, 1 assert np.max(b.view(np.uint8)) == 1 rg = np.random.default_rng(np.random.PCG64(43)) - d = rg.integers(2, size=4*5, dtype=np.int8) + d = rg.integers(2, size=4 * 5, dtype=np.int8) d = d.reshape(4, 5) > 0 out1 = np.matmul(d, d.reshape(5, 4)) out2 = np.dot(d, d.reshape(5, 4)) @@ -7410,7 +7789,7 @@ def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]): def test_matmul_axes(): - a = np.arange(3*4*5).reshape(3, 4, 5) + a = np.arange(3 * 4 * 5).reshape(3, 4, 5) c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) assert c.shape == (3, 4, 4) d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) @@ -7425,7 +7804,7 @@ class TestInner: def test_inner_type_mismatch(self): c = 1. 
- A = np.array((1,1), dtype='i,i') + A = np.array((1, 1), dtype='i,i') assert_raises(TypeError, np.inner, c, A) assert_raises(TypeError, np.inner, A, c) @@ -7473,8 +7852,8 @@ def test_inner_product_with_various_contiguities(self): def test_3d_tensor(self): for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - a = np.arange(24).reshape(2,3,4).astype(dt) - b = np.arange(24, 48).reshape(2,3,4).astype(dt) + a = np.arange(24).reshape(2, 3, 4).astype(dt) + b = np.arange(24, 48).reshape(2, 3, 4).astype(dt) desired = np.array( [[[[ 158, 182, 206], [ 230, 254, 278]], @@ -7495,27 +7874,31 @@ def test_3d_tensor(self): [3230, 3574, 3918]]]] ).astype(dt) assert_equal(np.inner(a, b), desired) - assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) + assert_equal(np.inner(b, a).transpose(2, 3, 0, 1), desired) class TestChoose: - def setup_method(self): - self.x = 2*np.ones((3,), dtype=int) - self.y = 3*np.ones((3,), dtype=int) - self.x2 = 2*np.ones((2, 3), dtype=int) - self.y2 = 3*np.ones((2, 3), dtype=int) - self.ind = [0, 0, 1] + def _create_data(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = [0, 0, 1] + return x, y, x2, y2, ind def test_basic(self): - A = np.choose(self.ind, (self.x, self.y)) + x, y, _, _, ind = self._create_data() + A = np.choose(ind, (x, y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): - A = np.choose(self.ind, (self.x2, self.y2)) + _, _, x2, y2, ind = self._create_data() + A = np.choose(ind, (x2, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): - A = np.choose(self.ind, (self.x, self.y2)) + x, _, _, y2, ind = self._create_data() + A = np.choose(ind, (x, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) @pytest.mark.parametrize("ops", @@ -7525,7 +7908,7 @@ def test_broadcast2(self): (1., np.array([3], dtype=np.float32))],) def test_output_dtype(self, ops): expected_dt = np.result_type(*ops) - 
assert(np.choose([0], ops).dtype == expected_dt) + assert np.choose([0], ops).dtype == expected_dt def test_dimension_and_args_limit(self): # Maxdims for the legacy iterator is 32, but the maximum number @@ -7545,38 +7928,43 @@ def test_dimension_and_args_limit(self): class TestRepeat: - def setup_method(self): - self.m = np.array([1, 2, 3, 4, 5, 6]) - self.m_rect = self.m.reshape((2, 3)) + def _create_data(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + return m, m_rect def test_basic(self): - A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + m, _ = self._create_data() + A = np.repeat(m, [1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) def test_broadcast1(self): - A = np.repeat(self.m, 2) + m, _ = self._create_data() + A = np.repeat(m, 2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) def test_axis_spec(self): - A = np.repeat(self.m_rect, [2, 1], axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, [2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) - A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + A = np.repeat(m_rect, [1, 3, 2], axis=1) assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) def test_broadcast2(self): - A = np.repeat(self.m_rect, 2, axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, 2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) - A = np.repeat(self.m_rect, 2, axis=1) + A = np.repeat(m_rect, 2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) @@ -7826,7 +8214,7 @@ class TestWarnings: def test_complex_warning(self): x = np.array([1, 2]) - y = np.array([1-2j, 1+2j]) + y = np.array([1 - 2j, 1 + 2j]) with warnings.catch_warnings(): warnings.simplefilter("error", ComplexWarning) @@ -7837,22 +8225,22 @@ def test_complex_warning(self): class TestMinScalarType: def test_usigned_shortshort(self): - dt = np.min_scalar_type(2**8-1) + dt = np.min_scalar_type(2**8 - 1) wanted = np.dtype('uint8') assert_equal(wanted, dt) def 
test_usigned_short(self): - dt = np.min_scalar_type(2**16-1) + dt = np.min_scalar_type(2**16 - 1) wanted = np.dtype('uint16') assert_equal(wanted, dt) def test_usigned_int(self): - dt = np.min_scalar_type(2**32-1) + dt = np.min_scalar_type(2**32 - 1) wanted = np.dtype('uint32') assert_equal(wanted, dt) def test_usigned_longlong(self): - dt = np.min_scalar_type(2**63-1) + dt = np.min_scalar_type(2**63 - 1) wanted = np.dtype('uint64') assert_equal(wanted, dt) @@ -7870,7 +8258,7 @@ def _check(self, spec, wanted): dt = np.dtype(wanted) actual = _dtype_from_pep3118(spec) assert_equal(actual, dt, - err_msg="spec %r != dtype %r" % (spec, wanted)) + err_msg=f"spec {spec!r} != dtype {wanted!r}") def test_native_padding(self): align = np.dtype('i').alignment @@ -7879,10 +8267,10 @@ def test_native_padding(self): s = 'bi' else: s = 'b%dxi' % j - self._check('@'+s, {'f0': ('i1', 0), - 'f1': ('i', align*(1 + j//align))}) - self._check('='+s, {'f0': ('i1', 0), - 'f1': ('i', 1+j)}) + self._check('@' + s, {'f0': ('i1', 0), + 'f1': ('i', align * (1 + j // align))}) + self._check('=' + s, {'f0': ('i1', 0), + 'f1': ('i', 1 + j)}) def test_native_padding_2(self): # Native padding should work also for structs and sub-arrays @@ -7896,9 +8284,9 @@ def test_trailing_padding(self): size = np.dtype('i').itemsize def aligned(n): - return align*(1 + (n-1)//align) + return align * (1 + (n - 1) // align) - base = dict(formats=['i'], names=['f0']) + base = {"formats": ['i'], "names": ['f0']} self._check('ix', dict(itemsize=aligned(size + 1), **base)) self._check('ixx', dict(itemsize=aligned(size + 2), **base)) @@ -7943,14 +8331,14 @@ def test_intra_padding(self): size = np.dtype('i').itemsize def aligned(n): - return (align*(1 + (n-1)//align)) + return (align * (1 + (n - 1) // align)) - self._check('(3)T{ix}', (dict( - names=['f0'], - formats=['i'], - offsets=[0], - itemsize=aligned(size + 1) - ), (3,))) + self._check('(3)T{ix}', ({ + "names": ['f0'], + "formats": ['i'], + "offsets": [0], + 
"itemsize": aligned(size + 1) + }, (3,))) def test_char_vs_string(self): dt = np.dtype('c') @@ -7998,7 +8386,7 @@ def test_roundtrip(self): x = np.array([[1, 2], [3, 4]], dtype=np.float64) self._check_roundtrip(x) - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :] self._check_roundtrip(x) dt = [('a', 'b'), @@ -8136,7 +8524,7 @@ def test_export_simple_nd(self): assert_equal(y.itemsize, 8) def test_export_discontiguous(self): - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :] y = memoryview(x) assert_equal(y.format, 'f') assert_equal(y.shape, (3, 3)) @@ -8177,7 +8565,7 @@ def test_export_record(self): assert_equal(y.ndim, 1) assert_equal(y.suboffsets, ()) - sz = sum([np.dtype(b).itemsize for a, b in dt]) + sz = sum(np.dtype(b).itemsize for a, b in dt) if np.dtype('l').itemsize == 4: assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') else: @@ -8231,11 +8619,16 @@ def test_export_and_pickle_user_dtype(self, obj, error): res = pickle.loads(pickle_obj) assert_array_equal(res, obj) + def test_repr_user_dtype(self): + dt = np.dtype(rational) + assert_equal(repr(dt), 'dtype(rational)') + def test_padding(self): for j in range(8): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) + @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") def test_reference_leak(self): if HAS_REFCOUNT: count_1 = sys.getrefcount(np._core._internal) @@ -8245,7 +8638,6 @@ def test_reference_leak(self): if HAS_REFCOUNT: count_2 = sys.getrefcount(np._core._internal) assert_equal(count_1, count_2) - del c # avoid pyflakes unused variable warning. 
def test_padded_struct_array(self): dt1 = np.dtype( @@ -8267,10 +8659,13 @@ def test_padded_struct_array(self): self._check_roundtrip(x3) @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") - def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): + def test_relaxed_strides(self, c=stride_tricks.as_strided( # noqa: B008 + np.ones((1, 10, 10), dtype='i8'), # noqa: B008 + strides=(-1, 80, 8) + ) + ): # Note: c defined as parameter so that it is persistent and leak # checks will notice gh-16934 (buffer info cache leak). - c.strides = (-1, 80, 8) # strides need to be fixed at export assert_(memoryview(c).strides == (800, 80, 8)) @@ -8292,12 +8687,12 @@ def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): assert_(strides[-1] == 8) def test_out_of_order_fields(self): - dt = np.dtype(dict( - formats=[' np.array(0, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) + assert_(1 > np.array(0, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(0, dtype=dt1), f"type {dt1} failed") for dt2 in np.typecodes['AllInteger']: assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") # Unsigned integers for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(-1 < np.array(1, dtype=dt1), f"type {dt1} failed") + assert_(not -1 > np.array(1, dtype=dt1), f"type {dt1} failed") + assert_(-1 != np.array(1, dtype=dt1), f"type {dt1} failed") # Unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and 
{dt2} failed") assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(1 > np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(-1 == np.array(-1, dtype=dt1), f"type {dt1} failed") for dt2 in 'bhlqp' + np.typecodes['Float']: assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") def test_to_bool_scalar(self): assert_equal(bool(np.array([False])), False) @@ -8918,6 +9359,8 @@ def __bool__(self): assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) if IS_PYSTON: pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("Pyodide/WASM has limited stack size") self_containing = np.array([None]) self_containing[0] = self_containing @@ -8942,10 +9385,8 @@ def test_to_int_scalar(self): int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - with assert_warns(DeprecationWarning): - assert_equal(int_func(np.array([1])), 1) - with assert_warns(DeprecationWarning): - assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1])) + assert_raises(TypeError, 
int_func, np.array([[42]])) assert_raises(TypeError, int_func, np.array([1, 2])) # gh-9972 @@ -8953,26 +9394,29 @@ def test_to_int_scalar(self): assert_equal(5, int_func(np.bytes_(b'5'))) assert_equal(6, int_func(np.str_('6'))) - # The delegation of int() to __trunc__ was deprecated in - # Python 3.11. - if sys.version_info < (3, 11): - class HasTrunc: - def __trunc__(self): - return 3 - assert_equal(3, int_func(np.array(HasTrunc()))) - with assert_warns(DeprecationWarning): - assert_equal(3, int_func(np.array([HasTrunc()]))) - else: - pass - class NotConvertible: def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - with assert_warns(DeprecationWarning): - assert_raises(NotImplementedError, - int_func, np.array([NotConvertible()])) + assert_raises(TypeError, + int_func, np.array([NotConvertible()])) + + def test_to_float_scalar(self): + float_funcs = (float, lambda x: x.__float__()) + for float_func in float_funcs: + assert_equal(float_func(np.array(0)), 0.0) + assert_equal(float_func(np.array(1.0, np.float64)), 1.0) + assert_raises(TypeError, float_func, np.array([2])) + assert_raises(TypeError, float_func, np.array([3.14])) + assert_raises(TypeError, float_func, np.array([[4.0]])) + + assert_equal(5.0, float_func(np.array('5'))) + assert_equal(5.1, float_func(np.array('5.1'))) + assert_equal(6.0, float_func(np.bytes_(b'6'))) + assert_equal(6.1, float_func(np.bytes_(b'6.1'))) + assert_equal(7.0, float_func(np.str_('7'))) + assert_equal(7.1, float_func(np.str_('7.1'))) class TestWhere: @@ -9041,7 +9485,7 @@ def test_exotic(self): e = float('-Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) # With NEP 50 adopted, the float will overflow here: - e = float(1e150) + e = 1e150 with pytest.warns(RuntimeWarning, match="overflow"): res = np.where(True, d, e) assert res.dtype == np.float32 @@ -9050,15 +9494,15 @@ def test_ndim(self): c = [True, False] a = np.zeros((2, 25)) b = np.ones((2, 25)) 
- r = np.where(np.array(c)[:,np.newaxis], a, b) + r = np.where(np.array(c)[:, np.newaxis], a, b) assert_array_equal(r[0], a[0]) assert_array_equal(r[1], b[0]) a = a.T b = b.T r = np.where(c, a, b) - assert_array_equal(r[:,0], a[:,0]) - assert_array_equal(r[:,1], b[:,0]) + assert_array_equal(r[:, 0], a[:, 0]) + assert_array_equal(r[:, 1], b[:, 0]) def test_dtype_mix(self): c = np.array([False, True, False, False, False, False, True, False, @@ -9129,7 +9573,7 @@ def test_empty_result(self): x = np.zeros((1, 1)) ibad = np.vstack(np.where(x == 99.)) assert_array_equal(ibad, - np.atleast_2d(np.array([[],[]], dtype=np.intp))) + np.atleast_2d(np.array([[], []], dtype=np.intp))) def test_largedim(self): # invalid read regression gh-9304 @@ -9246,7 +9690,6 @@ def _all(self, other): __sub__ = __rsub__ = _all __mul__ = __rmul__ = _all __pow__ = __rpow__ = _all - __div__ = __rdiv__ = _all __mod__ = __rmod__ = _all __truediv__ = __rtruediv__ = _all __floordiv__ = __rfloordiv__ = _all @@ -9369,12 +9812,12 @@ class TestFormat: def test_0d(self): a = np.array(np.pi) - assert_equal('{:0.3g}'.format(a), '3.14') - assert_equal('{:0.3g}'.format(a[()]), '3.14') + assert_equal(f'{a:0.3g}', '3.14') + assert_equal(f'{a[()]:0.3g}', '3.14') def test_1d_no_format(self): a = np.array([np.pi]) - assert_equal('{}'.format(a), str(a)) + assert_equal(f'{a}', str(a)) def test_1d_format(self): # until gh-5543, ensure that the behaviour matches what it used to be @@ -9393,6 +9836,7 @@ def test_ctypes_is_available(self): assert_equal(ctypes, test_arr.ctypes._ctypes) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + @pytest.mark.thread_unsafe(reason="modifies global module state") def test_ctypes_is_not_available(self): from numpy._core import _internal _internal.ctypes = None @@ -9409,17 +9853,17 @@ def _make_readonly(x): x.flags.writeable = False return x + @pytest.mark.thread_unsafe(reason="calls gc.collect()") @pytest.mark.parametrize('arr', [ np.array([1, 2, 3]), np.array([['one', 'two'], 
['three', 'four']]), np.array((1, 2), dtype='i4,i4'), - np.zeros((2,), dtype= - np.dtype(dict( - formats=['2, [44, 55]) + np.place(a, a > 2, [44, 55]) assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) # hit one of the failing paths - assert_raises(ValueError, np.place, a, a>20, []) + assert_raises(ValueError, np.place, a, a > 20, []) def test_put_noncontiguous(self): a = np.arange(6).reshape(2, 3).T # force non-c-contiguous @@ -9507,7 +9952,7 @@ def test_put_noncontiguous(self): def test_putmask_noncontiguous(self): a = np.arange(6).reshape(2, 3).T # force non-c-contiguous # uses arr_putmask - np.putmask(a, a>2, a**2) + np.putmask(a, a > 2, a**2) assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) def test_take_mode_raise(self): @@ -9518,7 +9963,7 @@ def test_take_mode_raise(self): def test_choose_mod_raise(self): a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) - out = np.empty((3,3), dtype='int') + out = np.empty((3, 3), dtype='int') choices = [-10, 10] np.choose(a, choices, out=out, mode='raise') assert_equal(out, np.array([[ 10, -10, 10], @@ -9540,7 +9985,8 @@ def test_dot_out(self): def test_view_assign(self): from numpy._core._multiarray_tests import ( - npy_create_writebackifcopy, npy_resolve + npy_create_writebackifcopy, + npy_resolve, ) arr = np.arange(9).reshape(3, 3).T @@ -9561,16 +10007,15 @@ def test_view_assign(self): @pytest.mark.leaks_references( reason="increments self in dealloc; ignore since deprecated path.") def test_dealloc_warning(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - arr = np.arange(9).reshape(3, 3) - v = arr.T + arr = np.arange(9).reshape(3, 3) + v = arr.T + with pytest.warns(RuntimeWarning): _multiarray_tests.npy_abuse_writebackifcopy(v) - assert len(sup.log) == 1 def test_view_discard_refcount(self): from numpy._core._multiarray_tests import ( - npy_create_writebackifcopy, npy_discard + npy_create_writebackifcopy, + npy_discard, ) arr = np.arange(9).reshape(3, 3).T @@ -9686,6 +10131,63 @@ def 
test_error_paths_and_promotion(self, which): # Fails discovering start dtype np.arange(*args) + def test_dtype_attribute_ignored(self): + # Until 2.3 this would raise a DeprecationWarning + class dt: + dtype = "f8" + + class vdt(np.void): + dtype = "f,f" + + assert_raises(ValueError, np.dtype, dt) + assert_raises(ValueError, np.dtype, dt()) + assert_raises(ValueError, np.dtype, vdt) + assert_raises(ValueError, np.dtype, vdt(1)) + + +class TestDTypeCoercionForbidden: + forbidden_types = [ + # The builtin scalar super types: + np.generic, np.flexible, np.number, + np.inexact, np.floating, np.complexfloating, + np.integer, np.unsignedinteger, np.signedinteger, + # character is a deprecated S1 special case: + np.character, + ] + + def test_dtype_coercion(self): + for scalar_type in self.forbidden_types: + assert_raises(TypeError, np.dtype, args=(scalar_type,)) + + def test_array_construction(self): + for scalar_type in self.forbidden_types: + assert_raises(TypeError, np.array, args=([], scalar_type,)) + + def test_not_deprecated(self): + # All specific types work + for group in np._core.sctypes.values(): + for scalar_type in group: + np.dtype(scalar_type) + + for scalar_type in [type, dict, list, tuple]: + # Typical python types are coerced to object currently: + np.dtype(scalar_type) + + +class TestDateTimeCreationTuple: + @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) + def test_dt_tuple(self, cls): + # two valid uses - (unit, num) and (unit, num, den, None) + cls(1, ('ms', 2)) + cls(1, ('ms', 2, 1, None)) + + # trying to use the event argument, removed in 1.7.0 + # it used to be a uint8 + assert_raises(TypeError, cls, args=(1, ('ms', 2, 'event'))) + assert_raises(TypeError, cls, args=(1, ('ms', 2, 63))) + assert_raises(TypeError, cls, args=(1, ('ms', 2, 1, 'event'))) + assert_raises(TypeError, cls, args=(1, ('ms', 2, 1, 63))) + class TestArrayFinalize: """ Tests __array_finalize__ """ @@ -9724,6 +10226,7 @@ def __array_finalize__(self, obj): with 
pytest.raises(RuntimeError, match="boohoo!"): np.arange(10).view(BadAttributeArray) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_lifetime_on_error(self): # gh-11237 class RaisesInFinalize(np.ndarray): @@ -9879,12 +10382,11 @@ def __array__(self, dtype=None, copy=None): def test_richcompare_scalar_boolean_singleton_return(): - # These are currently guaranteed to be the boolean singletons, but maybe - # returning NumPy booleans would also be OK: - assert (np.array(0) == "a") is False - assert (np.array(0) != "a") is True - assert (np.int16(0) == "a") is False - assert (np.int16(0) != "a") is True + # These are currently guaranteed to be the boolean numpy singletons + assert (np.array(0) == "a") is np.bool_(False) + assert (np.array(0) != "a") is np.bool_(True) + assert (np.int16(0) == "a") is np.bool_(False) + assert (np.int16(0) != "a") is np.bool_(True) @pytest.mark.parametrize("op", [ @@ -9923,7 +10425,12 @@ def test_npymath_complex(fun, npfun, x, y, test_dtype): def test_npymath_real(): # Smoketest npymath functions from numpy._core._multiarray_tests import ( - npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) + npy_cosh, + npy_log10, + npy_sinh, + npy_tan, + npy_tanh, + ) funcs = {npy_log10: np.log10, npy_cosh: np.cosh, @@ -9964,18 +10471,18 @@ def test_uintalignment_and_alignment(): # check that C struct matches numpy struct size s = _multiarray_tests.get_struct_alignments() - for d, (alignment, size) in zip([d1,d2,d3], s): + for d, (alignment, size) in zip([d1, d2, d3], s): assert_equal(d.alignment, alignment) assert_equal(d.itemsize, size) # check that ufuncs don't complain in debug mode # (this is probably OK if the aligned flag is true above) - src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often + src = np.zeros((2, 2), dtype=d1)['f1'] # 4-byte aligned, often np.exp(src) # assert fails? 
# check that copy code doesn't complain in debug mode - dst = np.zeros((2,2), dtype='c8') - dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? + dst = np.zeros((2, 2), dtype='c8') + dst[:, 1] = src[:, 1] # assert in lowlevel_strided_loops fails? class TestAlignment: # adapted from scipy._lib.tests.test__util.test__aligned_zeros @@ -10024,8 +10531,8 @@ def test_strided_loop_alignments(self): xf128 = _aligned_zeros(3, np.longdouble, align=align) # test casting, both to and from misaligned - with suppress_warnings() as sup: - sup.filter(ComplexWarning, "Casting complex values") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "Casting complex values", ComplexWarning) xc64.astype('f8') xf64.astype(np.complex64) test = xc64 + xf64 @@ -10062,7 +10569,6 @@ def test_getfield(): pytest.raises(ValueError, a.getfield, 'uint8', 16) pytest.raises(ValueError, a.getfield, 'uint64', 0) - class TestViewDtype: """ Verify that making a view of a non-contiguous array works as expected. 
@@ -10138,19 +10644,19 @@ def test_sort_float(N, dtype): assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap')) # (2) with +INF - infarr = np.inf*np.ones(N, dtype=dtype) + infarr = np.inf * np.ones(N, dtype=dtype) infarr[np.random.choice(infarr.shape[0], 5)] = -1.0 assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap')) # (3) with -INF - neginfarr = -np.inf*np.ones(N, dtype=dtype) + neginfarr = -np.inf * np.ones(N, dtype=dtype) neginfarr[np.random.choice(neginfarr.shape[0], 5)] = 1.0 assert_equal(np.sort(neginfarr, kind='quick'), np.sort(neginfarr, kind='heap')) # (4) with +/-INF - infarr = np.inf*np.ones(N, dtype=dtype) - infarr[np.random.choice(infarr.shape[0], (int)(N/2))] = -np.inf + infarr = np.inf * np.ones(N, dtype=dtype) + infarr[np.random.choice(infarr.shape[0], (int)(N / 2))] = -np.inf assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap')) def test_sort_float16(): @@ -10169,7 +10675,7 @@ def test_sort_int(N, dtype): # Random data with MAX and MIN sprinkled minv = np.iinfo(dtype).min maxv = np.iinfo(dtype).max - arr = np.random.randint(low=minv, high=maxv-1, size=N, dtype=dtype) + arr = np.random.randint(low=minv, high=maxv - 1, size=N, dtype=dtype) arr[np.random.choice(arr.shape[0], 10)] = minv arr[np.random.choice(arr.shape[0], 10)] = maxv assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap')) @@ -10200,7 +10706,7 @@ def test_argsort_float(N, dtype): # (2) Random data with inf at the end of array # See: https://github.com/intel/x86-simd-sort/pull/39 arr = -0.5 + rnd.rand(N).astype(dtype) - arr[N-1] = np.inf + arr[N - 1] = np.inf assert_arg_sorted(arr, np.argsort(arr, kind='quick')) @@ -10220,11 +10726,27 @@ def test_argsort_int(N, dtype): # (2) random data with max value at the end of array # See: https://github.com/intel/x86-simd-sort/pull/39 arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype) - arr[N-1] = maxv + arr[N - 1] = maxv assert_arg_sorted(arr, np.argsort(arr, kind='quick')) 
+# Test large arrays that leverage openMP implementations from x86-simd-sort: +@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64]) +def test_sort_largearrays(dtype): + N = 1000000 + rnd = np.random.RandomState(1100710816) + arr = -0.5 + rnd.random(N).astype(dtype) + assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap')) + +# Test large arrays that leverage openMP implementations from x86-simd-sort: +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_argsort_largearrays(dtype): + N = 1000000 + rnd = np.random.RandomState(1100710816) + arr = -0.5 + rnd.random(N).astype(dtype) + assert_arg_sorted(arr, np.argsort(arr, kind='quick')) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") def test_gh_22683(): b = 777.68760986 a = np.array([b] * 10000, dtype=object) @@ -10241,6 +10763,16 @@ def test_gh_24459(): np.choose(a, [3, -1]) +def test_gh_28206(): + a = np.arange(3) + b = np.ones((3, 3), dtype=np.int64) + out = np.array([np.nan, np.nan, np.nan]) + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + np.choose(a, b, out=out) + + @pytest.mark.parametrize("N", np.arange(2, 512)) @pytest.mark.parametrize("dtype", [np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64]) @@ -10261,7 +10793,7 @@ def test_partition_int(N, dtype): # (2) random data with max value at the end of array arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype) - arr[N-1] = maxv + arr[N - 1] = maxv assert_arr_partitioned(np.sort(arr)[k], k, np.partition(arr, k, kind='introselect')) assert_arr_partitioned(np.sort(arr)[k], k, @@ -10361,7 +10893,7 @@ def test_to_device(self): def test_array_interface_excess_dimensions_raises(): """Regression test for gh-27949: ensure too many dims raises ValueError instead of segfault.""" - + # Dummy object to hold a custom __array_interface__ class 
DummyArray: def __init__(self, interface): @@ -10373,9 +10905,104 @@ def __init__(self, interface): interface = dict(base.__array_interface__) # Modify the shape to exceed NumPy's dimension limit (NPY_MAXDIMS, typically 64) - interface['shape'] = tuple([1] * 136) # match the original bug report + interface['shape'] = tuple([1] * 136) # match the original bug report dummy = DummyArray(interface) # Now, using np.asanyarray on this dummy should trigger a ValueError (not segfault) with pytest.raises(ValueError, match="dimensions must be within"): - np.asanyarray(dummy) \ No newline at end of file + np.asanyarray(dummy) + +@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.uint32, np.complex128]) +def test_array_dunder_array_preserves_dtype_on_none(dtype): + """ + Regression test for: https://github.com/numpy/numpy/issues/27407 + Ensure that __array__(None) returns an array of the same dtype. + """ + a = np.array([1], dtype=dtype) + b = a.__array__(None) + assert_array_equal(a, b, strict=True) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestTextSignatures: + @pytest.mark.parametrize( + "methodname", + [ + "__array__", "__array_finalize__", "__array_function__", "__array_ufunc__", + "__array_wrap__", "__complex__", "__copy__", "__deepcopy__", + "__reduce__", "__reduce_ex__", "__setstate__", + "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", + "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", + "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", + "getfield", "item", "max", "mean", "min", "nonzero", "prod", "put", "ravel", + "repeat", "reshape", "resize", "round", "searchsorted", "setfield", + "setflags", "sort", "partition", "squeeze", "std", "sum", "swapaxes", + "take", "tofile", "tolist", "tobytes", "trace", "transpose", "var", "view", + "__array_namespace__", "__dlpack__", 
"__dlpack_device__", "to_device", + ], + ) + def test_array_method_signatures(self, methodname: str): + method = getattr(np.ndarray, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.ndarray.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize("func", [np.empty_like, np.concatenate]) + def test_c_func_dispatcher_text_signature(self, func): + text_sig = func.__wrapped__.__text_signature__ + assert text_sig.startswith("(") and text_sig.endswith(")") + + sig = inspect.signature(func) + assert sig == inspect.signature(func.__wrapped__) + assert not hasattr(func, "__signature__") + + with pytest.raises(ValueError): + inspect.signature(func, follow_wrapped=False) + + @pytest.mark.parametrize( + "func", + [ + np.inner, np.where, np.lexsort, np.can_cast, np.min_scalar_type, + np.result_type, np.dot, np.vdot, np.bincount, np.ravel_multi_index, + np.unravel_index, np.copyto, np.putmask, np.packbits, np.unpackbits, + np.shares_memory, np.may_share_memory, np.is_busday, np.busday_offset, + np.busday_count, np.datetime_as_string, + ], + ) + def test_c_func_dispatcher_signature(self, func): + sig = inspect.signature(func) + + assert hasattr(func, "__signature__") + assert sig == func.__signature__ + assert sig.parameters + + @pytest.mark.parametrize(("func", "parameter_names"), [ + (np.arange, ("start_or_stop", "stop", "step", "dtype", "device", "like")), + (np.busdaycalendar, ("weekmask", "holidays")), + (np.char.compare_chararrays, ("a1", "a2", "cmp", "rstrip")), + (np.datetime_data, ("dtype",)), + (np.from_dlpack, ("x", "device", "copy")), + (np.frombuffer, ("buffer", "dtype", "count", "offset", "like")), + (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")), + (np.fromiter, ("iter", "dtype", "count", "like")), + (np.frompyfunc, ("func", "nin", "nout", 
"kwargs")), + (np.nested_iters, ( + "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting", + "buffersize", + )), + (np.promote_types, ("type1", "type2")), + ]) + def test_add_newdoc_function_signature(self, func, parameter_names): + assert not hasattr(func, "__signature__") + assert getattr(func, "__text_signature__", None) + + sig = inspect.signature(func) + assert sig.parameters + assert tuple(sig.parameters) == parameter_names diff --git a/blimgui/dist64/numpy/_core/tests/test_multiprocessing.py b/blimgui/dist64/numpy/_core/tests/test_multiprocessing.py new file mode 100644 index 0000000..b64d188 --- /dev/null +++ b/blimgui/dist64/numpy/_core/tests/test_multiprocessing.py @@ -0,0 +1,55 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import IS_PYPY, IS_WASM + +pytestmark = pytest.mark.thread_unsafe( + reason="tests in this module are explicitly multi-processed" +) + +def bool_array_writer(shm_name, n): + # writer routine for test_read_write_bool_array + import time + from multiprocessing import shared_memory + shm = shared_memory.SharedMemory(name=shm_name) + arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf) + for i in range(n): + arr[i] = True + time.sleep(0.00001) + +def bool_array_reader(shm_name, n): + # reader routine for test_read_write_bool_array + from multiprocessing import shared_memory + shm = shared_memory.SharedMemory(name=shm_name) + arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf) + for i in range(n): + while not arr[i]: + pass + +@pytest.mark.skipif(IS_WASM, + reason="WASM does not support _posixshmem") +@pytest.mark.skipif(IS_PYPY and sys.platform == "win32", + reason="_winapi does not support UnmapViewOfFile") +def test_read_write_bool_array(): + # See: gh-30389 + # + # Prior to Python 3.13, boolean scalar singletons (np.True / np.False) were + # regular reference-counted objects. 
Due to the double evaluation in + # PyArrayScalar_RETURN_BOOL_FROM_LONG, concurrent reads and writes of a + # boolean array could corrupt their refcounts, potentially causing a crash + # (e.g., `free(): invalid pointer`). + # + # This test creates a multi-process race between a writer and a reader to + # ensure that NumPy does not exhibit such failures. + from concurrent.futures import ProcessPoolExecutor + from multiprocessing import shared_memory + n = 10000 + shm = shared_memory.SharedMemory(create=True, size=n) + with ProcessPoolExecutor(max_workers=2) as executor: + f_writer = executor.submit(bool_array_writer, shm.name, n) + f_reader = executor.submit(bool_array_reader, shm.name, n) + shm.unlink() + f_writer.result() + f_reader.result() diff --git a/blimgui/dist64/numpy/_core/tests/test_multithreading.py b/blimgui/dist64/numpy/_core/tests/test_multithreading.py index 9159080..032fe38 100644 --- a/blimgui/dist64/numpy/_core/tests/test_multithreading.py +++ b/blimgui/dist64/numpy/_core/tests/test_multithreading.py @@ -1,19 +1,23 @@ import concurrent.futures +import sys import threading -import string -import numpy as np import pytest -from numpy.testing import IS_WASM, IS_64BIT -from numpy.testing._private.utils import run_threaded +import numpy as np from numpy._core import _rational_tests +from numpy._core.tests.test_stringdtype import random_unicode_string_list +from numpy.testing import IS_64BIT, IS_WASM +from numpy.testing._private.utils import run_threaded if IS_WASM: pytest.skip(allow_module_level=True, reason="no threading support in wasm") +pytestmark = pytest.mark.thread_unsafe( + reason="tests in this module are already explicitly multi-threaded" +) -def test_parallel_randomstate_creation(): +def test_parallel_randomstate(): # if the coercion cache is enabled and not thread-safe, creating # RandomState instances simultaneously leads to a data race def func(seed): @@ -21,16 +25,26 @@ def func(seed): run_threaded(func, 500, pass_count=True) + # seeding 
and setting state shouldn't race with generating RNG samples + rng = np.random.RandomState() + + def func(seed): + base_rng = np.random.RandomState(seed) + state = base_rng.get_state() + rng.seed(seed) + rng.random() + rng.set_state(state) + + run_threaded(func, 8, pass_count=True) def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe # computing ufuncs simultaneously in multiple threads leads # to a data race that causes crashes or spurious exceptions - def func(): - arr = np.random.random((25,)) - np.isnan(arr) - - run_threaded(func, 500) + for dtype in [np.float32, np.float64, np.int32]: + for op in [np.random.random((25,)).astype(dtype), dtype(25)]: + for ufunc in [np.isnan, np.sin]: + run_threaded(lambda: ufunc(op), 500) # see gh-26690 NUM_THREADS = 50 @@ -120,6 +134,8 @@ def legacy_125(): task1.start() task2.start() + task1.join() + task2.join() def test_parallel_reduction(): @@ -218,16 +234,12 @@ def func(arr): assert arr.dtype is dt -def test_stringdtype_multithreaded_access_and_mutation( - dtype, random_string_list): +def test_stringdtype_multithreaded_access_and_mutation(): # this test uses an RNG and may crash or cause deadlocks if there is a # threading bug rng = np.random.default_rng(0x4D3D3D3) - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = rng.choice(chars, size=100 * 10, replace=True) - random_string_list = ret.view("U100") + string_list = random_unicode_string_list() def func(arr): rnd = rng.random() @@ -247,10 +259,10 @@ def func(arr): else: np.multiply(arr, np.int64(2), out=arr) else: - arr[:] = random_string_list + arr[:] = string_list with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - arr = np.array(random_string_list, dtype=dtype) + arr = np.array(string_list, dtype="T") futures = [tpe.submit(func, arr) for _ in range(500)] for f in futures: @@ -272,12 +284,15 @@ def closure(b): def test_nonzero(dtype): # See: gh-28361 # - # 
np.nonzero uses np.count_nonzero to determine the size of the output array - # In a second pass the indices of the non-zero elements are determined, but they can have changed + # np.nonzero uses np.count_nonzero to determine the size of the output. + # array. In a second pass the indices of the non-zero elements are + # determined, but they can have changed # - # This test triggers a data race which is suppressed in the TSAN CI. The test is to ensure - # np.nonzero does not generate a segmentation fault - x = np.random.randint(4, size=10_000).astype(dtype) + # This test triggers a data race which is suppressed in the TSAN CI. + # The test is to ensure np.nonzero does not generate a segmentation fault + x = np.random.randint(4, size=100).astype(dtype) + expected_warning = ('number of non-zero array elements changed' + ' during function execution') def func(index): for _ in range(10): @@ -287,7 +302,105 @@ def func(index): try: _ = np.nonzero(x) except RuntimeError as ex: - assert 'number of non-zero array elements changed during function execution' in str(ex) + assert expected_warning in str(ex) + + run_threaded(func, max_workers=10, pass_count=True, outer_iterations=5) + + +# These are all implemented using PySequence_Fast, which needs locking to be safe +def np_broadcast(arrs): + for i in range(50): + np.broadcast(arrs) + +def create_array(arrs): + for i in range(50): + np.array(arrs) + +def create_nditer(arrs): + for i in range(50): + np.nditer(arrs) - run_threaded(func, max_workers=10, pass_count=True, outer_iterations=50) +@pytest.mark.parametrize( + "kernel, outcome", + ( + (np_broadcast, "error"), + (create_array, "error"), + (create_nditer, "success"), + ), +) +def test_arg_locking(kernel, outcome): + # should complete without triggering races but may error + + done = 0 + arrs = [np.array([1, 2, 3]) for _ in range(1000)] + + def read_arrs(b): + nonlocal done + b.wait() + try: + kernel(arrs) + finally: + done += 1 + + def contract_and_expand_list(b): + 
b.wait() + while done < 4: + if len(arrs) > 10: + arrs.pop(0) + elif len(arrs) <= 10: + arrs.extend([np.array([1, 2, 3]) for _ in range(1000)]) + + def replace_list_items(b): + b.wait() + rng = np.random.RandomState() + rng.seed(0x4d3d3d3) + while done < 4: + data = rng.randint(0, 1000, size=4) + arrs[data[0]] = data[1:] + + for mutation_func in (replace_list_items, contract_and_expand_list): + b = threading.Barrier(5) + try: + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as tpe: + tasks = [tpe.submit(read_arrs, b) for _ in range(4)] + tasks.append(tpe.submit(mutation_func, b)) + for t in tasks: + t.result() + except RuntimeError as e: + if outcome == "success": + raise + assert "Inconsistent object during array creation?" in str(e) + msg = "replace_list_items should not raise errors" + assert mutation_func is contract_and_expand_list, msg + finally: + if len(tasks) < 5: + b.abort() + +@pytest.mark.skipif(sys.version_info < (3, 12), reason="Python >= 3.12 required") +def test_array__buffer__thread_safety(): + import inspect + arr = np.arange(1000) + flags = [inspect.BufferFlags.STRIDED, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + arr.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) + +@pytest.mark.skipif(sys.version_info < (3, 12), reason="Python >= 3.12 required") +def test_void_dtype__buffer__thread_safety(): + import inspect + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert isinstance(x, np.void) + flags = [inspect.BufferFlags.STRIDES, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + x.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) diff --git a/blimgui/dist64/numpy/_core/tests/test_nditer.py b/blimgui/dist64/numpy/_core/tests/test_nditer.py index 4dc53ed..6a6eea6 100644 --- a/blimgui/dist64/numpy/_core/tests/test_nditer.py +++ 
b/blimgui/dist64/numpy/_core/tests/test_nditer.py @@ -1,17 +1,27 @@ +import inspect +import subprocess import sys -import pytest - import textwrap -import subprocess +import warnings + +import pytest import numpy as np -import numpy._core.umath as ncu import numpy._core._multiarray_tests as _multiarray_tests -from numpy import array, arange, nditer, all +import numpy._core.umath as ncu +from numpy import all, arange, array, nditer from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles, - ) + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) +from numpy.testing._private.utils import requires_memory + def iter_multi_index(i): ret = [] @@ -77,8 +87,6 @@ def test_iter_refcount(): assert_equal(sys.getrefcount(a), rc_a) assert_equal(sys.getrefcount(dt), rc_dt) - del it2 # avoid pyflakes unused variable warning - def test_iter_best_order(): # The iterator should always find the iteration order # with increasing memory addresses @@ -88,7 +96,7 @@ def test_iter_best_order(): a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -114,7 +122,7 @@ def test_iter_c_order(): a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -141,7 +149,7 @@ def test_iter_f_order(): a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] 
* len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -168,7 +176,7 @@ def test_iter_c_or_f_order(): a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -454,7 +462,7 @@ def test_iter_no_inner_full_coalesce(): a = arange(size) # Test each combination of forward and backwards indexing for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -540,69 +548,69 @@ def test_iter_broadcasting(): # Standard NumPy broadcasting rules # 1D with scalar - i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (6,)) # 2D with scalar i = nditer([arange(6).reshape(2, 3), np.int32(2)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 1D i = nditer([arange(6).reshape(2, 3), arange(3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) i = nditer([arange(2).reshape(2, 1), arange(3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 2D i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 3D with scalar i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], - ['multi_index'], 
[['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 1D i = nditer([arange(3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 2D i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 3D i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*3) + ['multi_index'], [['readonly']] * 3) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) @@ -642,25 +650,25 @@ def test_iter_broadcasting_errors(): # 1D with 1D assert_raises(ValueError, nditer, [arange(2), arange(3)], - [], [['readonly']]*2) + [], [['readonly']] * 2) # 2D with 1D 
assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(2)], - [], [['readonly']]*2) + [], [['readonly']] * 2) # 2D with 2D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], - [], [['readonly']]*2) + [], [['readonly']] * 2) assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], - [], [['readonly']]*2) + [], [['readonly']] * 2) # 3D with 3D assert_raises(ValueError, nditer, [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) + [], [['readonly']] * 2) assert_raises(ValueError, nditer, [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) + [], [['readonly']] * 2) # Verify that the error message mentions the right shapes try: @@ -674,10 +682,10 @@ def test_iter_broadcasting_errors(): msg = str(e) # The message should contain the shape of the 3rd operand assert_(msg.find('(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) + f'Message "{msg}" doesn\'t contain operand shape (2,3)') # The message should contain the broadcast shape assert_(msg.find('(1,2,3)') >= 0, - 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) + f'Message "{msg}" doesn\'t contain broadcast shape (1,2,3)') try: nditer([arange(6).reshape(2, 3), arange(2)], @@ -690,13 +698,13 @@ def test_iter_broadcasting_errors(): msg = str(e) # The message should contain "shape->remappedshape" for each operand assert_(msg.find('(2,3)->(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) + f'Message "{msg}" doesn\'t contain operand shape (2,3)->(2,3)') assert_(msg.find('(2,)->(2,newaxis)') >= 0, - ('Message "%s" doesn\'t contain remapped operand shape' + + ('Message "%s" doesn\'t contain remapped operand shape' '(2,)->(2,newaxis)') % msg) # The message should contain the itershape parameter assert_(msg.find('(4,3)') >= 0, - 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) + f'Message "{msg}" doesn\'t 
contain itershape parameter (4,3)') try: nditer([np.zeros((2, 1, 1)), np.zeros((2,))], @@ -707,10 +715,10 @@ def test_iter_broadcasting_errors(): msg = str(e) # The message should contain the shape of the bad operand assert_(msg.find('(2,1,1)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) + f'Message "{msg}" doesn\'t contain operand shape (2,1,1)') # The message should contain the broadcast shape assert_(msg.find('(2,1,2)') >= 0, - 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) + f'Message "{msg}" doesn\'t contain the broadcast shape (2,1,2)') def test_iter_flags_errors(): # Check that bad combinations of flags produce errors @@ -719,8 +727,6 @@ def test_iter_flags_errors(): # Not enough operands assert_raises(ValueError, nditer, [], [], []) - # Too many operands - assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) # Bad global flag assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) # Bad op flag @@ -730,7 +736,7 @@ def test_iter_flags_errors(): # Bad casting parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') # op_flags must match ops - assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) + assert_raises(ValueError, nditer, [a] * 3, [], [['readonly']] * 2) # Cannot track both a C and an F index assert_raises(ValueError, nditer, a, ['c_index', 'f_index'], [['readonly']]) @@ -836,7 +842,7 @@ def test_iter_nbo_align_contig(): assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) assert_equal(i.operands[0], a) i.operands[0][:] = 2 - assert_equal(au, [2]*6) + assert_equal(au, [2] * 6) del i # should not raise a warning # Byte order change by requesting NBO a = np.arange(6, dtype='f4') @@ -851,11 +857,11 @@ def test_iter_nbo_align_contig(): assert_equal(i.operands[0], a) i.operands[0][:] = 12345 i.operands[0][:] = 2 - assert_equal(au, [2]*6) + assert_equal(au, [2] * 6) # Unaligned input - a = np.zeros((6*4+1,), dtype='i1')[1:] - a.dtype = 
'f4' + a = np.zeros((6 * 4 + 1,), dtype='i1')[1:] + a = a.view('f4') a[:] = np.arange(6, dtype='f4') assert_(not a.flags.aligned) # Without 'aligned', shouldn't copy @@ -868,7 +874,7 @@ def test_iter_nbo_align_contig(): # context manager triggers UPDATEIFCOPY on i at exit assert_equal(i.operands[0], a) i.operands[0][:] = 3 - assert_equal(a, [3]*6) + assert_equal(a, [3] * 6) # Discontiguous input a = arange(12) @@ -1062,7 +1068,7 @@ def test_iter_object_arrays_basic(): i = nditer(a, ['refs_ok'], ['readonly']) vals = [x_[()] for x_ in i] assert_equal(np.array(vals, dtype='O'), a) - vals, i, x = [None]*3 + vals, i, x = [None] * 3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) @@ -1071,7 +1077,7 @@ def test_iter_object_arrays_basic(): assert_(i.iterationneedsapi) vals = [x_[()] for x_ in i] assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) - vals, i, x = [None]*3 + vals, i, x = [None] * 3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) @@ -1080,10 +1086,10 @@ def test_iter_object_arrays_basic(): with i: for x in i: x[...] = None - vals, i, x = [None]*3 + vals, i, x = [None] * 3 if HAS_REFCOUNT: - assert_(sys.getrefcount(obj) == rc-1) - assert_equal(a, np.array([None]*4, dtype='O')) + assert_(sys.getrefcount(obj) == rc - 1) + assert_equal(a, np.array([None] * 4, dtype='O')) def test_iter_object_arrays_conversions(): # Conversions to/from objects @@ -1093,7 +1099,7 @@ def test_iter_object_arrays_conversions(): with i: for x in i: x[...] += 1 - assert_equal(a, np.arange(6)+1) + assert_equal(a, np.arange(6) + 1) a = np.arange(6, dtype='i4') i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], @@ -1101,7 +1107,7 @@ def test_iter_object_arrays_conversions(): with i: for x in i: x[...] += 1 - assert_equal(a, np.arange(6)+1) + assert_equal(a, np.arange(6) + 1) # Non-contiguous object array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) @@ -1112,9 +1118,9 @@ def test_iter_object_arrays_conversions(): with i: for x in i: x[...] 
+= 1 - assert_equal(a, np.arange(6)+1) + assert_equal(a, np.arange(6) + 1) - #Non-contiguous value array + # Non-contiguous value array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) a = a['a'] a[:] = np.arange(6) + 98172488 @@ -1126,9 +1132,10 @@ def test_iter_object_arrays_conversions(): rc = sys.getrefcount(ob) for x in i: x[...] += 1 - if HAS_REFCOUNT: - assert_(sys.getrefcount(ob) == rc-1) - assert_equal(a, np.arange(6)+98172489) + if HAS_REFCOUNT: + newrc = sys.getrefcount(ob) + assert_(newrc == rc - 1) + assert_equal(a, np.arange(6) + 98172489) def test_iter_common_dtype(): # Check that the iterator finds a common data type correctly @@ -1136,38 +1143,38 @@ def test_iter_common_dtype(): i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='same_kind') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='safe') assert_equal(i.dtypes[0], np.dtype('i8')) assert_equal(i.dtypes[1], np.dtype('i8')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='safe') assert_equal(i.dtypes[0], np.dtype('i8')) assert_equal(i.dtypes[1], np.dtype('i8')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), array([2j], dtype='c8'), array([9], 
dtype='f8')], ['common_dtype'], - [['readonly', 'copy']]*4, + [['readonly', 'copy']] * 4, casting='safe') assert_equal(i.dtypes[0], np.dtype('c16')) assert_equal(i.dtypes[1], np.dtype('c16')) @@ -1290,36 +1297,36 @@ def test_iter_op_axes(): # Reverse the axes a = arange(6).reshape(2, 3) - i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) + i = nditer([a, a.T], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 0]]) assert_(all([x == y for (x, y) in i])) a = arange(24).reshape(2, 3, 4) - i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) + i = nditer([a.T, a], [], [['readonly']] * 2, op_axes=[[2, 1, 0], None]) assert_(all([x == y for (x, y) in i])) # Broadcast 1D to any dimension a = arange(1, 31).reshape(2, 3, 5) b = arange(1, 3) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [0, -1, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(2, 1, 1)).ravel()) b = arange(1, 4) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 3, 1)).ravel()) b = arange(1, 6) - i = nditer([a, b], [], [['readonly']]*2, + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [np.newaxis, np.newaxis, 0]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 1, 5)).ravel()) # Inner product-style broadcasting a = arange(24).reshape(2, 3, 4) b = arange(40).reshape(5, 2, 4) - i = nditer([a, b], ['multi_index'], [['readonly']]*2, + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) assert_equal(i.shape, (2, 3, 5, 2)) # Matrix product-style broadcasting a = 
arange(12).reshape(3, 4) b = arange(20).reshape(4, 5) - i = nditer([a, b], ['multi_index'], [['readonly']]*2, + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, op_axes=[[0, -1], [-1, 1]]) assert_equal(i.shape, (3, 5)) @@ -1328,25 +1335,25 @@ def test_iter_op_axes_errors(): # Wrong number of items in op_axes a = arange(6).reshape(2, 3) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0], [1], [0]]) # Out of bounds items in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[2, 1], [0, 1]]) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 1], [2, -1]]) # Duplicate items in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 0], [0, 1]]) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 1]]) # Different sized arrays in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 1], [0, 1, 0]]) # Non-broadcastable dimensions in the result - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 0]]) def test_iter_copy(): @@ -1482,7 +1489,7 @@ def test_iter_copy_casts_structured2(): # Array of two structured scalars: for res in res1, res2: # Cast to tuple by getitem, which may be weird and changeable?: - assert type(res["a"][0]) == tuple + assert isinstance(res["a"][0], tuple) assert res["a"][0] == (1, 1) for res in res1, res2: @@ -1515,7 +1522,7 @@ def 
test_iter_allocate_output_buffered_readwrite(): i.reset() for x in i: x[1][...] += x[0][...] - assert_equal(i.operands[1], a+1) + assert_equal(i.operands[1], a + 1) def test_iter_allocate_output_itorder(): # The allocated output should match the iteration order @@ -1560,19 +1567,19 @@ def test_iter_allocate_output_types_promotion(): # before NEP 50...) i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('i8')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('i8')) def test_iter_allocate_output_types_byte_order(): @@ -1594,7 +1601,7 @@ def test_iter_allocate_output_types_scalar(): # If the inputs are all scalars, the output should be a scalar i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], - [['writeonly', 'allocate']] + [['readonly']]*4) + [['writeonly', 'allocate']] + [['readonly']] * 4) assert_equal(i.operands[0].dtype, np.dtype('complex128')) assert_equal(i.operands[0].ndim, 0) @@ -1707,7 +1714,7 @@ def test_iter_remove_multi_index_inner_loop(): # Removing the inner loop means there's just one iteration i.reset() 
assert_equal(i.itersize, 24) - assert_equal(i[0].shape, tuple()) + assert_equal(i[0].shape, ()) i.enable_external_loop() assert_equal(i.itersize, 24) assert_equal(i[0].shape, (24,)) @@ -1798,8 +1805,8 @@ def test_iter_buffering(): # Contiguous 1-dimensional array arrays.append(np.arange(10, dtype='f4')) # Unaligned array - a = np.zeros((4*16+1,), dtype='i1')[1:] - a.dtype = 'i4' + a = np.zeros((4 * 16 + 1,), dtype='i1')[1:] + a = a.view('i4') a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array @@ -1862,7 +1869,7 @@ def assign_iter(i): assert_equal(i[0], 0) i[1] = 1 assert_equal(i[0:2], [0, 1]) - assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6))) + assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1] * 6))) def test_iter_buffered_cast_simple(): # Test that buffering can handle a simple cast @@ -1877,7 +1884,7 @@ def test_iter_buffered_cast_simple(): for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='f4')) + assert_equal(a, 2 * np.arange(10, dtype='f4')) def test_iter_buffered_cast_byteswapped(): # Test that buffering can handle a cast which requires swap->cast->swap @@ -1893,10 +1900,10 @@ def test_iter_buffered_cast_byteswapped(): for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='f4')) + assert_equal(a, 2 * np.arange(10, dtype='f4')) - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) a = np.arange(10, dtype='f8') a = a.view(a.dtype.newbyteorder()).byteswap() @@ -1909,7 +1916,7 @@ def test_iter_buffered_cast_byteswapped(): for v in i: v[...] 
*= 2 - assert_equal(a, 2*np.arange(10, dtype='f8')) + assert_equal(a, 2 * np.arange(10, dtype='f8')) def test_iter_buffered_cast_byteswapped_complex(): # Test that buffering can handle a cast which requires swap->cast->copy @@ -1925,7 +1932,7 @@ def test_iter_buffered_cast_byteswapped_complex(): with i: for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) a = np.arange(10, dtype='c8') a += 2j @@ -1937,7 +1944,7 @@ def test_iter_buffered_cast_byteswapped_complex(): with i: for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) a = np.arange(10, dtype=np.clongdouble) a = a.view(a.dtype.newbyteorder()).byteswap() @@ -1950,7 +1957,7 @@ def test_iter_buffered_cast_byteswapped_complex(): with i: for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) + assert_equal(a, 2 * np.arange(10, dtype=np.clongdouble) + 4j) a = np.arange(10, dtype=np.longdouble) a = a.view(a.dtype.newbyteorder()).byteswap() @@ -1962,7 +1969,7 @@ def test_iter_buffered_cast_byteswapped_complex(): with i: for v in i: v[...] 
*= 2 - assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) + assert_equal(a, 2 * np.arange(10, dtype=np.longdouble)) def test_iter_buffered_cast_structured_type(): # Tests buffering of structured types @@ -1976,11 +1983,11 @@ def test_iter_buffered_cast_structured_type(): vals = [np.array(x) for x in i] assert_equal(vals[0]['a'], 0.5) assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) assert_equal(vals[0]['d'], 0.5) assert_equal(vals[1]['a'], 1.5) assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) assert_equal(vals[1]['d'], 1.5) assert_equal(vals[0].dtype, np.dtype(sdt)) @@ -1998,14 +2005,14 @@ def test_iter_buffered_cast_structured_type(): vals = [x.copy() for x in i] assert_equal(vals[0]['a'], 0.5) assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) assert_equal(vals[0]['d'], 0.5) assert_equal(vals[1]['a'], 1.5) assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) assert_equal(vals[1]['d'], 1.5) assert_equal(vals[0].dtype, np.dtype(sdt)) - vals, i, x = [None]*3 + vals, i, x = [None] * 3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(a[0]), rc) @@ -2122,7 +2129,7 @@ def test_iter_buffered_cast_subarray(): assert_(np.all(x['a'] == count)) x['a'][0] += 2 count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1) + 2) # many -> one element -> back (copies just element 0) sdt1 = [('a', 'O', (3, 2, 2))] @@ -2139,7 +2146,7 @@ def test_iter_buffered_cast_subarray(): assert_equal(x['a'], count) x['a'] += 2 count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1) * np.ones((1, 3, 2, 2)) + 2) # many -> one element -> back (copies just element 0) 
sdt1 = [('a', 'f8', (3, 2, 2))] @@ -2173,7 +2180,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'f4', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) + a['a'] = np.arange(6 * 3 * 2 * 2).reshape(6, 3, 2, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2187,7 +2194,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (6,))] sdt2 = [('a', 'f4', (2,))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*6).reshape(6, 6) + a['a'] = np.arange(6 * 6).reshape(6, 6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2201,7 +2208,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (6,))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) + a['a'] = np.arange(6 * 2).reshape(6, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2216,7 +2223,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (2, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) + a['a'] = np.arange(6 * 2).reshape(6, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2231,7 +2238,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (2, 1))] sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2, 1) + a['a'] = np.arange(6 * 2).reshape(6, 2, 1) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2247,7 +2254,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (2, 3))] sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2*3).reshape(6, 2, 3) + a['a'] = np.arange(6 * 2 * 3).reshape(6, 2, 3) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ 
-2320,10 +2327,82 @@ def test_iter_buffering_growinner(): assert_equal(i[0].size, a.size) +@pytest.mark.parametrize("read_or_readwrite", ["readonly", "readwrite"]) +def test_iter_contig_flag_reduce_error(read_or_readwrite): + # Test that a non-contiguous operand is rejected without buffering. + # NOTE: This is true even for a reduction, where we return a 0-stride + # below! + with pytest.raises(TypeError, match="Iterator operand required buffering"): + it = np.nditer( + (np.zeros(()),), flags=["external_loop", "reduce_ok"], + op_flags=[(read_or_readwrite, "contig"),], itershape=(10,)) + + +@pytest.mark.parametrize("arr", [ + lambda: np.zeros(()), + lambda: np.zeros((20, 1))[::20], + lambda: np.zeros((1, 20))[:, ::20] + ]) +def test_iter_contig_flag_single_operand_strides(arr): + """ + Tests the strides with the contig flag for both broadcast and non-broadcast + operands in 3 cases where the logic is needed: + 1. When everything has a zero stride, the broadcast op needs to repeated + 2. When the reduce axis is the last axis (first to iterate). + 3. When the reduce axis is the first axis (last to iterate). + + NOTE: The semantics of the cast flag are not clearly defined when + it comes to reduction. It is unclear that there are any users. + """ + first_op = np.ones((10, 10)) + broadcast_op = arr() + red_op = arr() + # Add a first operand to ensure no axis-reordering and the result shape. 
+ iterator = np.nditer( + (first_op, broadcast_op, red_op), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2 + [("readwrite", "contig")]) + + with iterator: + iterator.reset() + for f, b, r in iterator: + # The first operand is contigouos, we should have a view + assert np.shares_memory(f, first_op) + # Although broadcast, the second op always has a contiguous stride + assert b.strides[0] == 8 + assert not np.shares_memory(b, broadcast_op) + # The reduction has a contiguous stride or a 0 stride + if red_op.ndim == 0 or red_op.shape[-1] == 1: + assert r.strides[0] == 0 + else: + # The stride is 8, although it was not originally: + assert r.strides[0] == 8 + # If the reduce stride is 0, buffering makes no difference, but we + # do it anyway right now: + assert not np.shares_memory(r, red_op) + + +@pytest.mark.xfail(reason="The contig flag was always buggy.") +def test_iter_contig_flag_incorrect(): + # This case does the wrong thing... + iterator = np.nditer( + (np.ones((10, 10)).T, np.ones((1, 10))), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2) + + with iterator: + iterator.reset() + for a, b in iterator: + # Remove a and b from locals (pytest may want to format them) + a, b = a.strides, b.strides + assert a == 8 + assert b == 8 # should be 8 but is 0 due to axis reorder + + @pytest.mark.slow def test_iter_buffered_reduce_reuse(): # large enough array for all views, including negative strides. 
- a = np.arange(2*3**5)[3**5:3**5+1] + a = np.arange(2 * 3**5)[3**5:3**5 + 1] flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] op_flags = [('readonly',), ('readwrite', 'allocate')] op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] @@ -2356,7 +2435,7 @@ def get_params(): comp_res = nditer2.operands[-1] - for bufsize in range(0, 3**3): + for bufsize in range(3**3): nditer1 = np.nditer([arr, None], op_axes=op_axes, flags=flags, op_flags=op_flags, buffersize=bufsize, op_dtypes=op_dtypes) @@ -2372,6 +2451,30 @@ def get_params(): assert_array_equal(res, comp_res) +def test_iter_buffered_reduce_reuse_core(): + # NumPy re-uses buffers for broadcast operands (as of writing when reading). + # Test this even if the offset is manually set at some point during + # the iteration. (not a particularly tricky path) + arr = np.empty((1, 6, 4, 1)).reshape(1, 6, 4, 1)[:, ::3, ::2, :] + arr[...] = np.arange(arr.size).reshape(arr.shape) + # First and last dimension are broadcast dimensions. + arr = np.broadcast_to(arr, (100, 2, 2, 2)) + + flags = ['buffered', 'reduce_ok', 'refs_ok', 'multi_index'] + op_flags = [('readonly',)] + + buffersize = 100 # small enough to not fit the whole array + it = np.nditer(arr, flags=flags, op_flags=op_flags, buffersize=100) + + # Iterate a bit (this will cause buffering internally) + expected = [next(it) for i in range(11)] + # Now, manually advance to inside the core (the +1) + it.iterindex = 10 * (2 * 2 * 2) + 1 + result = [next(it) for i in range(10)] + + assert expected[1:] == result + + def test_iter_no_broadcast(): # Test that the no_broadcast flag works a = np.arange(24).reshape(2, 3, 4) @@ -2691,7 +2794,7 @@ def test_iter_buffering_reduction(): # Iterator inner loop should take argument contiguity into account x = np.ones((7, 13, 8), np.int8)[4:6, 1:11:6, 1:5].transpose(1, 2, 0) x[...] 
= np.arange(x.size).reshape(x.shape) - y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) + y_base = np.arange(4 * 4, dtype=np.int8).reshape(4, 4) y_base_copy = y_base.copy() y = y_base[::2, :, None] @@ -2795,19 +2898,20 @@ def _is_buffered(iterator): return True return False -@pytest.mark.parametrize("a", +@pytest.mark.parametrize("arrs", [np.zeros((3,), dtype='f8'), - np.zeros((9876, 3*5), dtype='f8')[::2, :], + np.zeros((9876, 3 * 5), dtype='f8')[::2, :], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], # Also test with the last dimension strided (so it does not fit if # there is repeated access) np.zeros((9,), dtype='f8')[::3], - np.zeros((9876, 3*10), dtype='f8')[::2, ::5], + np.zeros((9876, 3 * 10), dtype='f8')[::2, ::5], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) -def test_iter_writemasked(a): +def test_iter_writemasked(arrs): # Note, the slicing above is to ensure that nditer cannot combine multiple # axes into one. The repetition is just to make things a bit more # interesting. 
+ a = arrs.copy() shape = a.shape reps = shape[-1] // 3 msk = np.empty(shape, dtype=bool) @@ -2934,7 +3038,7 @@ def test_iter_non_writable_attribute_deletion(): def test_iter_writable_attribute_deletion(): it = np.nditer(np.ones(2)) - attr = [ "multi_index", "index", "iterrange", "iterindex"] + attr = ["multi_index", "index", "iterrange", "iterindex"] for s in attr: assert_raises(AttributeError, delattr, it, s) @@ -2978,7 +3082,7 @@ def test_iter_allocated_array_dtypes(): def test_0d_iter(): # Basic test for iteration of 0-d arrays: - i = nditer([2, 3], ['multi_index'], [['readonly']]*2) + i = nditer([2, 3], ['multi_index'], [['readonly']] * 2) assert_equal(i.ndim, 0) assert_equal(next(i), (2, 3)) assert_equal(i.multi_index, ()) @@ -3011,7 +3115,7 @@ def test_0d_iter(): vals = next(i) assert_equal(vals['a'], 0.5) assert_equal(vals['b'], 0) - assert_equal(vals['c'], [[(0.5)]*3]*2) + assert_equal(vals['c'], [[(0.5)] * 3] * 2) assert_equal(vals['d'], 0.5) def test_object_iter_cleanup(): @@ -3097,10 +3201,17 @@ def test_iter_too_large_with_multiindex(): for i in range(num): for mode in range(6): # an axis with size 1024 is removed: - _multiarray_tests.test_nditer_too_large(arrays, i*2, mode) + _multiarray_tests.test_nditer_too_large(arrays, i * 2, mode) # an axis with size 1 is removed: with assert_raises(ValueError): - _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode) + _multiarray_tests.test_nditer_too_large(arrays, i * 2 + 1, mode) + + +def test_invalid_call_of_enable_external_loop(): + with pytest.raises(ValueError, + match='Iterator flag EXTERNAL_LOOP cannot be used'): + np.nditer(([[1], [2]], [3, 4]), ['multi_index']).enable_external_loop() + def test_writebacks(): a = np.arange(6, dtype='f4') @@ -3135,8 +3246,8 @@ def test_writebacks(): assert_(x.flags.writebackifcopy) assert_equal(au, 6) assert_(not x.flags.writebackifcopy) - x[:] = 123 # x.data still valid - assert_equal(au, 6) # but not connected to au + x[:] = 123 # x.data still valid + 
assert_equal(au, 6) # but not connected to au it = nditer(au, [], [['readwrite', 'updateifcopy']], @@ -3196,7 +3307,7 @@ def add_context(x, y, out=None): def test_close_raises(): it = np.nditer(np.arange(3)) - assert_equal (next(it), 0) + assert_equal(next(it), 0) it.close() assert_raises(StopIteration, next, it) assert_raises(ValueError, getattr, it, 'operands') @@ -3210,13 +3321,10 @@ def test_warn_noclose(): a = np.arange(6, dtype='f4') au = a.byteswap() au = au.view(au.dtype.newbyteorder()) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): it = np.nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) + casting='equiv', op_dtypes=[np.dtype('f4')]) del it - assert len(sup.log) == 1 - @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases @@ -3283,6 +3391,43 @@ def test_partial_iteration_error(in_dtype, buf_dtype): assert count == sys.getrefcount(value) +def test_arbitrary_number_of_ops(): + # 2*16 + 1 is still just a few kiB, so should be fast and easy to deal with + # but larger than any small custom integer. + ops = [np.arange(10) for a in range(2**16 + 1)] + + it = np.nditer(ops) + for i, vals in enumerate(it): + assert all(v == i for v in vals) + + +def test_arbitrary_number_of_ops_nested(): + # 2*16 + 1 is still just a few kiB, so should be fast and easy to deal with + # but larger than any small custom integer. + ops = [np.arange(10) for a in range(2**16 + 1)] + + it = np.nested_iters(ops, [[0], []]) + for i, vals in enumerate(it): + assert all(v == i for v in vals) + + +@pytest.mark.slow +@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") +@requires_memory(9 * np.iinfo(np.intc).max) +@pytest.mark.thread_unsafe(reason="crashes with low memory") +def test_arbitrary_number_of_ops_error(): + # A different error may happen for more than integer operands, but that + # is too large to test nicely. 
+ a = np.ones(1) + args = [a] * (np.iinfo(np.intc).max + 1) + with pytest.raises(ValueError, match="Too many operands to nditer"): + np.nditer(args) + + with pytest.raises(ValueError, match="Too many operands to nditer"): + np.nested_iters(args, [[0], []]) + + +@pytest.mark.thread_unsafe(reason="capfd is thread-unsafe") def test_debug_print(capfd): """ Matches the expected output of a debug print with the actual output. @@ -3296,7 +3441,7 @@ def test_debug_print(capfd): expected = """ ------ BEGIN ITERATOR DUMP ------ | Iterator Address: - | ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS + | ItFlags: BUFFER REDUCE | NDim: 2 | NOp: 2 | IterSize: 50 @@ -3312,21 +3457,23 @@ def test_debug_print(capfd): | DTypes: dtype('float64') dtype('int32') | InitDataPtrs: | BaseOffsets: 0 0 + | Ptrs: + | User/buffer ptrs: | Operands: | Operand DTypes: dtype('int64') dtype('float64') | OpItFlags: - | Flags[0]: READ CAST ALIGNED - | Flags[1]: READ WRITE CAST ALIGNED REDUCE + | Flags[0]: READ CAST + | Flags[1]: READ WRITE CAST REDUCE | | BufferData: | BufferSize: 50 | Size: 5 | BufIterEnd: 5 + | BUFFER CoreSize: 5 | REDUCE Pos: 0 | REDUCE OuterSize: 10 | REDUCE OuterDim: 1 | Strides: 8 4 - | Ptrs: | REDUCE Outer Strides: 40 0 | REDUCE Outer Ptrs: | ReadTransferFn: @@ -3339,12 +3486,10 @@ def test_debug_print(capfd): | Shape: 5 | Index: 0 | Strides: 16 8 - | Ptrs: | AxisData[1]: | Shape: 10 | Index: 0 | Strides: 80 0 - | Ptrs: ------- END ITERATOR DUMP ------- """.strip().splitlines() @@ -3362,3 +3507,27 @@ def test_debug_print(capfd): # The actual output may have additional pointers listed that are # stripped from the example output: assert res_line.startswith(expected_line.strip()) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +def test_signature_constructor(): + sig = inspect.signature(np.nditer) + + assert sig.parameters + assert "self" not in sig.parameters + assert "args" not in 
sig.parameters + assert "kwargs" not in sig.parameters + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "method", + [fn for name, fn in vars(np.nditer).items() if callable(fn) and name[0] != "_"], +) +def test_signature_methods(method): + sig = inspect.signature(method) + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY diff --git a/blimgui/dist64/numpy/_core/tests/test_nep50_promotions.py b/blimgui/dist64/numpy/_core/tests/test_nep50_promotions.py index 06a2ee5..8530b3d 100644 --- a/blimgui/dist64/numpy/_core/tests/test_nep50_promotions.py +++ b/blimgui/dist64/numpy/_core/tests/test_nep50_promotions.py @@ -5,16 +5,13 @@ """ import operator -import threading -import warnings -import numpy as np - -import pytest import hypothesis +import pytest from hypothesis import strategies -from numpy.testing import assert_array_equal, IS_WASM +import numpy as np +from numpy.testing import IS_WASM, assert_array_equal @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for fp errors") @@ -115,7 +112,7 @@ def test_weak_promotion_scalar_path(op): # Integer path: res = op(np.uint8(3), 5) assert res == op(3, 5) - assert res.dtype == np.uint8 or res.dtype == bool + assert res.dtype == np.uint8 or res.dtype == bool # noqa: PLR1714 with pytest.raises(OverflowError): op(np.uint8(3), 1000) @@ -123,7 +120,7 @@ def test_weak_promotion_scalar_path(op): # Float path: res = op(np.float32(3), 5.) assert res == op(3., 5.) 
- assert res.dtype == np.float32 or res.dtype == bool + assert res.dtype == np.float32 or res.dtype == bool # noqa: PLR1714 def test_nep50_complex_promotion(): @@ -215,7 +212,7 @@ def test_expected_promotion(expected, dtypes, optional_dtypes, data): [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]) @pytest.mark.parametrize("other_val", - [-2*100, -1, 0, 9, 10, 11, 2**63, 2*100]) + [-2 * 100, -1, 0, 9, 10, 11, 2**63, 2 * 100]) @pytest.mark.parametrize("comp", [operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt]) diff --git a/blimgui/dist64/numpy/_core/tests/test_numeric.py b/blimgui/dist64/numpy/_core/tests/test_numeric.py index 8285e15..da87e5c 100644 --- a/blimgui/dist64/numpy/_core/tests/test_numeric.py +++ b/blimgui/dist64/numpy/_core/tests/test_numeric.py @@ -1,26 +1,35 @@ -import sys -import warnings +import inspect import itertools -import platform -import pytest import math +import platform +import sys +import warnings from decimal import Decimal +import pytest +from hypothesis import given, strategies as st +from hypothesis.extra import numpy as hynp + import numpy as np -from numpy._core import umath, sctypes +from numpy import ma +from numpy._core import sctypes +from numpy._core._rational_tests import rational from numpy._core.numerictypes import obj2sctype from numpy.exceptions import AxisError from numpy.random import rand, randint, randn from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_warns, assert_array_max_ulp, HAS_REFCOUNT, IS_WASM - ) -from numpy._core._rational_tests import rational -from numpy import ma - -from hypothesis import given, strategies as st -from hypothesis.extra import numpy as hynp + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + 
assert_raises, + assert_raises_regex, +) class TestResize: @@ -71,6 +80,13 @@ def test_negative_resize(self): with pytest.raises(ValueError, match=r"negative"): np.resize(A, new_shape=new_shape) + def test_unsigned_resize(self): + # ensure unsigned integer sizes don't lead to underflows + for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]: + arr = np.array([[23, 95], [66, 37]]) + assert_array_equal(np.resize(arr, dt_pair[0](1)), + np.resize(arr, dt_pair[1](1))) + def test_subclass(self): class MyArray(np.ndarray): __array_priority__ = 1. @@ -169,12 +185,6 @@ def test_reshape_shape_arg(self): shape = (3, 4) expected = arr.reshape(shape) - with pytest.raises( - TypeError, - match="You cannot specify 'newshape' and 'shape' " - "arguments at the same time." - ): - np.reshape(arr, shape=shape, newshape=shape) with pytest.raises( TypeError, match=r"reshape\(\) missing 1 required positional " @@ -187,9 +197,6 @@ def test_reshape_shape_arg(self): assert_equal(np.reshape(arr, shape, "C"), expected) assert_equal(np.reshape(arr, shape=shape), expected) assert_equal(np.reshape(arr, shape=shape, order="C"), expected) - with pytest.warns(DeprecationWarning): - actual = np.reshape(arr, newshape=shape) - assert_equal(actual, expected) def test_reshape_copy_arg(self): arr = np.arange(24).reshape(2, 3, 4) @@ -243,7 +250,7 @@ def test_dunder_round(self, dtype): pytest.param(2**31 - 1, -1, marks=pytest.mark.skip(reason="Out of range of int32") ), - (2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))), + (2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))), (2**31 - 1, -math.ceil(math.log10(2**31 - 1))) ]) def test_dunder_round_edgecases(self, val, ndigits): @@ -277,6 +284,10 @@ def test_size(self): assert_(np.size(A) == 6) assert_(np.size(A, 0) == 2) assert_(np.size(A, 1) == 3) + assert_(np.size(A, ()) == 1) + assert_(np.size(A, (0,)) == 2) + assert_(np.size(A, (1,)) == 3) + assert_(np.size(A, (0, 1)) == 6) def test_squeeze(self): A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] @@ -729,27 
+740,29 @@ def test_bitwise_xor(self): class TestBoolArray: - def setup_method(self): + def _create_bool_arrays(self): # offset for simd tests - self.t = np.array([True] * 41, dtype=bool)[1::] - self.f = np.array([False] * 41, dtype=bool)[1::] - self.o = np.array([False] * 42, dtype=bool)[2::] - self.nm = self.f.copy() - self.im = self.t.copy() - self.nm[3] = True - self.nm[-2] = True - self.im[3] = False - self.im[-2] = False + t = np.array([True] * 41, dtype=bool)[1::] + f = np.array([False] * 41, dtype=bool)[1::] + o = np.array([False] * 42, dtype=bool)[2::] + nm = f.copy() + im = t.copy() + nm[3] = True + nm[-2] = True + im[3] = False + im[-2] = False + return t, f, o, nm, im def test_all_any(self): - assert_(self.t.all()) - assert_(self.t.any()) - assert_(not self.f.all()) - assert_(not self.f.any()) - assert_(self.nm.any()) - assert_(self.im.any()) - assert_(not self.nm.all()) - assert_(not self.im.all()) + t, f, _, nm, im = self._create_bool_arrays() + assert_(t.all()) + assert_(t.any()) + assert_(not f.all()) + assert_(not f.any()) + assert_(nm.any()) + assert_(im.any()) + assert_(not nm.all()) + assert_(not im.all()) # check bad element in all positions for i in range(256 - 7): d = np.array([False] * 256, dtype=bool)[7::] @@ -763,124 +776,109 @@ def test_all_any(self): for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: d = np.array([False] * 100043, dtype=bool) d[i] = True - assert_(np.any(d), msg="%r" % i) + assert_(np.any(d), msg=f"{i!r}") e = np.array([True] * 100043, dtype=bool) e[i] = False - assert_(not np.all(e), msg="%r" % i) + assert_(not np.all(e), msg=f"{i!r}") def test_logical_not_abs(self): - assert_array_equal(~self.t, self.f) - assert_array_equal(np.abs(~self.t), self.f) - assert_array_equal(np.abs(~self.f), self.t) - assert_array_equal(np.abs(self.f), self.f) - assert_array_equal(~np.abs(self.f), self.t) - assert_array_equal(~np.abs(self.t), self.f) - assert_array_equal(np.abs(~self.nm), self.im) - np.logical_not(self.t, out=self.o) - 
assert_array_equal(self.o, self.f) - np.abs(self.t, out=self.o) - assert_array_equal(self.o, self.t) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(~t, f) + assert_array_equal(np.abs(~t), f) + assert_array_equal(np.abs(~f), t) + assert_array_equal(np.abs(f), f) + assert_array_equal(~np.abs(f), t) + assert_array_equal(~np.abs(t), f) + assert_array_equal(np.abs(~nm), im) + np.logical_not(t, out=o) + assert_array_equal(o, f) + np.abs(t, out=o) + assert_array_equal(o, t) def test_logical_and_or_xor(self): - assert_array_equal(self.t | self.t, self.t) - assert_array_equal(self.f | self.f, self.f) - assert_array_equal(self.t | self.f, self.t) - assert_array_equal(self.f | self.t, self.t) - np.logical_or(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t & self.t, self.t) - assert_array_equal(self.f & self.f, self.f) - assert_array_equal(self.t & self.f, self.f) - assert_array_equal(self.f & self.t, self.f) - np.logical_and(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t ^ self.t, self.f) - assert_array_equal(self.f ^ self.f, self.f) - assert_array_equal(self.t ^ self.f, self.t) - assert_array_equal(self.f ^ self.t, self.t) - np.logical_xor(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.f) - - assert_array_equal(self.nm & self.t, self.nm) - assert_array_equal(self.im & self.f, False) - assert_array_equal(self.nm & True, self.nm) - assert_array_equal(self.im & False, self.f) - assert_array_equal(self.nm | self.t, self.t) - assert_array_equal(self.im | self.f, self.im) - assert_array_equal(self.nm | True, self.t) - assert_array_equal(self.im | False, self.im) - assert_array_equal(self.nm ^ self.t, self.im) - assert_array_equal(self.im ^ self.f, self.im) - assert_array_equal(self.nm ^ True, self.im) - assert_array_equal(self.im ^ False, self.im) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(t | t, t) + assert_array_equal(f | f, f) + 
assert_array_equal(t | f, t) + assert_array_equal(f | t, t) + np.logical_or(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t & t, t) + assert_array_equal(f & f, f) + assert_array_equal(t & f, f) + assert_array_equal(f & t, f) + np.logical_and(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t ^ t, f) + assert_array_equal(f ^ f, f) + assert_array_equal(t ^ f, t) + assert_array_equal(f ^ t, t) + np.logical_xor(t, t, out=o) + assert_array_equal(o, f) + + assert_array_equal(nm & t, nm) + assert_array_equal(im & f, False) + assert_array_equal(nm & True, nm) + assert_array_equal(im & False, f) + assert_array_equal(nm | t, t) + assert_array_equal(im | f, im) + assert_array_equal(nm | True, t) + assert_array_equal(im | False, im) + assert_array_equal(nm ^ t, im) + assert_array_equal(im ^ f, im) + assert_array_equal(nm ^ True, im) + assert_array_equal(im ^ False, im) class TestBoolCmp: - def setup_method(self): - self.f = np.ones(256, dtype=np.float32) - self.ef = np.ones(self.f.size, dtype=bool) - self.d = np.ones(128, dtype=np.float64) - self.ed = np.ones(self.d.size, dtype=bool) + def _create_data(self, dtype, size): + # generate data using given dtype and num for size of array + a = np.ones(size, dtype=dtype) + e = np.ones(a.size, dtype=bool) # generate values for all permutation of 256bit simd vectors s = 0 - for i in range(32): - self.f[s:s+8] = [i & 2**x for x in range(8)] - self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] - s += 8 - s = 0 - for i in range(16): - self.d[s:s+4] = [i & 2**x for x in range(4)] - self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)] - s += 4 - - self.nf = self.f.copy() - self.nd = self.d.copy() - self.nf[self.ef] = np.nan - self.nd[self.ed] = np.nan - - self.inff = self.f.copy() - self.infd = self.d.copy() - self.inff[::3][self.ef[::3]] = np.inf - self.infd[::3][self.ed[::3]] = np.inf - self.inff[1::3][self.ef[1::3]] = -np.inf - self.infd[1::3][self.ed[1::3]] = -np.inf - self.inff[2::3][self.ef[2::3]] = 
np.nan - self.infd[2::3][self.ed[2::3]] = np.nan - self.efnonan = self.ef.copy() - self.efnonan[2::3] = False - self.ednonan = self.ed.copy() - self.ednonan[2::3] = False - - self.signf = self.f.copy() - self.signd = self.d.copy() - self.signf[self.ef] *= -1. - self.signd[self.ed] *= -1. - self.signf[1::6][self.ef[1::6]] = -np.inf - self.signd[1::6][self.ed[1::6]] = -np.inf + r = int(size / 32) + for i in range(int(size / 8)): + a[s:s + r] = [i & 2**x for x in range(r)] + e[s:s + r] = [(i & 2**x) != 0 for x in range(r)] + s += r + n = a.copy() + n[e] = np.nan + + inf = a.copy() + inf[::3][e[::3]] = np.inf + inf[1::3][e[1::3]] = -np.inf + inf[2::3][e[2::3]] = np.nan + enonan = e.copy() + enonan[2::3] = False + + sign = a.copy() + sign[e] *= -1. + sign[1::6][e[1::6]] = -np.inf # On RISC-V, many operations that produce NaNs, such as converting # a -NaN from f64 to f32, return a canonical NaN. The canonical # NaNs are always positive. See section 11.3 NaN Generation and # Propagation of the RISC-V Unprivileged ISA for more details. # We disable the float32 sign test on riscv64 for -np.nan as the sign # of the NaN will be lost when it's converted to a float32. - if platform.machine() != 'riscv64': - self.signf[3::6][self.ef[3::6]] = -np.nan - self.signd[3::6][self.ed[3::6]] = -np.nan - self.signf[4::6][self.ef[4::6]] = -0. - self.signd[4::6][self.ed[4::6]] = -0. + if not (dtype == np.float32 and platform.machine() == 'riscv64'): + sign[3::6][e[3::6]] = -np.nan + sign[4::6][e[4::6]] = -0. 
+ return a, e, n, inf, enonan, sign def test_float(self): # offset for alignment test + f, ef, nf, inff, efnonan, signf = self._create_data(np.float32, 256) for i in range(4): - assert_array_equal(self.f[i:] > 0, self.ef[i:]) - assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) - assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) - assert_array_equal(-self.f[i:] < 0, self.ef[i:]) - assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) - r = self.f[i:] != 0 - assert_array_equal(r, self.ef[i:]) - r2 = self.f[i:] != np.zeros_like(self.f[i:]) - r3 = 0 != self.f[i:] + assert_array_equal(f[i:] > 0, ef[i:]) + assert_array_equal(f[i:] - 1 >= 0, ef[i:]) + assert_array_equal(f[i:] == 0, ~ef[i:]) + assert_array_equal(-f[i:] < 0, ef[i:]) + assert_array_equal(-f[i:] + 1 <= 0, ef[i:]) + r = f[i:] != 0 + assert_array_equal(r, ef[i:]) + r2 = f[i:] != np.zeros_like(f[i:]) + r3 = 0 != f[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -889,24 +887,25 @@ def test_float(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) - assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) - assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) - assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) - assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + assert_array_equal(np.isnan(nf[i:]), ef[i:]) + assert_array_equal(np.isfinite(nf[i:]), ~ef[i:]) + assert_array_equal(np.isfinite(inff[i:]), ~ef[i:]) + assert_array_equal(np.isinf(inff[i:]), efnonan[i:]) + assert_array_equal(np.signbit(signf[i:]), ef[i:]) def test_double(self): # offset for alignment test + d, ed, nd, infd, ednonan, signd = self._create_data(np.float64, 128) for i in range(2): - assert_array_equal(self.d[i:] > 0, self.ed[i:]) - assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) - assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) - assert_array_equal(-self.d[i:] < 
0, self.ed[i:]) - assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) - r = self.d[i:] != 0 - assert_array_equal(r, self.ed[i:]) - r2 = self.d[i:] != np.zeros_like(self.d[i:]) - r3 = 0 != self.d[i:] + assert_array_equal(d[i:] > 0, ed[i:]) + assert_array_equal(d[i:] - 1 >= 0, ed[i:]) + assert_array_equal(d[i:] == 0, ~ed[i:]) + assert_array_equal(-d[i:] < 0, ed[i:]) + assert_array_equal(-d[i:] + 1 <= 0, ed[i:]) + r = d[i:] != 0 + assert_array_equal(r, ed[i:]) + r2 = d[i:] != np.zeros_like(d[i:]) + r3 = 0 != d[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -915,21 +914,21 @@ def test_double(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) - assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) - assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) - assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) - assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + assert_array_equal(np.isnan(nd[i:]), ed[i:]) + assert_array_equal(np.isfinite(nd[i:]), ~ed[i:]) + assert_array_equal(np.isfinite(infd[i:]), ~ed[i:]) + assert_array_equal(np.isinf(infd[i:]), ednonan[i:]) + assert_array_equal(np.signbit(signd[i:]), ed[i:]) class TestSeterr: def test_default(self): err = np.geterr() assert_equal(err, - dict(divide='warn', - invalid='warn', - over='warn', - under='ignore') + {'divide': 'warn', + 'invalid': 'warn', + 'over': 'warn', + 'under': 'ignore'} ) def test_set(self): @@ -962,10 +961,10 @@ def assert_raises_fpe(self, fpeerr, flop, x, y): try: flop(x, y) assert_(False, - "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) + f"Type {ftype} did not raise fpe error '{fpeerr}'.") except FloatingPointError as exc: assert_(str(exc).find(fpeerr) >= 0, - "Type %s raised wrong fpe error '%s'." 
% (ftype, exc)) + f"Type {ftype} raised wrong fpe error '{exc}'.") def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): # Check that fpe exception is raised. @@ -994,7 +993,7 @@ def test_floating_exceptions(self, typecode): if np.dtype(ftype).kind == 'f': # Get some extreme values for the type fi = np.finfo(ftype) - ft_tiny = fi._machar.tiny + ft_tiny = fi.tiny ft_max = fi.max ft_eps = fi.eps underflow = 'underflow' @@ -1003,7 +1002,7 @@ def test_floating_exceptions(self, typecode): # 'c', complex, corresponding real dtype rtype = type(ftype(0).real) fi = np.finfo(rtype) - ft_tiny = ftype(fi._machar.tiny) + ft_tiny = ftype(fi.tiny) ft_max = ftype(fi.max) ft_eps = ftype(fi.eps) # The complex types raise different exceptions @@ -1016,34 +1015,37 @@ def test_floating_exceptions(self, typecode): # pass the assert if not np.isnan(ft_tiny): self.assert_raises_fpe(underflow, - lambda a, b: a/b, ft_tiny, ft_max) + lambda a, b: a / b, ft_tiny, ft_max) self.assert_raises_fpe(underflow, - lambda a, b: a*b, ft_tiny, ft_tiny) - self.assert_raises_fpe(overflow, - lambda a, b: a*b, ft_max, ftype(2)) + lambda a, b: a * b, ft_tiny, ft_tiny) self.assert_raises_fpe(overflow, - lambda a, b: a/b, ft_max, ftype(0.5)) + lambda a, b: a * b, ft_max, ftype(2)) self.assert_raises_fpe(overflow, - lambda a, b: a+b, ft_max, ft_max*ft_eps) + lambda a, b: a / b, ft_max, ftype(0.5)) self.assert_raises_fpe(overflow, - lambda a, b: a-b, -ft_max, ft_max*ft_eps) + lambda a, b: a + b, ft_max, ft_max * ft_eps) self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) + lambda a, b: a - b, -ft_max, ft_max * ft_eps) + # On AIX, pow() with double does not raise the overflow exception, + # it returns inf. Long double is the same as double. 
+ if sys.platform != 'aix' or typecode not in 'dDgG': + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, - lambda a, b: a/b, ftype(1), ftype(0)) + lambda a, b: a / b, ftype(1), ftype(0)) self.assert_raises_fpe( - invalid, lambda a, b: a/b, ftype(np.inf), ftype(np.inf) + invalid, lambda a, b: a / b, ftype(np.inf), ftype(np.inf) ) self.assert_raises_fpe(invalid, - lambda a, b: a/b, ftype(0), ftype(0)) + lambda a, b: a / b, ftype(0), ftype(0)) self.assert_raises_fpe( - invalid, lambda a, b: a-b, ftype(np.inf), ftype(np.inf) + invalid, lambda a, b: a - b, ftype(np.inf), ftype(np.inf) ) self.assert_raises_fpe( - invalid, lambda a, b: a+b, ftype(np.inf), ftype(-np.inf) + invalid, lambda a, b: a + b, ftype(np.inf), ftype(-np.inf) ) self.assert_raises_fpe(invalid, - lambda a, b: a*b, ftype(0), ftype(np.inf)) + lambda a, b: a * b, ftype(0), ftype(np.inf)) @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") def test_warnings(self): @@ -1135,7 +1137,6 @@ def check_promotion_cases(self, promote_func): assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) - def test_coercion(self): def res_type(a, b): return np.add(a, b).dtype @@ -1146,26 +1147,26 @@ def res_type(a, b): # shouldn't narrow the float/complex type for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: b = 1.234 * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") b = np.longdouble(1.234) * a assert_equal(b.dtype, np.dtype(np.longdouble), - "array type %s" % a.dtype) + f"array type {a.dtype}") b = np.float64(1.234) * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") b = np.float32(1.234) * a - assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) + 
assert_equal(b.dtype, np.dtype('f4'), f"array type {a.dtype}") b = np.float16(1.234) * a - assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('f2'), f"array type {a.dtype}") b = 1.234j * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") b = np.clongdouble(1.234j) * a assert_equal(b.dtype, np.dtype(np.clongdouble), - "array type %s" % a.dtype) + f"array type {a.dtype}") b = np.complex128(1.234j) * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") b = np.complex64(1.234j) * a - assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('c8'), f"array type {a.dtype}") # The following use-case is problematic, and to resolve its # tricky side-effects requires more changes. @@ -1251,31 +1252,31 @@ def test_promote_types_strings(self, swap, string_dtype): S = string_dtype # Promote numeric with unsized string: - assert_equal(promote_types('bool', S), np.dtype(S+'5')) - assert_equal(promote_types('b', S), np.dtype(S+'4')) - assert_equal(promote_types('u1', S), np.dtype(S+'3')) - assert_equal(promote_types('u2', S), np.dtype(S+'5')) - assert_equal(promote_types('u4', S), np.dtype(S+'10')) - assert_equal(promote_types('u8', S), np.dtype(S+'20')) - assert_equal(promote_types('i1', S), np.dtype(S+'4')) - assert_equal(promote_types('i2', S), np.dtype(S+'6')) - assert_equal(promote_types('i4', S), np.dtype(S+'11')) - assert_equal(promote_types('i8', S), np.dtype(S+'21')) + assert_equal(promote_types('bool', S), np.dtype(S + '5')) + assert_equal(promote_types('b', S), np.dtype(S + '4')) + assert_equal(promote_types('u1', S), np.dtype(S + '3')) + assert_equal(promote_types('u2', S), np.dtype(S + '5')) + assert_equal(promote_types('u4', S), np.dtype(S + '10')) + assert_equal(promote_types('u8', S), np.dtype(S + '20')) + 
assert_equal(promote_types('i1', S), np.dtype(S + '4')) + assert_equal(promote_types('i2', S), np.dtype(S + '6')) + assert_equal(promote_types('i4', S), np.dtype(S + '11')) + assert_equal(promote_types('i8', S), np.dtype(S + '21')) # Promote numeric with sized string: - assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5')) - assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('b', S+'1'), np.dtype(S+'4')) - assert_equal(promote_types('b', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3')) - assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5')) - assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10')) - assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20')) - assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('bool', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('bool', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('b', S + '1'), np.dtype(S + '4')) + assert_equal(promote_types('b', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u1', S + '1'), np.dtype(S + '3')) + assert_equal(promote_types('u1', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u2', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('u2', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u4', S + '1'), np.dtype(S + '10')) + assert_equal(promote_types('u4', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u8', S + '1'), np.dtype(S + '20')) + assert_equal(promote_types('u8', S + '30'), np.dtype(S + '30')) # Promote with object: - assert_equal(promote_types('O', S+'30'), np.dtype('O')) + assert_equal(promote_types('O', S + '30'), np.dtype('O')) @pytest.mark.parametrize(["dtype1", "dtype2"], [[np.dtype("V6"), 
np.dtype("V10")], # mismatch shape @@ -1505,7 +1506,6 @@ def test_can_cast_values(self): with pytest.raises(TypeError): np.can_cast(4j, "complex128", casting="unsafe") - @pytest.mark.parametrize("dtype", list("?bhilqBHILQefdgFDG") + [rational]) def test_can_cast_scalars(self, dtype): @@ -1556,7 +1556,7 @@ def load_data(self, n, eindex): # Raise an exception at the desired index in the iterator. for e in range(n): if e == eindex: - raise NIterError('error at index %s' % eindex) + raise NIterError(f'error at index {eindex}') yield e @pytest.mark.parametrize("dtype", [int, object]) @@ -1630,6 +1630,7 @@ def test_failed_itemsetting(self): with pytest.raises(ValueError): np.fromiter(iterable, dtype=np.dtype((int, 2))) + class TestNonzero: def test_nonzero_trivial(self): assert_equal(np.count_nonzero(np.array([])), 0) @@ -1659,8 +1660,10 @@ def test_nonzero_onedim(self): # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], # dtype=[('a', 'i4'), ('b', 'i2')]) - x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], - dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + x = np.array( + [(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')] + ) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) assert_equal(np.count_nonzero(x['c']), 3) @@ -1706,14 +1709,14 @@ def test_sparse(self): c = np.zeros(400, dtype=bool) c[10 + i:20 + i] = True - c[20 + i*2] = True + c[20 + i * 2] = True assert_equal(np.nonzero(c)[0], - np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) + np.concatenate((np.arange(10 + i, 20 + i), [20 + i * 2]))) @pytest.mark.parametrize('dtype', [np.float32, np.float64]) def test_nonzero_float_dtypes(self, dtype): rng = np.random.default_rng(seed=10) - x = ((2**33)*rng.normal(size=100)).astype(dtype) + x = ((2**33) * rng.normal(size=100)).astype(dtype) x[rng.choice(50, size=100)] = 0 idxs = 
np.nonzero(x)[0] assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) @@ -1733,7 +1736,7 @@ class C(np.ndarray): for view in (C, np.ndarray): for nd in range(1, 4): - shape = tuple(range(2, 2+nd)) + shape = tuple(range(2, 2 + nd)) x = np.arange(np.prod(shape)).reshape(shape).view(view) for nzx in (np.nonzero(x), x.nonzero()): for nzx_i in nzx: @@ -1883,6 +1886,7 @@ def test_nonzero_sideeffect_safety(self): # gh-13631 class FalseThenTrue: _val = False + def __bool__(self): try: return self._val @@ -1891,6 +1895,7 @@ def __bool__(self): class TrueThenFalse: _val = True + def __bool__(self): try: return self._val @@ -1945,38 +1950,44 @@ def __bool__(self): """ # assert that an exception in first pass is handled correctly - a = np.array([ThrowsAfter(5)]*10) + a = np.array([ThrowsAfter(5)] * 10) assert_raises(ValueError, np.nonzero, a) # raise exception in second pass for 1-dimensional loop - a = np.array([ThrowsAfter(15)]*10) + a = np.array([ThrowsAfter(15)] * 10) assert_raises(ValueError, np.nonzero, a) # raise exception in second pass for n-dimensional loop - a = np.array([[ThrowsAfter(15)]]*10) + a = np.array([[ThrowsAfter(15)]] * 10) assert_raises(ValueError, np.nonzero, a) - @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have threads") - def test_structured_threadsafety(self): - # Nonzero (and some other functions) should be threadsafe for - # structured datatypes, see gh-15387. This test can behave randomly. 
- from concurrent.futures import ThreadPoolExecutor + def test_nonzero_byteorder(self): + values = [0., -0., 1, float('nan'), 0, 1, + np.float16(0), np.float16(12.3)] + expected_values = [0, 0, 1, 1, 0, 1, 0, 1] + + for value, expected in zip(values, expected_values): + A = np.array([value]) + A_byteswapped = (A.view(A.dtype.newbyteorder()).byteswap()).copy() - # Create a deeply nested dtype to make a failure more likely: - dt = np.dtype([("", "f8")]) - dt = np.dtype([("", dt)]) - dt = np.dtype([("", dt)] * 2) - # The array should be large enough to likely run into threading issues - arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0] - def func(arr): - arr.nonzero() + assert np.count_nonzero(A) == expected + assert np.count_nonzero(A_byteswapped) == expected - tpe = ThreadPoolExecutor(max_workers=8) - futures = [tpe.submit(func, arr) for _ in range(10)] - for f in futures: - f.result() + def test_count_nonzero_non_aligned_array(self): + # gh-27523 + b = np.zeros(64 + 1, dtype=np.int8)[1:] + b = b.view(int) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 - assert arr.dtype is dt + b = np.zeros(64 + 1, dtype=np.float16)[1:] + b = b.view(float) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 class TestIndex: @@ -1986,7 +1997,9 @@ def test_boolean(self): g1 = randint(0, 5, size=15) g2 = randint(0, 8, size=15) V[g1, g2] = -V[g1, g2] - assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + assert_( + (np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all() + ) def test_boolean_edgecase(self): a = np.array([], dtype='int32') @@ -2035,7 +2048,7 @@ def test_neg_width_boundaries(self): def test_large_neg_int64(self): # See gh-14289. 
assert_equal(np.binary_repr(np.int64(-2**62), width=64), - '11' + '0'*62) + '11' + '0' * 62) class TestBaseRepr: @@ -2083,7 +2096,7 @@ def _test_array_equal_parametrizations(): yield (e1, e1.copy(), False, True) yield (e1, e1.copy(), True, True) - # Non-nanable – those cannot hold nans + # Non-nanable - those cannot hold nans a12 = np.array([1, 2]) a12b = a12.copy() a123 = np.array([1, 2, 3]) @@ -2252,7 +2265,10 @@ def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) assert_(not res) assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + res = np.array_equiv( + np.array([1, 2]), + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), + ) assert_(not res) assert_(type(res) is bool) @@ -2291,16 +2307,15 @@ def assert_array_strict_equal(x, y): class TestClip: - def setup_method(self): - self.nr = 5 - self.nc = 3 + nr = 5 + nc = 3 def fastclip(self, a, m, M, out=None, **kwargs): return a.clip(m, M, out=out, **kwargs) def clip(self, a, m, M, out=None): # use a.choose to verify fastclip result - selector = np.less(a, m) + 2*np.greater(a, M) + selector = np.less(a, m) + 2 * np.greater(a, M) return selector.choose((a, m, M), out=out) # Handy functions @@ -2797,8 +2812,8 @@ def test_clip_value_min_max_flip(self, amin, amax): # case produced by hypothesis (np.zeros(10, dtype=object), 0, - -2**64+1, - np.full(10, -2**64+1, dtype=object)), + -2**64 + 1, + np.full(10, -2**64 + 1, dtype=object)), # for bugs in NPY_TIMEDELTA_MAX, based on a case # produced by hypothesis (np.zeros(10, dtype='m8') - 1, @@ -2823,16 +2838,12 @@ def test_clip_scalar_nan_propagation(self, arr, amin, amax): actual = np.clip(arr, amin, amax) assert_equal(actual, expected) - @pytest.mark.xfail(reason="propagation doesn't match spec") @pytest.mark.parametrize("arr, amin, amax", [ (np.array([1] * 10, dtype='m8'), np.timedelta64('NaT'), np.zeros(10, dtype=np.int32)), ]) - 
@pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_NaT_propagation(self, arr, amin, amax): - # NOTE: the expected function spec doesn't - # propagate NaT, but clip() now does expected = np.minimum(np.maximum(arr, amin), amax) actual = np.clip(arr, amin, amax) assert_equal(actual, expected) @@ -2908,11 +2919,11 @@ def test_clip_min_max_args(self): np.clip(arr, 2, 3, min=2) @pytest.mark.parametrize("dtype,min,max", [ - ("int32", -2**32-1, 2**32), + ("int32", -2**32 - 1, 2**32), ("int32", -2**320, None), ("int32", None, 2**300), ("int32", -1000, 2**32), - ("int32", -2**32-1, 1000), + ("int32", -2**32 - 1, 1000), ("uint8", -1, 129), ]) def test_out_of_bound_pyints(self, dtype, min, max): @@ -2926,6 +2937,7 @@ def test_out_of_bound_pyints(self, dtype, min, max): if max is not None: assert (c <= max).all() + class TestAllclose: rtol = 1e-5 atol = 1e-8 @@ -2937,10 +2949,10 @@ def teardown_method(self): np.seterr(**self.olderr) def tst_allclose(self, x, y): - assert_(np.allclose(x, y), "%s and %s not close" % (x, y)) + assert_(np.allclose(x, y), f"{x} and {y} not close") def tst_not_allclose(self, x, y): - assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y)) + assert_(not np.allclose(x, y), f"{x} and {y} shouldn't be close") def test_ip_allclose(self): # Parametric test factory. 
@@ -2952,10 +2964,10 @@ def test_ip_allclose(self): data = [([1, 0], [1, 0]), ([atol], [0]), - ([1], [1+rtol+atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol*2), - (aran, aran + aran*rtol), + ([1], [1 + rtol + atol]), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol * 2), + (aran, aran + aran * rtol), (np.inf, np.inf), (np.inf, [np.inf])] @@ -2975,9 +2987,9 @@ def test_ip_not_allclose(self): ([np.inf, np.inf], [1, 0]), ([-np.inf, 0], [np.inf, 0]), ([np.nan, 0], [np.nan, 0]), - ([atol*2], [0]), - ([1], [1+rtol+atol*2]), - (aran, aran + aran*atol + atol*2), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + aran * atol + atol * 2), (np.array([np.inf, 1]), np.array([0, np.inf]))] for (x, y) in data: @@ -3025,9 +3037,9 @@ def _setup(self): ([1, 0], [1, 0]), ([atol], [0]), ([1], [1 + rtol + atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol), - (aran, aran + aran*rtol), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol), + (aran, aran + aran * rtol), (np.inf, np.inf), (np.inf, [np.inf]), ([np.inf, -np.inf], [np.inf, -np.inf]), @@ -3038,14 +3050,14 @@ def _setup(self): ([np.inf, np.inf], [1, -np.inf]), ([np.inf, np.inf], [1, 0]), ([np.nan, 0], [np.nan, -np.inf]), - ([atol*2], [0]), - ([1], [1 + rtol + atol*2]), - (aran, aran + rtol*1.1*aran + atol*1.1), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + rtol * 1.1 * aran + atol * 1.1), (np.array([np.inf, 1]), np.array([0, np.inf])), ] self.some_close_tests = [ - ([np.inf, 0], [np.inf, atol*2]), - ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]), + ([np.inf, 0], [np.inf, atol * 2]), + ([atol, 1, 1e6 * (1 + 2 * rtol) + atol], [0, np.nan, 1e6]), (np.arange(3), [0, 1, 2.1]), (np.nan, [np.nan, np.nan, np.nan]), ([0], [atol, np.inf, -np.inf, np.nan]), @@ -3084,7 +3096,7 @@ def test_ip_isclose(self): np.isclose(x, y, rtol=rtol) def test_nep50_isclose(self): - below_one = float(1.-np.finfo('f8').eps) + below_one = float(1. 
- np.finfo('f8').eps) f32 = np.array(below_one, 'f4') # This is just 1 at float32 precision assert f32 > np.array(below_one) # NEP 50 broadcasting of python scalars @@ -3093,13 +3105,13 @@ def test_nep50_isclose(self): # one uses a numpy float64). assert np.isclose(f32, below_one, atol=0, rtol=0) assert np.isclose(f32, np.float32(0), atol=below_one) - assert np.isclose(f32, 2, atol=0, rtol=below_one/2) + assert np.isclose(f32, 2, atol=0, rtol=below_one / 2) assert not np.isclose(f32, np.float64(below_one), atol=0, rtol=0) assert not np.isclose(f32, np.float32(0), atol=np.float64(below_one)) - assert not np.isclose(f32, 2, atol=0, rtol=np.float64(below_one/2)) + assert not np.isclose(f32, 2, atol=0, rtol=np.float64(below_one / 2)) def tst_all_isclose(self, x, y): - assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y)) + assert_(np.all(np.isclose(x, y)), f"{x} and {y} not close") def tst_none_isclose(self, x, y): msg = "%s and %s shouldn't be close" @@ -3111,7 +3123,9 @@ def tst_isclose_allclose(self, x, y): if np.isscalar(x) and np.isscalar(y): assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) else: - assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + assert_array_equal( + np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y) + ) def test_ip_all_isclose(self): self._setup() @@ -3199,47 +3213,72 @@ def test_timedelta(self): assert np.allclose(a, a, atol=0, equal_nan=True) assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + def test_tol_warnings(self): + a = np.array([1, 2, 3]) + b = np.array([np.inf, np.nan, 1]) + + for i in b: + for j in b: + # Making sure that i and j are not both numbers, + # because that won't create a warning + if (i == 1) and (j == 1): + continue + + with warnings.catch_warnings(record=True) as w: + + warnings.simplefilter("always") + c = np.isclose(a, a, atol=i, rtol=j) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + expected = f"One of 
rtol or atol is not valid, atol: {i}, rtol: {j}" + assert expected in str(w[-1].message) + class TestStdVar: - def setup_method(self): - self.A = np.array([1, -1, 1, -1]) - self.real_var = 1 + def _create_data(self): + A = np.array([1, -1, 1, -1]) + real_var = 1 + return A, real_var def test_basic(self): - assert_almost_equal(np.var(self.A), self.real_var) - assert_almost_equal(np.std(self.A)**2, self.real_var) + A, real_var = self._create_data() + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) def test_scalars(self): assert_equal(np.var(1), 0) assert_equal(np.std(1), 0) def test_ddof1(self): - assert_almost_equal(np.var(self.A, ddof=1), - self.real_var * len(self.A) / (len(self.A) - 1)) - assert_almost_equal(np.std(self.A, ddof=1)**2, - self.real_var*len(self.A) / (len(self.A) - 1)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=1), + real_var * len(A) / (len(A) - 1)) + assert_almost_equal(np.std(A, ddof=1)**2, + real_var * len(A) / (len(A) - 1)) def test_ddof2(self): - assert_almost_equal(np.var(self.A, ddof=2), - self.real_var * len(self.A) / (len(self.A) - 2)) - assert_almost_equal(np.std(self.A, ddof=2)**2, - self.real_var * len(self.A) / (len(self.A) - 2)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=2), + real_var * len(A) / (len(A) - 2)) + assert_almost_equal(np.std(A, ddof=2)**2, + real_var * len(A) / (len(A) - 2)) def test_correction(self): + A, _ = self._create_data() assert_almost_equal( - np.var(self.A, correction=1), np.var(self.A, ddof=1) + np.var(A, correction=1), np.var(A, ddof=1) ) assert_almost_equal( - np.std(self.A, correction=1), np.std(self.A, ddof=1) + np.std(A, correction=1), np.std(A, ddof=1) ) err_msg = "ddof and correction can't be provided simultaneously." 
with assert_raises_regex(ValueError, err_msg): - np.var(self.A, ddof=1, correction=0) + np.var(A, ddof=1, correction=0) with assert_raises_regex(ValueError, err_msg): - np.std(self.A, ddof=1, correction=1) + np.std(A, ddof=1, correction=1) def test_out_scalar(self): d = np.arange(10) @@ -3268,26 +3307,22 @@ def test_scalars(self): class TestCreationFuncs: - # Test ones, zeros, empty and full. - - def setup_method(self): - dtypes = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} - # void, bytes, str - variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + def check_function(self, func, fill_value=None): + dtypes_info = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} keyfunc = lambda dtype: dtype.str - self.dtypes = sorted(dtypes - variable_sized | + variable_sized = {tp for tp in dtypes_info if tp.str.endswith('0')} + dtypes = sorted(dtypes_info - variable_sized | {np.dtype(tp.str.replace("0", str(i))) for tp in variable_sized for i in range(1, 10)}, key=keyfunc) - self.dtypes += [type(dt) for dt in sorted(dtypes, key=keyfunc)] - self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} - self.ndims = 10 + dtypes += [type(dt) for dt in sorted(dtypes_info, key=keyfunc)] + orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + ndims = 10 - def check_function(self, func, fill_value=None): par = ((0, 1, 2), - range(self.ndims), - self.orders, - self.dtypes) + range(ndims), + orders, + dtypes) fill_kwarg = {} if fill_value is not None: fill_kwarg = {'fill_value': fill_value} @@ -3313,7 +3348,7 @@ def check_function(self, func, fill_value=None): assert_equal(arr.dtype, np.dtype(dtype_str)) else: assert_equal(arr.dtype, np.dtype(dtype.type)) - assert_(getattr(arr.flags, self.orders[order])) + assert_(getattr(arr.flags, orders[order])) if fill_value is not None: if arr.dtype.str.startswith('|S'): @@ -3340,45 +3375,48 @@ def test_for_reference_leak(self): # Make sure we have an object for reference dim = 1 beg = sys.getrefcount(dim) - 
np.zeros([dim]*10) + np.zeros([dim] * 10) assert_(sys.getrefcount(dim) == beg) - np.ones([dim]*10) + np.ones([dim] * 10) assert_(sys.getrefcount(dim) == beg) - np.empty([dim]*10) + np.empty([dim] * 10) assert_(sys.getrefcount(dim) == beg) - np.full([dim]*10, 0) + np.full([dim] * 10, 0) assert_(sys.getrefcount(dim) == beg) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", [np.empty, np.zeros, np.ones, np.full]) + def test_signatures(self, func): + sig = inspect.signature(func) + params = sig.parameters + + assert len(params) in {5, 6} + + assert 'shape' in params + assert params["shape"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["shape"].default is inspect.Parameter.empty + + assert 'dtype' in params + assert params["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["dtype"].default is None + + assert 'order' in params + assert params["order"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["order"].default == "C" + + assert 'device' in params + assert params["device"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["device"].default is None + + assert 'like' in params + assert params["like"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["like"].default is None + class TestLikeFuncs: '''Test ones_like, zeros_like, empty_like and full_like''' - def setup_method(self): - self.data = [ - # Array scalars - (np.array(3.), None), - (np.array(3), 'f8'), - # 1D arrays - (np.arange(6, dtype='f4'), None), - (np.arange(6), 'c16'), - # 2D C-layout arrays - (np.arange(6).reshape(2, 3), None), - (np.arange(6).reshape(3, 2), 'i1'), - # 2D F-layout arrays - (np.arange(6).reshape((2, 3), order='F'), None), - (np.arange(6).reshape((3, 2), order='F'), 'i1'), - # 3D C-layout arrays - (np.arange(24).reshape(2, 3, 4), None), - (np.arange(24).reshape(4, 3, 2), 'f4'), - # 3D F-layout 
arrays - (np.arange(24).reshape((2, 3, 4), order='F'), None), - (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), - # 3D non-C/F-layout arrays - (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), - (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), - ] - self.shapes = [(), (5,), (5,6,), (5,6,7,)] - def compare_array_value(self, dz, value, fill_value): if value is not None: if fill_value: @@ -3391,16 +3429,41 @@ def compare_array_value(self, dz, value, fill_value): assert_(np.all(dz == value)) def check_like_function(self, like_function, value, fill_value=False): + data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + shapes = [(), (5,), (5, 6,), (5, 6, 7,)] + if fill_value: fill_kwarg = {'fill_value': value} else: fill_kwarg = {} - for d, dtype in self.data: + for d, dtype in data: # default (K) order, dtype dz = like_function(d, dtype=dtype, **fill_kwarg) assert_equal(dz.shape, d.shape) - assert_equal(np.array(dz.strides)*d.dtype.itemsize, - np.array(d.strides)*dz.dtype.itemsize) + assert_equal(np.array(dz.strides) * d.dtype.itemsize, + np.array(d.strides) * dz.dtype.itemsize) assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) if dtype is None: @@ -3443,7 +3506,7 @@ def check_like_function(self, 
like_function, value, fill_value=False): self.compare_array_value(dz, value, fill_value) # Test the 'shape' parameter - for s in self.shapes: + for s in shapes: for o in 'CFA': sz = like_function(d, dtype=dtype, shape=s, order=o, **fill_kwarg) @@ -3568,9 +3631,9 @@ def test_no_overwrite(self): assert_array_equal(k, np.ones(3)) def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=complex) - y = np.array([-1, -2j, 3+1j], dtype=complex) - r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex) + x = np.array([1, 2, 3, 4 + 1j], dtype=complex) + y = np.array([-1, -2j, 3 + 1j], dtype=complex) + r_z = np.array([3 - 1j, 6, 8 + 1j, 11 + 5j, -5 + 8j, -4 - 1j], dtype=complex) r_z = r_z[::-1].conjugate() z = np.correlate(y, x, mode='full') assert_array_almost_equal(z, r_z) @@ -3585,13 +3648,12 @@ def test_mode(self): d = np.ones(100) k = np.ones(3) default_mode = np.correlate(d, k, mode='valid') - with assert_warns(DeprecationWarning): - valid_mode = np.correlate(d, k, mode='v') - assert_array_equal(valid_mode, default_mode) + with assert_raises(ValueError): + np.correlate(d, k, mode='v') # integer mode with assert_raises(ValueError): np.correlate(d, k, mode=-1) - assert_array_equal(np.correlate(d, k, mode=0), valid_mode) + # assert_array_equal(np.correlate(d, k, mode=), default_mode) # illegal arguments with assert_raises(TypeError): np.correlate(d, k, mode=None) @@ -3614,24 +3676,33 @@ def test_mode(self): d = np.ones(100) k = np.ones(3) default_mode = np.convolve(d, k, mode='full') - with assert_warns(DeprecationWarning): - full_mode = np.convolve(d, k, mode='f') - assert_array_equal(full_mode, default_mode) + with assert_raises(ValueError): + np.convolve(d, k, mode='f') # integer mode with assert_raises(ValueError): np.convolve(d, k, mode=-1) - assert_array_equal(np.convolve(d, k, mode=2), full_mode) + assert_array_equal(np.convolve(d, k, mode=2), default_mode) # illegal arguments with assert_raises(TypeError): np.convolve(d, k, mode=None) + def 
test_convolve_empty_input_error_message(self): + """ + Test that convolve raises the correct error message when inputs are empty. + Regression test for gh-30272 (variable swapping bug). + """ + with pytest.raises(ValueError, match="a cannot be empty"): + np.convolve(np.array([]), np.array([1, 2])) + + with pytest.raises(ValueError, match="v cannot be empty"): + np.convolve(np.array([1, 2]), np.array([])) class TestArgwhere: @pytest.mark.parametrize('nd', [0, 1, 2]) def test_nd(self, nd): # get an nd array with multiple elements in every dimension - x = np.empty((2,)*nd, bool) + x = np.empty((2,) * nd, bool) # none x[...] = False @@ -3725,7 +3796,7 @@ def test_roll_unsigned_shift(self): shift = np.uint16(2) assert_equal(np.roll(x, shift), np.roll(x, 2)) - shift = np.uint64(2**63+2) + shift = np.uint64(2**63 + 2) assert_equal(np.roll(x, shift), np.roll(x, 2)) def test_roll_big_int(self): @@ -3751,14 +3822,14 @@ class TestRollaxis: (3, 4): (1, 2, 3, 4)} def test_exceptions(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4) + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4) assert_raises(AxisError, np.rollaxis, a, -5, 0) assert_raises(AxisError, np.rollaxis, a, 0, -5) assert_raises(AxisError, np.rollaxis, a, 4, 0) assert_raises(AxisError, np.rollaxis, a, 0, 5) def test_results(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() aind = np.indices(a.shape) assert_(a.flags['OWNDATA']) for (i, j) in self.tgtshape: @@ -3766,7 +3837,7 @@ def test_results(self): res = np.rollaxis(a, axis=i, start=j) i0, i1, i2, i3 = aind[np.array(res.shape) - 1] assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(i, j)], str((i,j))) + assert_(res.shape == self.tgtshape[(i, j)], str((i, j))) assert_(not res.flags['OWNDATA']) # negative axis, positive start @@ -3977,7 +4048,7 @@ def test_outer_out_param(): arr1 = np.ones((5,)) arr2 = np.ones((2,)) arr3 = np.linspace(-2, 2, 5) - out1 = 
np.ndarray(shape=(5,5)) + out1 = np.ndarray(shape=(5, 5)) out2 = np.ndarray(shape=(2, 5)) res1 = np.outer(arr1, arr3, out1) assert_equal(res1, out1) @@ -4011,7 +4082,7 @@ def test_scalar_input(self): assert_array_equal([[]], np.indices((0,), sparse=True)) def test_sparse(self): - [x, y] = np.indices((4,3), sparse=True) + [x, y] = np.indices((4, 3), sparse=True) assert_array_equal(x, np.array([[0], [1], [2], [3]])) assert_array_equal(y, np.array([[0, 1, 2]])) @@ -4138,23 +4209,36 @@ def test_number_of_arguments(self): assert_equal(mit.numiter, j) def test_broadcast_error_kwargs(self): - #gh-13455 + # gh-13455 arrs = [np.empty((5, 6, 7))] - mit = np.broadcast(*arrs) - mit2 = np.broadcast(*arrs, **{}) + mit = np.broadcast(*arrs) + mit2 = np.broadcast(*arrs, **{}) # noqa: PIE804 assert_equal(mit.shape, mit2.shape) assert_equal(mit.ndim, mit2.ndim) assert_equal(mit.nd, mit2.nd) assert_equal(mit.numiter, mit2.numiter) assert_(mit.iters[0].base is mit2.iters[0].base) - assert_raises(ValueError, np.broadcast, 1, **{'x': 1}) + assert_raises(ValueError, np.broadcast, 1, x=1) def test_shape_mismatch_error_message(self): with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and " r"arg 2 with shape \(2,\)"): np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_signatures(self): + sig_new = inspect.signature(np.broadcast) + assert len(sig_new.parameters) == 1 + assert "arrays" in sig_new.parameters + assert sig_new.parameters["arrays"].kind == inspect.Parameter.VAR_POSITIONAL + + sig_reset = inspect.signature(np.broadcast.reset) + assert len(sig_reset.parameters) == 1 + assert "self" in sig_reset.parameters + assert sig_reset.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + class TestKeepdims: @@ -4170,10 +4254,16 @@ def test_raise(self): class TestTensordot: + def test_rejects_duplicate_axes(self): + a = 
np.ones((2, 3, 3)) + b = np.ones((3, 3, 4)) + with pytest.raises(ValueError): + np.tensordot(a, b, axes=([1, 1], [0, 0])) + def test_zero_dimension(self): # Test resolution to issue #5663 - a = np.ndarray((3,0)) - b = np.ndarray((0,4)) + a = np.ndarray((3, 0)) + b = np.ndarray((0, 4)) td = np.tensordot(a, b, (1, 0)) assert_array_equal(td, np.dot(a, b)) assert_array_equal(td, np.einsum('ij,jk', a, b)) @@ -4181,7 +4271,8 @@ def test_zero_dimension(self): def test_zero_dimensional(self): # gh-12130 arr_0d = np.array(1) - ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined + # contracting no axes is well defined + ret = np.tensordot(arr_0d, arr_0d, ([], [])) assert_array_equal(ret, arr_0d) diff --git a/blimgui/dist64/numpy/_core/tests/test_numerictypes.py b/blimgui/dist64/numpy/_core/tests/test_numerictypes.py index 310ba82..737db5f 100644 --- a/blimgui/dist64/numpy/_core/tests/test_numerictypes.py +++ b/blimgui/dist64/numpy/_core/tests/test_numerictypes.py @@ -1,14 +1,17 @@ -import sys import itertools +import sys import pytest + import numpy as np import numpy._core.numerictypes as nt -from numpy._core.numerictypes import ( - issctype, sctype2char, maximum_sctype, sctypes -) +from numpy._core.numerictypes import issctype, maximum_sctype, sctype2char, sctypes from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, IS_PYPY + IS_PYPY, + assert_, + assert_equal, + assert_raises, + assert_raises_regex, ) # This is the structure of the table used for plain objects: @@ -73,7 +76,7 @@ ] -byteorder = {'little':'<', 'big':'>'}[sys.byteorder] +byteorder = {'little': '<', 'big': '>'}[sys.byteorder] def normalize_descr(descr): "Normalize a description adding the platform byteorder." 
@@ -97,8 +100,7 @@ def normalize_descr(descr): l = normalize_descr(dtype) out.append((item[0], l)) else: - raise ValueError("Expected a str or list and got %s" % - (type(item))) + raise ValueError(f"Expected a str or list and got {type(item)}") return out @@ -345,17 +347,16 @@ def test_assign(self): class TestMultipleFields: - def setup_method(self): - self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') - def _bad_call(self): - return self.ary['f0', 'f1'] + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + return ary['f0', 'f1'] def test_no_tuple(self): assert_raises(IndexError, self._bad_call) def test_return(self): - res = self.ary[['f0', 'f2']].tolist() + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + res = ary[['f0', 'f2']].tolist() assert_(res == [(1, 3), (5, 7)]) @@ -615,6 +616,35 @@ def test_names_are_undersood_by_dtype(self, t): assert np.dtype(t.__name__).type is t +class TestScalarTypeOrder: + @pytest.mark.parametrize(('a', 'b'), [ + # signedinteger + (np.byte, np.short), + (np.short, np.intc), + (np.intc, np.long), + (np.long, np.longlong), + # unsignedinteger + (np.ubyte, np.ushort), + (np.ushort, np.uintc), + (np.uintc, np.ulong), + (np.ulong, np.ulonglong), + # floating + (np.half, np.single), + (np.single, np.double), + (np.double, np.longdouble), + # complexfloating + (np.csingle, np.cdouble), + (np.cdouble, np.clongdouble), + # flexible + (np.bytes_, np.str_), + (np.str_, np.void), + # bouncy castles + (np.datetime64, np.timedelta64), + ]) + def test_stable_ordering(self, a: type[np.generic], b: type[np.generic]): + assert np.ScalarType.index(a) <= np.ScalarType.index(b) + + class TestBoolDefinition: def test_bool_definition(self): assert nt.bool is np.bool diff --git a/blimgui/dist64/numpy/_core/tests/test_overrides.py b/blimgui/dist64/numpy/_core/tests/test_overrides.py index f95536c..bbc2f04 100644 --- a/blimgui/dist64/numpy/_core/tests/test_overrides.py +++ 
b/blimgui/dist64/numpy/_core/tests/test_overrides.py @@ -1,20 +1,22 @@ import inspect -import sys import os +import pickle +import sys import tempfile from io import StringIO from unittest import mock -import pickle import pytest import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex) -from numpy.testing.overrides import get_overridable_numpy_array_functions from numpy._core.overrides import ( - _get_implementing_args, array_function_dispatch, - verify_matching_signatures) + _get_implementing_args, + array_function_dispatch, + verify_matching_signatures, +) +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex +from numpy.testing.overrides import get_overridable_numpy_array_functions + def _return_not_implemented(self, *args, **kwargs): return NotImplemented @@ -133,7 +135,7 @@ class D: assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) def test_too_many_duck_arrays(self): - namespace = dict(__array_function__=_return_not_implemented) + namespace = {'__array_function__': _return_not_implemented} types = [type('A' + str(i), (object,), namespace) for i in range(65)] relevant_args = [t() for t in types] @@ -211,14 +213,6 @@ def test_wrong_arguments(self): with pytest.raises(TypeError, match="kwargs must be a dict"): a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) - def test_wrong_arguments(self): - # Check our implementation guards against wrong arguments. 
- a = np.array([1, 2]) - with pytest.raises(TypeError, match="args must be a tuple"): - a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) - with pytest.raises(TypeError, match="kwargs must be a dict"): - a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) - class TestArrayFunctionDispatch: @@ -482,7 +476,6 @@ def func(*args): func(*objs) - class TestNDArrayMethods: def test_repr(self): @@ -526,8 +519,10 @@ def test_sum_on_mock_array(self): class ArrayProxy: def __init__(self, value): self.value = value + def __array_function__(self, *args, **kwargs): return self.value.__array_function__(*args, **kwargs) + def __array__(self, *args, **kwargs): return self.value.__array__(*args, **kwargs) @@ -555,7 +550,7 @@ def __array_function__(self, func, types, args, kwargs): class TestArrayLike: - def setup_method(self): + def _create_MyArray(self): class MyArray: def __init__(self, function=None): self.function = function @@ -568,20 +563,22 @@ def __array_function__(self, func, types, args, kwargs): return NotImplemented return my_func(*args, **kwargs) - self.MyArray = MyArray + return MyArray + def _create_MyNoArrayFunctionArray(self): class MyNoArrayFunctionArray: def __init__(self, function=None): self.function = function - self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + return MyNoArrayFunctionArray + def _create_MySubclass(self): class MySubclass(np.ndarray): def __array_function__(self, func, types, args, kwargs): result = super().__array_function__(func, types, args, kwargs) return result.view(self.__class__) - self.MySubclass = MySubclass + return MySubclass def add_method(self, name, arr_class, enable_value_error=False): def _definition(*args, **kwargs): @@ -598,9 +595,10 @@ def func_args(*args, **kwargs): return args, kwargs def test_array_like_not_implemented(self): - self.add_method('array', self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) - ref = self.MyArray.array() + ref = MyArray.array() with 
assert_raises_regex(TypeError, 'no implementation found'): array_like = np.asarray(1, like=ref) @@ -627,7 +625,6 @@ def test_array_like_not_implemented(self): delimiter=',')), ] - def test_nep35_functions_as_array_functions(self,): all_array_functions = get_overridable_numpy_array_functions() like_array_functions_subset = { @@ -652,15 +649,16 @@ def test_nep35_functions_as_array_functions(self,): @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like(self, function, args, kwargs, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) - my_func = getattr(self.MyArray, function) + my_func = getattr(MyArray, function) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) @@ -678,19 +676,20 @@ def test_array_like(self, function, args, kwargs, numpy_ref): assert_equal(array_like, np_arr) else: - assert type(array_like) is self.MyArray + assert type(array_like) is MyArray assert array_like.function is my_func @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) def test_no_array_function_like(self, function, args, kwargs, ref): - self.add_method('array', self.MyNoArrayFunctionArray) - self.add_method(function, self.MyNoArrayFunctionArray) + MyNoArrayFunctionArray = self._create_MyNoArrayFunctionArray() + self.add_method('array', MyNoArrayFunctionArray) + self.add_method(function, MyNoArrayFunctionArray) np_func = getattr(np, function) # Instantiate ref if it's the MyNoArrayFunctionArray class if ref == "MyNoArrayFunctionArray": - ref = self.MyNoArrayFunctionArray.array() + ref = 
MyNoArrayFunctionArray.array() like_args = tuple(a() if callable(a) else a for a in args) @@ -700,11 +699,12 @@ def test_no_array_function_like(self, function, args, kwargs, ref): @pytest.mark.parametrize('function, args, kwargs', _array_tests) def test_subclass(self, function, args, kwargs): - ref = np.array(1).view(self.MySubclass) + MySubclass = self._create_MySubclass() + ref = np.array(1).view(MySubclass) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) - assert type(array_like) is self.MySubclass + assert type(array_like) is MySubclass if np_func is np.empty: return np_args = tuple(a() if callable(a) else a for a in args) @@ -713,13 +713,14 @@ def test_subclass(self, function, args, kwargs): @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like_fromfile(self, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method("fromfile", self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method("fromfile", MyArray) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() data = np.random.random(5) @@ -734,13 +735,14 @@ def test_array_like_fromfile(self, numpy_ref): assert_equal(np_res, data) assert_equal(array_like, np_res) else: - assert type(array_like) is self.MyArray - assert array_like.function is self.MyArray.fromfile + assert type(array_like) is MyArray + assert array_like.function is MyArray.fromfile def test_exception_handling(self): - self.add_method('array', self.MyArray, enable_value_error=True) + MyArray = self._create_MyArray() + self.add_method('array', MyArray, enable_value_error=True) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises(TypeError): # Raises the error about `value_error` being invalid first @@ -748,8 +750,9 @@ def test_exception_handling(self): @pytest.mark.parametrize('function, args, kwargs', _array_tests) 
def test_like_as_none(self, function, args, kwargs): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) diff --git a/blimgui/dist64/numpy/_core/tests/test_print.py b/blimgui/dist64/numpy/_core/tests/test_print.py index 68cdb15..ed88ec9 100644 --- a/blimgui/dist64/numpy/_core/tests/test_print.py +++ b/blimgui/dist64/numpy/_core/tests/test_print.py @@ -1,13 +1,11 @@ import sys +from io import StringIO import pytest import numpy as np -from numpy.testing import assert_, assert_equal, IS_MUSL from numpy._core.tests._locales import CommaDecimalPointLocale - - -from io import StringIO +from numpy.testing import IS_MUSL, assert_, assert_equal _REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} @@ -23,15 +21,15 @@ def test_float_types(tp): """ for x in [0, 1, -1, 1e20]: assert_equal(str(tp(x)), str(float(x)), - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') if tp(1e16).itemsize > 4: assert_equal(str(tp(1e16)), str(float('1e16')), - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') else: ref = '1e+16' assert_equal(str(tp(1e16)), ref, - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') @pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) @@ -45,7 +43,7 @@ def test_nan_inf_float(tp): """ for x in [np.inf, -np.inf, np.nan]: assert_equal(str(tp(x)), _REF[x], - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') @pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) @@ -59,19 +57,19 @@ def test_complex_types(tp): """ for x in [0, 1, -1, 1e20]: assert_equal(str(tp(x)), str(complex(x)), - err_msg='Failed str 
formatting for type %s' % tp) - assert_equal(str(tp(x*1j)), str(complex(x*1j)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x * 1j)), str(complex(x * 1j)), + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x + x * 1j)), str(complex(x + x * 1j)), + err_msg=f'Failed str formatting for type {tp}') if tp(1e16).itemsize > 8: assert_equal(str(tp(1e16)), str(complex(1e16)), - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') else: ref = '(1e+16+0j)' assert_equal(str(tp(1e16)), ref, - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') @pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) @@ -116,9 +114,10 @@ def _test_redirected_print(x, tp, ref=None): sys.stdout = stdout assert_equal(file.getvalue(), file_tp.getvalue(), - err_msg='print failed for type%s' % tp) + err_msg=f'print failed for type{tp}') +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) def test_float_type_print(tp): """Check formatting when using print """ @@ -129,12 +128,13 @@ def test_float_type_print(tp): _test_redirected_print(float(x), tp, _REF[x]) if tp(1e16).itemsize > 4: - _test_redirected_print(float(1e16), tp) + _test_redirected_print(1e16, tp) else: ref = '1e+16' - _test_redirected_print(float(1e16), tp, ref) + _test_redirected_print(1e16, tp, ref) +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) def test_complex_type_print(tp): """Check formatting when using print """ @@ -170,14 +170,14 @@ def test_scalar_format(): ('{0:g}', 1.5, np.float32), ('{0:g}', 1.5, np.float64), ('{0:g}', 1.5, 
np.longdouble), - ('{0:g}', 1.5+0.5j, np.complex64), - ('{0:g}', 1.5+0.5j, np.complex128), - ('{0:g}', 1.5+0.5j, np.clongdouble)] + ('{0:g}', 1.5 + 0.5j, np.complex64), + ('{0:g}', 1.5 + 0.5j, np.complex128), + ('{0:g}', 1.5 + 0.5j, np.clongdouble)] for (fmat, val, valtype) in tests: try: assert_equal(fmat.format(val), fmat.format(valtype(val)), - "failed with val %s, type %s" % (val, valtype)) + f"failed with val {val}, type {valtype}") except ValueError as e: assert_(False, "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % @@ -191,12 +191,12 @@ def test_scalar_format(): class TestCommaDecimalPointLocale(CommaDecimalPointLocale): def test_locale_single(self): - assert_equal(str(np.float32(1.2)), str(float(1.2))) + assert_equal(str(np.float32(1.2)), str(1.2)) def test_locale_double(self): - assert_equal(str(np.double(1.2)), str(float(1.2))) + assert_equal(str(np.double(1.2)), str(1.2)) @pytest.mark.skipif(IS_MUSL, reason="test flaky on musllinux") def test_locale_longdouble(self): - assert_equal(str(np.longdouble('1.2')), str(float(1.2))) + assert_equal(str(np.longdouble('1.2')), str(1.2)) diff --git a/blimgui/dist64/numpy/_core/tests/test_protocols.py b/blimgui/dist64/numpy/_core/tests/test_protocols.py index 4c2cbfa..5d93262 100644 --- a/blimgui/dist64/numpy/_core/tests/test_protocols.py +++ b/blimgui/dist64/numpy/_core/tests/test_protocols.py @@ -1,5 +1,7 @@ -import pytest import warnings + +import pytest + import numpy as np @@ -24,7 +26,7 @@ def __getattr__(self, name): return getattr(self.array, name) def __repr__(self): - return "".format(self=self) + return f"" array = Wrapper(np.arange(10)) with pytest.raises(UserWarning, match="object got converted"): @@ -38,7 +40,6 @@ class Wrapper: def __array__(self, dtype=None, copy=None): return np.array([self.val], dtype=dtype, copy=copy) - wrapped = Wrapper() arr = np.array(wrapped, dtype=str) assert arr.dtype == 'U100' diff --git a/blimgui/dist64/numpy/_core/tests/test_records.py 
b/blimgui/dist64/numpy/_core/tests/test_records.py index 98a80b8..8eaedda 100644 --- a/blimgui/dist64/numpy/_core/tests/test_records.py +++ b/blimgui/dist64/numpy/_core/tests/test_records.py @@ -1,17 +1,21 @@ import collections.abc +import pickle import textwrap from io import BytesIO from os import path from pathlib import Path -import pickle import pytest import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_array_almost_equal, - assert_raises, temppath, - ) + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) class TestFromrecords: @@ -157,7 +161,7 @@ def test_0d_recarray_repr(self): np.set_printoptions(legacy=False) def test_recarray_from_repr(self): - a = np.array([(1,'ABC'), (2, "DEF")], + a = np.array([(1, 'ABC'), (2, "DEF")], dtype=[('foo', int), ('bar', 'S4')]) recordarr = np.rec.array(a) recarr = a.view(np.recarray) @@ -181,35 +185,35 @@ def test_recarray_from_repr(self): assert_equal(recordview, recordview_r) def test_recarray_views(self): - a = np.array([(1,'ABC'), (2, "DEF")], + a = np.array([(1, 'ABC'), (2, "DEF")], dtype=[('foo', int), ('bar', 'S4')]) - b = np.array([1,2,3,4,5], dtype=np.int64) + b = np.array([1, 2, 3, 4, 5], dtype=np.int64) - #check that np.rec.array gives right dtypes + # check that np.rec.array gives right dtypes assert_equal(np.rec.array(a).dtype.type, np.record) assert_equal(type(np.rec.array(a)), np.recarray) assert_equal(np.rec.array(b).dtype.type, np.int64) assert_equal(type(np.rec.array(b)), np.recarray) - #check that viewing as recarray does the same + # check that viewing as recarray does the same assert_equal(a.view(np.recarray).dtype.type, np.record) assert_equal(type(a.view(np.recarray)), np.recarray) assert_equal(b.view(np.recarray).dtype.type, np.int64) assert_equal(type(b.view(np.recarray)), np.recarray) - #check that view to non-structured dtype preserves type=np.recarray + # check that view to non-structured dtype 
preserves type=np.recarray r = np.rec.array(np.ones(4, dtype="f4,i4")) rv = r.view('f8').view('f4,i4') assert_equal(type(rv), np.recarray) assert_equal(rv.dtype.type, np.record) - #check that getitem also preserves np.recarray and np.record + # check that getitem also preserves np.recarray and np.record r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'i4,i4')])) assert_equal(r['c'].dtype.type, np.record) assert_equal(type(r['c']), np.recarray) - #and that it preserves subclasses (gh-6949) + # and that it preserves subclasses (gh-6949) class C(np.recarray): pass @@ -218,10 +222,10 @@ class C(np.recarray): # check that accessing nested structures keep record type, but # not for subarrays, non-void structures, non-structured voids - test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)), + test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4', 2)), ('d', ('i8', 'i4,i4'))] - r = np.rec.array([((1,1), b'11111111', [1,1], 1), - ((1,1), b'11111111', [1,1], 1)], dtype=test_dtype) + r = np.rec.array([((1, 1), b'11111111', [1, 1], 1), + ((1, 1), b'11111111', [1, 1], 1)], dtype=test_dtype) assert_equal(r.a.dtype.type, np.record) assert_equal(r.b.dtype.type, np.void) assert_equal(r.c.dtype.type, np.float32) @@ -229,11 +233,11 @@ class C(np.recarray): # check the same, but for views r = np.rec.array(np.ones(4, dtype='i4,i4')) assert_equal(r.view('f4,f4').dtype.type, np.record) - assert_equal(r.view(('i4',2)).dtype.type, np.int32) + assert_equal(r.view(('i4', 2)).dtype.type, np.int32) assert_equal(r.view('V8').dtype.type, np.void) assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64) - #check that we can undo the view + # check that we can undo the view arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')] for arr in arrs: rec = np.rec.array(arr) @@ -297,8 +301,8 @@ def test_recarray_stringtypes(self): def test_recarray_returntypes(self): qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)} - a = np.rec.array([('abc ', (1,1), 1, 
('abcde', 'fgehi')), - ('abc', (2,3), 1, ('abcde', 'jklmn'))], + a = np.rec.array([('abc ', (1, 1), 1, ('abcde', 'fgehi')), + ('abc', (2, 3), 1, ('abcde', 'jklmn'))], dtype=[('foo', 'S4'), ('bar', [('A', int), ('B', int)]), ('baz', int), ('qux', qux_fields)]) @@ -345,7 +349,7 @@ def test_tofile_fromfile(self): path = Path(path) np.random.seed(123) a = np.random.rand(10).astype('f8,i4,S5') - a[5] = (0.5,10,'abcde') + a[5] = (0.5, 10, 'abcde') with path.open("wb") as fd: a.tofile(fd) x = np._core.records.fromfile( @@ -355,26 +359,26 @@ def test_tofile_fromfile(self): class TestRecord: - def setup_method(self): - self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], + def _create_data(self): + return np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], dtype=[("col1", " 2 ** 31 c_arr = np.ctypeslib.as_ctypes(arr) @@ -2472,21 +2483,22 @@ def test_complex_conversion_error(self): def test__array_interface__descr(self): # gh-17068 - dt = np.dtype(dict(names=['a', 'b'], - offsets=[0, 0], - formats=[np.int64, np.int64])) + dt = np.dtype({'names': ['a', 'b'], + 'offsets': [0, 0], + 'formats': [np.int64, np.int64]}) descr = np.array((1, 1), dtype=dt).__array_interface__['descr'] assert descr == [('', '|V8')] # instead of [(b'', '|V8')] @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') @requires_memory(free_bytes=9e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_dot_big_stride(self): # gh-17111 # blas stride = stride//itemsize > int32 max int32_max = np.iinfo(np.int32).max n = int32_max + 3 a = np.empty([n], dtype=np.float32) - b = a[::n-1] + b = a[::n - 1] b[...] = 1 assert b.strides[0] > int32_max * b.dtype.itemsize assert np.dot(b, b) == 2.0 @@ -2558,7 +2570,7 @@ def test_load_ufunc_pickle(self): # ufuncs are pickled with a semi-private path in # numpy.core._multiarray_umath and must be loadable without warning # despite np.core being deprecated. 
- test_data = b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core._multiarray_umath\x94\x8c\x03add\x94\x93\x94.' # noqa + test_data = b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core._multiarray_umath\x94\x8c\x03add\x94\x93\x94.' result = pickle.loads(test_data, encoding='bytes') assert result is np.add @@ -2573,21 +2585,23 @@ def test__array_namespace__(self): assert xp is np xp = arr.__array_namespace__(api_version="2023.12") assert xp is np + xp = arr.__array_namespace__(api_version="2024.12") + assert xp is np xp = arr.__array_namespace__(api_version=None) assert xp is np with pytest.raises( ValueError, - match="Version \"2024.12\" of the Array API Standard " + match="Version \"2025.12\" of the Array API Standard " "is not supported." ): - arr.__array_namespace__(api_version="2024.12") + arr.__array_namespace__(api_version="2025.12") with pytest.raises( ValueError, match="Only None and strings are allowed as the Array API version" ): - arr.__array_namespace__(api_version=2023) + arr.__array_namespace__(api_version=2024) def test_isin_refcnt_bug(self): # gh-25295 diff --git a/blimgui/dist64/numpy/_core/tests/test_scalar_ctors.py b/blimgui/dist64/numpy/_core/tests/test_scalar_ctors.py index 1ba7843..849d379 100644 --- a/blimgui/dist64/numpy/_core/tests/test_scalar_ctors.py +++ b/blimgui/dist64/numpy/_core/tests/test_scalar_ctors.py @@ -4,9 +4,8 @@ import pytest import numpy as np -from numpy.testing import ( - assert_equal, assert_almost_equal, assert_warns, - ) +from numpy.testing import assert_almost_equal, assert_equal + class TestFromString: def test_floating(self): @@ -26,7 +25,7 @@ def test_floating_overflow(self): assert_equal(fsingle, np.inf) fdouble = np.double('1e10000') assert_equal(fdouble, np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '1e10000') assert_equal(flongdouble, np.inf) fhalf = np.half('-1e10000') @@ -35,7 +34,7 @@ def 
test_floating_overflow(self): assert_equal(fsingle, -np.inf) fdouble = np.double('-1e10000') assert_equal(fdouble, -np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '-1e10000') assert_equal(flongdouble, -np.inf) diff --git a/blimgui/dist64/numpy/_core/tests/test_scalar_methods.py b/blimgui/dist64/numpy/_core/tests/test_scalar_methods.py index 7ad87f3..fe3944e 100644 --- a/blimgui/dist64/numpy/_core/tests/test_scalar_methods.py +++ b/blimgui/dist64/numpy/_core/tests/test_scalar_methods.py @@ -2,15 +2,17 @@ Test the scalar constructors, which also do type-coercion """ import fractions +import inspect import platform +import sys import types -from typing import Any, Type +from typing import Any, Literal import pytest -import numpy as np +import numpy as np from numpy._core import sctypes -from numpy.testing import assert_equal, assert_raises, IS_MUSL +from numpy.testing import IS_PYPY, assert_equal, assert_raises class TestAsIntegerRatio: @@ -105,7 +107,7 @@ def test_roundtrip(self, ftype, frac_vals, exp_vals): # the values may not fit in any float type pytest.skip("longdouble too small on this platform") - assert_equal(nf / df, f, "{}/{}".format(n, d)) + assert_equal(nf / df, f, f"{n}/{d}") class TestIsInteger: @@ -143,7 +145,7 @@ class TestClassGetItem: np.signedinteger, np.floating, ]) - def test_abc(self, cls: Type[np.number]) -> None: + def test_abc(self, cls: type[np.number]) -> None: alias = cls[Any] assert isinstance(alias, types.GenericAlias) assert alias.__origin__ is cls @@ -164,15 +166,19 @@ def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None: np.complexfloating[arg_tup] @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character]) - def test_abc_non_numeric(self, cls: Type[np.generic]) -> None: + def test_abc_non_numeric(self, cls: type[np.generic]) -> None: with pytest.raises(TypeError): cls[Any] @pytest.mark.parametrize("code", 
np.typecodes["All"]) def test_concrete(self, code: str) -> None: cls = np.dtype(code).type - with pytest.raises(TypeError): - cls[Any] + if cls in {np.bool, np.datetime64}: + # these are intentionally subscriptable + assert cls[Any] + else: + with pytest.raises(TypeError): + cls[Any] @pytest.mark.parametrize("arg_len", range(4)) def test_subscript_tuple(self, arg_len: int) -> None: @@ -186,15 +192,19 @@ def test_subscript_tuple(self, arg_len: int) -> None: def test_subscript_scalar(self) -> None: assert np.number[Any] + @pytest.mark.parametrize("subscript", [Literal[True], Literal[False]]) + def test_subscript_bool(self, subscript: Literal[True, False]) -> None: + assert isinstance(np.bool[subscript], types.GenericAlias) + class TestBitCount: # derived in part from the cpython test "test_bit_count" - @pytest.mark.parametrize("itype", sctypes['int']+sctypes['uint']) + @pytest.mark.parametrize("itype", sctypes['int'] + sctypes['uint']) def test_small(self, itype): for a in range(max(np.iinfo(itype).min, 0), 128): msg = f"Smoke test for {itype}({a}).bit_count()" - assert itype(a).bit_count() == bin(a).count("1"), msg + assert itype(a).bit_count() == a.bit_count(), msg def test_bit_count(self): for exp in [10, 17, 63]: @@ -210,7 +220,7 @@ class TestDevice: Test scalar.device attribute and scalar.to_device() method. 
""" scalars = [np.bool(True), np.int64(1), np.uint64(1), np.float64(1.0), - np.complex128(1+1j)] + np.complex128(1 + 1j)] @pytest.mark.parametrize("scalar", scalars) def test_device(self, scalar): @@ -244,3 +254,75 @@ def test_array_wrap(scalar): arr1d = np.array([3], dtype=np.int8) assert scalar.__array_wrap__(arr1d) is arr1d assert scalar.__array_wrap__(arr1d, None, True) is arr1d + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestSignature: + # test that scalar types have a valid __text_signature__ or __signature__ set + @pytest.mark.parametrize( + "sctype", + [ + *sctypes["int"], + *sctypes["uint"], + *sctypes["float"], + *sctypes["complex"], + *sctypes["others"], + np.datetime64, + np.timedelta64, + ], + ) + def test_constructor_signatures(self, sctype: type[np.generic]): + try: + sig = inspect.signature(sctype) + except ValueError: + pytest.fail(f"missing signature: {sctype}") + + assert sig.parameters + + @pytest.mark.parametrize( + "sctype", + [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], + ) + def test_method_signatures_is_integer(self, sctype: type[np.integer | np.floating]): + try: + sig = inspect.signature(sctype.is_integer) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.is_integer") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize("sctype", sctypes["float"]) + def test_method_signatures_as_integer_ratio(self, sctype: type[np.floating]): + try: + sig = inspect.signature(sctype.as_integer_ratio) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize( + "method_name", + [ + "__array_namespace__", "__copy__", "__deepcopy__", "all", "any", 
"argmax", + "argmin", "argsort", "astype", "byteswap", "choose", "clip", "compress", + "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dump", + "dumps", "fill", "flatten", "getfield", "item", "max", "mean", "min", + "nonzero", "prod", "put", "ravel", "repeat", "reshape", "resize", "round", + "searchsorted", "setfield", "setflags", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "to_device", "tobytes", "tofile", "tolist", "trace", + "transpose", "var", "view", + ], + ) + def test_array_scalar_method_signatures(self, method_name: str): + # methods shared by np.generic and np.ndarray should have the same signature + fn_generic = getattr(np.generic, method_name) + sig_generic = inspect.signature(fn_generic) + assert "self" in sig_generic.parameters + assert sig_generic.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + fn_ndarray = getattr(np.ndarray, method_name) + sig_ndarray = inspect.signature(fn_ndarray) + assert sig_generic == sig_ndarray diff --git a/blimgui/dist64/numpy/_core/tests/test_scalarbuffer.py b/blimgui/dist64/numpy/_core/tests/test_scalarbuffer.py index e8a5c1c..9f74816 100644 --- a/blimgui/dist64/numpy/_core/tests/test_scalarbuffer.py +++ b/blimgui/dist64/numpy/_core/tests/test_scalarbuffer.py @@ -1,11 +1,11 @@ """ Test scalar buffer interface adheres to PEP 3118 """ -import numpy as np -from numpy._core._rational_tests import rational -from numpy._core._multiarray_tests import get_buffer_info import pytest +import numpy as np +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational from numpy.testing import assert_, assert_equal, assert_raises # PEP3118 format strings for native (standard alignment and byteorder) types @@ -55,8 +55,8 @@ def test_scalar_dim(self, scalar): @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) def test_scalar_code_and_properties(self, scalar, code): x = scalar() - expected = dict(strides=(), 
itemsize=x.dtype.itemsize, ndim=0, - shape=(), format=code, readonly=True) + expected = {'strides': (), 'itemsize': x.dtype.itemsize, 'ndim': 0, + 'shape': (), 'format': code, 'readonly': True} mv_x = memoryview(x) assert self._as_dict(mv_x) == expected @@ -93,8 +93,8 @@ def test_void_scalar_structured_data(self): get_buffer_info(x, ["WRITABLE"]) def _as_dict(self, m): - return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize, - ndim=m.ndim, format=m.format, readonly=m.readonly) + return {'strides': m.strides, 'shape': m.shape, 'itemsize': m.itemsize, + 'ndim': m.ndim, 'format': m.format, 'readonly': m.readonly} def test_datetime_memoryview(self): # gh-11656 @@ -102,8 +102,8 @@ def test_datetime_memoryview(self): dt1 = np.datetime64('2016-01-01') dt2 = np.datetime64('2017-01-01') - expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,), - format='B', readonly=True) + expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, 'shape': (8,), + 'format': 'B', 'readonly': True} v = memoryview(dt1) assert self._as_dict(v) == expected @@ -128,8 +128,8 @@ def test_str_ucs4(self, s): s = np.str_(s) # only our subclass implements the buffer protocol # all the same, characters always encode as ucs4 - expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w', - readonly=True) + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), + 'format': '2w', 'readonly': True} v = memoryview(s) assert self._as_dict(v) == expected diff --git a/blimgui/dist64/numpy/_core/tests/test_scalarinherit.py b/blimgui/dist64/numpy/_core/tests/test_scalarinherit.py index 3751c6c..acca626 100644 --- a/blimgui/dist64/numpy/_core/tests/test_scalarinherit.py +++ b/blimgui/dist64/numpy/_core/tests/test_scalarinherit.py @@ -93,7 +93,7 @@ class MyBytes(bytes, np.generic): pass ret = s + MyBytes(b'abc') - assert(type(ret) is type(s)) + assert type(ret) is type(s) assert ret == b"defabc" def test_char_repeat(self): diff --git 
a/blimgui/dist64/numpy/_core/tests/test_scalarmath.py b/blimgui/dist64/numpy/_core/tests/test_scalarmath.py index c21f782..6a87859 100644 --- a/blimgui/dist64/numpy/_core/tests/test_scalarmath.py +++ b/blimgui/dist64/numpy/_core/tests/test_scalarmath.py @@ -1,23 +1,29 @@ import contextlib -import sys -import warnings import itertools import operator import platform -from numpy._utils import _pep440 +import sys +import warnings + import pytest from hypothesis import given, settings -from hypothesis.strategies import sampled_from from hypothesis.extra import numpy as hynp +from hypothesis.strategies import sampled_from import numpy as np -from numpy.exceptions import ComplexWarning from numpy._core._rational_tests import rational +from numpy._utils import _pep440 +from numpy.exceptions import ComplexWarning from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_almost_equal, - assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, - assert_warns, check_support_sve, - ) + IS_PYPY, + _gen_alignment_data, + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + check_support_sve, +) types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, np.int_, np.uint, np.longlong, np.ulonglong, @@ -45,7 +51,7 @@ class TestTypes: def test_types(self): for atype in types: a = atype(1) - assert_(a == 1, "error with %r: got %r" % (atype, a)) + assert_(a == 1, f"error with {atype!r}: got {a!r}") def test_type_add(self): # list of types @@ -68,7 +74,7 @@ def test_type_add(self): (k, np.dtype(atype).char, l, np.dtype(btype).char)) def test_type_create(self): - for k, atype in enumerate(types): + for atype in types: a = np.array([1, 2, 3], atype) b = atype([1, 2, 3]) assert_equal(a, b) @@ -171,11 +177,11 @@ def test_blocked(self): inp2[...] 
+= np.arange(inp2.size, dtype=dt) + 1 assert_almost_equal(np.square(inp2), - np.multiply(inp2, inp2), err_msg=msg) + np.multiply(inp2, inp2), err_msg=msg) # skip true divide for ints if dt != np.int32: assert_almost_equal(np.reciprocal(inp2), - np.divide(1, inp2), err_msg=msg) + np.divide(1, inp2), err_msg=msg) inp1[...] = np.ones_like(inp1) np.add(inp1, 2, out=out) @@ -202,13 +208,13 @@ def test_small_types(self): for t in [np.int8, np.int16, np.float16]: a = t(3) b = a ** 4 - assert_(b == 81, "error with %r: got %r" % (t, b)) + assert_(b == 81, f"error with {t!r}: got {b!r}") def test_large_types(self): for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: a = t(51) b = a ** 4 - msg = "error with %r: got %r" % (t, b) + msg = f"error with {t!r}: got {b!r}" if np.issubdtype(t, np.integer): assert_(b == 6765201, msg) else: @@ -259,8 +265,7 @@ def test_mixed_types(self): a = t1(3) b = t2(2) result = a**b - msg = ("error with %r and %r:" - "got %r, expected %r") % (t1, t2, result, 9) + msg = f"error with {t1!r} and {t2!r}:got {result!r}, expected {9!r}" if np.issubdtype(np.dtype(result), np.integer): assert_(result == 9, msg) else: @@ -298,10 +303,10 @@ def test_modulus_basic(self): for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*71, dtype=dt1)[()] - b = np.array(sg2*19, dtype=dt2)[()] + a = np.array(sg1 * 71, dtype=dt1)[()] + b = np.array(sg2 * 19, dtype=dt2)[()] div, rem = op(a, b) - assert_equal(div*b + rem, a, err_msg=msg) + assert_equal(div * b + rem, a, err_msg=msg) if sg2 == -1: assert_(b < rem <= 0, msg) else: @@ -315,7 +320,7 @@ def test_float_modulus_exact(self): dividend = nlst + [0] + plst divisor = nlst + plst arg = list(itertools.product(dividend, divisor)) - tgt = list(divmod(*t) for t in arg) + tgt = [divmod(*t) for t in arg] a, b = np.array(arg, dtype=int).T # convert exact integer results from Python to 
float so that @@ -326,11 +331,11 @@ def test_float_modulus_exact(self): for op in [floordiv_and_mod, divmod]: for dt in np.typecodes['Float']: - msg = 'op: %s, dtype: %s' % (op.__name__, dt) + msg = f'op: {op.__name__}, dtype: {dt}' fa = a.astype(dt) fb = b.astype(dt) # use list comprehension so a_ and b_ are scalars - div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) + div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) assert_equal(div, tgtdiv, err_msg=msg) assert_equal(rem, tgtrem, err_msg=msg) @@ -342,11 +347,11 @@ def test_float_modulus_roundoff(self): for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*78*6e-8, dtype=dt1)[()] - b = np.array(sg2*6e-8, dtype=dt2)[()] + a = np.array(sg1 * 78 * 6e-8, dtype=dt1)[()] + b = np.array(sg2 * 6e-8, dtype=dt2)[()] div, rem = op(a, b) # Equal assertion should hold when fmod is used - assert_equal(div*b + rem, a, err_msg=msg) + assert_equal(div * b + rem, a, err_msg=msg) if sg2 == -1: assert_(b < rem <= 0, msg) else: @@ -358,31 +363,26 @@ def test_float_modulus_corner_cases(self): b = np.array(1.0, dtype=dt) a = np.nextafter(np.array(0.0, dtype=dt), -b) rem = operator.mod(a, b) - assert_(rem <= b, 'dt: %s' % dt) + assert_(rem <= b, f'dt: {dt}') rem = operator.mod(-a, -b) - assert_(rem >= -b, 'dt: %s' % dt) + assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") - sup.filter(RuntimeWarning, "invalid value encountered in divmod") + with warnings.catch_warnings(), np.errstate(all='ignore'): for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) 
finf = np.array(np.inf, dtype=dt) fnan = np.array(np.nan, dtype=dt) rem = operator.mod(fone, fzer) - assert_(np.isnan(rem), 'dt: %s' % dt) + assert_(np.isnan(rem), f'dt: {dt}') # MSVC 2008 returns NaN here, so disable the check. #rem = operator.mod(fone, finf) #assert_(rem == fone, 'dt: %s' % dt) rem = operator.mod(fone, fnan) - assert_(np.isnan(rem), 'dt: %s' % dt) + assert_(np.isnan(rem), f'dt: {dt}') rem = operator.mod(finf, fone) - assert_(np.isnan(rem), 'dt: %s' % dt) + assert_(np.isnan(rem), f'dt: {dt}') for op in [floordiv_and_mod, divmod]: div, mod = op(fone, fzer) assert_(np.isinf(div)) and assert_(np.isnan(mod)) @@ -397,6 +397,15 @@ def test_inplace_floordiv_handling(self): match=r"Cannot cast ufunc 'floor_divide' output from"): a //= b +class TestComparison: + def test_comparision_different_types(self): + x = np.array(1) + y = np.array('s') + eq = x == y + neq = x != y + assert eq is np.bool_(False) + assert neq is np.bool_(True) + class TestComplexDivision: def test_zero_division(self): @@ -404,17 +413,17 @@ def test_zero_division(self): for t in [np.complex64, np.complex128]: a = t(0.0) b = t(1.0) - assert_(np.isinf(b/a)) + assert_(np.isinf(b / a)) b = t(complex(np.inf, np.inf)) - assert_(np.isinf(b/a)) + assert_(np.isinf(b / a)) b = t(complex(np.inf, np.nan)) - assert_(np.isinf(b/a)) + assert_(np.isinf(b / a)) b = t(complex(np.nan, np.inf)) - assert_(np.isinf(b/a)) + assert_(np.isinf(b / a)) b = t(complex(np.nan, np.nan)) - assert_(np.isnan(b/a)) + assert_(np.isnan(b / a)) b = t(0.) 
- assert_(np.isnan(b/a)) + assert_(np.isnan(b / a)) def test_signed_zeros(self): with np.errstate(all="ignore"): @@ -422,14 +431,14 @@ def test_signed_zeros(self): # tupled (numerator, denominator, expected) # for testing as expected == numerator/denominator data = ( - (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)), - (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)), - (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)), - (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)), - (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0)) + (( 0.0, -1.0), ( 0.0, 1.0), (-1.0, -0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + (( 0.0, -1.0), (-0.0, -1.0), ( 1.0, 0.0)), + (( 0.0, -1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0, -1.0), (-1.0, 0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, 1.0), ( 0.0, -1.0), (-1.0, -0.0)) ) for cases in data: n = cases[0] @@ -446,7 +455,7 @@ def test_branches(self): for t in [np.complex64, np.complex128]: # tupled (numerator, denominator, expected) # for testing as expected == numerator/denominator - data = list() + data = [] # trigger branch: real(fabs(denom)) > imag(fabs(denom)) # followed by else condition as neither are == 0 @@ -457,7 +466,7 @@ def test_branches(self): # is performed in test_zero_division(), so this is skipped # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) - data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0))) + data.append(((1.0, 2.0), (1.0, 2.0), (1.0, 0.0))) for cases in data: n = cases[0] @@ -507,21 +516,17 @@ def test_int_from_infinite_longdouble(self): # gh-627 x = np.longdouble(np.inf) assert_raises(OverflowError, int, x) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, int, x) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(not IS_PYPY, 
reason="Test is PyPy only (gh-9972)") def test_int_from_infinite_longdouble___int__(self): x = np.longdouble(np.inf) assert_raises(OverflowError, x.__int__) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, x.__int__) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") @@ -545,43 +550,43 @@ def test_int_from_longdouble(self): def test_numpy_scalar_relational_operators(self): # All integer for dt1 in np.typecodes['AllInteger']: - assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(1 > np.array(0, dtype=dt1)[()], f"type {dt1} failed") + assert_(not 1 < np.array(0, dtype=dt1)[()], f"type {dt1} failed") for dt2 in np.typecodes['AllInteger']: assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") - #Unsigned integers + # Unsigned integers for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 < np.array(1, dtype=dt1)[()], f"type {dt1} failed") + assert_(not -1 > np.array(1, dtype=dt1)[()], f"type {dt1} failed") + assert_(-1 != np.array(1, dtype=dt1)[()], f"type {dt1} failed") - #unsigned vs signed + # unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s 
failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") - #Signed integers and floats + # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(1 > np.array(-1, dtype=dt1)[()], f"type {dt1} failed") + assert_(not 1 < np.array(-1, dtype=dt1)[()], f"type {dt1} failed") + assert_(-1 == np.array(-1, dtype=dt1)[()], f"type {dt1} failed") for dt2 in 'bhlqp' + np.typecodes['Float']: assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") def test_scalar_comparison_to_none(self): # Scalars should just return False and not give a warnings. 
@@ -619,18 +624,18 @@ def _test_type_repr(self, t): finfo = np.finfo(t) last_fraction_bit_idx = finfo.nexp + finfo.nmant last_exponent_bit_idx = finfo.nexp - storage_bytes = np.dtype(t).itemsize*8 + storage_bytes = np.dtype(t).itemsize * 8 # could add some more types to the list below for which in ['small denorm', 'small norm']: # Values from https://en.wikipedia.org/wiki/IEEE_754 - constr = np.array([0x00]*storage_bytes, dtype=np.uint8) + constr = np.array([0x00] * storage_bytes, dtype=np.uint8) if which == 'small denorm': byte = last_fraction_bit_idx // 8 - bytebit = 7-(last_fraction_bit_idx % 8) + bytebit = 7 - (last_fraction_bit_idx % 8) constr[byte] = 1 << bytebit elif which == 'small norm': byte = last_exponent_bit_idx // 8 - bytebit = 7-(last_exponent_bit_idx % 8) + bytebit = 7 - (last_exponent_bit_idx % 8) constr[byte] = 1 << bytebit else: raise ValueError('hmm') @@ -682,12 +687,8 @@ def test_seq_repeat(self): for numpy_type in deprecated_types: i = np.dtype(numpy_type).type() - assert_equal( - assert_warns(DeprecationWarning, operator.mul, seq, i), - seq * int(i)) - assert_equal( - assert_warns(DeprecationWarning, operator.mul, i, seq), - int(i) * seq) + with assert_raises(TypeError): + operator.mul(seq, i) for numpy_type in forbidden_types: i = np.dtype(numpy_type).type() @@ -720,8 +721,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] if dt in np.typecodes['UnsignedInteger']: @@ -738,8 +739,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] 
assert_equal(operator.sub(a, a), 0) @@ -760,8 +761,8 @@ def _test_abs_func(self, absfunc, test_dtype): x = test_dtype(np.finfo(test_dtype).max) assert_equal(absfunc(x), x.real) - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) x = test_dtype(np.finfo(test_dtype).tiny) assert_equal(absfunc(x), x.real) @@ -818,8 +819,8 @@ def test_shift_all_bits(self, type_code, op): assert_equal(res_scl, 0) # Result on scalars should be the same as on arrays - val_arr = np.array([val_scl]*32, dtype=dt) - shift_arr = np.array([shift]*32, dtype=dt) + val_arr = np.array([val_scl] * 32, dtype=dt) + shift_arr = np.array([shift] * 32, dtype=dt) res_arr = op(val_arr, shift_arr) assert_equal(res_arr, res_scl) @@ -852,7 +853,7 @@ def test_float_and_complex_hashes(self, type_code): def test_complex_hashes(self, type_code): # Test some complex valued hashes specifically: scalar = np.dtype(type_code).type - for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]: + for val in [np.pi + 1j, np.inf - 3j, 3j, 6. + 1j]: numpy_val = scalar(val) assert hash(complex(numpy_val)) == hash(numpy_val) @@ -870,6 +871,7 @@ def recursionlimit(n): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -881,6 +883,7 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_right(o, op, type_): try: with recursionlimit(200): @@ -1065,8 +1068,8 @@ def test_longdouble_complex(): # Simple test to check longdouble and complex combinations, since these # need to go through promotion, which longdouble needs to be careful about. 
x = np.longdouble(1) - assert x + 1j == 1+1j - assert 1j + x == 1+1j + assert x + 1j == 1 + 1j + assert 1j + x == 1 + 1j @pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names) diff --git a/blimgui/dist64/numpy/_core/tests/test_scalarprint.py b/blimgui/dist64/numpy/_core/tests/test_scalarprint.py index b109ce7..accdd1e 100644 --- a/blimgui/dist64/numpy/_core/tests/test_scalarprint.py +++ b/blimgui/dist64/numpy/_core/tests/test_scalarprint.py @@ -1,32 +1,30 @@ """ Test printing of scalar types. """ -import code import platform + import pytest -import sys -from tempfile import TemporaryFile import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, IS_MUSL) +from numpy.testing import IS_MUSL, assert_, assert_equal, assert_raises + class TestRealScalars: def test_str(self): svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] styps = [np.float16, np.float32, np.float64, np.longdouble] wanted = [ - ['0.0', '0.0', '0.0', '0.0' ], + ['0.0', '0.0', '0.0', '0.0' ], # noqa: E202 ['-0.0', '-0.0', '-0.0', '-0.0'], - ['1.0', '1.0', '1.0', '1.0' ], + ['1.0', '1.0', '1.0', '1.0' ], # noqa: E202 ['-1.0', '-1.0', '-1.0', '-1.0'], - ['inf', 'inf', 'inf', 'inf' ], + ['inf', 'inf', 'inf', 'inf' ], # noqa: E202 ['-inf', '-inf', '-inf', '-inf'], - ['nan', 'nan', 'nan', 'nan']] + ['nan', 'nan', 'nan', 'nan' ]] # noqa: E202 for wants, val in zip(wanted, svals): for want, styp in zip(wants, styps): - msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) + msg = f'for str({np.dtype(styp).name}({val!r}))' assert_equal(str(styp(val)), want, err_msg=msg) def test_scalar_cutoffs(self): @@ -48,49 +46,33 @@ def check(v): check(1e15) check(1e16) - def test_py2_float_print(self): - # gh-10753 - # In python2, the python float type implements an obsolete method - # tp_print, which overrides tp_repr and tp_str when using "print" to - # output to a "real file" (ie, not a StringIO). Make sure we don't - # inherit it. 
- x = np.double(0.1999999999999) - with TemporaryFile('r+t') as f: - print(x, file=f) - f.seek(0) - output = f.read() - assert_equal(output, str(x) + '\n') - # In python2 the value float('0.1999999999999') prints with reduced - # precision as '0.2', but we want numpy's np.double('0.1999999999999') - # to print the unique value, '0.1999999999999'. - - # gh-11031 - # Only in the python2 interactive shell and when stdout is a "real" - # file, the output of the last command is printed to stdout without - # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print - # x` are potentially different. Make sure they are the same. The only - # way I found to get prompt-like output is using an actual prompt from - # the 'code' module. Again, must use tempfile to get a "real" file. - - # dummy user-input which enters one line and then ctrl-Ds. - def userinput(): - yield 'np.sqrt(2)' - raise EOFError - gen = userinput() - input_func = lambda prompt="": next(gen) - - with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe: - orig_stdout, orig_stderr = sys.stdout, sys.stderr - sys.stdout, sys.stderr = fo, fe - - code.interact(local={'np': np}, readfunc=input_func, banner='') - - sys.stdout, sys.stderr = orig_stdout, orig_stderr - - fo.seek(0) - capture = fo.read().strip() - - assert_equal(capture, repr(np.sqrt(2))) + test_cases_gh_28679 = [ + (np.half, -0.000099, "-9.9e-05"), + (np.half, 0.0001, "0.0001"), + (np.half, 999, "999.0"), + (np.half, -1000, "-1e+03"), + (np.single, 0.000099, "9.9e-05"), + (np.single, -0.000100001, "-0.000100001"), + (np.single, 999999, "999999.0"), + (np.single, -1000000, "-1e+06") + ] + + @pytest.mark.parametrize("dtype, input_val, expected_str", test_cases_gh_28679) + def test_gh_28679(self, dtype, input_val, expected_str): + # test cutoff to exponent notation for half and single + assert_equal(str(dtype(input_val)), expected_str) + + test_cases_legacy_2_2 = [ + (np.half(65504), "65500.0"), + (np.single(1.e15), "1000000000000000.0"), + 
(np.single(1.e16), "1e+16"), + ] + + @pytest.mark.parametrize("input_val, expected_str", test_cases_legacy_2_2) + def test_legacy_2_2_mode(self, input_val, expected_str): + # test legacy cutoff to exponent notation for half and single + with np.printoptions(legacy='2.2'): + assert_equal(str(input_val), expected_str) def test_dragon4(self): # these tests are adapted from Ryan Juckett's dragon4 implementation, @@ -125,7 +107,6 @@ def test_dragon4(self): assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), '9.9999999999999694e-311') - # test rounding # 3.1415927410 is closest float32 to np.pi assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), @@ -148,7 +129,6 @@ def test_dragon4(self): "3.14159265358979311599796346854418516159057617187500") assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") - # smallest numbers assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), "0.00000000000000000000000000000000000000000000140129846432" @@ -263,21 +243,21 @@ def test_dragon4(self): available_float_dtypes = [np.float16, np.float32, np.float64, np.float128]\ if hasattr(np, 'float128') else [np.float16, np.float32, np.float64] - + @pytest.mark.parametrize("tp", available_float_dtypes) def test_dragon4_positional_interface(self, tp): # test is flaky for musllinux on np.float128 if IS_MUSL and tp == np.float128: pytest.skip("Skipping flaky test of float128 on musllinux") - + fpos = np.format_float_positional - + # test padding assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. 
") assert_equal(fpos(tp('-10.2'), pad_left=4, pad_right=4), " -10.2 ") - + # test fixed (non-unique) mode assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") @@ -286,7 +266,7 @@ def test_dragon4_positional_interface_trim(self, tp): # test is flaky for musllinux on np.float128 if IS_MUSL and tp == np.float128: pytest.skip("Skipping flaky test of float128 on musllinux") - + fpos = np.format_float_positional # test trimming # trim of 'k' or '.' only affects non-unique mode, since unique @@ -311,27 +291,27 @@ def test_dragon4_positional_interface_trim(self, tp): "1.2" if tp != np.float16 else "1.2002") assert_equal(fpos(tp('1.'), trim='-'), "1") assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") - + @pytest.mark.parametrize("tp", available_float_dtypes) @pytest.mark.parametrize("pad_val", [10**5, np.iinfo("int32").max]) def test_dragon4_positional_interface_overflow(self, tp, pad_val): # test is flaky for musllinux on np.float128 if IS_MUSL and tp == np.float128: pytest.skip("Skipping flaky test of float128 on musllinux") - + fpos = np.format_float_positional - #gh-28068 - with pytest.raises(RuntimeError, - match="Float formating result too large"): + # gh-28068 + with pytest.raises(RuntimeError, + match="Float formatting result too large"): fpos(tp('1.047'), unique=False, precision=pad_val) - with pytest.raises(RuntimeError, - match="Float formating result too large"): + with pytest.raises(RuntimeError, + match="Float formatting result too large"): fpos(tp('1.047'), precision=2, pad_left=pad_val) - with pytest.raises(RuntimeError, - match="Float formating result too large"): + with pytest.raises(RuntimeError, + match="Float formatting result too large"): fpos(tp('1.047'), precision=2, pad_right=pad_val) @pytest.mark.parametrize("tp", available_float_dtypes) @@ -339,7 +319,7 @@ def test_dragon4_scientific_interface(self, tp): # test is flaky for musllinux on np.float128 if IS_MUSL and tp == np.float128: pytest.skip("Skipping flaky test of 
float128 on musllinux") - + fsci = np.format_float_scientific # test exp_digits @@ -357,7 +337,7 @@ def test_ppc64_ibm_double_double128(self): # which happens when the first double is normal and the second is # subnormal. x = np.float128('2.123123123123123123123123123123123e-286') - got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)] + got = [str(x / np.float128('2e' + str(i))) for i in range(40)] expected = [ "1.06156156156156156156156156156157e-286", "1.06156156156156156156156156156158e-287", @@ -404,7 +384,7 @@ def test_ppc64_ibm_double_double128(self): # Note: we follow glibc behavior, but it (or gcc) might not be right. # In particular we can get two values that print the same but are not # equal: - a = np.float128('2')/np.float128('3') + a = np.float128('2') / np.float128('3') b = np.float128(str(a)) assert_equal(str(a), str(b)) assert_(a != b) diff --git a/blimgui/dist64/numpy/_core/tests/test_shape_base.py b/blimgui/dist64/numpy/_core/tests/test_shape_base.py index e90c89a..2383b93 100644 --- a/blimgui/dist64/numpy/_core/tests/test_shape_base.py +++ b/blimgui/dist64/numpy/_core/tests/test_shape_base.py @@ -1,16 +1,37 @@ +import sys + import pytest + import numpy as np from numpy._core import ( - array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack, - newaxis, concatenate, stack - ) + arange, + array, + atleast_1d, + atleast_2d, + atleast_3d, + block, + concatenate, + hstack, + newaxis, + stack, + vstack, +) +from numpy._core.shape_base import ( + _block_concatenate, + _block_dispatcher, + _block_setup, + _block_slicing, +) from numpy.exceptions import AxisError -from numpy._core.shape_base import (_block_dispatcher, _block_setup, - _block_concatenate, _block_slicing) from numpy.testing import ( - assert_, assert_raises, assert_array_equal, assert_equal, - assert_raises_regex, assert_warns, IS_PYPY - ) + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) +from 
numpy.testing._private.utils import requires_memory class TestAtleast1d: @@ -111,7 +132,7 @@ def test_2D_array(self): a = array([[1, 2], [1, 2]]) b = array([[2, 3], [2, 3]]) res = [atleast_3d(a), atleast_3d(b)] - desired = [a[:,:, newaxis], b[:,:, newaxis]] + desired = [a[:, :, newaxis], b[:, :, newaxis]] assert_array_equal(res, desired) def test_3D_array(self): @@ -156,7 +177,7 @@ def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): hstack(np.arange(3) for _ in range(2)) with pytest.raises(TypeError, match="arrays to stack must be"): - hstack((x for x in np.ones((3, 2)))) + hstack(x for x in np.ones((3, 2))) def test_casting_and_dtype(self): a = np.array([1, 2, 3]) @@ -225,7 +246,6 @@ def test_casting_and_dtype_type_error(self): vstack((a, b), casting="safe", dtype=np.int64) - class TestConcatenate: def test_returns_copy(self): a = np.eye(3) @@ -236,7 +256,7 @@ def test_returns_copy(self): def test_exceptions(self): # test axis must be in bounds for ndim in [1, 2, 3]: - a = np.ones((1,)*ndim) + a = np.ones((1,) * ndim) np.concatenate((a, a), axis=0) # OK assert_raises(AxisError, np.concatenate, (a, a), axis=ndim) assert_raises(AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) @@ -262,9 +282,8 @@ def test_exceptions(self): assert_raises_regex( ValueError, "all the input array dimensions except for the concatenation axis " - "must match exactly, but along dimension {}, the array at " - "index 0 has size 1 and the array at index 1 has size 2" - .format(i), + f"must match exactly, but along dimension {i}, the array at " + "index 0 has size 1 and the array at index 1 has size 2", np.concatenate, (a, b), axis=axis[1]) assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) a = np.moveaxis(a, -1, 0) @@ -274,6 +293,22 @@ def test_exceptions(self): # No arrays to concatenate raises ValueError assert_raises(ValueError, concatenate, ()) + @pytest.mark.slow + @pytest.mark.skipif( + sys.maxsize < 2**32, + reason="only problematic 
on 64bit platforms" + ) + @requires_memory(2 * np.iinfo(np.intc).max) + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_huge_list_error(self): + a = np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg = (fr"concatenate\(\) only supports up to {max_int} arrays" + f" but got {max_int + 1}.") + with pytest.raises(ValueError, match=msg): + np.concatenate(arrs) + def test_concatenate_axis_None(self): a = np.arange(4, dtype=np.float64).reshape((2, 2)) b = list(range(3)) @@ -349,12 +384,20 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") + def test_concatenate_same_value(self): + r4 = list(range(4)) + with pytest.raises(ValueError, match="^casting must be one of"): + concatenate([r4, r4], casting="same_value") + + @pytest.mark.skipif( + IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython" + ) def test_operator_concat(self): import operator a = array([1, 2]) b = array([3, 4]) - n = [1,2] + n = [1, 2] res = array([1, 2, 3, 4]) assert_raises(TypeError, operator.concat, a, b) assert_raises(TypeError, operator.concat, a, n) @@ -367,8 +410,8 @@ def test_bad_out_shape(self): b = array([3, 4]) assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) - assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1))) - assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4))) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((4, 1))) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((1, 4))) concatenate((a, b), out=np.empty(4)) @pytest.mark.parametrize("axis", [None, 0]) @@ -479,13 +522,13 @@ def test_stack(): with pytest.raises(TypeError, match="arrays to stack must be"): stack(x for x in range(3)) - #casting and dtype test + # casting and dtype test a = np.array([1, 2, 3]) b = np.array([2.5, 3.5, 4.5]) res = np.stack((a, b), axis=1, 
casting="unsafe", dtype=np.int64) expected_res = np.array([[1, 2], [2, 3], [3, 4]]) assert_array_equal(res, expected_res) - #casting and dtype with TypeError + # casting and dtype with TypeError with assert_raises(TypeError): stack((a, b), dtype=np.int64, axis=1, casting="safe") @@ -765,9 +808,10 @@ def test_block_with_mismatched_shape(self, block): assert_raises(ValueError, block, [a, b]) assert_raises(ValueError, block, [b, a]) - to_block = [[np.ones((2,3)), np.ones((2,2))], - [np.ones((2,2)), np.ones((2,2))]] + to_block = [[np.ones((2, 3)), np.ones((2, 2))], + [np.ones((2, 2)), np.ones((2, 2))]] assert_raises(ValueError, block, to_block) + def test_no_lists(self, block): assert_equal(block(1), np.array(1)) assert_equal(block(np.eye(3)), np.eye(3)) @@ -817,8 +861,8 @@ def test_different_ndims_depths(self, block): def test_block_memory_order(self, block): # 3D - arr_c = np.zeros((3,)*3, order='C') - arr_f = np.zeros((3,)*3, order='F') + arr_c = np.zeros((3,) * 3, order='C') + arr_f = np.zeros((3,) * 3, order='F') b_c = [[[arr_c, arr_c], [arr_c, arr_c]], diff --git a/blimgui/dist64/numpy/_core/tests/test_simd.py b/blimgui/dist64/numpy/_core/tests/test_simd.py index 6b8f208..9bcd1f6 100644 --- a/blimgui/dist64/numpy/_core/tests/test_simd.py +++ b/blimgui/dist64/numpy/_core/tests/test_simd.py @@ -4,9 +4,12 @@ import math import operator import re + import pytest -from numpy._core._simd import targets, clear_floatstatus, get_floatstatus + from numpy._core._multiarray_umath import __cpu_baseline__ +from numpy._core._simd import clear_floatstatus, get_floatstatus, targets + def check_floatstatus(divbyzero=False, overflow=False, underflow=False, invalid=False, @@ -26,7 +29,7 @@ class _Test_Utility: # submodule of the desired SIMD extension, e.g. targets["AVX512F"] npyv = None # the current data type suffix e.g. 
's8' - sfx = None + sfx = None # target name can be 'baseline' or one or more of CPU features target_name = None @@ -118,7 +121,7 @@ def _cpu_features(self): if target == "baseline": target = __cpu_baseline__ else: - target = target.split('__') # multi-target separator + target = target.split('__') # multi-target separator return ' '.join(target) class _SIMD_BOOL(_Test_Utility): @@ -185,7 +188,7 @@ def test_operators_logical(self): assert data_xnor == vxnor def test_tobits(self): - data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)]) + data2bits = lambda data: sum(int(x != 0) << i for i, x in enumerate(data, 0)) for data in (self._data(), self._data(reverse=True)): vdata = self._load_b(data) data_bits = data2bits(data) @@ -216,10 +219,10 @@ def test_pack(self): spack = [(i & 0xFF) for i in (list(rdata) + list(data))] vpack = pack_simd(vrdata, vdata) elif self.sfx == "b32": - spack = [(i & 0xFF) for i in (2*list(rdata) + 2*list(data))] + spack = [(i & 0xFF) for i in (2 * list(rdata) + 2 * list(data))] vpack = pack_simd(vrdata, vrdata, vdata, vdata) elif self.sfx == "b64": - spack = [(i & 0xFF) for i in (4*list(rdata) + 4*list(data))] + spack = [(i & 0xFF) for i in (4 * list(rdata) + 4 * list(data))] vpack = pack_simd(vrdata, vrdata, vrdata, vrdata, vdata, vdata, vdata, vdata) assert vpack == spack @@ -268,7 +271,8 @@ def test_operators_shift(self): shr = self.shr(vdata_a, count) assert shr == data_shr_a - # shift by zero or max or out-range immediate constant is not applicable and illogical + # shift by zero or max or out-range immediate constant is not + # applicable and illogical for count in range(1, self._scalar_size()): # load to cast data_shl_a = self.load([a << count for a in data_a]) @@ -367,7 +371,7 @@ class _SIMD_FP(_Test_Utility): To test all float vector types at once """ def test_arithmetic_fused(self): - vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3 + vdata_a, vdata_b, vdata_c = [self.load(self._data())] * 3 
vdata_cx2 = self.add(vdata_c, vdata_c) # multiply and add, a*b + c data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)]) @@ -398,7 +402,7 @@ def test_abs(self): abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan)) for case, desired in abs_cases: - data_abs = [desired]*self.nlanes + data_abs = [desired] * self.nlanes vabs = self.abs(self.setall(case)) assert vabs == pytest.approx(data_abs, nan_ok=True) @@ -412,11 +416,12 @@ def test_sqrt(self): sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf)) for case, desired in sqrt_cases: - data_sqrt = [desired]*self.nlanes - sqrt = self.sqrt(self.setall(case)) + data_sqrt = [desired] * self.nlanes + sqrt = self.sqrt(self.setall(case)) assert sqrt == pytest.approx(data_sqrt, nan_ok=True) - data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + # load to truncate precision + data_sqrt = self.load([math.sqrt(x) for x in data]) sqrt = self.sqrt(vdata) assert sqrt == data_sqrt @@ -427,11 +432,11 @@ def test_square(self): # square square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf)) for case, desired in square_cases: - data_square = [desired]*self.nlanes - square = self.square(self.setall(case)) + data_square = [desired] * self.nlanes + square = self.square(self.setall(case)) assert square == pytest.approx(data_square, nan_ok=True) - data_square = [x*x for x in data] + data_square = [x * x for x in data] square = self.square(vdata) assert square == data_square @@ -451,13 +456,13 @@ def test_rounding(self, intrin, func): # special cases round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf)) for case, desired in round_cases: - data_round = [desired]*self.nlanes + data_round = [desired] * self.nlanes _round = intrin(self.setall(case)) assert _round == pytest.approx(data_round, nan_ok=True) for x in range(0, 2**20, 256**2): for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15): - data = self.load([(x+a)*w for a in range(self.nlanes)]) + data = 
self.load([(x + a) * w for a in range(self.nlanes)]) data_round = [func(x) for x in data] _round = intrin(data) assert _round == data_round @@ -507,7 +512,7 @@ def test_max_min(self, intrin): func = eval(intrin[:3]) reduce_intrin = getattr(self, "reduce_" + intrin) intrin = getattr(self, intrin) - hf_nlanes = self.nlanes//2 + hf_nlanes = self.nlanes // 2 cases = ( ([0.0, -0.0], [-0.0, 0.0]), @@ -518,8 +523,8 @@ def test_max_min(self, intrin): ([-10, 10], [-10, 10]) ) for op1, op2 in cases: - vdata_a = self.load(op1*hf_nlanes) - vdata_b = self.load(op2*hf_nlanes) + vdata_a = self.load(op1 * hf_nlanes) + vdata_b = self.load(op2 * hf_nlanes) data = func(vdata_a, vdata_b) simd = intrin(vdata_a, vdata_b) assert simd == data @@ -545,7 +550,7 @@ def test_max_min(self, intrin): (nan, nan) ) for op1, op2 in cases: - vdata_ab = self.load([op1, op2]*hf_nlanes) + vdata_ab = self.load([op1, op2] * hf_nlanes) data = test_nan(op1, op2) simd = reduce_intrin(vdata_ab) assert simd == pytest.approx(data, nan_ok=True) @@ -562,11 +567,11 @@ def test_reciprocal(self): recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf)) for case, desired in recip_cases: - data_recip = [desired]*self.nlanes + data_recip = [desired] * self.nlanes recip = self.recip(self.setall(case)) assert recip == pytest.approx(data_recip, nan_ok=True) - data_recip = self.load([1/x for x in data]) # load to truncate precision + data_recip = self.load([1 / x for x in data]) # load to truncate precision recip = self.recip(vdata) assert recip == data_recip @@ -576,7 +581,7 @@ def test_special_cases(self): npyv_notnan_##SFX """ nnan = self.notnan(self.setall(self._nan())) - assert nnan == [0]*self.nlanes + assert nnan == [0] * self.nlanes @pytest.mark.parametrize("intrin_name", [ "rint", "trunc", "ceil", "floor" @@ -608,8 +613,8 @@ def to_bool(vector): cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan), (ninf, nan), (-0.0, +0.0)) for case_operand1, case_operand2 in cmp_cases: - data_a = 
[case_operand1]*self.nlanes - data_b = [case_operand2]*self.nlanes + data_a = [case_operand1] * self.nlanes + data_b = [case_operand2] * self.nlanes vdata_a = self.setall(case_operand1) vdata_b = self.setall(case_operand2) vcmp = to_bool(intrin(vdata_a, vdata_b)) @@ -657,10 +662,10 @@ def test_memory_load(self): assert loads_data == data # load lower part loadl = self.loadl(data) - loadl_half = list(loadl)[:self.nlanes//2] - data_half = data[:self.nlanes//2] + loadl_half = list(loadl)[:self.nlanes // 2] + data_half = data[:self.nlanes // 2] assert loadl_half == data_half - assert loadl != data # detect overflow + assert loadl != data # detect overflow def test_memory_store(self): data = self._data() @@ -680,12 +685,12 @@ def test_memory_store(self): # store lower part store_l = [0] * self.nlanes self.storel(store_l, vdata) - assert store_l[:self.nlanes//2] == data[:self.nlanes//2] - assert store_l != vdata # detect overflow + assert store_l[:self.nlanes // 2] == data[:self.nlanes // 2] + assert store_l != vdata # detect overflow # store higher part store_h = [0] * self.nlanes self.storeh(store_h, vdata) - assert store_h[:self.nlanes//2] == data[self.nlanes//2:] + assert store_h[:self.nlanes // 2] == data[self.nlanes // 2:] assert store_h != vdata # detect overflow @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ @@ -698,14 +703,14 @@ def test_memory_partial_load(self, intrin, elsizes, scale, fill): npyv_load_tillz, npyv_load_till = eval(intrin) data = self._data() lanes = list(range(1, self.nlanes + 1)) - lanes += [self.nlanes**2, self.nlanes**4] # test out of range + lanes += [self.nlanes**2, self.nlanes**4] # test out of range for n in lanes: load_till = npyv_load_till(data, n, *fill) load_tillz = npyv_load_tillz(data, n) n *= scale - data_till = data[:n] + fill * ((self.nlanes-n) // scale) + data_till = data[:n] + fill * ((self.nlanes - n) // scale) assert load_till == data_till - data_tillz = data[:n] + [0] * (self.nlanes-n) + data_tillz = data[:n] + 
[0] * (self.nlanes - n) assert load_tillz == data_tillz @pytest.mark.parametrize("intrin, elsizes, scale", [ @@ -723,7 +728,7 @@ def test_memory_partial_store(self, intrin, elsizes, scale): lanes += [self.nlanes**2, self.nlanes**4] for n in lanes: data_till = data_rev.copy() - data_till[:n*scale] = data[:n*scale] + data_till[:n * scale] = data[:n * scale] store_till = self._data(reverse=True) npyv_store_till(store_till, n, vdata) assert store_till == data_till @@ -738,15 +743,15 @@ def test_memory_noncont_load(self, intrin, elsizes, scale): npyv_loadn = eval(intrin) for stride in range(-64, 64): if stride < 0: - data = self._data(stride, -stride*self.nlanes) + data = self._data(stride, -stride * self.nlanes) data_stride = list(itertools.chain( *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) )) elif stride == 0: data = self._data() - data_stride = data[0:scale] * (self.nlanes//scale) + data_stride = data[0:scale] * (self.nlanes // scale) else: - data = self._data(count=stride*self.nlanes) + data = self._data(count=stride * self.nlanes) data_stride = list(itertools.chain( *zip(*[data[i::stride] for i in range(scale)])) ) @@ -766,15 +771,15 @@ def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill): lanes += [self.nlanes**2, self.nlanes**4] for stride in range(-64, 64): if stride < 0: - data = self._data(stride, -stride*self.nlanes) + data = self._data(stride, -stride * self.nlanes) data_stride = list(itertools.chain( *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) )) elif stride == 0: data = self._data() - data_stride = data[0:scale] * (self.nlanes//scale) + data_stride = data[0:scale] * (self.nlanes // scale) else: - data = self._data(count=stride*self.nlanes) + data = self._data(count=stride * self.nlanes) data_stride = list(itertools.chain( *zip(*[data[i::stride] for i in range(scale)]) )) @@ -783,7 +788,7 @@ def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill): nscale = n * scale llanes = self.nlanes - nscale 
data_stride_till = ( - data_stride[:nscale] + fill * (llanes//scale) + data_stride[:nscale] + fill * (llanes // scale) ) loadn_till = npyv_loadn_till(data, stride, n, *fill) assert loadn_till == data_stride_till @@ -804,25 +809,25 @@ def test_memory_noncont_store(self, intrin, elsizes, scale): hlanes = self.nlanes // scale for stride in range(1, 64): data_storen = [0xff] * stride * self.nlanes - for s in range(0, hlanes*stride, stride): - i = (s//stride)*scale - data_storen[s:s+scale] = data[i:i+scale] + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s:s + scale] = data[i:i + scale] storen = [0xff] * stride * self.nlanes - storen += [0x7f]*64 + storen += [0x7f] * 64 npyv_storen(storen, stride, vdata) assert storen[:-64] == data_storen - assert storen[-64:] == [0x7f]*64 # detect overflow + assert storen[-64:] == [0x7f] * 64 # detect overflow for stride in range(-64, 0): data_storen = [0xff] * -stride * self.nlanes - for s in range(0, hlanes*stride, stride): - i = (s//stride)*scale - data_storen[s-scale:s or None] = data[i:i+scale] - storen = [0x7f]*64 + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s - scale:s or None] = data[i:i + scale] + storen = [0x7f] * 64 storen += [0xff] * -stride * self.nlanes npyv_storen(storen, stride, vdata) assert storen[64:] == data_storen - assert storen[:64] == [0x7f]*64 # detect overflow + assert storen[:64] == [0x7f] * 64 # detect overflow # stride 0 data_storen = [0x7f] * self.nlanes storen = data_storen.copy() @@ -846,34 +851,34 @@ def test_memory_noncont_partial_store(self, intrin, elsizes, scale): for stride in range(1, 64): for n in lanes: data_till = [0xff] * stride * self.nlanes - tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale) - for s in range(0, hlanes*stride, stride)[:n]: - i = (s//stride)*scale - data_till[s:s+scale] = tdata[i:i+scale] + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, 
stride)[:n]: + i = (s // stride) * scale + data_till[s:s + scale] = tdata[i:i + scale] storen_till = [0xff] * stride * self.nlanes - storen_till += [0x7f]*64 + storen_till += [0x7f] * 64 npyv_storen_till(storen_till, stride, n, vdata) assert storen_till[:-64] == data_till - assert storen_till[-64:] == [0x7f]*64 # detect overflow + assert storen_till[-64:] == [0x7f] * 64 # detect overflow for stride in range(-64, 0): for n in lanes: data_till = [0xff] * -stride * self.nlanes - tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale) - for s in range(0, hlanes*stride, stride)[:n]: - i = (s//stride)*scale - data_till[s-scale:s or None] = tdata[i:i+scale] - storen_till = [0x7f]*64 + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s - scale:s or None] = tdata[i:i + scale] + storen_till = [0x7f] * 64 storen_till += [0xff] * -stride * self.nlanes npyv_storen_till(storen_till, stride, n, vdata) assert storen_till[64:] == data_till - assert storen_till[:64] == [0x7f]*64 # detect overflow + assert storen_till[:64] == [0x7f] * 64 # detect overflow # stride 0 for n in lanes: data_till = [0x7f] * self.nlanes storen_till = data_till.copy() - data_till[0:scale] = data[:n*scale][-scale:] + data_till[0:scale] = data[:n * scale][-scale:] npyv_storen_till(storen_till, 0, n, vdata) assert storen_till == data_till @@ -891,7 +896,7 @@ def test_lut(self, intrin, table_size, elsize): return intrin = eval(intrin) idx_itrin = getattr(self.npyv, f"setall_u{elsize}") - table = range(0, table_size) + table = range(table_size) for i in table: broadi = self.setall(i) idx = idx_itrin(i) @@ -944,14 +949,14 @@ def test_misc(self): self.npyv.cleanup() def test_reorder(self): - data_a, data_b = self._data(), self._data(reverse=True) + data_a, data_b = self._data(), self._data(reverse=True) vdata_a, vdata_b = self.load(data_a), self.load(data_b) # lower half part - data_a_lo = data_a[:self.nlanes//2] 
- data_b_lo = data_b[:self.nlanes//2] + data_a_lo = data_a[:self.nlanes // 2] + data_b_lo = data_b[:self.nlanes // 2] # higher half part - data_a_hi = data_a[self.nlanes//2:] - data_b_hi = data_b[self.nlanes//2:] + data_a_hi = data_a[self.nlanes // 2:] + data_b_hi = data_b[self.nlanes // 2:] # combine two lower parts combinel = self.combinel(vdata_a, vdata_b) assert combinel == data_a_lo + data_b_lo @@ -971,7 +976,7 @@ def test_reorder(self): ]) vzip = self.zip(vdata_a, vdata_b) assert vzip == (data_zipl, data_ziph) - vzip = [0]*self.nlanes*2 + vzip = [0] * self.nlanes * 2 self._x2("store")(vzip, (vdata_a, vdata_b)) assert vzip == list(data_zipl) + list(data_ziph) @@ -987,8 +992,8 @@ def test_reorder_rev64(self): if ssize == 64: return data_rev64 = [ - y for x in range(0, self.nlanes, 64//ssize) - for y in reversed(range(x, x + 64//ssize)) + y for x in range(0, self.nlanes, 64 // ssize) + for y in reversed(range(x, x + 64 // ssize)) ] rev64 = self.rev64(self.load(range(self.nlanes))) assert rev64 == data_rev64 @@ -1002,16 +1007,16 @@ def test_reorder_permi128(self): if ssize < 32: return data = self.load(self._data()) - permn = 128//ssize - permd = permn-1 - nlane128 = self.nlanes//permn + permn = 128 // ssize + permd = permn - 1 + nlane128 = self.nlanes // permn shfl = [0, 1] if ssize == 64 else [0, 2, 4, 6] for i in range(permn): indices = [(i >> shf) & permd for shf in shfl] vperm = self.permi128(data, *indices) data_vperm = [ data[j + (e & -permn)] - for e, j in enumerate(indices*nlane128) + for e, j in enumerate(indices * nlane128) ] assert vperm == data_vperm @@ -1032,6 +1037,7 @@ def test_operators_comparison(self, func, intrin): intrin = getattr(self, intrin) mask_true = self._true_mask() + def to_bool(vector): return [lane == mask_true for lane in vector] @@ -1059,8 +1065,8 @@ def test_operators_logical(self): vxor = cast(self.xor(vdata_a, vdata_b)) assert vxor == data_xor - data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)]) - vor = 
cast(getattr(self, "or")(vdata_a, vdata_b)) + data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)]) + vor = cast(getattr(self, "or")(vdata_a, vdata_b)) assert vor == data_or data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)]) @@ -1103,11 +1109,11 @@ def test_operators_crosstest(self, intrin, data): def test_conversion_boolean(self): bsfx = "b" + self.sfx[1:] - to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx)) - from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx)) + to_boolean = getattr(self.npyv, f"cvt_{bsfx}_{self.sfx}") + from_boolean = getattr(self.npyv, f"cvt_{self.sfx}_{bsfx}") false_vb = to_boolean(self.setall(0)) - true_vb = self.cmpeq(self.setall(0), self.setall(0)) + true_vb = self.cmpeq(self.setall(0), self.setall(0)) assert false_vb != true_vb false_vsfx = from_boolean(false_vb) @@ -1122,16 +1128,16 @@ def test_conversion_expand(self): """ if self.sfx not in ("u8", "u16"): return - totype = self.sfx[0]+str(int(self.sfx[1:])*2) + totype = self.sfx[0] + str(int(self.sfx[1:]) * 2) expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}") # close enough from the edge to detect any deviation - data = self._data(self._int_max() - self.nlanes) + data = self._data(self._int_max() - self.nlanes) vdata = self.load(data) edata = expand(vdata) # lower half part - data_lo = data[:self.nlanes//2] + data_lo = data[:self.nlanes // 2] # higher half part - data_hi = data[self.nlanes//2:] + data_hi = data[self.nlanes // 2:] assert edata == (data_lo, data_hi) def test_arithmetic_subadd(self): @@ -1143,11 +1149,11 @@ def test_arithmetic_subadd(self): vdata_a, vdata_b = self.load(data_a), self.load(data_b) # non-saturated - data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast - add = self.add(vdata_a, vdata_b) + data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast + add = self.add(vdata_a, vdata_b) assert add == data_add - data_sub = self.load([a - b for a, b in 
zip(data_a, data_b)]) - sub = self.sub(vdata_a, vdata_b) + data_sub = self.load([a - b for a, b in zip(data_a, data_b)]) + sub = self.sub(vdata_a, vdata_b) assert sub == data_sub def test_arithmetic_mul(self): @@ -1187,6 +1193,7 @@ def test_arithmetic_intdiv(self): return int_min = self._int_min() + def trunc_div(a, d): """ Divide towards zero works with large integers > 2^53, @@ -1201,17 +1208,17 @@ def trunc_div(a, d): data = [1, -int_min] # to test overflow data += range(0, 2**8, 2**5) - data += range(0, 2**8, 2**5-1) + data += range(0, 2**8, 2**5 - 1) bsize = self._scalar_size() if bsize > 8: data += range(2**8, 2**16, 2**13) - data += range(2**8, 2**16, 2**13-1) + data += range(2**8, 2**16, 2**13 - 1) if bsize > 16: data += range(2**16, 2**32, 2**29) - data += range(2**16, 2**32, 2**29-1) + data += range(2**16, 2**32, 2**29 - 1) if bsize > 32: data += range(2**32, 2**64, 2**61) - data += range(2**32, 2**64, 2**61-1) + data += range(2**32, 2**64, 2**61 - 1) # negate data += [-x for x in data] for dividend, divisor in itertools.product(data, data): @@ -1246,7 +1253,7 @@ def test_arithmetic_reduce_sumup(self): """ if self.sfx not in ("u8", "u16"): return - rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes) + rdata = (0, self.nlanes, self._int_min(), self._int_max() - self.nlanes) for r in rdata: data = self._data(r) vdata = self.load(data) @@ -1262,7 +1269,7 @@ def test_mask_conditional(self): """ vdata_a = self.load(self._data()) vdata_b = self.load(self._data(reverse=True)) - true_mask = self.cmpeq(self.zero(), self.zero()) + true_mask = self.cmpeq(self.zero(), self.zero()) false_mask = self.cmpneq(self.zero(), self.zero()) data_sub = self.sub(vdata_b, vdata_a) @@ -1289,21 +1296,22 @@ def test_mask_conditional(self): ifdivz = self.ifdivz(false_mask, vdata_a, vdata_b) assert ifdivz == self.zero() + bool_sfx = ("b8", "b16", "b32", "b64") int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64") -fp_sfx = ("f32", "f64") +fp_sfx = ("f32", 
"f64") all_sfx = int_sfx + fp_sfx tests_registry = { bool_sfx: _SIMD_BOOL, - int_sfx : _SIMD_INT, - fp_sfx : _SIMD_FP, + int_sfx: _SIMD_INT, + fp_sfx: _SIMD_FP, ("f32",): _SIMD_FP32, ("f64",): _SIMD_FP64, - all_sfx : _SIMD_ALL + all_sfx: _SIMD_ALL } for target_name, npyv in targets.items(): simd_width = npyv.simd if npyv else '' - pretty_name = target_name.split('__') # multi-target separator + pretty_name = target_name.split('__') # multi-target separator if len(pretty_name) > 1: # multi-target pretty_name = f"({' '.join(pretty_name)})" @@ -1311,7 +1319,7 @@ def test_mask_conditional(self): pretty_name = pretty_name[0] skip = "" - skip_sfx = dict() + skip_sfx = {} if not npyv: skip = f"target '{pretty_name}' isn't supported by current machine" elif not npyv.simd: @@ -1328,8 +1336,10 @@ def test_mask_conditional(self): for sfx in sfxes: skip_m = skip_sfx.get(sfx, skip) inhr = (cls,) - attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name) - tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) + attr = {"npyv": targets[target_name], "sfx": sfx, + "target_name": target_name} + type_name = f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}" + tcls = type(type_name, inhr, attr) if skip_m: pytest.mark.skip(reason=skip_m)(tcls) globals()[tcls.__name__] = tcls diff --git a/blimgui/dist64/numpy/_core/tests/test_simd_module.py b/blimgui/dist64/numpy/_core/tests/test_simd_module.py index 0b9729e..7056fef 100644 --- a/blimgui/dist64/numpy/_core/tests/test_simd_module.py +++ b/blimgui/dist64/numpy/_core/tests/test_simd_module.py @@ -1,5 +1,7 @@ import pytest + from numpy._core._simd import targets + """ This testing unit only for checking the sanity of common functionality, therefore all we need is just to take one submodule that represents any @@ -21,7 +23,8 @@ int_sfx = unsigned_sfx + signed_sfx all_sfx = unsigned_sfx + int_sfx -@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") 
+@pytest.mark.skipif(not npyv, + reason="could not find any SIMD extension with NPYV support") class Test_SIMD_MODULE: @pytest.mark.parametrize('sfx', all_sfx) @@ -36,7 +39,7 @@ def test_type_name(self, sfx): assert vector.__name__ == "npyv_" + sfx def test_raises(self): - a, b = [npyv.setall_u32(1)]*2 + a, b = [npyv.setall_u32(1)] * 2 for sfx in all_sfx: vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}") pytest.raises(TypeError, vcb("add"), a) @@ -45,7 +48,8 @@ def test_raises(self): pytest.raises(TypeError, vcb("setall"), [1]) pytest.raises(TypeError, vcb("load"), 1) pytest.raises(ValueError, vcb("load"), [1]) - pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + value = getattr(npyv, f"reinterpret_{sfx}_u32")(a) + pytest.raises(ValueError, vcb("store"), [1], value) @pytest.mark.skipif(not npyv2, reason=( "could not find a second SIMD extension with NPYV support" @@ -93,7 +97,7 @@ def test_truncate_f32(self): assert round(f32, 1) == 0.1 def test_compare(self): - data_range = range(0, npyv.nlanes_u32) + data_range = range(npyv.nlanes_u32) vdata = npyv.load_u32(data_range) assert vdata == list(data_range) assert vdata == tuple(data_range) diff --git a/blimgui/dist64/numpy/_core/tests/test_stringdtype.py b/blimgui/dist64/numpy/_core/tests/test_stringdtype.py index fd63812..b7e4975 100644 --- a/blimgui/dist64/numpy/_core/tests/test_stringdtype.py +++ b/blimgui/dist64/numpy/_core/tests/test_stringdtype.py @@ -1,35 +1,39 @@ -import concurrent.futures import copy import itertools import os -import string import pickle +import string import sys import tempfile -import numpy as np import pytest -from numpy.dtypes import StringDType +import numpy as np from numpy._core.tests._natype import pd_NA -from numpy.testing import assert_array_equal, IS_WASM, IS_PYPY - - -@pytest.fixture -def string_list(): - return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"] +from numpy.dtypes import StringDType +from numpy.testing 
import IS_PYPY, assert_array_equal -@pytest.fixture -def random_string_list(): +def random_unicode_string_list(): + """Returns an array of 10 100-character strings containing random text""" chars = list(string.ascii_letters + string.digits) chars = np.array(chars, dtype="U1") ret = np.random.choice(chars, size=100 * 10, replace=True) return ret.view("U100") +def get_dtype(na_object, coerce=True): + """Helper to work around pd_NA boolean behavior""" + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object is pd_NA or na_object != "unset": + return np.dtypes.StringDType(na_object=na_object, coerce=coerce) + else: + return np.dtypes.StringDType(coerce=coerce) + + @pytest.fixture(params=[True, False]) def coerce(request): + """Coerce input to strings or raise an error for non-string input""" return request.param @@ -38,25 +42,24 @@ def coerce(request): ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], ) def na_object(request): + """Possible values for the missing data sentinel""" return request.param -def get_dtype(na_object, coerce=True): - # explicit is check for pd_NA because != with pd_NA returns pd_NA - if na_object is pd_NA or na_object != "unset": - return StringDType(na_object=na_object, coerce=coerce) - else: - return StringDType(coerce=coerce) - - @pytest.fixture() def dtype(na_object, coerce): + """Cartesian project of missing data sentinel and string coercion options""" return get_dtype(na_object, coerce) +@pytest.fixture +def string_list(): + """Mix of short and long strings, some with unicode, some without""" + return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"] + -# second copy for cast tests to do a cartesian product over dtypes @pytest.fixture(params=[True, False]) def coerce2(request): + """Second copy of the coerce fixture for tests that need two instances""" return request.param @@ -65,11 +68,13 @@ def coerce2(request): ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", 
"string nan"], ) def na_object2(request): + """Second copy of the na_object fixture for tests that need two instances""" return request.param @pytest.fixture() def dtype2(na_object2, coerce2): + """Second copy of the dtype fixture for tests that need two instances""" # explicit is check for pd_NA because != with pd_NA returns pd_NA if na_object2 is pd_NA or na_object2 != "unset": return StringDType(na_object=na_object2, coerce=coerce2) @@ -146,12 +151,12 @@ def test_set_replace_na(i): s_long = "-=+" * 100 strings = [s_medium, s_empty, s_short, s_medium, s_long] a = np.array(strings, StringDType(na_object=np.nan)) - for s in [a[i], s_medium+s_short, s_short, s_empty, s_long]: + for s in [a[i], s_medium + s_short, s_short, s_empty, s_long]: a[i] = np.nan assert np.isnan(a[i]) a[i] = s assert a[i] == s - assert_array_equal(a, strings[:i] + [s] + strings[i+1:]) + assert_array_equal(a, strings[:i] + [s] + strings[i + 1:]) def test_null_roundtripping(): @@ -163,8 +168,8 @@ def test_null_roundtripping(): def test_string_too_large_error(): arr = np.array(["a", "b", "c"], dtype=StringDType()) - with pytest.raises(MemoryError): - arr * (2**63 - 2) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) @pytest.mark.parametrize( @@ -276,7 +281,7 @@ def test_unicode_casts(self, dtype, strings): def test_void_casts(self, dtype, strings): sarr = np.array(strings, dtype=dtype) utf8_bytes = [s.encode("utf-8") for s in strings] - void_dtype = f"V{max([len(s) for s in utf8_bytes])}" + void_dtype = f"V{max(len(s) for s in utf8_bytes)}" varr = np.array(utf8_bytes, dtype=void_dtype) assert_array_equal(varr, sarr.astype(void_dtype)) assert_array_equal(varr.astype(dtype), sarr) @@ -285,7 +290,7 @@ def test_bytes_casts(self, dtype, strings): sarr = np.array(strings, dtype=dtype) try: utf8_bytes = [s.encode("ascii") for s in strings] - bytes_dtype = f"S{max([len(s) for s in utf8_bytes])}" + bytes_dtype = f"S{max(len(s) for s in utf8_bytes)}" barr = np.array(utf8_bytes, 
dtype=bytes_dtype) assert_array_equal(barr, sarr.astype(bytes_dtype)) assert_array_equal(barr.astype(dtype), sarr) @@ -302,12 +307,13 @@ def test_bytes_casts(self, dtype, strings): sarr.astype("S20") -def test_additional_unicode_cast(random_string_list, dtype): - arr = np.array(random_string_list, dtype=dtype) +def test_additional_unicode_cast(dtype): + string_list = random_unicode_string_list() + arr = np.array(string_list, dtype=dtype) # test that this short-circuits correctly assert_array_equal(arr, arr.astype(arr.dtype)) # tests the casts via the comparison promoter - assert_array_equal(arr, arr.astype(random_string_list.dtype)) + assert_array_equal(arr, arr.astype(string_list.dtype)) def test_insert_scalar(dtype, string_list): @@ -460,7 +466,6 @@ def test_sort(strings, arr_sorted): if argsorted is not None: assert np.array_equal(argsorted, np.argsort(strings)) - # make a copy so we don't mutate the lists in the fixture strings = strings.copy() arr_sorted = np.array(sorted(strings), dtype=dtype) @@ -537,10 +542,10 @@ def test_fancy_indexing(string_list): ] lops = [ - ['a'*25, 'b'*25], + ['a' * 25, 'b' * 25], ['', ''], ['hello', 'world'], - ['hello', 'world'*25], + ['hello', 'world' * 25], ] # see gh-27003 and gh-27053 @@ -548,11 +553,30 @@ def test_fancy_indexing(string_list): for lop in lops: a = np.array(lop, dtype="T") assert_array_equal(a[ind], a) - rop = ['d'*25, 'e'*25] + rop = ['d' * 25, 'e' * 25] for b in [rop, np.array(rop, dtype="T")]: a[ind] = b assert_array_equal(a, b) - assert a[0] == 'd'*25 + assert a[0] == 'd' * 25 + + # see gh-29279 + data = [ + ["AAAAAAAAAAAAAAAAA"], + ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"], + ["CCCCCCCCCCCCCCCCC"], + ["DDDDDDDDDDDDDDDDD"], + ] + sarr = np.array(data, dtype=np.dtypes.StringDType()) + uarr = np.array(data, dtype="U30") + for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 3]], [[1, 1]]]: + assert_array_equal(sarr[ind], uarr[ind]) + + +def test_flatiter_indexing(): + # see gh-29659 + arr = np.array(['hello', 'world'], 
dtype='T') + arr.flat[:] = 9223372036854775 + assert_array_equal(arr, np.array([9223372036854775] * 2, dtype='T')) def test_creation_functions(): @@ -573,10 +597,10 @@ def test_concatenate(string_list): def test_resize_method(string_list): sarr = np.array(string_list, dtype="T") if IS_PYPY: - sarr.resize(len(string_list)+3, refcheck=False) + sarr.resize(len(string_list) + 3, refcheck=False) else: - sarr.resize(len(string_list)+3) - assert_array_equal(sarr, np.array(string_list + ['']*3, dtype="T")) + sarr.resize(len(string_list) + 3) + assert_array_equal(sarr, np.array(string_list + [''] * 3, dtype="T")) def test_create_with_copy_none(string_list): @@ -756,6 +780,21 @@ def test_float_casts(typename): assert_array_equal(eres, res) +def test_float_nan_cast_na_object(): + # gh-28157 + dt = np.dtypes.StringDType(na_object=np.nan) + arr1 = np.full((1,), fill_value=np.nan, dtype=dt) + arr2 = np.full_like(arr1, fill_value=np.nan) + + assert arr1.item() is np.nan + assert arr2.item() is np.nan + + inp = [1.2, 2.3, np.nan] + arr = np.array(inp).astype(dt) + assert arr[2] is np.nan + assert arr[0] == '1.2' + + @pytest.mark.parametrize( "typename", [ @@ -1186,7 +1225,7 @@ def test_nat_casts(): for arr in [dt_array, td_array]: assert_array_equal( arr.astype(dtype), - np.array([output_object]*arr.size, dtype=dtype)) + np.array([output_object] * arr.size, dtype=dtype)) def test_nat_conversion(): @@ -1214,38 +1253,22 @@ def test_growing_strings(dtype): assert_array_equal(arr, uarr) -@pytest.mark.skipif(IS_WASM, reason="no threading support in wasm") -def test_threaded_access_and_mutation(dtype, random_string_list): - # this test uses an RNG and may crash or cause deadlocks if there is a - # threading bug - rng = np.random.default_rng(0x4D3D3D3) - - def func(arr): - rnd = rng.random() - # either write to random locations in the array, compute a ufunc, or - # re-initialize the array - if rnd < 0.25: - num = np.random.randint(0, arr.size) - arr[num] = arr[num] + "hello" - elif rnd < 
0.5: - if rnd < 0.375: - np.add(arr, arr) - else: - np.add(arr, arr, out=arr) - elif rnd < 0.75: - if rnd < 0.875: - np.multiply(arr, np.int64(2)) - else: - np.multiply(arr, np.int64(2), out=arr) - else: - arr[:] = random_string_list - - with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - arr = np.array(random_string_list, dtype=dtype) - futures = [tpe.submit(func, arr) for _ in range(500)] +def test_assign_medium_strings(): + # see gh-29261 + N = 9 + src = np.array( + ( + ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255] + + ['0' * 256] * 2 + ['0' * 255] + ), dtype='T') + dst = np.array( + ( + ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256] + + ['0' * 255] + [''] * 5 + ), dtype='T') - for f in futures: - f.result() + dst[1:N + 1] = src + assert_array_equal(dst[1:N + 1], src) UFUNC_TEST_DATA = [ @@ -1361,11 +1384,10 @@ def test_unary(string_array, unicode_array, function_name): # to avoid these errors we'd need to add NA support to _vec_string with pytest.raises((ValueError, TypeError)): func(na_arr) + elif function_name == "splitlines": + assert func(na_arr)[0] == func(dtype.na_object)[()] else: - if function_name == "splitlines": - assert func(na_arr)[0] == func(dtype.na_object)[()] - else: - assert func(na_arr)[0] == func(dtype.na_object) + assert func(na_arr)[0] == func(dtype.na_object) return if function_name == "str_len" and not is_str: # str_len always errors for any non-string null, even NA ones because @@ -1650,17 +1672,17 @@ class TestImplementation: """ @classmethod - def setup_class(self): - self.MISSING = 0x80 - self.INITIALIZED = 0x40 - self.OUTSIDE_ARENA = 0x20 - self.LONG = 0x10 - self.dtype = StringDType(na_object=np.nan) - self.sizeofstr = self.dtype.itemsize - sp = self.dtype.itemsize // 2 # pointer size = sizeof(size_t) + def setup_class(cls): + cls.MISSING = 0x80 + cls.INITIALIZED = 0x40 + cls.OUTSIDE_ARENA = 0x20 + cls.LONG = 0x10 + cls.dtype = StringDType(na_object=np.nan) + cls.sizeofstr = cls.dtype.itemsize 
+ sp = cls.dtype.itemsize // 2 # pointer size = sizeof(size_t) # Below, size is not strictly correct, since it really uses # 7 (or 3) bytes, but good enough for the tests here. - self.view_dtype = np.dtype([ + cls.view_dtype = np.dtype([ ('offset', f'u{sp}'), ('size', f'u{sp // 2}'), ('xsiz', f'V{sp // 2 - 1}'), @@ -1671,13 +1693,13 @@ def setup_class(self): ('size', f'u{sp // 2}'), ('offset', f'u{sp}'), ]) - self.s_empty = "" - self.s_short = "01234" - self.s_medium = "abcdefghijklmnopqrstuvwxyz" - self.s_long = "-=+" * 100 - self.a = np.array( - [self.s_empty, self.s_short, self.s_medium, self.s_long], - self.dtype) + cls.s_empty = "" + cls.s_short = "01234" + cls.s_medium = "abcdefghijklmnopqrstuvwxyz" + cls.s_long = "-=+" * 100 + cls.a = np.array( + [cls.s_empty, cls.s_short, cls.s_medium, cls.s_long], + cls.dtype) def get_view(self, a): # Cannot view a StringDType as anything else directly, since diff --git a/blimgui/dist64/numpy/_core/tests/test_strings.py b/blimgui/dist64/numpy/_core/tests/test_strings.py index bbdf4ab..2c907a7 100644 --- a/blimgui/dist64/numpy/_core/tests/test_strings.py +++ b/blimgui/dist64/numpy/_core/tests/test_strings.py @@ -1,10 +1,11 @@ +import operator import sys + import pytest -import operator import numpy as np - -from numpy.testing import assert_array_equal, assert_raises, IS_PYPY +from numpy._core._exceptions import _UFuncNoLoopError +from numpy.testing import IS_PYPY, assert_array_equal, assert_raises from numpy.testing._private.utils import requires_memory COMPARISONS = [ @@ -135,6 +136,7 @@ def test_string_size_dtype_large_repr(str_dt): @pytest.mark.slow @requires_memory(2 * np.iinfo(np.intc).max) @pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_string_coercion_error(str_dt): very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize try: @@ -162,6 +164,7 @@ def __str__(self): @pytest.mark.slow @requires_memory(2 * np.iinfo(np.intc).max) 
@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_string_addition_error(str_dt): very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize @@ -191,6 +194,14 @@ def test_large_string_cast(): a.astype("U") +@pytest.mark.parametrize("dt", ["S1", "U1"]) +def test_in_place_mutiply_no_overflow(dt): + # see gh-30495 + a = np.array("a", dtype=dt) + a *= 20 + assert_array_equal(a, np.array("a", dtype=dt)) + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: @@ -224,9 +235,20 @@ def test_multiply_raises(self, dt): with pytest.raises(TypeError, match="unsupported type"): np.strings.multiply(np.array("abc", dtype=dt), 3.14) - with pytest.raises(MemoryError): + with pytest.raises(OverflowError): np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, np.int64, np.int_]) def test_multiply_integer_dtypes(self, i_dt, dt): @@ -363,24 +385,26 @@ def test_str_len(self, in_, out, dt): ("", "xx", 0, None, -1), ("", "xx", 1, 1, -1), ("", "xx", MAX, 0, -1), - pytest.param(99*"a" + "b", "b", 0, None, 99, + pytest.param(99 * "a" + "b", "b", 0, None, 99, id="99*a+b-b-0-None-99"), - pytest.param(98*"a" + "ba", "ba", 0, None, 98, + pytest.param(98 * "a" + "ba", "ba", 0, None, 98, id="98*a+ba-ba-0-None-98"), - pytest.param(100*"a", "b", 0, None, -1, + pytest.param(100 * "a", "b", 0, None, -1, id="100*a-b-0-None--1"), - pytest.param(30000*"a" + 100*"b", 100*"b", 0, None, 30000, + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 30000, id="30000*a+100*b-100*b-0-None-30000"), - pytest.param(30000*"a", 100*"b", 0, None, -1, + 
pytest.param(30000 * "a", 100 * "b", 0, None, -1, id="30000*a-100*b-0-None--1"), - pytest.param(15000*"a" + 15000*"b", 15000*"b", 0, None, 15000, + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 15000, id="15000*a+15000*b-15000*b-0-None-15000"), - pytest.param(15000*"a" + 15000*"b", 15000*"c", 0, None, -1, + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, -1, id="15000*a+15000*b-15000*c-0-None--1"), (["abcdefghiabc", "rrarrrrrrrrra"], ["def", "arr"], [0, 3], None, [3, -1]), ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + pytest.param("A" * (2 ** 17), r"[\w]+\Z", 0, None, -1, + id=r"A*2**17-[\w]+\Z-0-None--1"), ]) def test_find(self, a, sub, start, end, out, dt): if "😊" in a and dt == "S": @@ -429,17 +453,17 @@ def test_rfind(self, a, sub, start, end, out, dt): ("aaa", "", -1, None, 2), ("aaa", "", -10, None, 4), ("aaa", "aaaa", 0, None, 0), - pytest.param(98*"a" + "ba", "ba", 0, None, 1, + pytest.param(98 * "a" + "ba", "ba", 0, None, 1, id="98*a+ba-ba-0-None-1"), - pytest.param(30000*"a" + 100*"b", 100*"b", 0, None, 1, + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 1, id="30000*a+100*b-100*b-0-None-1"), - pytest.param(30000*"a", 100*"b", 0, None, 0, + pytest.param(30000 * "a", 100 * "b", 0, None, 0, id="30000*a-100*b-0-None-0"), - pytest.param(30000*"a" + 100*"ab", "ab", 0, None, 100, + pytest.param(30000 * "a" + 100 * "ab", "ab", 0, None, 100, id="30000*a+100*ab-ab-0-None-100"), - pytest.param(15000*"a" + 15000*"b", 15000*"b", 0, None, 1, + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 1, id="15000*a+15000*b-15000*b-0-None-1"), - pytest.param(15000*"a" + 15000*"b", 15000*"c", 0, None, 0, + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, 0, id="15000*a+15000*b-15000*c-0-None-0"), ("", "", 0, None, 1), ("", "", 1, 1, 0), @@ -648,7 +672,7 @@ def test_strip(self, a, chars, out, dt): ("ABCADAA", "A", "", -1, "BCD"), ("BCD", "A", "", -1, "BCD"), ("*************", "A", "", -1, 
"*************"), - ("^"+"A"*1000+"^", "A", "", 999, "^A^"), + ("^" + "A" * 1000 + "^", "A", "", 999, "^A^"), ("the", "the", "", -1, ""), ("theater", "the", "", -1, "ater"), ("thethe", "the", "", -1, ""), @@ -808,6 +832,20 @@ def test_expandtabs_raises_overflow(self, dt): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) + def test_expandtabs_length_not_cause_segfault(self, dt): + # see gh-28829 + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np._core.strings._expandtabs_length.reduce(np.zeros(200)) + + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np.strings.expandtabs(np.zeros(200)) + FILL_ERROR = "The fill character must be exactly one character long" def test_center_raises_multiple_character_fill(self, dt): @@ -833,6 +871,7 @@ def test_rjust_raises_multiple_character_fill(self, dt): ('abc', 6, ' ', ' abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '***abc****'), ]) def test_center(self, buf, width, fillchar, res, dt): @@ -846,6 +885,7 @@ def test_center(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', 'abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', 'abc*******'), ]) def test_ljust(self, buf, width, fillchar, res, dt): @@ -859,6 +899,7 @@ def test_ljust(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', ' abc'), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '*******abc'), ]) def test_rjust(self, buf, width, fillchar, res, dt): @@ -880,6 +921,7 @@ def test_rjust(self, buf, width, fillchar, res, dt): ('-0123', 5, '-0123'), ('000', 3, '000'), ('34', 1, '34'), + ('34', -1, '34'), ('0034', 4, '0034'), ]) def test_zfill(self, buf, width, res, dt): @@ -941,6 +983,101 @@ def test_rpartition(self, buf, sep, 
res1, res2, res3, dt): assert_array_equal(act3, res3) assert_array_equal(act1 + act2 + act3, buf) + @pytest.mark.parametrize("args", [ + (None,), + (None, None), + (None, None, -1), + (0,), + (0, None), + (0, None, -1), + (1,), + (1, None), + (1, None, -1), + (3,), + (3, None), + (5,), + (5, None), + (5, 5), + (5, 5, -1), + (6,), # test index past the end + (6, None), + (6, None, -1), + (6, 7), # test start and stop index past the end + (4, 3), # test start > stop index + (-1,), + (-1, None), + (-1, None, -1), + (-3,), + (-3, None), + ([3, 4],), + ([3, 4], None), + ([2, 4],), + ([-3, 5],), + ([-3, 5], None), + ([-3, 5], None, -1), + ([0, -5],), + ([0, -5], None), + ([0, -5], None, -1), + (1, 4), + (-3, 5), + (None, -1), + (0, [4, 2]), + ([1, 2], [-1, -2]), + (1, 5, 2), + (None, None, -1), + ([0, 6], [-1, 0], [2, -1]), + ]) + @pytest.mark.parametrize("buf", [ + ["hello", "world"], + ['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'], + ]) + def test_slice(self, args, buf, dt): + if dt == "S" and "你好世界" in buf: + pytest.skip("Bytes dtype does not support non-ascii input") + if len(buf) == 4: + args = tuple(s * 2 if isinstance(s, list) else s for s in args) + buf = np.array(buf, dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + + def test_slice_unsupported(self, dt): + with pytest.raises(TypeError, match="did not contain a loop"): + np.strings.slice(np.array([1, 2, 3]), 4) + + regexp = (r"Cannot cast ufunc '_slice' input .* " + r"from .* to dtype\('int(64|32)'\)") + with pytest.raises(TypeError, match=regexp): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), + np.array(['foo', 'bar'], dtype=dt)) + + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, + np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]) + def test_slice_int_type_promotion(self, int_dt, dt): + 
buf = np.array(["hello", "world"], dtype=dt) + np_slice = np.strings.slice + assert_array_equal(np_slice(buf, int_dt(4)), + np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([4, 4], dtype=int_dt)), + np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(2), int_dt(4)), + np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([2, 2], dtype=int_dt), + np.array([4, 4], dtype=int_dt)), + np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(0), int_dt(4), int_dt(2)), + np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np_slice(buf, + np.array([0, 0], dtype=int_dt), + np.array([4, 4], dtype=int_dt), + np.array([2, 2], dtype=int_dt)), + np.array(["hl", "wr"], dtype=dt)) @pytest.mark.parametrize("dt", ["U", "T"]) class TestMethodsWithUnicode: @@ -1161,7 +1298,7 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): [ ("λμ", "μ"), ("λμ", "λ"), - ("λ"*5 + "μ"*2, "μ"), + ("λ" * 5 + "μ" * 2, "μ"), ("λ" * 5 + "μ" * 2, "λ"), ("λ" * 5 + "A" + "μ" * 2, "μλ"), ("λμ" * 5, "μ"), @@ -1178,6 +1315,37 @@ def test_strip_functions_unicode(self, source, strip, method, dt): assert_array_equal(actual, expected) + @pytest.mark.parametrize("args", [ + (None,), + (0,), + (1,), + (5,), + (15,), + (22,), + (-1,), + (-3,), + ([3, 4],), + ([-5, 5],), + ([0, -8],), + (1, 12), + (-12, 15), + (None, -1), + (0, [17, 6]), + ([1, 2], [-1, -2]), + (1, 11, 2), + (None, None, -1), + ([0, 10], [-1, 0], [2, -1]), + ]) + def test_slice(self, args, dt): + buf = np.array(["Приве́т नमस्ते שָׁלוֹם", "😀😃😄😁😆😅🤣😂🙂🙃"], + dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + class TestMixedTypeMethods: def test_center(self): @@ -1255,21 +1423,21 @@ class TestReplaceOnArrays: def test_replace_count_and_size(self, dt): a 
= np.array(["0123456789" * i for i in range(4)], dtype=dt) r1 = np.strings.replace(a, "5", "ABCDE") - assert r1.dtype.itemsize == check_itemsize(3*10 + 3*4, dt) + assert r1.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) r1_res = np.array(["01234ABCDE6789" * i for i in range(4)], dtype=dt) assert_array_equal(r1, r1_res) r2 = np.strings.replace(a, "5", "ABCDE", 1) - assert r2.dtype.itemsize == check_itemsize(3*10 + 4, dt) + assert r2.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) r3 = np.strings.replace(a, "5", "ABCDE", 0) assert r3.dtype.itemsize == a.dtype.itemsize assert_array_equal(r3, a) # Negative values mean to replace all. r4 = np.strings.replace(a, "5", "ABCDE", -1) - assert r4.dtype.itemsize == check_itemsize(3*10 + 3*4, dt) + assert r4.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) assert_array_equal(r4, r1) # We can do count on an element-by-element basis. r5 = np.strings.replace(a, "5", "ABCDE", [-1, -1, -1, 1]) - assert r5.dtype.itemsize == check_itemsize(3*10 + 4, dt) + assert r5.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) assert_array_equal(r5, np.array( ["01234ABCDE6789" * i for i in range(3)] + ["01234ABCDE6789" + "0123456789" * 2], dtype=dt)) @@ -1285,3 +1453,71 @@ def test_replace_broadcasting(self, dt): dtype=dt)) r3 = np.strings.replace(a, ["0", "0,0", "0,0,0"], "X") assert_array_equal(r3, np.array(["X,X,X", "X,0", "X"], dtype=dt)) + + +class TestOverride: + @classmethod + def setup_class(cls): + class Override: + + def __array_function__(self, *args, **kwargs): + return "function" + + def __array_ufunc__(self, *args, **kwargs): + return "ufunc" + + cls.override = Override() + + @pytest.mark.parametrize("func, kwargs", [ + (np.strings.center, dict(width=10)), + (np.strings.capitalize, {}), + (np.strings.decode, {}), + (np.strings.encode, {}), + (np.strings.expandtabs, {}), + (np.strings.ljust, dict(width=10)), + (np.strings.lower, {}), + (np.strings.mod, dict(values=2)), + (np.strings.multiply, dict(i=2)), + 
(np.strings.partition, dict(sep="foo")), + (np.strings.rjust, dict(width=10)), + (np.strings.rpartition, dict(sep="foo")), + (np.strings.swapcase, {}), + (np.strings.title, {}), + (np.strings.translate, dict(table=None)), + (np.strings.upper, {}), + (np.strings.zfill, dict(width=10)), + ]) + def test_override_function(self, func, kwargs): + assert func(self.override, **kwargs) == "function" + + @pytest.mark.parametrize("func, args, kwargs", [ + (np.strings.add, (None, ), {}), + (np.strings.lstrip, (), {}), + (np.strings.rstrip, (), {}), + (np.strings.strip, (), {}), + (np.strings.equal, (None, ), {}), + (np.strings.not_equal, (None, ), {}), + (np.strings.greater_equal, (None, ), {}), + (np.strings.less_equal, (None, ), {}), + (np.strings.greater, (None, ), {}), + (np.strings.less, (None, ), {}), + (np.strings.count, ("foo", ), {}), + (np.strings.endswith, ("foo", ), {}), + (np.strings.find, ("foo", ), {}), + (np.strings.index, ("foo", ), {}), + (np.strings.isalnum, (), {}), + (np.strings.isalpha, (), {}), + (np.strings.isdecimal, (), {}), + (np.strings.isdigit, (), {}), + (np.strings.islower, (), {}), + (np.strings.isnumeric, (), {}), + (np.strings.isspace, (), {}), + (np.strings.istitle, (), {}), + (np.strings.isupper, (), {}), + (np.strings.rfind, ("foo", ), {}), + (np.strings.rindex, ("foo", ), {}), + (np.strings.startswith, ("foo", ), {}), + (np.strings.str_len, (), {}), + ]) + def test_override_ufunc(self, func, args, kwargs): + assert func(self.override, *args, **kwargs) == "ufunc" diff --git a/blimgui/dist64/numpy/_core/tests/test_ufunc.py b/blimgui/dist64/numpy/_core/tests/test_ufunc.py index 41c2b78..c7bc160 100644 --- a/blimgui/dist64/numpy/_core/tests/test_ufunc.py +++ b/blimgui/dist64/numpy/_core/tests/test_ufunc.py @@ -1,27 +1,35 @@ -import warnings -import itertools -import sys import ctypes as ct +import inspect +import itertools import pickle +import sys +import warnings import pytest from pytest import param import numpy as np -import 
numpy._core.umath as ncu -import numpy._core._umath_tests as umt -import numpy.linalg._umath_linalg as uml import numpy._core._operand_flag_tests as opflag_tests import numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt +import numpy._core.umath as ncu +import numpy.linalg._umath_linalg as uml from numpy.exceptions import AxisError from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_no_warnings, - assert_allclose, HAS_REFCOUNT, suppress_warnings, IS_WASM, IS_PYPY, - ) + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) from numpy.testing._private.utils import requires_memory - UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if isinstance(obj, np.ufunc)] UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] @@ -153,16 +161,16 @@ def test_binary_PyUFunc_On_Om_method(self, foo=foo): def test_python_complex_conjugate(self): # The conjugate ufunc should fall back to calling the method: - arr = np.array([1+2j, 3-4j], dtype="O") + arr = np.array([1 + 2j, 3 - 4j], dtype="O") assert isinstance(arr[0], complex) res = np.conjugate(arr) assert res.dtype == np.dtype("O") - assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O")) + assert_array_equal(res, np.array([1 - 2j, 3 + 4j], dtype="O")) @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_unary_PyUFunc_O_O_method_full(self, ufunc): """Compare the result of the object loop with non-object one""" - val = np.float64(np.pi/4) + val = np.float64(np.pi / 4) class MyFloat(np.float64): def __getattr__(self, attr): @@ -310,6 +318,7 @@ def test_all_ufunc(self): # from include/numpy/ufuncobject.h size_inferred = 2 can_ignore = 4 + def test_signature0(self): # the arguments to test_signature are: nin, nout, 
core_signature enabled, num_dims, ixs, flags, sizes = umt.test_signature( @@ -337,7 +346,7 @@ def test_signature2(self): assert_equal(enabled, 1) assert_equal(num_dims, (2, 1, 1)) assert_equal(ixs, (0, 1, 2, 3)) - assert_equal(flags, (self.size_inferred,)*4) + assert_equal(flags, (self.size_inferred,) * 4) assert_equal(sizes, (-1, -1, -1, -1)) def test_signature3(self): @@ -346,7 +355,7 @@ def test_signature3(self): assert_equal(enabled, 1) assert_equal(num_dims, (2, 1, 2)) assert_equal(ixs, (0, 1, 2, 1, 3)) - assert_equal(flags, (self.size_inferred,)*4) + assert_equal(flags, (self.size_inferred,) * 4) assert_equal(sizes, (-1, -1, -1, -1)) def test_signature4(self): @@ -356,7 +365,7 @@ def test_signature4(self): assert_equal(enabled, 1) assert_equal(num_dims, (2, 2, 2)) assert_equal(ixs, (0, 1, 1, 2, 0, 2)) - assert_equal(flags, (self.size_inferred,)*3) + assert_equal(flags, (self.size_inferred,) * 3) assert_equal(sizes, (-1, -1, -1)) def test_signature5(self): @@ -436,14 +445,13 @@ def test_get_signature(self): assert_equal(np.vecdot.signature, "(n),(n)->()") def test_forced_sig(self): - a = 0.5*np.arange(3, dtype='f8') + a = 0.5 * np.arange(3, dtype='f8') assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) - with pytest.warns(DeprecationWarning): - assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) + with assert_raises(TypeError): + np.add(a, 0.5, sig='i', casting='unsafe') assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) - with pytest.warns(DeprecationWarning): - assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), - [0, 0, 1]) + with assert_raises(TypeError): + np.add(a, 0.5, sig=('i4',), casting='unsafe') assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), casting='unsafe'), [0, 0, 1]) @@ -451,17 +459,15 @@ def test_forced_sig(self): np.add(a, 0.5, out=b) assert_equal(b, [0.5, 1, 1.5]) b[:] = 0 - with pytest.warns(DeprecationWarning): + with assert_raises(TypeError): np.add(a, 0.5, sig='i', out=b, casting='unsafe') - 
assert_equal(b, [0, 0, 1]) - b[:] = 0 + assert_equal(b, [0, 0, 0]) np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 - with pytest.warns(DeprecationWarning): + with assert_raises(TypeError): np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 + assert_equal(b, [0, 0, 0]) np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) @@ -486,8 +492,8 @@ def test_signature_dtype_type(self): np.add(3, 4, signature=(float_dtype, float_dtype, None)) @pytest.mark.parametrize("get_kwarg", [ - lambda dt: dict(dtype=dt), - lambda dt: dict(signature=(dt, None, None))]) + param(lambda dt: {"dtype": dt}, id="dtype"), + param(lambda dt: {"signature": (dt, None, None)}, id="signature")]) def test_signature_dtype_instances_allowed(self, get_kwarg): # We allow certain dtype instances when there is a clear singleton # and the given one is equivalent; mainly for backcompat. @@ -497,13 +503,9 @@ def test_signature_dtype_instances_allowed(self, get_kwarg): assert int64 is not int64_2 assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64 - td = np.timedelta(2, "s") + td = np.timedelta64(2, "s") assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]" - @pytest.mark.parametrize("get_kwarg", [ - param(lambda x: dict(dtype=x), id="dtype"), - param(lambda x: dict(signature=(x, None, None)), id="signature")]) - def test_signature_dtype_instances_allowed(self, get_kwarg): msg = "The `dtype` and `signature` arguments to ufuncs" with pytest.raises(TypeError, match=msg): @@ -653,9 +655,9 @@ def test_true_divide(self): # Check with no output type specified if tc in 'FDG': - tgt = complex(x)/complex(y) + tgt = complex(x) / complex(y) else: - tgt = float(x)/float(y) + tgt = float(x) / float(y) res = np.true_divide(x, y) rtol = max(np.finfo(res).resolution, 1e-15) @@ -664,7 +666,7 @@ def test_true_divide(self): if tc in 'bhilqBHILQ': assert_(res.dtype.name == 'float64') else: - 
assert_(res.dtype.name == dt.name ) + assert_(res.dtype.name == dt.name) # Check with output type specified. This also checks for the # incorrect casts in issue gh-3484 because the unary '-' does @@ -681,11 +683,11 @@ def test_true_divide(self): # Casting complex to float is not allowed assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) else: - tgt = float(x)/float(y) + tgt = float(x) / float(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -701,11 +703,11 @@ def test_true_divide(self): for tcout in 'FDG': dtout = np.dtype(tcout) - tgt = complex(x)/complex(y) + tgt = complex(x) / complex(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -846,7 +848,7 @@ def test_matvec(self): np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)]) @pytest.mark.parametrize("matrix", [ None, - np.array([[1.+1j, 0.5, -0.5j], + np.array([[1. 
+ 1j, 0.5, -0.5j], [0.25, 2j, 0.], [4., 0., -1j]])]) def test_vecmatvec_identity(self, matrix, vec): @@ -923,10 +925,10 @@ def test_broadcast(self): msg = "broadcast" a = np.arange(4).reshape((2, 1, 2)) b = np.arange(4).reshape((1, 2, 2)) - assert_array_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), err_msg=msg) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) msg = "extend & broadcast loop dimensions" b = np.arange(4).reshape((2, 2)) - assert_array_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), err_msg=msg) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) # Broadcast in core dimensions should fail a = np.arange(8).reshape((4, 2)) b = np.arange(4).reshape((4, 1)) @@ -994,31 +996,31 @@ def test_out_broadcast_errors(self, arr, out): def test_type_cast(self): msg = "type cast" a = np.arange(6, dtype='short').reshape((2, 3)) - assert_array_equal(np.vecdot(a, a), np.sum(a*a, axis=-1), + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), err_msg=msg) msg = "type cast on one argument" a = np.arange(6).reshape((2, 3)) b = a + 0.1 - assert_array_almost_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), + assert_array_almost_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) def test_endian(self): msg = "big endian" a = np.arange(6, dtype='>i4').reshape((2, 3)) - assert_array_equal(np.vecdot(a, a), np.sum(a*a, axis=-1), + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), err_msg=msg) msg = "little endian" a = np.arange(6, dtype='()' @@ -1297,18 +1340,18 @@ def test_innerwt(self): a = np.arange(6).reshape((2, 3)) b = np.arange(10, 16).reshape((2, 3)) w = np.arange(20, 26).reshape((2, 3)) - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) a = np.arange(100, 124).reshape((2, 3, 4)) b = np.arange(200, 224).reshape((2, 3, 4)) w = np.arange(300, 324).reshape((2, 3, 4)) - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, 
axis=-1)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) def test_innerwt_empty(self): """Test generalized ufunc with zero-sized operands""" a = np.array([], dtype='f8') b = np.array([], dtype='f8') w = np.array([], dtype='f8') - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) def test_cross1d(self): """Test with fixed-sized signature.""" @@ -1409,18 +1452,18 @@ def test_matrix_multiply_umath_empty(self): def compare_matrix_multiply_results(self, tp): d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) - msg = "matrix multiply on type %s" % d1.dtype.name + msg = f"matrix multiply on type {d1.dtype.name}" def permute_n(n): if n == 1: return ([0],) ret = () - base = permute_n(n-1) + base = permute_n(n - 1) for perm in base: for i in range(n): - new = perm + [n-1] - new[n-1] = new[i] - new[i] = n-1 + new = perm + [n - 1] + new[n - 1] = new[i] + new[i] = n - 1 ret += (new,) return ret @@ -1428,17 +1471,17 @@ def slice_n(n): if n == 0: return ((),) ret = () - base = slice_n(n-1) + base = slice_n(n - 1) for sl in base: - ret += (sl+(slice(None),),) - ret += (sl+(slice(0, 1),),) + ret += (sl + (slice(None),),) + ret += (sl + (slice(0, 1),),) return ret def broadcastable(s1, s2): - return s1 == s2 or s1 == 1 or s2 == 1 + return s1 == s2 or 1 in {s1, s2} permute_3 = permute_n(3) - slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) + slice_3 = slice_n(3) + ((slice(None, None, -1),) * 3,) ref = True for p1 in permute_3: @@ -1454,9 +1497,8 @@ def broadcastable(s1, s2): assert_array_almost_equal( umt.matrix_multiply(a1, a2), np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * - a1[..., np.newaxis,:], axis=-1), - err_msg=msg + ' %s %s' % (str(a1.shape), - str(a2.shape))) + a1[..., np.newaxis, :], axis=-1), + err_msg=msg + f' {str(a1.shape)} {str(a2.shape)}') assert_equal(ref, True, err_msg="reference check") @@ -1512,7 +1554,8 
@@ def __eq__(self, other): arr1d = np.array([HasComparisons()]) assert_equal(arr1d == arr1d, np.array([True])) - assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d), np.array([True])) assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) def test_object_array_reduction(self): @@ -1542,7 +1585,7 @@ def test_object_array_accumulate_inplace(self): np.add.accumulate(arr, out=arr) np.add.accumulate(arr, out=arr) assert_array_equal(arr, - np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object), + np.array([[1] * i for i in [1, 3, 6, 10]], dtype=object), ) # And the same if the axis argument is used @@ -1551,7 +1594,7 @@ def test_object_array_accumulate_inplace(self): np.add.accumulate(arr, out=arr, axis=-1) np.add.accumulate(arr, out=arr, axis=-1) assert_array_equal(arr[0, :], - np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object), + np.array([[2] * i for i in [1, 3, 6, 10]], dtype=object), ) def test_object_array_accumulate_failure(self): @@ -1668,9 +1711,6 @@ def test_where_param(self): assert_equal(a, [[0, 27], [14, 5]]) def test_where_param_buffer_output(self): - # This test is temporarily skipped because it requires - # adding masking features to the nditer to work properly - # With casting on output a = np.ones(10, np.int64) b = np.ones(10, np.int64) @@ -1682,12 +1722,12 @@ def test_where_param_alloc(self): # With casting and allocated output a = np.array([1], dtype=np.int64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) # No casting and allocated output a = np.array([1], dtype=np.float64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) def test_where_with_broadcasting(self): # See gh-17198 @@ -1701,6 +1741,17 @@ def test_where_with_broadcasting(self): assert_array_equal((a[where] < b_where), 
out[where].astype(bool)) assert not out[~where].any() # outside mask, out remains all 0 + def test_where_warns(self): + a = np.arange(7) + mask = a % 2 == 0 + with pytest.warns(UserWarning, match="'where' used without 'out'"): + result1 = np.add(a, a, where=mask) + # Does not warn + result2 = np.add(a, a, where=mask, out=None) + # Sanity check + assert np.all(result1[::2] == [0, 4, 8, 12]) + assert np.all(result2[::2] == [0, 4, 8, 12]) + @staticmethod def identityless_reduce_arrs(): yield np.empty((2, 3, 4), order='C') @@ -1712,16 +1763,17 @@ def identityless_reduce_arrs(): # Not contiguous yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:] # Not contiguous and not aligned - a = np.empty((3*4*5*8 + 1,), dtype='i1') + a = np.empty((3 * 4 * 5 * 8 + 1,), dtype='i1') a = a[1:].view(dtype='f8') a.shape = (3, 4, 5) a = a[1:, 1:, 1:] yield a - @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("arrs", identityless_reduce_arrs()) @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) - def test_identityless_reduction(self, a, pos): + def test_identityless_reduction(self, arrs, pos): # np.minimum.reduce is an identityless reduction + a = arrs.copy() a[...] 
= 1 a[pos] = 0 @@ -1745,6 +1797,7 @@ def test_identityless_reduction(self, a, pos): @requires_memory(6 * 1024**3) @pytest.mark.skipif(sys.maxsize < 2**32, reason="test array too large for 32bit platform") + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_identityless_reduction_huge_array(self): # Regression test for gh-20921 (copying identity incorrectly failed) arr = np.zeros((2, 2**31), 'uint8') @@ -1870,7 +1923,7 @@ def test_identityless_reduction_nonreorderable(self): assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) def test_reduce_zero_axis(self): - # If we have a n x m array and do a reduction with axis=1, then we are + # If we have an n x m array and do a reduction with axis=1, then we are # doing n reductions, and each reduction takes an m-element array. For # a reduction operation without an identity, then: # n > 0, m > 0: fine @@ -2057,10 +2110,40 @@ def __rmul__(self, other): MyThing.rmul_count += 1 return self - np.float64(5)*MyThing((3, 3)) + np.float64(5) * MyThing((3, 3)) assert_(MyThing.rmul_count == 1, MyThing.rmul_count) assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + def test_array_wrap_array_priority(self): + class ArrayPriorityBase(np.ndarray): + @classmethod + def __array_wrap__(cls, array, context=None, return_scalar=False): + return cls + + class ArrayPriorityMinus0(ArrayPriorityBase): + __array_priority__ = 0 + + class ArrayPriorityMinus1000(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus1000b(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus2000(ArrayPriorityBase): + __array_priority__ = -2000 + + x = np.ones(2).view(ArrayPriorityMinus1000) + xb = np.ones(2).view(ArrayPriorityMinus1000b) + y = np.ones(2).view(ArrayPriorityMinus2000) + + assert np.add(x, y) is ArrayPriorityMinus1000 + assert np.add(y, x) is ArrayPriorityMinus1000 + assert np.add(x, xb) is ArrayPriorityMinus1000 + assert np.add(xb, x) is ArrayPriorityMinus1000b + y_minus0 
= np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 + assert type(np.add(xb, x, np.zeros(2))) is np.ndarray + @pytest.mark.parametrize("a", ( np.arange(10, dtype=int), np.arange(10, dtype=_rational_tests.rational), @@ -2119,7 +2202,7 @@ def test_ufunc_at_inner_loops(self, typecode, ufunc): for i, v in zip(indx, vals): # Make sure all the work happens inside the ufunc # in order to duplicate error/warning handling - ufunc(atag[i], v, out=atag[i:i+1], casting="unsafe") + ufunc(atag[i], v, out=atag[i:i + 1], casting="unsafe") assert_equal(atag, a) # If w_loop warned, make sure w_at warned as well if len(w_loop) > 0: @@ -2361,10 +2444,9 @@ def test_at_broadcast_failure(self): with pytest.raises(ValueError): np.add.at(arr, [0, 1], [1, 2, 3]) - def test_reduce_arguments(self): f = np.add.reduce - d = np.ones((5,2), dtype=int) + d = np.ones((5, 2), dtype=int) o = np.ones((2,), dtype=d.dtype) r = o * 5 assert_equal(f(d), r) @@ -2433,11 +2515,11 @@ class MyA(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): return getattr(ufunc, method)(*(input.view(np.ndarray) for input in inputs), **kwargs) - a = np.arange(12.).reshape(4,3) + a = np.arange(12.).reshape(4, 3) ra = a.view(dtype=('f8,f8,f8')).squeeze() mra = ra.view(MyA) - target = np.array([ True, False, False, False], dtype=bool) + target = np.array([True, False, False, False], dtype=bool) assert_equal(np.all(target == (mra == ra[0])), True) def test_scalar_equal(self): @@ -2562,7 +2644,7 @@ def test_reducelike_out_promotes(self): # For legacy dtypes, the signature currently has to be forced if `out=` # is passed. The two paths below should differ, without `dtype=` the # expected result should be: `np.prod(arr.astype("f8")).astype("f4")`! 
- arr = np.full(5, 2**25-1, dtype=np.int64) + arr = np.full(5, 2**25 - 1, dtype=np.int64) # float32 and int64 promote to float64: res = np.zeros((), dtype=np.float32) @@ -2597,10 +2679,10 @@ def test_reduce_noncontig_output(self): # # gh-8036 - x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8) - x = x[4:6,1:11:6,1:5].transpose(1, 2, 0) - y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4) - y = y_base[::2,:] + x = np.arange(7 * 13 * 8, dtype=np.int16).reshape(7, 13, 8) + x = x[4:6, 1:11:6, 1:5].transpose(1, 2, 0) + y_base = np.arange(4 * 4, dtype=np.int16).reshape(4, 4) + y = y_base[::2, :] y_base_copy = y_base.copy() @@ -2609,8 +2691,8 @@ def test_reduce_noncontig_output(self): # The results should match, and y_base shouldn't get clobbered assert_equal(r0, r1) - assert_equal(y_base[1,:], y_base_copy[1,:]) - assert_equal(y_base[3,:], y_base_copy[3,:]) + assert_equal(y_base[1, :], y_base_copy[1, :]) + assert_equal(y_base[3, :], y_base_copy[3, :]) @pytest.mark.parametrize("with_cast", [True, False]) def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast): @@ -2815,7 +2897,7 @@ def test_ufunc_noncontiguous(ufunc): # since different algorithms (libm vs. 
intrinsics) can be used # for different input strides res_eps = np.finfo(dt).eps - tol = 3*res_eps + tol = 3 * res_eps assert_allclose(res_c, res_n, atol=tol, rtol=tol) assert_allclose(res_c, res_o, atol=tol, rtol=tol) else: @@ -2895,6 +2977,21 @@ def test_ufunc_input_floatingpoint_error(bad_offset): np.add(arr, arr, dtype=np.intp, casting="unsafe") +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "methodname", + ["__call__", "accumulate", "at", "outer", "reduce", "reduceat", "resolve_dtypes"], +) +def test_ufunc_method_signatures(methodname: str): + method = getattr(np.ufunc, methodname) + + try: + _ = inspect.signature(method) + except ValueError as e: + pytest.fail(e.args[0]) + + def test_trivial_loop_invalid_cast(): # This tests the fast-path "invalid cast", see gh-19904. with pytest.raises(TypeError, @@ -2905,7 +3002,7 @@ def test_trivial_loop_invalid_cast(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") @pytest.mark.parametrize("offset", - [0, ncu.BUFSIZE//2, int(1.5*ncu.BUFSIZE)]) + [0, ncu.BUFSIZE // 2, int(1.5 * ncu.BUFSIZE)]) def test_reduce_casterrors(offset): # Test reporting of casting errors in reductions, we test various # offsets to where the casting error will occur, since these may occur @@ -2932,6 +3029,45 @@ def test_reduce_casterrors(offset): assert out[()] < value * offset +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_reduction_no_reference_leak(): + # Test that the generic reduction does not leak references. 
+ # gh-29358 + arr = np.array([1, 2, 3], dtype=np.int32) + count = sys.getrefcount(arr) + + np.add.reduce(arr, dtype=np.int32, initial=0) + assert count == sys.getrefcount(arr) + + np.add.accumulate(arr, dtype=np.int32) + assert count == sys.getrefcount(arr) + + np.add.reduceat(arr, [0, 1], dtype=np.int32) + assert count == sys.getrefcount(arr) + + # with `out=` the reference count is not changed + out = np.empty((), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduce(arr, dtype=np.int32, out=out, initial=0) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty(arr.shape, dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.accumulate(arr, dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty((2,), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + def test_object_reduce_cleanup_on_failure(): # Test cleanup, including of the initial value (manually provided or not) with pytest.raises(TypeError): @@ -3002,7 +3138,7 @@ def test_addition_reduce_negative_zero(dtype, use_initial): # Test various length, in case SIMD paths or chunking play a role. # 150 extends beyond the pairwise blocksize; probably not important. 
- for i in range(0, 150): + for i in range(150): arr = np.array([neg_zero] * i, dtype=dtype) res = np.sum(arr, **kwargs) if i > 0 or use_initial: @@ -3029,7 +3165,7 @@ def test_addition_unicode_inverse_byte_order(order1, order2): arr1 = np.array([element], dtype=f"{order1}U4") arr2 = np.array([element], dtype=f"{order2}U4") result = arr1 + arr2 - assert result == 2*element + assert result == 2 * element @pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64]) @@ -3128,6 +3264,7 @@ def test_resolve_dtypes_reduction_errors(self): @pytest.mark.skipif(not hasattr(ct, "pythonapi"), reason="`ctypes.pythonapi` required for capsule unpacking.") + @pytest.mark.thread_unsafe(reason="modifies global object in the ctypes API") def test_loop_access(self): # This is a basic test for the full strided loop access data_t = ct.c_char_p * 2 @@ -3199,3 +3336,70 @@ def test_long_arrays(self): t[28][414] = 1 tc = np.cos(t) assert_equal(tc[0][0], tc[28][414]) + + +class TestUFuncInspectSignature: + PARAMS_COMMON = { + "casting": "same_kind", + "order": "K", + "dtype": None, + "subok": True, + "signature": None, + } + + PARAMS_UFUNC = { + "where": True, + } | PARAMS_COMMON + + PARAMS_GUFUNC = { + "axes": np._NoValue, + "axis": np._NoValue, + "keepdims": False, + } | PARAMS_COMMON + + @pytest.mark.parametrize("ufunc", [np.log, np.gcd, np.frexp, np.divmod, np.matvec]) + def test_dunder_signature_attr(self, ufunc: np.ufunc): + assert hasattr(ufunc, "__signature__") + assert isinstance(ufunc.__signature__, inspect.Signature) + assert inspect.signature(ufunc) == ufunc.__signature__ + + @pytest.mark.parametrize("ufunc", [np.exp, np.mod, np.frexp, np.divmod, np.vecmat]) + def test_params_common_positional(self, ufunc: np.ufunc): + sig = inspect.signature(ufunc) + + # check positional-only parameters + posonly_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.POSITIONAL_ONLY} + assert len(posonly_params) == ufunc.nin + assert 
all(default is inspect.Parameter.empty + for default in posonly_params.values()) + + # check 'out' parameter + out_param = sig.parameters.get("out") + assert out_param is not None + assert out_param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + @pytest.mark.parametrize("ufunc", [np.sin, np.add, np.frexp, np.divmod]) + def test_params_common_ufunc(self, ufunc: np.ufunc): + assert ufunc.signature is None # sanity check + + sig = inspect.signature(ufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_UFUNC + + @pytest.mark.parametrize("gufunc", [np.matmul, np.matvec, np.vecdot, np.vecmat]) + def test_params_common_gufunc(self, gufunc: np.ufunc): + assert gufunc.signature is not None # sanity check + + sig = inspect.signature(gufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_GUFUNC diff --git a/blimgui/dist64/numpy/_core/tests/test_umath.py b/blimgui/dist64/numpy/_core/tests/test_umath.py index 737622f..964bb58 100644 --- a/blimgui/dist64/numpy/_core/tests/test_umath.py +++ b/blimgui/dist64/numpy/_core/tests/test_umath.py @@ -1,24 +1,37 @@ -import platform -import warnings import fnmatch +import inspect import itertools -import pytest -import sys import operator +import platform +import sys +import warnings +from collections import namedtuple from fractions import Fraction from functools import reduce -from collections import namedtuple +import pytest + +import numpy as np import numpy._core.umath as ncu from numpy._core import _umath_tests as ncu_tests, sctypes -import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_array_max_ulp, 
assert_allclose, assert_no_warnings, suppress_warnings, - _gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM, IS_MUSL, - IS_PYPY, HAS_REFCOUNT - ) + HAS_REFCOUNT, + IS_MUSL, + IS_PYPY, + IS_WASM, + _gen_alignment_data, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_no_warnings, + assert_raises, + assert_raises_regex, +) from numpy.testing._private.utils import _glibc_older_than UFUNCS = [obj for obj in np._core.umath.__dict__.values() @@ -269,9 +282,9 @@ class ArrSubclass(np.ndarray): pass arr = np.arange(10).view(ArrSubclass) - + orig_refcount = sys.getrefcount(arr) arr *= 1 - assert sys.getrefcount(arr) == 2 + assert sys.getrefcount(arr) == orig_refcount class TestComparisons: @@ -389,13 +402,13 @@ def test_object_nonbool_dtype_error(self): (operator.eq, np.equal), (operator.ne, np.not_equal) ]) - @pytest.mark.parametrize("vals", [(2**60, 2**60+1), (2**60+1, 2**60)]) + @pytest.mark.parametrize("vals", [(2**60, 2**60 + 1), (2**60 + 1, 2**60)]) def test_large_integer_direct_comparison( self, dtypes, py_comp, np_comp, vals): # Note that float(2**60) + 1 == float(2**60). 
a1 = np.array([2**60], dtype=dtypes[0]) a2 = np.array([2**60 + 1], dtype=dtypes[1]) - expected = py_comp(2**60, 2**60+1) + expected = py_comp(2**60, 2**60 + 1) assert py_comp(a1, a2) == expected assert np_comp(a1, a2) == expected @@ -501,7 +514,7 @@ def test_division_int_boundary(self, dtype, ex_val): c_div = lambda n, d: ( 0 if d == 0 else ( - fo.min if (n and n == fo.min and d == -1) else n//d + fo.min if (n and n == fo.min and d == -1) else n // d ) ) with np.errstate(divide='ignore'): @@ -563,7 +576,7 @@ def test_division_int_reduce(self, dtype, ex_val): a = eval(ex_val) lst = a.tolist() c_div = lambda n, d: ( - 0 if d == 0 or (n and n == fo.min and d == -1) else n//d + 0 if d == 0 or (n and n == fo.min and d == -1) else n // d ) with np.errstate(divide='ignore'): @@ -585,19 +598,19 @@ def test_division_int_reduce(self, dtype, ex_val): @pytest.mark.parametrize( "dividend,divisor,quotient", - [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12), - (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12), - (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12), - (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12), - (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1), - (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0), - (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')), - (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')), - (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')), - (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), - (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), - (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')), - (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')), + [(np.timedelta64(2, 'Y'), np.timedelta64(2, 'M'), 12), + (np.timedelta64(2, 'Y'), np.timedelta64(-2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(-2, 'M'), 12), + (np.timedelta64(2, 'M'), np.timedelta64(-2, 'Y'), -1), + (np.timedelta64(2, 'Y'), np.timedelta64(0, 'M'), 0), + (np.timedelta64(2, 'Y'), 2, 
np.timedelta64(1, 'Y')), + (np.timedelta64(2, 'Y'), -2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), 2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -3, np.timedelta64(0, 'Y')), + (np.timedelta64(-2, 'Y'), 0, np.timedelta64('Nat', 'Y')), ]) def test_division_int_timedelta(self, dividend, divisor, quotient): # If either divisor is 0 or quotient is Nat, check for division by 0 @@ -607,8 +620,8 @@ def test_division_int_timedelta(self, dividend, divisor, quotient): # Test for arrays as well msg = "Timedelta arrays floor division check" - dividend_array = np.array([dividend]*5) - quotient_array = np.array([quotient]*5) + dividend_array = np.array([dividend] * 5) + quotient_array = np.array([quotient] * 5) assert all(dividend_array // divisor == quotient_array), msg else: if IS_WASM: @@ -620,31 +633,31 @@ def test_division_int_timedelta(self, dividend, divisor, quotient): def test_division_complex(self): # check that implementation is correct msg = "Complex division implementation check" - x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128) - assert_almost_equal(x**2/x, x, err_msg=msg) + x = np.array([1. + 1. * 1j, 1. + .5 * 1j, 1. + 2. 
* 1j], dtype=np.complex128) + assert_almost_equal(x**2 / x, x, err_msg=msg) # check overflow, underflow msg = "Complex division overflow/underflow check" x = np.array([1.e+110, 1.e-110], dtype=np.complex128) - y = x**2/x - assert_almost_equal(y/x, [1, 1], err_msg=msg) + y = x**2 / x + assert_almost_equal(y / x, [1, 1], err_msg=msg) def test_zero_division_complex(self): with np.errstate(invalid="ignore", divide="ignore"): x = np.array([0.0], dtype=np.complex128) - y = 1.0/x + y = 1.0 / x assert_(np.isinf(y)[0]) - y = complex(np.inf, np.nan)/x + y = complex(np.inf, np.nan) / x assert_(np.isinf(y)[0]) - y = complex(np.nan, np.inf)/x + y = complex(np.nan, np.inf) / x assert_(np.isinf(y)[0]) - y = complex(np.inf, np.inf)/x + y = complex(np.inf, np.inf) / x assert_(np.isinf(y)[0]) - y = 0.0/x + y = 0.0 / x assert_(np.isnan(y)[0]) def test_floor_division_complex(self): # check that floor division, divmod and remainder raises type errors - x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) + x = np.array([.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], dtype=np.complex128) with pytest.raises(TypeError): x // 7 with pytest.raises(TypeError): @@ -656,8 +669,8 @@ def test_floor_division_signed_zero(self): # Check that the sign bit is correctly set when dividing positive and # negative zero by one. 
x = np.zeros(10) - assert_equal(np.signbit(x//1), 0) - assert_equal(np.signbit((-x)//1), 1) + assert_equal(np.signbit(x // 1), 0) + assert_equal(np.signbit((-x) // 1), 1) @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"), reason="gh-22982") @@ -690,14 +703,14 @@ def test_floor_division_corner_cases(self, dtype): fone = np.array(1.0, dtype=dtype) fzer = np.array(0.0, dtype=dtype) finf = np.array(np.inf, dtype=dtype) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning) div = np.floor_divide(fnan, fone) - assert(np.isnan(div)), "div: %s" % div + assert np.isnan(div), f"div: {div}" div = np.floor_divide(fone, fnan) - assert(np.isnan(div)), "div: %s" % div + assert np.isnan(div), f"div: {div}" div = np.floor_divide(fnan, fzer) - assert(np.isnan(div)), "div: %s" % div + assert np.isnan(div), f"div: {div}" # verify 1.0//0.0 computations return inf with np.errstate(divide='ignore'): z = np.floor_divide(y, x) @@ -723,10 +736,10 @@ def test_remainder_basic(self): for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*71, dtype=dt1) - b = np.array(sg2*19, dtype=dt2) + a = np.array(sg1 * 71, dtype=dt1) + b = np.array(sg2 * 19, dtype=dt2) div, rem = op(a, b) - assert_equal(div*b + rem, a, err_msg=msg) + assert_equal(div * b + rem, a, err_msg=msg) if sg2 == -1: assert_(b < rem <= 0, msg) else: @@ -740,7 +753,7 @@ def test_float_remainder_exact(self): dividend = nlst + [0] + plst divisor = nlst + plst arg = list(itertools.product(dividend, divisor)) - tgt = list(divmod(*t) for t in arg) + tgt = [divmod(*t) for t in arg] a, b = np.array(arg, dtype=int).T # convert exact integer results from Python to float so that @@ -751,7 +764,7 @@ def 
test_float_remainder_exact(self): for op in [floor_divide_and_remainder, np.divmod]: for dt in np.typecodes['Float']: - msg = 'op: %s, dtype: %s' % (op.__name__, dt) + msg = f'op: {op.__name__}, dtype: {dt}' fa = a.astype(dt) fb = b.astype(dt) div, rem = op(fa, fb) @@ -766,11 +779,11 @@ def test_float_remainder_roundoff(self): for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*78*6e-8, dtype=dt1) - b = np.array(sg2*6e-8, dtype=dt2) + a = np.array(sg1 * 78 * 6e-8, dtype=dt1) + b = np.array(sg2 * 6e-8, dtype=dt2) div, rem = op(a, b) # Equal assertion should hold when fmod is used - assert_equal(div*b + rem, a, err_msg=msg) + assert_equal(div * b + rem, a, err_msg=msg) if sg2 == -1: assert_(b < rem <= 0, msg) else: @@ -846,30 +859,30 @@ def test_float_divmod_corner_cases(self): fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in divmod") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in divmod", RuntimeWarning) + warnings.filterwarnings('ignore', "divide by zero encountered in divmod", RuntimeWarning) div, rem = np.divmod(fone, fzer) - assert(np.isinf(div)), 'dt: %s, div: %s' % (dt, rem) - assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + assert np.isinf(div), f'dt: {dt}, div: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' div, rem = np.divmod(fzer, fzer) - assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) - assert_(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem) + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' + assert_(np.isnan(div)), f'dt: {dt}, rem: {rem}' div, rem = np.divmod(finf, finf) - assert(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem) - assert(np.isnan(rem)), 'dt: %s, rem: 
%s' % (dt, rem) + assert np.isnan(div), f'dt: {dt}, rem: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' div, rem = np.divmod(finf, fzer) - assert(np.isinf(div)), 'dt: %s, rem: %s' % (dt, rem) - assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + assert np.isinf(div), f'dt: {dt}, rem: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' div, rem = np.divmod(fnan, fone) - assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) - assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" div, rem = np.divmod(fone, fnan) - assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) - assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" div, rem = np.divmod(fnan, fzer) - assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) - assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" def test_float_remainder_corner_cases(self): # Check remainder magnitude. 
@@ -880,48 +893,48 @@ def test_float_remainder_corner_cases(self): b = np.array(1.0, dtype=dt) a = np.nextafter(np.array(0.0, dtype=dt), -b) rem = np.remainder(a, b) - assert_(rem <= b, 'dt: %s' % dt) + assert_(rem <= b, f'dt: {dt}') rem = np.remainder(-a, -b) - assert_(rem >= -b, 'dt: %s' % dt) + assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "invalid value encountered in fmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in remainder", RuntimeWarning) + warnings.filterwarnings('ignore', "invalid value encountered in fmod", RuntimeWarning) for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) fnan = np.array(np.nan, dtype=dt) rem = np.remainder(fone, fzer) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') # MSVC 2008 returns NaN here, so disable the check. 
#rem = np.remainder(fone, finf) #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem)) rem = np.remainder(finf, fone) fmod = np.fmod(finf, fone) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') rem = np.remainder(finf, finf) fmod = np.fmod(finf, fone) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') rem = np.remainder(finf, fzer) fmod = np.fmod(finf, fzer) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') rem = np.remainder(fone, fnan) fmod = np.fmod(fone, fnan) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') rem = np.remainder(fnan, fzer) fmod = np.fmod(fnan, fzer) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {rem}') rem = np.remainder(fnan, fone) fmod = np.fmod(fnan, fone) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {rem}') class TestDivisionIntegerOverflowsAndDivideByZero: @@ -1008,7 +1021,7 @@ def test_overflows(self, dividend_dtype, divisor_dtype, operation): # that is a multiple of the register's size. We resort to the # default implementation for the leftover elements. 
# We try to cover all paths here. - arrays = [np.array([np.iinfo(dividend_dtype).min]*i, + arrays = [np.array([np.iinfo(dividend_dtype).min] * i, dtype=dividend_dtype) for i in range(1, 129)] divisor = np.array([-1], dtype=divisor_dtype) # If dividend is a larger type than the divisor (`else` case), @@ -1038,7 +1051,7 @@ def test_overflows(self, dividend_dtype, divisor_dtype, operation): result = np.array(operation(a, divisor)).flatten('f') expected_array = np.array( [self.overflow_results[operation].nocast( - dividend_dtype)]*len(a)).flatten() + dividend_dtype)] * len(a)).flatten() assert_array_equal(result, expected_array) else: # Scalars @@ -1055,7 +1068,7 @@ def test_overflows(self, dividend_dtype, divisor_dtype, operation): result = np.array(operation(a, divisor)).flatten('f') expected_array = np.array( [self.overflow_results[operation].casted( - dividend_dtype)]*len(a)).flatten() + dividend_dtype)] * len(a)).flatten() assert_array_equal(result, expected_array) @@ -1081,7 +1094,7 @@ def test_power_float(self): y = x.copy() y **= 2 assert_equal(y, [1., 4., 9.]) - assert_almost_equal(x**(-1), [1., 0.5, 1./3]) + assert_almost_equal(x**(-1), [1., 0.5, 1. 
/ 3]) assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) for out, inp, msg in _gen_alignment_data(dtype=np.float32, @@ -1101,21 +1114,21 @@ def test_power_float(self): assert_equal(out, exp, err_msg=msg) def test_power_complex(self): - x = np.array([1+2j, 2+3j, 3+4j]) + x = np.array([1 + 2j, 2 + 3j, 3 + 4j]) assert_equal(x**0, [1., 1., 1.]) assert_equal(x**1, x) - assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) - assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) - assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) - assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) - assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) - assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, - (-117-44j)/15625]) - assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), - ncu.sqrt(3+4j)]) - norm = 1./((x**14)[0]) + assert_almost_equal(x**2, [-3 + 4j, -5 + 12j, -7 + 24j]) + assert_almost_equal(x**3, [(1 + 2j)**3, (2 + 3j)**3, (3 + 4j)**3]) + assert_almost_equal(x**4, [(1 + 2j)**4, (2 + 3j)**4, (3 + 4j)**4]) + assert_almost_equal(x**(-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)]) + assert_almost_equal(x**(-2), [1 / (1 + 2j)**2, 1 / (2 + 3j)**2, 1 / (3 + 4j)**2]) + assert_almost_equal(x**(-3), [(-11 + 2j) / 125, (-46 - 9j) / 2197, + (-117 - 44j) / 15625]) + assert_almost_equal(x**(0.5), [ncu.sqrt(1 + 2j), ncu.sqrt(2 + 3j), + ncu.sqrt(3 + 4j)]) + norm = 1. 
/ ((x**14)[0]) assert_almost_equal(x**14 * norm, - [i * norm for i in [-76443+16124j, 23161315+58317492j, + [i * norm for i in [-76443 + 16124j, 23161315 + 58317492j, 5583548873 + 2465133864j]]) # Ticket #836 @@ -1127,13 +1140,13 @@ def assert_complex_equal(x, y): z = np.array([z], dtype=np.complex128) with np.errstate(invalid="ignore"): assert_complex_equal(z**1, z) - assert_complex_equal(z**2, z*z) - assert_complex_equal(z**3, z*z*z) + assert_complex_equal(z**2, z * z) + assert_complex_equal(z**3, z * z * z) def test_power_zero(self): # ticket #1271 zero = np.array([0j]) - one = np.array([1+0j]) + one = np.array([1 + 0j]) cnan = np.array([complex(np.nan, np.nan)]) # FIXME cinf not tested. #cinf = np.array([complex(np.inf, 0)]) @@ -1150,38 +1163,38 @@ def assert_complex_equal(x, y): # zero power assert_complex_equal(np.power(zero, 0), one) with np.errstate(invalid="ignore"): - assert_complex_equal(np.power(zero, 0+1j), cnan) + assert_complex_equal(np.power(zero, 0 + 1j), cnan) # negative power for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: assert_complex_equal(np.power(zero, -p), cnan) - assert_complex_equal(np.power(zero, -1+0.2j), cnan) + assert_complex_equal(np.power(zero, -1 + 0.2j), cnan) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_zero_power_nonzero(self): # Testing 0^{Non-zero} issue 18378 - zero = np.array([0.0+0.0j]) + zero = np.array([0.0 + 0.0j]) cnan = np.array([complex(np.nan, np.nan)]) def assert_complex_equal(x, y): assert_array_equal(x.real, y.real) assert_array_equal(x.imag, y.imag) - #Complex powers with positive real part will not generate a warning - assert_complex_equal(np.power(zero, 1+4j), zero) - assert_complex_equal(np.power(zero, 2-3j), zero) - #Testing zero values when real part is greater than zero - assert_complex_equal(np.power(zero, 1+1j), zero) - assert_complex_equal(np.power(zero, 1+0j), zero) - assert_complex_equal(np.power(zero, 1-1j), zero) - #Complex powers will negative real part or 0 (provided 
imaginary + # Complex powers with positive real part will not generate a warning + assert_complex_equal(np.power(zero, 1 + 4j), zero) + assert_complex_equal(np.power(zero, 2 - 3j), zero) + # Testing zero values when real part is greater than zero + assert_complex_equal(np.power(zero, 1 + 1j), zero) + assert_complex_equal(np.power(zero, 1 + 0j), zero) + assert_complex_equal(np.power(zero, 1 - 1j), zero) + # Complex powers will negative real part or 0 (provided imaginary # part is not zero) will generate a NAN and hence a RUNTIME warning with pytest.warns(expected_warning=RuntimeWarning) as r: - assert_complex_equal(np.power(zero, -1+1j), cnan) - assert_complex_equal(np.power(zero, -2-3j), cnan) - assert_complex_equal(np.power(zero, -7+0j), cnan) - assert_complex_equal(np.power(zero, 0+1j), cnan) - assert_complex_equal(np.power(zero, 0-1j), cnan) + assert_complex_equal(np.power(zero, -1 + 1j), cnan) + assert_complex_equal(np.power(zero, -2 - 3j), cnan) + assert_complex_equal(np.power(zero, -7 + 0j), cnan) + assert_complex_equal(np.power(zero, 0 + 1j), cnan) + assert_complex_equal(np.power(zero, 0 - 1j), cnan) assert len(r) == 5 def test_fast_power(self): @@ -1264,7 +1277,7 @@ def test_type_conversion(self): arg_type = '?bhilBHILefdgFDG' res_type = 'ddddddddddddgDDG' for dtin, dtout in zip(arg_type, res_type): - msg = "dtin: %s, dtout: %s" % (dtin, dtout) + msg = f"dtin: {dtin}, dtout: {dtout}" arg = np.ones(1, dtype=dtin) res = np.float_power(arg, arg) assert_(res.dtype.name == np.dtype(dtout).name, msg) @@ -1336,8 +1349,8 @@ def test_logaddexp2_range(self): def test_inf(self): inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 z = [inf, inf, inf, -inf, inf, inf, 1, 1] with np.errstate(invalid='raise'): for dt in ['f', 'd', 'g']: @@ -1367,7 +1380,7 @@ def test_log_values(self): for dt in ['f', 'd', 
'g']: log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ + yf = np.array(y, dtype=dt) * log2_ assert_almost_equal(np.log(xf), yf) # test aliasing(issue #17761) @@ -1391,10 +1404,10 @@ def test_log_values_maxofdtype(self): def test_log_strides(self): np.random.seed(42) - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - sizes = np.arange(2,100) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) for ii in sizes: - x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii)) + x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0, size=ii)) x_special = x_f64.copy() x_special[3:-1:4] = 1.0 y_true = np.log(x_f64) @@ -1423,10 +1436,10 @@ def test_log_precision_float64(self, z, wref): # Reference values were computed with mpmath, with mp.dps = 200. @pytest.mark.parametrize( 'z, wref', - [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12+3e-06j)), + [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12 + 3e-06j)), (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)), (np.complex64(0.9999999 + 1e-06j), - np.complex64(-1.192088e-07+1.0000001e-06j))], + np.complex64(-1.192088e-07 + 1.0000001e-06j))], ) def test_log_precision_float32(self, z, wref): w = np.log(z) @@ -1440,15 +1453,15 @@ def test_exp_values(self): for dt in ['f', 'd', 'g']: log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ + yf = np.array(y, dtype=dt) * log2_ assert_almost_equal(np.exp(yf), xf) def test_exp_strides(self): np.random.seed(42) - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - sizes = np.arange(2,100) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) for ii in sizes: - x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii)) + x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1, size=ii)) y_true = np.exp(x_f64) for jj in strides: assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) @@ -1823,41 +1836,41 @@ def 
test_expm1(self): @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) @pytest.mark.parametrize("data, escape", ( ([0.03], LTONE_INVALID_ERR), - ([0.03]*32, LTONE_INVALID_ERR), + ([0.03] * 32, LTONE_INVALID_ERR), # neg ([-1.0], NEG_INVALID_ERR), - ([-1.0]*32, NEG_INVALID_ERR), + ([-1.0] * 32, NEG_INVALID_ERR), # flat ([1.0], ONE_INVALID_ERR), - ([1.0]*32, ONE_INVALID_ERR), + ([1.0] * 32, ONE_INVALID_ERR), # zero ([0.0], BYZERO_ERR), - ([0.0]*32, BYZERO_ERR), + ([0.0] * 32, BYZERO_ERR), ([-0.0], BYZERO_ERR), - ([-0.0]*32, BYZERO_ERR), + ([-0.0] * 32, BYZERO_ERR), # nan ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR), - ([0.5, 0.5, 0.5, np.nan]*32, LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.nan] * 32, LTONE_INVALID_ERR), ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR), - ([np.nan, 1.0, 1.0, 1.0]*32, ONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0] * 32, ONE_INVALID_ERR), ([np.nan], []), - ([np.nan]*32, []), + ([np.nan] * 32, []), # inf ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR), - ([0.5, 0.5, 0.5, np.inf]*32, INF_INVALID_ERR + LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.inf] * 32, INF_INVALID_ERR + LTONE_INVALID_ERR), ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR), - ([np.inf, 1.0, 1.0, 1.0]*32, INF_INVALID_ERR), + ([np.inf, 1.0, 1.0, 1.0] * 32, INF_INVALID_ERR), ([np.inf], INF_INVALID_ERR), - ([np.inf]*32, INF_INVALID_ERR), + ([np.inf] * 32, INF_INVALID_ERR), # ninf ([0.5, 0.5, 0.5, -np.inf], NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), - ([0.5, 0.5, 0.5, -np.inf]*32, + ([0.5, 0.5, 0.5, -np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR), - ([-np.inf, 1.0, 1.0, 1.0]*32, NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf, 1.0, 1.0, 1.0] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR), - ([-np.inf]*32, NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), )) def test_unary_spurious_fpexception(self, 
ufunc, dtype, data, escape): if escape and ufunc in escape: @@ -1865,8 +1878,15 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # - spacing/float32,float64 on Windows MSVC with VS2022 if ufunc in (np.spacing, np.ceil) and dtype == 'e': return + # Skip spacing tests with NaN on Windows MSVC (all dtypes) + import platform + if (ufunc == np.spacing and + platform.system() == 'Windows' and + any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): + pytest.skip("spacing with NaN generates warnings on Windows/VS2022") array = np.array(data, dtype=dtype) with assert_no_warnings(): ufunc(array) @@ -1889,10 +1909,10 @@ class TestFPClass: def test_fpclass(self, stride): arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d') arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f') - nan = np.array([True, True, False, False, False, False, False, False, False, False]) - inf = np.array([False, False, True, True, False, False, False, False, False, False]) - sign = np.array([False, True, False, True, True, False, True, False, False, True]) - finite = np.array([False, False, False, False, True, True, True, True, True, True]) + nan = np.array([True, True, False, False, False, False, False, False, False, False]) # noqa: E221 + inf = np.array([False, False, True, True, False, False, False, False, False, False]) # noqa: E221 + sign = np.array([False, True, False, True, True, False, True, False, False, True]) # noqa: E221 + finite = np.array([False, False, False, False, True, True, True, True, True, True]) # noqa: E221 assert_equal(np.isnan(arr_f32[::stride]), nan[::stride]) assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) @@ -1981,17 +2001,17 
@@ def test_fp_noncontiguous(self, dtype): assert_equal(np.isfinite(data_split), finite_split) class TestLDExp: - @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("dtype", ['f', 'd']) def test_ldexp(self, dtype, stride): mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) - exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') - out = np.zeros(8, dtype=dtype) + exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') + out = np.zeros(8, dtype=dtype) assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) class TestFRExp: - @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("dtype", ['f', 'd']) @pytest.mark.skipif(not sys.platform.startswith('linux'), reason="np.frexp gives different answers for NAN/INF on windows and linux") @@ -1999,35 +2019,36 @@ class TestFRExp: def test_frexp(self, dtype, stride): arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype) - exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') - out_mant = np.ones(8, dtype=dtype) - out_exp = 2*np.ones(8, dtype='i') + exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') + out_mant = np.ones(8, dtype=dtype) + out_exp = 2 * np.ones(8, dtype='i') mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride])) assert_equal(mant_true[::stride], mant) assert_equal(exp_true[::stride], exp) assert_equal(out_mant[::stride], mant_true[::stride]) assert_equal(out_exp[::stride], exp_true[::stride]) + # func : [maxulperror, low, high] -avx_ufuncs = {'sqrt' :[1, 0., 100.], - 'absolute' :[0, -100., 100.], - 'reciprocal' :[1, 1., 100.], - 'square' :[1, -100., 100.], - 
'rint' :[0, -100., 100.], - 'floor' :[0, -100., 100.], - 'ceil' :[0, -100., 100.], - 'trunc' :[0, -100., 100.]} +avx_ufuncs = {'sqrt' : [1, 0., 100.], # noqa: E203 + 'absolute' : [0, -100., 100.], # noqa: E203 + 'reciprocal' : [1, 1., 100.], # noqa: E203 + 'square' : [1, -100., 100.], # noqa: E203 + 'rint' : [0, -100., 100.], # noqa: E203 + 'floor' : [0, -100., 100.], # noqa: E203 + 'ceil' : [0, -100., 100.], # noqa: E203 + 'trunc' : [0, -100., 100.]} # noqa: E203 class TestAVXUfuncs: def test_avx_based_ufunc(self): - strides = np.array([-4,-3,-2,-1,1,2,3,4]) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) np.random.seed(42) for func, prop in avx_ufuncs.items(): maxulperr = prop[0] minval = prop[1] maxval = prop[2] # various array sizes to ensure masking in AVX is tested - for size in range(1,32): + for size in range(1, 32): myfunc = getattr(np, func) x_f32 = np.random.uniform(low=minval, high=maxval, size=size).astype(np.float32) @@ -2055,26 +2076,26 @@ def test_avx_based_ufunc(self): class TestAVXFloat32Transcendental: def test_exp_float32(self): np.random.seed(42) - x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000)) + x_f32 = np.float32(np.random.uniform(low=0.0, high=88.1, size=1000000)) x_f64 = np.float64(x_f32) assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) def test_log_float32(self): np.random.seed(42) - x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000)) + x_f32 = np.float32(np.random.uniform(low=0.0, high=1000, size=1000000)) x_f64 = np.float64(x_f32) assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) def test_sincos_float32(self): np.random.seed(42) N = 1000000 - M = np.int_(N/20) + M = np.int_(N / 20) index = np.random.randint(low=0, high=N, size=M) - x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N)) + x_f32 = np.float32(np.random.uniform(low=-100., high=100., size=N)) if not _glibc_older_than("2.17"): # test coverage for elements > 117435.992f for 
which glibc is used # this is known to be problematic on old glibc, so skip it there - x_f32[index] = np.float32(10E+10*np.random.rand(M)) + x_f32[index] = np.float32(10E+10 * np.random.rand(M)) x_f64 = np.float64(x_f32) assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) @@ -2085,10 +2106,10 @@ def test_sincos_float32(self): def test_strided_float32(self): np.random.seed(42) - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - sizes = np.arange(2,100) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) for ii in sizes: - x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii)) + x_f32 = np.float32(np.random.uniform(low=0.01, high=88.1, size=ii)) x_f32_large = x_f32.copy() x_f32_large[3:-1:4] = 120000.0 exp_true = np.exp(x_f32) @@ -2124,8 +2145,8 @@ def test_logaddexp_range(self): def test_inf(self): inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 z = [inf, inf, inf, -inf, inf, inf, 1, 1] with np.errstate(invalid='raise'): for dt in ['f', 'd', 'g']: @@ -2149,7 +2170,7 @@ def test_reduce(self): class TestLog1p: def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) - assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1 + 1e-6)) def test_special(self): with np.errstate(invalid="ignore", divide="ignore"): @@ -2162,8 +2183,8 @@ def test_special(self): class TestExpm1: def test_expm1(self): - assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) - assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2) - 1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6) - 1) def test_special(self): assert_equal(ncu.expm1(np.inf), np.inf) @@ -2194,13 +2215,13 @@ def 
test_reduce(self): def assert_hypot_isnan(x, y): with np.errstate(invalid='ignore'): assert_(np.isnan(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) + f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not nan") def assert_hypot_isinf(x, y): with np.errstate(invalid='ignore'): assert_(np.isinf(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) + f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not inf") class TestHypotSpecialValues: @@ -2221,23 +2242,23 @@ def test_no_fpe(self): def assert_arctan2_isnan(x, y): - assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) + assert_(np.isnan(ncu.arctan2(x, y)), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan") def assert_arctan2_ispinf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf") def assert_arctan2_isninf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf") def assert_arctan2_ispzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) + assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0") def assert_arctan2_isnzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) + assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0") class TestArctan2SpecialValues: @@ -2361,7 +2382,7 @@ def test_reduce(self): def test_reduce_complex(self): 
assert_equal(np.maximum.reduce([1, 2j]), 1) - assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) + assert_equal(np.maximum.reduce([1 + 3j, 2j]), 1 + 3j) def test_float_nans(self): nan = np.nan @@ -2395,13 +2416,13 @@ def test_object_array(self): assert_equal(np.maximum(arg1, arg2), arg2) def test_strided_array(self): - arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) - arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) - maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) # noqa: E221 + maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) out = np.ones(8) out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) - assert_equal(np.maximum(arr1,arr2), maxtrue) - assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2]) + assert_equal(np.maximum(arr1, arr2), maxtrue) + assert_equal(np.maximum(arr1[::2], arr2[::2]), maxtrue[::2]) assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) @@ -2453,7 +2474,7 @@ def test_reduce(self): def test_reduce_complex(self): assert_equal(np.minimum.reduce([1, 2j]), 2j) - assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) + assert_equal(np.minimum.reduce([1 + 3j, 2j]), 2j) def test_float_nans(self): nan = np.nan @@ -2488,12 +2509,12 @@ def test_object_array(self): def test_strided_array(self): arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) - arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) - mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + mintrue = np.array([-4.0, 
-1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf]) out = np.ones(8) out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) - assert_equal(np.minimum(arr1,arr2), mintrue) - assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2]) + assert_equal(np.minimum(arr1, arr2), mintrue) + assert_equal(np.minimum(arr1[::2], arr2[::2]), mintrue[::2]) assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan])) @@ -2545,7 +2566,7 @@ def test_reduce(self): def test_reduce_complex(self): assert_equal(np.fmax.reduce([1, 2j]), 1) - assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) + assert_equal(np.fmax.reduce([1 + 3j, 2j]), 1 + 3j) def test_float_nans(self): nan = np.nan @@ -2608,7 +2629,7 @@ def test_reduce(self): def test_reduce_complex(self): assert_equal(np.fmin.reduce([1, 2j]), 2j) - assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) + assert_equal(np.fmin.reduce([1 + 3j, 2j]), 2j) def test_float_nans(self): nan = np.nan @@ -2726,7 +2747,7 @@ def test_values(self): for dt in self.bitwise_types: zeros = np.array([0], dtype=dt) ones = np.array([-1]).astype(dt) - msg = "dt = '%s'" % dt.char + msg = f"dt = '{dt.char}'" assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) @@ -2750,7 +2771,7 @@ def test_types(self): for dt in self.bitwise_types: zeros = np.array([0], dtype=dt) ones = np.array([-1]).astype(dt) - msg = "dt = '%s'" % dt.char + msg = f"dt = '{dt.char}'" assert_(np.bitwise_not(zeros).dtype == dt, msg) assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) @@ -2769,7 +2790,7 @@ def test_reduction(self): zeros = np.array([0], dtype=dt) ones = np.array([-1]).astype(dt) for f in binary_funcs: - msg = "dt: '%s', f: '%s'" % (dt, f) + msg = f"dt: '{dt}', f: '{f}'" assert_equal(f.reduce(zeros), zeros, err_msg=msg) 
assert_equal(f.reduce(ones), ones, err_msg=msg) @@ -2778,7 +2799,7 @@ def test_reduction(self): # No object array types empty = np.array([], dtype=dt) for f in binary_funcs: - msg = "dt: '%s', f: '%s'" % (dt, f) + msg = f"dt: '{dt}', f: '{f}'" tgt = np.array(f.identity).astype(dt) res = f.reduce(empty) assert_equal(res, tgt, err_msg=msg) @@ -2789,7 +2810,7 @@ def test_reduction(self): # function and is not the same as the type returned by the identity # method. for f in binary_funcs: - msg = "dt: '%s'" % (f,) + msg = f"dt: '{f}'" empty = np.array([], dtype=object) tgt = f.identity res = f.reduce(empty) @@ -2797,7 +2818,7 @@ def test_reduction(self): # Non-empty object arrays do not use the identity for f in binary_funcs: - msg = "dt: '%s'" % (f,) + msg = f"dt: '{f}'" btype = np.array([True], dtype=object) assert_(type(f.reduce(btype)) is bool, msg) @@ -2814,7 +2835,7 @@ def test_bitwise_count(self, input_dtype_obj, bitsize): input_dtype, np.signedinteger) or input_dtype == np.object_: assert i == np.bitwise_count(input_dtype(-num)), msg - a = np.array([2**i-1 for i in range(1, bitsize)], dtype=input_dtype) + a = np.array([2**i - 1 for i in range(1, bitsize)], dtype=input_dtype) bitwise_count_a = np.bitwise_count(a) expected = np.arange(1, bitsize, dtype=input_dtype) @@ -2841,13 +2862,13 @@ def test_floating_point(self): class TestDegrees: def test_degrees(self): assert_almost_equal(ncu.degrees(np.pi), 180.0) - assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) + assert_almost_equal(ncu.degrees(-0.5 * np.pi), -90.0) class TestRadians: def test_radians(self): assert_almost_equal(ncu.radians(180.0), np.pi) - assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) + assert_almost_equal(ncu.radians(-90.0), -0.5 * np.pi) class TestHeavside: @@ -2891,14 +2912,14 @@ def test_sign_complex(self): complex(np.inf, np.inf), complex(np.inf, -np.inf), # nan np.nan, complex(0, np.nan), complex(np.nan, np.nan), # nan 0.0, # 0. 
- 3.0, -3.0, -2j, 3.0+4.0j, -8.0+6.0j + 3.0, -3.0, -2j, 3.0 + 4.0j, -8.0 + 6.0j ]) out = np.zeros(a.shape, a.dtype) tgt = np.array([ 1., -1., 1j, -1j, ] + [complex(np.nan, np.nan)] * 5 + [ 0.0, - 1.0, -1.0, -1j, 0.6+0.8j, -0.8+0.6j]) + 1.0, -1.0, -1j, 0.6 + 0.8j, -0.8 + 0.6j]) with np.errstate(invalid='ignore'): res = ncu.sign(a) @@ -2935,10 +2956,12 @@ def test_minmax_blocked(self): for i in range(inp.size): inp[:] = np.arange(inp.size, dtype=dt) inp[i] = np.nan - emsg = lambda: '%r\n%s' % (inp, msg) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + emsg = lambda: f'{inp!r}\n{msg}' + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) assert_(np.isnan(inp.max()), msg=emsg) assert_(np.isnan(inp.min()), msg=emsg) @@ -2979,7 +3002,7 @@ def test_abs_neg_blocked(self): assert_equal(out, tgt, err_msg=msg) assert_((out >= 0).all()) - tgt = [-1*(i) for i in inp] + tgt = [-1 * (i) for i in inp] np.negative(inp, out=out) assert_equal(out, tgt, err_msg=msg) @@ -2993,7 +3016,7 @@ def test_abs_neg_blocked(self): np.abs(inp, out=out) assert_array_equal(out, d, err_msg=msg) - assert_array_equal(-inp, -1*inp, err_msg=msg) + assert_array_equal(-inp, -1 * inp, err_msg=msg) d = -1 * inp np.negative(inp, out=out) assert_array_equal(out, d, err_msg=msg) @@ -3118,23 +3141,24 @@ def do_test(f_call, f_expected): # assert_equal produces truly useless error messages raise AssertionError("\n".join([ "Bad arguments passed in ufunc call", - " expected: {}".format(expected), - " __array_wrap__ got: {}".format(w) + f" expected: {expected}", + f" __array_wrap__ got: {w}" ])) # method not on the out argument - do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, 
None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) # method on the out argument - do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) - do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) - do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) # Also check the where mask handling: - do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0)) + out = np.zeros([1], dtype=float) + do_test(lambda a: np.add(a, 0, where=False, out=None), lambda a: (a, 0)) do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) def test_wrap_with_iterable(self): @@ -3164,7 +3188,7 @@ def __new__(cls): return np.asarray(1.0, 'float64').view(cls).copy() a = A() - x = np.float64(1)*a + x = np.float64(1) * a assert_(isinstance(x, A)) assert_array_equal(x, np.array(1)) @@ -3470,7 +3494,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'reduce') assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'keepdims': 'keep0', 'axis': 'axis0'}) @@ -3483,7 +3507,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'reduce') assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'keepdims': 'keep0', 'axis': 'axis0', @@ -3522,7 +3546,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'accumulate') assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'axis': 
'axis0'}) @@ -3533,7 +3557,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'accumulate') assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'axis': 'axis0'}) @@ -3558,7 +3582,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'reduceat') assert_equal(res[3], (a, [4, 2])) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'axis': 'axis0'}) @@ -3569,7 +3593,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'reduceat') assert_equal(res[3], (a, [4, 2])) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'axis': 'axis0'}) @@ -3691,7 +3715,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): kwargs = kwargs.copy() if "out" in kwargs: - kwargs["out"] = self._unwrap(kwargs["out"]) + kwargs["out"] = self._unwrap(kwargs["out"])[0] if kwargs["out"] is NotImplemented: return NotImplemented @@ -3722,21 +3746,28 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): array = np.array([1, 2, 3]) where = np.array([True, False, True]) - expected = ufunc(array, where=where) + out = np.zeros(3, dtype=array.dtype) + expected = ufunc(array, where=where, out=out) with pytest.raises(TypeError): - ufunc(array, where=where.view(OverriddenArrayOld)) + ufunc( + array, + where=where.view(OverriddenArrayOld), + out=out, + ) result_1 = ufunc( array, - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + out=out, ) assert isinstance(result_1, OverriddenArrayNew) assert np.all(np.array(result_1) == expected, where=where) result_2 = ufunc( array.view(OverriddenArrayNew), - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + 
out=out.view(OverriddenArrayNew), ) assert isinstance(result_2, OverriddenArrayNew) assert np.all(np.array(result_2) == expected, where=where) @@ -4016,12 +4047,15 @@ def test_array_ufunc_direct_call(self): res = a.__array_ufunc__(np.add, "__call__", a, a) assert_array_equal(res, a + a) + @pytest.mark.thread_unsafe(reason="modifies global module") + @pytest.mark.skipif(IS_PYPY, reason="__signature__ descriptor dance fails") def test_ufunc_docstring(self): original_doc = np.add.__doc__ new_doc = "new docs" expected_dict = ( {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} ) + expected_dict["__signature__"] = inspect.signature(np.add) np.add.__doc__ = new_doc assert np.add.__doc__ == new_doc @@ -4073,7 +4107,7 @@ def _test_lcm_inner(self, dtype): # negatives are ignored a = np.array([12, -12, 12, -12], dtype=dtype) b = np.array([20, 20, -20, -20], dtype=dtype) - assert_equal(np.lcm(a, b), [60]*4) + assert_equal(np.lcm(a, b), [60] * 4) # reduce a = np.array([3, 12, 20], dtype=dtype) @@ -4094,7 +4128,7 @@ def _test_gcd_inner(self, dtype): # negatives are ignored a = np.array([12, -12, 12, -12], dtype=dtype) b = np.array([20, 20, -20, -20], dtype=dtype) - assert_equal(np.gcd(a, b), [4]*4) + assert_equal(np.gcd(a, b), [4] * 4) # reduce a = np.array([15, 25, 35], dtype=dtype) @@ -4108,9 +4142,9 @@ def _test_gcd_inner(self, dtype): def test_lcm_overflow(self): # verify that we don't overflow when a*b does overflow big = np.int32(np.iinfo(np.int32).max // 11) - a = 2*big - b = 5*big - assert_equal(np.lcm(a, b), 10*big) + a = 2 * big + b = 5 * big + assert_equal(np.lcm(a, b), 10 * big) def test_gcd_overflow(self): for dtype in (np.int32, np.int64): @@ -4118,16 +4152,16 @@ def test_gcd_overflow(self): # not relevant for lcm, where the result is unrepresentable anyway a = dtype(np.iinfo(dtype).min) # negative power of two q = -(a // 4) - assert_equal(np.gcd(a, q*3), q) - assert_equal(np.gcd(a, -q*3), q) + assert_equal(np.gcd(a, q * 3), q) + 
assert_equal(np.gcd(a, -q * 3), q) def test_decimal(self): from decimal import Decimal a = np.array([1, 1, -1, -1]) * Decimal('0.20') b = np.array([1, -1, 1, -1]) * Decimal('0.12') - assert_equal(np.gcd(a, b), 4*[Decimal('0.04')]) - assert_equal(np.lcm(a, b), 4*[Decimal('0.60')]) + assert_equal(np.gcd(a, b), 4 * [Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4 * [Decimal('0.60')]) def test_float(self): # not well-defined on float due to rounding errors @@ -4166,7 +4200,6 @@ def test_inf_and_nan(self): assert_raises(TypeError, np.gcd, 4, float(np.inf)) - class TestRoundingFunctions: def test_object_direct(self): @@ -4174,8 +4207,10 @@ def test_object_direct(self): class C: def __floor__(self): return 1 + def __ceil__(self): return 2 + def __trunc__(self): return 3 @@ -4226,8 +4261,8 @@ def test_it(self): x = .5 fr = f(x) fz = f(complex(x)) - assert_almost_equal(fz.real, fr, err_msg='real part %s' % f) - assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f) + assert_almost_equal(fz.real, fr, err_msg=f'real part {f}') + assert_almost_equal(fz.imag, 0., err_msg=f'imag part {f}') @pytest.mark.xfail(IS_WASM, reason="doesn't work") def test_precisions_consistent(self): @@ -4236,68 +4271,68 @@ def test_precisions_consistent(self): fcf = f(np.csingle(z)) fcd = f(np.cdouble(z)) fcl = f(np.clongdouble(z)) - assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f) - assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f) + assert_almost_equal(fcf, fcd, decimal=6, err_msg=f'fch-fcd {f}') + assert_almost_equal(fcl, fcd, decimal=15, err_msg=f'fch-fcl {f}') @pytest.mark.xfail(IS_WASM, reason="doesn't work") def test_branch_cuts(self): # check branch cuts and continuity on them - _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) - _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) # noqa: E221 + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) # noqa: E221 _check_branch_cut(np.log10, -0.5, 1j, 1, -1, 
True) _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) - _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) # noqa: E221 _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) - _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True) - _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True) _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) # check against bogus branch cuts: assert continuity between quadrants - _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1) - _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1) _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) - _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1) - _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1) @pytest.mark.xfail(IS_WASM, reason="doesn't work") def test_branch_cuts_complex64(self): # check branch cuts and continuity on them - _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) + 
_check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) - _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) - _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) # check against bogus branch cuts: assert continuity between quadrants - _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) def test_against_cmath(self): import cmath - points = [-1-1j, -1+1j, +1-1j, +1+1j] + points = [-1 - 1j, -1 + 1j, +1 - 1j, +1 + 1j] name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} - atol = 4*np.finfo(complex).eps + atol = 4 * np.finfo(complex).eps for func in self.funcs: fname = func.__name__.split('.')[-1] cname = 
name_map.get(fname, fname) @@ -4310,7 +4345,7 @@ def test_against_cmath(self): b = cfunc(p) assert_( abs(a - b) < atol, - "%s %s: %s; cmath: %s" % (fname, p, a, b) + f"{fname} {p}: {a}; cmath: {b}" ) @pytest.mark.xfail( @@ -4338,22 +4373,22 @@ def check(x, rtol): x = x.astype(real_dtype) z = x.astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) + d = np.absolute(np.arcsinh(x) / np.arcsinh(z).real - 1) assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsinh')) - z = (1j*x).astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) + z = (1j * x).astype(dtype) + d = np.absolute(np.arcsinh(x) / np.arcsin(z).imag - 1) assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsin')) z = x.astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) + d = np.absolute(np.arctanh(x) / np.arctanh(z).real - 1) assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arctanh')) - z = (1j*x).astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) + z = (1j * x).astype(dtype) + d = np.absolute(np.arctanh(x) / np.arctan(z).imag - 1) assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arctan')) @@ -4371,28 +4406,28 @@ def check(x, rtol): # It's not guaranteed that the system-provided arc functions # are accurate down to a few epsilons. (Eg. 
on Linux 64-bit) # So, give more leeway for long complex tests here: - check(x_series, 50.0*eps) + check(x_series, 50.0 * eps) else: - check(x_series, 2.1*eps) - check(x_basic, 2.0*eps/1e-3) + check(x_series, 2.1 * eps) + check(x_basic, 2.0 * eps / 1e-3) # Check a few points - z = np.array([1e-5*(1+1j)], dtype=dtype) + z = np.array([1e-5 * (1 + 1j)], dtype=dtype) p = 9.999999999333333333e-6 + 1.000000000066666666e-5j - d = np.absolute(1-np.arctanh(z)/p) + d = np.absolute(1 - np.arctanh(z) / p) assert_(np.all(d < 1e-15)) p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j - d = np.absolute(1-np.arcsinh(z)/p) + d = np.absolute(1 - np.arcsinh(z) / p) assert_(np.all(d < 1e-15)) p = 9.999999999333333333e-6j + 1.000000000066666666e-5 - d = np.absolute(1-np.arctan(z)/p) + d = np.absolute(1 - np.arctan(z) / p) assert_(np.all(d < 1e-15)) p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 - d = np.absolute(1-np.arcsin(z)/p) + d = np.absolute(1 - np.arcsin(z) / p) assert_(np.all(d < 1e-15)) # Check continuity across switchover points @@ -4404,15 +4439,15 @@ def check(func, z0, d=1): assert_(np.all(zp != zm), (zp, zm)) # NB: the cancellation error at the switchover is at least eps - good = (abs(func(zp) - func(zm)) < 2*eps) + good = (abs(func(zp) - func(zm)) < 2 * eps) assert_(np.all(good), (func, z0[~good])) for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): - pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3) + pts = [rp + 1j * ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3) if rp != 0 or ip != 0] check(func, pts, 1) check(func, pts, 1j) - check(func, pts, 1+1j) + check(func, pts, 1 + 1j) @np.errstate(all="ignore") def test_promotion_corner_cases(self): @@ -4453,7 +4488,7 @@ def __new__(subtype, shape): return self a = simple((3, 4)) - assert_equal(a+a, a) + assert_equal(a + a, a) class TestFrompyfunc: @@ -4516,13 +4551,13 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, atol = 1e-4 y0 = 
f(x0) - yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) - ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) + yp = f(x0 + dx * scale * np.absolute(x0) / np.absolute(dx)) + ym = f(x0 - dx * scale * np.absolute(x0) / np.absolute(dx)) assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) - assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) - assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.real - ym.real * re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag * im_sign) < atol), (y0, ym)) if sig_zero_ok: # check that signed zeros also work as a displacement @@ -4532,15 +4567,15 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, x = x0[jr] x.real = ncu.NZERO ym = f(x) - assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym)) - assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym)) + assert_(np.all(np.absolute(y0[jr].real - ym.real * re_sign) < atol), (y0[jr], ym)) + assert_(np.all(np.absolute(y0[jr].imag - ym.imag * im_sign) < atol), (y0[jr], ym)) if np.any(ji): x = x0[ji] x.imag = ncu.NZERO ym = f(x) - assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym)) - assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym)) + assert_(np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), (y0[ji], ym)) + assert_(np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), (y0[ji], ym)) def test_copysign(): assert_(np.copysign(1, -1) == -1) @@ -4581,8 +4616,8 @@ def test_nextafter_0(): for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): # The value of tiny for double double is NaN, so we need to pass the # assert - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', 
UserWarning) if not np.isnan(np.finfo(t).tiny): tiny = np.finfo(t).tiny assert_( @@ -4685,6 +4720,18 @@ def test_reduceat(): np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT) assert_array_almost_equal(h1, h2) +def test_negative_value_raises(): + with pytest.raises(ValueError, match="buffer size must be non-negative"): + np.setbufsize(-5) + + old = np.getbufsize() + try: + prev = np.setbufsize(4096) + assert prev == old + assert np.getbufsize() == 4096 + finally: + np.setbufsize(old) + def test_reduceat_empty(): """Reduceat should work with empty arrays""" indices = np.array([], 'i4') @@ -4715,11 +4762,11 @@ def test_complex_nan_comparisons(): if np.isfinite(x) and np.isfinite(y): continue - assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) - assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) - assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) - assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) - assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) + assert_equal(x < y, False, err_msg=f"{x!r} < {y!r}") + assert_equal(x > y, False, err_msg=f"{x!r} > {y!r}") + assert_equal(x <= y, False, err_msg=f"{x!r} <= {y!r}") + assert_equal(x >= y, False, err_msg=f"{x!r} >= {y!r}") + assert_equal(x == y, False, err_msg=f"{x!r} == {y!r}") def test_rint_big_int(): @@ -4751,7 +4798,7 @@ def test_memoverlap_accumulate_cmp(ufunc, dtype): if ufunc.signature: pytest.skip('For generic signatures only') for size in (2, 8, 32, 64, 128, 256): - arr = np.array([0, 1, 1]*size, dtype=dtype) + arr = np.array([0, 1, 1] * size, dtype=dtype) acc = ufunc.accumulate(arr, dtype='?') acc_u8 = acc.view(np.uint8) exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8) @@ -4768,7 +4815,7 @@ def test_memoverlap_accumulate_symmetric(ufunc, dtype): pytest.skip('For generic signatures only') with np.errstate(all='ignore'): for size in (2, 8, 32, 64, 128, 256): - arr = np.array([0, 1, 2]*size).astype(dtype) + arr = np.array([0, 1, 2] * size).astype(dtype) acc = 
ufunc.accumulate(arr, dtype=dtype) exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype) assert_equal(exp, acc) @@ -4849,6 +4896,15 @@ def test_bad_legacy_ufunc_silent_errors(): ncu_tests.always_error.at(arr, [0, 1, 2], arr) +def test_bad_legacy_unary_ufunc_silent_errors(): + # Unary has a special scalar path right now, so test it explicitly. + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(np.arange(3).astype(np.float64)) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(1.5) + + @pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) def test_bad_legacy_gufunc_silent_errors(x1): # Verify that an exception raised in a gufunc loop propagates correctly. @@ -4895,3 +4951,12 @@ def test_ufunc_arg(self): @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") def test_string_arg(self): assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) + +class TestHypotErrorMessages: + def test_hypot_error_message_single_arg(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): + np.hypot(5) + + def test_hypot_error_message_multiple_args(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 4 were given"): + np.hypot(1, 2, 3, 4) diff --git a/blimgui/dist64/numpy/_core/tests/test_umath_accuracy.py b/blimgui/dist64/numpy/_core/tests/test_umath_accuracy.py index 71967dd..7145fa8 100644 --- a/blimgui/dist64/numpy/_core/tests/test_umath_accuracy.py +++ b/blimgui/dist64/numpy/_core/tests/test_umath_accuracy.py @@ -1,12 +1,14 @@ -import numpy as np import os -from os import path import sys +from ctypes import POINTER, c_double, c_float, c_int, c_longlong, cast, pointer +from os import path + import pytest -from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER + +import numpy as np +from numpy._core._multiarray_umath import __cpu_features__ from numpy.testing import 
assert_array_max_ulp from numpy.testing._private.utils import _glibc_older_than -from numpy._core._multiarray_umath import __cpu_features__ UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if isinstance(obj, np.ufunc)] @@ -41,6 +43,7 @@ def convert(s, datatype="np.float32"): return fp.contents.value # dereference the pointer, get the float + str_to_float = np.vectorize(convert) class TestAccuracy: @@ -57,16 +60,24 @@ def test_validate_transcendentals(self): r for r in fid if r[0] not in ('$', '#') ) data = np.genfromtxt(file_without_comments, - dtype=('|S39','|S39','|S39',int), - names=('type','input','output','ulperr'), + dtype=('|S39', '|S39', '|S39', int), + names=('type', 'input', 'output', 'ulperr'), delimiter=',', skip_header=1) npname = path.splitext(filename)[0].split('-')[3] npfunc = getattr(np, npname) for datatype in np.unique(data['type']): data_subset = data[data['type'] == datatype] - inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) - outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + data_input_str = data_subset['input'].astype(str) + data_output_str = data_subset['output'].astype(str) + data_type_str = data_subset['type'].astype(str) + + inval = np.array(str_to_float(data_input_str, + data_type_str), + dtype=eval(datatype)) + outval = np.array(str_to_float(data_output_str, + data_type_str), + dtype=eval(datatype)) perm = np.random.permutation(len(inval)) inval = inval[perm] outval = outval[perm] @@ -74,7 +85,7 @@ def test_validate_transcendentals(self): assert_array_max_ulp(npfunc(inval), outval, maxulperr) @pytest.mark.skipif(IS_AVX512FP16, - reason = "SVML FP16 have slightly higher ULP errors") + reason="SVML FP16 have slightly higher ULP errors") @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_validate_fp16_transcendentals(self, ufunc): with np.errstate(all='ignore'): diff 
--git a/blimgui/dist64/numpy/_core/tests/test_umath_complex.py b/blimgui/dist64/numpy/_core/tests/test_umath_complex.py index c7f4cec..a89eb45 100644 --- a/blimgui/dist64/numpy/_core/tests/test_umath_complex.py +++ b/blimgui/dist64/numpy/_core/tests/test_umath_complex.py @@ -1,13 +1,19 @@ -import sys import platform +import sys + import pytest import numpy as np + # import the c-extension module directly since _arg is not exported via umath import numpy._core._multiarray_umath as ncu from numpy.testing import ( - assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp - ) + assert_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) # TODO: branch cuts (use Pauli code) # TODO: conj 'symmetry' @@ -16,7 +22,7 @@ # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. -#FIXME: this will probably change when we require full C99 compatibility +# FIXME: this will probably change when we require full C99 compatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(ncu.NZERO, 0)).imag != np.pi)) @@ -28,7 +34,6 @@ reason="Inadequate C99 complex support") - class TestCexp: def test_simple(self): check = check_complex_value @@ -61,8 +66,8 @@ def test_special_values(self): check(f, np.inf, 0, np.inf, 0) # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y - check(f, -np.inf, 1, ncu.PZERO, ncu.PZERO) - check(f, -np.inf, 0.75 * np.pi, ncu.NZERO, ncu.PZERO) + check(f, -np.inf, 1, ncu.PZERO, ncu.PZERO) + check(f, -np.inf, 0.75 * np.pi, ncu.NZERO, ncu.PZERO) # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y check(f, np.inf, 1, np.inf, np.inf) @@ -131,7 +136,7 @@ def test_special_values2(self): class TestClog: def test_simple(self): - x = np.array([1+0j, 1+2j]) + x = np.array([1 + 0j, 1 + 2j]) y_r = 
np.log(np.abs(x)) + 1j * np.angle(x) y = np.log(x) assert_almost_equal(y, y_r) @@ -280,7 +285,7 @@ def test_simple(self): check_complex_value(np.sqrt, 1, 0, 1, 0) # sqrt(1i) - rres = 0.5*np.sqrt(2) + rres = 0.5 * np.sqrt(2) ires = rres check_complex_value(np.sqrt, 0, 1, rres, ires, False) @@ -315,9 +320,9 @@ def test_special_values(self): check(f, ncu.PZERO, np.inf, np.inf, np.inf) check(f, ncu.NZERO, np.inf, np.inf, np.inf) - check(f, np.inf, np.inf, np.inf, np.inf) - check(f, -np.inf, np.inf, np.inf, np.inf) - check(f, -np.nan, np.inf, np.inf, np.inf) + check(f, np.inf, np.inf, np.inf, np.inf) + check(f, -np.inf, np.inf, np.inf, np.inf) # noqa: E221 + check(f, -np.nan, np.inf, np.inf, np.inf) # noqa: E221 # csqrt(x + nani) is nan + nani for any finite x check(f, 1, np.nan, np.nan, np.nan) @@ -334,7 +339,7 @@ def test_special_values(self): def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) - #Fixme: ugly workaround for isinf bug. + # FIXME: ugly workaround for isinf bug. 
with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) @@ -361,23 +366,23 @@ def teardown_method(self): np.seterr(**self.olderr) def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) y_r = x ** 2 y = np.power(x, 2) assert_almost_equal(y, y_r) def test_scalar(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) lx = list(range(len(x))) # Hardcode the expected `builtins.complex` values, # as complex exponentiation is broken as of bpo-44698 p_r = [ - 1+0j, - 0.20787957635076193+0j, - 0.35812203996480685+0.6097119028618724j, - 0.12659112128185032+0.48847676699581527j, + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, complex(np.inf, np.nan), complex(np.nan, np.nan), ] @@ -387,17 +392,17 @@ def test_scalar(self): assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) def test_array(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) lx = list(range(len(x))) # Hardcode the expected `builtins.complex` values, # as complex exponentiation is broken as of bpo-44698 p_r = [ - 1+0j, - 0.20787957635076193+0j, - 0.35812203996480685+0.6097119028618724j, - 0.12659112128185032+0.48847676699581527j, + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, complex(np.inf, np.nan), complex(np.nan, np.nan), ] @@ -414,14 +419,14 @@ def teardown_method(self): np.seterr(**self.olderr) def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, 
np.nan]) + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) y = np.abs(x) assert_almost_equal(y, y_r) def test_fabs(self): # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) - x = np.array([1+0j], dtype=complex) + x = np.array([1 + 0j], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(1, ncu.NZERO)], dtype=complex) @@ -471,9 +476,9 @@ def g(a, b): class TestCarg: def test_simple(self): check_real_value(ncu._arg, 1, 0, 0, False) - check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) + check_real_value(ncu._arg, 0, 1, 0.5 * np.pi, False) - check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) + check_real_value(ncu._arg, 1, 1, 0.25 * np.pi, False) check_real_value(ncu._arg, ncu.PZERO, ncu.PZERO, ncu.PZERO) # TODO This can be xfail when the generator functions are got rid of. @@ -554,35 +559,39 @@ def check_complex_value(f, x1, y1, x2, y2, exact=True): assert_almost_equal(f(z1), z2) class TestSpecialComplexAVX: - @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) def test_array(self, stride, astype): - arr = np.array([complex(np.nan , np.nan), - complex(np.nan , np.inf), - complex(np.inf , np.nan), - complex(np.inf , np.inf), - complex(0. , np.inf), - complex(np.inf , 0.), - complex(0. , 0.), - complex(0. 
, np.nan), - complex(np.nan , 0.)], dtype=astype) - abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) - sq_true = np.array([complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(-np.inf, np.nan), - complex(np.inf, np.nan), - complex(0., 0.), - complex(np.nan, np.nan), - complex(np.nan, np.nan)], dtype=astype) + nan = np.nan + inf = np.inf + arr = np.array([complex(nan, nan), + complex(nan, inf), + complex(inf, nan), + complex(inf, inf), + complex(0., inf), + complex(inf, 0.), + complex(0., 0.), + complex(0., nan), + complex(nan, 0.)], dtype=astype) + abs_true = np.array([nan, inf, inf, inf, inf, inf, 0., nan, nan], + dtype=arr.real.dtype) + sq_true = np.array([complex(nan, nan), + complex(nan, nan), + complex(nan, nan), + complex(nan, inf), + complex(-inf, nan), + complex(inf, nan), + complex(0., 0.), + complex(nan, nan), + complex(nan, nan)], dtype=astype) with np.errstate(invalid='ignore'): assert_equal(np.abs(arr[::stride]), abs_true[::stride]) assert_equal(np.square(arr[::stride]), sq_true[::stride]) class TestComplexAbsoluteAVX: - @pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19]) - @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4]) + @pytest.mark.parametrize("arraysize", + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) # test to ensure masking and strides work as intended in the AVX implementation def test_array(self, arraysize, stride, astype): @@ -592,25 +601,25 @@ def test_array(self, arraysize, stride, astype): # Testcase taken as is from https://github.com/numpy/numpy/issues/16660 class TestComplexAbsoluteMixedDTypes: - @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4]) + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) 
@pytest.mark.parametrize("astype", [np.complex64, np.complex128]) @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate']) - def test_array(self, stride, astype, func): - dtype = [('template_id', ' _ExtOjbDict: ... +def _make_extobj(*, all: _ErrKind = ..., **kwargs: Unpack[_ExtOjbDict]) -> CapsuleType: ... diff --git a/blimgui/dist64/numpy/_distributor_init.py b/blimgui/dist64/numpy/_distributor_init.py index 5979e48..1978a53 100644 --- a/blimgui/dist64/numpy/_distributor_init.py +++ b/blimgui/dist64/numpy/_distributor_init.py @@ -10,6 +10,6 @@ """ try: - from . import _distributor_init_local + from . import _distributor_init_local # noqa: F401 except ImportError: pass diff --git a/blimgui/dist64/numpy/_expired_attrs_2_0.py b/blimgui/dist64/numpy/_expired_attrs_2_0.py index 66c320a..4100987 100644 --- a/blimgui/dist64/numpy/_expired_attrs_2_0.py +++ b/blimgui/dist64/numpy/_expired_attrs_2_0.py @@ -25,7 +25,6 @@ "It's still available as `np.lib.add_docstring`.", "add_newdoc_ufunc": "It's an internal function and doesn't have a replacement.", - "compat": "There's no replacement, as Python 2 is no longer supported.", "safe_eval": "Use `ast.literal_eval` instead.", "float_": "Use `np.float64` instead.", "complex_": "Use `np.complex128` instead.", @@ -62,7 +61,6 @@ "or use `typing.deprecated`.", "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " "directly, or use `typing.deprecated`.", - "disp": "Use your own printing function instead.", "find_common_type": "Use `numpy.promote_types` or `numpy.result_type` instead. 
" "To achieve semantics for the `scalar_types` argument, use " diff --git a/blimgui/dist64/numpy/_expired_attrs_2_0.pyi b/blimgui/dist64/numpy/_expired_attrs_2_0.pyi index d00fa75..11cd7a7 100644 --- a/blimgui/dist64/numpy/_expired_attrs_2_0.pyi +++ b/blimgui/dist64/numpy/_expired_attrs_2_0.pyi @@ -18,7 +18,6 @@ class _ExpiredAttributesType(TypedDict): add_newdoc: str add_docstring: str add_newdoc_ufunc: str - compat: str safe_eval: str float_: str complex_: str @@ -48,7 +47,6 @@ class _ExpiredAttributesType(TypedDict): recfromtxt: str deprecate: str deprecate_with_doc: str - disp: str find_common_type: str round_: str get_array_wrap: str diff --git a/blimgui/dist64/numpy/_globals.py b/blimgui/dist64/numpy/_globals.py index f916e06..c992dbd 100644 --- a/blimgui/dist64/numpy/_globals.py +++ b/blimgui/dist64/numpy/_globals.py @@ -49,6 +49,7 @@ class _NoValueType: """ __instance = None + def __new__(cls): # ensure that only one instance exists if not cls.__instance: @@ -93,3 +94,28 @@ def __bool__(self): return False raise ValueError(f"{self} is neither True nor False.") + + +class _SignatureDescriptor: + # A descriptor to store on the ufunc __dict__ that avoids definig a + # signature for the ufunc class/type but allows the instance to have one. + # This is needed because inspect.signature() chokes on normal properties + # (as of 3.14 at least). + # We could also set __signature__ on the instance but this allows deferred + # computation of the signature. + def __get__(self, obj, objtype=None): + # Delay import, not a critical path but need to avoid circular import. + from numpy._core._internal import _ufunc_inspect_signature_builder + + if obj is None: + # could also return None, which is accepted as "not set" by + # inspect.signature(). + raise AttributeError( + "type object 'numpy.ufunc' has no attribute '__signature__'") + + # Store on the instance, after this the descriptor won't be used. 
+ obj.__signature__ = _ufunc_inspect_signature_builder(obj) + return obj.__signature__ + + +_signature_descriptor = _SignatureDescriptor() diff --git a/blimgui/dist64/numpy/_pyinstaller/hook-numpy.py b/blimgui/dist64/numpy/_pyinstaller/hook-numpy.py index babb5a6..f25b0f9 100644 --- a/blimgui/dist64/numpy/_pyinstaller/hook-numpy.py +++ b/blimgui/dist64/numpy/_pyinstaller/hook-numpy.py @@ -5,8 +5,8 @@ https://pyinstaller.readthedocs.io/en/stable/hooks.html """ -from PyInstaller.compat import is_conda, is_pure_conda -from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies +from PyInstaller.compat import is_pure_conda +from PyInstaller.utils.hooks import collect_dynamic_libs # Collect all DLLs inside numpy's installation folder, dump them into built # app's root. diff --git a/blimgui/dist64/numpy/_pyinstaller/hook-numpy.pyi b/blimgui/dist64/numpy/_pyinstaller/hook-numpy.pyi index 2978b8b..c5f0267 100644 --- a/blimgui/dist64/numpy/_pyinstaller/hook-numpy.pyi +++ b/blimgui/dist64/numpy/_pyinstaller/hook-numpy.pyi @@ -1,13 +1,6 @@ from typing import Final -# from `PyInstaller.compat` -is_conda: Final[bool] -is_pure_conda: Final[bool] +binaries: Final[list[tuple[str, str]]] = ... -# from `PyInstaller.utils.hooks` -def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... - -binaries: Final[list[tuple[str, str]]] - -hiddenimports: Final[list[str]] -excludedimports: Final[list[str]] +hiddenimports: Final[list[str]] = ... +excludedimports: Final[list[str]] = ... 
diff --git a/blimgui/dist64/numpy/_pyinstaller/tests/__init__.py b/blimgui/dist64/numpy/_pyinstaller/tests/__init__.py index c439294..3b88525 100644 --- a/blimgui/dist64/numpy/_pyinstaller/tests/__init__.py +++ b/blimgui/dist64/numpy/_pyinstaller/tests/__init__.py @@ -1,6 +1,6 @@ -from numpy.testing import IS_WASM, IS_EDITABLE import pytest +from numpy.testing import IS_EDITABLE, IS_WASM if IS_WASM: pytest.skip( diff --git a/blimgui/dist64/numpy/_pytesttester.py b/blimgui/dist64/numpy/_pytesttester.py index cb5ce5a..7a92c98 100644 --- a/blimgui/dist64/numpy/_pytesttester.py +++ b/blimgui/dist64/numpy/_pytesttester.py @@ -28,8 +28,8 @@ simplify circular import issues. For the same reason, it contains no numpy imports at module scope, instead importing numpy within function calls. """ -import sys import os +import sys __all__ = ['PytestTester'] @@ -37,9 +37,9 @@ def _show_numpy_info(): import numpy as np - print("NumPy version %s" % np.__version__) + print(f"NumPy version {np.__version__}") info = np.lib._utils_impl._opt_info() - print("NumPy CPU features: ", (info if info else 'nothing enabled')) + print("NumPy CPU features: ", (info or 'nothing enabled')) class PytestTester: @@ -123,9 +123,10 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, True """ - import pytest import warnings + import pytest + module = sys.modules[self.module_name] module_path = os.path.abspath(module.__path__[0]) @@ -141,7 +142,7 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, # Filter out distutils cpu warnings (could be localized to # distutils tests). ASV has problems with top level import, # so fetch module for suppression here. - from numpy.distutils import cpuinfo + from numpy.distutils import cpuinfo # noqa: F401 # Filter out annoying import messages. Want these in both develop and # release mode. 
@@ -165,7 +166,7 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, pytest_args += list(extra_argv) if verbose > 1: - pytest_args += ["-" + "v"*(verbose - 1)] + pytest_args += ["-" + "v" * (verbose - 1)] if coverage: pytest_args += ["--cov=" + module_path] @@ -182,7 +183,7 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, pytest_args += ["-m", label] if durations >= 0: - pytest_args += ["--durations=%s" % durations] + pytest_args += [f"--durations={durations}"] if tests is None: tests = [self.module_name] diff --git a/blimgui/dist64/numpy/_pytesttester.pyi b/blimgui/dist64/numpy/_pytesttester.pyi index 72d2781..576d40e 100644 --- a/blimgui/dist64/numpy/_pytesttester.pyi +++ b/blimgui/dist64/numpy/_pytesttester.pyi @@ -8,11 +8,11 @@ class PytestTester: def __init__(self, module_name: str) -> None: ... def __call__( self, - label: L["fast", "full"] = ..., - verbose: int = ..., - extra_argv: None | Iterable[str] = ..., - doctests: L[False] = ..., - coverage: bool = ..., - durations: int = ..., - tests: None | Iterable[str] = ..., + label: L["fast", "full"] = "fast", + verbose: int = 1, + extra_argv: Iterable[str] | None = None, + doctests: L[False] = False, + coverage: bool = False, + durations: int = -1, + tests: Iterable[str] | None = None, ) -> bool: ... 
diff --git a/blimgui/dist64/numpy/_typing/__init__.py b/blimgui/dist64/numpy/_typing/__init__.py index 67ddb9b..2237b25 100644 --- a/blimgui/dist64/numpy/_typing/__init__.py +++ b/blimgui/dist64/numpy/_typing/__init__.py @@ -1,154 +1,173 @@ """Private counterpart of ``numpy.typing``.""" -from __future__ import annotations +import sys -from ._nested_sequence import ( - _NestedSequence as _NestedSequence, -) -from ._nbit_base import ( - NBitBase as NBitBase, - _8Bit as _8Bit, - _16Bit as _16Bit, - _32Bit as _32Bit, - _64Bit as _64Bit, - _80Bit as _80Bit, - _96Bit as _96Bit, - _128Bit as _128Bit, - _256Bit as _256Bit, -) -from ._nbit import ( - _NBitByte as _NBitByte, - _NBitShort as _NBitShort, - _NBitIntC as _NBitIntC, - _NBitIntP as _NBitIntP, - _NBitInt as _NBitInt, - _NBitLong as _NBitLong, - _NBitLongLong as _NBitLongLong, - _NBitHalf as _NBitHalf, - _NBitSingle as _NBitSingle, - _NBitDouble as _NBitDouble, - _NBitLongDouble as _NBitLongDouble, +from ._array_like import ( + NDArray as NDArray, + _ArrayLike as _ArrayLike, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeString_co as _ArrayLikeString_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, ) + +# from ._char_codes import ( _BoolCodes as _BoolCodes, - _UInt8Codes as 
_UInt8Codes, - _UInt16Codes as _UInt16Codes, - _UInt32Codes as _UInt32Codes, - _UInt64Codes as _UInt64Codes, + _ByteCodes as _ByteCodes, + _BytesCodes as _BytesCodes, + _CDoubleCodes as _CDoubleCodes, + _CharacterCodes as _CharacterCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _CSingleCodes as _CSingleCodes, + _DoubleCodes as _DoubleCodes, + _DT64Codes as _DT64Codes, + _FlexibleCodes as _FlexibleCodes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _FloatingCodes as _FloatingCodes, + _GenericCodes as _GenericCodes, + _HalfCodes as _HalfCodes, + _InexactCodes as _InexactCodes, _Int8Codes as _Int8Codes, _Int16Codes as _Int16Codes, _Int32Codes as _Int32Codes, _Int64Codes as _Int64Codes, - _Float16Codes as _Float16Codes, - _Float32Codes as _Float32Codes, - _Float64Codes as _Float64Codes, - _Complex64Codes as _Complex64Codes, - _Complex128Codes as _Complex128Codes, - _ByteCodes as _ByteCodes, - _ShortCodes as _ShortCodes, _IntCCodes as _IntCCodes, - _IntPCodes as _IntPCodes, _IntCodes as _IntCodes, + _IntegerCodes as _IntegerCodes, + _IntPCodes as _IntPCodes, _LongCodes as _LongCodes, + _LongDoubleCodes as _LongDoubleCodes, _LongLongCodes as _LongLongCodes, + _NumberCodes as _NumberCodes, + _ObjectCodes as _ObjectCodes, + _ShortCodes as _ShortCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _SingleCodes as _SingleCodes, + _StrCodes as _StrCodes, + _StringCodes as _StringCodes, + _TD64Codes as _TD64Codes, _UByteCodes as _UByteCodes, - _UShortCodes as _UShortCodes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, _UIntCCodes as _UIntCCodes, - _UIntPCodes as _UIntPCodes, _UIntCodes as _UIntCodes, + _UIntPCodes as _UIntPCodes, _ULongCodes as _ULongCodes, _ULongLongCodes as _ULongLongCodes, - _HalfCodes as 
_HalfCodes, - _SingleCodes as _SingleCodes, - _DoubleCodes as _DoubleCodes, - _LongDoubleCodes as _LongDoubleCodes, - _CSingleCodes as _CSingleCodes, - _CDoubleCodes as _CDoubleCodes, - _CLongDoubleCodes as _CLongDoubleCodes, - _DT64Codes as _DT64Codes, - _TD64Codes as _TD64Codes, - _StrCodes as _StrCodes, - _BytesCodes as _BytesCodes, - _VoidCodes as _VoidCodes, - _ObjectCodes as _ObjectCodes, - _StringCodes as _StringCodes, _UnsignedIntegerCodes as _UnsignedIntegerCodes, - _SignedIntegerCodes as _SignedIntegerCodes, - _IntegerCodes as _IntegerCodes, - _FloatingCodes as _FloatingCodes, - _ComplexFloatingCodes as _ComplexFloatingCodes, - _InexactCodes as _InexactCodes, - _NumberCodes as _NumberCodes, - _CharacterCodes as _CharacterCodes, - _FlexibleCodes as _FlexibleCodes, - _GenericCodes as _GenericCodes, -) -from ._scalars import ( - _CharLike_co as _CharLike_co, - _BoolLike_co as _BoolLike_co, - _UIntLike_co as _UIntLike_co, - _IntLike_co as _IntLike_co, - _FloatLike_co as _FloatLike_co, - _ComplexLike_co as _ComplexLike_co, - _TD64Like_co as _TD64Like_co, - _NumberLike_co as _NumberLike_co, - _ScalarLike_co as _ScalarLike_co, - _VoidLike_co as _VoidLike_co, -) -from ._shape import ( - _Shape as _Shape, - _ShapeLike as _ShapeLike, + _UShortCodes as _UShortCodes, + _VoidCodes as _VoidCodes, ) + +# from ._dtype_like import ( - DTypeLike as DTypeLike, _DTypeLike as _DTypeLike, - _SupportsDType as _SupportsDType, - _VoidDTypeLike as _VoidDTypeLike, _DTypeLikeBool as _DTypeLikeBool, - _DTypeLikeUInt as _DTypeLikeUInt, - _DTypeLikeInt as _DTypeLikeInt, - _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeBytes as _DTypeLikeBytes, _DTypeLikeComplex as _DTypeLikeComplex, - _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeInt as _DTypeLikeInt, _DTypeLikeObject as _DTypeLikeObject, - _DTypeLikeVoid as _DTypeLikeVoid, _DTypeLikeStr as _DTypeLikeStr, - 
_DTypeLikeBytes as _DTypeLikeBytes, - _DTypeLikeComplex_co as _DTypeLikeComplex_co, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeVoid as _DTypeLikeVoid, + _HasDType as _HasDType, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, ) -from ._array_like import ( - NDArray as NDArray, - ArrayLike as ArrayLike, - _ArrayLike as _ArrayLike, - _ArrayLikeInt as _ArrayLikeInt, - _ArrayLikeBool_co as _ArrayLikeBool_co, - _ArrayLikeUInt_co as _ArrayLikeUInt_co, - _ArrayLikeInt_co as _ArrayLikeInt_co, - _ArrayLikeFloat_co as _ArrayLikeFloat_co, - _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, - _ArrayLikeComplex_co as _ArrayLikeComplex_co, - _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, - _ArrayLikeNumber_co as _ArrayLikeNumber_co, - _ArrayLikeTD64_co as _ArrayLikeTD64_co, - _ArrayLikeDT64_co as _ArrayLikeDT64_co, - _ArrayLikeObject_co as _ArrayLikeObject_co, - _ArrayLikeVoid_co as _ArrayLikeVoid_co, - _ArrayLikeStr_co as _ArrayLikeStr_co, - _ArrayLikeBytes_co as _ArrayLikeBytes_co, - _ArrayLikeString_co as _ArrayLikeString_co, - _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, - _ArrayLikeUnknown as _ArrayLikeUnknown, - _FiniteNestedSequence as _FiniteNestedSequence, - _SupportsArray as _SupportsArray, - _SupportsArrayFunc as _SupportsArrayFunc, - _UnknownType as _UnknownType, + +# +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitDouble as _NBitDouble, + _NBitHalf as _NBitHalf, + _NBitInt as _NBitInt, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitLong as _NBitLong, + _NBitLongDouble as _NBitLongDouble, + _NBitLongLong as _NBitLongLong, + _NBitShort as _NBitShort, + _NBitSingle as _NBitSingle, +) + +# +from ._nbit_base import ( # type: ignore[deprecated] + NBitBase as NBitBase, # pyright: ignore[reportDeprecated] + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, ) +# +from ._nested_sequence import _NestedSequence as 
_NestedSequence + +# +from ._scalars import ( + _BoolLike_co as _BoolLike_co, + _CharLike_co as _CharLike_co, + _ComplexLike_co as _ComplexLike_co, + _FloatLike_co as _FloatLike_co, + _IntLike_co as _IntLike_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _TD64Like_co as _TD64Like_co, + _UIntLike_co as _UIntLike_co, + _VoidLike_co as _VoidLike_co, +) + +# +from ._shape import _AnyShape as _AnyShape, _Shape as _Shape, _ShapeLike as _ShapeLike + +# from ._ufunc import ( + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, - _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, - _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, ) + +# wrapping the public aliases in `TypeAliasType` helps with introspection readability +if sys.version_info >= (3, 12): + from typing import TypeAliasType + + from ._array_like import ArrayLike as _ArrayLikeAlias + from ._dtype_like import DTypeLike as _DTypeLikeAlias + + ArrayLike = TypeAliasType("ArrayLike", _ArrayLikeAlias) + DTypeLike = TypeAliasType("DTypeLike", _DTypeLikeAlias) + +else: + from ._array_like import ArrayLike as ArrayLike + from ._dtype_like import DTypeLike as DTypeLike diff --git a/blimgui/dist64/numpy/_typing/_add_docstring.py b/blimgui/dist64/numpy/_typing/_add_docstring.py index b12ad5a..8f7d784 100644 --- a/blimgui/dist64/numpy/_typing/_add_docstring.py +++ b/blimgui/dist64/numpy/_typing/_add_docstring.py @@ -120,7 +120,7 @@ def _parse_docstrings() -> str: add_newdoc('NDArray', repr(NDArray), """ - A `np.ndarray[tuple[int, ...], np.dtype[+ScalarType]] ` + A `np.ndarray[tuple[Any, ...], np.dtype[ScalarT]] ` type alias :term:`generic ` w.r.t. its `dtype.type `. 
@@ -137,10 +137,10 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[tuple[int, ...], numpy.dtype[+_ScalarType_co]] + numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]] >>> print(npt.NDArray[np.float64]) - numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.float64]] + numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]] >>> NDArrayInt = npt.NDArray[np.int_] >>> a: NDArrayInt = np.arange(10) diff --git a/blimgui/dist64/numpy/_typing/_array_like.py b/blimgui/dist64/numpy/_typing/_array_like.py index 9db1fe4..148a5dd 100644 --- a/blimgui/dist64/numpy/_typing/_array_like.py +++ b/blimgui/dist64/numpy/_typing/_array_like.py @@ -1,29 +1,13 @@ -from __future__ import annotations - import sys -from collections.abc import Collection, Callable, Sequence -from typing import Any, Protocol, TypeAlias, TypeVar, runtime_checkable, TYPE_CHECKING +from collections.abc import Callable, Collection, Sequence +from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable import numpy as np -from numpy import ( - ndarray, - dtype, - generic, - unsignedinteger, - integer, - floating, - complexfloating, - number, - timedelta64, - datetime64, - object_, - void, - str_, - bytes_, -) +from numpy import dtype + from ._nbit_base import _32Bit, _64Bit from ._nested_sequence import _NestedSequence -from ._shape import _Shape +from ._shape import _AnyShape if TYPE_CHECKING: StringDType = np.dtypes.StringDType @@ -33,12 +17,11 @@ from numpy._core.multiarray import StringDType _T = TypeVar("_T") -_ScalarType = TypeVar("_ScalarType", bound=generic) -_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) -_DType = TypeVar("_DType", bound=dtype[Any]) -_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) +_DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) 
-NDArray: TypeAlias = ndarray[_Shape, dtype[_ScalarType_co]] +NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -46,8 +29,8 @@ # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads @runtime_checkable -class _SupportsArray(Protocol[_DType_co]): - def __array__(self) -> ndarray[Any, _DType_co]: ... +class _SupportsArray(Protocol[_DTypeT_co]): + def __array__(self) -> np.ndarray[Any, _DTypeT_co]: ... @runtime_checkable @@ -73,16 +56,16 @@ def __array_function__( # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` _ArrayLike: TypeAlias = ( - _SupportsArray[dtype[_ScalarType]] - | _NestedSequence[_SupportsArray[dtype[_ScalarType]]] + _SupportsArray[dtype[_ScalarT]] + | _NestedSequence[_SupportsArray[dtype[_ScalarT]]] ) # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. `np.dtype` # and another one for the rest _DualArrayLike: TypeAlias = ( - _SupportsArray[_DType] - | _NestedSequence[_SupportsArray[_DType]] + _SupportsArray[_DTypeT] + | _NestedSequence[_SupportsArray[_DTypeT]] | _T | _NestedSequence[_T] ) @@ -94,99 +77,30 @@ def __array_function__( class _Buffer(Protocol): def __buffer__(self, flags: int, /) -> memoryview: ... 
-ArrayLike: TypeAlias = _Buffer | _DualArrayLike[ - dtype[Any], - bool | int | float | complex | str | bytes, -] +ArrayLike: TypeAlias = _Buffer | _DualArrayLike[dtype[Any], complex | bytes | str] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` -_ArrayLikeBool_co: TypeAlias = _DualArrayLike[ - dtype[np.bool], - bool, -] -_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[ - dtype[np.bool] | dtype[unsignedinteger[Any]], - bool, -] -_ArrayLikeInt_co: TypeAlias = _DualArrayLike[ - dtype[np.bool] | dtype[integer[Any]], - bool | int, -] -_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[ - dtype[np.bool] | dtype[integer[Any]] | dtype[floating[Any]], - bool | int | float, -] -_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[ - ( - dtype[np.bool] - | dtype[integer[Any]] - | dtype[floating[Any]] - | dtype[complexfloating[Any, Any]] - ), - bool | int | float | complex, -] -_ArrayLikeNumber_co: TypeAlias = _DualArrayLike[ - dtype[np.bool] | dtype[number[Any]], - bool | int | float | complex, -] -_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[ - dtype[np.bool] | dtype[integer[Any]] | dtype[timedelta64], - bool | int, -] -_ArrayLikeDT64_co: TypeAlias = ( - _SupportsArray[dtype[datetime64]] - | _NestedSequence[_SupportsArray[dtype[datetime64]]] -) -_ArrayLikeObject_co: TypeAlias = ( - _SupportsArray[dtype[object_]] - | _NestedSequence[_SupportsArray[dtype[object_]]] -) - -_ArrayLikeVoid_co: TypeAlias = ( - _SupportsArray[dtype[void]] - | _NestedSequence[_SupportsArray[dtype[void]]] -) -_ArrayLikeStr_co: TypeAlias = _DualArrayLike[ - dtype[str_], - str, -] -_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[ - dtype[bytes_], - bytes, -] -_ArrayLikeString_co: TypeAlias = _DualArrayLike[ - StringDType, - str -] -_ArrayLikeAnyString_co: TypeAlias = ( - _ArrayLikeStr_co | - _ArrayLikeBytes_co | - _ArrayLikeString_co -) +_ArrayLikeBool_co: TypeAlias = _DualArrayLike[dtype[np.bool], bool] +_ArrayLikeUInt_co: TypeAlias = 
_DualArrayLike[dtype[np.bool | np.unsignedinteger], bool] +_ArrayLikeInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer], int] +_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.floating], float] +_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.number], complex] +_ArrayLikeNumber_co: TypeAlias = _ArrayLikeComplex_co +_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.timedelta64], int] +_ArrayLikeDT64_co: TypeAlias = _ArrayLike[np.datetime64] +_ArrayLikeObject_co: TypeAlias = _ArrayLike[np.object_] + +_ArrayLikeVoid_co: TypeAlias = _ArrayLike[np.void] +_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[dtype[np.bytes_], bytes] +_ArrayLikeStr_co: TypeAlias = _DualArrayLike[dtype[np.str_], str] +_ArrayLikeString_co: TypeAlias = _DualArrayLike[StringDType, str] +_ArrayLikeAnyString_co: TypeAlias = _DualArrayLike[dtype[np.character] | StringDType, bytes | str] __Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool __Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool -_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float | int] -_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex | float | int] +_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float] +_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex] # NOTE: This includes `builtins.bool`, but not `numpy.bool`. -_ArrayLikeInt: TypeAlias = _DualArrayLike[ - dtype[integer[Any]], - int, -] - -# Extra ArrayLike type so that pyright can deal with NDArray[Any] -# Used as the first overload, should only match NDArray[Any], -# not any actual types. 
-# https://github.com/numpy/numpy/pull/22193 -if sys.version_info >= (3, 11): - from typing import Never as _UnknownType -else: - from typing import NoReturn as _UnknownType - - -_ArrayLikeUnknown: TypeAlias = _DualArrayLike[ - dtype[_UnknownType], - _UnknownType, -] +_ArrayLikeInt: TypeAlias = _DualArrayLike[dtype[np.integer], int] diff --git a/blimgui/dist64/numpy/_typing/_callable.pyi b/blimgui/dist64/numpy/_typing/_callable.pyi deleted file mode 100644 index dee99ba..0000000 --- a/blimgui/dist64/numpy/_typing/_callable.pyi +++ /dev/null @@ -1,365 +0,0 @@ -""" -A module with various ``typing.Protocol`` subclasses that implement -the ``__call__`` magic method. - -See the `Mypy documentation`_ on protocols for more details. - -.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols - -""" - -from typing import ( - TypeAlias, - TypeVar, - final, - overload, - Any, - NoReturn, - Protocol, - type_check_only, -) - -import numpy as np -from numpy import ( - generic, - number, - integer, - unsignedinteger, - signedinteger, - int8, - int_, - floating, - float64, - complexfloating, - complex128, -) -from ._nbit import _NBitInt -from ._scalars import ( - _BoolLike_co, - _IntLike_co, - _NumberLike_co, -) -from . 
import NBitBase -from ._array_like import NDArray -from ._nested_sequence import _NestedSequence - -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") -_T1_contra = TypeVar("_T1_contra", contravariant=True) -_T2_contra = TypeVar("_T2_contra", contravariant=True) - -_2Tuple: TypeAlias = tuple[_T1, _T1] - -_NBit1 = TypeVar("_NBit1", bound=NBitBase) -_NBit2 = TypeVar("_NBit2", bound=NBitBase) - -_IntType = TypeVar("_IntType", bound=integer[Any]) -_FloatType = TypeVar("_FloatType", bound=floating[Any]) -_NumberType = TypeVar("_NumberType", bound=number[Any]) -_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number[Any]) -_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) - -@type_check_only -class _BoolOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolBitOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - -@type_check_only -class _BoolSub(Protocol): - # Note that `other: bool` is absent here - @overload - def __call__(self, other: bool, /) -> NoReturn: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... 
- -@type_check_only -class _BoolTrueDiv(Protocol): - @overload - def __call__(self, other: float | _IntLike_co, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> int8: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - @overload - def __call__(self, other: _FloatType, /) -> _FloatType: ... - -@type_check_only -class _BoolDivMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... - @overload # platform dependent - def __call__(self, other: int, /) -> _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[np.float64]: ... - @overload - def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... - @overload - def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... - -@type_check_only -class _IntTrueDiv(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... - @overload - def __call__( - self, other: integer[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -@type_check_only -class _UnsignedIntOp(Protocol[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - @overload - def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> float64: ... 
- @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - @overload - def __call__(self, other: signedinteger, /) -> Any: ... - -@type_check_only -class _UnsignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[Any]: ... - @overload - def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - -@type_check_only -class _UnsignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: int | signedinteger[Any], /) -> Any: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - -@type_check_only -class _UnsignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int | signedinteger[Any], /) -> _2Tuple[Any]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... - -@type_check_only -class _SignedIntOp(Protocol[_NBit1]): - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... 
- @overload - def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[signedinteger[_NBit2]]: ... - -@type_check_only -class _FloatOp(Protocol[_NBit1]): - @overload - def __call__(self, other: int, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -@type_check_only -class _FloatMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... 
- @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -class _FloatDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... - @overload - def __call__( - self, other: int, / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBitInt]]: ... - @overload - def __call__( - self, other: float, / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... - -@type_check_only -class _NumberOp(Protocol): - def __call__(self, other: _NumberLike_co, /) -> Any: ... - -@final -@type_check_only -class _SupportsLT(Protocol): - def __lt__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsLE(Protocol): - def __le__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsGT(Protocol): - def __gt__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsGE(Protocol): - def __ge__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGT, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... 
- @overload - def __call__(self, other: _NestedSequence[_SupportsGE], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGE, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsLT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsLT, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGT, /) -> np.bool: ... diff --git a/blimgui/dist64/numpy/_typing/_char_codes.py b/blimgui/dist64/numpy/_typing/_char_codes.py index 3560527..92ac14d 100644 --- a/blimgui/dist64/numpy/_typing/_char_codes.py +++ b/blimgui/dist64/numpy/_typing/_char_codes.py @@ -182,7 +182,6 @@ _Float16Codes, _Float32Codes, _Float64Codes, - _LongDoubleCodes, _HalfCodes, _SingleCodes, _DoubleCodes, diff --git a/blimgui/dist64/numpy/_typing/_dtype_like.py b/blimgui/dist64/numpy/_typing/_dtype_like.py index 52753cb..810d4eb 100644 --- a/blimgui/dist64/numpy/_typing/_dtype_like.py +++ b/blimgui/dist64/numpy/_typing/_dtype_like.py @@ -1,63 +1,26 @@ from collections.abc import Sequence # noqa: F811 -from typing import ( - Any, - TypeAlias, - TypeVar, - Protocol, - TypedDict, - runtime_checkable, -) +from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar import numpy as np -from ._shape import _ShapeLike - from ._char_codes import ( _BoolCodes, - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _Int8Codes, - _Int16Codes, - 
_Int32Codes, - _Int64Codes, - _Float16Codes, - _Float32Codes, - _Float64Codes, - _Complex64Codes, - _Complex128Codes, - _ByteCodes, - _ShortCodes, - _IntCCodes, - _LongCodes, - _LongLongCodes, - _IntPCodes, - _IntCodes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _ULongCodes, - _ULongLongCodes, - _UIntPCodes, - _UIntCodes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes, - _CSingleCodes, - _CDoubleCodes, - _CLongDoubleCodes, + _BytesCodes, + _ComplexFloatingCodes, _DT64Codes, - _TD64Codes, + _FloatingCodes, + _NumberCodes, + _ObjectCodes, + _SignedIntegerCodes, _StrCodes, - _BytesCodes, + _TD64Codes, + _UnsignedIntegerCodes, _VoidCodes, - _ObjectCodes, ) -_SCT = TypeVar("_SCT", bound=np.generic) -_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) _DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types @@ -78,172 +41,74 @@ class _DTypeDict(_DTypeDictBase, total=False): aligned: bool -# A protocol for anything with the dtype attribute -@runtime_checkable -class _SupportsDType(Protocol[_DType_co]): +class _HasDType(Protocol[_DTypeT_co]): + @property + def dtype(self) -> _DTypeT_co: ... + + +class _HasNumPyDType(Protocol[_DTypeT_co]): @property - def dtype(self) -> _DType_co: ... + def __numpy_dtype__(self, /) -> _DTypeT_co: ... + + +_SupportsDType: TypeAlias = _HasDType[_DTypeT] | _HasNumPyDType[_DTypeT] # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike: TypeAlias = ( - np.dtype[_SCT] - | type[_SCT] - | _SupportsDType[np.dtype[_SCT]] -) +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # Would create a dtype[np.void] _VoidDTypeLike: TypeAlias = ( - # (flexible_dtype, itemsize) - tuple[_DTypeLikeNested, int] - # (fixed_dtype, shape) - | tuple[_DTypeLikeNested, _ShapeLike] + # If a tuple, then it can be either: + # - (flexible_dtype, itemsize) + # - (fixed_dtype, shape) + # - (base_dtype, new_dtype) + # But because `_DTypeLikeNested = Any`, the first two cases are redundant + + # tuple[_DTypeLikeNested, int] | tuple[_DTypeLikeNested, _ShapeLike] | + tuple[_DTypeLikeNested, _DTypeLikeNested] + # [(field_name, field_dtype, field_shape), ...] - # # The type here is quite broad because NumPy accepts quite a wide - # range of inputs inside the list; see the tests for some - # examples. + # range of inputs inside the list; see the tests for some examples. | list[Any] - # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., - # 'itemsize': ...} - | _DTypeDict - # (base_dtype, new_dtype) - | tuple[_DTypeLikeNested, _DTypeLikeNested] -) -# Anything that can be coerced into numpy.dtype. -# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike: TypeAlias = ( - np.dtype[Any] - # default data type (float64) - | None - # array-scalar types and generic types - | type[Any] # NOTE: We're stuck with `type[Any]` due to object dtypes - # anything with a dtype attribute - | _SupportsDType[np.dtype[Any]] - # character codes, type strings or comma-separated fields, e.g., 'float64' - | str - | _VoidDTypeLike + # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., 'itemsize': ...} + | _DTypeDict ) -# NOTE: while it is possible to provide the dtype as a dict of -# dtype-like objects (e.g. 
`{'field1': ..., 'field2': ..., ...}`), -# this syntax is officially discouraged and -# therefore not included in the type-union defining `DTypeLike`. -# -# See https://github.com/numpy/numpy/issues/16891 for more details. - # Aliases for commonly used dtype-like objects. # Note that the precision of `np.number` subclasses is ignored herein. -_DTypeLikeBool: TypeAlias = ( - type[bool] - | type[np.bool] - | np.dtype[np.bool] - | _SupportsDType[np.dtype[np.bool]] - | _BoolCodes -) -_DTypeLikeUInt: TypeAlias = ( - type[np.unsignedinteger[Any]] - | np.dtype[np.unsignedinteger[Any]] - | _SupportsDType[np.dtype[np.unsignedinteger[Any]]] - | _UInt8Codes - | _UInt16Codes - | _UInt32Codes - | _UInt64Codes - | _UByteCodes - | _UShortCodes - | _UIntCCodes - | _LongCodes - | _ULongLongCodes - | _UIntPCodes - | _UIntCodes -) +_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes _DTypeLikeInt: TypeAlias = ( - type[int] - | type[np.signedinteger[Any]] - | np.dtype[np.signedinteger[Any]] - | _SupportsDType[np.dtype[np.signedinteger[Any]]] - | _Int8Codes - | _Int16Codes - | _Int32Codes - | _Int64Codes - | _ByteCodes - | _ShortCodes - | _IntCCodes - | _LongCodes - | _LongLongCodes - | _IntPCodes - | _IntCodes -) -_DTypeLikeFloat: TypeAlias = ( - type[float] - | type[np.floating[Any]] - | np.dtype[np.floating[Any]] - | _SupportsDType[np.dtype[np.floating[Any]]] - | _Float16Codes - | _Float32Codes - | _Float64Codes - | _HalfCodes - | _SingleCodes - | _DoubleCodes - | _LongDoubleCodes + type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes ) +_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes +_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes _DTypeLikeComplex: TypeAlias = ( - type[complex] - | type[np.complexfloating[Any]] - | np.dtype[np.complexfloating[Any]] - | _SupportsDType[np.dtype[np.complexfloating[Any]]] - | _Complex64Codes - | _Complex128Codes - | _CSingleCodes - | _CDoubleCodes - 
| _CLongDoubleCodes -) -_DTypeLikeDT64: TypeAlias = ( - type[np.timedelta64] - | np.dtype[np.timedelta64] - | _SupportsDType[np.dtype[np.timedelta64]] - | _TD64Codes -) -_DTypeLikeTD64: TypeAlias = ( - type[np.datetime64] - | np.dtype[np.datetime64] - | _SupportsDType[np.dtype[np.datetime64]] - | _DT64Codes + type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes ) -_DTypeLikeStr: TypeAlias = ( - type[str] - | type[np.str_] - | np.dtype[np.str_] - | _SupportsDType[np.dtype[np.str_]] - | _StrCodes -) -_DTypeLikeBytes: TypeAlias = ( - type[bytes] - | type[np.bytes_] - | np.dtype[np.bytes_] - | _SupportsDType[np.dtype[np.bytes_]] - | _BytesCodes +_DTypeLikeComplex_co: TypeAlias = ( + type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes ) +_DTypeLikeDT64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes +_DTypeLikeTD64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes +_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes +_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes _DTypeLikeVoid: TypeAlias = ( - type[np.void] - | np.dtype[np.void] - | _SupportsDType[np.dtype[np.void]] - | _VoidCodes - | _VoidDTypeLike -) -_DTypeLikeObject: TypeAlias = ( - type - | np.dtype[np.object_] - | _SupportsDType[np.dtype[np.object_]] - | _ObjectCodes + type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes ) +_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes -_DTypeLikeComplex_co: TypeAlias = ( - _DTypeLikeBool - | _DTypeLikeUInt - | _DTypeLikeInt - | _DTypeLikeFloat - | _DTypeLikeComplex -) + +# Anything that can be coerced into numpy.dtype. +# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html +DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str + +# NOTE: while it is possible to provide the dtype as a dict of +# dtype-like objects (e.g. 
`{'field1': ..., 'field2': ..., ...}`), +# this syntax is officially discouraged and +# therefore not included in the type-union defining `DTypeLike`. +# +# See https://github.com/numpy/numpy/issues/16891 for more details. diff --git a/blimgui/dist64/numpy/_typing/_extended_precision.py b/blimgui/dist64/numpy/_typing/_extended_precision.py index 2af4889..dac2b98 100644 --- a/blimgui/dist64/numpy/_typing/_extended_precision.py +++ b/blimgui/dist64/numpy/_typing/_extended_precision.py @@ -6,22 +6,10 @@ """ import numpy as np -from . import ( - _80Bit, - _96Bit, - _128Bit, - _256Bit, -) -uint128 = np.unsignedinteger[_128Bit] -uint256 = np.unsignedinteger[_256Bit] -int128 = np.signedinteger[_128Bit] -int256 = np.signedinteger[_256Bit] -float80 = np.floating[_80Bit] +from . import _96Bit, _128Bit + float96 = np.floating[_96Bit] float128 = np.floating[_128Bit] -float256 = np.floating[_256Bit] -complex160 = np.complexfloating[_80Bit, _80Bit] complex192 = np.complexfloating[_96Bit, _96Bit] complex256 = np.complexfloating[_128Bit, _128Bit] -complex512 = np.complexfloating[_256Bit, _256Bit] diff --git a/blimgui/dist64/numpy/_typing/_nbit.py b/blimgui/dist64/numpy/_typing/_nbit.py index cdba6a3..276f17c 100644 --- a/blimgui/dist64/numpy/_typing/_nbit.py +++ b/blimgui/dist64/numpy/_typing/_nbit.py @@ -1,8 +1,8 @@ """A module with the precisions of platform-specific `~numpy.number`s.""" from typing import TypeAlias -from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit +from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin _NBitByte: TypeAlias = _8Bit diff --git a/blimgui/dist64/numpy/_typing/_nbit_base.py b/blimgui/dist64/numpy/_typing/_nbit_base.py index 92218f2..bcf7d91 100644 --- a/blimgui/dist64/numpy/_typing/_nbit_base.py +++ b/blimgui/dist64/numpy/_typing/_nbit_base.py @@ -1,7 +1,8 @@ """A module with the precisions of generic `~numpy.number` types.""" -from .._utils 
import set_module from typing import final +from numpy._utils import set_module + @final # Disallow the creation of arbitrary `NBitBase` subclasses @set_module("numpy.typing") @@ -9,13 +10,17 @@ class NBitBase: """ A type representing `numpy.number` precision during static type checking. - Used exclusively for the purpose static type checking, `NBitBase` + Used exclusively for the purpose of static type checking, `NBitBase` represents the base of a hierarchical set of subclasses. Each subsequent subclass is herein used for representing a lower level of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. .. versionadded:: 1.20 + .. deprecated:: 2.3 + Use ``@typing.overload`` or a ``TypeVar`` with a scalar-type as upper + bound, instead. + Examples -------- Below is a typical usage example: `NBitBase` is herein used for annotating @@ -25,7 +30,6 @@ class NBitBase: .. code-block:: python - >>> from __future__ import annotations >>> from typing import TypeVar, TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt @@ -48,11 +52,11 @@ class NBitBase: ... 
# note: out: numpy.floating[numpy.typing._64Bit*] """ + # Deprecated in NumPy 2.3, 2025-05-01 def __init_subclass__(cls) -> None: allowed_names = { - "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", - "_64Bit", "_32Bit", "_16Bit", "_8Bit", + "NBitBase", "_128Bit", "_96Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit" } if cls.__name__ not in allowed_names: raise TypeError('cannot inherit from final class "NBitBase"') @@ -61,40 +65,30 @@ def __init_subclass__(cls) -> None: @final @set_module("numpy._typing") # Silence errors about subclassing a `@final`-decorated class -class _256Bit(NBitBase): # type: ignore[misc] - pass - -@final -@set_module("numpy._typing") -class _128Bit(_256Bit): # type: ignore[misc] - pass - -@final -@set_module("numpy._typing") -class _96Bit(_128Bit): # type: ignore[misc] +class _128Bit(NBitBase): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _80Bit(_96Bit): # type: ignore[misc] +class _96Bit(_128Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _64Bit(_80Bit): # type: ignore[misc] +class _64Bit(_96Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _32Bit(_64Bit): # type: ignore[misc] +class _32Bit(_64Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _16Bit(_32Bit): # type: ignore[misc] +class _16Bit(_32Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _8Bit(_16Bit): # type: ignore[misc] +class _8Bit(_16Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass diff --git a/blimgui/dist64/numpy/_typing/_nbit_base.pyi b/blimgui/dist64/numpy/_typing/_nbit_base.pyi new file mode 100644 index 0000000..d88c9f4 --- /dev/null +++ b/blimgui/dist64/numpy/_typing/_nbit_base.pyi @@ -0,0 +1,39 @@ +# pyright: 
reportDeprecated=false +# pyright: reportGeneralTypeIssues=false +# mypy: disable-error-code=misc + +from typing import final +from typing_extensions import deprecated + +# Deprecated in NumPy 2.3, 2025-05-01 +@deprecated( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", +) +@final +class NBitBase: ... + +@final +class _256Bit(NBitBase): ... + +@final +class _128Bit(_256Bit): ... + +@final +class _96Bit(_128Bit): ... + +@final +class _80Bit(_96Bit): ... + +@final +class _64Bit(_80Bit): ... + +@final +class _32Bit(_64Bit): ... + +@final +class _16Bit(_32Bit): ... + +@final +class _8Bit(_16Bit): ... diff --git a/blimgui/dist64/numpy/_typing/_nested_sequence.py b/blimgui/dist64/numpy/_typing/_nested_sequence.py index b6fb08d..892e069 100644 --- a/blimgui/dist64/numpy/_typing/_nested_sequence.py +++ b/blimgui/dist64/numpy/_typing/_nested_sequence.py @@ -1,14 +1,6 @@ """A module containing the `_NestedSequence` protocol.""" -from __future__ import annotations - -from typing import ( - Any, - TypeVar, - Protocol, - runtime_checkable, - TYPE_CHECKING, -) +from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable if TYPE_CHECKING: from collections.abc import Iterator @@ -36,8 +28,6 @@ class _NestedSequence(Protocol[_T_co]): -------- .. 
code-block:: python - >>> from __future__ import annotations - >>> from typing import TYPE_CHECKING >>> import numpy as np >>> from numpy._typing import _NestedSequence @@ -64,7 +54,7 @@ def __len__(self, /) -> int: """Implement ``len(self)``.""" raise NotImplementedError - def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: + def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]": """Implement ``self[x]``.""" raise NotImplementedError @@ -72,11 +62,11 @@ def __contains__(self, x: object, /) -> bool: """Implement ``x in self``.""" raise NotImplementedError - def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": """Implement ``iter(self)``.""" raise NotImplementedError - def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": """Implement ``reversed(self)``.""" raise NotImplementedError diff --git a/blimgui/dist64/numpy/_typing/_scalars.py b/blimgui/dist64/numpy/_typing/_scalars.py index fa9fab3..bd51985 100644 --- a/blimgui/dist64/numpy/_typing/_scalars.py +++ b/blimgui/dist64/numpy/_typing/_scalars.py @@ -4,24 +4,17 @@ # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and # `np.bytes_` are already subclasses of their builtin counterpart - _CharLike_co: TypeAlias = str | bytes -# The 6 `Like_co` type-aliases below represent all scalars that can be +# The `Like_co` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) _BoolLike_co: TypeAlias = bool | np.bool -_UIntLike_co: TypeAlias = np.unsignedinteger[Any] | _BoolLike_co -_IntLike_co: TypeAlias = int | np.integer[Any] | _BoolLike_co -_FloatLike_co: TypeAlias = float | np.floating[Any] | _IntLike_co -_ComplexLike_co: TypeAlias = ( - complex - | np.complexfloating[Any, Any] - | _FloatLike_co -) -_TD64Like_co: TypeAlias = np.timedelta64 
| _IntLike_co - -_NumberLike_co: TypeAlias = int | float | complex | np.number[Any] | np.bool -_ScalarLike_co: TypeAlias = int | float | complex | str | bytes | np.generic - +_UIntLike_co: TypeAlias = bool | np.unsignedinteger | np.bool +_IntLike_co: TypeAlias = int | np.integer | np.bool +_FloatLike_co: TypeAlias = float | np.floating | np.integer | np.bool +_ComplexLike_co: TypeAlias = complex | np.number | np.bool +_NumberLike_co: TypeAlias = _ComplexLike_co +_TD64Like_co: TypeAlias = int | np.timedelta64 | np.integer | np.bool # `_VoidLike_co` is technically not a scalar, but it's close enough _VoidLike_co: TypeAlias = tuple[Any, ...] | np.void +_ScalarLike_co: TypeAlias = complex | str | bytes | np.generic diff --git a/blimgui/dist64/numpy/_typing/_shape.py b/blimgui/dist64/numpy/_typing/_shape.py index e297e34..2325d60 100644 --- a/blimgui/dist64/numpy/_typing/_shape.py +++ b/blimgui/dist64/numpy/_typing/_shape.py @@ -1,7 +1,8 @@ from collections.abc import Sequence -from typing import SupportsIndex, TypeAlias +from typing import Any, SupportsIndex, TypeAlias _Shape: TypeAlias = tuple[int, ...] +_AnyShape: TypeAlias = tuple[Any, ...] # Anything that can be coerced to a shape tuple _ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] diff --git a/blimgui/dist64/numpy/_typing/_ufunc.py b/blimgui/dist64/numpy/_typing/_ufunc.py index 835b099..97cf201 100644 --- a/blimgui/dist64/numpy/_typing/_ufunc.py +++ b/blimgui/dist64/numpy/_typing/_ufunc.py @@ -1,4 +1,4 @@ -from .. import ufunc +from numpy import ufunc _UFunc_Nin1_Nout1 = ufunc _UFunc_Nin2_Nout1 = ufunc diff --git a/blimgui/dist64/numpy/_typing/_ufunc.pyi b/blimgui/dist64/numpy/_typing/_ufunc.pyi index 3d1ae58..a95b58a 100644 --- a/blimgui/dist64/numpy/_typing/_ufunc.pyi +++ b/blimgui/dist64/numpy/_typing/_ufunc.pyi @@ -4,24 +4,27 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. 
So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. -""" +""" # noqa: PYI021 +from _typeshed import Incomplete +from types import EllipsisType from typing import ( Any, Generic, Literal, + LiteralString, + Never, NoReturn, Protocol, SupportsIndex, TypeAlias, TypedDict, TypeVar, + Unpack, overload, type_check_only, ) -from typing_extensions import LiteralString, Unpack - import numpy as np from numpy import _CastingKind, _OrderKACF, ufunc from numpy.typing import NDArray @@ -36,9 +39,9 @@ _2Tuple: TypeAlias = tuple[_T, _T] _3Tuple: TypeAlias = tuple[_T, _T, _T] _4Tuple: TypeAlias = tuple[_T, _T, _T, _T] -_2PTuple: TypeAlias = tuple[_T, _T, Unpack[tuple[_T, ...]]] -_3PTuple: TypeAlias = tuple[_T, _T, _T, Unpack[tuple[_T, ...]]] -_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, Unpack[tuple[_T, ...]]] +_2PTuple: TypeAlias = tuple[_T, _T, *tuple[_T, ...]] +_3PTuple: TypeAlias = tuple[_T, _T, _T, *tuple[_T, ...]] +_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, *tuple[_T, ...]] _NTypes = TypeVar("_NTypes", bound=int, covariant=True) _IDType = TypeVar("_IDType", covariant=True) @@ -48,8 +51,7 @@ _Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) _NIn = TypeVar("_NIn", bound=int, covariant=True) _NOut = TypeVar("_NOut", bound=int, covariant=True) _ReturnType_co = TypeVar("_ReturnType_co", covariant=True) -_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) - +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) @type_check_only class _SupportsArrayUFunc(Protocol): @@ -69,6 +71,11 @@ class _UFunc3Kwargs(TypedDict, total=False): subok: bool signature: _3Tuple[str | None] | str | None +@type_check_only +class _ReduceKwargs(TypedDict, total=False): + initial: Incomplete # = + where: _ArrayLikeBool_co | None # = True + # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. 
# In such cases the respective methods return `NoReturn` @@ -86,7 +93,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -103,61 +110,59 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + /, + out: None = None, *, - where: None | _ArrayLikeBool_co = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _2Tuple[None | str] = ..., - ) -> Any: ... + signature: str | _2Tuple[str | None] = ..., + ) -> Incomplete: ... @overload def __call__( self, - __x1: ArrayLike, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + x1: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: None | _ArrayLikeBool_co = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _2Tuple[None | str] = ..., - ) -> NDArray[Any]: ... + signature: str | _2Tuple[str | None] = ..., + ) -> NDArray[Incomplete]: ... 
@overload def __call__( self, - __x1: _SupportsArrayUFunc, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + x1: _SupportsArrayUFunc, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: None | _ArrayLikeBool_co = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _2Tuple[None | str] = ..., - ) -> Any: ... + signature: str | _2Tuple[str | None] = ..., + ) -> Incomplete: ... - def at( - self, - a: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - /, - ) -> None: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -177,94 +182,114 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = None, + out: EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> Any: ... + ) -> Incomplete: ... 
@overload # (array-like, array) -> array def __call__( self, x1: ArrayLike, - x2: NDArray[np.generic], + x2: np.ndarray, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array, array-like) -> array def __call__( self, - x1: NDArray[np.generic], + x1: np.ndarray, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like, out=array) -> array def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: np.ndarray | tuple[np.ndarray], *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like) -> array | scalar def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any] | Any: ... + ) -> NDArray[Incomplete] | Incomplete: ... - def at( + def accumulate( self, - a: NDArray[Any], - indices: _ArrayLikeInt_co, - b: ArrayLike, + array: ArrayLike, /, - ) -> None: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... 
+ @overload # type: ignore[override] + def reduce( # out=None (default), keepdims=False (default) + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[False] = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> Incomplete: ... + @overload # out=ndarray or out=... def reduce( self, array: ArrayLike, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None | NDArray[Any] = ..., - keepdims: bool = ..., - initial: Any = ..., - where: _ArrayLikeBool_co = ..., - ) -> Any: ... - - def accumulate( + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: np.ndarray | EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... + @overload # keepdims=True + def reduce( self, array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None | NDArray[Any] = ..., - ) -> NDArray[Any]: ... + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + *, + keepdims: Literal[True], + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... def reduceat( self, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None | NDArray[Any] = ..., - ) -> NDArray[Any]: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... - @overload # (scalar, scalar) -> scalar - def outer( + @overload # type: ignore[override] + def outer( # (scalar, scalar) -> scalar self, A: _ScalarLike_co, B: _ScalarLike_co, @@ -273,29 +298,29 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i out: None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> Any: ... + ) -> Incomplete: ... 
@overload # (array-like, array) -> array def outer( self, A: ArrayLike, - B: NDArray[np.generic], + B: np.ndarray, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array, array-like) -> array def outer( self, - A: NDArray[np.generic], + A: np.ndarray, B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like, out=array) -> array def outer( self, @@ -303,10 +328,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: np.ndarray | tuple[np.ndarray] | EllipsisType, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like) -> array | scalar def outer( self, @@ -314,17 +339,25 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any] | Any: ... + ) -> NDArray[Incomplete] | Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... @type_check_only class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... 
+ def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -341,60 +374,64 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = None, + /, *, - where: None | _ArrayLikeBool_co = ..., + out: EllipsisType | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> _2Tuple[Any]: ... + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... @overload def __call__( self, - __x1: ArrayLike, - __out1: None | NDArray[Any] = ..., - __out2: None | NDArray[Any] = ..., + x1: ArrayLike, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Incomplete]]: ... 
@overload def __call__( self, - __x1: _SupportsArrayUFunc, - __out1: None | NDArray[Any] = ..., - __out2: None | NDArray[Any] = ..., + x1: _SupportsArrayUFunc, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> _2Tuple[Any]: ... + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... 
@property @@ -411,47 +448,50 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = None, + /, *, - where: None | _ArrayLikeBool_co = ..., + out: EllipsisType | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _4Tuple[None | str] = ..., - ) -> _2Tuple[Any]: ... + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - __out1: None | NDArray[Any] = ..., - __out2: None | NDArray[Any] = ..., + x1: ArrayLike, + x2: ArrayLike, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _4Tuple[None | str] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Incomplete]]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... 
# type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -469,41 +509,43 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None = ..., + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = None, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]], + x1: ArrayLike, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... 
+ def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): - where: None | _ArrayLikeBool_co + where: _ArrayLikeBool_co | None casting: _CastingKind order: _OrderKACF dtype: DTypeLike @@ -512,7 +554,7 @@ class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): @type_check_only class _PyFunc_Kwargs_Nargs3(TypedDict, total=False): - where: None | _ArrayLikeBool_co + where: _ArrayLikeBool_co | None casting: _CastingKind order: _OrderKACF dtype: DTypeLike @@ -521,7 +563,7 @@ class _PyFunc_Kwargs_Nargs3(TypedDict, total=False): @type_check_only class _PyFunc_Kwargs_Nargs3P(TypedDict, total=False): - where: None | _ArrayLikeBool_co + where: _ArrayLikeBool_co | None casting: _CastingKind order: _OrderKACF dtype: DTypeLike @@ -530,7 +572,7 @@ class _PyFunc_Kwargs_Nargs3P(TypedDict, total=False): @type_check_only class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): - where: None | _ArrayLikeBool_co + where: _ArrayLikeBool_co | None casting: _CastingKind order: _OrderKACF dtype: DTypeLike @@ -557,7 +599,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co: ... @overload @@ -565,7 +607,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co | NDArray[np.object_]: ... 
@overload @@ -573,23 +615,24 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: ArrayLike, /, - out: _ArrayType | tuple[_ArrayType], + out: _ArrayT | tuple[_ArrayT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ArrayType: ... + ) -> _ArrayT: ... @overload def __call__( self, x1: _SupportsArrayUFunc, /, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> Any: ... + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] - def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, /) -> None: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] @@ -612,7 +655,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... 
@overload @@ -621,7 +664,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -630,158 +673,136 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: ArrayLike, /, - out: _ArrayType | tuple[_ArrayType], + out: _ArrayT | tuple[_ArrayT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayType: ... + ) -> _ArrayT: ... @overload def __call__( self, x1: _SupportsArrayUFunc, x2: _SupportsArrayUFunc | ArrayLike, /, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, x1: ArrayLike, x2: _SupportsArrayUFunc, /, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... - - def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, b: ArrayLike, /) -> None: ... + ) -> Incomplete: ... - @overload - def reduce( + @overload # type: ignore[override] + def accumulate( self, array: ArrayLike, - axis: None | _ShapeLike, - dtype: DTypeLike, - out: _ArrayType, /, - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _ArrayType: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + ) -> NDArray[np.object_]: ... @overload - def reduce( + def accumulate( self, + array: ArrayLike, /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ) -> _ArrayT: ... 
+ + @overload # type: ignore[override] + def reduce( # out=array + self, array: ArrayLike, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, *, - out: _ArrayType | tuple[_ArrayType], - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _ArrayType: ... - @overload + out: _ArrayT | tuple[_ArrayT], + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> _ArrayT: ... + @overload # out=... def reduce( self, + array: ArrayLike, /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[np.object_]: ... + @overload # keepdims=True + def reduce( + self, array: ArrayLike, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, *, keepdims: Literal[True], - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + **kwargs: Unpack[_ReduceKwargs], ) -> NDArray[np.object_]: ... @overload def reduce( self, - /, array: ArrayLike, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], ) -> _ReturnType_co | NDArray[np.object_]: ... - @overload + @overload # type: ignore[override] def reduceat( self, array: ArrayLike, - indices: _ArrayLikeInt_co, - axis: SupportsIndex, - dtype: DTypeLike, - out: _ArrayType, - /, - ) -> _ArrayType: ... 
- @overload - def reduceat( - self, /, - array: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, *, - out: _ArrayType | tuple[_ArrayType], - ) -> _ArrayType: ... + out: _ArrayT | tuple[_ArrayT], + ) -> _ArrayT: ... @overload def reduceat( self, - /, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, ) -> NDArray[np.object_]: ... @overload def reduceat( self, - /, array: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., - ) -> Any: ... - - @overload - def accumulate( - self, - array: ArrayLike, - axis: SupportsIndex, - dtype: DTypeLike, - out: _ArrayType, /, - ) -> _ArrayType: ... - @overload - def accumulate( - self, - array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - *, - out: _ArrayType | tuple[_ArrayType], - ) -> _ArrayType: ... - @overload - def accumulate( - self, - /, - array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., - ) -> NDArray[np.object_]: ... + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + ) -> Incomplete: ... - @overload + @overload # type: ignore[override] def outer( self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... 
@overload @@ -789,8 +810,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -798,28 +820,39 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, - out: _ArrayType, + /, + *, + out: _ArrayT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayType: ... + ) -> _ArrayT: ... @overload def outer( self, A: _SupportsArrayUFunc, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... @overload def outer( self, A: _ScalarLike_co, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... @type_check_only class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] @@ -842,7 +875,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co: ... @overload @@ -853,7 +886,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co | NDArray[np.object_]: ... 
@overload @@ -864,9 +897,9 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: ArrayLike, /, *xs: ArrayLike, - out: _ArrayType | tuple[_ArrayType], + out: _ArrayT | tuple[_ArrayT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ArrayType: ... + ) -> _ArrayT: ... @overload def __call__( self, @@ -875,15 +908,15 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] @type_check_only class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] @@ -904,7 +937,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co]: ... 
@overload @@ -913,7 +946,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... @overload @@ -922,21 +955,21 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: ArrayLike, /, *xs: ArrayLike, - out: _2PTuple[_ArrayType], + out: _2PTuple[_ArrayT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ArrayType]: ... + ) -> _2PTuple[_ArrayT]: ... @overload def __call__( self, x1: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: None | _2PTuple[NDArray[Any]] = ..., + out: _2PTuple[np.ndarray] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... 
# type: ignore[override] diff --git a/blimgui/dist64/numpy/_utils/__init__.py b/blimgui/dist64/numpy/_utils/__init__.py index cc1a36c..d101c03 100644 --- a/blimgui/dist64/numpy/_utils/__init__.py +++ b/blimgui/dist64/numpy/_utils/__init__.py @@ -10,7 +10,8 @@ import functools import warnings -from ._convertions import asunicode, asbytes + +from ._convertions import asbytes, asunicode def set_module(module): @@ -26,6 +27,12 @@ def example(): """ def decorator(func): if module is not None: + if isinstance(func, type): + try: + func._module_source = func.__module__ + except (AttributeError): + pass + func.__module__ = module return func return decorator diff --git a/blimgui/dist64/numpy/_utils/__init__.pyi b/blimgui/dist64/numpy/_utils/__init__.pyi index de68d8d..a27cc3c 100644 --- a/blimgui/dist64/numpy/_utils/__init__.pyi +++ b/blimgui/dist64/numpy/_utils/__init__.pyi @@ -1,11 +1,8 @@ -from collections.abc import Callable, Iterable -from typing import Protocol, overload, type_check_only - from _typeshed import IdentityFunction -from typing_extensions import TypeVar +from collections.abc import Callable, Iterable +from typing import Protocol, TypeVar, overload, type_check_only -from ._convertions import asbytes as asbytes -from ._convertions import asunicode as asunicode +from ._convertions import asbytes as asbytes, asunicode as asunicode ### @@ -21,7 +18,7 @@ class _HasModule(Protocol): @overload def set_module(module: None) -> IdentityFunction: ... @overload -def set_module(module: _HasModuleT) -> _HasModuleT: ... +def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... 
# def _rename_parameter( diff --git a/blimgui/dist64/numpy/_utils/_inspect.py b/blimgui/dist64/numpy/_utils/_inspect.py index 381ca39..772dd5b 100644 --- a/blimgui/dist64/numpy/_utils/_inspect.py +++ b/blimgui/dist64/numpy/_utils/_inspect.py @@ -58,6 +58,7 @@ def iscode(object): """ return isinstance(object, types.CodeType) + # ------------------------------------------------ argument list extraction # These constants are from Python's compile.h. CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 diff --git a/blimgui/dist64/numpy/_utils/_inspect.pyi b/blimgui/dist64/numpy/_utils/_inspect.pyi index 8eba974..b415829 100644 --- a/blimgui/dist64/numpy/_utils/_inspect.pyi +++ b/blimgui/dist64/numpy/_utils/_inspect.pyi @@ -1,9 +1,8 @@ import types -from collections.abc import Callable, Mapping -from typing import Any, Final, TypeAlias, overload - from _typeshed import SupportsLenAndGetItem -from typing_extensions import TypeIs, TypeVar +from collections.abc import Callable, Mapping +from typing import Any, Final, TypeAlias, TypeVar, overload +from typing_extensions import TypeIs __all__ = ["formatargspec", "getargspec"] diff --git a/blimgui/dist64/numpy/_utils/_pep440.py b/blimgui/dist64/numpy/_utils/_pep440.py index 6f56d60..eda6b6c 100644 --- a/blimgui/dist64/numpy/_utils/_pep440.py +++ b/blimgui/dist64/numpy/_utils/_pep440.py @@ -33,7 +33,6 @@ import itertools import re - __all__ = [ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", ] @@ -172,7 +171,7 @@ def __str__(self): return self._version def __repr__(self): - return "<LegacyVersion({0})>".format(repr(str(self))) + return f"<LegacyVersion('{self}')>" @property def public(self): @@ -293,7 +292,7 @@ def __init__(self, version): # Validate the version and parse it into pieces match = self._regex.search(version) if not match: - raise InvalidVersion("Invalid version: '{0}'".format(version)) + raise InvalidVersion(f"Invalid version: '{version}'") # Store the parsed out pieces of the version self._version = _Version( @@ 
-325,14 +324,14 @@ def __init__(self, version): ) def __repr__(self): - return "".format(repr(str(self))) + return f"" def __str__(self): parts = [] # Epoch if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) + parts.append(f"{self._version.epoch}!") # Release segment parts.append(".".join(str(x) for x in self._version.release)) @@ -343,16 +342,16 @@ def __str__(self): # Post-release if self._version.post is not None: - parts.append(".post{0}".format(self._version.post[1])) + parts.append(f".post{self._version.post[1]}") # Development release if self._version.dev is not None: - parts.append(".dev{0}".format(self._version.dev[1])) + parts.append(f".dev{self._version.dev[1]}") # Local version segment if self._version.local is not None: parts.append( - "+{0}".format(".".join(str(x) for x in self._version.local)) + f"+{'.'.join(str(x) for x in self._version.local)}" ) return "".join(parts) @@ -367,7 +366,7 @@ def base_version(self): # Epoch if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) + parts.append(f"{self._version.epoch}!") # Release segment parts.append(".".join(str(x) for x in self._version.release)) diff --git a/blimgui/dist64/numpy/_utils/_pep440.pyi b/blimgui/dist64/numpy/_utils/_pep440.pyi index 42c4640..3246be1 100644 --- a/blimgui/dist64/numpy/_utils/_pep440.pyi +++ b/blimgui/dist64/numpy/_utils/_pep440.pyi @@ -5,15 +5,12 @@ from typing import ( ClassVar, Final, Generic, + Literal as L, NamedTuple, TypeVar, final, type_check_only, ) -from typing import ( - Literal as L, -) - from typing_extensions import TypeIs __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] diff --git a/blimgui/dist64/numpy/char/__init__.py b/blimgui/dist64/numpy/char/__init__.py index ddaafab..0a360f7 100644 --- a/blimgui/dist64/numpy/char/__init__.py +++ b/blimgui/dist64/numpy/char/__init__.py @@ -1,2 +1,2 @@ -from numpy._core.defchararray import __all__, __doc__ from 
numpy._core.defchararray import * +from numpy._core.defchararray import __all__, __doc__ diff --git a/blimgui/dist64/numpy/char/__init__.pyi b/blimgui/dist64/numpy/char/__init__.pyi index 538c473..c736eb2 100644 --- a/blimgui/dist64/numpy/char/__init__.pyi +++ b/blimgui/dist64/numpy/char/__init__.pyi @@ -1,34 +1,39 @@ from numpy._core.defchararray import ( - equal, - not_equal, - greater_equal, - less_equal, - greater, - less, - str_len, add, - multiply, - mod, + array, + asarray, capitalize, center, + chararray, + compare_chararrays, count, decode, encode, endswith, + equal, expandtabs, find, + greater, + greater_equal, index, isalnum, isalpha, + isdecimal, isdigit, islower, + isnumeric, isspace, istitle, isupper, join, + less, + less_equal, ljust, lower, lstrip, + mod, + multiply, + not_equal, partition, replace, rfind, @@ -40,18 +45,13 @@ from numpy._core.defchararray import ( split, splitlines, startswith, + str_len, strip, swapcase, title, translate, upper, zfill, - isnumeric, - isdecimal, - array, - asarray, - compare_chararrays, - chararray ) __all__ = [ diff --git a/blimgui/dist64/numpy/compat/__init__.py b/blimgui/dist64/numpy/compat/__init__.py deleted file mode 100644 index 0869ce2..0000000 --- a/blimgui/dist64/numpy/compat/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Compatibility module. - -This module contains duplicated code from Python itself or 3rd party -extensions, which may be included for the following reasons: - - * compatibility - * we may only need a small subset of the copied library/module - -This module is deprecated since 1.26.0 and will be removed in future versions. - -""" - -import warnings - -from .._utils import _inspect -from .._utils._inspect import getargspec, formatargspec -from . 
import py3k -from .py3k import * - -warnings.warn( - "`np.compat`, which was used during the Python 2 to 3 transition," - " is deprecated since 1.26.0, and will be removed", - DeprecationWarning, stacklevel=2 -) - -__all__ = [] -__all__.extend(_inspect.__all__) -__all__.extend(py3k.__all__) diff --git a/blimgui/dist64/numpy/compat/py3k.py b/blimgui/dist64/numpy/compat/py3k.py deleted file mode 100644 index 2d1dd03..0000000 --- a/blimgui/dist64/numpy/compat/py3k.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -Python 3.X compatibility tools. - -While this file was originally intended for Python 2 -> 3 transition, -it is now used to create a compatibility layer between different -minor versions of Python 3. - -While the active version of numpy may not support a given version of python, we -allow downstream libraries to continue to use these shims for forward -compatibility with numpy while they transition their code to newer versions of -Python. -""" -__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', - 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', - 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', - 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] - -import sys -import os -from pathlib import Path -import io -try: - import pickle5 as pickle -except ImportError: - import pickle - -long = int -integer_types = (int,) -basestring = str -unicode = str -bytes = bytes - -def asunicode(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - -def asbytes(s): - if isinstance(s, bytes): - return s - return str(s).encode('latin1') - -def asstr(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - -def isfileobj(f): - if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): - return False - try: - # BufferedReader/Writer may raise OSError when - # fetching `fileno()` (e.g. when wrapping BytesIO). 
- f.fileno() - return True - except OSError: - return False - -def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') - -def sixu(s): - return s - -strchar = 'U' - -def getexception(): - return sys.exc_info()[1] - -def asbytes_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asbytes_nested(y) for y in x] - else: - return asbytes(x) - -def asunicode_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asunicode_nested(y) for y in x] - else: - return asunicode(x) - -def is_pathlib_path(obj): - """ - Check whether obj is a `pathlib.Path` object. - - Prefer using ``isinstance(obj, os.PathLike)`` instead of this function. - """ - return isinstance(obj, Path) - -# from Python 3.7 -class contextlib_nullcontext: - """Context manager that does no additional processing. - - Used as a stand-in for a normal context manager, when a particular - block of code is only sometimes used with a normal context manager: - - cm = optional_cm if condition else nullcontext() - with cm: - # Perform operation, using optional_cm if condition is True - - .. note:: - Prefer using `contextlib.nullcontext` instead of this context manager. - """ - - def __init__(self, enter_result=None): - self.enter_result = enter_result - - def __enter__(self): - return self.enter_result - - def __exit__(self, *excinfo): - pass - - -def npy_load_module(name, fn, info=None): - """ - Load a module. Uses ``load_module`` which will be deprecated in python - 3.12. An alternative that uses ``exec_module`` is in - numpy.distutils.misc_util.exec_mod_from_location - - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. - info : tuple, optional - Only here for backward compatibility with Python 2.*. 
- - Returns - ------- - mod : module - - """ - # Explicitly lazy import this to avoid paying the cost - # of importing importlib at startup - from importlib.machinery import SourceFileLoader - return SourceFileLoader(name, fn).load_module() - - -os_fspath = os.fspath -os_PathLike = os.PathLike diff --git a/blimgui/dist64/numpy/conftest.py b/blimgui/dist64/numpy/conftest.py index 193f350..47493a2 100644 --- a/blimgui/dist64/numpy/conftest.py +++ b/blimgui/dist64/numpy/conftest.py @@ -2,20 +2,18 @@ Pytest configuration and fixtures for the Numpy test suite. """ import os -import string import sys import tempfile -from contextlib import contextmanager import warnings +from contextlib import contextmanager +from pathlib import Path import hypothesis import pytest -import numpy -import numpy as np +import numpy from numpy._core._multiarray_tests import get_fpu_mode -from numpy._core.tests._natype import pd_NA -from numpy.testing._private.utils import NOGIL_BUILD, get_stringdtype_dtype +from numpy.testing._private.utils import NOGIL_BUILD try: from scipy_doctest.conftest import dt_config @@ -23,6 +21,11 @@ except ModuleNotFoundError: HAVE_SCPDT = False +try: + import pytest_run_parallel # noqa: F401 + PARALLEL_RUN_AVALIABLE = True +except ModuleNotFoundError: + PARALLEL_RUN_AVALIABLE = False _old_fpu_mode = None _collect_results = {} @@ -65,6 +68,17 @@ def pytest_configure(config): "slow: Tests that are very slow.") config.addinivalue_line("markers", "slow_pypy: Tests that are very slow on pypy.") + if not PARALLEL_RUN_AVALIABLE: + config.addinivalue_line("markers", + "parallel_threads(n): run the given test function in parallel " + "using `n` threads.", + ) + config.addinivalue_line("markers", + "iterations(n): run the given test function `n` times in each thread", + ) + config.addinivalue_line("markers", + "thread_unsafe: mark the test function as single-threaded", + ) def pytest_addoption(parser): @@ -102,8 +116,8 @@ def pytest_terminal_summary(terminalreporter, 
exitstatus, config): tr.line("code that re-enables the GIL should do so in a subprocess.") pytest.exit("GIL re-enabled during tests", returncode=1) -#FIXME when yield tests are gone. -@pytest.hookimpl() +# FIXME when yield tests are gone. +@pytest.hookimpl(tryfirst=True) def pytest_itemcollected(item): """ Check FPU precision mode was not changed during test collection. @@ -122,6 +136,11 @@ def pytest_itemcollected(item): _collect_results[item] = (_old_fpu_mode, mode) _old_fpu_mode = mode + # mark f2py tests as thread unsafe + if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests': + item.add_marker(pytest.mark.thread_unsafe( + reason="f2py tests are thread-unsafe")) + @pytest.fixture(scope="function", autouse=True) def check_fpu_mode(request): @@ -133,25 +152,20 @@ def check_fpu_mode(request): new_mode = get_fpu_mode() if old_mode != new_mode: - raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" - " during the test".format(old_mode, new_mode)) + raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to " + f"{new_mode:#x} during the test") collect_result = _collect_results.get(request.node) if collect_result is not None: old_mode, new_mode = collect_result - raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" - " when collecting the test".format(old_mode, - new_mode)) + raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to " + f"{new_mode:#x} when collecting the test") @pytest.fixture(autouse=True) def add_np(doctest_namespace): doctest_namespace['np'] = numpy -@pytest.fixture(autouse=True) -def env_setup(monkeypatch): - monkeypatch.setenv('PYTHONHASHSEED', '0') - if HAVE_SCPDT: @@ -167,12 +181,12 @@ def warnings_errors_and_rng(test=None): "msvccompiler", "Deprecated call", "numpy.core", - "`np.compat`", "Importing from numpy.matlib", "This function is deprecated.", # random_integers "Data type alias 'a'", # numpy.rec.fromfile "Arrays of 2-dimensional vectors", # matlib.cross 
- "`in1d` is deprecated", ] + "NumPy warning suppression and assertion utilities are deprecated." + ] msg = "|".join(msgs) msgs_r = [ @@ -230,32 +244,5 @@ def warnings_errors_and_rng(test=None): 'numpy/_core/cversions.py', 'numpy/_pyinstaller', 'numpy/random/_examples', - 'numpy/compat', 'numpy/f2py/_backends/_distutils.py', ] - - -@pytest.fixture -def random_string_list(): - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = np.random.choice(chars, size=100 * 10, replace=True) - return ret.view("U100") - - -@pytest.fixture(params=[True, False]) -def coerce(request): - return request.param - - -@pytest.fixture( - params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], - ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], -) -def na_object(request): - return request.param - - -@pytest.fixture() -def dtype(na_object, coerce): - return get_stringdtype_dtype(na_object, coerce) diff --git a/blimgui/dist64/numpy/core/__init__.py b/blimgui/dist64/numpy/core/__init__.py index 7e0f320..f5bd550 100644 --- a/blimgui/dist64/numpy/core/__init__.py +++ b/blimgui/dist64/numpy/core/__init__.py @@ -4,6 +4,7 @@ `numpy.core` will be removed in the future. 
""" from numpy import _core + from ._utils import _raise_warning @@ -21,7 +22,7 @@ def _ufunc_reconstruct(module, name): # force lazy-loading of submodules to ensure a warning is printed -__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype", +__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype", # noqa: F822 "einsumfunc", "fromnumeric", "function_base", "getlimits", "_internal", "multiarray", "_multiarray_umath", "numeric", "numerictypes", "overrides", "records", "shape_base", "umath"] diff --git a/blimgui/dist64/numpy/core/_dtype.py b/blimgui/dist64/numpy/core/_dtype.py index 7228c09..9bda910 100644 --- a/blimgui/dist64/numpy/core/_dtype.py +++ b/blimgui/dist64/numpy/core/_dtype.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import _dtype + from ._utils import _raise_warning ret = getattr(_dtype, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/_dtype_ctypes.py b/blimgui/dist64/numpy/core/_dtype_ctypes.py index e59a996..4e82bed 100644 --- a/blimgui/dist64/numpy/core/_dtype_ctypes.py +++ b/blimgui/dist64/numpy/core/_dtype_ctypes.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import _dtype_ctypes + from ._utils import _raise_warning ret = getattr(_dtype_ctypes, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/_internal.py b/blimgui/dist64/numpy/core/_internal.py index 3080bfb..f9c3d21 100644 --- a/blimgui/dist64/numpy/core/_internal.py +++ b/blimgui/dist64/numpy/core/_internal.py @@ -1,5 +1,6 @@ from numpy._core import _internal + # Build a new array from the information in a pickle. 
# Note that the name numpy.core._internal._reconstruct is embedded in # pickles of ndarrays made with NumPy before release 1.0 @@ -16,6 +17,7 @@ def _reconstruct(subtype, shape, dtype): def __getattr__(attr_name): from numpy._core import _internal + from ._utils import _raise_warning ret = getattr(_internal, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/_multiarray_umath.py b/blimgui/dist64/numpy/core/_multiarray_umath.py index 235f667..7c188fd 100644 --- a/blimgui/dist64/numpy/core/_multiarray_umath.py +++ b/blimgui/dist64/numpy/core/_multiarray_umath.py @@ -1,5 +1,5 @@ -from numpy._core import _multiarray_umath from numpy import ufunc +from numpy._core import _multiarray_umath for item in _multiarray_umath.__dir__(): # ufuncs appear in pickles with a path in numpy.core._multiarray_umath @@ -11,13 +11,15 @@ def __getattr__(attr_name): from numpy._core import _multiarray_umath + from ._utils import _raise_warning if attr_name in {"_ARRAY_API", "_UFUNC_API"}: - from numpy.version import short_version + import sys import textwrap import traceback - import sys + + from numpy.version import short_version msg = textwrap.dedent(f""" A module that was compiled using NumPy 1.x cannot be run in diff --git a/blimgui/dist64/numpy/core/arrayprint.py b/blimgui/dist64/numpy/core/arrayprint.py index 17bcd64..6f3bcf8 100644 --- a/blimgui/dist64/numpy/core/arrayprint.py +++ b/blimgui/dist64/numpy/core/arrayprint.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import arrayprint + from ._utils import _raise_warning ret = getattr(arrayprint, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/defchararray.py b/blimgui/dist64/numpy/core/defchararray.py index 941bcb7..da28880 100644 --- a/blimgui/dist64/numpy/core/defchararray.py +++ b/blimgui/dist64/numpy/core/defchararray.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import defchararray + from ._utils import _raise_warning ret = getattr(defchararray, 
attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/einsumfunc.py b/blimgui/dist64/numpy/core/einsumfunc.py index 58d1c02..d1e974e 100644 --- a/blimgui/dist64/numpy/core/einsumfunc.py +++ b/blimgui/dist64/numpy/core/einsumfunc.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import einsumfunc + from ._utils import _raise_warning ret = getattr(einsumfunc, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/fromnumeric.py b/blimgui/dist64/numpy/core/fromnumeric.py index 987a6e2..e50084f 100644 --- a/blimgui/dist64/numpy/core/fromnumeric.py +++ b/blimgui/dist64/numpy/core/fromnumeric.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import fromnumeric + from ._utils import _raise_warning ret = getattr(fromnumeric, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/function_base.py b/blimgui/dist64/numpy/core/function_base.py index 3e7332b..88fc226 100644 --- a/blimgui/dist64/numpy/core/function_base.py +++ b/blimgui/dist64/numpy/core/function_base.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import function_base + from ._utils import _raise_warning ret = getattr(function_base, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/getlimits.py b/blimgui/dist64/numpy/core/getlimits.py index 2a0b78d..65c1107 100644 --- a/blimgui/dist64/numpy/core/getlimits.py +++ b/blimgui/dist64/numpy/core/getlimits.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import getlimits + from ._utils import _raise_warning ret = getattr(getlimits, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/multiarray.py b/blimgui/dist64/numpy/core/multiarray.py index ea7f4db..bb604a8 100644 --- a/blimgui/dist64/numpy/core/multiarray.py +++ b/blimgui/dist64/numpy/core/multiarray.py @@ -12,6 +12,7 @@ def __getattr__(attr_name): from numpy._core import multiarray + from ._utils import _raise_warning ret = getattr(multiarray, attr_name, None) if 
ret is None: diff --git a/blimgui/dist64/numpy/core/numeric.py b/blimgui/dist64/numpy/core/numeric.py index deb3cfc..dcf0fd1 100644 --- a/blimgui/dist64/numpy/core/numeric.py +++ b/blimgui/dist64/numpy/core/numeric.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import numeric + from ._utils import _raise_warning sentinel = object() diff --git a/blimgui/dist64/numpy/core/numerictypes.py b/blimgui/dist64/numpy/core/numerictypes.py index 2faf7ec..9ed72d6 100644 --- a/blimgui/dist64/numpy/core/numerictypes.py +++ b/blimgui/dist64/numpy/core/numerictypes.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import numerictypes + from ._utils import _raise_warning ret = getattr(numerictypes, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/overrides.py b/blimgui/dist64/numpy/core/overrides.py index a414303..eac0bb4 100644 --- a/blimgui/dist64/numpy/core/overrides.py +++ b/blimgui/dist64/numpy/core/overrides.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import overrides + from ._utils import _raise_warning ret = getattr(overrides, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/records.py b/blimgui/dist64/numpy/core/records.py index 180f201..e3c84dd 100644 --- a/blimgui/dist64/numpy/core/records.py +++ b/blimgui/dist64/numpy/core/records.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import records + from ._utils import _raise_warning ret = getattr(records, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/shape_base.py b/blimgui/dist64/numpy/core/shape_base.py index f5ef307..2372a66 100644 --- a/blimgui/dist64/numpy/core/shape_base.py +++ b/blimgui/dist64/numpy/core/shape_base.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import shape_base + from ._utils import _raise_warning ret = getattr(shape_base, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/core/umath.py b/blimgui/dist64/numpy/core/umath.py index 
af8fff1..836c0b3 100644 --- a/blimgui/dist64/numpy/core/umath.py +++ b/blimgui/dist64/numpy/core/umath.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import umath + from ._utils import _raise_warning ret = getattr(umath, attr_name, None) if ret is None: diff --git a/blimgui/dist64/numpy/ctypeslib/__init__.py b/blimgui/dist64/numpy/ctypeslib/__init__.py new file mode 100644 index 0000000..fd3c773 --- /dev/null +++ b/blimgui/dist64/numpy/ctypeslib/__init__.py @@ -0,0 +1,13 @@ +from ._ctypeslib import ( + __all__, + __doc__, + _concrete_ndptr, + _ndptr, + as_array, + as_ctypes, + as_ctypes_type, + c_intp, + ctypes, + load_library, + ndpointer, +) diff --git a/blimgui/dist64/numpy/ctypeslib/__init__.pyi b/blimgui/dist64/numpy/ctypeslib/__init__.pyi new file mode 100644 index 0000000..f088d02 --- /dev/null +++ b/blimgui/dist64/numpy/ctypeslib/__init__.pyi @@ -0,0 +1,15 @@ +import ctypes +from ctypes import c_int64 as _c_intp + +from ._ctypeslib import ( + __all__ as __all__, + __doc__ as __doc__, + _concrete_ndptr as _concrete_ndptr, + _ndptr as _ndptr, + as_array as as_array, + as_ctypes as as_ctypes, + as_ctypes_type as as_ctypes_type, + c_intp as c_intp, + load_library as load_library, + ndpointer as ndpointer, +) diff --git a/blimgui/dist64/numpy/ctypeslib.py b/blimgui/dist64/numpy/ctypeslib/_ctypeslib.py similarity index 90% rename from blimgui/dist64/numpy/ctypeslib.py rename to blimgui/dist64/numpy/ctypeslib/_ctypeslib.py index a34f1ed..9255603 100644 --- a/blimgui/dist64/numpy/ctypeslib.py +++ b/blimgui/dist64/numpy/ctypeslib/_ctypeslib.py @@ -1,602 +1,603 @@ -""" -============================ -``ctypes`` Utility Functions -============================ - -See Also --------- -load_library : Load a C library. -ndpointer : Array restype/argtype with verification. -as_ctypes : Create a ctypes array from an ndarray. -as_array : Create an ndarray from a ctypes array. - -References ----------- -.. 
[1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html - -Examples --------- -Load the C library: - ->>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP - -Our result type, an ndarray that must be of type double, be 1-dimensional -and is C-contiguous in memory: - ->>> array_1d_double = np.ctypeslib.ndpointer( -... dtype=np.double, -... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP - -Our C-function typically takes an array and updates its values -in-place. For example:: - - void foo_func(double* x, int length) - { - int i; - for (i = 0; i < length; i++) { - x[i] = i*i; - } - } - -We wrap it using: - ->>> _lib.foo_func.restype = None #doctest: +SKIP ->>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP - -Then, we're ready to call ``foo_func``: - ->>> out = np.empty(15, dtype=np.double) ->>> _lib.foo_func(out, len(out)) #doctest: +SKIP - -""" -__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array', - 'as_ctypes_type'] - -import os -import numpy as np -from numpy._core.multiarray import _flagdict, flagsobj - -try: - import ctypes -except ImportError: - ctypes = None - -if ctypes is None: - def _dummy(*args, **kwds): - """ - Dummy object that raises an ImportError if ctypes is not available. - - Raises - ------ - ImportError - If ctypes is not available. - - """ - raise ImportError("ctypes is not available.") - load_library = _dummy - as_ctypes = _dummy - as_array = _dummy - from numpy import intp as c_intp - _ndptr_base = object -else: - import numpy._core._internal as nic - c_intp = nic._getintp_ctype() - del nic - _ndptr_base = ctypes.c_void_p - - # Adapted from Albert Strasheim - def load_library(libname, loader_path): - """ - It is possible to load a library using - - >>> lib = ctypes.cdll[] # doctest: +SKIP - - But there are cross-platform considerations, such as library file extensions, - plus the fact Windows will just load the first library it finds with that name. 
- NumPy supplies the load_library function as a convenience. - - .. versionchanged:: 1.20.0 - Allow libname and loader_path to take any - :term:`python:path-like object`. - - Parameters - ---------- - libname : path-like - Name of the library, which can have 'lib' as a prefix, - but without an extension. - loader_path : path-like - Where the library can be found. - - Returns - ------- - ctypes.cdll[libpath] : library object - A ctypes library object - - Raises - ------ - OSError - If there is no library with the expected extension, or the - library is defective and cannot be loaded. - """ - # Convert path-like objects into strings - libname = os.fsdecode(libname) - loader_path = os.fsdecode(loader_path) - - ext = os.path.splitext(libname)[1] - if not ext: - import sys - import sysconfig - # Try to load library with platform-specific name, otherwise - # default to libname.[so|dll|dylib]. Sometimes, these files are - # built erroneously on non-linux platforms. - base_ext = ".so" - if sys.platform.startswith("darwin"): - base_ext = ".dylib" - elif sys.platform.startswith("win"): - base_ext = ".dll" - libname_ext = [libname + base_ext] - so_ext = sysconfig.get_config_var("EXT_SUFFIX") - if not so_ext == base_ext: - libname_ext.insert(0, libname + so_ext) - else: - libname_ext = [libname] - - loader_path = os.path.abspath(loader_path) - if not os.path.isdir(loader_path): - libdir = os.path.dirname(loader_path) - else: - libdir = loader_path - - for ln in libname_ext: - libpath = os.path.join(libdir, ln) - if os.path.exists(libpath): - try: - return ctypes.cdll[libpath] - except OSError: - ## defective lib file - raise - ## if no successful return in the libname_ext loop: - raise OSError("no file with expected extension") - - -def _num_fromflags(flaglist): - num = 0 - for val in flaglist: - num += _flagdict[val] - return num - -_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', - 'OWNDATA', 'WRITEBACKIFCOPY'] -def _flags_fromnum(num): - res = [] - for 
key in _flagnames: - value = _flagdict[key] - if (num & value): - res.append(key) - return res - - -class _ndptr(_ndptr_base): - @classmethod - def from_param(cls, obj): - if not isinstance(obj, np.ndarray): - raise TypeError("argument must be an ndarray") - if cls._dtype_ is not None \ - and obj.dtype != cls._dtype_: - raise TypeError("array must have data type %s" % cls._dtype_) - if cls._ndim_ is not None \ - and obj.ndim != cls._ndim_: - raise TypeError("array must have %d dimension(s)" % cls._ndim_) - if cls._shape_ is not None \ - and obj.shape != cls._shape_: - raise TypeError("array must have shape %s" % str(cls._shape_)) - if cls._flags_ is not None \ - and ((obj.flags.num & cls._flags_) != cls._flags_): - raise TypeError("array must have flags %s" % - _flags_fromnum(cls._flags_)) - return obj.ctypes - - -class _concrete_ndptr(_ndptr): - """ - Like _ndptr, but with `_shape_` and `_dtype_` specified. - - Notably, this means the pointer has enough information to reconstruct - the array, which is not generally true. - """ - def _check_retval_(self): - """ - This method is called when this class is used as the .restype - attribute for a shared-library function, to automatically wrap the - pointer into an array. - """ - return self.contents - - @property - def contents(self): - """ - Get an ndarray viewing the data pointed to by this pointer. - - This mirrors the `contents` attribute of a normal ctypes pointer - """ - full_dtype = np.dtype((self._dtype_, self._shape_)) - full_ctype = ctypes.c_char * full_dtype.itemsize - buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents - return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) - - -# Factory for an array-checking class with from_param defined for -# use with ctypes argtypes mechanism -_pointer_type_cache = {} -def ndpointer(dtype=None, ndim=None, shape=None, flags=None): - """ - Array-checking restype/argtypes. 
- - An ndpointer instance is used to describe an ndarray in restypes - and argtypes specifications. This approach is more flexible than - using, for example, ``POINTER(c_double)``, since several restrictions - can be specified, which are verified upon calling the ctypes function. - These include data type, number of dimensions, shape and flags. If a - given array does not satisfy the specified restrictions, - a ``TypeError`` is raised. - - Parameters - ---------- - dtype : data-type, optional - Array data-type. - ndim : int, optional - Number of array dimensions. - shape : tuple of ints, optional - Array shape. - flags : str or tuple of str - Array flags; may be one or more of: - - - C_CONTIGUOUS / C / CONTIGUOUS - - F_CONTIGUOUS / F / FORTRAN - - OWNDATA / O - - WRITEABLE / W - - ALIGNED / A - - WRITEBACKIFCOPY / X - - Returns - ------- - klass : ndpointer type object - A type object, which is an ``_ndtpr`` instance containing - dtype, ndim, shape and flags information. - - Raises - ------ - TypeError - If a given array does not satisfy the specified restrictions. - - Examples - -------- - >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64, - ... ndim=1, - ... flags='C_CONTIGUOUS')] - ... #doctest: +SKIP - >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64)) - ... 
#doctest: +SKIP - - """ - - # normalize dtype to dtype | None - if dtype is not None: - dtype = np.dtype(dtype) - - # normalize flags to int | None - num = None - if flags is not None: - if isinstance(flags, str): - flags = flags.split(',') - elif isinstance(flags, (int, np.integer)): - num = flags - flags = _flags_fromnum(num) - elif isinstance(flags, flagsobj): - num = flags.num - flags = _flags_fromnum(num) - if num is None: - try: - flags = [x.strip().upper() for x in flags] - except Exception as e: - raise TypeError("invalid flags specification") from e - num = _num_fromflags(flags) - - # normalize shape to tuple | None - if shape is not None: - try: - shape = tuple(shape) - except TypeError: - # single integer -> 1-tuple - shape = (shape,) - - cache_key = (dtype, ndim, shape, num) - - try: - return _pointer_type_cache[cache_key] - except KeyError: - pass - - # produce a name for the new type - if dtype is None: - name = 'any' - elif dtype.names is not None: - name = str(id(dtype)) - else: - name = dtype.str - if ndim is not None: - name += "_%dd" % ndim - if shape is not None: - name += "_"+"x".join(str(x) for x in shape) - if flags is not None: - name += "_"+"_".join(flags) - - if dtype is not None and shape is not None: - base = _concrete_ndptr - else: - base = _ndptr - - klass = type("ndpointer_%s"%name, (base,), - {"_dtype_": dtype, - "_shape_" : shape, - "_ndim_" : ndim, - "_flags_" : num}) - _pointer_type_cache[cache_key] = klass - return klass - - -if ctypes is not None: - def _ctype_ndarray(element_type, shape): - """ Create an ndarray of the given element type and shape """ - for dim in shape[::-1]: - element_type = dim * element_type - # prevent the type name include np.ctypeslib - element_type.__module__ = None - return element_type - - - def _get_scalar_type_map(): - """ - Return a dictionary mapping native endian scalar dtype to ctypes types - """ - ct = ctypes - simple_types = [ - ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong, - 
ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong, - ct.c_float, ct.c_double, - ct.c_bool, - ] - return {np.dtype(ctype): ctype for ctype in simple_types} - - - _scalar_type_map = _get_scalar_type_map() - - - def _ctype_from_dtype_scalar(dtype): - # swapping twice ensure that `=` is promoted to <, >, or | - dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S') - dtype_native = dtype.newbyteorder('=') - try: - ctype = _scalar_type_map[dtype_native] - except KeyError as e: - raise NotImplementedError( - "Converting {!r} to a ctypes type".format(dtype) - ) from None - - if dtype_with_endian.byteorder == '>': - ctype = ctype.__ctype_be__ - elif dtype_with_endian.byteorder == '<': - ctype = ctype.__ctype_le__ - - return ctype - - - def _ctype_from_dtype_subarray(dtype): - element_dtype, shape = dtype.subdtype - ctype = _ctype_from_dtype(element_dtype) - return _ctype_ndarray(ctype, shape) - - - def _ctype_from_dtype_structured(dtype): - # extract offsets of each field - field_data = [] - for name in dtype.names: - field_dtype, offset = dtype.fields[name][:2] - field_data.append((offset, name, _ctype_from_dtype(field_dtype))) - - # ctypes doesn't care about field order - field_data = sorted(field_data, key=lambda f: f[0]) - - if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data): - # union, if multiple fields all at address 0 - size = 0 - _fields_ = [] - for offset, name, ctype in field_data: - _fields_.append((name, ctype)) - size = max(size, ctypes.sizeof(ctype)) - - # pad to the right size - if dtype.itemsize != size: - _fields_.append(('', ctypes.c_char * dtype.itemsize)) - - # we inserted manual padding, so always `_pack_` - return type('union', (ctypes.Union,), dict( - _fields_=_fields_, - _pack_=1, - __module__=None, - )) - else: - last_offset = 0 - _fields_ = [] - for offset, name, ctype in field_data: - padding = offset - last_offset - if padding < 0: - raise NotImplementedError("Overlapping fields") - if padding 
> 0: - _fields_.append(('', ctypes.c_char * padding)) - - _fields_.append((name, ctype)) - last_offset = offset + ctypes.sizeof(ctype) - - - padding = dtype.itemsize - last_offset - if padding > 0: - _fields_.append(('', ctypes.c_char * padding)) - - # we inserted manual padding, so always `_pack_` - return type('struct', (ctypes.Structure,), dict( - _fields_=_fields_, - _pack_=1, - __module__=None, - )) - - - def _ctype_from_dtype(dtype): - if dtype.fields is not None: - return _ctype_from_dtype_structured(dtype) - elif dtype.subdtype is not None: - return _ctype_from_dtype_subarray(dtype) - else: - return _ctype_from_dtype_scalar(dtype) - - - def as_ctypes_type(dtype): - r""" - Convert a dtype into a ctypes type. - - Parameters - ---------- - dtype : dtype - The dtype to convert - - Returns - ------- - ctype - A ctype scalar, union, array, or struct - - Raises - ------ - NotImplementedError - If the conversion is not possible - - Notes - ----- - This function does not losslessly round-trip in either direction. - - ``np.dtype(as_ctypes_type(dt))`` will: - - - insert padding fields - - reorder fields to be sorted by offset - - discard field titles - - ``as_ctypes_type(np.dtype(ctype))`` will: - - - discard the class names of `ctypes.Structure`\ s and - `ctypes.Union`\ s - - convert single-element `ctypes.Union`\ s into single-element - `ctypes.Structure`\ s - - insert padding fields - - Examples - -------- - Converting a simple dtype: - - >>> dt = np.dtype('int8') - >>> ctype = np.ctypeslib.as_ctypes_type(dt) - >>> ctype - - - Converting a structured dtype: - - >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')]) - >>> ctype = np.ctypeslib.as_ctypes_type(dt) - >>> ctype - - - """ - return _ctype_from_dtype(np.dtype(dtype)) - - - def as_array(obj, shape=None): - """ - Create a numpy array from a ctypes array or POINTER. - - The numpy array shares the memory with the ctypes object. - - The shape parameter must be given if converting from a ctypes POINTER. 
- The shape parameter is ignored if converting from a ctypes array - - Examples - -------- - Converting a ctypes integer array: - - >>> import ctypes - >>> ctypes_array = (ctypes.c_int * 5)(0, 1, 2, 3, 4) - >>> np_array = np.ctypeslib.as_array(ctypes_array) - >>> np_array - array([0, 1, 2, 3, 4], dtype=int32) - - Converting a ctypes POINTER: - - >>> import ctypes - >>> buffer = (ctypes.c_int * 5)(0, 1, 2, 3, 4) - >>> pointer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_int)) - >>> np_array = np.ctypeslib.as_array(pointer, (5,)) - >>> np_array - array([0, 1, 2, 3, 4], dtype=int32) - - """ - if isinstance(obj, ctypes._Pointer): - # convert pointers to an array of the desired shape - if shape is None: - raise TypeError( - 'as_array() requires a shape argument when called on a ' - 'pointer') - p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) - obj = ctypes.cast(obj, p_arr_type).contents - - return np.asarray(obj) - - - def as_ctypes(obj): - """ - Create and return a ctypes object from a numpy array. Actually - anything that exposes the __array_interface__ is accepted. 
- - Examples - -------- - Create ctypes object from inferred int ``np.array``: - - >>> inferred_int_array = np.array([1, 2, 3]) - >>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array) - >>> type(c_int_array) - - >>> c_int_array[:] - [1, 2, 3] - - Create ctypes object from explicit 8 bit unsigned int ``np.array`` : - - >>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8) - >>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array) - >>> type(c_int_array) - - >>> c_int_array[:] - [1, 2, 3] - - """ - ai = obj.__array_interface__ - if ai["strides"]: - raise TypeError("strided arrays not supported") - if ai["version"] != 3: - raise TypeError("only __array_interface__ version 3 supported") - addr, readonly = ai["data"] - if readonly: - raise TypeError("readonly arrays unsupported") - - # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows - # dtype.itemsize (gh-14214) - ctype_scalar = as_ctypes_type(ai["typestr"]) - result_type = _ctype_ndarray(ctype_scalar, ai["shape"]) - result = result_type.from_address(addr) - result.__keep = obj - return result +""" +============================ +``ctypes`` Utility Functions +============================ + +See Also +-------- +load_library : Load a C library. +ndpointer : Array restype/argtype with verification. +as_ctypes : Create a ctypes array from an ndarray. +as_array : Create an ndarray from a ctypes array. + +References +---------- +.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html + +Examples +-------- +Load the C library: + +>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP + +Our result type, an ndarray that must be of type double, be 1-dimensional +and is C-contiguous in memory: + +>>> array_1d_double = np.ctypeslib.ndpointer( +... dtype=np.double, +... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP + +Our C-function typically takes an array and updates its values +in-place. 
For example:: + + void foo_func(double* x, int length) + { + int i; + for (i = 0; i < length; i++) { + x[i] = i*i; + } + } + +We wrap it using: + +>>> _lib.foo_func.restype = None #doctest: +SKIP +>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP + +Then, we're ready to call ``foo_func``: + +>>> out = np.empty(15, dtype=np.double) +>>> _lib.foo_func(out, len(out)) #doctest: +SKIP + +""" +__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array', + 'as_ctypes_type'] + +import os + +import numpy as np +import numpy._core.multiarray as mu +from numpy._utils import set_module + +try: + import ctypes +except ImportError: + ctypes = None + +if ctypes is None: + @set_module("numpy.ctypeslib") + def _dummy(*args, **kwds): + """ + Dummy object that raises an ImportError if ctypes is not available. + + Raises + ------ + ImportError + If ctypes is not available. + + """ + raise ImportError("ctypes is not available.") + load_library = _dummy + as_ctypes = _dummy + as_ctypes_type = _dummy + as_array = _dummy + ndpointer = _dummy + from numpy import intp as c_intp + _ndptr_base = object +else: + import numpy._core._internal as nic + c_intp = nic._getintp_ctype() + del nic + _ndptr_base = ctypes.c_void_p + + # Adapted from Albert Strasheim + @set_module("numpy.ctypeslib") + def load_library(libname, loader_path): + """ + It is possible to load a library using + + >>> lib = ctypes.cdll[] # doctest: +SKIP + + But there are cross-platform considerations, such as library file extensions, + plus the fact Windows will just load the first library it finds with that name. + NumPy supplies the load_library function as a convenience. + + .. versionchanged:: 1.20.0 + Allow libname and loader_path to take any + :term:`python:path-like object`. + + Parameters + ---------- + libname : path-like + Name of the library, which can have 'lib' as a prefix, + but without an extension. + loader_path : path-like + Where the library can be found. 
+ + Returns + ------- + ctypes.cdll[libpath] : library object + A ctypes library object + + Raises + ------ + OSError + If there is no library with the expected extension, or the + library is defective and cannot be loaded. + """ + # Convert path-like objects into strings + libname = os.fsdecode(libname) + loader_path = os.fsdecode(loader_path) + + ext = os.path.splitext(libname)[1] + if not ext: + import sys + import sysconfig + # Try to load library with platform-specific name, otherwise + # default to libname.[so|dll|dylib]. Sometimes, these files are + # built erroneously on non-linux platforms. + base_ext = ".so" + if sys.platform.startswith("darwin"): + base_ext = ".dylib" + elif sys.platform.startswith("win"): + base_ext = ".dll" + libname_ext = [libname + base_ext] + so_ext = sysconfig.get_config_var("EXT_SUFFIX") + if not so_ext == base_ext: + libname_ext.insert(0, libname + so_ext) + else: + libname_ext = [libname] + + loader_path = os.path.abspath(loader_path) + if not os.path.isdir(loader_path): + libdir = os.path.dirname(loader_path) + else: + libdir = loader_path + + for ln in libname_ext: + libpath = os.path.join(libdir, ln) + if os.path.exists(libpath): + try: + return ctypes.cdll[libpath] + except OSError: + # defective lib file + raise + # if no successful return in the libname_ext loop: + raise OSError("no file with expected extension") + + +def _num_fromflags(flaglist): + num = 0 + for val in flaglist: + num += mu._flagdict[val] + return num + + +_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', + 'OWNDATA', 'WRITEBACKIFCOPY'] +def _flags_fromnum(num): + res = [] + for key in _flagnames: + value = mu._flagdict[key] + if (num & value): + res.append(key) + return res + + +class _ndptr(_ndptr_base): + @classmethod + def from_param(cls, obj): + if not isinstance(obj, np.ndarray): + raise TypeError("argument must be an ndarray") + if cls._dtype_ is not None \ + and obj.dtype != cls._dtype_: + raise TypeError(f"array must have data 
type {cls._dtype_}") + if cls._ndim_ is not None \ + and obj.ndim != cls._ndim_: + raise TypeError("array must have %d dimension(s)" % cls._ndim_) + if cls._shape_ is not None \ + and obj.shape != cls._shape_: + raise TypeError(f"array must have shape {str(cls._shape_)}") + if cls._flags_ is not None \ + and ((obj.flags.num & cls._flags_) != cls._flags_): + raise TypeError(f"array must have flags {_flags_fromnum(cls._flags_)}") + return obj.ctypes + + +class _concrete_ndptr(_ndptr): + """ + Like _ndptr, but with `_shape_` and `_dtype_` specified. + + Notably, this means the pointer has enough information to reconstruct + the array, which is not generally true. + """ + def _check_retval_(self): + """ + This method is called when this class is used as the .restype + attribute for a shared-library function, to automatically wrap the + pointer into an array. + """ + return self.contents + + @property + def contents(self): + """ + Get an ndarray viewing the data pointed to by this pointer. + + This mirrors the `contents` attribute of a normal ctypes pointer + """ + full_dtype = np.dtype((self._dtype_, self._shape_)) + full_ctype = ctypes.c_char * full_dtype.itemsize + buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents + return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) + + +# Factory for an array-checking class with from_param defined for +# use with ctypes argtypes mechanism +_pointer_type_cache = {} + +@set_module("numpy.ctypeslib") +def ndpointer(dtype=None, ndim=None, shape=None, flags=None): + """ + Array-checking restype/argtypes. + + An ndpointer instance is used to describe an ndarray in restypes + and argtypes specifications. This approach is more flexible than + using, for example, ``POINTER(c_double)``, since several restrictions + can be specified, which are verified upon calling the ctypes function. + These include data type, number of dimensions, shape and flags. 
If a + given array does not satisfy the specified restrictions, + a ``TypeError`` is raised. + + Parameters + ---------- + dtype : data-type, optional + Array data-type. + ndim : int, optional + Number of array dimensions. + shape : tuple of ints, optional + Array shape. + flags : str or tuple of str + Array flags; may be one or more of: + + - C_CONTIGUOUS / C / CONTIGUOUS + - F_CONTIGUOUS / F / FORTRAN + - OWNDATA / O + - WRITEABLE / W + - ALIGNED / A + - WRITEBACKIFCOPY / X + + Returns + ------- + klass : ndpointer type object + A type object, which is an ``_ndtpr`` instance containing + dtype, ndim, shape and flags information. + + Raises + ------ + TypeError + If a given array does not satisfy the specified restrictions. + + Examples + -------- + >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64, + ... ndim=1, + ... flags='C_CONTIGUOUS')] + ... #doctest: +SKIP + >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64)) + ... #doctest: +SKIP + + """ + + # normalize dtype to dtype | None + if dtype is not None: + dtype = np.dtype(dtype) + + # normalize flags to int | None + num = None + if flags is not None: + if isinstance(flags, str): + flags = flags.split(',') + elif isinstance(flags, (int, np.integer)): + num = flags + flags = _flags_fromnum(num) + elif isinstance(flags, mu.flagsobj): + num = flags.num + flags = _flags_fromnum(num) + if num is None: + try: + flags = [x.strip().upper() for x in flags] + except Exception as e: + raise TypeError("invalid flags specification") from e + num = _num_fromflags(flags) + + # normalize shape to tuple | None + if shape is not None: + try: + shape = tuple(shape) + except TypeError: + # single integer -> 1-tuple + shape = (shape,) + + cache_key = (dtype, ndim, shape, num) + + try: + return _pointer_type_cache[cache_key] + except KeyError: + pass + + # produce a name for the new type + if dtype is None: + name = 'any' + elif dtype.names is not None: + name = str(id(dtype)) + else: + name = dtype.str + if 
ndim is not None: + name += "_%dd" % ndim + if shape is not None: + name += "_" + "x".join(str(x) for x in shape) + if flags is not None: + name += "_" + "_".join(flags) + + if dtype is not None and shape is not None: + base = _concrete_ndptr + else: + base = _ndptr + + klass = type(f"ndpointer_{name}", (base,), + {"_dtype_": dtype, + "_shape_": shape, + "_ndim_": ndim, + "_flags_": num}) + _pointer_type_cache[cache_key] = klass + return klass + + +if ctypes is not None: + def _ctype_ndarray(element_type, shape): + """ Create an ndarray of the given element type and shape """ + for dim in shape[::-1]: + element_type = dim * element_type + # prevent the type name include np.ctypeslib + element_type.__module__ = None + return element_type + + def _get_scalar_type_map(): + """ + Return a dictionary mapping native endian scalar dtype to ctypes types + """ + ct = ctypes + simple_types = [ + ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong, + ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong, + ct.c_float, ct.c_double, + ct.c_bool, + ] + return {np.dtype(ctype): ctype for ctype in simple_types} + + _scalar_type_map = _get_scalar_type_map() + + def _ctype_from_dtype_scalar(dtype): + # swapping twice ensure that `=` is promoted to <, >, or | + dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S') + dtype_native = dtype.newbyteorder('=') + try: + ctype = _scalar_type_map[dtype_native] + except KeyError as e: + raise NotImplementedError( + f"Converting {dtype!r} to a ctypes type" + ) from None + + if dtype_with_endian.byteorder == '>': + ctype = ctype.__ctype_be__ + elif dtype_with_endian.byteorder == '<': + ctype = ctype.__ctype_le__ + + return ctype + + def _ctype_from_dtype_subarray(dtype): + element_dtype, shape = dtype.subdtype + ctype = _ctype_from_dtype(element_dtype) + return _ctype_ndarray(ctype, shape) + + def _ctype_from_dtype_structured(dtype): + # extract offsets of each field + field_data = [] + for name in dtype.names: + 
field_dtype, offset = dtype.fields[name][:2] + field_data.append((offset, name, _ctype_from_dtype(field_dtype))) + + # ctypes doesn't care about field order + field_data = sorted(field_data, key=lambda f: f[0]) + + if len(field_data) > 1 and all(offset == 0 for offset, _, _ in field_data): + # union, if multiple fields all at address 0 + size = 0 + _fields_ = [] + for offset, name, ctype in field_data: + _fields_.append((name, ctype)) + size = max(size, ctypes.sizeof(ctype)) + + # pad to the right size + if dtype.itemsize != size: + _fields_.append(('', ctypes.c_char * dtype.itemsize)) + + # we inserted manual padding, so always `_pack_` + return type('union', (ctypes.Union,), { + '_fields_': _fields_, + '_pack_': 1, + '__module__': None, + }) + else: + last_offset = 0 + _fields_ = [] + for offset, name, ctype in field_data: + padding = offset - last_offset + if padding < 0: + raise NotImplementedError("Overlapping fields") + if padding > 0: + _fields_.append(('', ctypes.c_char * padding)) + + _fields_.append((name, ctype)) + last_offset = offset + ctypes.sizeof(ctype) + + padding = dtype.itemsize - last_offset + if padding > 0: + _fields_.append(('', ctypes.c_char * padding)) + + # we inserted manual padding, so always `_pack_` + return type('struct', (ctypes.Structure,), { + '_fields_': _fields_, + '_pack_': 1, + '__module__': None, + }) + + def _ctype_from_dtype(dtype): + if dtype.fields is not None: + return _ctype_from_dtype_structured(dtype) + elif dtype.subdtype is not None: + return _ctype_from_dtype_subarray(dtype) + else: + return _ctype_from_dtype_scalar(dtype) + + @set_module("numpy.ctypeslib") + def as_ctypes_type(dtype): + r""" + Convert a dtype into a ctypes type. 
+ + Parameters + ---------- + dtype : dtype + The dtype to convert + + Returns + ------- + ctype + A ctype scalar, union, array, or struct + + Raises + ------ + NotImplementedError + If the conversion is not possible + + Notes + ----- + This function does not losslessly round-trip in either direction. + + ``np.dtype(as_ctypes_type(dt))`` will: + + - insert padding fields + - reorder fields to be sorted by offset + - discard field titles + + ``as_ctypes_type(np.dtype(ctype))`` will: + + - discard the class names of `ctypes.Structure`\ s and + `ctypes.Union`\ s + - convert single-element `ctypes.Union`\ s into single-element + `ctypes.Structure`\ s + - insert padding fields + + Examples + -------- + Converting a simple dtype: + + >>> dt = np.dtype('int8') + >>> ctype = np.ctypeslib.as_ctypes_type(dt) + >>> ctype + + + Converting a structured dtype: + + >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')]) + >>> ctype = np.ctypeslib.as_ctypes_type(dt) + >>> ctype + + + """ + return _ctype_from_dtype(np.dtype(dtype)) + + @set_module("numpy.ctypeslib") + def as_array(obj, shape=None): + """ + Create a numpy array from a ctypes array or POINTER. + + The numpy array shares the memory with the ctypes object. + + The shape parameter must be given if converting from a ctypes POINTER. 
+ The shape parameter is ignored if converting from a ctypes array + + Examples + -------- + Converting a ctypes integer array: + + >>> import ctypes + >>> ctypes_array = (ctypes.c_int * 5)(0, 1, 2, 3, 4) + >>> np_array = np.ctypeslib.as_array(ctypes_array) + >>> np_array + array([0, 1, 2, 3, 4], dtype=int32) + + Converting a ctypes POINTER: + + >>> import ctypes + >>> buffer = (ctypes.c_int * 5)(0, 1, 2, 3, 4) + >>> pointer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_int)) + >>> np_array = np.ctypeslib.as_array(pointer, (5,)) + >>> np_array + array([0, 1, 2, 3, 4], dtype=int32) + + """ + if isinstance(obj, ctypes._Pointer): + # convert pointers to an array of the desired shape + if shape is None: + raise TypeError( + 'as_array() requires a shape argument when called on a ' + 'pointer') + p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) + obj = ctypes.cast(obj, p_arr_type).contents + + return np.asarray(obj) + + @set_module("numpy.ctypeslib") + def as_ctypes(obj): + """ + Create and return a ctypes object from a numpy array. Actually + anything that exposes the __array_interface__ is accepted. 
+ + Examples + -------- + Create ctypes object from inferred int ``np.array``: + + >>> inferred_int_array = np.array([1, 2, 3]) + >>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array) + >>> type(c_int_array) + + >>> c_int_array[:] + [1, 2, 3] + + Create ctypes object from explicit 8 bit unsigned int ``np.array`` : + + >>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8) + >>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array) + >>> type(c_int_array) + + >>> c_int_array[:] + [1, 2, 3] + + """ + ai = obj.__array_interface__ + if ai["strides"]: + raise TypeError("strided arrays not supported") + if ai["version"] != 3: + raise TypeError("only __array_interface__ version 3 supported") + addr, readonly = ai["data"] + if readonly: + raise TypeError("readonly arrays unsupported") + + # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows + # dtype.itemsize (gh-14214) + ctype_scalar = as_ctypes_type(ai["typestr"]) + result_type = _ctype_ndarray(ctype_scalar, ai["shape"]) + result = result_type.from_address(addr) + result.__keep = obj + return result diff --git a/blimgui/dist64/numpy/ctypeslib.pyi b/blimgui/dist64/numpy/ctypeslib/_ctypeslib.pyi similarity index 71% rename from blimgui/dist64/numpy/ctypeslib.pyi rename to blimgui/dist64/numpy/ctypeslib/_ctypeslib.pyi index cf4a77a..8881141 100644 --- a/blimgui/dist64/numpy/ctypeslib.pyi +++ b/blimgui/dist64/numpy/ctypeslib/_ctypeslib.pyi @@ -1,250 +1,236 @@ -# NOTE: Numpy's mypy plugin is used for importing the correct -# platform-specific `ctypes._SimpleCData[int]` sub-type -import ctypes -from ctypes import c_int64 as _c_intp - -from _typeshed import StrOrBytesPath -from collections.abc import Iterable, Sequence -from typing import ( - Literal as L, - Any, - TypeAlias, - TypeVar, - Generic, - overload, - ClassVar, -) - -import numpy as np -from numpy import ( - ndarray, - dtype, - generic, - byte, - short, - intc, - long, - longlong, - ubyte, - ushort, - uintc, - ulong, - ulonglong, - single, - 
double, - longdouble, - void, -) -from numpy._core._internal import _ctypes -from numpy._core.multiarray import flagsobj -from numpy._typing import ( - # Arrays - NDArray, - _ArrayLike, - - # Shapes - _Shape, - _ShapeLike, - - # DTypes - DTypeLike, - _DTypeLike, - _VoidDTypeLike, - _BoolCodes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _ULongCodes, - _ULongLongCodes, - _ByteCodes, - _ShortCodes, - _IntCCodes, - _LongCodes, - _LongLongCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes, -) - -__all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"] - -# TODO: Add a proper `_Shape` bound once we've got variadic typevars -_DType = TypeVar("_DType", bound=dtype[Any]) -_DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any]) -_SCT = TypeVar("_SCT", bound=generic) - -_FlagsKind: TypeAlias = L[ - 'C_CONTIGUOUS', 'CONTIGUOUS', 'C', - 'F_CONTIGUOUS', 'FORTRAN', 'F', - 'ALIGNED', 'A', - 'WRITEABLE', 'W', - 'OWNDATA', 'O', - 'WRITEBACKIFCOPY', 'X', -] - -# TODO: Add a shape typevar once we have variadic typevars (PEP 646) -class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]): - # In practice these 4 classvars are defined in the dynamic class - # returned by `ndpointer` - _dtype_: ClassVar[_DTypeOptional] - _shape_: ClassVar[None] - _ndim_: ClassVar[None | int] - _flags_: ClassVar[None | list[_FlagsKind]] - - @overload - @classmethod - def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... - @overload - @classmethod - def from_param(cls: type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes[Any]: ... - -class _concrete_ndptr(_ndptr[_DType]): - _dtype_: ClassVar[_DType] - _shape_: ClassVar[tuple[int, ...]] - @property - def contents(self) -> ndarray[_Shape, _DType]: ... - -def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... 
- -c_intp = _c_intp - -@overload -def ndpointer( - dtype: None = ..., - ndim: int = ..., - shape: None | _ShapeLike = ..., - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_ndptr[None]]: ... -@overload -def ndpointer( - dtype: _DTypeLike[_SCT], - ndim: int = ..., - *, - shape: _ShapeLike, - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_concrete_ndptr[dtype[_SCT]]]: ... -@overload -def ndpointer( - dtype: DTypeLike, - ndim: int = ..., - *, - shape: _ShapeLike, - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_concrete_ndptr[dtype[Any]]]: ... -@overload -def ndpointer( - dtype: _DTypeLike[_SCT], - ndim: int = ..., - shape: None = ..., - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_ndptr[dtype[_SCT]]]: ... -@overload -def ndpointer( - dtype: DTypeLike, - ndim: int = ..., - shape: None = ..., - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_ndptr[dtype[Any]]]: ... - -@overload -def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... -@overload -def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... -@overload -def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... -@overload -def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... -@overload -def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ... -@overload -def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ... -@overload -def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... -@overload -def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... 
-@overload -def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... -@overload -def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... -@overload -def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... -@overload -def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... -@overload -def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... -@overload -def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... -@overload -def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... -@overload -def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` -@overload -def as_ctypes_type(dtype: str) -> type[Any]: ... - -@overload -def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... -@overload -def as_array(obj: _ArrayLike[_SCT], shape: None | _ShapeLike = ...) -> NDArray[_SCT]: ... -@overload -def as_array(obj: object, shape: None | _ShapeLike = ...) -> NDArray[Any]: ... - -@overload -def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... -@overload -def as_ctypes(obj: byte) -> ctypes.c_byte: ... -@overload -def as_ctypes(obj: short) -> ctypes.c_short: ... -@overload -def as_ctypes(obj: intc) -> ctypes.c_int: ... -@overload -def as_ctypes(obj: long) -> ctypes.c_long: ... -@overload -def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... -@overload -def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... -@overload -def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... -@overload -def as_ctypes(obj: uintc) -> ctypes.c_uint: ... -@overload -def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... 
-@overload -def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... -@overload -def as_ctypes(obj: single) -> ctypes.c_float: ... -@overload -def as_ctypes(obj: double) -> ctypes.c_double: ... -@overload -def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... -@overload -def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure` -@overload -def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ... -@overload -def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... -@overload -def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... -@overload -def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... -@overload -def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... -@overload -def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... -@overload -def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... -@overload -def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... -@overload -def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... -@overload -def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... -@overload -def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... -@overload -def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... -@overload -def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... -@overload -def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... -@overload -def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... 
# `ctypes.Union` or `ctypes.Structure` +# NOTE: Numpy's mypy plugin is used for importing the correct +# platform-specific `ctypes._SimpleCData[int]` sub-type +import ctypes +from _typeshed import StrOrBytesPath +from collections.abc import Iterable, Sequence +from ctypes import c_int64 as _c_intp +from typing import Any, ClassVar, Generic, Literal as L, TypeAlias, TypeVar, overload + +import numpy as np +from numpy import ( + byte, + double, + dtype, + generic, + intc, + long, + longdouble, + longlong, + ndarray, + short, + single, + ubyte, + uintc, + ulong, + ulonglong, + ushort, + void, +) +from numpy._core._internal import _ctypes +from numpy._core.multiarray import flagsobj +from numpy._typing import ( + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, + _BoolCodes, + _ByteCodes, + _DoubleCodes, + _DTypeLike, + _IntCCodes, + _LongCodes, + _LongDoubleCodes, + _LongLongCodes, + _ShapeLike, + _ShortCodes, + _SingleCodes, + _UByteCodes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, + _UShortCodes, + _VoidDTypeLike, +) + +__all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"] + +# TODO: Add a proper `_Shape` bound once we've got variadic typevars +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None) +_ScalarT = TypeVar("_ScalarT", bound=generic) + +_FlagsKind: TypeAlias = L[ + "C_CONTIGUOUS", "CONTIGUOUS", "C", + "F_CONTIGUOUS", "FORTRAN", "F", + "ALIGNED", "A", + "WRITEABLE", "W", + "OWNDATA", "O", + "WRITEBACKIFCOPY", "X", +] + +# TODO: Add a shape typevar once we have variadic typevars (PEP 646) +class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): + # In practice these 4 classvars are defined in the dynamic class + # returned by `ndpointer` + _dtype_: ClassVar[_DTypeOptionalT] + _shape_: ClassVar[_AnyShape | None] + _ndim_: ClassVar[int | None] + _flags_: ClassVar[list[_FlagsKind] | None] + + @overload # type: ignore[override] + @classmethod + def from_param(cls: 
type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... + @overload + @classmethod + def from_param(cls: type[_ndptr[_DTypeT]], obj: ndarray[Any, _DTypeT]) -> _ctypes[Any]: ... + +class _concrete_ndptr(_ndptr[_DTypeT]): + _dtype_: ClassVar[_DTypeT] + _shape_: ClassVar[_AnyShape] + @property + def contents(self) -> ndarray[_AnyShape, _DTypeT]: ... + +def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... + +c_intp = _c_intp + +@overload +def ndpointer( + dtype: None = None, + ndim: int | None = None, + shape: _ShapeLike | None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_ndptr[None]]: ... +@overload +def ndpointer( + dtype: _DTypeLike[_ScalarT], + ndim: int | None = None, + *, + shape: _ShapeLike, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... +@overload +def ndpointer( + dtype: DTypeLike | None, + ndim: int | None = None, + *, + shape: _ShapeLike, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_concrete_ndptr[dtype]]: ... +@overload +def ndpointer( + dtype: _DTypeLike[_ScalarT], + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_ndptr[dtype[_ScalarT]]]: ... +@overload +def ndpointer( + dtype: DTypeLike | None, + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_ndptr[dtype]]: ... + +@overload +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... +@overload +def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... +@overload +def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... 
+@overload +def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... +@overload +def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ... +@overload +def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ... +@overload +def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... +@overload +def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... +@overload +def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... +@overload +def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... +@overload +def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... +@overload +def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... +@overload +def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... +@overload +def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... +@overload +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... +@overload +def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` +@overload +def as_ctypes_type(dtype: str) -> type[Any]: ... + +@overload +def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... +@overload +def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +@overload +def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... 
+ +@overload +def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... +@overload +def as_ctypes(obj: byte) -> ctypes.c_byte: ... +@overload +def as_ctypes(obj: short) -> ctypes.c_short: ... +@overload +def as_ctypes(obj: intc) -> ctypes.c_int: ... +@overload +def as_ctypes(obj: long) -> ctypes.c_long: ... +@overload +def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... # type: ignore[overload-cannot-match] +@overload +def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... +@overload +def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... +@overload +def as_ctypes(obj: uintc) -> ctypes.c_uint: ... +@overload +def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... +@overload +def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... # type: ignore[overload-cannot-match] +@overload +def as_ctypes(obj: single) -> ctypes.c_float: ... +@overload +def as_ctypes(obj: double) -> ctypes.c_double: ... +@overload +def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... +@overload +def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure` +@overload +def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ... +@overload +def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... +@overload +def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... +@overload +def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... +@overload +def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... +@overload +def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... # type: ignore[overload-cannot-match] +@overload +def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... +@overload +def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... +@overload +def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... +@overload +def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... 
+@overload +def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... # type: ignore[overload-cannot-match] +@overload +def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... +@overload +def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... +@overload +def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... +@overload +def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure` diff --git a/blimgui/dist64/numpy/dtypes.pyi b/blimgui/dist64/numpy/dtypes.pyi index 375806c..fbabd98 100644 --- a/blimgui/dist64/numpy/dtypes.pyi +++ b/blimgui/dist64/numpy/dtypes.pyi @@ -1,59 +1,68 @@ # ruff: noqa: ANN401 -from types import MemberDescriptorType -from typing import Any, ClassVar, Generic, NoReturn, TypeAlias, final, type_check_only -from typing import Literal as L - -from typing_extensions import LiteralString, Self, TypeVar +from typing import ( + Any, + Generic, + Literal as L, + LiteralString, + Never, + NoReturn, + Self, + TypeAlias, + final, + overload, + type_check_only, +) +from typing_extensions import TypeVar import numpy as np __all__ = [ # noqa: RUF022 - 'BoolDType', - 'Int8DType', - 'ByteDType', - 'UInt8DType', - 'UByteDType', - 'Int16DType', - 'ShortDType', - 'UInt16DType', - 'UShortDType', - 'Int32DType', - 'IntDType', - 'UInt32DType', - 'UIntDType', - 'Int64DType', - 'LongDType', - 'UInt64DType', - 'ULongDType', - 'LongLongDType', - 'ULongLongDType', - 'Float16DType', - 'Float32DType', - 'Float64DType', - 'LongDoubleDType', - 'Complex64DType', - 'Complex128DType', - 'CLongDoubleDType', - 'ObjectDType', - 'BytesDType', - 'StrDType', - 'VoidDType', - 'DateTime64DType', - 'TimeDelta64DType', - 'StringDType', + "BoolDType", + "Int8DType", + "ByteDType", + "UInt8DType", + "UByteDType", + "Int16DType", + "ShortDType", + "UInt16DType", + "UShortDType", + "Int32DType", + "IntDType", + "UInt32DType", + "UIntDType", + "Int64DType", + "LongDType", 
+ "UInt64DType", + "ULongDType", + "LongLongDType", + "ULongLongDType", + "Float16DType", + "Float32DType", + "Float64DType", + "LongDoubleDType", + "Complex64DType", + "Complex128DType", + "CLongDoubleDType", + "ObjectDType", + "BytesDType", + "StrDType", + "VoidDType", + "DateTime64DType", + "TimeDelta64DType", + "StringDType", ] # Helper base classes (typing-only) -_SCT_co = TypeVar("_SCT_co", bound=np.generic, covariant=True) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) @type_check_only -class _SimpleDType(np.dtype[_SCT_co], Generic[_SCT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] +class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] names: None # pyright: ignore[reportIncompatibleVariableOverride] def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @property - def base(self) -> np.dtype[_SCT_co]: ... + def base(self) -> np.dtype[_ScalarT_co]: ... @property def fields(self) -> None: ... @property @@ -68,7 +77,7 @@ class _SimpleDType(np.dtype[_SCT_co], Generic[_SCT_co]): # type: ignore[misc] def subdtype(self) -> None: ... @type_check_only -class _LiteralDType(_SimpleDType[_SCT_co], Generic[_SCT_co]): # type: ignore[misc] +class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] @property def flags(self) -> L[0]: ... 
@property @@ -417,7 +426,7 @@ class ObjectDType( # type: ignore[misc] class BytesDType( # type: ignore[misc] _TypeCodes[L["S"], L["S"], L[18]], _NoOrder, - _NBit[L[1],_ItemSize_co], + _NBit[L[1], _ItemSize_co], _SimpleDType[np.bytes_], Generic[_ItemSize_co], ): @@ -433,7 +442,7 @@ class BytesDType( # type: ignore[misc] class StrDType( # type: ignore[misc] _TypeCodes[L["U"], L["U"], L[19]], _NativeOrder, - _NBit[L[4],_ItemSize_co], + _NBit[L[4], _ItemSize_co], _SimpleDType[np.str_], Generic[_ItemSize_co], ): @@ -568,40 +577,54 @@ class TimeDelta64DType( # type: ignore[misc] "m8[as]", ]: ... +_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) + @final class StringDType( # type: ignore[misc] _TypeCodes[L["T"], L["T"], L[2056]], _NativeOrder, _NBit[L[8], L[16]], - # TODO: Replace the (invalid) `str` with the scalar type, once implemented - np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues,reportInvalidTypeArguments] + # TODO(jorenham): change once we have a string scalar type: + # https://github.com/numpy/numpy/issues/28165 + np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments] + Generic[_NaObjectT_co], ): + @property + def na_object(self) -> _NaObjectT_co: ... @property def coerce(self) -> L[True]: ... - na_object: ClassVar[MemberDescriptorType] # does not get instantiated # - def __new__(cls, /) -> StringDType: ... - def __getitem__(self, key: Any, /) -> NoReturn: ... - @property - def base(self) -> StringDType: ... + @overload + def __new__(cls, /, *, coerce: bool = True) -> Self: ... + @overload + def __new__(cls, /, *, na_object: _NaObjectT_co, coerce: bool = True) -> Self: ... + + # + def __getitem__(self, key: Never, /) -> NoReturn: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @property def fields(self) -> None: ... @property - def hasobject(self) -> L[True]: ... + def base(self) -> Self: ... 
@property - def isalignedstruct(self) -> L[False]: ... + def ndim(self) -> L[0]: ... @property - def isnative(self) -> L[True]: ... + def shape(self) -> tuple[()]: ... + + # @property def name(self) -> L["StringDType64", "StringDType128"]: ... @property - def ndim(self) -> L[0]: ... + def subdtype(self) -> None: ... @property - def shape(self) -> tuple[()]: ... + def type(self) -> type[str]: ... @property def str(self) -> L["|T8", "|T16"]: ... + + # @property - def subdtype(self) -> None: ... + def hasobject(self) -> L[True]: ... @property - def type(self) -> type[str]: ... # type: ignore[valid-type] + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... diff --git a/blimgui/dist64/numpy/exceptions.py b/blimgui/dist64/numpy/exceptions.py index 590c75d..3df3946 100644 --- a/blimgui/dist64/numpy/exceptions.py +++ b/blimgui/dist64/numpy/exceptions.py @@ -1,14 +1,13 @@ """ -Exceptions and Warnings (:mod:`numpy.exceptions`) -================================================= +Exceptions and Warnings +======================= General exceptions used by NumPy. Note that some exceptions may be module specific, such as linear algebra errors. .. versionadded:: NumPy 1.25 - The exceptions module is new in NumPy 1.25. Older exceptions remain - available through the main NumPy namespace for compatibility. + The exceptions module is new in NumPy 1.25. .. currentmodule:: numpy.exceptions @@ -95,11 +94,11 @@ class RankWarning(RuntimeWarning): # Exception used in shares_memory() class TooHardError(RuntimeError): - """max_work was exceeded. + """``max_work`` was exceeded. This is raised whenever the maximum number of candidate solutions to consider specified by the ``max_work`` parameter is exceeded. - Assigning a finite number to max_work may have caused the operation + Assigning a finite number to ``max_work`` may have caused the operation to fail. 
""" @@ -171,7 +170,7 @@ class AxisError(ValueError, IndexError): """ - __slots__ = ("axis", "ndim", "_msg") + __slots__ = ("_msg", "axis", "ndim") def __init__(self, axis, ndim=None, msg_prefix=None): if ndim is msg_prefix is None: @@ -243,5 +242,5 @@ class DTypePromotionError(TypeError): DTypePromotionError: field names `('field1', 'field2')` and `('field1',)` mismatch. - """ # NOQA + """ # noqa: E501 pass diff --git a/blimgui/dist64/numpy/exceptions.pyi b/blimgui/dist64/numpy/exceptions.pyi index 16f2ce3..cd7bf05 100644 --- a/blimgui/dist64/numpy/exceptions.pyi +++ b/blimgui/dist64/numpy/exceptions.pyi @@ -17,9 +17,11 @@ class TooHardError(RuntimeError): ... class DTypePromotionError(TypeError): ... class AxisError(ValueError, IndexError): - axis: None | int - ndim: None | int + __slots__ = "_msg", "axis", "ndim" + + axis: int | None + ndim: int | None @overload - def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ... + def __init__(self, axis: str, ndim: None = None, msg_prefix: None = None) -> None: ... @overload - def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ... + def __init__(self, axis: int, ndim: int, msg_prefix: str | None = None) -> None: ... diff --git a/blimgui/dist64/numpy/f2py/__init__.py b/blimgui/dist64/numpy/f2py/__init__.py index a47cc66..151193a 100644 --- a/blimgui/dist64/numpy/f2py/__init__.py +++ b/blimgui/dist64/numpy/f2py/__init__.py @@ -9,14 +9,14 @@ """ __all__ = ['run_main', 'get_include'] -import sys -import subprocess import os +import subprocess +import sys import warnings from numpy.exceptions import VisibleDeprecationWarning -from . import f2py2e -from . import diagnose + +from . 
import diagnose, f2py2e run_main = f2py2e.run_main main = f2py2e.main @@ -79,8 +79,7 @@ def __getattr__(attr): return test else: - raise AttributeError("module {!r} has no attribute " - "{!r}".format(__name__, attr)) + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") def __dir__(): diff --git a/blimgui/dist64/numpy/f2py/__init__.pyi b/blimgui/dist64/numpy/f2py/__init__.pyi index d5bdbba..64540d3 100644 --- a/blimgui/dist64/numpy/f2py/__init__.pyi +++ b/blimgui/dist64/numpy/f2py/__init__.pyi @@ -1,42 +1,5 @@ -from _typeshed import StrOrBytesPath -import subprocess -from collections.abc import Iterable -from typing import Literal as L, overload, TypedDict, type_check_only +from .f2py2e import main as main, run_main -__all__ = ["run_main", "get_include"] - -@type_check_only -class _F2PyDictBase(TypedDict): - csrc: list[str] - h: list[str] - -@type_check_only -class _F2PyDict(_F2PyDictBase, total=False): - fsrc: list[str] - ltx: list[str] - -def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... - -@overload -def compile( - source: str | bytes, - modulename: str = ..., - extra_args: str | list[str] = ..., - verbose: bool = ..., - source_fn: StrOrBytesPath | None = ..., - extension: L[".f", ".f90"] = ..., - full_output: L[False] = ..., -) -> int: ... -@overload -def compile( - source: str | bytes, - modulename: str = ..., - extra_args: str | list[str] = ..., - verbose: bool = ..., - source_fn: StrOrBytesPath | None = ..., - extension: L[".f", ".f90"] = ..., - *, - full_output: L[True], -) -> subprocess.CompletedProcess[bytes]: ... +__all__ = ["get_include", "run_main"] def get_include() -> str: ... 
diff --git a/blimgui/dist64/numpy/f2py/__version__.py b/blimgui/dist64/numpy/f2py/__version__.py index 8813675..3cf646f 100644 --- a/blimgui/dist64/numpy/f2py/__version__.py +++ b/blimgui/dist64/numpy/f2py/__version__.py @@ -1 +1 @@ -from numpy.version import version +from numpy.version import version # noqa: F401 diff --git a/blimgui/dist64/numpy/f2py/__version__.pyi b/blimgui/dist64/numpy/f2py/__version__.pyi new file mode 100644 index 0000000..85b4225 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/__version__.pyi @@ -0,0 +1 @@ +from numpy.version import version as version diff --git a/blimgui/dist64/numpy/f2py/_backends/__init__.pyi b/blimgui/dist64/numpy/f2py/_backends/__init__.pyi new file mode 100644 index 0000000..43625c6 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/_backends/__init__.pyi @@ -0,0 +1,5 @@ +from typing import Literal as L + +from ._backend import Backend + +def f2py_build_generator(name: L["distutils", "meson"]) -> Backend: ... diff --git a/blimgui/dist64/numpy/f2py/_backends/_backend.py b/blimgui/dist64/numpy/f2py/_backends/_backend.py index dd16e87..a7a7812 100644 --- a/blimgui/dist64/numpy/f2py/_backends/_backend.py +++ b/blimgui/dist64/numpy/f2py/_backends/_backend.py @@ -1,5 +1,3 @@ -from __future__ import annotations - from abc import ABC, abstractmethod diff --git a/blimgui/dist64/numpy/f2py/_backends/_backend.pyi b/blimgui/dist64/numpy/f2py/_backends/_backend.pyi new file mode 100644 index 0000000..ed24519 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/_backends/_backend.pyi @@ -0,0 +1,46 @@ +import abc +from pathlib import Path +from typing import Any, Final + +class Backend(abc.ABC): + modulename: Final[str] + sources: Final[list[str | Path]] + extra_objects: Final[list[str]] + build_dir: Final[str | Path] + include_dirs: Final[list[str | Path]] + library_dirs: Final[list[str | Path]] + libraries: Final[list[str]] + define_macros: Final[list[tuple[str, str | None]]] + undef_macros: Final[list[str]] + f2py_flags: Final[list[str]] + 
sysinfo_flags: Final[list[str]] + fc_flags: Final[list[str]] + flib_flags: Final[list[str]] + setup_flags: Final[list[str]] + remove_build_dir: Final[bool] + extra_dat: Final[dict[str, Any]] + + def __init__( + self, + /, + modulename: str, + sources: list[str | Path], + extra_objects: list[str], + build_dir: str | Path, + include_dirs: list[str | Path], + library_dirs: list[str | Path], + libraries: list[str], + define_macros: list[tuple[str, str | None]], + undef_macros: list[str], + f2py_flags: list[str], + sysinfo_flags: list[str], + fc_flags: list[str], + flib_flags: list[str], + setup_flags: list[str], + remove_build_dir: bool, + extra_dat: dict[str, Any], + ) -> None: ... + + # + @abc.abstractmethod + def compile(self) -> None: ... diff --git a/blimgui/dist64/numpy/f2py/_backends/_distutils.py b/blimgui/dist64/numpy/f2py/_backends/_distutils.py index 3da7b8d..ebce1e8 100644 --- a/blimgui/dist64/numpy/f2py/_backends/_distutils.py +++ b/blimgui/dist64/numpy/f2py/_backends/_distutils.py @@ -1,14 +1,15 @@ -from ._backend import Backend - -from numpy.distutils.core import setup, Extension -from numpy.distutils.system_info import get_info -from numpy.distutils.misc_util import dict_append -from numpy.exceptions import VisibleDeprecationWarning import os -import sys import shutil +import sys import warnings +from numpy.distutils.core import Extension, setup +from numpy.distutils.misc_util import dict_append +from numpy.distutils.system_info import get_info +from numpy.exceptions import VisibleDeprecationWarning + +from ._backend import Backend + class DistutilsBackend(Backend): def __init__(sef, *args, **kwargs): diff --git a/blimgui/dist64/numpy/f2py/_backends/_distutils.pyi b/blimgui/dist64/numpy/f2py/_backends/_distutils.pyi new file mode 100644 index 0000000..56bbf7e --- /dev/null +++ b/blimgui/dist64/numpy/f2py/_backends/_distutils.pyi @@ -0,0 +1,13 @@ +from typing_extensions import deprecated, override + +from ._backend import Backend + +class 
DistutilsBackend(Backend): + @deprecated( + "distutils has been deprecated since NumPy 1.26.x. Use the Meson backend instead, or generate wrappers without -c and " + "use a custom build script" + ) + # NOTE: the `sef` typo matches runtime + def __init__(sef, *args: object, **kwargs: object) -> None: ... + @override + def compile(self) -> None: ... diff --git a/blimgui/dist64/numpy/f2py/_backends/_meson.py b/blimgui/dist64/numpy/f2py/_backends/_meson.py index e699679..ada3925 100644 --- a/blimgui/dist64/numpy/f2py/_backends/_meson.py +++ b/blimgui/dist64/numpy/f2py/_backends/_meson.py @@ -1,17 +1,14 @@ -from __future__ import annotations - -import os import errno +import os +import re import shutil import subprocess import sys -import re +from itertools import chain from pathlib import Path - -from ._backend import Backend from string import Template -from itertools import chain +from ._backend import Backend class MesonTemplate: @@ -53,6 +50,7 @@ def __init__( self.pipeline = [ self.initialize_template, self.sources_substitution, + self.objects_substitution, self.deps_substitution, self.include_substitution, self.libraries_substitution, @@ -82,6 +80,11 @@ def sources_substitution(self) -> None: [f"{self.indent}'''{source}'''," for source in self.sources] ) + def objects_substitution(self) -> None: + self.substitutions["obj_list"] = ",\n".join( + [f"{self.indent}'''{obj}'''," for obj in self.objects] + ) + def deps_substitution(self) -> None: self.substitutions["dep_list"] = f",\n{self.indent}".join( [f"{self.indent}dependency('{dep}')," for dep in self.deps] @@ -97,13 +100,13 @@ def libraries_substitution(self) -> None: self.substitutions["lib_declarations"] = "\n".join( [ - f"{lib.replace('.','_')} = declare_dependency(link_args : ['-l{lib}'])" + f"{lib.replace('.', '_')} = declare_dependency(link_args : ['-l{lib}'])" for lib in self.libraries ] ) self.substitutions["lib_list"] = f"\n{self.indent}".join( - [f"{self.indent}{lib.replace('.','_')}," for lib in 
self.libraries] + [f"{self.indent}{lib.replace('.', '_')}," for lib in self.libraries] ) self.substitutions["lib_dir_list"] = f"\n{self.indent}".join( [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))] @@ -127,7 +130,7 @@ def generate_meson_build(self): node() template = Template(self.meson_build_template()) meson_build = template.substitute(self.substitutions) - meson_build = re.sub(r",,", ",", meson_build) + meson_build = meson_build.replace(",,", ",") return meson_build @@ -146,6 +149,7 @@ def _move_exec_to_root(self, build_dir: Path): path_objects = chain( walk_dir.glob(f"{self.modulename}*.so"), walk_dir.glob(f"{self.modulename}*.pyd"), + walk_dir.glob(f"{self.modulename}*.dll"), ) # Same behavior as distutils # https://github.com/numpy/numpy/issues/24874#issuecomment-1835632293 @@ -188,6 +192,7 @@ def run_meson(self, build_dir: Path): def compile(self) -> None: self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir) + _prepare_objects(self.modulename, self.extra_objects, self.build_dir) self.write_meson_build(self.build_dir) self.run_meson(self.build_dir) self._move_exec_to_root(self.build_dir) @@ -218,6 +223,12 @@ def _prepare_sources(mname, sources, bdir): ] return extended_sources +def _prepare_objects(mname, objects, bdir): + Path(bdir).mkdir(parents=True, exist_ok=True) + # Copy objects + for obj in objects: + if Path(obj).exists() and Path(obj).is_file(): + shutil.copy(obj, bdir) def _get_flags(fc_flags): flag_values = [] diff --git a/blimgui/dist64/numpy/f2py/_backends/_meson.pyi b/blimgui/dist64/numpy/f2py/_backends/_meson.pyi new file mode 100644 index 0000000..5c85c61 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/_backends/_meson.pyi @@ -0,0 +1,62 @@ +from collections.abc import Callable +from pathlib import Path +from typing import Final, Literal as L +from typing_extensions import override + +from ._backend import Backend + +class MesonTemplate: + modulename: Final[str] + build_template_path: Final[Path] + 
sources: Final[list[str | Path]] + deps: Final[list[str]] + libraries: Final[list[str]] + library_dirs: Final[list[str | Path]] + include_dirs: Final[list[str | Path]] + substitutions: Final[dict[str, str]] + objects: Final[list[str | Path]] + fortran_args: Final[list[str]] + pipeline: Final[list[Callable[[], None]]] + build_type: Final[str] + python_exe: Final[str] + indent: Final[str] + + def __init__( + self, + /, + modulename: str, + sources: list[Path], + deps: list[str], + libraries: list[str], + library_dirs: list[str | Path], + include_dirs: list[str | Path], + object_files: list[str | Path], + linker_args: list[str], + fortran_args: list[str], + build_type: str, + python_exe: str, + ) -> None: ... + + # + def initialize_template(self) -> None: ... + def sources_substitution(self) -> None: ... + def objects_substitution(self) -> None: ... + def deps_substitution(self) -> None: ... + def libraries_substitution(self) -> None: ... + def include_substitution(self) -> None: ... + def fortran_args_substitution(self) -> None: ... + + # + def meson_build_template(self) -> str: ... + def generate_meson_build(self) -> str: ... + +class MesonBackend(Backend): + dependencies: list[str] + meson_build_dir: L["bdir"] + build_type: L["debug", "release"] + + def __init__(self, /, *args: object, **kwargs: object) -> None: ... + def write_meson_build(self, /, build_dir: Path) -> None: ... + def run_meson(self, /, build_dir: Path) -> None: ... + @override + def compile(self) -> None: ... 
diff --git a/blimgui/dist64/numpy/f2py/_backends/meson.build.template b/blimgui/dist64/numpy/f2py/_backends/meson.build.template index 0be7c17..59f48a2 100644 --- a/blimgui/dist64/numpy/f2py/_backends/meson.build.template +++ b/blimgui/dist64/numpy/f2py/_backends/meson.build.template @@ -43,6 +43,9 @@ ${source_list}, include_directories: [ inc_np, ${inc_list} + ], + objects: [ +${obj_list} ], dependencies : [ py_dep, diff --git a/blimgui/dist64/numpy/f2py/_isocbind.pyi b/blimgui/dist64/numpy/f2py/_isocbind.pyi new file mode 100644 index 0000000..b972f56 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/_isocbind.pyi @@ -0,0 +1,13 @@ +from typing import Any, Final + +iso_c_binding_map: Final[dict[str, dict[str, str]]] = ... + +isoc_c2pycode_map: Final[dict[str, Any]] = {} # not implemented +iso_c2py_map: Final[dict[str, Any]] = {} # not implemented + +isoc_kindmap: Final[dict[str, str]] = ... + +# namespace pollution +c_type: str +c_type_dict: dict[str, str] +fortran_type: str diff --git a/blimgui/dist64/numpy/f2py/_src_pyf.py b/blimgui/dist64/numpy/f2py/_src_pyf.py index 375297b..6f27080 100644 --- a/blimgui/dist64/numpy/f2py/_src_pyf.py +++ b/blimgui/dist64/numpy/f2py/_src_pyf.py @@ -68,17 +68,18 @@ def parse_structure(astr): if function_start_re.match(astr, start, m.end()): while True: i = astr.rfind('\n', ind, start) - if i==-1: + if i == -1: break start = i - if astr[i:i+7]!='\n $': + if astr[i:i + 7] != '\n $': break start += 1 m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) + ind = end = (m and m.end() - 1) or len(astr) spanlist.append((start, end)) return spanlist + template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") list_re = re.compile(r"<\s*((.*?))\s*>") @@ -98,6 +99,7 @@ def find_and_remove_repl_patterns(astr): astr = re.subn(named_re, '', astr)[0] return astr, names + item_re = re.compile(r"\A\\(?P\d+)\Z") def conv(astr): b = astr.split(',') @@ -115,7 +117,7 @@ def 
unique_key(adict): done = False n = 1 while not done: - newkey = '__l%s' % (n) + newkey = f'__l{n}' if newkey in allkeys: n += 1 else: @@ -133,7 +135,7 @@ def expand_sub(substr, names): def listrepl(mobj): thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) if template_name_re.match(thelist): - return "<%s>" % (thelist) + return f"<{thelist}>" name = None for key in lnames.keys(): # see if list is already in dictionary if lnames[key] == thelist: @@ -141,10 +143,11 @@ def listrepl(mobj): if name is None: # this list is not in the dictionary yet name = unique_key(lnames) lnames[name] = thelist - return "<%s>" % name + return f"<{name}>" - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed + # convert all lists to named templates + # new names are constructed as needed + substr = list_re.sub(listrepl, substr) numsubs = None base_rule = None @@ -153,7 +156,7 @@ def listrepl(mobj): if r not in rules: thelist = lnames.get(r, names.get(r, None)) if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) + raise ValueError(f'No replicates found for <{r}>') if r not in names and not thelist.startswith('_'): names[r] = thelist rule = [i.replace('@comma@', ',') for i in thelist.split(',')] @@ -166,14 +169,16 @@ def listrepl(mobj): elif num == numsubs: rules[r] = rule else: - print("Mismatch in number of replacements (base <{}={}>) " - "for <{}={}>. Ignoring.".format(base_rule, ','.join(rules[base_rule]), r, thelist)) + rules_base_rule = ','.join(rules[base_rule]) + print("Mismatch in number of replacements " + f"(base <{base_rule}={rules_base_rule}>) " + f"for <{r}={thelist}>. 
Ignoring.") if not rules: return substr def namerepl(mobj): name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] + return rules.get(name, (k + 1) * [name])[k] newstr = '' for k in range(numsubs): @@ -197,11 +202,12 @@ def process_str(allstr): writestr += cleanedstr names.update(defs) writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] + oldend = sub[1] writestr += newstr[oldend:] return writestr + include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) def resolve_includes(source): @@ -226,6 +232,7 @@ def process_file(source): lines = resolve_includes(source) return process_str(''.join(lines)) + _special_names = find_repl_patterns(''' <_c=s,d,c,z> <_t=real,double precision,complex,double complex> diff --git a/blimgui/dist64/numpy/f2py/_src_pyf.pyi b/blimgui/dist64/numpy/f2py/_src_pyf.pyi new file mode 100644 index 0000000..50ddd07 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/_src_pyf.pyi @@ -0,0 +1,28 @@ +import re +from _typeshed import StrOrBytesPath +from collections.abc import Mapping +from typing import Final + +routine_start_re: Final[re.Pattern[str]] = ... +routine_end_re: Final[re.Pattern[str]] = ... +function_start_re: Final[re.Pattern[str]] = ... +template_re: Final[re.Pattern[str]] = ... +named_re: Final[re.Pattern[str]] = ... +list_re: Final[re.Pattern[str]] = ... +item_re: Final[re.Pattern[str]] = ... +template_name_re: Final[re.Pattern[str]] = ... +include_src_re: Final[re.Pattern[str]] = ... + +def parse_structure(astr: str) -> list[tuple[int, int]]: ... +def find_repl_patterns(astr: str) -> dict[str, str]: ... +def find_and_remove_repl_patterns(astr: str) -> tuple[str, dict[str, str]]: ... +def conv(astr: str) -> str: ... + +# +def unique_key(adict: Mapping[str, object]) -> str: ... +def expand_sub(substr: str, names: dict[str, str]) -> str: ... +def process_str(allstr: str) -> str: ... + +# +def resolve_includes(source: StrOrBytesPath) -> list[str]: ... 
+def process_file(source: StrOrBytesPath) -> str: ... diff --git a/blimgui/dist64/numpy/f2py/auxfuncs.py b/blimgui/dist64/numpy/f2py/auxfuncs.py index f2dee6a..5e9fae4 100644 --- a/blimgui/dist64/numpy/f2py/auxfuncs.py +++ b/blimgui/dist64/numpy/f2py/auxfuncs.py @@ -9,13 +9,12 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ import pprint -import sys import re +import sys import types from functools import reduce -from . import __version__ -from . import cfuncs +from . import __version__, cfuncs from .cfuncs import errmess __all__ = [ @@ -43,7 +42,7 @@ 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', - 'process_f2cmap_dict', 'containscommon' + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' ] @@ -416,7 +415,7 @@ def getdimension(var): dimpattern = r"\((.*?)\)" if 'attrspec' in var.keys(): if any('dimension' in s for s in var['attrspec']): - return [re.findall(dimpattern, v) for v in var['attrspec']][0] + return next(re.findall(dimpattern, v) for v in var['attrspec']) def isrequired(var): @@ -570,6 +569,20 @@ def containscommon(rout): return 0 +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + def containsmodule(block): if ismodule(block): return 1 @@ -607,7 +620,7 @@ def __init__(self, mess): self.mess = mess def __call__(self, var): - mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) + mess = f'\n\n var = {var}\n Message: {self.mess}\n' raise F2PYError(mess) @@ -616,7 +629,7 @@ def l_and(*f): for i in range(len(f)): l1 = '%s,f%d=f[%d]' % (l1, i, i) l2.append('f%d(v)' % (i)) - return eval('%s:%s' % (l1, ' and '.join(l2))) + return eval(f"{l1}:{' and 
'.join(l2)}") def l_or(*f): @@ -624,7 +637,7 @@ def l_or(*f): for i in range(len(f)): l1 = '%s,f%d=f[%d]' % (l1, i, i) l2.append('f%d(v)' % (i)) - return eval('%s:%s' % (l1, ' or '.join(l2))) + return eval(f"{l1}:{' or '.join(l2)}") def l_not(f): @@ -644,8 +657,7 @@ def getfortranname(rout): if name == '': raise KeyError if not name: - errmess('Failed to use fortranname from %s\n' % - (rout['f2pyenhancements'])) + errmess(f"Failed to use fortranname from {rout['f2pyenhancements']}\n") raise KeyError except KeyError: name = rout['name'] @@ -677,8 +689,7 @@ def getmultilineblock(rout, blockname, comment=1, counter=0): else: r = r[:-3] else: - errmess("%s multiline block should end with `'''`: %s\n" - % (blockname, repr(r))) + errmess(f"{blockname} multiline block should end with `'''`: {repr(r)}\n") return r @@ -710,9 +721,8 @@ def getcallprotoargument(rout, cb_map={}): pass elif isstring(var): pass - else: - if not isattr_value(var): - ctype = ctype + '*' + elif not isattr_value(var): + ctype = ctype + '*' if (isstring(var) or isarrayofstrings(var) # obsolete? 
or isstringarray(var)): @@ -781,7 +791,7 @@ def getrestdoc(rout): def gentitle(name): ln = (80 - len(name) - 6) // 2 - return '/*%s %s %s*/' % (ln * '*', name, ln * '*') + return f"/*{ln * '*'} {name} {ln * '*'}*/" def flatlist(lst): @@ -809,9 +819,9 @@ def replace(str, d, defaultsep=''): else: sep = defaultsep if isinstance(d[k], list): - str = str.replace('#%s#' % (k), sep.join(flatlist(d[k]))) + str = str.replace(f'#{k}#', sep.join(flatlist(d[k]))) else: - str = str.replace('#%s#' % (k), d[k]) + str = str.replace(f'#{k}#', d[k]) return str @@ -882,22 +892,16 @@ def applyrules(rules, d, var={}): for i in rules[k][k1]: if isinstance(i, dict): res = applyrules({'supertext': i}, d, var) - if 'supertext' in res: - i = res['supertext'] - else: - i = '' + i = res.get('supertext', '') ret[k].append(replace(i, d)) else: i = rules[k][k1] if isinstance(i, dict): res = applyrules({'supertext': i}, d) - if 'supertext' in res: - i = res['supertext'] - else: - i = '' + i = res.get('supertext', '') ret[k].append(replace(i, d)) else: - errmess('applyrules: ignoring rule %s.\n' % repr(rules[k])) + errmess(f'applyrules: ignoring rule {repr(rules[k])}.\n') if isinstance(ret[k], list): if len(ret[k]) == 1: ret[k] = ret[k][0] @@ -905,6 +909,7 @@ def applyrules(rules, d, var={}): del ret[k] return ret + _f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', re.I).match _f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' 
@@ -916,7 +921,7 @@ def get_f2py_modulename(source): for line in f: m = _f2py_module_name_match(line) if m: - if _f2py_user_module_name_match(line): # skip *__user__* names + if _f2py_user_module_name_match(line): # skip *__user__* names continue name = m.group('name') break @@ -930,7 +935,7 @@ def getuseblocks(pymod): all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x]) return all_uses -def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose = False): +def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): """ Update the Fortran-to-C type mapping dictionary with new mappings and return a list of successfully mapped C types. @@ -988,13 +993,12 @@ def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose = False): ) f2cmap_all[k][k1] = v1 if verbose: - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, v1)) + outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n') f2cmap_mapped.append(v1) - else: - if verbose: - errmess( - "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" - % (k, k1, v1, v1, list(c2py_map.keys())) - ) + elif verbose: + errmess( + "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" + % (k, k1, v1, v1, list(c2py_map.keys())) + ) return f2cmap_all, f2cmap_mapped diff --git a/blimgui/dist64/numpy/f2py/auxfuncs.pyi b/blimgui/dist64/numpy/f2py/auxfuncs.pyi new file mode 100644 index 0000000..32e381c --- /dev/null +++ b/blimgui/dist64/numpy/f2py/auxfuncs.pyi @@ -0,0 +1,262 @@ +from _typeshed import FileDescriptorOrPath +from collections.abc import Callable, Mapping +from pprint import pprint as show +from typing import Any, Final, Literal as L, Never, TypeAlias, TypeVar, overload + +from .cfuncs import errmess + +__all__ = [ + "applyrules", + "containscommon", + "containsderivedtypes", + "debugcapi", + "dictappend", + "errmess", + "gentitle", + "get_f2py_modulename", + "getargs2", + "getcallprotoargument", + "getcallstatement", + "getdimension", + "getfortranname", + "getpymethoddef", + 
"getrestdoc", + "getuseblocks", + "getusercode", + "getusercode1", + "hasbody", + "hascallstatement", + "hascommon", + "hasexternals", + "hasinitvalue", + "hasnote", + "hasresultnote", + "isallocatable", + "isarray", + "isarrayofstrings", + "isattr_value", + "ischaracter", + "ischaracter_or_characterarray", + "ischaracterarray", + "iscomplex", + "iscomplexarray", + "iscomplexfunction", + "iscomplexfunction_warn", + "iscstyledirective", + "isdouble", + "isdummyroutine", + "isexternal", + "isfunction", + "isfunction_wrap", + "isint1", + "isint1array", + "isinteger", + "isintent_aux", + "isintent_c", + "isintent_callback", + "isintent_copy", + "isintent_dict", + "isintent_hide", + "isintent_in", + "isintent_inout", + "isintent_inplace", + "isintent_nothide", + "isintent_out", + "isintent_overwrite", + "islogical", + "islogicalfunction", + "islong_complex", + "islong_double", + "islong_doublefunction", + "islong_long", + "islong_longfunction", + "ismodule", + "ismoduleroutine", + "isoptional", + "isprivate", + "isrequired", + "isroutine", + "isscalar", + "issigned_long_longarray", + "isstring", + "isstring_or_stringarray", + "isstringarray", + "isstringfunction", + "issubroutine", + "issubroutine_wrap", + "isthreadsafe", + "isunsigned", + "isunsigned_char", + "isunsigned_chararray", + "isunsigned_long_long", + "isunsigned_long_longarray", + "isunsigned_short", + "isunsigned_shortarray", + "isvariable", + "l_and", + "l_not", + "l_or", + "outmess", + "process_f2cmap_dict", + "replace", + "show", + "stripcomma", + "throw_error", +] + +### + +_VT = TypeVar("_VT") +_RT = TypeVar("_RT") + +_Var: TypeAlias = Mapping[str, list[str]] +_ROut: TypeAlias = Mapping[str, str] +_F2CMap: TypeAlias = Mapping[str, Mapping[str, str]] + +_Bool: TypeAlias = bool | L[0, 1] +_Intent: TypeAlias = L[ + "INTENT_IN", + "INTENT_OUT", + "INTENT_INOUT", + "INTENT_C", + "INTENT_CACHE", + "INTENT_HIDE", + "INTENT_INPLACE", + "INTENT_ALIGNED4", + "INTENT_ALIGNED8", + "INTENT_ALIGNED16", + "OPTIONAL", 
+] + +### + +isintent_dict: dict[Callable[[_Var], _Bool], _Intent] + +class F2PYError(Exception): ... + +class throw_error: + mess: Final[str] + def __init__(self, /, mess: str) -> None: ... + def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError + +# +def l_and(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_or(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_not(f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... + +# +def outmess(t: str) -> None: ... +def debugcapi(var: _Var) -> bool: ... + +# +def hasinitvalue(var: _Var | str) -> bool: ... +def hasnote(var: _Var | str) -> bool: ... +def ischaracter(var: _Var) -> bool: ... +def ischaracterarray(var: _Var) -> bool: ... +def ischaracter_or_characterarray(var: _Var) -> bool: ... +def isstring(var: _Var) -> bool: ... +def isstringarray(var: _Var) -> bool: ... +def isstring_or_stringarray(var: _Var) -> bool: ... +def isarray(var: _Var) -> bool: ... +def isarrayofstrings(var: _Var) -> bool: ... +def isscalar(var: _Var) -> bool: ... +def iscomplex(var: _Var) -> bool: ... +def islogical(var: _Var) -> bool: ... +def isinteger(var: _Var) -> bool: ... +def isint1(var: _Var) -> bool: ... +def isint1array(var: _Var) -> bool: ... +def islong_long(var: _Var) -> _Bool: ... +def isunsigned(var: _Var) -> _Bool: ... +def isunsigned_char(var: _Var) -> _Bool: ... +def isunsigned_chararray(var: _Var) -> bool: ... +def isunsigned_short(var: _Var) -> _Bool: ... +def isunsigned_shortarray(var: _Var) -> bool: ... +def isunsigned_long_long(var: _Var) -> _Bool: ... +def isunsigned_long_longarray(var: _Var) -> bool: ... +def issigned_long_longarray(var: _Var) -> bool: ... +def isdouble(var: _Var) -> _Bool: ... +def islong_double(var: _Var) -> _Bool: ... +def islong_complex(var: _Var) -> _Bool: ... +def iscomplexarray(var: _Var) -> bool: ... +def isallocatable(var: _Var) -> bool: ... +def isattr_value(var: _Var) -> bool: ... +def isoptional(var: _Var) -> bool: ... 
+def isexternal(var: _Var) -> bool: ... +def isrequired(var: _Var) -> bool: ... +def isprivate(var: _Var) -> bool: ... +def isvariable(var: _Var) -> bool: ... +def isintent_in(var: _Var) -> _Bool: ... +def isintent_inout(var: _Var) -> bool: ... +def isintent_out(var: _Var) -> bool: ... +def isintent_hide(var: _Var) -> bool: ... +def isintent_nothide(var: _Var) -> bool: ... +def isintent_c(var: _Var) -> bool: ... +def isintent_cache(var: _Var) -> bool: ... +def isintent_copy(var: _Var) -> bool: ... +def isintent_overwrite(var: _Var) -> bool: ... +def isintent_callback(var: _Var) -> bool: ... +def isintent_inplace(var: _Var) -> bool: ... +def isintent_aux(var: _Var) -> bool: ... + +# +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... +def containscommon(rout: _ROut) -> _Bool: ... +def hasexternals(rout: _ROut) -> bool: ... +def hasresultnote(rout: _ROut) -> _Bool: ... +def hasbody(rout: _ROut) -> _Bool: ... +def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... +def hascallstatement(rout: _ROut) -> bool: ... +def isroutine(rout: _ROut) -> bool: ... +def ismodule(rout: _ROut) -> bool: ... +def ismoduleroutine(rout: _ROut) -> bool: ... +def issubroutine(rout: _ROut) -> bool: ... +def issubroutine_wrap(rout: _ROut) -> _Bool: ... +def isfunction(rout: _ROut) -> bool: ... +def isfunction_wrap(rout: _ROut) -> _Bool: ... +def islogicalfunction(rout: _ROut) -> _Bool: ... +def islong_longfunction(rout: _ROut) -> _Bool: ... +def islong_doublefunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction_warn(rout: _ROut) -> _Bool: ... +def isstringfunction(rout: _ROut) -> _Bool: ... +def isthreadsafe(rout: _ROut) -> bool: ... +def isdummyroutine(rout: _ROut) -> _Bool: ... +def iscstyledirective(f2py_line: str) -> bool: ... + +# . +def getdimension(var: _Var) -> list[Any] | None: ... +def getfortranname(rout: _ROut) -> str: ... 
+def getmultilineblock(rout: _ROut, blockname: str, comment: _Bool = 1, counter: int = 0) -> str | None: ... +def getcallstatement(rout: _ROut) -> str | None: ... +def getcallprotoargument(rout: _ROut, cb_map: dict[str, str] = {}) -> str: ... +def getusercode(rout: _ROut) -> str | None: ... +def getusercode1(rout: _ROut) -> str | None: ... +def getpymethoddef(rout: _ROut) -> str | None: ... +def getargs(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getargs2(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getrestdoc(rout: _ROut) -> str | None: ... + +# +def gentitle(name: str) -> str: ... +def stripcomma(s: str) -> str: ... +@overload +def replace(str: str, d: list[str], defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: list[str], d: str, defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: str, d: str, defaultsep: str = "") -> str: ... + +# +def dictappend(rd: Mapping[str, object], ar: Mapping[str, object] | list[Mapping[str, object]]) -> dict[str, Any]: ... +def applyrules(rules: Mapping[str, object], d: Mapping[str, object], var: _Var = {}) -> dict[str, Any]: ... + +# +def get_f2py_modulename(source: FileDescriptorOrPath) -> str: ... +def getuseblocks(pymod: Mapping[str, Mapping[str, Mapping[str, str]]]) -> list[str]: ... +def process_f2cmap_dict( + f2cmap_all: _F2CMap, + new_map: _F2CMap, + c2py_map: _F2CMap, + verbose: bool = False, +) -> tuple[dict[str, dict[str, str]], list[str]]: ... diff --git a/blimgui/dist64/numpy/f2py/capi_maps.py b/blimgui/dist64/numpy/f2py/capi_maps.py index c3b4058..2ab9e40 100644 --- a/blimgui/dist64/numpy/f2py/capi_maps.py +++ b/blimgui/dist64/numpy/f2py/capi_maps.py @@ -7,19 +7,21 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ from . import __version__ + f2py_version = __version__.version import copy -import re import os -from .crackfortran import markoutercomma +import re + from . 
import cb_rules -from ._isocbind import iso_c_binding_map, isoc_c2pycode_map, iso_c2py_map +from ._isocbind import iso_c2py_map, iso_c_binding_map, isoc_c2pycode_map # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * +from .crackfortran import markoutercomma __all__ = [ 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', @@ -152,13 +154,13 @@ def load_f2cmap_file(f2cmap_file): # interpreted as C 'float'. This feature is useful for F90/95 users if # they use PARAMETERS in type specifications. try: - outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) + outmess(f'Reading f2cmap from {f2cmap_file!r} ...\n') with open(f2cmap_file) as f: d = eval(f.read().lower(), {}, {}) f2cmap_all, f2cmap_mapped = process_f2cmap_dict(f2cmap_all, d, c2py_map, True) outmess('Successfully applied user defined f2cmap changes\n') except Exception as msg: - errmess('Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg)) + errmess(f'Failed to apply user defined f2cmap changes: {msg}. 
Skipping.\n') cformat_map = {'double': '%g', @@ -197,7 +199,7 @@ def getctype(var): if a in var['vars']: return getctype(var['vars'][a]) else: - errmess('getctype: function %s has no return value?!\n' % a) + errmess(f'getctype: function {a} has no return value?!\n') elif issubroutine(var): return ctype elif ischaracter_or_characterarray(var): @@ -229,9 +231,8 @@ def getctype(var): errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' % (typespec, var['kindselector']['kind'], ctype, typespec, var['kindselector']['kind'], os.getcwd())) - else: - if not isexternal(var): - errmess('getctype: No C-type found in "%s", assuming void.\n' % var) + elif not isexternal(var): + errmess(f'getctype: No C-type found in "{var}", assuming void.\n') return ctype @@ -259,10 +260,10 @@ def getstrlength(var): if a in var['vars']: return getstrlength(var['vars'][a]) else: - errmess('getstrlength: function %s has no return value?!\n' % a) + errmess(f'getstrlength: function {a} has no return value?!\n') if not isstring(var): errmess( - 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) + f'getstrlength: expected a signature of a string but got: {repr(var)}\n') len = '1' if 'charselector' in var: a = var['charselector'] @@ -331,7 +332,7 @@ def getarrdims(a, var, verbose=0): ret['cbsetdims'], i, 0) elif verbose: errmess( - 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) + f'getarrdims: If in call-back function: array argument {repr(a)} must have bounded dimensions: got {repr(d)}\n') if ret['cbsetdims']: ret['cbsetdims'] = ret['cbsetdims'][:-1] # if not isintent_c(var): @@ -349,7 +350,7 @@ def getpydocsign(a, var): if af in var['vars']: return getpydocsign(af, var['vars'][af]) else: - errmess('getctype: function %s has no return value?!\n' % af) + errmess(f'getctype: function {af} has no return value?!\n') return '', '' sig, 
sigout = a, a opt = '' @@ -368,22 +369,21 @@ def getpydocsign(a, var): if hasinitvalue(var): init, showinit = getinit(a, var) - init = ', optional\\n Default: %s' % showinit + init = f', optional\\n Default: {showinit}' if isscalar(var): if isintent_inout(var): sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], c2pycode_map[ctype], init) else: - sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) - sigout = '%s : %s' % (out_a, c2py_map[ctype]) + sig = f'{a} : {opt} {c2py_map[ctype]}{init}' + sigout = f'{out_a} : {c2py_map[ctype]}' elif isstring(var): if isintent_inout(var): sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( a, opt, getstrlength(var), init) else: - sig = '%s : %s string(len=%s)%s' % ( - a, opt, getstrlength(var), init) - sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) + sig = f'{a} : {opt} string(len={getstrlength(var)}){init}' + sigout = f'{out_a} : string(len={getstrlength(var)})' elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) @@ -402,25 +402,23 @@ def getpydocsign(a, var): if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: ua = lcb2_map[lcb_map[a]]['argname'] if not ua == a: - ua = ' => %s' % ua + ua = f' => {ua}' else: ua = '' - sig = '%s : call-back function%s' % (a, ua) + sig = f'{a} : call-back function{ua}' sigout = sig else: errmess( - 'getpydocsign: Could not resolve docsignature for "%s".\n' % a) + f'getpydocsign: Could not resolve docsignature for "{a}".\n') return sig, sigout def getarrdocsign(a, var): ctype = getctype(var) if isstring(var) and (not isarray(var)): - sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, - getstrlength(var)) + sig = f'{a} : rank-0 array(string(len={getstrlength(var)}),\'c\')' elif isscalar(var): - sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], - c2pycode_map[ctype],) + sig = f'{a} : rank-0 array({c2py_map[ctype]},\'{c2pycode_map[ctype]}\')' elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) @@ 
-452,17 +450,16 @@ def getinit(a, var): ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) except Exception: raise ValueError( - 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) + f'getinit: expected complex number `(r,i)\' but got `{init}\' as initial value of {a!r}.') if isarray(var): - init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( - ret['init.r'], ret['init.i']) + init = f"(capi_c.r={ret['init.r']},capi_c.i={ret['init.i']},capi_c)" elif isstring(var): if not init: init, showinit = '""', "''" if init[0] == "'": init = '"%s"' % (init[1:-1].replace('"', '\\"')) if init[0] == '"': - showinit = "'%s'" % (init[1:-1]) + showinit = f"'{init[1:-1]}'" return init, showinit @@ -499,7 +496,7 @@ def sign2map(a, var): intent_flags = [] for f, s in isintent_dict.items(): if f(var): - intent_flags.append('F2PY_%s' % s) + intent_flags.append(f'F2PY_{s}') if intent_flags: # TODO: Evaluate intent_flags here. ret['intent'] = '|'.join(intent_flags) @@ -555,29 +552,27 @@ def sign2map(a, var): if il[i](var): rl.append(il[i + 1]) if isstring(var): - rl.append('slen(%s)=%s' % (a, ret['length'])) + rl.append(f"slen({a})={ret['length']}") if isarray(var): ddim = ','.join( - map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) - rl.append('dims(%s)' % ddim) + map(lambda x, y: f'{x}|{y}', var['dimension'], dim)) + rl.append(f'dims({ddim})') if isexternal(var): - ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( - a, ret['cbname'], ','.join(rl)) + ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{','.join(rl)}" else: ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( ret['ctype'], a, ret['showinit'], ','.join(rl)) if isscalar(var): if ret['ctype'] in cformat_map: - ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) + ret['vardebugshowvalue'] = f"debug-capi:{a}={cformat_map[ret['ctype']]}" if isstring(var): ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( a, a) if 
isexternal(var): - ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) + ret['vardebugshowvalue'] = f'debug-capi:{a}=%p' if ret['ctype'] in cformat_map: - ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + ret['varshowvalue'] = f"#name#:{a}={cformat_map[ret['ctype']]}" + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isstring(var): ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) @@ -600,7 +595,7 @@ def routsign2map(rout): 'name_lower': name.lower(), 'NAME': name.upper(), 'begintitle': gentitle(name), - 'endtitle': gentitle('end of %s' % name), + 'endtitle': gentitle(f'end of {name}'), 'fortranname': fname, 'FORTRANNAME': fname.upper(), 'callstatement': getcallstatement(rout) or '', @@ -706,7 +701,7 @@ def cb_sign2map(a, var, index=None): ret['atype'] = c2capi_map[ret['ctype']] ret['elsize'] = get_elsize(var) if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isarray(var): ret = dictappend(ret, getarrdims(a, var)) ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) @@ -721,25 +716,21 @@ def cb_routsign2map(rout, um): name,begintitle,endtitle,argname ctype,rctype,maxnofargs,nofoptargs,returncptr """ - ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), + ret = {'name': f"cb_{rout['name']}_in_{um}", 'returncptr': ''} if isintent_callback(rout): if '_' in rout['name']: F_FUNC = 'F_FUNC_US' else: F_FUNC = 'F_FUNC' - ret['callbackname'] = '%s(%s,%s)' \ - % (F_FUNC, - rout['name'].lower(), - rout['name'].upper(), - ) + ret['callbackname'] = f"{F_FUNC}({rout['name'].lower()},{rout['name'].upper()})" ret['static'] = 'extern' else: ret['callbackname'] = ret['name'] ret['static'] = 'static' ret['argname'] = rout['name'] ret['begintitle'] = gentitle(ret['name']) - ret['endtitle'] = gentitle('end 
of %s' % ret['name']) + ret['endtitle'] = gentitle(f"end of {ret['name']}") ret['ctype'] = getctype(rout) ret['rctype'] = 'void' if ret['ctype'] == 'string': @@ -756,7 +747,7 @@ def cb_routsign2map(rout, um): else: ret['returncptr'] = 'return_value=' if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isstringfunction(rout): ret['strlength'] = getstrlength(rout) if isfunction(rout): @@ -777,10 +768,9 @@ def cb_routsign2map(rout, um): void #endif """ - else: - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] + elif hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] nofargs = 0 nofoptargs = 0 if 'args' in rout and 'vars' in rout: @@ -806,7 +796,7 @@ def common_sign2map(a, var): # obsolete ret['atype'] = c2capi_map[ret['ctype']] ret['elsize'] = get_elsize(var) if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isarray(var): ret = dictappend(ret, getarrdims(a, var)) elif isstring(var): diff --git a/blimgui/dist64/numpy/f2py/capi_maps.pyi b/blimgui/dist64/numpy/f2py/capi_maps.pyi new file mode 100644 index 0000000..9266003 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/capi_maps.pyi @@ -0,0 +1,33 @@ +from .auxfuncs import _ROut, _Var, process_f2cmap_dict + +__all__ = [ + "cb_routsign2map", + "cb_sign2map", + "common_sign2map", + "getarrdims", + "getarrdocsign", + "getctype", + "getinit", + "getpydocsign", + "getstrlength", + "modsign2map", + "process_f2cmap_dict", + "routsign2map", + "sign2map", +] + +### + +def getctype(var: _Var) -> str: ... +def f2cexpr(expr: str) -> str: ... +def getstrlength(var: _Var) -> str: ... +def getarrdims(a: str, var: _Var, verbose: int = 0) -> dict[str, str]: ... +def getpydocsign(a: str, var: _Var) -> tuple[str, str]: ... +def getarrdocsign(a: str, var: _Var) -> str: ... 
+def getinit(a: str, var: _Var) -> tuple[str, str]: ... +def sign2map(a: str, var: _Var) -> dict[str, str]: ... +def routsign2map(rout: _ROut) -> dict[str, str]: ... +def modsign2map(m: _ROut) -> dict[str, str]: ... +def cb_sign2map(a: str, var: _Var, index: object | None = None) -> dict[str, str]: ... +def cb_routsign2map(rout: _ROut, um: str) -> dict[str, str]: ... +def common_sign2map(a: str, var: _Var) -> dict[str, str]: ... # obsolete diff --git a/blimgui/dist64/numpy/f2py/cb_rules.py b/blimgui/dist64/numpy/f2py/cb_rules.py index 649fdbc..9803e68 100644 --- a/blimgui/dist64/numpy/f2py/cb_rules.py +++ b/blimgui/dist64/numpy/f2py/cb_rules.py @@ -8,16 +8,39 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -from . import __version__ +from . import __version__, cfuncs from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, - iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, - isintent_hide, isintent_in, isintent_inout, isintent_nothide, - isintent_out, isoptional, isrequired, isscalar, isstring, - isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, - stripcomma, throw_error + applyrules, + debugcapi, + dictappend, + errmess, + getargs, + hasnote, + isarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + isfunction, + isintent_c, + isintent_hide, + isintent_in, + isintent_inout, + isintent_nothide, + isintent_out, + isoptional, + isrequired, + isscalar, + isstring, + isstringfunction, + issubroutine, + l_and, + l_not, + l_or, + outmess, + replace, + stripcomma, + throw_error, ) -from . 
import cfuncs f2py_version = __version__.version @@ -384,11 +407,11 @@ def #argname#(#docsignature#): return #docreturn#\\n\\ ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, - {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))): + {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))): ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, {l_and(debugcapi, l_and(iscomplex, isintent_c)): ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, - {l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))): + {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))): ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, ], 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, @@ -513,14 +536,13 @@ def buildcallbacks(m): if b: buildcallback(b, m['name']) else: - errmess('warning: empty body for %s\n' % (m['name'])) + errmess(f"warning: empty body for {m['name']}\n") def buildcallback(rout, um): from . 
import capi_maps - outmess(' Constructing call-back function "cb_%s_in_%s"\n' % - (rout['name'], um)) + outmess(f" Constructing call-back function \"cb_{rout['name']}_in_{um}\"\n") args, depargs = getargs(rout) capi_maps.depargs = depargs var = rout['vars'] @@ -639,6 +661,5 @@ def buildcallback(rout, um): 'latexdocstr': ar['latexdocstr'], 'argname': rd['argname'] } - outmess(' %s\n' % (ar['docstrshort'])) - return + outmess(f" {ar['docstrshort']}\n") ################## Build call-back function ############# diff --git a/blimgui/dist64/numpy/f2py/cb_rules.pyi b/blimgui/dist64/numpy/f2py/cb_rules.pyi new file mode 100644 index 0000000..b22f544 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/cb_rules.pyi @@ -0,0 +1,17 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +## + +f2py_version: Final = version + +cb_routine_rules: Final[dict[str, str | list[str]]] = ... +cb_rout_rules: Final[list[dict[str, str | Any]]] = ... +cb_arg_rules: Final[list[dict[str, str | Any]]] = ... + +cb_map: Final[dict[str, list[list[str]]]] = ... + +def buildcallbacks(m: Mapping[str, object]) -> None: ... +def buildcallback(rout: Mapping[str, object], um: Mapping[str, object]) -> None: ... diff --git a/blimgui/dist64/numpy/f2py/cfuncs.py b/blimgui/dist64/numpy/f2py/cfuncs.py index 2e0ca7f..c33a0d9 100644 --- a/blimgui/dist64/numpy/f2py/cfuncs.py +++ b/blimgui/dist64/numpy/f2py/cfuncs.py @@ -9,8 +9,8 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -import sys import copy +import sys from . 
import __version__ @@ -28,6 +28,7 @@ def errmess(s: str) -> None: ##################### Definitions ################## + outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], 'userincludes': [], 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], @@ -597,32 +598,37 @@ def errmess(s: str) -> None: return ii; }""" cfuncs['forcomb'] = """ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { int k; if (dims==NULL) return 0; if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; } - forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; + cache->i[0] = cache->i_tr[nd-1] = -1; return 1; } -static int *nextforcomb(void) { +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; int j,*i,*i_tr,k; - int nd=forcombcache.nd; - if ((i=forcombcache.i) == NULL) return NULL; - if ((i_tr=forcombcache.i_tr) == NULL) return NULL; - if (forcombcache.d == NULL) return NULL; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; i[0]++; - if (i[0]==forcombcache.d[0]) { + if (i[0]==cache->d[0]) { j=1; - while ((jd[j]-1)) j++; if (j==nd) { free(i); free(i_tr); @@ -633,7 +639,7 @@ def errmess(s: str) -> None: i_tr[nd-j-1]++; } else i_tr[nd-1]++; - if 
(forcombcache.tr) return i_tr; + if (cache->tr) return i_tr; return i; }""" needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] @@ -1046,9 +1052,12 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) { - (*v) = *((npy_longdouble *)PyArray_DATA(obj)); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } } } if (double_from_pyobj(&d, obj, errmess)) { @@ -1130,10 +1139,13 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); - (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } } } if (complex_double_from_pyobj(&cd,obj,errmess)) { @@ -1438,14 +1450,14 @@ def errmess(s: str) -> None: def buildcfuncs(): from .capi_maps import c2capi_map for k in c2capi_map.keys(): - m = 'pyarr_from_p_%s1' % k + m = f'pyarr_from_p_{k}1' cppmacros[ - m] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (m, c2capi_map[k]) + m] = f'#define {m}(v) (PyArray_SimpleNewFromData(0,NULL,{c2capi_map[k]},(char *)v))' k = 'string' - m = 'pyarr_from_p_%s1' % k + m = f'pyarr_from_p_{k}1' # NPY_CHAR compatibility, NPY_STRING with itemsize 1 cppmacros[ - m] = '#define %s(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' % (m) + m] = f'#define {m}(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' 
############ Auxiliary functions for sorting needs ################### @@ -1477,7 +1489,7 @@ def append_needs(need, flag=1): elif need in commonhooks: n = 'commonhooks' else: - errmess('append_needs: unknown need %s\n' % (repr(need))) + errmess(f'append_needs: unknown need {repr(need)}\n') return if need in outneeds[n]: return @@ -1513,8 +1525,7 @@ def append_needs(need, flag=1): tmp[n].append(need) return tmp else: - errmess('append_needs: expected list or string but got :%s\n' % - (repr(need))) + errmess(f'append_needs: expected list or string but got :{repr(need)}\n') def get_needs(): diff --git a/blimgui/dist64/numpy/f2py/cfuncs.pyi b/blimgui/dist64/numpy/f2py/cfuncs.pyi new file mode 100644 index 0000000..5887177 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/cfuncs.pyi @@ -0,0 +1,31 @@ +from typing import Final, TypeAlias + +from .__version__ import version + +### + +_NeedListDict: TypeAlias = dict[str, list[str]] +_NeedDict: TypeAlias = dict[str, str] + +### + +f2py_version: Final = version + +outneeds: Final[_NeedListDict] = ... +needs: Final[_NeedListDict] = ... + +includes0: Final[_NeedDict] = ... +includes: Final[_NeedDict] = ... +userincludes: Final[_NeedDict] = ... +typedefs: Final[_NeedDict] = ... +typedefs_generated: Final[_NeedDict] = ... +cppmacros: Final[_NeedDict] = ... +cfuncs: Final[_NeedDict] = ... +callbacks: Final[_NeedDict] = ... +f90modhooks: Final[_NeedDict] = ... +commonhooks: Final[_NeedDict] = ... + +def errmess(s: str) -> None: ... +def buildcfuncs() -> None: ... +def get_needs() -> _NeedListDict: ... +def append_needs(need: str | list[str], flag: int = 1) -> _NeedListDict: ... diff --git a/blimgui/dist64/numpy/f2py/common_rules.py b/blimgui/dist64/numpy/f2py/common_rules.py index 7a86d10..206928e 100644 --- a/blimgui/dist64/numpy/f2py/common_rules.py +++ b/blimgui/dist64/numpy/f2py/common_rules.py @@ -9,13 +9,11 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ from . 
import __version__ + f2py_version = __version__.version -from .auxfuncs import ( - hasbody, hascommon, hasnote, isintent_hide, outmess, getuseblocks -) -from . import capi_maps -from . import func2subr +from . import capi_maps, func2subr +from .auxfuncs import getuseblocks, hasbody, hascommon, hasnote, isintent_hide, outmess from .crackfortran import rmbadname @@ -45,19 +43,19 @@ def buildhooks(m): fwrap = [''] def fadd(line, s=fwrap): - s[0] = '%s\n %s' % (s[0], line) + s[0] = f'{s[0]}\n {line}' chooks = [''] def cadd(line, s=chooks): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' ihooks = [''] def iadd(line, s=ihooks): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' doc = [''] def dadd(line, s=doc): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' for (name, vnames, vars) in findcommonblocks(m): lower_name = name.lower() hnames, inames = [], [] @@ -72,17 +70,17 @@ def dadd(line, s=doc): else: outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( name, ','.join(inames))) - fadd('subroutine f2pyinit%s(setupfunc)' % name) + fadd(f'subroutine f2pyinit{name}(setupfunc)') for usename in getuseblocks(m): fadd(f'use {usename}') fadd('external setupfunc') for n in vnames: fadd(func2subr.var2fixfortran(vars, n)) if name == '_BLNK_': - fadd('common %s' % (','.join(vnames))) + fadd(f"common {','.join(vnames)}") else: - fadd('common /%s/ %s' % (name, ','.join(vnames))) - fadd('call setupfunc(%s)' % (','.join(inames))) + fadd(f"common /{name}/ {','.join(vnames)}") + fadd(f"call setupfunc({','.join(inames)})") fadd('end\n') cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) idims = [] @@ -92,7 +90,7 @@ def dadd(line, s=doc): at = capi_maps.c2capi_map[ct] dm = capi_maps.getarrdims(n, vars[n]) if dm['dims']: - idims.append('(%s)' % (dm['dims'])) + idims.append(f"({dm['dims']})") else: idims.append('') dms = dm['dims'].strip() @@ -106,7 +104,7 @@ def dadd(line, s=doc): cadd('static void f2py_setup_%s(%s) {' % (name, 
inames1_tps)) cadd('\tint i_f2py=0;') for n in inames1: - cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n)) + cadd(f'\tf2py_{name}_def[i_f2py++].data = {n};') cadd('}') if '_' in lower_name: F_FUNC = 'F_FUNC_US' @@ -119,10 +117,9 @@ def dadd(line, s=doc): cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' % (F_FUNC, lower_name, name.upper(), name)) cadd('}\n') - iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name)) + iadd(f'\ttmp = PyFortranObject_New(f2py_{name}_def,f2py_init_{name});') iadd('\tif (tmp == NULL) return NULL;') - iadd('\tif (F2PyDict_SetItemString(d, \"%s\", tmp) == -1) return NULL;' - % name) + iadd(f'\tif (F2PyDict_SetItemString(d, "{name}", tmp) == -1) return NULL;') iadd('\tPy_DECREF(tmp);') tname = name.replace('_', '\\_') dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) @@ -134,10 +131,10 @@ def dadd(line, s=doc): note = vars[n]['note'] if isinstance(note, list): note = '\n'.join(note) - dadd('--- %s' % (note)) + dadd(f'--- {note}') dadd('\\end{description}') ret['docs'].append( - '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v + d, inames, idims)))) + f"\"\t/{name}/ {','.join(map(lambda v, d: v + d, inames, idims))}\\n\"") ret['commonhooks'] = chooks ret['initcommonhooks'] = ihooks ret['latexdoc'] = doc[0] diff --git a/blimgui/dist64/numpy/f2py/common_rules.pyi b/blimgui/dist64/numpy/f2py/common_rules.pyi new file mode 100644 index 0000000..d840de0 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/common_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +f2py_version: Final = version + +def findcommonblocks(block: Mapping[str, object], top: int = 1) -> list[tuple[str, list[str], dict[str, Any]]]: ... +def buildhooks(m: Mapping[str, object]) -> tuple[dict[str, Any], str]: ... 
diff --git a/blimgui/dist64/numpy/f2py/crackfortran.py b/blimgui/dist64/numpy/f2py/crackfortran.py index bf09f3d..8befbde 100644 --- a/blimgui/dist64/numpy/f2py/crackfortran.py +++ b/blimgui/dist64/numpy/f2py/crackfortran.py @@ -136,27 +136,27 @@ The above may be solved by creating appropriate preprocessor program, for example. """ -import sys -import string +import codecs +import copy import fileinput -import re import os -import copy import platform -import codecs +import re +import string +import sys from pathlib import Path + try: import charset_normalizer except ImportError: charset_normalizer = None -from . import __version__ +from . import __version__, symbolic # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * -from . import symbolic f2py_version = __version__.version @@ -242,6 +242,7 @@ def outmess(line, flag=1): sys.stdout.write(filepositiontext) sys.stdout.write(line) + re._MAXCACHE = 50 defaultimplicitrules = {} for c in "abcdefghopqrstuvwxyz$_": @@ -265,8 +266,7 @@ def outmess(line, flag=1): def rmbadname1(name): if name in badnames: - errmess('rmbadname1: Replacing "%s" with "%s".\n' % - (name, badnames[name])) + errmess(f'rmbadname1: Replacing "{name}" with "{badnames[name]}".\n') return badnames[name] return name @@ -277,8 +277,7 @@ def rmbadname(names): def undo_rmbadname1(name): if name in invbadnames: - errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' - % (name, invbadnames[name])) + errmess(f'undo_rmbadname1: Replacing "{name}" with "{invbadnames[name]}".\n') return invbadnames[name] return name @@ -416,7 +415,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): beginpattern = beginpattern90 outmess('\tReading file %s (format:%s%s)\n' % (repr(currentfilename), sourcecodeform, - strictf77 and ',strict' or '')) + (strictf77 and ',strict') or 
'')) l = l.expandtabs().replace('\xa0', ' ') # Get rid of newline characters @@ -472,7 +471,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: r = cont1.match(l) if r: - l = r.group('line') # Continuation follows .. + l = r.group('line') # Continuation follows .. if cont: ll = ll + cont2.match(l).group('line') finalline = '' @@ -520,7 +519,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): cont = (r is not None) else: raise ValueError( - "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) + f"Flag sourcecodeform must be either 'fix' or 'free': {repr(sourcecodeform)}") filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( fin.filelineno() - 1, currentfilename, l1) m = includeline.match(origfinalline) @@ -579,9 +578,10 @@ def readfortrancode(ffile, dowithline=show, istop=1): gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ beginpattern, quiet, verbose, dolowercase = saveglobals + # Crack line -beforethisafter = r'\s*(?P%s(?=\s*(\b(%s)\b)))' + \ - r'\s*(?P(\b(%s)\b))' + \ +beforethisafter = r'\s*(?P%s(?=\s*(\b(%s)\b)))'\ + r'\s*(?P(\b(%s)\b))'\ r'\s*(?P%s)\s*\Z' ## fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' @@ -600,7 +600,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): beginpattern77 = re.compile( beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' groupbegins90 = groupbegins77 + \ - r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \ + r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|'\ r'type(?!\s*\()' beginpattern90 = re.compile( beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' @@ -609,7 +609,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): endpattern = re.compile( beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end' # block, the Fortran 2008 construct needs special handling in the rest of 
the file -endifs = r'end\s*(if|do|where|select|while|forall|associate|' + \ +endifs = r'end\s*(if|do|where|select|while|forall|associate|'\ r'critical|enum|team)' endifpattern = re.compile( beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif' @@ -671,8 +671,8 @@ def split_by_unquoted(line, characters): r = re.compile( r"\A(?P({single_quoted}|{double_quoted}|{not_quoted})*)" r"(?P{char}.*)\Z".format( - not_quoted="[^\"'{}]".format(re.escape(characters)), - char="[{}]".format(re.escape(characters)), + not_quoted=f"[^\"'{re.escape(characters)}]", + char=f"[{re.escape(characters)}]", single_quoted=r"('([^'\\]|(\\.))*')", double_quoted=r'("([^"\\]|(\\.))*")')) m = r.match(line) @@ -689,6 +689,7 @@ def _simplifyargs(argsline): a.append(n) return ','.join(a) + crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) crackline_bind_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) crackline_bindlang = re.compile(r'\s*bind\(\s*(?P[^,]+)\s*,\s*name\s*=\s*"(?P[^"]+)"\s*\)', re.I) @@ -790,14 +791,13 @@ def crackline(line, reset=0): m2 = re_1.match(m1.group('before')) a = _simplifyargs(m1.group('args')) if m2: - line = 'callfun %s(%s) result (%s)' % ( - name, a, m2.group('result')) + line = f"callfun {name}({a}) result ({m2.group('result')})" else: - line = 'callfun %s(%s)' % (name, a) + line = f'callfun {name}({a})' m = callfunpattern[0].match(line) if not m: outmess( - 'crackline: could not resolve function call for line=%s.\n' % repr(line)) + f'crackline: could not resolve function call for line={repr(line)}.\n') return analyzeline(m, 'callfun', line) return @@ -919,12 +919,13 @@ def appenddecl(decl, decl2, force=1): pass elif k in ['intent', 'check', 'dimension', 'optional', 'required', 'depend']: - errmess('appenddecl: "%s" not implemented.\n' % k) + errmess(f'appenddecl: "{k}" not implemented.\n') else: raise Exception('appenddecl: Unknown variable definition key: ' + str(k)) return decl + selectpattern = re.compile( 
r'\s*(?P(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) typedefpattern = re.compile( @@ -1010,7 +1011,7 @@ def analyzeline(m, case, line): and not skipemptyends and groupcounter < 1: newname = os.path.basename(currentfilename).split('.')[0] outmess( - 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname) + f'analyzeline: no group yet. Creating program group with name "{newname}".\n') gotnextfile = 0 groupcounter = groupcounter + 1 groupname[groupcounter] = 'program' @@ -1033,7 +1034,7 @@ def analyzeline(m, case, line): block = 'abstract interface' if block == 'type': name, attrs, _ = _resolvetypedefpattern(m.group('after')) - groupcache[groupcounter]['vars'][name] = dict(attrspec = attrs) + groupcache[groupcounter]['vars'][name] = {'attrspec': attrs} args = [] result = None else: @@ -1123,13 +1124,12 @@ def analyzeline(m, case, line): groupcache[groupcounter]['result'] = result if groupcounter == 1: groupcache[groupcounter]['from'] = currentfilename + elif f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) else: - if f77modulename and groupcounter == 3: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], currentfilename) - else: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) for k in list(groupcache[groupcounter].keys()): if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] @@ -1159,7 +1159,7 @@ def analyzeline(m, case, line): if bindcline: bindcdat = re.search(crackline_bindlang, bindcline) if bindcdat: - groupcache[groupcounter]['bindlang'] = {name : {}} + groupcache[groupcounter]['bindlang'] = {name: {}} groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') if 
bindcdat.group('lang_name'): groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') @@ -1196,7 +1196,7 @@ def analyzeline(m, case, line): groupcounter = groupcounter - 1 # end interface elif case == 'entry': - name, args, result, _= _resolvenameargspattern(m.group('after')) + name, args, result, _ = _resolvenameargspattern(m.group('after')) if name is not None: if args: args = rmbadname([x.strip() @@ -1249,8 +1249,7 @@ def analyzeline(m, case, line): continue else: k = rmbadname1(m1.group('name')) - if case in ['public', 'private'] and \ - (k == 'operator' or k == 'assignment'): + if case in ['public', 'private'] and k in {'operator', 'assignment'}: k += m1.group('after') if k not in edecl: edecl[k] = {} @@ -1271,7 +1270,7 @@ def analyzeline(m, case, line): groupcache[groupcounter]['args'].append(k) else: errmess( - 'analyzeline: intent(callback) %s is ignored\n' % (k)) + f'analyzeline: intent(callback) {k} is ignored\n') else: errmess('analyzeline: intent(callback) %s is already' ' in argument list\n' % (k)) @@ -1306,7 +1305,7 @@ def analyzeline(m, case, line): k, initexpr = [x.strip() for x in e.split('=')] except Exception: outmess( - 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) + f'analyzeline: could not extract name,expr in parameter statement "{e}" of "{ll}\"\n') continue params = get_parameters(edecl) k = rmbadname1(k) @@ -1345,10 +1344,7 @@ def analyzeline(m, case, line): if m.group('after').strip().lower() == 'none': groupcache[groupcounter]['implicit'] = None elif m.group('after'): - if 'implicit' in groupcache[groupcounter]: - impl = groupcache[groupcounter]['implicit'] - else: - impl = {} + impl = groupcache[groupcounter].get('implicit', {}) if impl is None: outmess( 'analyzeline: Overwriting earlier "implicit none" statement.\n') @@ -1359,12 +1355,12 @@ def analyzeline(m, case, line): r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) if not m1: outmess( - 'analyzeline: 
could not extract info of implicit statement part "%s"\n' % (e)) + f'analyzeline: could not extract info of implicit statement part "{e}\"\n') continue m2 = typespattern4implicit.match(m1.group('this')) if not m2: outmess( - 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) + f'analyzeline: could not extract types pattern of implicit statement part "{e}\"\n') continue typespec, selector, attr, edecl = cracktypespec0( m2.group('this'), m2.group('after')) @@ -1383,13 +1379,13 @@ def analyzeline(m, case, line): begc, endc = [x.strip() for x in r.split('-')] except Exception: outmess( - 'analyzeline: expected "-" instead of "%s" in range list of implicit statement\n' % r) + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement\n') continue else: begc = endc = r.strip() if not len(begc) == len(endc) == 1: outmess( - 'analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n' % r) + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement (2)\n') continue for o in range(ord(begc), ord(endc) + 1): impl[chr(o)] = decl @@ -1432,15 +1428,13 @@ def analyzeline(m, case, line): vars = groupcache[groupcounter].get('vars', {}) last_name = None for l in ll: - l[0], l[1] = l[0].strip(), l[1].strip() - if l[0].startswith(','): - l[0] = l[0][1:] + l[0], l[1] = l[0].strip().removeprefix(','), l[1].strip() if l[0].startswith('('): - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0]) + outmess(f'analyzeline: implied-DO list "{l[0]}" is not supported. Skipping.\n') continue for idx, v in enumerate(rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')])): if v.startswith('('): - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) + outmess(f'analyzeline: implied-DO list "{v}" is not supported. Skipping.\n') # XXX: subsequent init expressions may get wrong values. 
# Ignoring since data statements are irrelevant for # wrapping. @@ -1451,14 +1445,14 @@ def analyzeline(m, case, line): # integer dimension(3) :: mytab # common /mycom/ mytab # Since in any case it is initialized in the Fortran code - outmess('Comment line in declaration "%s" is not supported. Skipping.\n' % l[1]) + outmess(f'Comment line in declaration "{l[1]}" is not supported. Skipping.\n') continue vars.setdefault(v, {}) vtype = vars[v].get('typespec') vdim = getdimension(vars[v]) matches = re.findall(r"\(.*?\)", l[1]) if vtype == 'complex' else l[1].split(',') try: - new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx] + new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx] except IndexError: # gh-24746 # Runs only if above code fails. Fixes the line @@ -1471,15 +1465,15 @@ def analyzeline(m, case, line): try: multiplier, value = match.split("*") expanded_list.extend([value.strip()] * int(multiplier)) - except ValueError: # if int(multiplier) fails + except ValueError: # if int(multiplier) fails expanded_list.append(match.strip()) else: expanded_list.append(match.strip()) matches = expanded_list - new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx] + new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx] current_val = vars[v].get('=') if current_val and (current_val != new_val): - outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (v, current_val, new_val)) + outmess(f'analyzeline: changing init expression of "{v}" ("{current_val}") to "{new_val}\"\n') vars[v]['='] = new_val last_name = v groupcache[groupcounter]['vars'] = vars @@ -1491,7 +1485,7 @@ def analyzeline(m, case, line): line = '//' + line cl = [] - [_, bn, ol] = re.split('/', line, maxsplit=2) + [_, bn, ol] = re.split('/', line, maxsplit=2) # noqa: RUF039 bn = bn.strip() if not bn: bn = '_BLNK_' @@ -1532,12 +1526,10 @@ def analyzeline(m, case, line): 'use').strip() else: outmess( - 'analyzeline: Not local=>use pattern 
found in %s\n' % repr(l)) + f'analyzeline: Not local=>use pattern found in {repr(l)}\n') else: rl[l] = l groupcache[groupcounter]['use'][name]['map'] = rl - else: - pass else: print(m.groupdict()) outmess('analyzeline: Could not crack the use statement.\n') @@ -1560,10 +1552,9 @@ def analyzeline(m, case, line): appendmultiline(groupcache[gc], previous_context[:2], m.group('this')) - else: - if verbose > 1: - print(m.groupdict()) - outmess('analyzeline: No code implemented for line.\n') + elif verbose > 1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') def appendmultiline(group, context_name, ml): @@ -1573,7 +1564,6 @@ def appendmultiline(group, context_name, ml): if context_name not in d: d[context_name] = [] d[context_name].append(ml) - return def cracktypespec0(typespec, ll): @@ -1601,6 +1591,8 @@ def cracktypespec0(typespec, ll): attr = ll[:i].strip() ll = ll[i + 2:] return typespec, selector, attr, ll + + ##### namepattern = re.compile(r'\s*(?P\b\w+\b)\s*(?P.*)\s*\Z', re.I) kindselector = re.compile( @@ -1632,7 +1624,7 @@ def removespaces(expr): def markinnerspaces(line): """ - The function replace all spaces in the input variable line which are + The function replace all spaces in the input variable line which are surrounded with quotation marks, with the triplet "@_@". For instance, for the input "a 'b c'" the function returns "a 'b@_@c'" @@ -1645,7 +1637,7 @@ def markinnerspaces(line): ------- str - """ + """ fragment = '' inside = False current_quote = None @@ -1703,7 +1695,7 @@ def updatevars(typespec, selector, attrspec, entitydecl): m = namepattern.match(e) if not m: outmess( - 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) + f'updatevars: no name pattern found for entity={repr(e)}. 
Skipping.\n') continue ename = rmbadname1(m.group('name')) edecl = {} @@ -1811,7 +1803,7 @@ def updatevars(typespec, selector, attrspec, entitydecl): edecl['='] = d1['init'] if 'array' in d1: - dm = 'dimension(%s)' % d1['array'] + dm = f"dimension({d1['array']})" if 'attrspec' not in edecl or (not edecl['attrspec']): edecl['attrspec'] = [dm] else: @@ -1845,7 +1837,7 @@ def cracktypespec(typespec, selector): kindselect = kindselector.match(selector) if not kindselect: outmess( - 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) + f'cracktypespec: no kindselector pattern found for {repr(selector)}\n') return kindselect = kindselect.groupdict() kindselect['*'] = kindselect['kind2'] @@ -1859,7 +1851,7 @@ def cracktypespec(typespec, selector): charselect = charselector.match(selector) if not charselect: outmess( - 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) + f'cracktypespec: no charselector pattern found for {repr(selector)}\n') return charselect = charselect.groupdict() charselect['*'] = charselect['charlen'] @@ -1890,8 +1882,7 @@ def cracktypespec(typespec, selector): outmess('cracktypespec: no typename found in %s\n' % (repr(typespec + selector))) else: - outmess('cracktypespec: no selector used for %s\n' % - (repr(selector))) + outmess(f'cracktypespec: no selector used for {repr(selector)}\n') return kindselect, charselect, typename ###### @@ -1964,7 +1955,7 @@ def setmesstext(block): global filepositiontext try: - filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) + filepositiontext = f"In: {block['from']}:{block['name']}\n" except Exception: pass @@ -1998,7 +1989,7 @@ def get_useparameters(block, param_map=None): continue # XXX: apply mapping if mapping: - errmess('get_useparameters: mapping for %s not impl.\n' % (mapping)) + errmess(f'get_useparameters: mapping for {mapping} not impl.\n') for k, v in list(params.items()): if k in param_map: outmess('get_useparameters: overriding parameter 
%s with' @@ -2018,7 +2009,7 @@ def postcrack2(block, tab='', param_map=None): for g in block] return ret setmesstext(block) - outmess('%sBlock: %s\n' % (tab, block['name']), 0) + outmess(f"{tab}Block: {block['name']}\n", 0) if param_map is None: param_map = get_useparameters(block) @@ -2065,7 +2056,7 @@ def postcrack(block, args=None, tab=''): raise Exception('postcrack: Expected block dictionary instead of ' + str(block)) if 'name' in block and not block['name'] == 'unknown_interface': - outmess('%sBlock: %s\n' % (tab, block['name']), 0) + outmess(f"{tab}Block: {block['name']}\n", 0) block = analyzeargs(block) block = analyzecommon(block) block['vars'] = analyzevars(block) @@ -2097,9 +2088,9 @@ def postcrack(block, args=None, tab=''): mname = 'unknown__user__routines' if mname in userisdefined: i = 1 - while '%s_%i' % (mname, i) in userisdefined: + while f"{mname}_{i}" in userisdefined: i = i + 1 - mname = '%s_%i' % (mname, i) + mname = f"{mname}_{i}" interface = {'block': 'interface', 'body': [], 'vars': {}, 'name': name + '_user_interface'} for e in block['externals']: @@ -2122,9 +2113,8 @@ def postcrack(block, args=None, tab=''): del interfaced[interfaced.index(e)] break interface['body'].append(edef) - else: - if e in mvars and not isexternal(mvars[e]): - interface['vars'][e] = mvars[e] + elif e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] if interface['vars'] or interface['body']: block['interfaced'] = interfaced mblock = {'block': 'python module', 'body': [ @@ -2188,22 +2178,21 @@ def analyzecommon(block): if n in block['vars']: if 'attrspec' in block['vars'][n]: block['vars'][n]['attrspec'].append( - 'dimension(%s)' % (','.join(dims))) + f"dimension({','.join(dims)})") else: block['vars'][n]['attrspec'] = [ - 'dimension(%s)' % (','.join(dims))] + f"dimension({','.join(dims)})"] + elif dims: + block['vars'][n] = { + 'attrspec': [f"dimension({','.join(dims)})"]} else: - if dims: - block['vars'][n] = { - 'attrspec': ['dimension(%s)' 
% (','.join(dims))]} - else: - block['vars'][n] = {} + block['vars'][n] = {} if n not in commonvars: commonvars.append(n) else: n = e errmess( - 'analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n' % (e, k)) + f'analyzecommon: failed to extract "[()]" from "{e}" in common /{k}/.\n') comvars.append(n) block['common'][k] = comvars if 'commonvars' not in block: @@ -2267,7 +2256,7 @@ def buildimplicitrules(block): implicitrules = None if verbose > 1: outmess( - 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) + f"buildimplicitrules: no implicit rules for routine {repr(block['name'])}.\n") else: for k in list(block['implicit'].keys()): if block['implicit'][k].get('typespec') not in ['static', 'automatic']: @@ -2282,7 +2271,8 @@ def myeval(e, g=None, l=None): r = eval(e, g, l) if type(r) in [int, float]: return r - raise ValueError('r=%r' % (r)) + raise ValueError(f'r={r!r}') + getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) @@ -2328,27 +2318,23 @@ def getlincoef(e, xset): # e = a*x+b ; x in xset try: m1 = re_1.match(e) while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 0, m1.group('after')) + ee = f"{m1.group('before')}({0}){m1.group('after')}" m1 = re_1.match(ee) b = myeval(ee, {}, {}) m1 = re_1.match(e) while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 1, m1.group('after')) + ee = f"{m1.group('before')}({1}){m1.group('after')}" m1 = re_1.match(ee) a = myeval(ee, {}, {}) - b m1 = re_1.match(e) while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 0.5, m1.group('after')) + ee = f"{m1.group('before')}({0.5}){m1.group('after')}" m1 = re_1.match(ee) c = myeval(ee, {}, {}) # computing another point to be sure that expression is linear m1 = re_1.match(e) while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 1.5, m1.group('after')) + ee = f"{m1.group('before')}({1.5}){m1.group('after')}" m1 = re_1.match(ee) c2 = myeval(ee, {}, {}) if (a * 0.5 + b == c and a * 1.5 + b == c2): @@ -2378,7 +2364,7 @@ def 
_get_depend_dict(name, vars, deps): if w not in words: words.append(w) else: - outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) + outmess(f'_get_depend_dict: no dependence info for {repr(name)}\n') words = [] deps[name] = words return words @@ -2448,11 +2434,10 @@ def _selected_real_kind_func(p, r=0, radix=0): if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')): if p <= 33: return 16 - else: - if p < 19: - return 10 - elif p <= 33: - return 16 + elif p < 19: + return 10 + elif p <= 33: + return 16 return -1 @@ -2508,7 +2493,7 @@ def get_parameters(vars, global_params={}): if not selected_kind_re.match(v): v_ = v.split('_') # In case there are additive parameters - if len(v_) > 1: + if len(v_) > 1: v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') # Currently this will not work for complex numbers. @@ -2598,7 +2583,7 @@ def analyzevars(block): del vars[''] if 'attrspec' in block['vars']['']: gen = block['vars']['']['attrspec'] - for n in set(vars) | set(b['name'] for b in block['body']): + for n in set(vars) | {b['name'] for b in block['body']}: for k in ['public', 'private']: if k in gen: vars[n] = setattrspec(vars.get(n, {}), k) @@ -2631,7 +2616,7 @@ def analyzevars(block): if n[0] in list(attrrules.keys()): vars[n] = setattrspec(vars[n], attrrules[n[0]]) if 'typespec' not in vars[n]: - if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if not ('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): if implicitrules: ln0 = n[0].lower() for k in list(implicitrules[ln0].keys()): @@ -2767,9 +2752,9 @@ def solve_v(s, a=a, b=b): # solve_v function here. solve_v = None all_symbols = set(dsize.symbols()) - v_deps = set( + v_deps = { s.data for s in all_symbols - if s.data in vars) + if s.data in vars} solver_and_deps[v] = solve_v, list(v_deps) # Note that dsize may contain symbols that are # not defined in block['vars']. 
Here we assume @@ -2941,7 +2926,7 @@ def compute_deps(v, deps): vars[n] = setattrspec(vars[n], 'recursive') else: outmess( - 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) + f"analyzevars: prefix ({repr(block['prefix'])}) were not used\n") if block['block'] not in ['module', 'pythonmodule', 'python module', 'block data']: if 'commonvars' in block: neededvars = copy.copy(block['args'] + block['commonvars']) @@ -3005,7 +2990,7 @@ def param_eval(v, g_params, params, dimspec=None): # This is an array parameter. # First, we parse the dimension information - if len(dimspec) < 2 or dimspec[::len(dimspec)-1] != "()": + if len(dimspec) < 2 or dimspec[::len(dimspec) - 1] != "()": raise ValueError(f'param_eval: dimension {dimspec} can\'t be parsed') dimrange = dimspec[1:-1].split(',') if len(dimrange) == 1: @@ -3014,11 +2999,11 @@ def param_eval(v, g_params, params, dimspec=None): # now, dimrange is a list of 1 or 2 elements if len(dimrange) == 1: bound = param_parse(dimrange[0], params) - dimrange = range(1, int(bound)+1) + dimrange = range(1, int(bound) + 1) else: lbound = param_parse(dimrange[0], params) ubound = param_parse(dimrange[1], params) - dimrange = range(int(lbound), int(ubound)+1) + dimrange = range(int(lbound), int(ubound) + 1) else: raise ValueError('param_eval: multidimensional array parameters ' f'{dimspec} not supported') @@ -3098,7 +3083,7 @@ def param_parse(d, params): if "(" in d: # this dimension expression is an array dname = d[:d.find("(")] - ddims = d[d.find("(")+1:d.rfind(")")] + ddims = d[d.find("(") + 1:d.rfind(")")] # this dimension expression is also a parameter; # parse it recursively index = int(param_parse(ddims, params)) @@ -3146,10 +3131,7 @@ def expr2name(a, block, args=[]): block['vars'][a] = at else: if a not in block['vars']: - if orig_a in block['vars']: - block['vars'][a] = block['vars'][orig_a] - else: - block['vars'][a] = {} + block['vars'][a] = block['vars'].get(orig_a, {}) if 'externals' in block and orig_a in 
block['externals'] + block['interfaced']: block['vars'][a] = setattrspec(block['vars'][a], 'external') return a @@ -3181,6 +3163,7 @@ def analyzeargs(block): block['vars'][block['result']] = {} return block + determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P\w+)|)\Z', re.I) determineexprtype_re_3 = re.compile( @@ -3211,13 +3194,13 @@ def determineexprtype(expr, vars, rules={}): if m: if 'name' in m.groupdict() and m.group('name'): outmess( - 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') return {'typespec': 'integer'} m = determineexprtype_re_3.match(expr) if m: if 'name' in m.groupdict() and m.group('name'): outmess( - 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') return {'typespec': 'real'} for op in ['+', '-', '*', '/']: for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: @@ -3240,7 +3223,7 @@ def determineexprtype(expr, vars, rules={}): return {'typespec': 'character', 'charselector': {'*': '*'}} if not t: outmess( - 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) + f'determineexprtype: could not determine expressions ({repr(expr)}) type.\n') return t ###### @@ -3276,7 +3259,7 @@ def crack2fortrangen(block, tab='\n', as_interface=False): if not isintent_callback(vars[a]): argsl.append(a) if block['block'] == 'function' or argsl: - args = '(%s)' % ','.join(argsl) + args = f"({','.join(argsl)})" f2pyenhancements = '' if 'f2pyenhancements' in block: for k in list(block['f2pyenhancements'].keys()): @@ -3299,7 +3282,7 @@ def crack2fortrangen(block, tab='\n', as_interface=False): name = '' result = '' if 'result' in block: - result = ' result (%s)' % block['result'] + result = f" result ({block['result']})" if block['result'] not 
in argsl: argsl.append(block['result']) body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) @@ -3307,12 +3290,11 @@ def crack2fortrangen(block, tab='\n', as_interface=False): block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) mess = '' if 'from' in block and not as_interface: - mess = '! in %s' % block['from'] + mess = f"! in {block['from']}" if 'entry' in block: entry_stmts = '' for k, i in list(block['entry'].items()): - entry_stmts = '%s%sentry %s(%s)' \ - % (entry_stmts, tab + tabchar, k, ','.join(i)) + entry_stmts = f"{entry_stmts}{tab + tabchar}entry {k}({','.join(i)})" body = body + entry_stmts if blocktype == 'block data' and name == '_BLOCK_DATA_': name = '' @@ -3325,30 +3307,30 @@ def common2fortran(common, tab=''): ret = '' for k in list(common.keys()): if k == '_BLNK_': - ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) + ret = f"{ret}{tab}common {','.join(common[k])}" else: - ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) + ret = f"{ret}{tab}common /{k}/ {','.join(common[k])}" return ret def use2fortran(use, tab=''): ret = '' for m in list(use.keys()): - ret = '%s%suse %s,' % (ret, tab, m) + ret = f'{ret}{tab}use {m},' if use[m] == {}: if ret and ret[-1] == ',': ret = ret[:-1] continue if 'only' in use[m] and use[m]['only']: - ret = '%s only:' % (ret) + ret = f'{ret} only:' if 'map' in use[m] and use[m]['map']: c = ' ' for k in list(use[m]['map'].keys()): if k == use[m]['map'][k]: - ret = '%s%s%s' % (ret, c, k) + ret = f'{ret}{c}{k}' c = ',' else: - ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) + ret = f"{ret}{c}{k}=>{use[m]['map'][k]}" c = ',' if ret and ret[-1] == ',': ret = ret[:-1] @@ -3360,7 +3342,7 @@ def true_intent_list(var): ret = [] for intent in lst: try: - f = globals()['isintent_%s' % intent] + f = globals()[f'isintent_{intent}'] except KeyError: pass else: @@ -3383,7 +3365,7 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): nout.append(a) else: 
errmess( - 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) + f'vars2fortran: Confused?!: "{a}" is not defined in vars.\n') if 'varnames' in block: nout.extend(block['varnames']) if not as_interface: @@ -3395,13 +3377,13 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): for d in vars[a]['depend']: if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: errmess( - 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) + f'vars2fortran: Warning: cross-dependence between variables "{a}" and "{d}\"\n') if 'externals' in block and a in block['externals']: if isintent_callback(vars[a]): - ret = '%s%sintent(callback) %s' % (ret, tab, a) - ret = '%s%sexternal %s' % (ret, tab, a) + ret = f'{ret}{tab}intent(callback) {a}' + ret = f'{ret}{tab}external {a}' if isoptional(vars[a]): - ret = '%s%soptional %s' % (ret, tab, a) + ret = f'{ret}{tab}optional {a}' if a in vars and 'typespec' not in vars[a]: continue cont = 1 @@ -3413,7 +3395,7 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): continue if a not in vars: show(vars) - outmess('vars2fortran: No definition for argument "%s".\n' % a) + outmess(f'vars2fortran: No definition for argument "{a}".\n') continue if a == block['name']: if block['block'] != 'function' or block.get('result'): @@ -3425,14 +3407,14 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): if 'typespec' not in vars[a]: if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: if a in args: - ret = '%s%sexternal %s' % (ret, tab, a) + ret = f'{ret}{tab}external {a}' continue show(vars[a]) - outmess('vars2fortran: No typespec for argument "%s".\n' % a) + outmess(f'vars2fortran: No typespec for argument "{a}".\n') continue vardef = vars[a]['typespec'] if vardef == 'type' and 'typename' in vars[a]: - vardef = '%s(%s)' % (vardef, vars[a]['typename']) + vardef = f"{vardef}({vars[a]['typename']})" selector = {} if 'kindselector' in vars[a]: selector = 
vars[a]['kindselector'] @@ -3440,18 +3422,17 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): selector = vars[a]['charselector'] if '*' in selector: if selector['*'] in ['*', ':']: - vardef = '%s*(%s)' % (vardef, selector['*']) + vardef = f"{vardef}*({selector['*']})" else: - vardef = '%s*%s' % (vardef, selector['*']) - else: - if 'len' in selector: - vardef = '%s(len=%s' % (vardef, selector['len']) - if 'kind' in selector: - vardef = '%s,kind=%s)' % (vardef, selector['kind']) - else: - vardef = '%s)' % (vardef) - elif 'kind' in selector: - vardef = '%s(kind=%s)' % (vardef, selector['kind']) + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" c = ' ' if 'attrspec' in vars[a]: attr = [l for l in vars[a]['attrspec'] @@ -3464,36 +3445,34 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): # intent(out) to resolve the conflict. 
attr.remove('intent(out)') if attr: - vardef = '%s, %s' % (vardef, ','.join(attr)) + vardef = f"{vardef}, {','.join(attr)}" c = ',' if 'dimension' in vars[a]: - vardef = '%s%sdimension(%s)' % ( - vardef, c, ','.join(vars[a]['dimension'])) + vardef = f"{vardef}{c}dimension({','.join(vars[a]['dimension'])})" c = ',' if 'intent' in vars[a]: lst = true_intent_list(vars[a]) if lst: - vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) + vardef = f"{vardef}{c}intent({','.join(lst)})" c = ',' if 'check' in vars[a]: - vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) + vardef = f"{vardef}{c}check({','.join(vars[a]['check'])})" c = ',' if 'depend' in vars[a]: - vardef = '%s%sdepend(%s)' % ( - vardef, c, ','.join(vars[a]['depend'])) + vardef = f"{vardef}{c}depend({','.join(vars[a]['depend'])})" c = ',' if '=' in vars[a]: v = vars[a]['='] if vars[a]['typespec'] in ['complex', 'double complex']: try: v = eval(v) - v = '(%s,%s)' % (v.real, v.imag) + v = f'({v.real},{v.imag})' except Exception: pass - vardef = '%s :: %s=%s' % (vardef, a, v) + vardef = f'{vardef} :: {a}={v}' else: - vardef = '%s :: %s' % (vardef, a) - ret = '%s%s%s' % (ret, tab, vardef) + vardef = f'{vardef} :: {a}' + ret = f'{ret}{tab}{vardef}' return ret ###### @@ -3587,16 +3566,16 @@ def visit(item, parents, result, *args, **kwargs): new_result = [] for index, value in enumerate(obj): new_index, new_item = traverse((index, value), visit, - parents=parents + [parent], - result=result, *args, **kwargs) + parents + [parent], result, + *args, **kwargs) if new_index is not None: new_result.append(new_item) elif isinstance(obj, dict): - new_result = dict() + new_result = {} for key, value in obj.items(): new_key, new_value = traverse((key, value), visit, - parents=parents + [parent], - result=result, *args, **kwargs) + parents + [parent], result, + *args, **kwargs) if new_key is not None: new_result[new_key] = new_value else: @@ -3712,7 +3691,7 @@ def fix_usage(varname, value): elif l == '-m': f3 
= 1 elif l[0] == '-': - errmess('Unknown option %s\n' % repr(l)) + errmess(f'Unknown option {repr(l)}\n') elif f2: f2 = 0 pyffilename = l @@ -3738,7 +3717,7 @@ def fix_usage(varname, value): postlist = crackfortran(files) if pyffilename: - outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) + outmess(f'Writing fortran code to file {repr(pyffilename)}\n', 0) pyf = crack2fortran(postlist) with open(pyffilename, 'w') as f: f.write(pyf) diff --git a/blimgui/dist64/numpy/f2py/crackfortran.pyi b/blimgui/dist64/numpy/f2py/crackfortran.pyi new file mode 100644 index 0000000..742d358 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/crackfortran.pyi @@ -0,0 +1,266 @@ +import re +from _typeshed import StrOrBytesPath, StrPath +from collections.abc import Callable, Iterable, Mapping +from typing import ( + IO, + Any, + Concatenate, + Final, + Literal as L, + Never, + ParamSpec, + TypeAlias, + overload, +) + +from .__version__ import version +from .auxfuncs import isintent_dict as isintent_dict + +### + +_Tss = ParamSpec("_Tss") + +_VisitResult: TypeAlias = list[Any] | dict[str, Any] | None +_VisitItem: TypeAlias = tuple[str | None, _VisitResult] +_VisitFunc: TypeAlias = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, _Tss], _VisitItem | None] + +### + +COMMON_FREE_EXTENSIONS: Final[list[str]] = ... +COMMON_FIXED_EXTENSIONS: Final[list[str]] = ... 
+ +f2py_version: Final = version +tabchar: Final[str] = " " + +f77modulename: str +pyffilename: str +sourcecodeform: L["fix", "free"] +strictf77: L[0, 1] +quiet: L[0, 1] +verbose: L[0, 1, 2] +skipemptyends: L[0, 1] +ignorecontains: L[1] +dolowercase: L[1] + +beginpattern: str | re.Pattern[str] +currentfilename: str +filepositiontext: str +expectbegin: L[0, 1] +gotnextfile: L[0, 1] +neededmodule: int +skipblocksuntil: int +groupcounter: int +groupname: dict[int, str] | str +groupcache: dict[int, dict[str, Any]] | None +grouplist: dict[int, list[dict[str, Any]]] | None +previous_context: tuple[str, str, int] | None + +f90modulevars: dict[str, dict[str, Any]] = {} +debug: list[Never] = [] +include_paths: list[str] = [] +onlyfuncs: list[str] = [] +skipfuncs: list[str] = [] +skipfunctions: Final[list[str]] = [] +usermodules: Final[list[dict[str, Any]]] = [] + +defaultimplicitrules: Final[dict[str, dict[str, str]]] = {} +badnames: Final[dict[str, str]] = {} +invbadnames: Final[dict[str, str]] = {} + +beforethisafter: Final[str] = ... +fortrantypes: Final[str] = ... +groupbegins77: Final[str] = ... +groupbegins90: Final[str] = ... +groupends: Final[str] = ... +endifs: Final[str] = ... +moduleprocedures: Final[str] = ... + +beginpattern77: Final[tuple[re.Pattern[str], L["begin"]]] = ... +beginpattern90: Final[tuple[re.Pattern[str], L["begin"]]] = ... +callpattern: Final[tuple[re.Pattern[str], L["call"]]] = ... +callfunpattern: Final[tuple[re.Pattern[str], L["callfun"]]] = ... +commonpattern: Final[tuple[re.Pattern[str], L["common"]]] = ... +containspattern: Final[tuple[re.Pattern[str], L["contains"]]] = ... +datapattern: Final[tuple[re.Pattern[str], L["data"]]] = ... +dimensionpattern: Final[tuple[re.Pattern[str], L["dimension"]]] = ... +endifpattern: Final[tuple[re.Pattern[str], L["endif"]]] = ... +endpattern: Final[tuple[re.Pattern[str], L["end"]]] = ... +entrypattern: Final[tuple[re.Pattern[str], L["entry"]]] = ... 
+externalpattern: Final[tuple[re.Pattern[str], L["external"]]] = ... +f2pyenhancementspattern: Final[tuple[re.Pattern[str], L["f2pyenhancements"]]] = ... +formatpattern: Final[tuple[re.Pattern[str], L["format"]]] = ... +functionpattern: Final[tuple[re.Pattern[str], L["begin"]]] = ... +implicitpattern: Final[tuple[re.Pattern[str], L["implicit"]]] = ... +intentpattern: Final[tuple[re.Pattern[str], L["intent"]]] = ... +intrinsicpattern: Final[tuple[re.Pattern[str], L["intrinsic"]]] = ... +optionalpattern: Final[tuple[re.Pattern[str], L["optional"]]] = ... +moduleprocedurepattern: Final[tuple[re.Pattern[str], L["moduleprocedure"]]] = ... +multilinepattern: Final[tuple[re.Pattern[str], L["multiline"]]] = ... +parameterpattern: Final[tuple[re.Pattern[str], L["parameter"]]] = ... +privatepattern: Final[tuple[re.Pattern[str], L["private"]]] = ... +publicpattern: Final[tuple[re.Pattern[str], L["public"]]] = ... +requiredpattern: Final[tuple[re.Pattern[str], L["required"]]] = ... +subroutinepattern: Final[tuple[re.Pattern[str], L["begin"]]] = ... +typespattern: Final[tuple[re.Pattern[str], L["type"]]] = ... +usepattern: Final[tuple[re.Pattern[str], L["use"]]] = ... + +analyzeargs_re_1: Final[re.Pattern[str]] = ... +callnameargspattern: Final[re.Pattern[str]] = ... +charselector: Final[re.Pattern[str]] = ... +crackline_bind_1: Final[re.Pattern[str]] = ... +crackline_bindlang: Final[re.Pattern[str]] = ... +crackline_re_1: Final[re.Pattern[str]] = ... +determineexprtype_re_1: Final[re.Pattern[str]] = ... +determineexprtype_re_2: Final[re.Pattern[str]] = ... +determineexprtype_re_3: Final[re.Pattern[str]] = ... +determineexprtype_re_4: Final[re.Pattern[str]] = ... +determineexprtype_re_5: Final[re.Pattern[str]] = ... +getlincoef_re_1: Final[re.Pattern[str]] = ... +kindselector: Final[re.Pattern[str]] = ... +lenarraypattern: Final[re.Pattern[str]] = ... +lenkindpattern: Final[re.Pattern[str]] = ... +namepattern: Final[re.Pattern[str]] = ... 
+nameargspattern: Final[re.Pattern[str]] = ... +operatorpattern: Final[re.Pattern[str]] = ... +real16pattern: Final[re.Pattern[str]] = ... +real8pattern: Final[re.Pattern[str]] = ... +selectpattern: Final[re.Pattern[str]] = ... +typedefpattern: Final[re.Pattern[str]] = ... +typespattern4implicit: Final[re.Pattern[str]] = ... +word_pattern: Final[re.Pattern[str]] = ... + +post_processing_hooks: Final[list[_VisitFunc[...]]] = [] + +# +def outmess(line: str, flag: int = 1) -> None: ... +def reset_global_f2py_vars() -> None: ... + +# +def rmbadname1(name: str) -> str: ... +def undo_rmbadname1(name: str) -> str: ... +def rmbadname(names: Iterable[str]) -> list[str]: ... +def undo_rmbadname(names: Iterable[str]) -> list[str]: ... + +# +def openhook(filename: StrPath, mode: str) -> IO[Any]: ... +def is_free_format(fname: StrPath) -> bool: ... +def readfortrancode( + ffile: StrOrBytesPath | Iterable[StrOrBytesPath], + dowithline: Callable[[str, int], object] = ..., + istop: int = 1, +) -> None: ... + +# +def split_by_unquoted(line: str, characters: str) -> tuple[str, str]: ... + +# +def crackline(line: str, reset: int = 0) -> None: ... +def markouterparen(line: str) -> str: ... +def markoutercomma(line: str, comma: str = ",") -> str: ... +def unmarkouterparen(line: str) -> str: ... +def appenddecl(decl: Mapping[str, object] | None, decl2: Mapping[str, object] | None, force: int = 1) -> dict[str, Any]: ... + +# +def parse_name_for_bind(line: str) -> tuple[str, str | None]: ... +def analyzeline(m: re.Match[str], case: str, line: str) -> None: ... +def appendmultiline(group: dict[str, Any], context_name: str, ml: str) -> None: ... +def cracktypespec0(typespec: str, ll: str | None) -> tuple[str, str | None, str | None, str | None]: ... + +# +def removespaces(expr: str) -> str: ... +def markinnerspaces(line: str) -> str: ... +def updatevars(typespec: str, selector: str | None, attrspec: str, entitydecl: str) -> str: ... 
+def cracktypespec(typespec: str, selector: str | None) -> tuple[dict[str, str] | None, dict[str, str] | None, str | None]: ... + +# +def setattrspec(decl: dict[str, list[str]], attr: str | None, force: int = 0) -> dict[str, list[str]]: ... +def setkindselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def setcharselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def getblockname(block: Mapping[str, object], unknown: str = "unknown") -> str: ... +def setmesstext(block: Mapping[str, object]) -> None: ... +def get_usedict(block: Mapping[str, object]) -> dict[str, str]: ... +def get_useparameters(block: Mapping[str, object], param_map: Mapping[str, str] | None = None) -> dict[str, str]: ... + +# +@overload +def postcrack2( + block: dict[str, Any], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> dict[str, str | Any]: ... +@overload +def postcrack2( + block: list[dict[str, Any]], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> list[dict[str, str | Any]]: ... + +# +@overload +def postcrack(block: dict[str, Any], args: Mapping[str, str] | None = None, tab: str = "") -> dict[str, Any]: ... +@overload +def postcrack(block: list[dict[str, str]], args: Mapping[str, str] | None = None, tab: str = "") -> list[dict[str, Any]]: ... + +# +def sortvarnames(vars: Mapping[str, object]) -> list[str]: ... +def analyzecommon(block: Mapping[str, object]) -> dict[str, Any]: ... +def analyzebody(block: Mapping[str, object], args: Mapping[str, str], tab: str = "") -> list[dict[str, Any]]: ... +def buildimplicitrules(block: Mapping[str, object]) -> tuple[dict[str, dict[str, str]], dict[str, str]]: ... +def myeval(e: str, g: object | None = None, l: object | None = None) -> float: ... + +# +def getlincoef(e: str, xset: set[str]) -> tuple[float | None, float | None, str | None]: ... 
+ +# +def get_sorted_names(vars: Mapping[str, Mapping[str, str]]) -> list[str]: ... +def get_parameters(vars: Mapping[str, Mapping[str, str]], global_params: dict[str, str] = {}) -> dict[str, str]: ... + +# +def analyzevars(block: Mapping[str, Any]) -> dict[str, dict[str, str]]: ... + +# +def param_eval(v: str, g_params: dict[str, Any], params: Mapping[str, object], dimspec: str | None = None) -> dict[str, Any]: ... +def param_parse(d: str, params: Mapping[str, str]) -> str: ... +def expr2name(a: str, block: Mapping[str, object], args: list[str] = []) -> str: ... +def analyzeargs(block: Mapping[str, object]) -> dict[str, Any]: ... + +# +def determineexprtype(expr: str, vars: Mapping[str, object], rules: dict[str, Any] = {}) -> dict[str, Any]: ... +def crack2fortrangen(block: Mapping[str, object], tab: str = "\n", as_interface: bool = False) -> str: ... +def common2fortran(common: Mapping[str, object], tab: str = "") -> str: ... +def use2fortran(use: Mapping[str, object], tab: str = "") -> str: ... +def true_intent_list(var: dict[str, list[str]]) -> list[str]: ... +def vars2fortran( + block: Mapping[str, Mapping[str, object]], + vars: Mapping[str, object], + args: Mapping[str, str], + tab: str = "", + as_interface: bool = False, +) -> str: ... + +# +def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[str, Any]]: ... +def crack2fortran(block: Mapping[str, Any]) -> str: ... + +# +def traverse( + obj: tuple[str | None, _VisitResult], + visit: _VisitFunc[_Tss], + parents: list[tuple[str | None, _VisitResult]] = [], + result: list[Any] | dict[str, Any] | None = None, + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> _VisitItem | _VisitResult: ... + +# +def character_backward_compatibility_hook( + item: _VisitItem, + parents: list[_VisitItem], + result: object, # ignored + *args: object, # ignored + **kwargs: object, # ignored +) -> _VisitItem | None: ... 
+ +# namespace pollution +c: str +n: str diff --git a/blimgui/dist64/numpy/f2py/diagnose.py b/blimgui/dist64/numpy/f2py/diagnose.py index f14dfe5..9b8045c 100644 --- a/blimgui/dist64/numpy/f2py/diagnose.py +++ b/blimgui/dist64/numpy/f2py/diagnose.py @@ -4,19 +4,13 @@ import tempfile -def run_command(cmd): - print('Running %r:' % (cmd)) - os.system(cmd) - print('------') - - def run(): _path = os.getcwd() os.chdir(tempfile.gettempdir()) print('------') - print('os.name=%r' % (os.name)) + print(f'os.name={os.name!r}') print('------') - print('sys.platform=%r' % (sys.platform)) + print(f'sys.platform={sys.platform!r}') print('------') print('sys.version:') print(sys.version) @@ -24,7 +18,7 @@ def run(): print('sys.prefix:') print(sys.prefix) print('------') - print('sys.path=%r' % (':'.join(sys.path))) + print(f"sys.path={':'.join(sys.path)!r}") print('------') try: @@ -54,8 +48,7 @@ def run(): if has_newnumpy: try: - print('Found new numpy version %r in %s' % - (numpy.__version__, numpy.__file__)) + print(f'Found new numpy version {numpy.__version__!r} in {numpy.__file__}') except Exception as msg: print('error:', msg) print('------') @@ -150,5 +143,7 @@ def run(): print('error:', msg) print('------') os.chdir(_path) + + if __name__ == "__main__": run() diff --git a/blimgui/dist64/numpy/f2py/diagnose.pyi b/blimgui/dist64/numpy/f2py/diagnose.pyi new file mode 100644 index 0000000..b88194a --- /dev/null +++ b/blimgui/dist64/numpy/f2py/diagnose.pyi @@ -0,0 +1 @@ +def run() -> None: ... diff --git a/blimgui/dist64/numpy/f2py/f2py2e.py b/blimgui/dist64/numpy/f2py/f2py2e.py index 06cc329..a239f1e 100644 --- a/blimgui/dist64/numpy/f2py/f2py2e.py +++ b/blimgui/dist64/numpy/f2py/f2py2e.py @@ -10,23 +10,26 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -import sys +import argparse import os import pprint import re -import argparse +import sys -from . import crackfortran -from . import rules -from . import cb_rules -from . import auxfuncs -from . 
import cfuncs -from . import f90mod_rules -from . import __version__ -from . import capi_maps -from .cfuncs import errmess from numpy.f2py._backends import f2py_build_generator +from . import ( + __version__, + auxfuncs, + capi_maps, + cb_rules, + cfuncs, + crackfortran, + f90mod_rules, + rules, +) +from .cfuncs import errmess + f2py_version = __version__.version numpy_version = __version__.version @@ -267,7 +270,7 @@ def scaninputline(inputline): elif l == '--skip-empty-wrappers': emptygen = False elif l[0] == '-': - errmess('Unknown option %s\n' % repr(l)) + errmess(f'Unknown option {repr(l)}\n') sys.exit() elif f2: f2 = 0 @@ -303,13 +306,13 @@ def scaninputline(inputline): sys.exit() if not os.path.isdir(buildpath): if not verbose: - outmess('Creating build directory %s\n' % (buildpath)) + outmess(f'Creating build directory {buildpath}\n') os.mkdir(buildpath) if signsfile: signsfile = os.path.join(buildpath, signsfile) if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: errmess( - 'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile)) + f'Signature file "{signsfile}" exists!!! 
Use --overwrite-signature to overwrite.\n') sys.exit() options['emptygen'] = emptygen @@ -351,7 +354,7 @@ def callcrackfortran(files, options): crackfortran.dolowercase = options['do-lower'] postlist = crackfortran.crackfortran(files) if 'signsfile' in options: - outmess('Saving signatures to file "%s"\n' % (options['signsfile'])) + outmess(f"Saving signatures to file \"{options['signsfile']}\"\n") pyf = crackfortran.crack2fortran(postlist) if options['signsfile'][-6:] == 'stdout': sys.stdout.write(pyf) @@ -360,13 +363,13 @@ def callcrackfortran(files, options): f.write(pyf) if options["coutput"] is None: for mod in postlist: - mod["coutput"] = "%smodule.c" % mod["name"] + mod["coutput"] = f"{mod['name']}module.c" else: for mod in postlist: mod["coutput"] = options["coutput"] if options["f2py_wrapper_output"] is None: for mod in postlist: - mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] + mod["f2py_wrapper_output"] = f"{mod['name']}-f2pywrappers.f" else: for mod in postlist: mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] @@ -375,6 +378,8 @@ def callcrackfortran(files, options): mod['gil_used'] = 'Py_MOD_GIL_USED' else: mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' + # gh-26718 Reset global + crackfortran.f77modulename = '' return postlist @@ -479,19 +484,19 @@ def run_main(comline_list): isusedby[u] = [] isusedby[u].append(plist['name']) for plist in postlist: - if plist['block'] == 'python module' and '__user__' in plist['name']: - if plist['name'] in isusedby: + module_name = plist['name'] + if plist['block'] == 'python module' and '__user__' in module_name: + if module_name in isusedby: # if not quiet: + usedby = ','.join(f'"{s}"' for s in isusedby[module_name]) outmess( - f'Skipping Makefile build for module "{plist["name"]}" ' - 'which is used by {}\n'.format( - ','.join(f'"{s}"' for s in isusedby[plist['name']]))) + f'Skipping Makefile build for module "{module_name}" ' + f'which is used by {usedby}\n') if 'signsfile' in options: if 
options['verbose'] > 1: outmess( 'Stopping. Edit the signature file and then run f2py on the signature file: ') - outmess('%s %s\n' % - (os.path.basename(sys.argv[0]), options['signsfile'])) + outmess(f"{os.path.basename(sys.argv[0])} {options['signsfile']}\n") return for plist in postlist: if plist['block'] != 'python module': @@ -539,7 +544,7 @@ def __call__(self, parser, namespace, values, option_string=None): include_paths_set = set(getattr(namespace, 'include_paths', []) or []) if option_string == "--include_paths": outmess("Use --include-paths or -I instead of --include_paths which will be removed") - if option_string == "--include-paths" or option_string == "--include_paths": + if option_string in {"--include-paths", "--include_paths"}: include_paths_set.update(values.split(':')) else: include_paths_set.add(values) @@ -676,10 +681,10 @@ def run_compile(): nv = vmap[ov] except KeyError: if ov not in vmap.values(): - print('Unknown vendor: "%s"' % (s[len(v):])) + print(f'Unknown vendor: "{s[len(v):]}"') nv = ov i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv + flib_flags[i] = '--fcompiler=' + nv # noqa: B909 continue for s in del_list: i = flib_flags.index(s) diff --git a/blimgui/dist64/numpy/f2py/f2py2e.pyi b/blimgui/dist64/numpy/f2py/f2py2e.pyi new file mode 100644 index 0000000..46794e5 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/f2py2e.pyi @@ -0,0 +1,74 @@ +import argparse +import pprint +from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence +from types import ModuleType +from typing import Any, Final, NotRequired, TypedDict, type_check_only +from typing_extensions import TypeVar, override + +from .__version__ import version +from .auxfuncs import _Bool, outmess as outmess + +### + +_KT = TypeVar("_KT", bound=Hashable) +_VT = TypeVar("_VT") + +@type_check_only +class _F2PyDict(TypedDict): + csrc: list[str] + h: list[str] + fsrc: NotRequired[list[str]] + ltx: NotRequired[list[str]] + +@type_check_only +class 
_PreparseResult(TypedDict): + dependencies: list[str] + backend: str + modulename: str + +### + +MESON_ONLY_VER: Final[bool] +f2py_version: Final = version +numpy_version: Final = version +__usage__: Final[str] + +show = pprint.pprint + +class CombineIncludePaths(argparse.Action): + @override + def __call__( + self, + /, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[str] | None, + option_string: str | None = None, + ) -> None: ... + +# +def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... +def run_compile() -> None: ... +def main() -> None: ... + +# +def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... +def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... +def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... +def dict_append(d_out: MutableMapping[_KT, _VT], d_in: Mapping[_KT, _VT]) -> None: ... +def filter_files( + prefix: str, + suffix: str, + files: Iterable[str], + remove_prefix: _Bool | None = None, +) -> tuple[list[str], list[str]]: ... +def get_prefix(module: ModuleType) -> str: ... +def get_newer_options(iline: Iterable[str]) -> tuple[list[str], Any, list[str]]: ... + +# +def f2py_parser() -> argparse.ArgumentParser: ... +def make_f2py_compile_parser() -> argparse.ArgumentParser: ... + +# +def preparse_sysargv() -> _PreparseResult: ... +def validate_modulename(pyf_files: Sequence[str], modulename: str = "untitled") -> str: ... diff --git a/blimgui/dist64/numpy/f2py/f90mod_rules.py b/blimgui/dist64/numpy/f2py/f90mod_rules.py index 55435fc..85c1399 100644 --- a/blimgui/dist64/numpy/f2py/f90mod_rules.py +++ b/blimgui/dist64/numpy/f2py/f90mod_rules.py @@ -14,14 +14,13 @@ import numpy as np -from . import capi_maps -from . import func2subr -from .crackfortran import undo_rmbadname, undo_rmbadname1 +from . 
import capi_maps, func2subr # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * +from .crackfortran import undo_rmbadname, undo_rmbadname1 options = {} @@ -39,6 +38,7 @@ def findf90modules(m): ret = ret + findf90modules(b) return ret + fgetdims1 = """\ external f2pysetdata logical ns @@ -89,11 +89,11 @@ def buildhooks(pymod): fhooks = [''] def fadd(line, s=fhooks): - s[0] = '%s\n %s' % (s[0], line) + s[0] = f'{s[0]}\n {line}' doc = [''] def dadd(line, s=doc): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' usenames = getuseblocks(pymod) for m in findf90modules(pymod): @@ -111,8 +111,7 @@ def dadd(line, s=doc): if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): onlyvars.append(n) mfargs.append(n) - outmess('\t\tConstructing F90 module support for "%s"...\n' % - (m['name'])) + outmess(f"\t\tConstructing F90 module support for \"{m['name']}\"...\n") if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") continue @@ -121,16 +120,20 @@ def dadd(line, s=doc): if m['name'] in usenames and containscommon(m): outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue if onlyvars: - outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) + outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") chooks = [''] def cadd(line, s=chooks): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' ihooks = [''] def iadd(line, s=ihooks): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' vrd 
= capi_maps.modsign2map(m) cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) @@ -162,29 +165,28 @@ def iadd(line, s=ihooks): note = var['note'] if isinstance(note, list): note = '\n'.join(note) - dadd('--- %s' % (note)) + dadd(f'--- {note}') if isallocatable(var): - fargs.append('f2py_%s_getdims_%s' % (m['name'], n)) + fargs.append(f"f2py_{m['name']}_getdims_{n}") efargs.append(fargs[-1]) sargs.append( - 'void (*%s)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)' % (n)) + f'void (*{n})(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') sargsp.append('void (*)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') - iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n)) - fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1])) - fadd('use %s, only: d => %s\n' % - (m['name'], undo_rmbadname1(n))) + iadd(f"\tf2py_{m['name']}_def[i_f2py++].func = {n};") + fadd(f'subroutine {fargs[-1]}(r,s,f2pysetdata,flag)') + fadd(f"use {m['name']}, only: d => {undo_rmbadname1(n)}\n") fadd('integer flag\n') fhooks[0] = fhooks[0] + fgetdims1 dms = range(1, int(dm['rank']) + 1) fadd(' allocate(d(%s))\n' % (','.join(['s(%s)' % i for i in dms]))) fhooks[0] = fhooks[0] + use_fgetdims2 - fadd('end subroutine %s' % (fargs[-1])) + fadd(f'end subroutine {fargs[-1]}') else: fargs.append(n) - sargs.append('char *%s' % (n)) + sargs.append(f'char *{n}') sargsp.append('char*') - iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n)) + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {n};") if onlyvars: dadd('\\end{description}') if hasbody(m): @@ -193,22 +195,21 @@ def iadd(line, s=ihooks): outmess("f90mod_rules.buildhooks:" f" skipping {b['block']} {b['name']}\n") continue - modobjs.append('%s()' % (b['name'])) + modobjs.append(f"{b['name']}()") b['modulename'] = m['name'] api, wrap = rules.buildapi(b) if isfunction(b): fhooks[0] = fhooks[0] + wrap - fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") 
ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + elif wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) else: - if wrap: - fhooks[0] = fhooks[0] + wrap - fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) - ifargs.append( - func2subr.createsubrwrapper(b, signature=1)) - else: - fargs.append(b['name']) - mfargs.append(fargs[-1]) + fargs.append(b['name']) + mfargs.append(fargs[-1]) api['externroutines'] = [] ar = applyrules(api, vrd) ar['docs'] = [] @@ -218,10 +219,9 @@ def iadd(line, s=ihooks): 'f2py_rout_#modulename#_%s_%s,' 'doc_f2py_rout_#modulename#_%s_%s},') % (b['name'], m['name'], b['name'], m['name'], b['name'])) - sargs.append('char *%s' % (b['name'])) + sargs.append(f"char *{b['name']}") sargsp.append('char *') - iadd('\tf2py_%s_def[i_f2py++].data = %s;' % - (m['name'], b['name'])) + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {b['name']};") cadd('\t{NULL}\n};\n') iadd('}') ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( @@ -240,26 +240,25 @@ def iadd(line, s=ihooks): ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( m['name'], m['name'], m['name'])] + ret['initf90modhooks'] fadd('') - fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name'])) + fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") if mfargs: for a in undo_rmbadname(mfargs): - fadd('use %s, only : %s' % (m['name'], a)) + fadd(f"use {m['name']}, only : {a}") if ifargs: fadd(' '.join(['interface'] + ifargs)) fadd('end interface') fadd('external f2pysetupfunc') if efargs: for a in undo_rmbadname(efargs): - fadd('external %s' % (a)) - fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs)))) - fadd('end subroutine f2pyinit%s\n' % (m['name'])) + fadd(f'external {a}') + fadd(f"call f2pysetupfunc({','.join(undo_rmbadname(fargs))})") + fadd(f"end subroutine f2pyinit{m['name']}\n") 
dadd('\n'.join(ret['latexdoc']).replace( r'\subsection{', r'\subsubsection{')) ret['latexdoc'] = [] - ret['docs'].append('"\t%s --- %s"' % (m['name'], - ','.join(undo_rmbadname(modobjs)))) + ret['docs'].append(f"\"\t{m['name']} --- {','.join(undo_rmbadname(modobjs))}\"") ret['routine_defs'] = '' ret['doc'] = [] diff --git a/blimgui/dist64/numpy/f2py/f90mod_rules.pyi b/blimgui/dist64/numpy/f2py/f90mod_rules.pyi new file mode 100644 index 0000000..4df004e --- /dev/null +++ b/blimgui/dist64/numpy/f2py/f90mod_rules.pyi @@ -0,0 +1,16 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .auxfuncs import isintent_dict as isintent_dict + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" + +options: Final[dict[str, bool]] + +fgetdims1: Final[str] = ... +fgetdims2: Final[str] = ... +fgetdims2_sa: Final[str] = ... + +def findf90modules(m: Mapping[str, object]) -> list[dict[str, Any]]: ... +def buildhooks(pymod: Mapping[str, object]) -> dict[str, Any]: ... 
diff --git a/blimgui/dist64/numpy/f2py/func2subr.py b/blimgui/dist64/numpy/f2py/func2subr.py index 1c5b9ef..f298bd1 100644 --- a/blimgui/dist64/numpy/f2py/func2subr.py +++ b/blimgui/dist64/numpy/f2py/func2subr.py @@ -11,28 +11,38 @@ """ import copy +from ._isocbind import isoc_kindmap from .auxfuncs import ( - getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, - isintent_out, islogicalfunction, ismoduleroutine, isscalar, - issubroutine, issubroutine_wrap, outmess, show + getfortranname, + isexternal, + isfunction, + isfunction_wrap, + isintent_in, + isintent_out, + islogicalfunction, + ismoduleroutine, + isscalar, + issubroutine, + issubroutine_wrap, + outmess, + show, ) -from ._isocbind import isoc_kindmap def var2fixfortran(vars, a, fa=None, f90mode=None): if fa is None: fa = a if a not in vars: show(vars) - outmess('var2fixfortran: No definition for argument "%s".\n' % a) + outmess(f'var2fixfortran: No definition for argument "{a}".\n') return '' if 'typespec' not in vars[a]: show(vars[a]) - outmess('var2fixfortran: No typespec for argument "%s".\n' % a) + outmess(f'var2fixfortran: No typespec for argument "{a}".\n') return '' vardef = vars[a]['typespec'] if vardef == 'type' and 'typename' in vars[a]: - vardef = '%s(%s)' % (vardef, vars[a]['typename']) + vardef = f"{vardef}({vars[a]['typename']})" selector = {} lk = '' if 'kindselector' in vars[a]: @@ -44,32 +54,30 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): if '*' in selector: if f90mode: if selector['*'] in ['*', ':', '(*)']: - vardef = '%s(len=*)' % (vardef) + vardef = f'{vardef}(len=*)' else: - vardef = '%s(%s=%s)' % (vardef, lk, selector['*']) + vardef = f"{vardef}({lk}={selector['*']})" + elif selector['*'] in ['*', ':']: + vardef = f"{vardef}*({selector['*']})" else: - if selector['*'] in ['*', ':']: - vardef = '%s*(%s)' % (vardef, selector['*']) - else: - vardef = '%s*%s' % (vardef, selector['*']) - else: - if 'len' in selector: - vardef = '%s(len=%s' % (vardef, 
selector['len']) - if 'kind' in selector: - vardef = '%s,kind=%s)' % (vardef, selector['kind']) - else: - vardef = '%s)' % (vardef) - elif 'kind' in selector: - vardef = '%s(kind=%s)' % (vardef, selector['kind']) + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" - vardef = '%s %s' % (vardef, fa) + vardef = f'{vardef} {fa}' if 'dimension' in vars[a]: - vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) + vardef = f"{vardef}({','.join(vars[a]['dimension'])})" return vardef def useiso_c_binding(rout): useisoc = False - for key, value in rout['vars'].items(): + for value in rout['vars'].values(): kind_value = value.get('kindselector', {}).get('kind') if kind_value in isoc_kindmap: return True @@ -84,9 +92,9 @@ def createfuncwrapper(rout, signature=0): v = rout['vars'][a] for i, d in enumerate(v.get('dimension', [])): if d == ':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' extra_args.append(dn) vars[dn] = dv v['dimension'][i] = dn @@ -96,11 +104,11 @@ def createfuncwrapper(rout, signature=0): ret = [''] def add(line, ret=ret): - ret[0] = '%s\n %s' % (ret[0], line) + ret[0] = f'{ret[0]}\n {line}' name = rout['name'] fortranname = getfortranname(rout) f90mode = ismoduleroutine(rout) - newname = '%sf2pywrap' % (name) + newname = f'{name}f2pywrap' if newname not in vars: vars[newname] = vars[name] @@ -130,18 +138,17 @@ def add(line, ret=ret): sargs = sargs.replace(f"{name}, ", '') args = [arg for arg in args if arg != name] rout['args'] = args - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) + add(f"subroutine 
f2pywrap_{rout['modulename']}_{name} ({sargs})") if not signature: - add('use %s, only : %s' % (rout['modulename'], fortranname)) + add(f"use {rout['modulename']}, only : {fortranname}") if useisoc: add('use iso_c_binding') else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) + add(f'subroutine f2pywrap{name} ({sargs})') if useisoc: add('use iso_c_binding') if not need_interface: - add('external %s' % (fortranname)) + add(f'external {fortranname}') rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname if need_interface: @@ -153,7 +160,7 @@ def add(line, ret=ret): dumped_args = [] for a in args: if isexternal(vars[a]): - add('external %s' % (a)) + add(f'external {a}') dumped_args.append(a) for a in args: if a in dumped_args: @@ -189,11 +196,11 @@ def add(line, ret=ret): if not signature: if islogicalfunction(rout): - add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs)) + add(f'{newname} = .not.(.not.{fortranname}({sargs}))') else: - add('%s = %s(%s)' % (newname, fortranname, sargs)) + add(f'{newname} = {fortranname}({sargs})') if f90mode: - add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") else: add('end') return ret[0] @@ -208,9 +215,9 @@ def createsubrwrapper(rout, signature=0): v = rout['vars'][a] for i, d in enumerate(v.get('dimension', [])): if d == ':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' extra_args.append(dn) vars[dn] = dv v['dimension'][i] = dn @@ -220,7 +227,7 @@ def createsubrwrapper(rout, signature=0): ret = [''] def add(line, ret=ret): - ret[0] = '%s\n %s' % (ret[0], line) + ret[0] = f'{ret[0]}\n {line}' name = rout['name'] fortranname = getfortranname(rout) f90mode = ismoduleroutine(rout) @@ -230,18 +237,17 @@ def add(line, ret=ret): useisoc = useiso_c_binding(rout) sargs = 
', '.join(args) if f90mode: - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) + add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") if useisoc: add('use iso_c_binding') if not signature: - add('use %s, only : %s' % (rout['modulename'], fortranname)) + add(f"use {rout['modulename']}, only : {fortranname}") else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) + add(f'subroutine f2pywrap{name} ({sargs})') if useisoc: add('use iso_c_binding') if not need_interface: - add('external %s' % (fortranname)) + add(f'external {fortranname}') if need_interface: for line in rout['saved_interface'].split('\n'): @@ -251,7 +257,7 @@ def add(line, ret=ret): dumped_args = [] for a in args: if isexternal(vars[a]): - add('external %s' % (a)) + add(f'external {a}') dumped_args.append(a) for a in args: if a in dumped_args: @@ -279,9 +285,9 @@ def add(line, ret=ret): sargs = ', '.join([a for a in args if a not in extra_args]) if not signature: - add('call %s(%s)' % (fortranname, sargs)) + add(f'call {fortranname}({sargs})') if f90mode: - add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") else: add('end') return ret[0] @@ -310,7 +316,7 @@ def assubr(rout): flag = 0 break if flag: - fvar['intent'].append('out=%s' % (rname)) + fvar['intent'].append(f'out={rname}') rout['args'][:] = [fname] + rout['args'] return rout, createfuncwrapper(rout) if issubroutine_wrap(rout): diff --git a/blimgui/dist64/numpy/f2py/func2subr.pyi b/blimgui/dist64/numpy/f2py/func2subr.pyi new file mode 100644 index 0000000..8d2b3db --- /dev/null +++ b/blimgui/dist64/numpy/f2py/func2subr.pyi @@ -0,0 +1,7 @@ +from .auxfuncs import _Bool, _ROut, _Var + +def var2fixfortran(vars: _Var, a: str, fa: str | None = None, f90mode: _Bool | None = None) -> str: ... +def useiso_c_binding(rout: _ROut) -> bool: ... +def createfuncwrapper(rout: _ROut, signature: int = 0) -> str: ... 
+def createsubrwrapper(rout: _ROut, signature: int = 0) -> str: ... +def assubr(rout: _ROut) -> tuple[dict[str, str], str]: ... diff --git a/blimgui/dist64/numpy/f2py/rules.py b/blimgui/dist64/numpy/f2py/rules.py index 190462b..cbbad06 100644 --- a/blimgui/dist64/numpy/f2py/rules.py +++ b/blimgui/dist64/numpy/f2py/rules.py @@ -46,42 +46,92 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ +import copy import os import sys import time -import copy from pathlib import Path # __version__.version is now the same as the NumPy version -from . import __version__ - +from . import ( + __version__, + capi_maps, + cfuncs, + common_rules, + f90mod_rules, + func2subr, + use_rules, +) from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, - hascallstatement, hasexternals, hasinitvalue, hasnote, - hasresultnote, isarray, isarrayofstrings, ischaracter, - ischaracterarray, ischaracter_or_characterarray, iscomplex, - iscomplexarray, iscomplexfunction, iscomplexfunction_warn, - isdummyroutine, isexternal, isfunction, isfunction_wrap, isint1, - isint1array, isintent_aux, isintent_c, isintent_callback, - isintent_copy, isintent_hide, isintent_inout, isintent_nothide, - isintent_out, isintent_overwrite, islogical, islong_complex, - islong_double, islong_doublefunction, islong_long, - islong_longfunction, ismoduleroutine, isoptional, isrequired, - isscalar, issigned_long_longarray, isstring, isstringarray, - isstringfunction, issubroutine, isattr_value, - issubroutine_wrap, isthreadsafe, isunsigned, isunsigned_char, - isunsigned_chararray, isunsigned_long_long, - isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, - l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper + applyrules, + debugcapi, + dictappend, + errmess, + gentitle, + getargs2, + hascallstatement, + hasexternals, + hasinitvalue, + hasnote, + hasresultnote, + isarray, + isarrayofstrings, + isattr_value, + ischaracter, + 
ischaracter_or_characterarray, + ischaracterarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + iscomplexfunction_warn, + isdummyroutine, + isexternal, + isfunction, + isfunction_wrap, + isint1, + isint1array, + isintent_aux, + isintent_c, + isintent_callback, + isintent_copy, + isintent_hide, + isintent_inout, + isintent_nothide, + isintent_out, + isintent_overwrite, + islogical, + islong_complex, + islong_double, + islong_doublefunction, + islong_long, + islong_longfunction, + ismoduleroutine, + isoptional, + isrequired, + isscalar, + issigned_long_longarray, + isstring, + isstringarray, + isstringfunction, + issubroutine, + issubroutine_wrap, + isthreadsafe, + isunsigned, + isunsigned_char, + isunsigned_chararray, + isunsigned_long_long, + isunsigned_long_longarray, + isunsigned_short, + isunsigned_shortarray, + l_and, + l_not, + l_or, + outmess, + replace, + requiresf90wrapper, + stripcomma, ) -from . import capi_maps -from . import cfuncs -from . import common_rules -from . import use_rules -from . import f90mod_rules -from . 
import func2subr - f2py_version = __version__.version numpy_version = __version__.version @@ -236,7 +286,7 @@ #initcommonhooks# #interface_usercode# -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m , #gil_used#); #endif @@ -608,21 +658,20 @@ }, 'decl': [' #ctype# #name#_return_value = NULL;', ' int #name#_return_value_len = 0;'], - 'callfortran':'#name#_return_value,#name#_return_value_len,', - 'callfortranroutine':[' #name#_return_value_len = #rlength#;', - ' if ((#name#_return_value = (string)malloc(' - + '#name#_return_value_len+1) == NULL) {', - ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', - ' f2py_success = 0;', - ' } else {', - " (#name#_return_value)[#name#_return_value_len] = '\\0';", - ' }', - ' if (f2py_success) {', - {hasexternals: """\ + 'callfortran': '#name#_return_value,#name#_return_value_len,', + 'callfortranroutine': [' #name#_return_value_len = #rlength#;', + ' if ((#name#_return_value = (string)malloc(#name#_return_value_len+1) == NULL) {', + ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', + ' f2py_success = 0;', + ' } else {', + " (#name#_return_value)[#name#_return_value_len] = '\\0';", + ' }', + ' if (f2py_success) {', + {hasexternals: """\ if (#setjmpbuf#) { f2py_success = 0; } else {"""}, - {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, """\ #ifdef USESCOMPAQFORTRAN (*f2py_func)(#callcompaqfortran#); @@ -630,17 +679,17 @@ (*f2py_func)(#callfortran#); #endif """, - {isthreadsafe: ' Py_END_ALLOW_THREADS'}, - {hasexternals: ' }'}, - {debugcapi: + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {debugcapi: ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, - ' } /* if (f2py_success) after (string)malloc */', + ' } /* if (f2py_success) after (string)malloc */', ], 'returnformat': '#rformat#', 'return': ',#name#_return_value', 
'freemem': ' STRINGFREE(#name#_return_value);', 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], - '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + '_check': l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete }, { # Debugging 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', @@ -702,8 +751,8 @@ 'decl': [' #ctype# #varname# = NULL;', ' int slen(#varname#);', ], - 'need':['len..'], - '_check':isstring + 'need': ['len..'], + '_check': isstring }, # Array { # Common @@ -711,7 +760,7 @@ ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', ' const int #varname#_Rank = #rank#;', ], - 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + 'need': ['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], '_check': isarray }, # Scalararray @@ -820,7 +869,7 @@ 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))', 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, 'need': ['#cbname#', 'setjmp.h'], - '_check':isexternal + '_check': isexternal }, { 'frompyobj': [{l_not(isintent_callback): """\ @@ -874,8 +923,8 @@ Py_DECREF(#varname#_cb.args_capi); }""", 'need': ['SWAP', 'create_cb_arglist'], - '_check':isexternal, - '_depend':'' + '_check': isexternal, + '_depend': '' }, # Scalars (not complex) { # Common @@ -993,9 +1042,9 @@ 'decl': [' #ctype# #varname# = NULL;', ' int slen(#varname#);', ' PyObject *#varname#_capi = Py_None;'], - 'callfortran':'#varname#,', - 'callfortranappend':'slen(#varname#),', - 'pyobjfrom':[ + 'callfortran': '#varname#,', + 'callfortranappend': 'slen(#varname#),', + 'pyobjfrom': [ {debugcapi: ' fprintf(stderr,' '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, @@ -1024,8 +1073,8 @@ } /*if (f2py_success) of #varname#*/""", 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE', {l_not(isintent_c): 'STRINGPADN'}], - '_check':isstring, - '_depend':'' + '_check': isstring, + '_depend': '' }, { # Not hidden 'argformat': 
{isrequired: 'O'}, 'keyformat': {isoptional: 'O'}, @@ -1058,7 +1107,7 @@ ' int capi_#varname#_intent = 0;', {isstringarray: ' int slen(#varname#) = 0;'}, ], - 'callfortran':'#varname#,', + 'callfortran': '#varname#,', 'callfortranappend': {isstringarray: 'slen(#varname#),'}, 'return': {isintent_out: ',capi_#varname#_as_array'}, 'need': 'len..', @@ -1105,7 +1154,7 @@ 'frompyobj': [ ' #setdims#;', ' capi_#varname#_intent |= #intent#;', - (' const char * capi_errmess = "#modulename#.#pyname#:' + (' const char capi_errmess[] = "#modulename#.#pyname#:' ' failed to create array from the #nth# `#varname#`";'), {isintent_hide: ' capi_#varname#_as_array = ndarray_from_pyobj(' @@ -1135,9 +1184,10 @@ """\ int *_i,capi_i=0; CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); - if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), PyArray_NDIM(capi_#varname#_as_array),1)) { - while ((_i = nextforcomb())) + while ((_i = nextforcomb(&cache))) #varname#[capi_i++] = #init#; /* fortran way */ } else { PyObject *exc, *val, *tb; @@ -1252,7 +1302,7 @@ def buildmodule(m, um): """ Return """ - outmess(' Building module "%s"...\n' % (m['name'])) + outmess(f" Building module \"{m['name']}\"...\n") ret = {} mod_rules = defmod_rules[:] vrd = capi_maps.modsign2map(m) @@ -1272,7 +1322,7 @@ def buildmodule(m, um): if not nb: print( - 'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n), file=sys.stderr) + f'buildmodule: Could not find the body of interfaced routine "{n}". 
Skipping.\n', file=sys.stderr) continue nb_list = [nb] if 'entry' in nb: @@ -1331,7 +1381,7 @@ def buildmodule(m, um): needs = cfuncs.get_needs() # Add mapped definitions - needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped # + needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped # if cvar in typedef_need_dict.values()] code = {} for n in needs.keys(): @@ -1359,7 +1409,7 @@ def buildmodule(m, um): elif k in cfuncs.commonhooks: c = cfuncs.commonhooks[k] else: - errmess('buildmodule: unknown need %s.\n' % (repr(k))) + errmess(f'buildmodule: unknown need {repr(k)}.\n') continue code[n].append(c) mod_rules.append(code) @@ -1373,7 +1423,7 @@ def buildmodule(m, um): ret['csrc'] = fn with open(fn, 'w') as f: f.write(ar['modulebody'].replace('\t', 2 * ' ')) - outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) + outmess(f" Wrote C/API module \"{m['name']}\" to file \"{fn}\"\n") if options['dorestdoc']: fn = os.path.join( @@ -1389,7 +1439,7 @@ def buildmodule(m, um): ret['ltx'] = fn with open(fn, 'w') as f: f.write( - '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version)) + f'% This file is auto-generated with f2py (version:{f2py_version})\n') if 'shortlatex' not in options: f.write( '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') @@ -1404,7 +1454,7 @@ def buildmodule(m, um): with open(wn, 'w') as f: f.write('C -*- fortran -*-\n') f.write( - 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f'C This file is autogenerated with f2py (version:{f2py_version})\n') f.write( 'C It contains Fortran 77 wrappers to fortran functions.\n') lines = [] @@ -1421,15 +1471,15 @@ def buildmodule(m, um): lines.append(l + '\n') lines = ''.join(lines).replace('\n &\n', '\n') f.write(lines) - outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn)) + outmess(f' Fortran 77 wrappers are saved to "{wn}\"\n') if funcwrappers2: wn = os.path.join( - options['buildpath'], 
'%s-f2pywrappers2.f90' % (vrd['modulename'])) + options['buildpath'], f"{vrd['modulename']}-f2pywrappers2.f90") ret['fsrc'] = wn with open(wn, 'w') as f: f.write('! -*- f90 -*-\n') f.write( - '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f'! This file is autogenerated with f2py (version:{f2py_version})\n') f.write( '! It contains Fortran 90 wrappers to fortran functions.\n') lines = [] @@ -1448,11 +1498,12 @@ def buildmodule(m, um): lines.append(l + '\n') lines = ''.join(lines).replace('\n &\n', '\n') f.write(lines) - outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn)) + outmess(f' Fortran 90 wrappers are saved to "{wn}\"\n') return ret ################## Build C/API function ############# + stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} @@ -1467,7 +1518,7 @@ def buildapi(rout): outmess(' Constructing wrapper function "%s.%s"...\n' % (rout['modulename'], rout['name'])) else: - outmess(' Constructing wrapper function "%s"...\n' % (rout['name'])) + outmess(f" Constructing wrapper function \"{rout['name']}\"...\n") # Routine vrd = capi_maps.routsign2map(rout) rd = dictappend({}, vrd) @@ -1569,9 +1620,9 @@ def buildapi(rout): ar = applyrules(routine_rules, rd) if ismoduleroutine(rout): - outmess(' %s\n' % (ar['docshort'])) + outmess(f" {ar['docshort']}\n") else: - outmess(' %s\n' % (ar['docshort'])) + outmess(f" {ar['docshort']}\n") return ar, wrap diff --git a/blimgui/dist64/numpy/f2py/rules.pyi b/blimgui/dist64/numpy/f2py/rules.pyi new file mode 100644 index 0000000..30439f6 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/rules.pyi @@ -0,0 +1,41 @@ +from collections.abc import Callable, Iterable, Mapping +from typing import Any, Final, Literal as L, TypeAlias +from typing_extensions import TypeVar + +from .__version__ import version +from .auxfuncs import _Bool, _Var + +### + +_VT = TypeVar("_VT", default=str) + +_Predicate: TypeAlias = Callable[[_Var], _Bool] +_RuleDict: TypeAlias 
= dict[str, _VT] +_DefDict: TypeAlias = dict[_Predicate, _VT] + +### + +f2py_version: Final = version +numpy_version: Final = version + +options: Final[dict[str, bool]] = ... +sepdict: Final[dict[str, str]] = ... + +generationtime: Final[int] = ... +typedef_need_dict: Final[_DefDict[str]] = ... + +module_rules: Final[_RuleDict[str | list[str] | _RuleDict]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict | _RuleDict]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict]]] = ... +rout_rules: Final[list[_RuleDict[str | Any]]] = ... +aux_rules: Final[list[_RuleDict[str | Any]]] = ... +arg_rules: Final[list[_RuleDict[str | Any]]] = ... +check_rules: Final[list[_RuleDict[str | Any]]] = ... + +stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... + +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict: ... +def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict, str]: ... + +# namespace pollution +k: str diff --git a/blimgui/dist64/numpy/f2py/src/fortranobject.c b/blimgui/dist64/numpy/f2py/src/fortranobject.c index 5633654..53e6879 100644 --- a/blimgui/dist64/numpy/f2py/src/fortranobject.c +++ b/blimgui/dist64/numpy/f2py/src/fortranobject.c @@ -47,7 +47,7 @@ F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) "failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -87,7 +87,7 @@ F2PyGetThreadLocalCallbackPtr(char *key) "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -363,7 +363,9 @@ fortran_getattr(PyFortranObject *fp, char *name) { int i, j, k, flag; if (fp->dict != NULL) { - PyObject *v = 
_PyDict_GetItemStringWithError(fp->dict, name); + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); // noqa: borrowed-ref OK if (v == NULL && PyErr_Occurred()) { return NULL; } @@ -371,6 +373,17 @@ fortran_getattr(PyFortranObject *fp, char *name) Py_INCREF(v); return v; } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + } for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); i++) @@ -809,7 +822,7 @@ get_elsize(PyObject *obj) { } else if (PyUnicode_Check(obj)) { return PyUnicode_GET_LENGTH(obj); } else if (PySequence_Check(obj)) { - PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); // noqa: borrowed-ref OK if (fast != NULL) { Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); int sz, elsize = 0; diff --git a/blimgui/dist64/numpy/f2py/symbolic.py b/blimgui/dist64/numpy/f2py/symbolic.py index 012ad35..47a3e6b 100644 --- a/blimgui/dist64/numpy/f2py/symbolic.py +++ b/blimgui/dist64/numpy/f2py/symbolic.py @@ -155,14 +155,14 @@ def ewarn(message): class Expr: - """Represents a Fortran expression as a op-data pair. + """Represents a Fortran expression as an op-data pair. Expr instances are hashable and sortable. """ @staticmethod def parse(s, language=Language.C): - """Parse a Fortran expression to a Expr. + """Parse a Fortran expression to an Expr. 
""" return fromstring(s, language=language) @@ -190,7 +190,7 @@ def __init__(self, op, data): # (default is 1) assert isinstance(data, tuple) and len(data) == 2 assert (isinstance(data[0], str) - and data[0][::len(data[0])-1] in ('""', "''", '@@')) + and data[0][::len(data[0]) - 1] in ('""', "''", '@@')) assert isinstance(data[1], (int, str)), data elif op is Op.SYMBOL: # data is any hashable object @@ -310,12 +310,11 @@ def tostring(self, parent_precedence=Precedence.NONE, op = ' + ' if coeff == 1: term = term.tostring(Precedence.SUM, language=language) + elif term == as_number(1): + term = str(coeff) else: - if term == as_number(1): - term = str(coeff) - else: - term = f'{coeff} * ' + term.tostring( - Precedence.PRODUCT, language=language) + term = f'{coeff} * ' + term.tostring( + Precedence.PRODUCT, language=language) if terms: terms.append(op) elif op == ' - ': @@ -570,7 +569,7 @@ def __call__(self, *args, **kwargs): # TODO: implement a method for deciding when __call__ should # return an INDEXING expression. 
return as_apply(self, *map(as_expr, args), - **dict((k, as_expr(v)) for k, v in kwargs.items())) + **{k: as_expr(v) for k, v in kwargs.items()}) def __getitem__(self, index): # Provided to support C indexing operations that .pyf files @@ -636,8 +635,8 @@ def substitute(self, symbols_map): if isinstance(target, Expr): target = target.substitute(symbols_map) args = tuple(a.substitute(symbols_map) for a in args) - kwargs = dict((k, v.substitute(symbols_map)) - for k, v in kwargs.items()) + kwargs = {k: v.substitute(symbols_map) + for k, v in kwargs.items()} return normalize(Expr(self.op, (target, args, kwargs))) if self.op is Op.INDEXING: func = self.data[0] @@ -693,8 +692,8 @@ def traverse(self, visit, *args, **kwargs): if isinstance(obj, Expr) else obj) operands = tuple(operand.traverse(visit, *args, **kwargs) for operand in self.data[1]) - kwoperands = dict((k, v.traverse(visit, *args, **kwargs)) - for k, v in self.data[2].items()) + kwoperands = {k: v.traverse(visit, *args, **kwargs) + for k, v in self.data[2].items()} return normalize(Expr(self.op, (func, operands, kwoperands))) elif self.op is Op.INDEXING: obj = self.data[0] @@ -866,9 +865,9 @@ def normalize(obj): t2, c2 = as_term_coeff(divisor) if isinstance(c1, integer_types) and isinstance(c2, integer_types): g = gcd(c1, c2) - c1, c2 = c1//g, c2//g + c1, c2 = c1 // g, c2 // g else: - c1, c2 = c1/c2, 1 + c1, c2 = c1 / c2, 1 if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV: numer = t1.data[1][0] * c1 @@ -1011,7 +1010,7 @@ def as_apply(func, *args, **kwargs): """ return Expr(Op.APPLY, (func, tuple(map(as_expr, args)), - dict((k, as_expr(v)) for k, v in kwargs.items()))) + {k: as_expr(v) for k, v in kwargs.items()})) def as_ternary(cond, expr1, expr2): @@ -1237,17 +1236,19 @@ def replace_parenthesis(s): i = mn_i j = s.find(right, i) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') while s.count(left, i + 1, j) != s.count(right, i + 1, j): j = s.find(right, j + 1) if j == -1: 
- raise ValueError(f'Mismatch of {left+right} parenthesis in {s!r}') + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left] k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@' - v = s[i+len(left):j] - r, d = replace_parenthesis(s[j+len(right):]) + v = s[i + len(left):j] + r, d = replace_parenthesis(s[j + len(right):]) d[k] = v return s[:i] + k + r, d @@ -1262,8 +1263,8 @@ def unreplace_parenthesis(s, d): """ for k, v in d.items(): p = _get_parenthesis_kind(k) - left = dict(ROUND='(', SQUARE='[', CURLY='{', ROUNDDIV='(/')[p] - right = dict(ROUND=')', SQUARE=']', CURLY='}', ROUNDDIV='/)')[p] + left = {'ROUND': '(', 'SQUARE': '[', 'CURLY': '{', 'ROUNDDIV': '(/'}[p] + right = {'ROUND': ')', 'SQUARE': ']', 'CURLY': '}', 'ROUNDDIV': '/)'}[p] s = s.replace(k, left + v + right) return s @@ -1479,7 +1480,7 @@ def restore(r): if isinstance(items, Expr): return items if paren in ['ROUNDDIV', 'SQUARE']: - # Expression is a array constructor + # Expression is an array constructor if isinstance(items, Expr): items = (items,) return as_array(items) @@ -1494,8 +1495,8 @@ def restore(r): if not isinstance(args, tuple): args = args, if paren == 'ROUND': - kwargs = dict((a.left, a.right) for a in args - if isinstance(a, _Pair)) + kwargs = {a.left: a.right for a in args + if isinstance(a, _Pair)} args = tuple(a for a in args if not isinstance(a, _Pair)) # Warning: this could also be Fortran indexing operation.. 
return as_apply(target, *args, **kwargs) diff --git a/blimgui/dist64/numpy/f2py/symbolic.pyi b/blimgui/dist64/numpy/f2py/symbolic.pyi new file mode 100644 index 0000000..06be2bb --- /dev/null +++ b/blimgui/dist64/numpy/f2py/symbolic.pyi @@ -0,0 +1,219 @@ +from collections.abc import Callable, Mapping +from enum import Enum +from typing import Any, Generic, Literal as L, ParamSpec, Self, TypeAlias, overload +from typing_extensions import TypeVar + +__all__ = ["Expr"] + +### + +_Tss = ParamSpec("_Tss") +_ExprT = TypeVar("_ExprT", bound=Expr) +_ExprT1 = TypeVar("_ExprT1", bound=Expr) +_ExprT2 = TypeVar("_ExprT2", bound=Expr) +_OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) +_LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) +_DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) +_LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) +_RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) + +_RelCOrPy: TypeAlias = L["==", "!=", "<", "<=", ">", ">="] +_RelFortran: TypeAlias = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] + +_ToExpr: TypeAlias = Expr | complex | str +_ToExprN: TypeAlias = _ToExpr | tuple[_ToExprN, ...] +_NestedString: TypeAlias = str | tuple[_NestedString, ...] | list[_NestedString] + +### + +class OpError(Exception): ... +class ExprWarning(UserWarning): ... + +class Language(Enum): + Python = 0 + Fortran = 1 + C = 2 + +class Op(Enum): + INTEGER = 10 + REAL = 12 + COMPLEX = 15 + STRING = 20 + ARRAY = 30 + SYMBOL = 40 + TERNARY = 100 + APPLY = 200 + INDEXING = 210 + CONCAT = 220 + RELATIONAL = 300 + TERMS = 1_000 + FACTORS = 2_000 + REF = 3_000 + DEREF = 3_001 + +class RelOp(Enum): + EQ = 1 + NE = 2 + LT = 3 + LE = 4 + GT = 5 + GE = 6 + + @overload + @classmethod + def fromstring(cls, s: _RelCOrPy, language: L[Language.C, Language.Python] = ...) -> RelOp: ... + @overload + @classmethod + def fromstring(cls, s: _RelFortran, language: L[Language.Fortran]) -> RelOp: ... 
+ + # + @overload + def tostring(self, /, language: L[Language.C, Language.Python] = ...) -> _RelCOrPy: ... + @overload + def tostring(self, /, language: L[Language.Fortran]) -> _RelFortran: ... + +class ArithOp(Enum): + POS = 1 + NEG = 2 + ADD = 3 + SUB = 4 + MUL = 5 + DIV = 6 + POW = 7 + +class Precedence(Enum): + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + +class Expr(Generic[_OpT_co, _DataT_co]): + op: _OpT_co + data: _DataT_co + + @staticmethod + def parse(s: str, language: Language = ...) -> Expr: ... + + # + def __init__(self, /, op: Op, data: _DataT_co) -> None: ... + + # + def __lt__(self, other: Expr, /) -> bool: ... + def __le__(self, other: Expr, /) -> bool: ... + def __gt__(self, other: Expr, /) -> bool: ... + def __ge__(self, other: Expr, /) -> bool: ... + + # + def __pos__(self, /) -> Self: ... + def __neg__(self, /) -> Expr: ... + + # + def __add__(self, other: Expr, /) -> Expr: ... + def __radd__(self, other: Expr, /) -> Expr: ... + + # + def __sub__(self, other: Expr, /) -> Expr: ... + def __rsub__(self, other: Expr, /) -> Expr: ... + + # + def __mul__(self, other: Expr, /) -> Expr: ... + def __rmul__(self, other: Expr, /) -> Expr: ... + + # + def __pow__(self, other: Expr, /) -> Expr: ... + + # + def __truediv__(self, other: Expr, /) -> Expr: ... + def __rtruediv__(self, other: Expr, /) -> Expr: ... + + # + def __floordiv__(self, other: Expr, /) -> Expr: ... + def __rfloordiv__(self, other: Expr, /) -> Expr: ... + + # + def __call__( + self, + /, + *args: _ToExprN, + **kwargs: _ToExprN, + ) -> Expr[L[Op.APPLY], tuple[Self, tuple[Expr, ...], dict[str, Expr]]]: ... + + # + @overload + def __getitem__(self, index: _ExprT | tuple[_ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, _ExprT]]: ... + @overload + def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... 
+ + # + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> Expr: ... + + # + @overload + def traverse(self, /, visit: Callable[_Tss, None], *args: _Tss.args, **kwargs: _Tss.kwargs) -> Expr: ... + @overload + def traverse(self, /, visit: Callable[_Tss, _ExprT], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _ExprT: ... + + # + def contains(self, /, other: Expr) -> bool: ... + + # + def symbols(self, /) -> set[Expr]: ... + def polynomial_atoms(self, /) -> set[Expr]: ... + + # + def linear_solve(self, /, symbol: Expr) -> tuple[Expr, Expr]: ... + + # + def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... + +class _Pair(Generic[_LeftT_co, _RightT_co]): + left: _LeftT_co + right: _RightT_co + + def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... + + # + @overload + def substitute(self: _Pair[_ExprT1, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + @overload + def substitute(self: _Pair[_ExprT1, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + @overload + def substitute(self: _Pair[object, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... + @overload + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... + +class _FromStringWorker(Generic[_LanguageT_co]): + language: _LanguageT_co + + original: str | None + quotes_map: dict[str, str] + + @overload + def __init__(self: _FromStringWorker[L[Language.C]], /, language: L[Language.C] = ...) -> None: ... + @overload + def __init__(self, /, language: _LanguageT_co) -> None: ... + + # + def finalize_string(self, /, s: str) -> str: ... + + # + def parse(self, /, inp: str) -> Expr | _Pair: ... + + # + @overload + def process(self, /, s: str, context: str = "expr") -> Expr | _Pair: ... + @overload + def process(self, /, s: list[str], context: str = "expr") -> list[Expr | _Pair]: ... 
+ @overload + def process(self, /, s: tuple[str, ...], context: str = "expr") -> tuple[Expr | _Pair, ...]: ... + @overload + def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... # noqa: ANN401 diff --git a/blimgui/dist64/numpy/f2py/tests/__init__.py b/blimgui/dist64/numpy/f2py/tests/__init__.py index 48797de..3b88525 100644 --- a/blimgui/dist64/numpy/f2py/tests/__init__.py +++ b/blimgui/dist64/numpy/f2py/tests/__init__.py @@ -1,6 +1,7 @@ -from numpy.testing import IS_WASM, IS_EDITABLE import pytest +from numpy.testing import IS_EDITABLE, IS_WASM + if IS_WASM: pytest.skip( "WASM/Pyodide does not use or support Fortran", diff --git a/blimgui/dist64/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/blimgui/dist64/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index cb2a52a..49e61f7 100644 --- a/blimgui/dist64/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/blimgui/dist64/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -223,7 +223,7 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/blimgui/dist64/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/blimgui/dist64/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 0000000..7692c82 --- /dev/null +++ b/blimgui/dist64/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine 
type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/blimgui/dist64/numpy/f2py/tests/test_abstract_interface.py b/blimgui/dist64/numpy/f2py/tests/test_abstract_interface.py index 7e7a51d..9c95978 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_abstract_interface.py +++ b/blimgui/dist64/numpy/f2py/tests/test_abstract_interface.py @@ -1,8 +1,10 @@ import pytest -from . import util + from numpy.f2py import crackfortran from numpy.testing import IS_WASM +from . import util + @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") @pytest.mark.slow diff --git a/blimgui/dist64/numpy/f2py/tests/test_array_from_pyobj.py b/blimgui/dist64/numpy/f2py/tests/test_array_from_pyobj.py index 23f1daf..e920568 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_array_from_pyobj.py +++ b/blimgui/dist64/numpy/f2py/tests/test_array_from_pyobj.py @@ -1,12 +1,13 @@ -import sys import copy import platform -import pytest +import sys from pathlib import Path -import numpy as np +import pytest +import numpy as np from numpy._core._type_aliases import c_names_dict as _c_names_dict + from . 
import util wrap = None @@ -20,7 +21,7 @@ def get_testdir(): testroot = Path(__file__).resolve().parent / "src" - return testroot / "array_from_pyobj" + return testroot / "array_from_pyobj" def setup_module(): """ @@ -33,7 +34,7 @@ def setup_module(): src = [ get_testdir() / "wrapmodule.c", ] - wrap = util.build_meson(src, module_name = "test_array_from_pyobj_ext") + wrap = util.build_meson(src, module_name="test_array_from_pyobj_ext") def flags_info(arr): @@ -82,10 +83,10 @@ def __getattr__(self, name): return self.__class__(self.intent_list + [name]) def __str__(self): - return "intent(%s)" % (",".join(self.intent_list)) + return f"intent({','.join(self.intent_list)})" def __repr__(self): - return "Intent(%r)" % (self.intent_list) + return f"Intent({self.intent_list!r})" def is_intent(self, *names): return all(name in self.intent_list for name in names) @@ -146,9 +147,9 @@ def is_intent_exact(self, *names): # and several tests fail as the alignment flag can be randomly true or false # when numpy gains an aligned allocator the tests could be enabled again # -# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. +# Furthermore, on macOS ARM64 and AIX, LONGDOUBLE is an alias for DOUBLE. 
if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) - and sys.platform != "win32" + and sys.platform not in ["win32", "aix"] and (platform.system(), platform.processor()) != ("Darwin", "arm")): _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ @@ -291,7 +292,7 @@ def __init__(self, typ, dims, intent, obj): else: self.pyarr = np.array( np.array(obj, dtype=typ.dtypechar).reshape(*dims), - order=self.intent.is_intent("c") and "C" or "F", + order=(self.intent.is_intent("c") and "C") or "F", ) assert self.pyarr.dtype == typ self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) diff --git a/blimgui/dist64/numpy/f2py/tests/test_assumed_shape.py b/blimgui/dist64/numpy/f2py/tests/test_assumed_shape.py index 7076892..cc1d18d 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_assumed_shape.py +++ b/blimgui/dist64/numpy/f2py/tests/test_assumed_shape.py @@ -1,7 +1,8 @@ import os -import pytest import tempfile +import pytest + from . import util diff --git a/blimgui/dist64/numpy/f2py/tests/test_block_docstring.py b/blimgui/dist64/numpy/f2py/tests/test_block_docstring.py index 55a12d7..8929ae1 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_block_docstring.py +++ b/blimgui/dist64/numpy/f2py/tests/test_block_docstring.py @@ -1,9 +1,11 @@ import sys + import pytest -from . import util from numpy.testing import IS_PYPY +from . 
import util + @pytest.mark.slow class TestBlockDocString(util.F2PyTest): diff --git a/blimgui/dist64/numpy/f2py/tests/test_callback.py b/blimgui/dist64/numpy/f2py/tests/test_callback.py index d90e36e..c3555d6 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_callback.py +++ b/blimgui/dist64/numpy/f2py/tests/test_callback.py @@ -1,21 +1,23 @@ import math -import textwrap +import platform import sys -import pytest +import textwrap import threading -import traceback import time -import platform +import traceback + +import pytest import numpy as np from numpy.testing import IS_PYPY + from . import util class TestF77Callback(util.F2PyTest): sources = [util.getpath("tests", "src", "callback", "foo.f")] - @pytest.mark.parametrize("name", "t,t2".split(",")) + @pytest.mark.parametrize("name", ["t", "t2"]) @pytest.mark.slow def test_all(self, name): self.check_function(name) @@ -61,7 +63,7 @@ def check_function(self, name): assert r == 6 r = t(lambda a: 5 + a, fun_extra_args=(7, )) assert r == 12 - r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, )) + r = t(math.degrees, fun_extra_args=(math.pi, )) assert r == 180 r = t(math.degrees, fun_extra_args=(math.pi, )) assert r == 180 @@ -241,7 +243,7 @@ class TestGH25211(util.F2PyTest): def test_gh25211(self): def bar(x): - return x*x + return x * x res = self.module.foo(bar) assert res == 110 diff --git a/blimgui/dist64/numpy/f2py/tests/test_character.py b/blimgui/dist64/numpy/f2py/tests/test_character.py index de25c89..b973b76 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_character.py +++ b/blimgui/dist64/numpy/f2py/tests/test_character.py @@ -1,8 +1,10 @@ -import pytest import textwrap -from numpy.testing import assert_array_equal, assert_equal, assert_raises + +import pytest + import numpy as np from numpy.f2py.tests import util +from numpy.testing import assert_array_equal, assert_equal, assert_raises @pytest.mark.slow @@ -15,7 +17,7 @@ class TestCharacterString(util.F2PyTest): code = '' for length in length_list: 
fsuffix = length - clength = dict(star='(*)').get(length, length) + clength = {'star': '(*)'}.get(length, length) code += textwrap.dedent(f""" @@ -538,13 +540,13 @@ def test_gh4519(self): f = getattr(self.module, self.fprefix + '_gh4519') for x, expected in [ - ('a', dict(shape=(), dtype=np.dtype('S1'))), - ('text', dict(shape=(), dtype=np.dtype('S4'))), + ('a', {'shape': (), 'dtype': np.dtype('S1')}), + ('text', {'shape': (), 'dtype': np.dtype('S4')}), (np.array(['1', '2', '3'], dtype='S1'), - dict(shape=(3,), dtype=np.dtype('S1'))), + {'shape': (3,), 'dtype': np.dtype('S1')}), (['1', '2', '34'], - dict(shape=(3,), dtype=np.dtype('S2'))), - (['', ''], dict(shape=(2,), dtype=np.dtype('S1')))]: + {'shape': (3,), 'dtype': np.dtype('S2')}), + (['', ''], {'shape': (2,), 'dtype': np.dtype('S1')})]: r = f(x) for k, v in expected.items(): assert_equal(getattr(r, k), v) @@ -587,7 +589,7 @@ def test_char(self): def test_char_arr(self): for out in (self.module.string_test.strarr, self.module.string_test.strarr77): - expected = (5,7) + expected = (5, 7) assert out.shape == expected expected = '|S12' assert out.dtype == expected @@ -607,7 +609,7 @@ def test_gh24662(self): a = np.array('hi', dtype='S32') self.module.string_inout_optional(a) assert "output string" in a.tobytes().decode() - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 aa = "Hi" self.module.string_inout_optional(aa) diff --git a/blimgui/dist64/numpy/f2py/tests/test_common.py b/blimgui/dist64/numpy/f2py/tests/test_common.py index af5430c..b88c9b8 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_common.py +++ b/blimgui/dist64/numpy/f2py/tests/test_common.py @@ -1,7 +1,10 @@ import pytest + import numpy as np + from . 
import util + @pytest.mark.slow class TestCommonBlock(util.F2PyTest): sources = [util.getpath("tests", "src", "common", "block.f")] diff --git a/blimgui/dist64/numpy/f2py/tests/test_crackfortran.py b/blimgui/dist64/numpy/f2py/tests/test_crackfortran.py index b44f079..005ec65 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_crackfortran.py +++ b/blimgui/dist64/numpy/f2py/tests/test_crackfortran.py @@ -1,13 +1,16 @@ +import contextlib import importlib +import io +import textwrap import time + import pytest + import numpy as np +from numpy.f2py import crackfortran from numpy.f2py.crackfortran import markinnerspaces, nameargspattern + from . import util -from numpy.f2py import crackfortran -import textwrap -import contextlib -import io class TestNoSpace(util.F2PyTest): @@ -263,7 +266,7 @@ def test_eval_scalar(self): assert eval_scalar('123', {}) == '123' assert eval_scalar('12 + 3', {}) == '15' - assert eval_scalar('a + b', dict(a=1, b=2)) == '3' + assert eval_scalar('a + b', {"a": 1, "b": 2}) == '3' assert eval_scalar('"123"', {}) == "'123'" @@ -360,9 +363,9 @@ class TestParamEval: # issue gh-11612, array parameter parsing def test_param_eval_nested(self): v = '(/3.14, 4./)' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {'dp': 8, 'intparamarray': {1: 3, 2: 5}, 'nested': {1: 1, 2: 2, 3: 3}} dimspec = '(2)' @@ -371,9 +374,9 @@ def test_param_eval_nested(self): def test_param_eval_nonstandard_range(self): v = '(/ 6, 3, 1 /)' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": 
crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {} dimspec = '(-1:1)' ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) @@ -381,9 +384,9 @@ def test_param_eval_nonstandard_range(self): def test_param_eval_empty_range(self): v = '6' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {} dimspec = '' pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, @@ -391,18 +394,18 @@ def test_param_eval_empty_range(self): def test_param_eval_non_array_param(self): v = '3.14_dp' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {} ret = crackfortran.param_eval(v, g_params, params, dimspec=None) assert ret == '3.14_dp' def test_param_eval_too_many_dims(self): v = 'reshape((/ (i, i=1, 250) /), (/5, 10, 5/))' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {} dimspec = '(0:4, 3:12, 5)' pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, diff --git a/blimgui/dist64/numpy/f2py/tests/test_data.py b/blimgui/dist64/numpy/f2py/tests/test_data.py index 3d4e2ce..eaf7bcc 100644 --- 
a/blimgui/dist64/numpy/f2py/tests/test_data.py +++ b/blimgui/dist64/numpy/f2py/tests/test_data.py @@ -1,8 +1,9 @@ import pytest + import numpy as np +from numpy.f2py.crackfortran import crackfortran from . import util -from numpy.f2py.crackfortran import crackfortran class TestData(util.F2PyTest): @@ -16,9 +17,9 @@ def test_data_stmts(self): assert self.module.cmplxdat.x == 1.5 assert self.module.cmplxdat.y == 2.0 assert self.module.cmplxdat.pi == 3.1415926535897932384626433832795028841971693993751058209749445923078164062 - assert self.module.cmplxdat.medium_ref_index == np.array(1.+0.j) + assert self.module.cmplxdat.medium_ref_index == np.array(1. + 0.j) assert np.all(self.module.cmplxdat.z == np.array([3.5, 7.0])) - assert np.all(self.module.cmplxdat.my_array == np.array([ 1.+2.j, -3.+4.j])) + assert np.all(self.module.cmplxdat.my_array == np.array([ 1. + 2.j, -3. + 4.j])) assert np.all(self.module.cmplxdat.my_real_array == np.array([ 1., 2., 3.])) assert np.all(self.module.cmplxdat.ref_index_one == np.array([13.0 + 21.0j])) assert np.all(self.module.cmplxdat.ref_index_two == np.array([-30.0 + 43.0j])) diff --git a/blimgui/dist64/numpy/f2py/tests/test_docs.py b/blimgui/dist64/numpy/f2py/tests/test_docs.py index 4d65412..574ea62 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_docs.py +++ b/blimgui/dist64/numpy/f2py/tests/test_docs.py @@ -1,8 +1,12 @@ +from pathlib import Path + import pytest + import numpy as np from numpy.testing import assert_array_equal, assert_equal + from . 
import util -from pathlib import Path + def get_docdir(): parents = Path(__file__).resolve().parents @@ -18,6 +22,7 @@ def get_docdir(): # Assumes that an editable install is used to run tests return parents[3] / "doc" / "source" / "f2py" / "code" + pytestmark = pytest.mark.skipif( not get_docdir().is_dir(), reason=f"Could not find f2py documentation sources" @@ -55,5 +60,7 @@ def test_ftype(self): ftype.data.x[1] = 45 assert_array_equal(ftype.data.x, np.array([1, 45, 3], dtype=np.float32)) + # gh-26718 Cleanup for repeated test runs + ftype.data.a = 0 # TODO: implement test methods for other example Fortran codes diff --git a/blimgui/dist64/numpy/f2py/tests/test_f2cmap.py b/blimgui/dist64/numpy/f2py/tests/test_f2cmap.py index 4faa465..64931df 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_f2cmap.py +++ b/blimgui/dist64/numpy/f2py/tests/test_f2cmap.py @@ -1,6 +1,8 @@ -from . import util import numpy as np +from . import util + + class TestF2Cmap(util.F2PyTest): sources = [ util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"), diff --git a/blimgui/dist64/numpy/f2py/tests/test_f2py2e.py b/blimgui/dist64/numpy/f2py/tests/test_f2py2e.py index e865c5a..6420cab 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_f2py2e.py +++ b/blimgui/dist64/numpy/f2py/tests/test_f2py2e.py @@ -1,19 +1,19 @@ +import platform import re import shlex import subprocess import sys import textwrap -from pathlib import Path from collections import namedtuple - -import platform +from pathlib import Path import pytest -from . import util from numpy.f2py.f2py2e import main as f2pycli from numpy.testing._private.utils import NOGIL_BUILD +from . 
import util + ####################### # F2PY Test utilities # ###################### @@ -30,6 +30,7 @@ def compiler_check_f2pycli(): # CLI utils and classes # ######################### + PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf") @@ -144,11 +145,10 @@ def test_gh22819_cli(capfd, gh22819_cli, monkeypatch): with util.switchdir(ipath.parent): f2pycli() gen_paths = [item.name for item in ipath.parent.rglob("*") if item.is_file()] - assert "blahmodule.c" not in gen_paths # shouldn't be generated + assert "blahmodule.c" not in gen_paths # shouldn't be generated assert "blah-f2pywrappers.f" not in gen_paths assert "test_22819-f2pywrappers.f" in gen_paths assert "test_22819module.c" in gen_paths - assert "Ignoring blah" def test_gh22819_many_pyf(capfd, gh22819_cli, monkeypatch): @@ -249,13 +249,13 @@ def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): out, _ = capfd.readouterr() assert "--fcompiler cannot be used with meson" in out monkeypatch.setattr( - sys, "argv", "f2py --help-link".split() + sys, "argv", ["f2py", "--help-link"] ) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "Use --dep for meson builds" in out - MNAME = "hi2" # Needs to be different for a new -c + MNAME = "hi2" # Needs to be different for a new -c monkeypatch.setattr( sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() ) @@ -673,6 +673,25 @@ def test_inclheader(capfd, hello_world_f90, monkeypatch): assert "#include " in ocmr assert "#include " in ocmr +@pytest.mark.skipif((platform.system() != 'Linux'), reason='Compiler required') +def test_cli_obj(capfd, hello_world_f90, monkeypatch): + """Ensures that the extra object can be specified when using meson backend + """ + ipath = Path(hello_world_f90) + mname = "blah" + odir = "tttmp" + obj = "extra.o" + monkeypatch.setattr(sys, "argv", + f'f2py --backend meson --build-dir {odir} -m {mname} -c {obj} {ipath}'.split()) + + with 
util.switchdir(ipath.parent): + Path(obj).touch() + compiler_check_f2pycli() + with Path(f"{odir}/meson.build").open() as mesonbuild: + mbld = mesonbuild.read() + assert "objects:" in mbld + assert f"'''{obj}'''" in mbld + def test_inclpath(): """Add to the include directories @@ -743,7 +762,7 @@ def test_version(capfd, monkeypatch): CLI :: -v """ - monkeypatch.setattr(sys, "argv", 'f2py -v'.split()) + monkeypatch.setattr(sys, "argv", ["f2py", "-v"]) # TODO: f2py2e should not call sys.exit() after printing the version with pytest.raises(SystemExit): f2pycli() diff --git a/blimgui/dist64/numpy/f2py/tests/test_isoc.py b/blimgui/dist64/numpy/f2py/tests/test_isoc.py index 356d6f2..8b5ef3e 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_isoc.py +++ b/blimgui/dist64/numpy/f2py/tests/test_isoc.py @@ -1,8 +1,11 @@ -from . import util -import numpy as np import pytest + +import numpy as np from numpy.testing import assert_allclose +from . import util + + class TestISOC(util.F2PyTest): sources = [ util.getpath("tests", "src", "isocintrin", "isoCtests.f90"), @@ -13,26 +16,26 @@ class TestISOC(util.F2PyTest): def test_c_double(self): out = self.module.coddity.c_add(1, 2) exp_out = 3 - assert out == exp_out + assert out == exp_out # gh-9693 def test_bindc_function(self): out = self.module.coddity.wat(1, 20) exp_out = 8 - assert out == exp_out + assert out == exp_out # gh-25207 def test_bindc_kinds(self): out = self.module.coddity.c_add_int64(1, 20) exp_out = 21 - assert out == exp_out + assert out == exp_out # gh-25207 def test_bindc_add_arr(self): - a = np.array([1,2,3]) - b = np.array([1,2,3]) + a = np.array([1, 2, 3]) + b = np.array([1, 2, 3]) out = self.module.coddity.add_arr(a, b) - exp_out = a*2 + exp_out = a * 2 assert_allclose(out, exp_out) diff --git a/blimgui/dist64/numpy/f2py/tests/test_kind.py b/blimgui/dist64/numpy/f2py/tests/test_kind.py index 09f52a2..1d594cd 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_kind.py +++ 
b/blimgui/dist64/numpy/f2py/tests/test_kind.py @@ -1,13 +1,16 @@ +import platform import sys + import pytest -import platform from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, _selected_real_kind_func as selected_real_kind, ) + from . import util +IS_PPC_OR_AIX = platform.machine().lower().startswith("ppc") or platform.system() == 'AIX' class TestKind(util.F2PyTest): sources = [util.getpath("tests", "src", "kind", "foo.f90")] @@ -35,7 +38,7 @@ def test_real(self): i ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" - @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + @pytest.mark.xfail(IS_PPC_OR_AIX, reason="Some PowerPC may not support full IEEE 754 precision") def test_quad_precision(self): """ diff --git a/blimgui/dist64/numpy/f2py/tests/test_mixed.py b/blimgui/dist64/numpy/f2py/tests/test_mixed.py index e8e3062..04e837a 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_mixed.py +++ b/blimgui/dist64/numpy/f2py/tests/test_mixed.py @@ -1,7 +1,9 @@ import textwrap + import pytest from numpy.testing import IS_PYPY + from . import util diff --git a/blimgui/dist64/numpy/f2py/tests/test_modules.py b/blimgui/dist64/numpy/f2py/tests/test_modules.py index 4dd5529..16c17ea 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_modules.py +++ b/blimgui/dist64/numpy/f2py/tests/test_modules.py @@ -1,9 +1,11 @@ -import pytest import textwrap -from . import util +import pytest + from numpy.testing import IS_PYPY +from . 
import util + @pytest.mark.slow class TestModuleFilterPublicEntities(util.F2PyTest): diff --git a/blimgui/dist64/numpy/f2py/tests/test_parameter.py b/blimgui/dist64/numpy/f2py/tests/test_parameter.py index 4390e52..5007bb0 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_parameter.py +++ b/blimgui/dist64/numpy/f2py/tests/test_parameter.py @@ -115,8 +115,8 @@ def test_constant_array(self): x = np.arange(3, dtype=np.float64) y = np.arange(5, dtype=np.float64) z = self.module.foo_array(x, y) - assert np.allclose(x, [0.0, 1./10, 2./10]) - assert np.allclose(y, [0.0, 1.*10, 2.*10, 3.*10, 4.*10]) + assert np.allclose(x, [0.0, 1. / 10, 2. / 10]) + assert np.allclose(y, [0.0, 1. * 10, 2. * 10, 3. * 10, 4. * 10]) assert np.allclose(z, 19.0) def test_constant_array_any_index(self): @@ -127,4 +127,3 @@ def test_constant_array_any_index(self): def test_constant_array_delims(self): x = self.module.foo_array_delims() assert x == 9 - diff --git a/blimgui/dist64/numpy/f2py/tests/test_pyf_src.py b/blimgui/dist64/numpy/f2py/tests/test_pyf_src.py index e93543d..da5eeb5 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_pyf_src.py +++ b/blimgui/dist64/numpy/f2py/tests/test_pyf_src.py @@ -2,7 +2,6 @@ from numpy.f2py._src_pyf import process_str from numpy.testing import assert_equal - pyf_src = """ python module foo <_rd=real,double precision> diff --git a/blimgui/dist64/numpy/f2py/tests/test_quoted_character.py b/blimgui/dist64/numpy/f2py/tests/test_quoted_character.py index ebbcf0c..66bc1a6 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_quoted_character.py +++ b/blimgui/dist64/numpy/f2py/tests/test_quoted_character.py @@ -2,6 +2,7 @@ """ import sys + import pytest from . 
import util diff --git a/blimgui/dist64/numpy/f2py/tests/test_regression.py b/blimgui/dist64/numpy/f2py/tests/test_regression.py index 88fbdbf..f88a5a1 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_regression.py +++ b/blimgui/dist64/numpy/f2py/tests/test_regression.py @@ -1,7 +1,8 @@ import os -import pytest import platform +import pytest + import numpy as np import numpy.testing as npt @@ -36,6 +37,16 @@ def test_mdat(self): assert self.module.simple_subroutine(5) == 1014 +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] @@ -45,13 +56,15 @@ def test_negbound(self): xvec = np.arange(12) xlow = -6 xhigh = 4 + # Calculate the upper bound, # Keeping the 1 index in mind + def ubound(xl, xh): return xh - xl + 1 rval = self.module.foo(is_=xlow, ie_=xhigh, arr=xvec[:ubound(xlow, xhigh)]) - expval = np.arange(11, dtype = np.float32) + expval = np.arange(11, dtype=np.float32) assert np.allclose(rval, expval) @@ -89,7 +102,7 @@ class TestIncludeFiles(util.F2PyTest): def test_gh25344(self): exp = 7.0 res = self.module.add(3.0, 4.0) - assert exp == res + assert exp == res class TestF77Comments(util.F2PyTest): # Check that comments are stripped from F77 continuation lines @@ -99,15 +112,15 @@ class TestF77Comments(util.F2PyTest): def test_gh26148(self): x1 = np.array(3, dtype=np.int32) x2 = np.array(5, dtype=np.int32) - res=self.module.testsub(x1, x2) - assert(res[0] == 8) - assert(res[1] == 15) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 @pytest.mark.slow def test_gh26466(self): # Check that comments 
after PARAMETER directions are stripped - expected = np.arange(1, 11, dtype=np.float32)*2 - res=self.module.testsub2() + expected = np.arange(1, 11, dtype=np.float32) * 2 + res = self.module.testsub2() npt.assert_allclose(expected, res) class TestF90Contiuation(util.F2PyTest): @@ -118,9 +131,9 @@ class TestF90Contiuation(util.F2PyTest): def test_gh26148b(self): x1 = np.array(3, dtype=np.int32) x2 = np.array(5, dtype=np.int32) - res=self.module.testsub(x1, x2) - assert(res[0] == 8) - assert(res[1] == 15) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 class TestLowerF2PYDirectives(util.F2PyTest): # Check variables are cased correctly @@ -145,7 +158,7 @@ def test_gh26623(): @pytest.mark.slow -@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') +@pytest.mark.skipif(platform.system() == "Windows", reason='Unsupported on this platform for now') def test_gh25784(): # Compile dubious file using passed flags try: @@ -154,7 +167,7 @@ def test_gh25784(): options=[ # Meson will collect and dedup these to pass to fortran_args: "--f77flags='-ffixed-form -O2'", - "--f90flags=\"-ffixed-form -Og\"", + "--f90flags=\"-ffixed-form -g\"", ], module_name="Blah", ) diff --git a/blimgui/dist64/numpy/f2py/tests/test_return_character.py b/blimgui/dist64/numpy/f2py/tests/test_return_character.py index bd30d37..c5ffa62 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_return_character.py +++ b/blimgui/dist64/numpy/f2py/tests/test_return_character.py @@ -1,8 +1,10 @@ +import platform + import pytest from numpy import array + from . 
import util -import platform IS_S390X = platform.machine() == "s390x" @@ -36,11 +38,11 @@ class TestFReturnCharacter(TestReturnCharacter): ] @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") - @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(",")) + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "s0", "s1", "s5", "ss"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") - @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(",")) + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "ts", "s0", "s1", "s5", "ss"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/blimgui/dist64/numpy/f2py/tests/test_return_complex.py b/blimgui/dist64/numpy/f2py/tests/test_return_complex.py index 15a5e60..e07ffaf 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_return_complex.py +++ b/blimgui/dist64/numpy/f2py/tests/test_return_complex.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . 
import util @@ -56,11 +57,11 @@ class TestFReturnComplex(TestReturnComplex): util.getpath("tests", "src", "return_complex", "foo90.f90"), ] - @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) - @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_complex, name), name) diff --git a/blimgui/dist64/numpy/f2py/tests/test_return_integer.py b/blimgui/dist64/numpy/f2py/tests/test_return_integer.py index 151ec40..2cc7143 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_return_integer.py +++ b/blimgui/dist64/numpy/f2py/tests/test_return_integer.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . import util @@ -28,8 +29,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) if tname in ["t8", "s8"]: pytest.raises(OverflowError, t, 100000000000000000000000) @@ -43,12 +44,12 @@ class TestFReturnInteger(TestReturnInteger): ] @pytest.mark.parametrize("name", - "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) @pytest.mark.parametrize("name", - "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_integer, name), name) diff --git a/blimgui/dist64/numpy/f2py/tests/test_return_logical.py 
b/blimgui/dist64/numpy/f2py/tests/test_return_logical.py index f9a9e97..5264852 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_return_logical.py +++ b/blimgui/dist64/numpy/f2py/tests/test_return_logical.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . import util @@ -53,12 +54,12 @@ class TestFReturnLogical(TestReturnLogical): ] @pytest.mark.slow - @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(",")) + @pytest.mark.parametrize("name", ["t0", "t1", "t2", "t4", "s0", "s1", "s2", "s4"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name)) @pytest.mark.slow @pytest.mark.parametrize("name", - "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/blimgui/dist64/numpy/f2py/tests/test_return_real.py b/blimgui/dist64/numpy/f2py/tests/test_return_real.py index 68c2e8e..97f73d3 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_return_real.py +++ b/blimgui/dist64/numpy/f2py/tests/test_return_real.py @@ -1,8 +1,10 @@ import platform + import pytest from numpy import array from numpy.testing import IS_64BIT + from . 
import util @@ -37,8 +39,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) try: r = t(10**400) @@ -87,7 +89,7 @@ class TestCReturnReal(TestReturnReal): end python module c_ext_return_real """ - @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(",")) + @pytest.mark.parametrize("name", ["t4", "t8", "s4", "s8"]) def test_all(self, name): self.check_function(getattr(self.module, name), name) @@ -98,10 +100,10 @@ class TestFReturnReal(TestReturnReal): util.getpath("tests", "src", "return_real", "foo90.f90"), ] - @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) - @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/blimgui/dist64/numpy/f2py/tests/test_routines.py b/blimgui/dist64/numpy/f2py/tests/test_routines.py index 1da72c3..39dfd56 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_routines.py +++ b/blimgui/dist64/numpy/f2py/tests/test_routines.py @@ -1,4 +1,5 @@ import pytest + from . 
import util diff --git a/blimgui/dist64/numpy/f2py/tests/test_semicolon_split.py b/blimgui/dist64/numpy/f2py/tests/test_semicolon_split.py index 91ef1df..f0b0ec9 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_semicolon_split.py +++ b/blimgui/dist64/numpy/f2py/tests/test_semicolon_split.py @@ -1,4 +1,5 @@ import platform + import pytest from numpy.testing import IS_64BIT diff --git a/blimgui/dist64/numpy/f2py/tests/test_size.py b/blimgui/dist64/numpy/f2py/tests/test_size.py index 2995ef3..e293234 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_size.py +++ b/blimgui/dist64/numpy/f2py/tests/test_size.py @@ -1,4 +1,5 @@ import pytest + import numpy as np from . import util diff --git a/blimgui/dist64/numpy/f2py/tests/test_string.py b/blimgui/dist64/numpy/f2py/tests/test_string.py index e09acf8..c695f65 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_string.py +++ b/blimgui/dist64/numpy/f2py/tests/test_string.py @@ -1,5 +1,7 @@ import pytest + import numpy as np + from . import util diff --git a/blimgui/dist64/numpy/f2py/tests/test_symbolic.py b/blimgui/dist64/numpy/f2py/tests/test_symbolic.py index 14d068f..395790b 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_symbolic.py +++ b/blimgui/dist64/numpy/f2py/tests/test_symbolic.py @@ -1,34 +1,35 @@ import pytest from numpy.f2py.symbolic import ( - Expr, - Op, ArithOp, + Expr, Language, - as_symbol, - as_number, - as_string, + Op, + as_apply, as_array, as_complex, - as_terms, - as_factors, - eliminate_quotes, - insert_quotes, - fromstring, - as_expr, - as_apply, - as_numer_denom, - as_ternary, - as_ref, as_deref, - normalize, as_eq, - as_ne, - as_lt, + as_expr, + as_factors, + as_ge, as_gt, as_le, - as_ge, + as_lt, + as_ne, + as_number, + as_numer_denom, + as_ref, + as_string, + as_symbol, + as_terms, + as_ternary, + eliminate_quotes, + fromstring, + insert_quotes, + normalize, ) + from . 
import util @@ -492,3 +493,8 @@ def test_polynomial_atoms(self): assert (y(x) + x).polynomial_atoms() == {y(x), x} assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} assert (y(x)**x).polynomial_atoms() == {y(x)} + + def test_unmatched_parenthesis_gh30268(self): + #gh - 30268 + with pytest.raises(ValueError, match=r"Mismatch of \(\) parenthesis"): + Expr.parse("DATA (A, I=1, N", language=Language.Fortran) diff --git a/blimgui/dist64/numpy/f2py/tests/test_value_attrspec.py b/blimgui/dist64/numpy/f2py/tests/test_value_attrspec.py index 453627e..bf7f537 100644 --- a/blimgui/dist64/numpy/f2py/tests/test_value_attrspec.py +++ b/blimgui/dist64/numpy/f2py/tests/test_value_attrspec.py @@ -2,6 +2,7 @@ from . import util + class TestValueAttr(util.F2PyTest): sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")] diff --git a/blimgui/dist64/numpy/f2py/tests/util.py b/blimgui/dist64/numpy/f2py/tests/util.py index 8d21540..0aa28e0 100644 --- a/blimgui/dist64/numpy/f2py/tests/util.py +++ b/blimgui/dist64/numpy/f2py/tests/util.py @@ -6,23 +6,24 @@ - determining paths to tests """ +import atexit +import concurrent.futures +import contextlib import glob import os -import sys +import shutil import subprocess +import sys import tempfile -import shutil -import atexit +from importlib import import_module +from pathlib import Path + import pytest -import contextlib -import numpy -import concurrent.futures -from pathlib import Path +import numpy from numpy._utils import asunicode -from numpy.testing import temppath, IS_WASM -from importlib import import_module from numpy.f2py._backends._meson import MesonBackend +from numpy.testing import IS_WASM, temppath # # Check if compilers are available at all... 
@@ -102,6 +103,7 @@ def check_compilers(self): self.compilers_checked = True + if not IS_WASM: checker = CompilerChecker() checker.check_compilers() @@ -211,7 +213,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): f2py_sources = [] for fn in source_files: if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) + raise RuntimeError(f"{fn} is not a file") dst = os.path.join(d, os.path.basename(fn)) shutil.copyfile(fn, dst) dst_sources.append(dst) @@ -246,8 +248,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): stderr=subprocess.STDOUT) out, err = p.communicate() if p.returncode != 0: - raise RuntimeError("Running f2py failed: %s\n%s" % - (cmd[4:], asunicode(out))) + raise RuntimeError(f"Running f2py failed: {cmd[4:]}\n{asunicode(out)}") finally: os.chdir(cwd) @@ -261,7 +262,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): # need to change to record how big each module is, rather than # relying on rebase being able to find that from the files. 
_module_list.extend( - glob.glob(os.path.join(d, "{:s}*".format(module_name))) + glob.glob(os.path.join(d, f"{module_name:s}*")) ) subprocess.check_call( ["/usr/bin/rebase", "--database", "--oblivious", "--verbose"] @@ -369,7 +370,7 @@ class F2PyTest: @property def module_name(self): cls = type(self) - return f'_{cls.__module__.rsplit(".",1)[-1]}_{cls.__name__}_ext_module' + return f'_{cls.__module__.rsplit(".", 1)[-1]}_{cls.__name__}_ext_module' @classmethod def setup_class(cls): @@ -384,7 +385,7 @@ def setup_method(self): if self.module is not None: return - codes = self.sources if self.sources else [] + codes = self.sources or [] if self.code: codes.append(self.suffix) diff --git a/blimgui/dist64/numpy/f2py/use_rules.py b/blimgui/dist64/numpy/f2py/use_rules.py index 9d5efe5..e7b7e8a 100644 --- a/blimgui/dist64/numpy/f2py/use_rules.py +++ b/blimgui/dist64/numpy/f2py/use_rules.py @@ -13,10 +13,7 @@ f2py_version = 'See `f2py -v`' -from .auxfuncs import ( - applyrules, dictappend, gentitle, hasnote, outmess -) - +from .auxfuncs import applyrules, dictappend, gentitle, hasnote, outmess usemodule_rules = { 'body': """ @@ -45,7 +42,7 @@ def buildusevars(m, r): ret = {} outmess( - '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name'])) + f"\t\tBuilding use variable hooks for module \"{m['name']}\" (feature only for F90/F95)...\n") varsmap = {} revmap = {} if 'map' in r: @@ -62,17 +59,13 @@ def buildusevars(m, r): if revmap[r['map'][v]] == v: varsmap[v] = r['map'][v] else: - outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' % - (v, r['map'][v])) + outmess(f"\t\t\tIgnoring map \"{v}=>{r['map'][v]}\". See above.\n") else: outmess( - '\t\t\tNo definition for variable "%s=>%s". Skipping.\n' % (v, r['map'][v])) + f"\t\t\tNo definition for variable \"{v}=>{r['map'][v]}\". 
Skipping.\n") else: for v in m['vars'].keys(): - if v in revmap: - varsmap[v] = revmap[v] - else: - varsmap[v] = v + varsmap[v] = revmap.get(v, v) for v in varsmap.keys(): ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) return ret @@ -88,9 +81,9 @@ def buildusevar(name, realname, vars, usemodulename): 'usemodulename': usemodulename, 'USEMODULENAME': usemodulename.upper(), 'texname': name.replace('_', '\\_'), - 'begintitle': gentitle('%s=>%s' % (name, realname)), - 'endtitle': gentitle('end of %s=>%s' % (name, realname)), - 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename) + 'begintitle': gentitle(f'{name}=>{realname}'), + 'endtitle': gentitle(f'end of {name}=>{realname}'), + 'apiname': f'#modulename#_use_{realname}_from_{usemodulename}' } nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} diff --git a/blimgui/dist64/numpy/f2py/use_rules.pyi b/blimgui/dist64/numpy/f2py/use_rules.pyi new file mode 100644 index 0000000..58c7f9b --- /dev/null +++ b/blimgui/dist64/numpy/f2py/use_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" +usemodule_rules: Final[dict[str, str | list[str]]] = ... + +def buildusevars(m: Mapping[str, object], r: Mapping[str, Mapping[str, object]]) -> dict[str, Any]: ... +def buildusevar(name: str, realname: str, vars: Mapping[str, Mapping[str, object]], usemodulename: str) -> dict[str, Any]: ... diff --git a/blimgui/dist64/numpy/fft/__init__.py b/blimgui/dist64/numpy/fft/__init__.py index 4101081..a24f3a5 100644 --- a/blimgui/dist64/numpy/fft/__init__.py +++ b/blimgui/dist64/numpy/fft/__init__.py @@ -1,11 +1,11 @@ """ -Discrete Fourier Transform (:mod:`numpy.fft`) -============================================= +Discrete Fourier Transform +========================== .. 
currentmodule:: numpy.fft The SciPy module `scipy.fft` is a more comprehensive superset -of ``numpy.fft``, which includes only a basic set of routines. +of `numpy.fft`, which includes only a basic set of routines. Standard FFTs ------------- @@ -200,16 +200,14 @@ """ -from . import _pocketfft, _helper -# TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should -# be deleted once downstream libraries move to `numpy.fft`. -from . import helper -from ._pocketfft import * +from . import _helper, _pocketfft from ._helper import * +from ._pocketfft import * -__all__ = _pocketfft.__all__.copy() +__all__ = _pocketfft.__all__.copy() # noqa: PLE0605 __all__ += _helper.__all__ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/blimgui/dist64/numpy/fft/__init__.pyi b/blimgui/dist64/numpy/fft/__init__.pyi index 640b8f8..bb7fa06 100644 --- a/blimgui/dist64/numpy/fft/__init__.pyi +++ b/blimgui/dist64/numpy/fft/__init__.pyi @@ -1,24 +1,19 @@ +from ._helper import fftfreq, fftshift, ifftshift, rfftfreq from ._pocketfft import ( fft, - ifft, - rfft, - irfft, + fft2, + fftn, hfft, + ifft, + ifft2, + ifftn, ihfft, - rfftn, + irfft, + irfft2, irfftn, + rfft, rfft2, - irfft2, - fft2, - ifft2, - fftn, - ifftn, -) -from ._helper import ( - fftshift, - ifftshift, - fftfreq, - rfftfreq, + rfftn, ) __all__ = [ diff --git a/blimgui/dist64/numpy/fft/_helper.py b/blimgui/dist64/numpy/fft/_helper.py index d11a892..cafa85a 100644 --- a/blimgui/dist64/numpy/fft/_helper.py +++ b/blimgui/dist64/numpy/fft/_helper.py @@ -2,7 +2,7 @@ Discrete Fourier Transforms - _helper.py """ -from numpy._core import integer, empty, arange, asarray, roll +from numpy._core import arange, asarray, empty, integer, roll from numpy._core.overrides import array_function_dispatch, set_module # Created by Pearu Peterson, September 2002 @@ -169,10 +169,10 @@ def fftfreq(n, d=1.0, device=None): raise ValueError("n should be an integer") val = 1.0 / (n * d) results 
= empty(n, int, device=device) - N = (n-1)//2 + 1 + N = (n - 1) // 2 + 1 p1 = arange(0, N, dtype=int, device=device) results[:N] = p1 - p2 = arange(-(n//2), 0, dtype=int, device=device) + p2 = arange(-(n // 2), 0, dtype=int, device=device) results[N:] = p2 return results * val @@ -229,7 +229,7 @@ def rfftfreq(n, d=1.0, device=None): """ if not isinstance(n, integer_types): raise ValueError("n should be an integer") - val = 1.0/(n*d) - N = n//2 + 1 + val = 1.0 / (n * d) + N = n // 2 + 1 results = arange(0, N, dtype=int, device=device) return results * val diff --git a/blimgui/dist64/numpy/fft/_helper.pyi b/blimgui/dist64/numpy/fft/_helper.pyi index 8b6bd5c..cb6d419 100644 --- a/blimgui/dist64/numpy/fft/_helper.pyi +++ b/blimgui/dist64/numpy/fft/_helper.pyi @@ -1,12 +1,18 @@ -from typing import Any, Final, TypeVar, overload -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar, overload from numpy import complexfloating, floating, generic, integer -from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ShapeLike +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ShapeLike, +) __all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] -_SCT = TypeVar("_SCT", bound=generic) +_ScalarT = TypeVar("_ScalarT", bound=generic) ### @@ -15,13 +21,13 @@ integer_types: Final[tuple[type[int], type[integer]]] = ... ### @overload -def fftshift(x: _ArrayLike[_SCT], axes: _ShapeLike | None = None) -> NDArray[_SCT]: ... +def fftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... @overload def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... # @overload -def ifftshift(x: _ArrayLike[_SCT], axes: _ShapeLike | None = None) -> NDArray[_SCT]: ... +def ifftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... 
@overload def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... diff --git a/blimgui/dist64/numpy/fft/_pocketfft.py b/blimgui/dist64/numpy/fft/_pocketfft.py index 9a5658d..ffac793 100644 --- a/blimgui/dist64/numpy/fft/_pocketfft.py +++ b/blimgui/dist64/numpy/fft/_pocketfft.py @@ -33,12 +33,19 @@ import functools import warnings +from numpy._core import ( + asarray, + conjugate, + empty_like, + overrides, + reciprocal, + result_type, + sqrt, + take, +) from numpy.lib.array_utils import normalize_axis_index -from numpy._core import (asarray, empty_like, result_type, - conjugate, take, sqrt, reciprocal) -from . import _pocketfft_umath as pfu -from numpy._core import overrides +from . import _pocketfft_umath as pfu array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.fft') @@ -85,7 +92,7 @@ def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None): out_dtype = real_dtype else: # Others, complex output. out_dtype = result_type(a.dtype, 1j) - out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis+1:], + out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis + 1:], dtype=out_dtype) elif ((shape := getattr(out, "shape", None)) is not None and (len(shape) != a.ndim or shape[axis] != n_out)): @@ -117,7 +124,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. + algorithm [CT]_. 
Parameters ---------- @@ -198,8 +205,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): >>> t = np.arange(256) >>> sp = np.fft.fft(np.sin(t)) >>> freq = np.fft.fftfreq(t.shape[-1]) - >>> plt.plot(freq, sp.real, freq, sp.imag) - [, ] + >>> _ = plt.plot(freq, sp.real, freq, sp.imag) >>> plt.show() """ @@ -1379,7 +1385,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): a = asarray(a) s, axes = _cook_nd_args(a, s, axes) a = rfft(a, s[-1], axes[-1], norm, out=out) - for ii in range(len(axes)-2, -1, -1): + for ii in range(len(axes) - 2, -1, -1): a = fft(a, s[ii], axes[ii], norm, out=out) return a @@ -1597,7 +1603,7 @@ def irfftn(a, s=None, axes=None, norm=None, out=None): """ a = asarray(a) s, axes = _cook_nd_args(a, s, axes, invreal=1) - for ii in range(len(axes)-1): + for ii in range(len(axes) - 1): a = ifft(a, s[ii], axes[ii], norm) a = irfft(a, s[-1], axes[-1], norm, out=out) return a diff --git a/blimgui/dist64/numpy/fft/_pocketfft.pyi b/blimgui/dist64/numpy/fft/_pocketfft.pyi index 51ff364..8dd1b43 100644 --- a/blimgui/dist64/numpy/fft/_pocketfft.pyi +++ b/blimgui/dist64/numpy/fft/_pocketfft.pyi @@ -21,117 +21,117 @@ __all__ = [ "ifftn", ] -_NormKind: TypeAlias = L[None, "backward", "ortho", "forward"] +_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None def fft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... 
def rfft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[float64] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... # Input array must be compatible with `np.conjugate` def hfft( a: _ArrayLikeNumber_co, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[float64] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... def ihfft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def fftn( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifftn( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... 
def rfftn( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfftn( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[float64] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... def fft2( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft2( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft2( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft2( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[float64] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... 
diff --git a/blimgui/dist64/numpy/fft/_pocketfft_umath.cp313-win_amd64.lib b/blimgui/dist64/numpy/fft/_pocketfft_umath.cp313-win_amd64.lib deleted file mode 100644 index d005726..0000000 Binary files a/blimgui/dist64/numpy/fft/_pocketfft_umath.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/fft/_pocketfft_umath.cp314-win_amd64.lib b/blimgui/dist64/numpy/fft/_pocketfft_umath.cp314-win_amd64.lib new file mode 100644 index 0000000..d5a4159 Binary files /dev/null and b/blimgui/dist64/numpy/fft/_pocketfft_umath.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/fft/helper.py b/blimgui/dist64/numpy/fft/helper.py deleted file mode 100644 index bdcbd3e..0000000 --- a/blimgui/dist64/numpy/fft/helper.py +++ /dev/null @@ -1,16 +0,0 @@ -def __getattr__(attr_name): - import warnings - from numpy.fft import _helper - ret = getattr(_helper, attr_name, None) - if ret is None: - raise AttributeError( - f"module 'numpy.fft.helper' has no attribute {attr_name}") - warnings.warn( - "The numpy.fft.helper has been made private and renamed to " - "numpy.fft._helper. All four functions exported by it (i.e. fftshift, " - "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. " - f"Please use numpy.fft.{attr_name} instead.", - DeprecationWarning, - stacklevel=3 - ) - return ret diff --git a/blimgui/dist64/numpy/fft/helper.pyi b/blimgui/dist64/numpy/fft/helper.pyi deleted file mode 100644 index 9a7c7be..0000000 --- a/blimgui/dist64/numpy/fft/helper.pyi +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Any -from typing import Literal as L - -from typing_extensions import deprecated - -import numpy as np -from numpy._typing import ArrayLike, NDArray, _ShapeLike - -from ._helper import integer_types as integer_types - -__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] - -### - -@deprecated("Please use `numpy.fft.fftshift` instead.") -def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... 
-@deprecated("Please use `numpy.fft.ifftshift` instead.") -def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -@deprecated("Please use `numpy.fft.fftfreq` instead.") -def fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... -@deprecated("Please use `numpy.fft.rfftfreq` instead.") -def rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... diff --git a/blimgui/dist64/numpy/fft/tests/test_helper.py b/blimgui/dist64/numpy/fft/tests/test_helper.py index 44c6a33..84fb57a 100644 --- a/blimgui/dist64/numpy/fft/tests/test_helper.py +++ b/blimgui/dist64/numpy/fft/tests/test_helper.py @@ -4,8 +4,8 @@ """ import numpy as np -from numpy.testing import assert_array_almost_equal from numpy import fft, pi +from numpy.testing import assert_array_almost_equal class TestFFTShift: @@ -84,8 +84,8 @@ def test_uneven_dims(self): assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) def test_equal_to_original(self): - """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ - from numpy._core import asarray, concatenate, arange, take + """ Test the new (>=v1.15) and old implementations are equal (see #10073) """ + from numpy._core import arange, asarray, concatenate, take def original_fftshift(x, axes=None): """ How fftshift was implemented in v1.14""" @@ -137,29 +137,29 @@ class TestFFTFreq: def test_definition(self): x = [0, 1, 2, 3, 4, -4, -3, -2, -1] - assert_array_almost_equal(9*fft.fftfreq(9), x) - assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) + assert_array_almost_equal(9 * fft.fftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.fftfreq(9, pi), x) x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] - assert_array_almost_equal(10*fft.fftfreq(10), x) - assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) + assert_array_almost_equal(10 * fft.fftfreq(10), x) + assert_array_almost_equal(10 * pi * 
fft.fftfreq(10, pi), x) class TestRFFTFreq: def test_definition(self): x = [0, 1, 2, 3, 4] - assert_array_almost_equal(9*fft.rfftfreq(9), x) - assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) + assert_array_almost_equal(9 * fft.rfftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.rfftfreq(9, pi), x) x = [0, 1, 2, 3, 4, 5] - assert_array_almost_equal(10*fft.rfftfreq(10), x) - assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) + assert_array_almost_equal(10 * fft.rfftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.rfftfreq(10, pi), x) class TestIRFFTN: def test_not_last_axis_success(self): ar, ai = np.random.random((2, 16, 8, 32)) - a = ar + 1j*ai + a = ar + 1j * ai axes = (-2,) diff --git a/blimgui/dist64/numpy/fft/tests/test_pocketfft.py b/blimgui/dist64/numpy/fft/tests/test_pocketfft.py index 7190ac3..0367cc5 100644 --- a/blimgui/dist64/numpy/fft/tests/test_pocketfft.py +++ b/blimgui/dist64/numpy/fft/tests/test_pocketfft.py @@ -1,18 +1,18 @@ -import numpy as np +import queue +import threading + import pytest + +import numpy as np from numpy.random import random -from numpy.testing import ( - assert_array_equal, assert_raises, assert_allclose, IS_WASM - ) -import threading -import queue +from numpy.testing import IS_WASM, assert_allclose, assert_array_equal, assert_raises def fft1(x): L = len(x) phase = -2j * np.pi * (np.arange(L) / L) phase = np.arange(L).reshape(-1, 1) * phase - return np.sum(x*np.exp(phase), axis=1) + return np.sum(x * np.exp(phase), axis=1) class TestFFTShift: @@ -25,7 +25,7 @@ class TestFFT1D: def test_identity(self): maxlen = 512 - x = random(maxlen) + 1j*random(maxlen) + x = random(maxlen) + 1j * random(maxlen) xr = random(maxlen) for i in range(1, maxlen): assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], @@ -39,11 +39,11 @@ def test_identity_long_short(self, dtype): # smaller and for n larger than the input size. 
maxlen = 16 atol = 5 * np.spacing(np.array(1., dtype=dtype)) - x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) xr = random(maxlen).astype(dtype) xxr = np.concatenate([xr, np.zeros_like(xr)]) - for i in range(1, maxlen*2): + for i in range(1, maxlen * 2): check_c = np.fft.ifft(np.fft.fft(x, n=i), n=i) assert check_c.real.dtype == dtype assert_allclose(check_c, xx[0:i], atol=atol, rtol=0) @@ -55,10 +55,10 @@ def test_identity_long_short(self, dtype): def test_identity_long_short_reversed(self, dtype): # Also test explicitly given number of points in reversed order. maxlen = 16 - atol = 5 * np.spacing(np.array(1., dtype=dtype)) - x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) + atol = 6 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) - for i in range(1, maxlen*2): + for i in range(1, maxlen * 2): check_via_c = np.fft.fft(np.fft.ifft(x, n=i), n=i) assert check_via_c.dtype == x.dtype assert_allclose(check_via_c, xx[0:i], atol=atol, rtol=0) @@ -69,14 +69,14 @@ def test_identity_long_short_reversed(self, dtype): n = i // 2 + 1 y.imag[0] = 0 if i % 2 == 0: - y.imag[n-1:] = 0 + y.imag[n - 1:] = 0 yy = np.concatenate([y, np.zeros_like(y)]) check_via_r = np.fft.rfft(np.fft.irfft(x, n=i), n=i) assert check_via_r.dtype == x.dtype assert_allclose(check_via_r, yy[0:n], atol=atol, rtol=0) def test_fft(self): - x = random(30) + 1j*random(30) + x = random(30) + 1j * random(30) assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6) assert_allclose(fft1(x) / np.sqrt(30), @@ -96,7 +96,7 @@ def zeros_like(x): # tests below only test the out parameter if dtype is complex: - y = random((10, 20)) + 1j*random((10, 20)) + y = random((10, 20)) + 1j * random((10, 20)) fft, 
ifft = np.fft.fft, np.fft.ifft else: y = random((10, 20)) @@ -117,7 +117,7 @@ def zeros_like(x): @pytest.mark.parametrize("axis", [0, 1]) def test_fft_inplace_out(self, axis): # Test some weirder in-place combinations - y = random((20, 20)) + 1j*random((20, 20)) + y = random((20, 20)) + 1j * random((20, 20)) # Fully in-place. y1 = y.copy() expected1 = np.fft.fft(y1, axis=axis) @@ -185,7 +185,7 @@ def test_fft_bad_out(self): @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) def test_ifft(self, norm): - x = random(30) + 1j*random(30) + x = random(30) + 1j * random(30) assert_allclose( x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), atol=1e-6) @@ -195,7 +195,7 @@ def test_ifft(self, norm): np.fft.ifft([], norm=norm) def test_fft2(self): - x = random((30, 20)) + 1j*random((30, 20)) + x = random((30, 20)) + 1j * random((30, 20)) assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), np.fft.fft2(x), atol=1e-6) assert_allclose(np.fft.fft2(x), @@ -206,7 +206,7 @@ def test_fft2(self): np.fft.fft2(x, norm="forward"), atol=1e-6) def test_ifft2(self): - x = random((30, 20)) + 1j*random((30, 20)) + x = random((30, 20)) + 1j * random((30, 20)) assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), np.fft.ifft2(x), atol=1e-6) assert_allclose(np.fft.ifft2(x), @@ -217,7 +217,7 @@ def test_ifft2(self): np.fft.ifft2(x, norm="forward"), atol=1e-6) def test_fftn(self): - x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) assert_allclose( np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), np.fft.fftn(x), atol=1e-6) @@ -229,7 +229,7 @@ def test_fftn(self): np.fft.fftn(x, norm="forward"), atol=1e-6) def test_ifftn(self): - x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) assert_allclose( np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), np.fft.ifftn(x), atol=1e-6) @@ -242,10 +242,10 @@ def test_ifftn(self): def 
test_rfft(self): x = random(30) - for n in [x.size, 2*x.size]: + for n in [x.size, 2 * x.size]: for norm in [None, 'backward', 'ortho', 'forward']: assert_allclose( - np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)], + np.fft.fft(x, n=n, norm=norm)[:(n // 2 + 1)], np.fft.rfft(x, n=n, norm=norm), atol=1e-6) assert_allclose( np.fft.rfft(x, n=n), @@ -261,7 +261,7 @@ def test_rfft_even(self): x = np.arange(8) n = 4 y = np.fft.rfft(x, n) - assert_allclose(y, np.fft.fft(x[:n])[:n//2 + 1], rtol=1e-14) + assert_allclose(y, np.fft.fft(x[:n])[:n // 2 + 1], rtol=1e-14) def test_rfft_odd(self): x = np.array([1, 0, 2, 3, -3]) @@ -327,7 +327,7 @@ def test_irfftn(self): norm="forward"), atol=1e-6) def test_hfft(self): - x = random(14) + 1j*random(14) + x = random(14) + 1j * random(14) x_herm = np.concatenate((random(1), x, random(1))) x = np.concatenate((x_herm, x[::-1].conj())) assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) @@ -339,7 +339,7 @@ def test_hfft(self): np.fft.hfft(x_herm, norm="forward"), atol=1e-6) def test_ihfft(self): - x = random(14) + 1j*random(14) + x = random(14) + 1j * random(14) x_herm = np.concatenate((random(1), x, random(1))) x = np.concatenate((x_herm, x[::-1].conj())) assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) @@ -400,7 +400,7 @@ def test_all_1d_norm_preserving(self): (np.fft.ihfft, np.fft.hfft), ] for forw, back in func_pairs: - for n in [x.size, 2*x.size]: + for n in [x.size, 2 * x.size]: for norm in [None, 'backward', 'ortho', 'forward']: tmp = forw(x, n=n, norm=norm) tmp = back(tmp, n=n, norm=norm) @@ -419,7 +419,7 @@ def zeros_like(x): # tests below only test the out parameter if dtype is complex: - x = random((10, 5, 6)) + 1j*random((10, 5, 6)) + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) fft, ifft = np.fft.fftn, np.fft.ifftn else: x = random((10, 5, 6)) @@ -443,7 +443,7 @@ def test_fftn_out_and_s_interaction(self, fft): if fft is np.fft.rfftn: x = random((10, 5, 6)) else: - x = random((10, 5, 6)) + 
1j*random((10, 5, 6)) + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) with pytest.raises(ValueError, match="has wrong shape"): fft(x, out=np.zeros_like(x), s=(3, 3, 3), axes=(0, 1, 2)) # Except on the first axis done (which is the last of axes). @@ -458,7 +458,7 @@ def test_fftn_out_and_s_interaction(self, fft): def test_irfftn_out_and_s_interaction(self, s): # Since for irfftn, the output is real and thus cannot be used for # intermediate steps, it should always work. - x = random((9, 5, 6, 2)) + 1j*random((9, 5, 6, 2)) + x = random((9, 5, 6, 2)) + 1j * random((9, 5, 6, 2)) expected = np.fft.irfftn(x, s=s, axes=(0, 1, 2)) out = np.zeros_like(expected) result = np.fft.irfftn(x, s=s, axes=(0, 1, 2), out=out) @@ -539,11 +539,11 @@ def worker(args, q): 'Function returned wrong value in multithreaded context') def test_fft(self): - a = np.ones(self.input_shape) * 1+0j + a = np.ones(self.input_shape) * 1 + 0j self._test_mtsame(np.fft.fft, a) def test_ifft(self): - a = np.ones(self.input_shape) * 1+0j + a = np.ones(self.input_shape) * 1 + 0j self._test_mtsame(np.fft.ifft, a) def test_rfft(self): @@ -551,7 +551,7 @@ def test_rfft(self): self._test_mtsame(np.fft.rfft, a) def test_irfft(self): - a = np.ones(self.input_shape) * 1+0j + a = np.ones(self.input_shape) * 1 + 0j self._test_mtsame(np.fft.irfft, a) diff --git a/blimgui/dist64/numpy/linalg/__init__.py b/blimgui/dist64/numpy/linalg/__init__.py index 9f7fd79..922989a 100644 --- a/blimgui/dist64/numpy/linalg/__init__.py +++ b/blimgui/dist64/numpy/linalg/__init__.py @@ -84,12 +84,12 @@ """ # To get sub-modules -from . import linalg # deprecated in NumPy 2.0 from . 
import _linalg from ._linalg import * -__all__ = _linalg.__all__.copy() +__all__ = _linalg.__all__.copy() # noqa: PLE0605 from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/blimgui/dist64/numpy/linalg/__init__.pyi b/blimgui/dist64/numpy/linalg/__init__.pyi index ed41bf0..6e031b4 100644 --- a/blimgui/dist64/numpy/linalg/__init__.pyi +++ b/blimgui/dist64/numpy/linalg/__init__.pyi @@ -1,70 +1,71 @@ -from numpy._core.fromnumeric import matrix_transpose -from numpy._core.numeric import tensordot, vecdot - +from . import _linalg as _linalg, _umath_linalg as _umath_linalg from ._linalg import ( - matrix_power, - solve, - tensorsolve, - tensorinv, - inv, cholesky, - outer, - eigvals, - eigvalsh, - pinv, - slogdet, + cond, + cross, det, - svd, - svdvals, + diagonal, eig, eigh, + eigvals, + eigvalsh, + inv, lstsq, - norm, + matmul, matrix_norm, - vector_norm, - qr, - cond, + matrix_power, matrix_rank, + matrix_transpose, multi_dot, - matmul, + norm, + outer, + pinv, + qr, + slogdet, + solve, + svd, + svdvals, + tensordot, + tensorinv, + tensorsolve, trace, - diagonal, - cross, + vecdot, + vector_norm, ) __all__ = [ - "matrix_power", - "solve", - "tensorsolve", - "tensorinv", - "inv", + "LinAlgError", "cholesky", - "eigvals", - "eigvalsh", - "pinv", - "slogdet", + "cond", + "cross", "det", - "svd", - "svdvals", + "diagonal", "eig", "eigh", + "eigvals", + "eigvalsh", + "inv", "lstsq", - "norm", - "qr", - "cond", + "matmul", + "matrix_norm", + "matrix_power", "matrix_rank", - "LinAlgError", + "matrix_transpose", "multi_dot", - "trace", - "diagonal", - "cross", + "norm", "outer", + "pinv", + "qr", + "slogdet", + "solve", + "svd", + "svdvals", "tensordot", - "matmul", - "matrix_transpose", - "matrix_norm", - "vector_norm", + "tensorinv", + "tensorsolve", + "trace", "vecdot", + "vector_norm", ] class LinAlgError(ValueError): ... 
diff --git a/blimgui/dist64/numpy/linalg/_linalg.py b/blimgui/dist64/numpy/linalg/_linalg.py index ea0667f..9374737 100644 --- a/blimgui/dist64/numpy/linalg/_linalg.py +++ b/blimgui/dist64/numpy/linalg/_linalg.py @@ -19,26 +19,67 @@ import functools import operator import warnings -from typing import NamedTuple, Any +from typing import Any, NamedTuple -from numpy._utils import set_module from numpy._core import ( - array, asarray, zeros, empty, empty_like, intc, single, double, - csingle, cdouble, inexact, complexfloating, newaxis, all, inf, dot, - add, multiply, sqrt, sum, isfinite, finfo, errstate, moveaxis, amin, - amax, prod, abs, atleast_2d, intp, asanyarray, object_, - swapaxes, divide, count_nonzero, isnan, sign, argsort, sort, - reciprocal, overrides, diagonal as _core_diagonal, trace as _core_trace, - cross as _core_cross, outer as _core_outer, tensordot as _core_tensordot, - matmul as _core_matmul, matrix_transpose as _core_matrix_transpose, - transpose as _core_transpose, vecdot as _core_vecdot, + abs, + add, + all, + amax, + amin, + argsort, + array, + asanyarray, + asarray, + atleast_2d, + cdouble, + complexfloating, + count_nonzero, + cross as _core_cross, + csingle, + diagonal as _core_diagonal, + divide, + dot, + double, + empty, + empty_like, + errstate, + finfo, + inexact, + inf, + intc, + intp, + isfinite, + isnan, + matmul as _core_matmul, + matrix_transpose as _core_matrix_transpose, + moveaxis, + multiply, + newaxis, + object_, + outer as _core_outer, + overrides, + prod, + reciprocal, + sign, + single, + sort, + sqrt, + sum, + swapaxes, + tensordot as _core_tensordot, + trace as _core_trace, + transpose as _core_transpose, + vecdot as _core_vecdot, + zeros, ) from numpy._globals import _NoValue -from numpy.lib._twodim_base_impl import triu, eye +from numpy._typing import NDArray +from numpy._utils import set_module +from numpy.lib._twodim_base_impl import eye, triu from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple 
from numpy.linalg import _umath_linalg -from numpy._typing import NDArray class EigResult(NamedTuple): eigenvalues: NDArray[Any] @@ -159,8 +200,7 @@ def _commonType(*arrays): result_type = double elif rt is None: # unsupported inexact scalar - raise TypeError("array type %s is unsupported in linalg" % - (a.dtype.name,)) + raise TypeError(f"array type {a.dtype.name} is unsupported in linalg") else: result_type = double if is_complex: @@ -197,7 +237,11 @@ def _assert_stacked_2d(*arrays): def _assert_stacked_square(*arrays): for a in arrays: - m, n = a.shape[-2:] + try: + m, n = a.shape[-2:] + except ValueError: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) if m != n: raise LinAlgError('Last 2 dimensions of the array must be square') @@ -273,8 +317,7 @@ def tensorsolve(a, b, axes=None): Examples -------- >>> import numpy as np - >>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) + >>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4)) >>> rng = np.random.default_rng() >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) @@ -289,13 +332,13 @@ def tensorsolve(a, b, axes=None): an = a.ndim if axes is not None: - allaxes = list(range(0, an)) + allaxes = list(range(an)) for k in axes: allaxes.remove(k) allaxes.insert(an, k) a = a.transpose(allaxes) - oldshape = a.shape[-(an-b.ndim):] + oldshape = a.shape[-(an - b.ndim):] prod = 1 for k in oldshape: prod *= k @@ -392,7 +435,6 @@ def solve(a, b): """ a, _ = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) b, wrap = _makearray(b) t, result_t = _commonType(a, b) @@ -452,8 +494,7 @@ def tensorinv(a, ind=2): Examples -------- >>> import numpy as np - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) + >>> a = np.eye(4*6).reshape((4, 6, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) @@ -462,8 +503,7 @@ def tensorinv(a, ind=2): >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True - >>> a = 
np.eye(4*6) - >>> a.shape = (24, 8, 3) + >>> a = np.eye(4*6).reshape((24, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) @@ -599,7 +639,6 @@ def inv(a): """ a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) @@ -681,7 +720,6 @@ def matrix_power(a, n): """ a = asanyarray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) try: @@ -830,7 +868,6 @@ def cholesky(a, /, *, upper=False): """ gufunc = _umath_linalg.cholesky_up if upper else _umath_linalg.cholesky_lo a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' @@ -958,9 +995,6 @@ def qr(a, mode='reduced'): Returns ------- - When mode is 'reduced' or 'complete', the result will be a namedtuple with - the attributes `Q` and `R`. - Q : ndarray of float or complex, optional A matrix with orthonormal columns. When mode = 'complete' the result is an orthogonal/unitary matrix depending on whether or not @@ -989,6 +1023,9 @@ def qr(a, mode='reduced'): Notes ----- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes ``Q`` and ``R``. + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, ``dorgqr``, and ``zungqr``. 
@@ -1201,7 +1238,6 @@ def eigvals(a): """ a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) _assert_finite(a) t, result_t = _commonType(a) @@ -1294,8 +1330,9 @@ def eigvalsh(a, UPLO='L'): [0.+2.j, 2.+0.j]]) >>> wa = LA.eigvalsh(a) >>> wb = LA.eigvals(b) - >>> wa; wb + >>> wa array([1., 6.]) + >>> wb array([6.+0.j, 1.+0.j]) """ @@ -1309,7 +1346,6 @@ def eigvalsh(a, UPLO='L'): gufunc = _umath_linalg.eigvalsh_up a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) signature = 'D->d' if isComplexType(t) else 'd->d' @@ -1319,11 +1355,6 @@ def eigvalsh(a, UPLO='L'): w = gufunc(a, signature=signature) return w.astype(_realType(result_t), copy=False) -def _convertarray(a): - t, result_t = _commonType(a) - a = a.astype(t).T.copy() - return a, t, result_t - # Eigenvectors @@ -1460,7 +1491,6 @@ def eig(a): """ a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) _assert_finite(a) t, result_t = _commonType(a) @@ -1611,7 +1641,6 @@ def eigh(a, UPLO='L'): raise ValueError("UPLO argument must be 'L' or 'U'") a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) @@ -1667,9 +1696,6 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Returns ------- - When `compute_uv` is True, the result is a namedtuple with the following - attribute names: - U : { (..., M, M), (..., M, K) } array Unitary array(s). The first ``a.ndim - 2`` dimensions have the same size as those of the input `a`. The size of the last two dimensions @@ -1697,6 +1723,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Notes ----- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: `U`, `S`, and `Vh`. + The decomposition is performed using LAPACK routine ``_gesdd``. SVD is usually described for the factorization of a 2D matrix :math:`A`. 
@@ -1772,7 +1801,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): True """ - import numpy as _nx + import numpy as np a, wrap = _makearray(a) if hermitian: @@ -1784,9 +1813,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): sgn = sign(s) s = abs(s) sidx = argsort(s)[..., ::-1] - sgn = _nx.take_along_axis(sgn, sidx, axis=-1) - s = _nx.take_along_axis(s, sidx, axis=-1) - u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1) + sgn = np.take_along_axis(sgn, sidx, axis=-1) + s = np.take_along_axis(s, sidx, axis=-1) + u = np.take_along_axis(u, sidx[..., None, :], axis=-1) # singular values are unsigned, move the sign into v vt = transpose(u * sgn[..., None, :]).conjugate() return SVDResult(wrap(u), s, wrap(vt)) @@ -1967,7 +1996,7 @@ def cond(x, p=None): x = asarray(x) # in case we have a matrix if _is_empty_2d(x): raise LinAlgError("cond is not defined on empty arrays") - if p is None or p == 2 or p == -2: + if p is None or p in {2, -2}: s = svd(x, compute_uv=False) with errstate(all='ignore'): if p == -2: @@ -1977,9 +2006,9 @@ def cond(x, p=None): else: # Call inv(x) ignoring errors. The result array will # contain nans in the entries where inversion failed. - _assert_stacked_2d(x) _assert_stacked_square(x) t, result_t = _commonType(x) + result_t = _realType(result_t) # condition number is always real signature = 'D->D' if isComplexType(t) else 'd->d' with errstate(all='ignore'): invx = _umath_linalg.inv(x, signature=signature) @@ -1987,18 +2016,14 @@ def cond(x, p=None): r = r.astype(result_t, copy=False) # Convert nans to infs unless the original array had nan entries - r = asarray(r) nan_mask = isnan(r) if nan_mask.any(): nan_mask &= ~isnan(x).any(axis=(-2, -1)) if r.ndim > 0: r[nan_mask] = inf elif nan_mask: - r[()] = inf - - # Convention is to return scalars instead of 0d arrays - if r.ndim == 0: - r = r[()] + # Convention is to return scalars instead of 0d arrays. 
+ r = r.dtype.type(inf) return r @@ -2044,9 +2069,9 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `A`. By default, we identify singular values less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency - (with the symbols defined above). This is the algorithm MATLAB uses [1]. + (with the symbols defined above). This is the algorithm MATLAB uses [1]_. It also appears in *Numerical recipes* in the discussion of SVD solutions - for linear least squares [2]. + for linear least squares [2]_. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. Imagine that there @@ -2317,7 +2342,6 @@ def slogdet(a): """ a = asarray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) real_t = _realType(result_t) @@ -2376,7 +2400,6 @@ def det(a): """ a = asarray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' @@ -2540,7 +2563,7 @@ def lstsq(a, b, rcond=None): return wrap(x), wrap(resids), rank, s -def _multi_svd_norm(x, row_axis, col_axis, op): +def _multi_svd_norm(x, row_axis, col_axis, op, initial=None): """Compute a function of the singular values of the 2-D matrices in `x`. This is a private utility function used by `numpy.linalg.norm()`. @@ -2564,7 +2587,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op): """ y = moveaxis(x, (row_axis, col_axis), (-2, -1)) - result = op(svd(y, compute_uv=False), axis=-1) + result = op(svd(y, compute_uv=False), axis=-1, initial=initial) return result @@ -2744,7 +2767,7 @@ def norm(x, ord=None, axis=None, keepdims=False): sqnorm = x.dot(x) ret = sqrt(sqnorm) if keepdims: - ret = ret.reshape(ndim*[1]) + ret = ret.reshape(ndim * [1]) return ret # Normalize the `axis` argument to a tuple. 
@@ -2762,7 +2785,7 @@ def norm(x, ord=None, axis=None, keepdims=False): if len(axis) == 1: if ord == inf: - return abs(x).max(axis=axis, keepdims=keepdims) + return abs(x).max(axis=axis, keepdims=keepdims, initial=0) elif ord == -inf: return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: @@ -2796,17 +2819,17 @@ def norm(x, ord=None, axis=None, keepdims=False): if row_axis == col_axis: raise ValueError('Duplicate axes given.') if ord == 2: - ret = _multi_svd_norm(x, row_axis, col_axis, amax) + ret = _multi_svd_norm(x, row_axis, col_axis, amax, 0) elif ord == -2: ret = _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: if col_axis > row_axis: col_axis -= 1 - ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis, initial=0) elif ord == inf: if row_axis > col_axis: row_axis -= 1 - ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis, initial=0) elif ord == -1: if col_axis > row_axis: col_axis -= 1 @@ -2818,7 +2841,7 @@ def norm(x, ord=None, axis=None, keepdims=False): elif ord in [None, 'fro', 'f']: ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) elif ord == 'nuc': - ret = _multi_svd_norm(x, row_axis, col_axis, sum) + ret = _multi_svd_norm(x, row_axis, col_axis, sum, 0) else: raise ValueError("Invalid norm order for matrices.") if keepdims: @@ -2915,7 +2938,7 @@ def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1] Assume we have three matrices - :math:`A_{10x100}, B_{100x5}, C_{5x50}`. + :math:`A_{10 \\times 100}, B_{100 \\times 5}, C_{5 \\times 50}`. 
The costs for the two different parenthesizations are as follows:: @@ -3012,7 +3035,7 @@ def _multi_dot_matrix_chain_order(arrays, return_costs=False): j = i + l m[i, j] = inf for k in range(i, j): - q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] + q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1] if q < m[i, j]: m[i, j] = q s[i, j] = k # Note that Cormen uses 1-based index @@ -3407,7 +3430,12 @@ def matrix_transpose(x, /): return _core_matrix_transpose(x) -matrix_transpose.__doc__ = _core_matrix_transpose.__doc__ +matrix_transpose.__doc__ = f"""{_core_matrix_transpose.__doc__} + + Notes + ----- + This function is an alias of `numpy.matrix_transpose`. +""" # matrix_norm diff --git a/blimgui/dist64/numpy/linalg/_linalg.pyi b/blimgui/dist64/numpy/linalg/_linalg.pyi index a4b302f..4dd7efa 100644 --- a/blimgui/dist64/numpy/linalg/_linalg.pyi +++ b/blimgui/dist64/numpy/linalg/_linalg.pyi @@ -1,47 +1,48 @@ from collections.abc import Iterable from typing import ( - Literal as L, - overload, - TypeAlias, - TypeVar, Any, + Literal as L, + NamedTuple, + Never, SupportsIndex, SupportsInt, - NamedTuple, + TypeAlias, + TypeVar, + overload, ) import numpy as np from numpy import ( - # re-exports - vecdot, - - # other - floating, + complex128, complexfloating, + float64, + floating, + int32, + object_, signedinteger, - unsignedinteger, timedelta64, - object_, - int32, - float64, - complex128, + unsignedinteger, + vecdot, ) -from numpy.linalg import LinAlgError from numpy._core.fromnumeric import matrix_transpose -from numpy._core.numeric import tensordot +from numpy._globals import _NoValueType from numpy._typing import ( - NDArray, ArrayLike, DTypeLike, - _ArrayLikeUnknown, + NDArray, + _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeInt_co, - _ArrayLikeUInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, - _ArrayLikeTD64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, _ArrayLikeObject_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _NestedSequence, + 
_ShapeLike, ) +from numpy.linalg import LinAlgError __all__ = [ "matrix_power", @@ -78,7 +79,8 @@ __all__ = [ "vecdot", ] -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_NumberT = TypeVar("_NumberT", bound=np.number) +_NumericScalarT = TypeVar("_NumericScalarT", bound=np.number | np.timedelta64 | np.object_) _ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] @@ -113,20 +115,20 @@ class SVDResult(NamedTuple): def tensorsolve( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axes: None | Iterable[int] =..., + axes: Iterable[int] | None = None, ) -> NDArray[float64]: ... @overload def tensorsolve( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axes: None | Iterable[int] =..., -) -> NDArray[floating[Any]]: ... + axes: Iterable[int] | None = None, +) -> NDArray[floating]: ... @overload def tensorsolve( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axes: None | Iterable[int] =..., -) -> NDArray[complexfloating[Any, Any]]: ... + axes: Iterable[int] | None = None, +) -> NDArray[complexfloating]: ... @overload def solve( @@ -137,35 +139,35 @@ def solve( def solve( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, -) -> NDArray[floating[Any]]: ... +) -> NDArray[floating]: ... @overload def solve( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... +) -> NDArray[complexfloating]: ... @overload def tensorinv( a: _ArrayLikeInt_co, - ind: int = ..., + ind: int = 2, ) -> NDArray[float64]: ... @overload def tensorinv( a: _ArrayLikeFloat_co, - ind: int = ..., -) -> NDArray[floating[Any]]: ... + ind: int = 2, +) -> NDArray[floating]: ... @overload def tensorinv( a: _ArrayLikeComplex_co, - ind: int = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + ind: int = 2, +) -> NDArray[complexfloating]: ... @overload def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... @overload -def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +def inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ... 
@overload -def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... # TODO: The supported input and output dtypes are dependent on the value of `n`. # For example: `n < 0` always casts integer types to float64 @@ -177,57 +179,53 @@ def matrix_power( @overload def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... @overload -def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating[Any]]: ... +def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ... @overload -def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating[Any, Any]]: ... +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... @overload -def outer(x1: _ArrayLikeUnknown, x2: _ArrayLikeUnknown) -> NDArray[Any]: ... +def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... @overload -def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co) -> NDArray[np.bool]: ... +def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload -def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... +def outer(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload -def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... +def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload -def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload -def outer( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... +def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... 
@overload -def outer( - x1: _ArrayLikeTD64_co, - x2: _ArrayLikeTD64_co, - out: None = ..., -) -> NDArray[timedelta64]: ... +def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... +@overload +def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload -def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co) -> NDArray[object_]: ... +def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... @overload def outer( x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, -) -> _ArrayType: ... + /, +) -> NDArray[Any]: ... @overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... @overload -def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]] | NDArray[complexfloating[Any, Any]]: ... +def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ... @overload -def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload -def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[float64]: ... @overload -def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating[Any]]: ... 
+def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[floating]: ... @overload def eig(a: _ArrayLikeInt_co) -> EigResult: ... @@ -239,90 +237,118 @@ def eig(a: _ArrayLikeComplex_co) -> EigResult: ... @overload def eigh( a: _ArrayLikeInt_co, - UPLO: L["L", "U", "l", "u"] = ..., + UPLO: L["L", "U", "l", "u"] = "L", ) -> EighResult: ... @overload def eigh( a: _ArrayLikeFloat_co, - UPLO: L["L", "U", "l", "u"] = ..., + UPLO: L["L", "U", "l", "u"] = "L", ) -> EighResult: ... @overload def eigh( a: _ArrayLikeComplex_co, - UPLO: L["L", "U", "l", "u"] = ..., + UPLO: L["L", "U", "l", "u"] = "L", ) -> EighResult: ... @overload def svd( a: _ArrayLikeInt_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, ) -> SVDResult: ... @overload def svd( a: _ArrayLikeFloat_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, ) -> SVDResult: ... @overload def svd( a: _ArrayLikeComplex_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, ) -> SVDResult: ... @overload def svd( a: _ArrayLikeInt_co, - full_matrices: bool = ..., - compute_uv: L[False] = ..., - hermitian: bool = ..., + full_matrices: bool = True, + *, + compute_uv: L[False], + hermitian: bool = False, ) -> NDArray[float64]: ... @overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool, + compute_uv: L[False], + hermitian: bool = False, +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = True, + *, + compute_uv: L[False], + hermitian: bool = False, +) -> NDArray[floating]: ... 
+@overload def svd( a: _ArrayLikeComplex_co, - full_matrices: bool = ..., - compute_uv: L[False] = ..., - hermitian: bool = ..., -) -> NDArray[floating[Any]]: ... + full_matrices: bool, + compute_uv: L[False], + hermitian: bool = False, +) -> NDArray[floating]: ... -def svdvals( - x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co -) -> NDArray[floating[Any]]: ... +# the ignored `overload-overlap` mypy error below is a false-positive +@overload +def svdvals( # type: ignore[overload-overlap] + x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / +) -> NDArray[np.float64]: ... +@overload +def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: ... +@overload +def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... # TODO: Returns a scalar for 2D arrays and # a `(x.ndim - 2)`` dimensionl array otherwise -def cond(x: _ArrayLikeComplex_co, p: None | float | L["fro", "nuc"] = ...) -> Any: ... +def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... # TODO: Returns `int` for <2D arrays and `intp` otherwise def matrix_rank( A: _ArrayLikeComplex_co, - tol: None | _ArrayLikeFloat_co = ..., - hermitian: bool = ..., + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, *, - rtol: None | _ArrayLikeFloat_co = ..., + rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def pinv( a: _ArrayLikeInt_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., ) -> NDArray[float64]: ... @overload def pinv( a: _ArrayLikeFloat_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[floating[Any]]: ... + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., +) -> NDArray[floating]: ... 
@overload def pinv( a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., +) -> NDArray[complexfloating]: ... # TODO: Returns a 2-tuple of scalars for 2D arrays and # a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise @@ -333,40 +359,48 @@ def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... def det(a: _ArrayLikeComplex_co) -> Any: ... @overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> tuple[ +def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None) -> tuple[ NDArray[float64], NDArray[float64], int32, NDArray[float64], ]: ... @overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> tuple[ - NDArray[floating[Any]], - NDArray[floating[Any]], +def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None) -> tuple[ + NDArray[floating], + NDArray[floating], int32, - NDArray[floating[Any]], + NDArray[floating], ]: ... @overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> tuple[ - NDArray[complexfloating[Any, Any]], - NDArray[floating[Any]], +def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None) -> tuple[ + NDArray[complexfloating], + NDArray[floating], int32, - NDArray[floating[Any]], + NDArray[floating], ]: ... @overload def norm( x: ArrayLike, - ord: None | float | L["fro", "nuc"] = ..., - axis: None = ..., - keepdims: bool = ..., -) -> floating[Any]: ... + ord: float | L["fro", "nuc"] | None = None, + axis: None = None, + keepdims: L[False] = False, +) -> floating: ... +@overload +def norm( + x: ArrayLike, + ord: float | L["fro", "nuc"] | None, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, +) -> Any: ... 
@overload def norm( x: ArrayLike, - ord: None | float | L["fro", "nuc"] = ..., - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = None, + *, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, ) -> Any: ... @overload @@ -374,16 +408,16 @@ def matrix_norm( x: ArrayLike, /, *, - ord: None | float | L["fro", "nuc"] = ..., - keepdims: bool = ..., -) -> floating[Any]: ... + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: L[False] = False, +) -> floating: ... @overload def matrix_norm( x: ArrayLike, /, *, - ord: None | float | L["fro", "nuc"] = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: bool = False, ) -> Any: ... @overload @@ -391,40 +425,82 @@ def vector_norm( x: ArrayLike, /, *, - axis: None = ..., - ord: None | float = ..., - keepdims: bool = ..., -) -> floating[Any]: ... + axis: None = None, + ord: float | None = 2, + keepdims: L[False] = False, +) -> floating: ... @overload def vector_norm( x: ArrayLike, /, *, - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - ord: None | float = ..., - keepdims: bool = ..., + axis: SupportsInt | SupportsIndex | tuple[int, ...], + ord: float | None = 2, + keepdims: bool = False, ) -> Any: ... +# keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) +@overload +def tensordot( + a: _ArrayLike[_NumericScalarT], + b: _ArrayLike[_NumericScalarT], + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[_NumericScalarT]: ... +@overload +def tensordot( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.bool_]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.int_ | Any]: ... 
+@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.float64 | Any]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.complex128 | Any]: ... + # TODO: Returns a scalar or array def multi_dot( arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], *, - out: None | NDArray[Any] = ..., + out: NDArray[Any] | None = None, ) -> Any: ... def diagonal( x: ArrayLike, # >= 2D array /, *, - offset: SupportsIndex = ..., + offset: SupportsIndex = 0, ) -> NDArray[Any]: ... def trace( x: ArrayLike, # >= 2D array /, *, - offset: SupportsIndex = ..., - dtype: DTypeLike = ..., + offset: SupportsIndex = 0, + dtype: DTypeLike | None = None, ) -> Any: ... @overload @@ -433,50 +509,40 @@ def cross( x2: _ArrayLikeUInt_co, /, *, - axis: int = ..., -) -> NDArray[unsignedinteger[Any]]: ... + axis: int = -1, +) -> NDArray[unsignedinteger]: ... @overload def cross( x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /, *, - axis: int = ..., -) -> NDArray[signedinteger[Any]]: ... + axis: int = -1, +) -> NDArray[signedinteger]: ... @overload def cross( x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /, *, - axis: int = ..., -) -> NDArray[floating[Any]]: ... + axis: int = -1, +) -> NDArray[floating]: ... @overload def cross( x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /, *, - axis: int = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + axis: int = -1, +) -> NDArray[complexfloating]: ... @overload -def matmul( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, -) -> NDArray[signedinteger[Any]]: ... +def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload -def matmul( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, -) -> NDArray[unsignedinteger[Any]]: ... 
+def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload -def matmul( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, -) -> NDArray[floating[Any]]: ... +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload -def matmul( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... +def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +@overload +def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... diff --git a/blimgui/dist64/numpy/linalg/_umath_linalg.cp313-win_amd64.lib b/blimgui/dist64/numpy/linalg/_umath_linalg.cp313-win_amd64.lib deleted file mode 100644 index 17fc7df..0000000 Binary files a/blimgui/dist64/numpy/linalg/_umath_linalg.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/linalg/_umath_linalg.cp314-win_amd64.lib b/blimgui/dist64/numpy/linalg/_umath_linalg.cp314-win_amd64.lib new file mode 100644 index 0000000..72ed374 Binary files /dev/null and b/blimgui/dist64/numpy/linalg/_umath_linalg.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/linalg/_umath_linalg.pyi b/blimgui/dist64/numpy/linalg/_umath_linalg.pyi index 00e3348..17ef482 100644 --- a/blimgui/dist64/numpy/linalg/_umath_linalg.pyi +++ b/blimgui/dist64/numpy/linalg/_umath_linalg.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 diff --git a/blimgui/dist64/numpy/linalg/lapack_lite.cp313-win_amd64.lib b/blimgui/dist64/numpy/linalg/lapack_lite.cp313-win_amd64.lib deleted file mode 100644 index afc459c..0000000 Binary files a/blimgui/dist64/numpy/linalg/lapack_lite.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/linalg/lapack_lite.cp314-win_amd64.lib 
b/blimgui/dist64/numpy/linalg/lapack_lite.cp314-win_amd64.lib new file mode 100644 index 0000000..aa09239 Binary files /dev/null and b/blimgui/dist64/numpy/linalg/lapack_lite.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/linalg/lapack_lite.pyi b/blimgui/dist64/numpy/linalg/lapack_lite.pyi index 01d33c2..c341ab8 100644 --- a/blimgui/dist64/numpy/linalg/lapack_lite.pyi +++ b/blimgui/dist64/numpy/linalg/lapack_lite.pyi @@ -1,4 +1,4 @@ -from typing import Any, Final, TypedDict, type_check_only +from typing import Final, TypedDict, type_check_only import numpy as np from numpy._typing import NDArray @@ -57,6 +57,8 @@ class _ZUNGQR(TypedDict): _ilp64: Final[bool] = ... +class LapackError(Exception): ... + def dgelsd( m: int, n: int, diff --git a/blimgui/dist64/numpy/linalg/linalg.py b/blimgui/dist64/numpy/linalg/linalg.py deleted file mode 100644 index 5885499..0000000 --- a/blimgui/dist64/numpy/linalg/linalg.py +++ /dev/null @@ -1,16 +0,0 @@ -def __getattr__(attr_name): - import warnings - from numpy.linalg import _linalg - ret = getattr(_linalg, attr_name, None) - if ret is None: - raise AttributeError( - f"module 'numpy.linalg.linalg' has no attribute {attr_name}") - warnings.warn( - "The numpy.linalg.linalg has been made private and renamed to " - "numpy.linalg._linalg. All public functions exported by it are " - f"available from numpy.linalg. 
Please use numpy.linalg.{attr_name} " - "instead.", - DeprecationWarning, - stacklevel=3 - ) - return ret diff --git a/blimgui/dist64/numpy/linalg/linalg.pyi b/blimgui/dist64/numpy/linalg/linalg.pyi deleted file mode 100644 index 6a51bf2..0000000 --- a/blimgui/dist64/numpy/linalg/linalg.pyi +++ /dev/null @@ -1,69 +0,0 @@ -from ._linalg import ( - LinAlgError, - cholesky, - cond, - cross, - det, - diagonal, - eig, - eigh, - eigvals, - eigvalsh, - inv, - lstsq, - matmul, - matrix_norm, - matrix_power, - matrix_rank, - matrix_transpose, - multi_dot, - norm, - outer, - pinv, - qr, - slogdet, - solve, - svd, - svdvals, - tensordot, - tensorinv, - tensorsolve, - trace, - vecdot, - vector_norm, -) - -__all__ = [ - "LinAlgError", - "cholesky", - "cond", - "cross", - "det", - "diagonal", - "eig", - "eigh", - "eigvals", - "eigvalsh", - "inv", - "lstsq", - "matmul", - "matrix_norm", - "matrix_power", - "matrix_rank", - "matrix_transpose", - "multi_dot", - "norm", - "outer", - "pinv", - "qr", - "slogdet", - "solve", - "svd", - "svdvals", - "tensordot", - "tensorinv", - "tensorsolve", - "trace", - "vecdot", - "vector_norm", -] diff --git a/blimgui/dist64/numpy/linalg/tests/test_deprecations.py b/blimgui/dist64/numpy/linalg/tests/test_deprecations.py index 3dad16f..83f2192 100644 --- a/blimgui/dist64/numpy/linalg/tests/test_deprecations.py +++ b/blimgui/dist64/numpy/linalg/tests/test_deprecations.py @@ -1,8 +1,9 @@ """Test deprecation and future warnings. 
""" +import pytest + import numpy as np -from numpy.testing import assert_warns def test_qr_mode_full_future_warning(): @@ -14,7 +15,7 @@ def test_qr_mode_full_future_warning(): """ a = np.eye(2) - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='full') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='f') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/blimgui/dist64/numpy/linalg/tests/test_linalg.py b/blimgui/dist64/numpy/linalg/tests/test_linalg.py index d608abe..a6f372a 100644 --- a/blimgui/dist64/numpy/linalg/tests/test_linalg.py +++ b/blimgui/dist64/numpy/linalg/tests/test_linalg.py @@ -1,28 +1,50 @@ """ Test functions for linalg module """ +import itertools import os +import subprocess import sys -import itertools +import textwrap import threading import traceback -import textwrap -import subprocess +import warnings + import pytest import numpy as np -from numpy import array, single, double, csingle, cdouble, dot, identity, matmul +from numpy import ( + array, + asarray, + atleast_2d, + cdouble, + csingle, + dot, + double, + identity, + inf, + linalg, + matmul, + multiply, + single, +) from numpy._core import swapaxes from numpy.exceptions import AxisError -from numpy import multiply, atleast_2d, inf, asarray -from numpy import linalg -from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError +from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.linalg._linalg import _multi_dot_matrix_chain_order from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_allclose, 
suppress_warnings, - assert_raises_regex, HAS_LAPACK64, IS_WASM - ) + HAS_LAPACK64, + IS_WASM, + NOGIL_BUILD, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + try: import numpy.linalg.lapack_lite except ImportError: @@ -71,7 +93,7 @@ def get_rtol(dtype): # used to categorize tests all_tags = { 'square', 'nonsquare', 'hermitian', # mutually exclusive - 'generalized', 'size-0', 'strided' # optional additions + 'generalized', 'size-0', 'strided' # optional additions } @@ -298,7 +320,7 @@ def _stride_comb_iter(x): for repeats in itertools.product(*tuple(stride_set)): new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] - slices = tuple([slice(None, None, repeat) for repeat in repeats]) + slices = tuple(slice(None, None, repeat) for repeat in repeats) # new array with different strides, but same data xi = np.empty(new_shape, dtype=x.dtype) @@ -707,6 +729,7 @@ def do(self, a, b, tags): assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], np.asarray(vt)), rtol=get_rtol(u.dtype)) + def hermitian(mat): axes = list(range(mat.ndim)) axes[-1], axes[-2] = axes[-2], axes[-1] @@ -770,15 +793,28 @@ def do(self, a, b, tags): class TestCond(CondCases): - def test_basic_nonsvd(self): + @pytest.mark.parametrize('is_complex', [False, True]) + def test_basic_nonsvd(self, is_complex): # Smoketest the non-svd norms A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + if is_complex: + # Since A is linearly scaled, the condition number should not change + A = A * (1 + 1j) assert_almost_equal(linalg.cond(A, inf), 4) - assert_almost_equal(linalg.cond(A, -inf), 2/3) + assert_almost_equal(linalg.cond(A, -inf), 2 / 3) assert_almost_equal(linalg.cond(A, 1), 4) assert_almost_equal(linalg.cond(A, -1), 0.5) assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + @pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 
'fro', np.inf, -np.inf]) + def test_cond_dtypes(self, dtype, norm_ord): + # Check that the condition number is computed in the same dtype + # as the input matrix + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype) + out_type = get_real_dtype(dtype) + assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type) + def test_singular(self): # Singular matrices have infinite condition number for # positive norms, and negative norms shouldn't raise @@ -802,14 +838,14 @@ def test_nan(self): p_pos = [None, 1, 2, 'fro'] A = np.ones((2, 2)) - A[0,1] = np.nan + A[0, 1] = np.nan for p in ps: c = linalg.cond(A, p) assert_(isinstance(c, np.float64)) assert_(np.isnan(c)) A = np.ones((3, 2, 2)) - A[1,0,1] = np.nan + A[1, 0, 1] = np.nan for p in ps: c = linalg.cond(A, p) assert_(np.isnan(c[1])) @@ -825,15 +861,15 @@ def test_stacked_singular(self): # singular np.random.seed(1234) A = np.random.rand(2, 2, 2, 2) - A[0,0] = 0 - A[1,1] = 0 + A[0, 0] = 0 + A[1, 1] = 0 for p in (None, 1, 2, 'fro', -1, -2): c = linalg.cond(A, p) - assert_equal(c[0,0], np.inf) - assert_equal(c[1,1], np.inf) - assert_(np.isfinite(c[0,1])) - assert_(np.isfinite(c[1,0])) + assert_equal(c[0, 0], np.inf) + assert_equal(c[1, 1], np.inf) + assert_(np.isfinite(c[0, 1])) + assert_(np.isfinite(c[1, 0])) class PinvCases(LinalgSquareTestCase, @@ -1032,8 +1068,8 @@ class TestMatrixPower: rshft_3 = rshft_0[[1, 2, 3, 0]] rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] noninv = array([[1, 0], [0, 0]]) - stacked = np.block([[[rshft_0]]]*2) - #FIXME the 'e' dtype might work in future + stacked = np.block([[[rshft_0]]] * 2) + # FIXME the 'e' dtype might work in future dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] def test_large_power(self, dt): @@ -1295,8 +1331,9 @@ def test_vector_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 0.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + 
warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 0.0) @@ -1311,11 +1348,11 @@ def test_vector_return_type(self): an = norm(at, 2) self.check_dtype(at, an) - assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0)) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 2.0)) an = norm(at, 4) self.check_dtype(at, an) - assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0)) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 4.0)) an = norm(at, np.inf) self.check_dtype(at, an) @@ -1458,8 +1495,9 @@ def test_matrix_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 2.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 1.0) @@ -1470,7 +1508,7 @@ def test_matrix_return_type(self): an = norm(at, 2) self.check_dtype(at, an) - assert_almost_equal(an, 3.0**(1.0/2.0)) + assert_almost_equal(an, 3.0**(1.0 / 2.0)) an = norm(at, -2) self.check_dtype(at, an) @@ -1627,7 +1665,7 @@ def test_matrix_rank(self): # accepts array-like assert_equal(matrix_rank([1]), 1) # greater than 2 dimensions treated as stacked matrices - ms = np.array([I, np.eye(4), np.zeros((4,4))]) + ms = np.array([I, np.eye(4), np.zeros((4, 4))]) assert_equal(matrix_rank(ms), np.array([3, 4, 0])) # works on scalar assert_equal(matrix_rank(1), 1) @@ -1707,7 +1745,6 @@ def check_qr(self, a): assert_(isinstance(r2, a_type)) assert_almost_equal(r2, r1) - @pytest.mark.parametrize(["m", "n"], [ (3, 0), (0, 3), @@ -1783,7 +1820,7 @@ def check_qr_stacked(self, a): assert_almost_equal(matmul(q, r), a) I_mat = np.identity(q.shape[-1]) stack_I_mat = np.broadcast_to(I_mat, - q.shape[:-2] + (q.shape[-1],)*2) + q.shape[:-2] + 
(q.shape[-1],) * 2) assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat) assert_almost_equal(np.triu(r[..., :, :]), r) @@ -1798,7 +1835,7 @@ def check_qr_stacked(self, a): assert_almost_equal(matmul(q1, r1), a) I_mat = np.identity(q1.shape[-1]) stack_I_mat = np.broadcast_to(I_mat, - q1.shape[:-2] + (q1.shape[-1],)*2) + q1.shape[:-2] + (q1.shape[-1],) * 2) assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1), stack_I_mat) assert_almost_equal(np.triu(r1[..., :, :]), r1) @@ -1823,7 +1860,7 @@ def test_stacked_inputs(self, outer_size, size, dt): A = rng.normal(size=outer_size + size).astype(dt) B = rng.normal(size=outer_size + size).astype(dt) self.check_qr_stacked(A) - self.check_qr_stacked(A + 1.j*B) + self.check_qr_stacked(A + 1.j * B) class TestCholesky: @@ -1840,7 +1877,7 @@ def test_basic_property(self, shape, dtype, upper): np.random.seed(1) a = np.random.randn(*shape) if np.issubdtype(dtype, np.complexfloating): - a = a + 1j*np.random.randn(*shape) + a = a + 1j * np.random.randn(*shape) t = list(range(len(shape))) t[-2:] = -1, -2 @@ -1882,7 +1919,7 @@ class ArraySubclass(np.ndarray): def test_upper_lower_arg(self): # Explicit test of upper argument that also checks the default. - a = np.array([[1+0j, 0-2j], [0+2j, 5+0j]]) + a = np.array([[1 + 0j, 0 - 2j], [0 + 2j, 5 + 0j]]) assert_equal(linalg.cholesky(a), linalg.cholesky(a, upper=False)) @@ -1944,9 +1981,13 @@ def test_generalized_raise_multiloop(): assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + @pytest.mark.skipif( threading.active_count() > 1, reason="skipping test that uses fork because there are multiple threads") +@pytest.mark.skipif( + NOGIL_BUILD, + reason="Cannot safely use fork in tests on the free-threaded build") def test_xerbla_override(): # Check that our xerbla has been successfully linked in. 
If it is not, # the default xerbla routine is called, which prints a message to stdout @@ -2177,8 +2218,7 @@ def test_non_square_handling(self, arr, ind): ((24, 8, 3), 1), ]) def test_tensorinv_shape(self, shape, ind): - a = np.eye(24) - a.shape = shape + a = np.eye(24).reshape(shape) ainv = linalg.tensorinv(a=a, ind=ind) expected = a.shape[ind:] + a.shape[:ind] actual = ainv.shape @@ -2188,15 +2228,13 @@ def test_tensorinv_shape(self, shape, ind): 0, -2, ]) def test_tensorinv_ind_limit(self, ind): - a = np.eye(24) - a.shape = (4, 6, 8, 3) + a = np.eye(24).reshape((4, 6, 8, 3)) with assert_raises(ValueError): linalg.tensorinv(a=a, ind=ind) def test_tensorinv_result(self): # mimic a docstring example - a = np.eye(24) - a.shape = (24, 8, 3) + a = np.eye(24).reshape((24, 8, 3)) ainv = linalg.tensorinv(a, ind=1) b = np.ones(24) assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) @@ -2239,9 +2277,9 @@ def test_blas64_dot(): n = 2**32 a = np.zeros([1, n], dtype=np.float32) b = np.ones([1, 1], dtype=np.float32) - a[0,-1] = 1 + a[0, -1] = 1 c = np.dot(b, a) - assert_equal(c[0,-1], 1) + assert_equal(c[0, -1], 1) @pytest.mark.xfail(not HAS_LAPACK64, @@ -2368,6 +2406,16 @@ def test_matrix_norm(): assert_almost_equal(actual, np.array([[14.2828]]), double_decimal=3) +def test_matrix_norm_empty(): + for shape in [(0, 2), (2, 0), (0, 0)]: + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(shape, dtype) + assert_equal(np.linalg.matrix_norm(x, ord="fro"), 0) + assert_equal(np.linalg.matrix_norm(x, ord="nuc"), 0) + assert_equal(np.linalg.matrix_norm(x, ord=1), 0) + assert_equal(np.linalg.matrix_norm(x, ord=2), 0) + assert_equal(np.linalg.matrix_norm(x, ord=np.inf), 0) + def test_vector_norm(): x = np.arange(9).reshape((3, 3)) actual = np.linalg.vector_norm(x) @@ -2384,3 +2432,11 @@ def test_vector_norm(): expected = np.full((1, 1), 14.2828, dtype='float64') assert_equal(actual.shape, expected.shape) assert_almost_equal(actual, expected, 
double_decimal=3) + + +def test_vector_norm_empty(): + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(0, dtype) + assert_equal(np.linalg.vector_norm(x, ord=1), 0) + assert_equal(np.linalg.vector_norm(x, ord=2), 0) + assert_equal(np.linalg.vector_norm(x, ord=np.inf), 0) diff --git a/blimgui/dist64/numpy/linalg/tests/test_regression.py b/blimgui/dist64/numpy/linalg/tests/test_regression.py index 0b01750..ab010cc 100644 --- a/blimgui/dist64/numpy/linalg/tests/test_regression.py +++ b/blimgui/dist64/numpy/linalg/tests/test_regression.py @@ -4,10 +4,14 @@ import pytest import numpy as np -from numpy import linalg, arange, float64, array, dot, transpose +from numpy import arange, array, dot, float64, linalg, transpose from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_array_equal, - assert_array_almost_equal, assert_array_less + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, ) @@ -29,7 +33,7 @@ def test_eig_build(self): 1.51971555e-15 + 0.j, -1.51308713e-15 + 0.j]) a = arange(13 * 13, dtype=float64) - a.shape = (13, 13) + a = a.reshape((13, 13)) a = a % 17 va, ve = linalg.eig(a) va.sort() @@ -40,9 +44,9 @@ def test_eigh_build(self): # Ticket 662. 
rvals = [68.60568999, 89.57756725, 106.67185574] - cov = array([[77.70273908, 3.51489954, 15.64602427], - [3.51489954, 88.97013878, -1.07431931], - [15.64602427, -1.07431931, 98.18223512]]) + cov = array([[77.70273908, 3.51489954, 15.64602427], + [ 3.51489954, 88.97013878, -1.07431931], + [15.64602427, -1.07431931, 98.18223512]]) vals, vecs = linalg.eigh(cov) assert_array_almost_equal(vals, rvals) @@ -64,8 +68,8 @@ def test_norm_vector_badarg(self): def test_lapack_endian(self): # For bug #1482 - a = array([[5.7998084, -2.1825367], - [-2.1825367, 9.85910595]], dtype='>f8') + a = array([[ 5.7998084, -2.1825367], + [-2.1825367, 9.85910595]], dtype='>f8') b = array(a, dtype=''%s'" % (dtype1, dtype2)) + raise ValueError(f"Incompatible dtypes '{dtype1}'<>'{dtype2}'") if dtype1.names is not None: # Allocate an output mask array with the properly broadcast shape. newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) @@ -2482,6 +2474,7 @@ def __str__(self): __repr__ = __str__ + # if you single index into a masked location you get this object. 
masked_print_option = _MaskedPrintOption('--') @@ -2501,18 +2494,18 @@ def _recursive_printoption(result, mask, printopt): _recursive_printoption(curdata, curmask, printopt) else: np.copyto(result, printopt, where=mask) - return + # For better or worse, these end in a newline -_legacy_print_templates = dict( - long_std=textwrap.dedent("""\ +_legacy_print_templates = { + 'long_std': textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """), - long_flx=textwrap.dedent("""\ + 'long_flx': textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = @@ -2520,18 +2513,18 @@ def _recursive_printoption(result, mask, printopt): %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """), - short_std=textwrap.dedent("""\ + 'short_std': textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """), - short_flx=textwrap.dedent("""\ + 'short_flx': textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """) -) +} ############################################################################### # MaskedArray class # @@ -2978,33 +2971,32 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, elif nm == nd: mask = np.reshape(mask, _data.shape) else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." 
- raise MaskError(msg % (nd, nm)) + msg = (f"Mask and data not compatible:" + f" data size is {nd}, mask size is {nm}.") + raise MaskError(msg) copy = True # Set the mask to the new value if _data._mask is nomask: _data._mask = mask _data._sharedmask = not copy + elif not keep_mask: + _data._mask = mask + _data._sharedmask = not copy else: - if not keep_mask: - _data._mask = mask - _data._sharedmask = not copy + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) else: - if _data.dtype.names is not None: - def _recursive_or(a, b): - "do a|=b on each field of a, recursively" - for name in a.dtype.names: - (af, bf) = (a[name], b[name]) - if af.dtype.names is not None: - _recursive_or(af, bf) - else: - af |= bf - - _recursive_or(_data._mask, mask) - else: - _data._mask = np.logical_or(mask, _data._mask) - _data._sharedmask = False + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False # Update fill_value. if fill_value is None: @@ -3020,7 +3012,6 @@ def _recursive_or(a, b): _data._baseclass = _baseclass return _data - def _update_from(self, obj): """ Copies some attributes of obj to self. 
@@ -3036,16 +3027,15 @@ def _update_from(self, obj): _optinfo.update(getattr(obj, '_basedict', {})) if not isinstance(obj, MaskedArray): _optinfo.update(getattr(obj, '__dict__', {})) - _dict = dict(_fill_value=getattr(obj, '_fill_value', None), - _hardmask=getattr(obj, '_hardmask', False), - _sharedmask=getattr(obj, '_sharedmask', False), - _isfield=getattr(obj, '_isfield', False), - _baseclass=getattr(obj, '_baseclass', _baseclass), - _optinfo=_optinfo, - _basedict=_optinfo) + _dict = {'_fill_value': getattr(obj, '_fill_value', None), + '_hardmask': getattr(obj, '_hardmask', False), + '_sharedmask': getattr(obj, '_sharedmask', False), + '_isfield': getattr(obj, '_isfield', False), + '_baseclass': getattr(obj, '_baseclass', _baseclass), + '_optinfo': _optinfo, + '_basedict': _optinfo} self.__dict__.update(_dict) self.__dict__.update(_optinfo) - return def __array_finalize__(self, obj): """ @@ -3366,11 +3356,10 @@ def _scalar_heuristic(arr, elem): return dout # Just a scalar + elif mout: + return masked else: - if mout: - return masked - else: - return dout + return dout else: # Force dout to MA dout = dout.view(type(self)) @@ -3741,7 +3730,8 @@ def shrink_mask(self): Returns ------- - None + result : MaskedArray + A :class:`~ma.MaskedArray` object. 
Examples -------- @@ -4097,18 +4087,17 @@ def __repr__(self): else: name = self._baseclass.__name__ - # 2016-11-19: Demoted to legacy format if np._core.arrayprint._get_legacy_print_mode() <= 113: is_long = self.ndim > 1 - parameters = dict( - name=name, - nlen=" " * len(name), - data=str(self), - mask=str(self._mask), - fill=str(self.fill_value), - dtype=str(self.dtype) - ) + parameters = { + 'name': name, + 'nlen': " " * len(name), + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + 'dtype': str(self.dtype) + } is_structured = bool(self.dtype.names) key = '{}_{}'.format( 'long' if is_long else 'short', @@ -4145,7 +4134,7 @@ def __repr__(self): prefix = '' # absorbed into the first indent else: # each key on its own line, indented by two spaces - indents = {k: ' ' * min_indent for k in keys} + indents = dict.fromkeys(keys, ' ' * min_indent) prefix = prefix + '\n' # first key on the next line # format the field values @@ -4162,7 +4151,7 @@ def __repr__(self): suffix=',') if self._fill_value is None: - self.fill_value # initialize fill_value + self.fill_value # initialize fill_value # noqa: B018 if (self._fill_value.dtype.kind in ("S", "U") and self.dtype.kind == self._fill_value.dtype.kind): @@ -4181,7 +4170,7 @@ def __repr__(self): # join keys with values and indentations result = ',\n'.join( - '{}{}={}'.format(indents[k], k, reprs[k]) + f'{indents[k]}{k}={reprs[k]}' for k in keys ) return prefix + result + ')' @@ -4362,15 +4351,6 @@ def __rmul__(self, other): # we get here from `other * self`. return multiply(other, self) - def __div__(self, other): - """ - Divide other into self, and return a new masked array. - - """ - if self._delegate_binop(other): - return NotImplemented - return divide(self, other) - def __truediv__(self, other): """ Divide other into self, and return a new masked array. 
@@ -4429,9 +4409,8 @@ def __iadd__(self, other): if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m - else: - if m is not nomask: - self._mask += m + elif m is not nomask: + self._mask += m other_data = getdata(other) other_data = np.where(self._mask, other_data.dtype.type(0), other_data) self._data.__iadd__(other_data) @@ -4471,25 +4450,6 @@ def __imul__(self, other): self._data.__imul__(other_data) return self - def __idiv__(self, other): - """ - Divide self by other in-place. - - """ - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 4 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.divide] - other_data = np.where( - dom_mask, other_data.dtype.type(fval), other_data) - self._mask |= new_mask - other_data = np.where(self._mask, other_data.dtype.type(1), other_data) - self._data.__idiv__(other_data) - return self - def __ifloordiv__(self, other): """ Floor divide self by other in-place. @@ -4701,7 +4661,7 @@ def count(self, axis=None, keepdims=np._NoValue): raise np.exceptions.AxisError(axis=axis, ndim=self.ndim) return 1 elif axis is None: - if kwargs.get('keepdims', False): + if kwargs.get('keepdims'): return np.array(self.size, dtype=np.intp, ndmin=self.ndim) return self.size @@ -4710,7 +4670,7 @@ def count(self, axis=None, keepdims=np._NoValue): for ax in axes: items *= self.shape[ax] - if kwargs.get('keepdims', False): + if kwargs.get('keepdims'): out_dims = list(self.shape) for a in axes: out_dims[a] = 1 @@ -4788,7 +4748,6 @@ def ravel(self, order='C'): r._mask = nomask return r - def reshape(self, *s, **kwargs): """ Give a new shape to the array without changing its data. 
@@ -4848,7 +4807,6 @@ def reshape(self, *s, **kwargs): fill_value=999999) """ - kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask @@ -5189,7 +5147,7 @@ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ (this docstring should be overwritten) """ - #!!!: implement out + test! + # !!!: implement out + test! m = self._mask if m is nomask: result = super().trace(offset=offset, axis1=axis1, axis2=axis2, @@ -5484,8 +5442,8 @@ def anom(self, axis=None, dtype=None): The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. + the default is float32; for arrays of float types it is the same as + the array type. See Also -------- @@ -5658,7 +5616,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, is used. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. - order : list, optional + order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. @@ -5842,11 +5800,6 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True, stable : bool, optional Only for compatibility with ``np.sort``. Ignored. - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - See Also -------- numpy.ndarray.sort : Method to sort an array in-place. 
@@ -6275,7 +6228,7 @@ def take(self, indices, axis=None, out=None, mode='raise'): mask=[[False, False], [ True, False]], fill_value=999999) - """ + """ (_data, _mask) = (self._data, self._mask) cls = type(self) # Make sure the indices are not masked @@ -6343,7 +6296,6 @@ def mT(self): else: return masked_array(data=self.data.mT, mask=self.mask.mT) - def tolist(self, fill_value=None): """ Return the data portion of the masked array as a hierarchical Python list. @@ -6396,21 +6348,6 @@ def tolist(self, fill_value=None): result.shape = inishape return result.tolist() - def tostring(self, fill_value=None, order='C'): - r""" - A compatibility alias for `tobytes`, with exactly the same behavior. - - Despite its name, it returns `bytes` not `str`\ s. - - .. deprecated:: 1.19.0 - """ - # 2020-03-30, Numpy 1.19.0 - warnings.warn( - "tostring() is deprecated. Use tobytes() instead.", - DeprecationWarning, stacklevel=2) - - return self.tobytes(fill_value, order=order) - def tobytes(self, fill_value=None, order='C'): """ Return the array data as a string containing the raw bytes in the array. @@ -6702,14 +6639,14 @@ def filled(self, fill_value=None): def tolist(self): """ - Transforms the mvoid object into a tuple. + Transforms the mvoid object into a tuple. - Masked fields are replaced by None. + Masked fields are replaced by None. - Returns - ------- - returned_tuple - Tuple of fields + Returns + ------- + returned_tuple + Tuple of fields """ _mask = self._mask if _mask is nomask: @@ -6841,16 +6778,17 @@ def __repr__(self): return object.__repr__(self) def __format__(self, format_spec): - # Replace ndarray.__format__ with the default, which supports no format characters. - # Supporting format characters is unwise here, because we do not know what type - # the user was expecting - better to not guess. + # Replace ndarray.__format__ with the default, which supports no + # format characters. 
+ # Supporting format characters is unwise here, because we do not know + # what type the user was expecting - better to not guess. try: return object.__format__(self, format_spec) except TypeError: # 2020-03-23, NumPy 1.19.0 warnings.warn( - "Format strings passed to MaskedConstant are ignored, but in future may " - "error or produce different behavior", + "Format strings passed to MaskedConstant are ignored," + " but in future may error or produce different behavior", FutureWarning, stacklevel=2 ) return object.__format__(self, "") @@ -6916,6 +6854,8 @@ def array(data, dtype=None, copy=False, order=None, subok=subok, keep_mask=keep_mask, hard_mask=hard_mask, fill_value=fill_value, ndmin=ndmin, shrink=shrink, order=order) + + array.__doc__ = masked_array.__doc__ @@ -7003,18 +6943,19 @@ def reduce(self, target, axis=np._NoValue): m = getmask(target) if axis is np._NoValue and target.ndim > 1: + name = self.__name__ # 2017-05-06, Numpy 1.13.0: warn on axis default warnings.warn( - f"In the future the default for ma.{self.__name__}.reduce will be axis=0, " - f"not the current None, to match np.{self.__name__}.reduce. " + f"In the future the default for ma.{name}.reduce will be axis=0, " + f"not the current None, to match np.{name}.reduce. 
" "Explicitly pass 0 or None to silence this warning.", MaskedArrayFutureWarning, stacklevel=2) axis = None if axis is not np._NoValue: - kwargs = dict(axis=axis) + kwargs = {'axis': axis} else: - kwargs = dict() + kwargs = {} if m is nomask: t = self.f.reduce(target, **kwargs) @@ -7055,6 +6996,8 @@ def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): # fill_value argument return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out, **kwargs) + + min.__doc__ = MaskedArray.min.__doc__ def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): @@ -7067,6 +7010,8 @@ def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): # fill_value argument return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out, **kwargs) + + max.__doc__ = MaskedArray.max.__doc__ @@ -7079,6 +7024,8 @@ def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): # a fill_value argument return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out, **kwargs) + + ptp.__doc__ = MaskedArray.ptp.__doc__ @@ -7087,7 +7034,7 @@ def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): ############################################################################## -class _frommethod: +def _frommethod(methodname: str, reversed: bool = False): """ Define functions from existing MaskedArray methods. @@ -7095,44 +7042,47 @@ class _frommethod: ---------- methodname : str Name of the method to transform. - + reversed : bool, optional + Whether to reverse the first two arguments of the method. Default is False. 
""" + method = getattr(MaskedArray, methodname) + assert callable(method) - def __init__(self, methodname, reversed=False): - self.__name__ = methodname - self.__qualname__ = methodname - self.__doc__ = self.getdoc() - self.reversed = reversed + signature = inspect.signature(method) + params = list(signature.parameters.values()) + params[0] = params[0].replace(name="a") # rename 'self' to 'a' - def getdoc(self): - "Return the doc of the function (from the doc of the method)." - meth = getattr(MaskedArray, self.__name__, None) or\ - getattr(np, self.__name__, None) - signature = self.__name__ + get_object_signature(meth) - if meth is not None: - doc = """ %s\n%s""" % ( - signature, getattr(meth, '__doc__', None)) - return doc + if reversed: + assert len(params) >= 2 + params[0], params[1] = params[1], params[0] - def __call__(self, a, *args, **params): - if self.reversed: - args = list(args) - a, args[0] = args[0], a + def wrapper(a, b, *args, **params): + return getattr(asanyarray(b), methodname)(a, *args, **params) - marr = asanyarray(a) - method_name = self.__name__ - method = getattr(type(marr), method_name, None) - if method is None: - # use the corresponding np function - method = getattr(np, method_name) + else: + def wrapper(a, *args, **params): + return getattr(asanyarray(a), methodname)(*args, **params) + + wrapper.__signature__ = signature.replace(parameters=params) + wrapper.__name__ = wrapper.__qualname__ = methodname - return method(marr, *args, **params) + # __doc__ is None when using `python -OO ...` + if method.__doc__ is not None: + str_signature = f"{methodname}{signature}" + # TODO: For methods with a docstring "Parameters" section, that do not already + # mention `a` (see e.g. `MaskedArray.var.__doc__`), it should be inserted there. 
+ wrapper.__doc__ = f" {str_signature}\n{method.__doc__}" + + return wrapper all = _frommethod('all') anomalies = anom = _frommethod('anom') any = _frommethod('any') +argmax = _frommethod('argmax') +argmin = _frommethod('argmin') compress = _frommethod('compress', reversed=True) +count = _frommethod('count') cumprod = _frommethod('cumprod') cumsum = _frommethod('cumsum') copy = _frommethod('copy') @@ -7144,7 +7094,7 @@ def __call__(self, a, *args, **params): minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) nonzero = _frommethod('nonzero') prod = _frommethod('prod') -product = _frommethod('prod') +product = _frommethod('product') ravel = _frommethod('ravel') repeat = _frommethod('repeat') shrink_mask = _frommethod('shrink_mask') @@ -7156,7 +7106,6 @@ def __call__(self, a, *args, **params): trace = _frommethod('trace') var = _frommethod('var') -count = _frommethod('count') def take(a, indices, axis=None, out=None, mode='raise'): """ @@ -7244,8 +7193,6 @@ def power(a, b, third=None): result._data[invalid] = result.fill_value return result -argmin = _frommethod('argmin') -argmax = _frommethod('argmax') def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None, *, stable=None): @@ -7261,6 +7208,8 @@ def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=fill_value, stable=None) else: return a.argsort(axis=axis, kind=kind, order=order, stable=None) + + argsort.__doc__ = MaskedArray.argsort.__doc__ def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, @@ -7637,7 +7586,6 @@ def putmask(a, mask, values): # , mode='raise'): valmask = getmaskarray(values) np.copyto(a._mask, valmask, where=mask) np.copyto(a._data, valdata, where=mask) - return def transpose(a, axes=None): @@ -7801,18 +7749,23 @@ def ndim(obj): """ return np.ndim(getdata(obj)) + ndim.__doc__ = np.ndim.__doc__ def shape(obj): "maskedarray version of the numpy function." 
return np.shape(getdata(obj)) + + shape.__doc__ = np.shape.__doc__ def size(obj, axis=None): "maskedarray version of the numpy function." return np.size(getdata(obj), axis) + + size.__doc__ = np.size.__doc__ @@ -8179,6 +8132,8 @@ def round_(a, decimals=0, out=None): if hasattr(out, '_mask'): out._mask = getmask(a) return out + + round = round_ @@ -8296,6 +8251,8 @@ def inner(a, b): if fb.ndim == 0: fb.shape = (1,) return np.inner(fa, fb).view(MaskedArray) + + inner.__doc__ = doc_note(np.inner.__doc__, "Masked values are replaced by 0.") innerproduct = inner @@ -8314,6 +8271,8 @@ def outer(a, b): mb = getmaskarray(b) m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False) return masked_array(d, mask=m) + + outer.__doc__ = doc_note(np.outer.__doc__, "Masked values are replaced by 0.") outerproduct = outer @@ -8350,9 +8309,9 @@ def correlate(a, v, mode='valid', propagate_mask=True): Refer to the `np.convolve` docstring. Note that the default is 'valid', unlike `convolve`, which uses 'full'. propagate_mask : bool - If True, then a result element is masked if any masked element contributes towards it. - If False, then a result element is only masked if no non-masked element - contribute towards it + If True, then a result element is masked if any masked element contributes + towards it. If False, then a result element is only masked if no non-masked + element contribute towards it Returns ------- @@ -8643,7 +8602,7 @@ def asarray(a, dtype=None, order=None): subok=False, order=order) -def asanyarray(a, dtype=None): +def asanyarray(a, dtype=None, order=None): """ Convert the input to a masked array, conserving subclasses. @@ -8656,9 +8615,13 @@ def asanyarray(a, dtype=None): Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. 
+ order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array ``a``. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if ``a`` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'K'. Returns ------- @@ -8688,9 +8651,18 @@ def asanyarray(a, dtype=None): """ # workaround for #8666, to preserve identity. Ideally the bottom line # would handle this for us. - if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype): + if ( + isinstance(a, MaskedArray) + and (dtype is None or dtype == a.dtype) + and ( + order in {None, 'A', 'K'} + or order == 'C' and a.flags.carray + or order == 'F' and a.flags.f_contiguous + ) + ): return a - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True, + order=order) ############################################################################## @@ -8768,95 +8740,93 @@ def fromflex(fxarray): return masked_array(fxarray['_data'], mask=fxarray['_mask']) -class _convert2ma: +def _convert2ma(funcname: str, np_ret: str, np_ma_ret: str, + params: dict[str, str] | None = None): + """Convert function from numpy to numpy.ma.""" + func = getattr(np, funcname) + params = params or {} - """ - Convert functions from numpy to numpy.ma. + @functools.wraps(func, assigned=set(functools.WRAPPER_ASSIGNMENTS) - {"__module__"}) + def wrapper(*args, **kwargs): + common_params = kwargs.keys() & params.keys() + extras = params | {p: kwargs.pop(p) for p in common_params} - Parameters - ---------- - _methodname : string - Name of the method to transform. 
+ result = func.__call__(*args, **kwargs).view(MaskedArray) - """ - __doc__ = None + if "fill_value" in common_params: + result.fill_value = extras["fill_value"] + if "hardmask" in common_params: + result._hardmask = bool(extras["hardmask"]) - def __init__(self, funcname, np_ret, np_ma_ret, params=None): - self._func = getattr(np, funcname) - self.__doc__ = self.getdoc(np_ret, np_ma_ret) - self._extras = params or {} + return result - def getdoc(self, np_ret, np_ma_ret): - "Return the doc of the function (from the doc of the method)." - doc = getattr(self._func, '__doc__', None) - sig = get_object_signature(self._func) - if doc: - doc = self._replace_return_type(doc, np_ret, np_ma_ret) - # Add the signature of the function at the beginning of the doc - if sig: - sig = "%s%s\n" % (self._func.__name__, sig) - doc = sig + doc - return doc + # workaround for a doctest bug in Python 3.11 that incorrectly assumes `__code__` + # exists on wrapped functions + del wrapper.__wrapped__ - def _replace_return_type(self, doc, np_ret, np_ma_ret): - """ - Replace documentation of ``np`` function's return type. + # `arange`, `empty`, `empty_like`, `frombuffer`, and `zeros` have no signature + try: + signature = inspect.signature(func) + except ValueError: + signature = inspect.Signature([ + inspect.Parameter('args', inspect.Parameter.VAR_POSITIONAL), + inspect.Parameter('kwargs', inspect.Parameter.VAR_KEYWORD), + ]) + + if params: + sig_params = list(signature.parameters.values()) + + # pop `**kwargs` if present + sig_kwargs = None + if sig_params[-1].kind is inspect.Parameter.VAR_KEYWORD: + sig_kwargs = sig_params.pop() + + # add new keyword-only parameters + for param_name, default in params.items(): + new_param = inspect.Parameter( + param_name, + inspect.Parameter.KEYWORD_ONLY, + default=default, + ) + sig_params.append(new_param) - Replaces it with the proper type for the ``np.ma`` function. 
+ # re-append `**kwargs` if it was present + if sig_kwargs: + sig_params.append(sig_kwargs) - Parameters - ---------- - doc : str - The documentation of the ``np`` method. - np_ret : str - The return type string of the ``np`` method that we want to - replace. (e.g. "out : ndarray") - np_ma_ret : str - The return type string of the ``np.ma`` method. - (e.g. "out : MaskedArray") - """ - if np_ret not in doc: - raise RuntimeError( - f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " - f"The documentation string for return type, {np_ret}, is not " - f"found in the docstring for `np.{self._func.__name__}`. " - f"Fix the docstring for `np.{self._func.__name__}` or " - "update the expected string for return type." - ) + signature = signature.replace(parameters=sig_params) - return doc.replace(np_ret, np_ma_ret) + wrapper.__signature__ = signature - def __call__(self, *args, **params): - # Find the common parameters to the call and the definition - _extras = self._extras - common_params = set(params).intersection(_extras) - # Drop the common parameters from the call - for p in common_params: - _extras[p] = params.pop(p) - # Get the result - result = self._func.__call__(*args, **params).view(MaskedArray) - if "fill_value" in common_params: - result.fill_value = _extras.get("fill_value", None) - if "hardmask" in common_params: - result._hardmask = bool(_extras.get("hard_mask", False)) - return result + # __doc__ is None when using `python -OO ...` + if func.__doc__ is not None: + assert np_ret in func.__doc__, ( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{func.__name__}`. " + f"Fix the docstring for `np.{func.__name__}` or " + "update the expected string for return type." 
+ ) + wrapper.__doc__ = inspect.cleandoc(func.__doc__).replace(np_ret, np_ma_ret) + + return wrapper arange = _convert2ma( 'arange', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='arange : ndarray', np_ma_ret='arange : MaskedArray', ) clip = _convert2ma( 'clip', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='clipped_array : ndarray', np_ma_ret='clipped_array : MaskedArray', ) empty = _convert2ma( 'empty', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) @@ -8877,19 +8847,19 @@ def __call__(self, *args, **params): ) identity = _convert2ma( 'identity', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) indices = _convert2ma( 'indices', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='grid : one ndarray or tuple of ndarrays', np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays', ) ones = _convert2ma( 'ones', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) @@ -8900,13 +8870,13 @@ def __call__(self, *args, **params): ) squeeze = _convert2ma( 'squeeze', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='squeezed : ndarray', np_ma_ret='squeezed : MaskedArray', ) zeros = _convert2ma( 'zeros', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) diff --git a/blimgui/dist64/numpy/ma/core.pyi b/blimgui/dist64/numpy/ma/core.pyi index 03f6e6c..5e144e5 100644 --- a/blimgui/dist64/numpy/ma/core.pyi +++ b/blimgui/dist64/numpy/ma/core.pyi 
@@ -1,13 +1,115 @@ # pyright: reportIncompatibleMethodOverride=false -# ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204 - -from typing import Any, SupportsIndex, TypeVar +import datetime as dt +import types from _typeshed import Incomplete -from typing_extensions import deprecated +from collections.abc import Callable, Sequence +from typing import ( + Any, + Concatenate, + Final, + Generic, + Literal, + Never, + NoReturn, + Self, + SupportsComplex, + SupportsFloat, + SupportsIndex, + SupportsInt, + TypeAlias, + Unpack, + final, + overload, +) +from typing_extensions import Buffer, ParamSpec, TypeIs, TypeVar, override -from numpy import _OrderKACF, amax, amin, bool_, dtype, expand_dims, float64, ndarray -from numpy._typing import ArrayLike, _DTypeLikeBool +import numpy as np +from numpy import ( + _AnyShapeT, + _HasDType, + _HasDTypeWithRealAndImag, + _ModeKind, + _OrderACF, + _OrderCF, + _OrderKACF, + _PartitionKind, + _SortKind, + _ToIndices, + amax, + amin, + bool_, + bytes_, + character, + complex128, + complexfloating, + datetime64, + dtype, + dtypes, + expand_dims, + flexible, + float16, + float32, + float64, + floating, + generic, + inexact, + int8, + int64, + int_, + integer, + intp, + ndarray, + number, + object_, + signedinteger, + str_, + timedelta64, + ufunc, + unsignedinteger, + void, +) +from numpy._core.fromnumeric import _UFuncKwargs # type-check only +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _32Bit, + _64Bit, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeBytes_co, + _ArrayLikeComplex128_co, + _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _CharLike_co, + _DT64Codes, + _DTypeLike, + _DTypeLikeBool, + _DTypeLikeVoid, + _FloatLike_co, + _IntLike_co, + 
_NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, + _TD64Like_co, +) +from numpy._typing._dtype_like import _VoidDTypeLike __all__ = [ "MAError", @@ -190,299 +292,2115 @@ __all__ = [ "zeros_like", ] -_ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) -_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_ShapeOrAnyT = TypeVar("_ShapeOrAnyT", bound=_Shape, default=_AnyShape) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) +_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) +_MArrayT = TypeVar("_MArrayT", bound=MaskedArray[Any, Any]) +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) +_UFuncT_co = TypeVar( + "_UFuncT_co", + # the `| Callable` simplifies self-binding to the ufunc's callable signature + bound=np.ufunc | Callable[..., object], + default=np.ufunc, + covariant=True, +) +_Pss = ParamSpec("_Pss") +_T = TypeVar("_T") + +_Ignored: TypeAlias = object + +# A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` +_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] +_Masked1D: TypeAlias = MaskedArray[tuple[int], dtype[_ScalarT]] + +_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] +_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] +_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] + +_ArrayInt_co: TypeAlias = NDArray[integer | bool_] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +_ArangeScalar: TypeAlias = floating | integer | datetime64 | timedelta64 + +_NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of `np.False_` +_MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] +_FillValue: TypeAlias = complex | None # int | float | complex | None +_FillValueCallable: TypeAlias = Callable[[np.dtype | ArrayLike], _FillValue] +_DomainCallable: TypeAlias = Callable[..., NDArray[np.bool_]] -MaskType = bool -nomask: bool +### + +MaskType = np.bool_ + +nomask: Final[_NoMaskType] = ... class MaskedArrayFutureWarning(FutureWarning): ... 
class MAError(Exception): ... class MaskError(MAError): ... -def default_fill_value(obj): ... -def minimum_fill_value(obj): ... -def maximum_fill_value(obj): ... -def set_fill_value(a, fill_value): ... -def common_fill_value(a, b): ... -def filled(a, fill_value=...): ... -def getdata(a, subok=...): ... -get_data = getdata +# not generic at runtime +class _MaskedUFunc(Generic[_UFuncT_co]): + f: _UFuncT_co # readonly + def __init__(self, /, ufunc: _UFuncT_co) -> None: ... + +# not generic at runtime +class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + fill: Final[_FillValue] + domain: Final[_DomainCallable | None] + + def __init__(self, /, mufunc: _UFuncT_co, fill: _FillValue = 0, domain: _DomainCallable | None = None) -> None: ... + + # NOTE: This might not work with overloaded callable signatures might not work on + # pyright, which is a long-standing issue, and is unique to pyright: + # https://github.com/microsoft/pyright/issues/9663 + # https://github.com/microsoft/pyright/issues/10849 + # https://github.com/microsoft/pyright/issues/10899 + # https://github.com/microsoft/pyright/issues/11049 + def __call__( + self: _MaskedUnaryOperation[Callable[Concatenate[Any, _Pss], _T]], + /, + a: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + +# not generic at runtime +class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + fillx: Final[_FillValue] + filly: Final[_FillValue] + + def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ... + + # NOTE: See the comment in `_MaskedUnaryOperation.__call__` + def __call__( + self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + /, + a: ArrayLike, + b: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. 
+ def reduce(self, /, target: ArrayLike, axis: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + def accumulate(self, /, target: ArrayLike, axis: SupportsIndex = 0) -> _MaskedArray[Incomplete]: ... + +# not generic at runtime +class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + domain: Final[_DomainCallable] + fillx: Final[_FillValue] + filly: Final[_FillValue] + + def __init__( + self, + /, + dbfunc: _UFuncT_co, + domain: _DomainCallable, + fillx: _FillValue = 0, + filly: _FillValue = 0, + ) -> None: ... + + # NOTE: See the comment in `_MaskedUnaryOperation.__call__` + def __call__( + self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + /, + a: ArrayLike, + b: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + +# not generic at runtime +class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + compare: Final[_MaskedBinaryOperation] + fill_value_func: Final[_FillValueCallable] + + def __init__( + self, + /, + ufunc: _UFuncT_co, + compare: _MaskedBinaryOperation, + fill_value: _FillValueCallable, + ) -> None: ... + + # NOTE: This class is only used internally for `maximum` and `minimum`, so we are + # able to annotate the `__call__` method specifically for those two functions. + @overload + def __call__(self, /, a: _ArrayLike[_ScalarT], b: _ArrayLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... + @overload + def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex | _NoValueType = ...) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... 
+ +@final +class _MaskedPrintOption: + _display: str + _enabled: bool | Literal[0, 1] + def __init__(self, /, display: str) -> None: ... + def display(self, /) -> str: ... + def set_display(self, /, s: str) -> None: ... + def enabled(self, /) -> bool: ... + def enable(self, /, shrink: bool | Literal[0, 1] = 1) -> None: ... -def fix_invalid(a, mask=..., copy=..., fill_value=...): ... - -class _MaskedUFunc: - f: Any - __doc__: Any - __name__: Any - def __init__(self, ufunc): ... - -class _MaskedUnaryOperation(_MaskedUFunc): - fill: Any - domain: Any - def __init__(self, mufunc, fill=..., domain=...): ... - def __call__(self, a, *args, **kwargs): ... - -class _MaskedBinaryOperation(_MaskedUFunc): - fillx: Any - filly: Any - def __init__(self, mbfunc, fillx=..., filly=...): ... - def __call__(self, a, b, *args, **kwargs): ... - def reduce(self, target, axis=..., dtype=...): ... - def outer(self, a, b): ... - def accumulate(self, target, axis=...): ... - -class _DomainedBinaryOperation(_MaskedUFunc): - domain: Any - fillx: Any - filly: Any - def __init__(self, dbfunc, domain, fillx=..., filly=...): ... - def __call__(self, a, b, *args, **kwargs): ... 
- -exp: _MaskedUnaryOperation -conjugate: _MaskedUnaryOperation -sin: _MaskedUnaryOperation -cos: _MaskedUnaryOperation -arctan: _MaskedUnaryOperation -arcsinh: _MaskedUnaryOperation -sinh: _MaskedUnaryOperation -cosh: _MaskedUnaryOperation -tanh: _MaskedUnaryOperation -abs: _MaskedUnaryOperation -absolute: _MaskedUnaryOperation -angle: _MaskedUnaryOperation -fabs: _MaskedUnaryOperation -negative: _MaskedUnaryOperation -floor: _MaskedUnaryOperation -ceil: _MaskedUnaryOperation -around: _MaskedUnaryOperation -logical_not: _MaskedUnaryOperation -sqrt: _MaskedUnaryOperation -log: _MaskedUnaryOperation -log2: _MaskedUnaryOperation -log10: _MaskedUnaryOperation -tan: _MaskedUnaryOperation -arcsin: _MaskedUnaryOperation -arccos: _MaskedUnaryOperation -arccosh: _MaskedUnaryOperation -arctanh: _MaskedUnaryOperation - -add: _MaskedBinaryOperation -subtract: _MaskedBinaryOperation -multiply: _MaskedBinaryOperation -arctan2: _MaskedBinaryOperation -equal: _MaskedBinaryOperation -not_equal: _MaskedBinaryOperation -less_equal: _MaskedBinaryOperation -greater_equal: _MaskedBinaryOperation -less: _MaskedBinaryOperation -greater: _MaskedBinaryOperation -logical_and: _MaskedBinaryOperation +masked_print_option: Final[_MaskedPrintOption] = ... + +exp: _MaskedUnaryOperation = ... +conjugate: _MaskedUnaryOperation = ... +sin: _MaskedUnaryOperation = ... +cos: _MaskedUnaryOperation = ... +arctan: _MaskedUnaryOperation = ... +arcsinh: _MaskedUnaryOperation = ... +sinh: _MaskedUnaryOperation = ... +cosh: _MaskedUnaryOperation = ... +tanh: _MaskedUnaryOperation = ... +abs: _MaskedUnaryOperation = ... +absolute: _MaskedUnaryOperation = ... +angle: _MaskedUnaryOperation = ... +fabs: _MaskedUnaryOperation = ... +negative: _MaskedUnaryOperation = ... +floor: _MaskedUnaryOperation = ... +ceil: _MaskedUnaryOperation = ... +around: _MaskedUnaryOperation = ... +logical_not: _MaskedUnaryOperation = ... +sqrt: _MaskedUnaryOperation = ... +log: _MaskedUnaryOperation = ... 
+log2: _MaskedUnaryOperation = ... +log10: _MaskedUnaryOperation = ... +tan: _MaskedUnaryOperation = ... +arcsin: _MaskedUnaryOperation = ... +arccos: _MaskedUnaryOperation = ... +arccosh: _MaskedUnaryOperation = ... +arctanh: _MaskedUnaryOperation = ... + +add: _MaskedBinaryOperation = ... +subtract: _MaskedBinaryOperation = ... +multiply: _MaskedBinaryOperation = ... +arctan2: _MaskedBinaryOperation = ... +equal: _MaskedBinaryOperation = ... +not_equal: _MaskedBinaryOperation = ... +less_equal: _MaskedBinaryOperation = ... +greater_equal: _MaskedBinaryOperation = ... +less: _MaskedBinaryOperation = ... +greater: _MaskedBinaryOperation = ... +logical_and: _MaskedBinaryOperation = ... def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... -logical_or: _MaskedBinaryOperation +logical_or: _MaskedBinaryOperation = ... def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... -logical_xor: _MaskedBinaryOperation -bitwise_and: _MaskedBinaryOperation -bitwise_or: _MaskedBinaryOperation -bitwise_xor: _MaskedBinaryOperation -hypot: _MaskedBinaryOperation - -divide: _DomainedBinaryOperation -true_divide: _DomainedBinaryOperation -floor_divide: _DomainedBinaryOperation -remainder: _DomainedBinaryOperation -fmod: _DomainedBinaryOperation -mod: _DomainedBinaryOperation - -def make_mask_descr(ndtype): ... -def getmask(a): ... +logical_xor: _MaskedBinaryOperation = ... +bitwise_and: _MaskedBinaryOperation = ... +bitwise_or: _MaskedBinaryOperation = ... +bitwise_xor: _MaskedBinaryOperation = ... +hypot: _MaskedBinaryOperation = ... + +divide: _DomainedBinaryOperation = ... +true_divide: _DomainedBinaryOperation = ... +floor_divide: _DomainedBinaryOperation = ... +remainder: _DomainedBinaryOperation = ... +fmod: _DomainedBinaryOperation = ... +mod: _DomainedBinaryOperation = ... 
+ +# `obj` can be anything (even `object()`), and is too "flexible", so we can't +# meaningfully annotate it, or its return type. +def default_fill_value(obj: object) -> Any: ... +def minimum_fill_value(obj: object) -> Any: ... +def maximum_fill_value(obj: object) -> Any: ... + +# +@overload # returns `a.fill_value` if `a` is a `MaskedArray` +def get_fill_value(a: _MaskedArray[_ScalarT]) -> _ScalarT: ... +@overload # otherwise returns `default_fill_value(a)` +def get_fill_value(a: object) -> Any: ... + +# this is a noop if `a` isn't a `MaskedArray`, so we only accept `MaskedArray` input +def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ... + +# the return type depends on the *values* of `a` and `b` (which cannot be known +# statically), which is why we need to return an awkward `_ | None` +@overload +def common_fill_value(a: _MaskedArray[_ScalarT], b: MaskedArray) -> _ScalarT | None: ... +@overload +def common_fill_value(a: object, b: object) -> Any: ... + +# keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray` +@overload +def filled(a: ndarray[_ShapeT, _DTypeT], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT, _DTypeT]: ... +@overload +def filled(a: _ArrayLike[_ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT]: ... +@overload +def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ... + +# keep in sync with `filled`, but return `MaskedArray` instead of `ndarray` +@overload +def fix_invalid( + a: np.ndarray[_ShapeT, _DTypeT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def fix_invalid( + a: _ArrayLike[_ScalarT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[_ScalarT]: ... 
+@overload +def fix_invalid( + a: ArrayLike, + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[Incomplete]: ... + +# +def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... + +# +@overload +def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ... +@overload +def getdata(a: _ArrayLike[_ScalarT], subok: bool = True) -> NDArray[_ScalarT]: ... +@overload +def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ... + +get_data = getdata + +# +@overload +def getmask(a: _ScalarLike_co) -> _NoMaskType: ... +@overload +def getmask(a: MaskedArray[_ShapeT, Any]) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload +def getmask(a: ArrayLike) -> _MaskArray | _NoMaskType: ... + get_mask = getmask -def getmaskarray(arr): ... -def is_mask(m): ... -def make_mask(m, copy=..., shrink=..., dtype=...): ... -def make_mask_none(newshape, dtype=...): ... -def mask_or(m1, m2, copy=..., shrink=...): ... -def flatten_mask(mask): ... -def masked_where(condition, a, copy=...): ... -def masked_greater(x, value, copy=...): ... -def masked_greater_equal(x, value, copy=...): ... -def masked_less(x, value, copy=...): ... -def masked_less_equal(x, value, copy=...): ... -def masked_not_equal(x, value, copy=...): ... -def masked_equal(x, value, copy=...): ... -def masked_inside(x, v1, v2, copy=...): ... -def masked_outside(x, v1, v2, copy=...): ... -def masked_object(x, value, copy=..., shrink=...): ... -def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... -def masked_invalid(a, copy=...): ... +# like `getmask`, but instead of `nomask` returns `make_mask_none(arr, arr.dtype?)` +@overload +def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... +@overload +def getmaskarray(arr: np.ndarray[_ShapeT, Any]) -> _MaskArray[_ShapeT]: ... -class _MaskedPrintOption: - def __init__(self, display): ... - def display(self): ... 
- def set_display(self, s): ... - def enabled(self): ... - def enable(self, shrink=...): ... +# It's sufficient for `m` to have dtype with type: `type[np.bool_]`, +# which isn't necessarily a ndarray. Please open an issue if this causes issues. +def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... -masked_print_option: _MaskedPrintOption +# +@overload +def make_mask_descr(ndtype: _VoidDTypeLike) -> np.dtype[np.void]: ... +@overload +def make_mask_descr(ndtype: _DTypeLike[np.generic] | str | type) -> np.dtype[np.bool_]: ... -def flatten_structured_array(a): ... +# +@overload # m is nomask +def make_mask( + m: _NoMaskType, + copy: bool = False, + shrink: bool = True, + dtype: _DTypeLikeBool = ..., +) -> _NoMaskType: ... +@overload # m: ndarray, shrink=True (default), dtype: bool-like (default) +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # m: ndarray, shrink=False (kwarg), dtype: bool-like (default) +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_ShapeT]: ... +@overload # m: ndarray, dtype: void-like +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload # m: array-like, shrink=True (default), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray | _NoMaskType: ... +@overload # m: array-like, shrink=False (kwarg), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray: ... 
+@overload # m: array-like, dtype: void-like +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> NDArray[np.void]: ... +@overload # fallback +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: DTypeLike = ..., +) -> NDArray[Incomplete] | _NoMaskType: ... + +# +@overload # known shape, dtype: unstructured (default) +def make_mask_none(newshape: _ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[_ShapeT]: ... +@overload # known shape, dtype: structured +def make_mask_none(newshape: _ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[_ShapeT, dtype[np.void]]: ... +@overload # unknown shape, dtype: unstructured (default) +def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray: ... +@overload # unknown shape, dtype: structured +def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ... + +# +@overload # nomask, scalar-like, shrink=True (default) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # nomask, scalar-like, shrink=False (kwarg) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... +@overload # scalar-like, nomask, shrink=True (default) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # scalar-like, nomask, shrink=False (kwarg) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... 
+@overload # ndarray, ndarray | nomask, shrink=True (default) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # ndarray, ndarray | nomask, shrink=False (kwarg) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[_ShapeT]: ... +@overload # ndarray | nomask, ndarray, shrink=True (default) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + copy: bool = False, + shrink: Literal[True] = True, +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # ndarray | nomask, ndarray, shrink=False (kwarg) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[_ShapeT]: ... + +# +@overload +def flatten_mask(mask: np.ndarray[_ShapeT]) -> _MaskArray[_ShapeT]: ... +@overload +def flatten_mask(mask: ArrayLike) -> _MaskArray: ... + +# NOTE: we currently don't know the field types of `void` dtypes, so it's not possible +# to know the output dtype of the returned array. +@overload +def flatten_structured_array(a: MaskedArray[_ShapeT, np.dtype[np.void]]) -> MaskedArray[_ShapeT]: ... +@overload +def flatten_structured_array(a: np.ndarray[_ShapeT, np.dtype[np.void]]) -> np.ndarray[_ShapeT]: ... +@overload # for some reason this accepts unstructured array-likes, hence this fallback overload +def flatten_structured_array(a: ArrayLike) -> np.ndarray: ... 
+ +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_invalid(a: ndarray[_ShapeT, _DTypeT], copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_invalid(a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # array-like of known scalar-type +def masked_where( + condition: _ArrayLikeBool_co, a: ndarray[_ShapeT, _DTypeT], copy: bool = True +) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_where(condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_greater(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_greater(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_greater_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_greater_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... 
+@overload # unknown array-like +def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_less(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_less(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_less_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_less_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_not_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_not_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... 
+@overload # array-like of known scalar-type +def masked_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_inside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_inside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_outside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_outside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# only intended for object arrays, so we assume that's how it's always used in practice +@overload +def masked_object( + x: np.ndarray[_ShapeT, np.dtype[np.object_]], + value: object, + copy: bool = True, + shrink: bool = True, +) -> MaskedArray[_ShapeT, np.dtype[np.object_]]: ... +@overload +def masked_object( + x: _ArrayLikeObject_co, + value: object, + copy: bool = True, + shrink: bool = True, +) -> _MaskedArray[np.object_]: ... 
+ +# keep roughly in sync with `filled` +@overload +def masked_values( + x: np.ndarray[_ShapeT, _DTypeT], + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def masked_values( + x: _ArrayLike[_ScalarT], + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> _MaskedArray[_ScalarT]: ... +@overload +def masked_values( + x: ArrayLike, + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> _MaskedArray[Incomplete]: ... + +# TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an +# additional generic type parameter to (at least) `MaskedArray` and `MaskedIterator` to +# hold the dtype of the mask. + +class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): + ma: MaskedArray[_ShapeT_co, _DTypeT_co] # readonly + dataiter: np.flatiter[ndarray[_ShapeT_co, _DTypeT_co]] # readonly + maskiter: Final[np.flatiter[NDArray[np.bool]]] + + def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ... + def __iter__(self) -> Self: ... + + # Similar to `MaskedArray.__getitem__` but without the `void` case. + @overload + def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Incomplete: ... + @overload + def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # Similar to `ndarray.__setitem__` but without the `void` case. + @overload # flexible | object_ | bool + def __setitem__( + self: MaskedIterator[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + index: _ToIndices, + value: object, + /, + ) -> None: ... 
+ @overload # integer + def __setitem__( + self: MaskedIterator[Any, dtype[integer]], + index: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: MaskedIterator[Any, dtype[floating]], + index: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: MaskedIterator[Any, dtype[complexfloating]], + index: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: MaskedIterator[Any, dtype[timedelta64]], + index: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: MaskedIterator[Any, dtype[datetime64]], + index: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # catch-all + def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... + + # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. + def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ... + +class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: Final[Literal[15]] = 15 + + @overload + def __new__( + cls, + data: _ArrayLike[_ScalarT], + mask: _ArrayLikeBool_co = nomask, + dtype: None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... 
+ @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + *, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object = None, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[Any]: ... + + def __array_wrap__( + self, + obj: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> MaskedArray[_ShapeT, _DTypeT]: ... + + @overload # type: ignore[override] # () + def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... + @overload # (dtype: DTypeT) + def view( + self, + /, + dtype: _DTypeT | _HasDType[_DTypeT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, _DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view( + self, + /, + dtype: _DTypeLike[_ScalarT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... 
+ @overload # ([dtype: _, ]*, type: ArrayT) + def view( + self, + /, + dtype: DTypeLike | None = None, + *, + type: type[_ArrayT], + fill_value: _ScalarLike_co | None = None + ) -> _ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view(self, /, dtype: DTypeLike | None, type: type[_ArrayT], fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + @overload # (dtype: ArrayT, /) + def view(self, /, dtype: type[_ArrayT], type: None = None, fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + @overload # (dtype: ?) + def view( + self, + /, + # `_VoidDTypeLike | str | None` is like `DTypeLike` but without `_DTypeLike[Any]` to avoid + # overlaps with previous overloads. + dtype: _VoidDTypeLike | str | None, + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype]: ... + + # Keep in sync with `ndarray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: str, /) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... -class MaskedIterator: - ma: Any - dataiter: Any - maskiter: Any - def __init__(self, ma): ... - def __iter__(self): ... - def __getitem__(self, indx): ... - def __setitem__(self, index, value): ... - def __next__(self): ... - -class MaskedArray(ndarray[_ShapeType_co, _DType_co]): - __array_priority__: Any - def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... - def __array_finalize__(self, obj): ... 
- def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def view(self, dtype=..., type=..., fill_value=...): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... - @property - def dtype(self): ... - @dtype.setter - def dtype(self, dtype): ... @property - def shape(self): ... - @shape.setter - def shape(self, shape): ... - def __setmask__(self, mask, copy=...): ... + def shape(self) -> _ShapeT_co: ... + @shape.setter # type: ignore[override] + def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property - def mask(self): ... + def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @mask.setter - def mask(self, value): ... + def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property - def recordmask(self): ... + def recordmask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @recordmask.setter - def recordmask(self, mask): ... - def harden_mask(self): ... - def soften_mask(self): ... + def recordmask(self, mask: Never, /) -> NoReturn: ... + def harden_mask(self) -> Self: ... + def soften_mask(self) -> Self: ... @property - def hardmask(self): ... - def unshare_mask(self): ... + def hardmask(self) -> bool: ... + def unshare_mask(self) -> Self: ... @property - def sharedmask(self): ... - def shrink_mask(self): ... + def sharedmask(self) -> bool: ... + def shrink_mask(self) -> Self: ... + @property - def baseclass(self): ... - data: Any + def baseclass(self) -> type[ndarray]: ... + @property - def flat(self): ... + def _data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @property + def data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... # type: ignore[override] + + @property # type: ignore[override] + def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... @flat.setter - def flat(self, value): ... + def flat(self, value: ArrayLike, /) -> None: ... 
+ @property - def fill_value(self): ... + def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... @fill_value.setter - def fill_value(self, value=...): ... - get_fill_value: Any - set_fill_value: Any - def filled(self, fill_value=...): ... - def compressed(self): ... - def compress(self, condition, axis=..., out=...): ... - def __eq__(self, other): ... - def __ne__(self, other): ... - def __ge__(self, other): ... - def __gt__(self, other): ... - def __le__(self, other): ... - def __lt__(self, other): ... - def __add__(self, other): ... - def __radd__(self, other): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... - def __mul__(self, other): ... - def __rmul__(self, other): ... - def __div__(self, other): ... - def __truediv__(self, other): ... - def __rtruediv__(self, other): ... - def __floordiv__(self, other): ... - def __rfloordiv__(self, other): ... - def __pow__(self, other): ... - def __rpow__(self, other): ... - def __iadd__(self, other): ... - def __isub__(self, other): ... - def __imul__(self, other): ... - def __idiv__(self, other): ... - def __ifloordiv__(self, other): ... - def __itruediv__(self, other): ... - def __ipow__(self, other): ... - def __float__(self): ... - def __int__(self): ... - @property # type: ignore[misc] - def imag(self): ... - get_imag: Any + def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... + + def get_fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... + + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... + + # keep roughly in sync with `ma.core.compress`, but swap the first two arguments + @overload # type: ignore[override] + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None, + out: _ArrayT + ) -> _ArrayT: ... 
+ @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + *, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: None = None, + out: None = None + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + out: None = None + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Incomplete, /) -> Incomplete: ... + def __ne__(self, other: Incomplete, /) -> Incomplete: ... + + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + + # Keep in sync with `ndarray.__add__` + @overload # type: ignore[override] + def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... 
+ @overload + def __add__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __add__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `ndarray.__radd__` + @overload # type: ignore[override] # signature equivalent to __add__ + def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... 
# type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __radd__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__sub__` + @overload # type: ignore[override] + def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... 
+ @overload + def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rsub__` + @overload # type: ignore[override] + def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __rsub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `ndarray.__mul__` + @overload # type: ignore[override] + def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __mul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... 
+ @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rmul__` + @overload # type: ignore[override] # signature equivalent to __mul__ + def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... 
# type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rmul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__truediv__` + @overload # type: ignore[override] + def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... 
+ @overload + def __truediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __truediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __truediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rtruediv__` + @overload # type: ignore[override] + def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... 
+ @overload + def __rtruediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __rtruediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rtruediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[integer | floating], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rtruediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__floordiv__` + @overload # type: ignore[override] + def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... 
+ @overload + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __floordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rfloordiv__` + @overload # type: ignore[override] + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... 
+ @overload + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[floating | integer], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) + @overload # type: ignore[override] + def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... 
+ @overload + def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __pow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) + @overload # type: ignore[override] + def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... 
+ @overload + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rpow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # @property # type: ignore[misc] - def real(self): ... - get_real: Any - def count(self, axis=..., keepdims=...): ... - def ravel(self, order=...): ... - def reshape(self, *s, **kwargs): ... - def resize(self, newshape, refcheck=..., order=...): ... - def put(self, indices, values, mode=...): ... - def ids(self): ... - def iscontiguous(self): ... - def all(self, axis=..., out=..., keepdims=...): ... - def any(self, axis=..., out=..., keepdims=...): ... - def nonzero(self): ... - def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... - def dot(self, b, out=..., strict=...): ... - def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... - def cumsum(self, axis=..., dtype=..., out=...): ... - def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... 
- product: Any - def cumprod(self, axis=..., dtype=..., out=...): ... - def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... - def anom(self, axis=..., dtype=...): ... - def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def round(self, decimals=..., out=...): ... - def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... - def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... - def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... - def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... - def min(self, axis=..., out=..., fill_value=..., keepdims=...): ... - # NOTE: deprecated - # def tostring(self, fill_value=..., order=...): ... - def max(self, axis=..., out=..., fill_value=..., keepdims=...): ... - def ptp(self, axis=..., out=..., fill_value=..., keepdims=...): ... - def partition(self, *args, **kwargs): ... - def argpartition(self, *args, **kwargs): ... - def take(self, indices, axis=..., out=..., mode=...): ... - - copy: Any - diagonal: Any - flatten: Any - repeat: Any - squeeze: Any - swapaxes: Any - T: Any - transpose: Any + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] + def get_imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + # @property # type: ignore[misc] - def mT(self): ... + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] + def get_real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + + # keep in sync with `np.ma.count` + @overload + def count(self, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... 
+ @overload + def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + + # Keep in sync with `ndarray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( # type: ignore[overload-overlap] # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape( + self, + shape: _AnyShapeT, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int, int], _DTypeT_co]: ... 
+ @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... # type: ignore[override] + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + def ids(self) -> tuple[int, int]: ... + def iscontiguous(self) -> bool: ... + + # Keep in sync with `ma.core.all` + @overload # type: ignore[override] + def all( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in sync with `ma.core.any` + @overload # type: ignore[override] + def any( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... 
+ @overload + def any( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in sync with `ndarray.trace` and `ma.core.trace` + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, + ) -> Any: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, + ) -> _ArrayT: ... + + # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. + @overload + def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... + @overload + def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... + + # Keep in sync with `ma.core.sum` + @overload # type: ignore[override] + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + # Keep in sync with `ma.core.prod` + @overload # type: ignore[override] + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + product = prod + + # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
+ + # Keep in sync with `ma.core.mean` + @overload # type: ignore[override] + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # keep roughly in sync with `ma.core.anom` + @overload + def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... + @overload + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + + # keep in sync with `std` and `ma.core.var` + @overload # type: ignore[override] + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... 
+ + # keep in sync with `var` and `ma.core.std` + @overload # type: ignore[override] + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in sync with `ndarray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + + def argsort( # type: ignore[override] + self, + axis: SupportsIndex | _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool = False, + ) -> _MaskedArray[intp]: ... + + # Keep in-sync with np.ma.argmin + @overload # type: ignore[override] + def argmin( + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... 
+ @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def argmin( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in-sync with np.ma.argmax + @overload # type: ignore[override] + def argmax( + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def argmax( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... # - def toflex(self) -> Incomplete: ... - def torecords(self) -> Incomplete: ... - def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... - @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... 
+ def sort( # type: ignore[override] + self, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, + ) -> None: ... # - def __reduce__(self): ... - def __deepcopy__(self, memo=...): ... + @overload # type: ignore[override] + def min( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _ScalarT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def min( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... -class mvoid(MaskedArray[_ShapeType_co, _DType_co]): + # + @overload # type: ignore[override] + def max( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _ScalarT: ... + @overload + def max( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def max( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def max( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... 
+ + # + @overload + def ptp( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] = False, + ) -> _ScalarT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> Any: ... + @overload + def ptp( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> _ArrayT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> _ArrayT: ... + + # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None + ) -> None: ... + @overload + def partition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload + def argpartition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> _MaskedArray[intp]: ... + @overload + def argpartition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> _MaskedArray[intp]: ... + + # Keep in-sync with np.ma.take + @overload # type: ignore[override] + def take( # type: ignore[overload-overlap] + self: _MaskedArray[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = "raise" + ) -> _ScalarT: ... 
+ @overload + def take( + self: _MaskedArray[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> _MaskedArray[_ScalarT]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ... + + # keep in sync with `ndarray.diagonal` + @override + def diagonal( + self, + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # keep in sync with `ndarray.repeat` + @override + @overload + def repeat( + self, + /, + repeats: _ArrayLikeInt_co, + axis: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + /, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # keep in sync with `ndarray.flatten` and `ndarray.ravel` + @override + def flatten(self, /, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + @override + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + + # keep in sync with `ndarray.squeeze` + @override + def squeeze( + self, + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # + def toflex(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + def torecords(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + + # + @override + def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... 
# type: ignore[override] + + # keep in sync with `ndarray.tolist` + @override + @overload + def tolist(self: MaskedArray[tuple[Never], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + @overload + def tolist(self: MaskedArray[tuple[()], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> _T: ... + @overload + def tolist(self: MaskedArray[tuple[int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> list[_T]: ... + @overload + def tolist( + self: MaskedArray[tuple[int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None + ) -> list[list[_T]]: ... + @overload + def tolist( + self: MaskedArray[tuple[int, int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None + ) -> list[list[list[_T]]]: ... + @overload + def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... + + # NOTE: will raise `NotImplementedError` + @override + def tofile(self, /, fid: Never, sep: str = "", format: str = "%s") -> NoReturn: ... # type: ignore[override] + + # + @override + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + @dtype.setter + def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... + +class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( self, # pyright: ignore[reportSelfClsParameterName] data, @@ -497,125 +2415,758 @@ class mvoid(MaskedArray[_ShapeType_co, _DType_co]): def __setitem__(self, indx, value): ... def __iter__(self): ... def __len__(self): ... - def filled(self, fill_value=...): ... - def tolist(self): ... + def filled(self, fill_value=None): ... + def tolist(self): ... # type: ignore[override] -def isMaskedArray(x): ... -isarray = isMaskedArray -isMA = isMaskedArray +def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... +def isarray(x: object) -> TypeIs[MaskedArray]: ... 
# alias to isMaskedArray +def isMA(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray # 0D float64 array -class MaskedConstant(MaskedArray[Any, dtype[float64]]): - def __new__(cls): ... - __class__: Any - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def __format__(self, format_spec): ... - def __reduce__(self): ... - def __iop__(self, other): ... - __iadd__: Any - __isub__: Any - __imul__: Any - __ifloordiv__: Any - __itruediv__: Any - __ipow__: Any - def copy(self, *args, **kwargs): ... - def __copy__(self): ... - def __deepcopy__(self, memo): ... - def __setattr__(self, attr, value): ... - -masked: MaskedConstant -masked_singleton: MaskedConstant -masked_array = MaskedArray +class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): + def __new__(cls) -> Self: ... + + # these overrides are no-ops + @override + def __iadd__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __isub__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __imul__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __ifloordiv__(self, other: _Ignored, /) -> Self: ... + @override + def __itruediv__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __ipow__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __deepcopy__(self, /, memo: _Ignored) -> Self: ... # type: ignore[override] + @override + def copy(self, /, *args: _Ignored, **kwargs: _Ignored) -> Self: ... +masked: Final[MaskedConstant] = ... +masked_singleton: Final[MaskedConstant] = ... 
+ +masked_array: TypeAlias = MaskedArray + +# keep in sync with `MaskedArray.__new__` +@overload +def array( + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... +@overload def array( - data, - dtype=..., - copy=..., - order=..., - mask=..., - fill_value=..., - keep_mask=..., - hard_mask=..., - shrink=..., - subok=..., - ndmin=..., -): ... -def is_masked(x): ... - -class _extrema_operation(_MaskedUFunc): - compare: Any - fill_value_func: Any - def __init__(self, ufunc, compare, fill_value): ... - # NOTE: in practice `b` has a default value, but users should - # explicitly provide a value here as the default is deprecated - def __call__(self, a, b): ... - def reduce(self, target, axis=...): ... - def outer(self, a, b): ... - -def min(obj, axis=..., out=..., fill_value=..., keepdims=...): ... -def max(obj, axis=..., out=..., fill_value=..., keepdims=...): ... -def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...): ... - -class _frommethod: - __name__: Any - __doc__: Any - reversed: Any - def __init__(self, methodname, reversed=...): ... - def getdoc(self): ... - def __call__(self, a, *args, **params): ... 
- -all: _frommethod -anomalies: _frommethod -anom: _frommethod -any: _frommethod -compress: _frommethod -cumprod: _frommethod -cumsum: _frommethod -copy: _frommethod -diagonal: _frommethod -harden_mask: _frommethod -ids: _frommethod -mean: _frommethod -nonzero: _frommethod -prod: _frommethod -product: _frommethod -ravel: _frommethod -repeat: _frommethod -soften_mask: _frommethod -std: _frommethod -sum: _frommethod -swapaxes: _frommethod -trace: _frommethod -var: _frommethod -count: _frommethod -argmin: _frommethod -argmax: _frommethod - -minimum: _extrema_operation -maximum: _extrema_operation - -def take(a, indices, axis=..., out=..., mode=...): ... -def power(a, b, third=...): ... -def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... -def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... -def compressed(x): ... -def concatenate(arrays, axis=...): ... -def diag(v, k=...): ... + data: object, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... +@overload +def array( + data: object, + dtype: DTypeLike | None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... + +# keep in sync with `array` +@overload +def asarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... 
+@overload +def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... + +# keep in sync with `asarray` (but note the additional first overload) +@overload +def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> _MArrayT: ... +@overload +def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... + +# +def is_masked(x: object) -> bool: ... + +@overload +def min( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def max( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... 
+@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def ptp( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# we cannot meaningfully annotate `frommethod` further, because the callable signature +# of the return type fully depends on the *value* of `methodname` and `reversed` in +# a way that cannot be expressed in the Python type system. +def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: ... + +# NOTE: The following `*_mask` functions will accept any array-like input runtime, but +# since their use-cases are specific to masks, they only accept `MaskedArray` inputs. + +# keep in sync with `MaskedArray.harden_mask` +def harden_mask(a: _MArrayT) -> _MArrayT: ... +# keep in sync with `MaskedArray.soften_mask` +def soften_mask(a: _MArrayT) -> _MArrayT: ... +# keep in sync with `MaskedArray.shrink_mask` +def shrink_mask(a: _MArrayT) -> _MArrayT: ... 
+ +# keep in sync with `MaskedArray.ids` +def ids(a: ArrayLike) -> tuple[int, int]: ... + +# keep in sync with `ndarray.nonzero` +def nonzero(a: ArrayLike) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... + +# keep first overload in sync with `MaskedArray.ravel` +@overload +def ravel(a: np.ndarray[Any, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT]: ... +@overload +def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + +# keep roughly in sync with `lib._function_base_impl.copy` +@overload +def copy(a: _MArrayT, order: _OrderKACF = "C") -> _MArrayT: ... +@overload +def copy(a: np.ndarray[_ShapeT, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _MaskedArray[_ScalarT]: ... +@overload +def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.diagonal` +@overload +def diagonal( + a: _ArrayLike[_ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[_ScalarT]: ... +@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.repeat` +@overload +def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[_ScalarT]]: ... +@overload +def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[Incomplete]]: ... 
+@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.swapaxes` +@overload +def swapaxes(a: _MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _MArrayT: ... +@overload +def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need +# additional overloads to cover the array-like input here. +@overload # a: MaskedArray, dtype=None +def anom(a: _MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> _MArrayT: ... +@overload # a: array-like, dtype=None +def anom(a: _ArrayLike[_ScalarT], axis: SupportsIndex | None = None, dtype: None = None) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... + +anomalies = anom + +# Keep in sync with `any` and `MaskedArray.all` +@overload +def all(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... 
+@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all( + a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... + +# Keep in sync with `all` and `MaskedArray.any` +@overload +def any(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any( + a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... + +# NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, +# which wouldn't work here for array-like inputs, so we need additional overloads. +@overload +def compress( + condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: None = None, out: None = None +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... 
+@overload +def compress( + condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None, out: None = None +) -> MaskedArray[_AnyShape, np.dtype[_ScalarT]]: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> MaskedArray[tuple[int]]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `cumprod` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumsum( + a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumsum(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload # out: ndarray (kwarg) +def cumsum(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `cumsum` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumprod( + a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumprod(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload # out: ndarray (kwarg) +def cumprod(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
+ +# Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `product` and `MaskedArray.prod` +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... 
+ +# Keep in sync with `prod` and `MaskedArray.prod` +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace` +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, +) -> Incomplete: ... +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... 
+@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... + +# (a, b) +minimum: _extrema_operation = ... +maximum: _extrema_operation = ... + +# NOTE: this is a `_frommethod` instance at runtime +@overload +def count(a: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + +# NOTE: this is a `_frommethod` instance at runtime +@overload +def argmin( + a: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... 
+@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `argmin` +@overload +def argmax( + a: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = "raise" +) -> _ScalarT: ... +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[_ScalarT]: ... +@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> Any: ... 
+@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[Any]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = "raise", +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = "raise", +) -> _ArrayT: ... + +def power(a, b, third=None): ... +def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... +@overload +def sort( + a: _ArrayT, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = None, +) -> _ArrayT: ... +@overload +def sort( + a: ArrayLike, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = None, +) -> NDArray[Any]: ... +@overload +def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... +@overload +def compressed(x: ArrayLike) -> _Array1D[Any]: ... +def concatenate(arrays, axis=0): ... +def diag(v, k=0): ... def left_shift(a, n): ... def right_shift(a, n): ... -def put(a, indices, values, mode=...): ... -def putmask(a, mask, values): ... -def transpose(a, axes=...): ... -def reshape(a, new_shape, order=...): ... +def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... +def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... +def transpose(a, axes=None): ... +def reshape(a, new_shape, order="C"): ... def resize(x, new_shape): ... -def ndim(obj): ... 
+def ndim(obj: ArrayLike) -> int: ... def shape(obj): ... -def size(obj, axis=...): ... -def diff(a, /, n=..., axis=..., prepend=..., append=...): ... +def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... +def diff(a, /, n=1, axis=-1, prepend=..., append=...): ... def where(condition, x=..., y=...): ... -def choose(indices, choices, out=..., mode=...): ... -def round_(a, decimals=..., out=...): ... +def choose(indices, choices, out=None, mode="raise"): ... +def round_(a, decimals=0, out=None): ... round = round_ def inner(a, b): ... @@ -624,33 +3175,559 @@ innerproduct = inner def outer(a, b): ... outerproduct = outer -def correlate(a, v, mode=..., propagate_mask=...): ... -def convolve(a, v, mode=..., propagate_mask=...): ... -def allequal(a, b, fill_value=...): ... -def allclose(a, b, masked_equal=..., rtol=..., atol=...): ... -def asarray(a, dtype=..., order=...): ... -def asanyarray(a, dtype=...): ... +def correlate(a, v, mode="valid", propagate_mask=True): ... +def convolve(a, v, mode="full", propagate_mask=True): ... + +def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... + +def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... + def fromflex(fxarray): ... -class _convert2ma: - def __init__(self, /, funcname: str, np_ret: str, np_ma_ret: str, params: dict[str, Any] | None = None) -> None: ... - def __call__(self, /, *args: object, **params: object) -> Any: ... # noqa: ANN401 - def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... - -arange: _convert2ma -clip: _convert2ma -empty: _convert2ma -empty_like: _convert2ma -frombuffer: _convert2ma -fromfunction: _convert2ma -identity: _convert2ma -indices: _convert2ma -ones: _convert2ma -ones_like: _convert2ma -squeeze: _convert2ma -zeros: _convert2ma -zeros_like: _convert2ma - -def append(a, b, axis=...): ... -def dot(a, b, strict=..., out=...): ... -def mask_rowcols(a, axis=...): ... 
+def append(a, b, axis=None): ... +def dot(a, b, strict=False, out=None): ... + +# internal wrapper functions for the functions below +def _convert2ma( + funcname: str, + np_ret: str, + np_ma_ret: str, + params: dict[str, Any] | None = None, +) -> Callable[..., Any]: ... + +# keep in sync with `_core.multiarray.arange` +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: _DTypeLike[_ArangeScalarT], + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[_ArangeScalarT]: ... +@overload # (int-like, int-like?, int-like?) +def arange( + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co | None = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.int_]: ... +@overload # (float, float-like?, float-like?) +def arange( + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) +def arange( + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64 | Any]: ... 
+@overload # (timedelta, timedelta-like?, timedelta-like?) +def arange( + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) +def arange( + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) +def arange( + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... 
+@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float | str, + /, + stop: _ArangeScalar | float | str | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: DTypeLike | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[Incomplete]: ... + +# based on `_core.fromnumeric.clip` +@overload +def clip( + a: _ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ScalarT: ... +@overload +def clip( + a: NDArray[_ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MaskedArray[_ScalarT]: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _MArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MArrayT: ... 
+@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: _MArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Incomplete: ... + +# keep in sync with `_core.multiarray.ones` +@overload +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[np.float64]]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], _DTypeT]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... 
+@overload +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int]]: ... +@overload # known shape +def empty( + shape: _AnyShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, np.dtype[np.float64]]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, _DTypeT]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT]: ... +@overload # unknown shape +def empty( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.float64]: ... 
+@overload +def empty( + shape: _ShapeLike, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShape, _DTypeT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray: ... + +# keep in sync with `_core.multiarray.empty_like` +@overload +def empty_like( + a: _MArrayT, + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MArrayT: ... +@overload +def empty_like( + a: _ArrayLike[_ScalarT], + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty_like( + a: Incomplete, + /, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty_like( + a: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[Incomplete]: ... 
+ +# This is a bit of a hack to avoid having to duplicate all those `empty` overloads for +# `ones` and `zeros`, that relies on the fact that empty/zeros/ones have identical +# type signatures, but may cause some type-checkers to report incorrect names in case +# of user errors. Mypy and Pyright seem to handle this just fine. +ones = empty +ones_like = empty_like +zeros = empty +zeros_like = empty_like + +# keep in sync with `_core.multiarray.frombuffer` +@overload +def frombuffer( + buffer: Buffer, + *, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[np.float64]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: DTypeLike | None = float, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[Incomplete]: ... + +# keep roughly in sync with `_core.numeric.fromfunction` +def fromfunction( + function: Callable[..., np.ndarray[_ShapeT, _DTypeT]], + shape: Sequence[int], + *, + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> MaskedArray[_ShapeT, _DTypeT]: ... + +# keep roughly in sync with `_core.numeric.identity` +@overload +def identity( + n: int, + dtype: None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... +@overload +def identity( + n: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[_ScalarT]]: ... 
+@overload +def identity( + n: int, + dtype: DTypeLike | None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[Incomplete]]: ... + +# keep roughly in sync with `_core.numeric.indices` +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.intp]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[_ScalarT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None, + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... 
+@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... + +# keep roughly in sync with `_core.fromnumeric.squeeze` +@overload +def squeeze( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... diff --git a/blimgui/dist64/numpy/ma/extras.py b/blimgui/dist64/numpy/ma/extras.py index e93a505..82f1b6b 100644 --- a/blimgui/dist64/numpy/ma/extras.py +++ b/blimgui/dist64/numpy/ma/extras.py @@ -5,7 +5,6 @@ :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ __all__ = [ @@ -20,21 +19,40 @@ 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', ] +import functools import itertools import warnings -from . import core as ma -from .core import ( - MaskedArray, MAError, add, array, asarray, concatenate, filled, count, - getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, - nomask, ones, sort, zeros, getdata, get_masked_subclass, dot - ) - import numpy as np -from numpy import ndarray, array as nxarray -from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple +from numpy import array as nxarray, ndarray from numpy.lib._function_base_impl import _ureduce from numpy.lib._index_tricks_impl import AxisConcatenator +from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple + +from . 
import core as ma +from .core import ( # noqa: F401 + MAError, + MaskedArray, + add, + array, + asarray, + concatenate, + count, + dot, + filled, + get_masked_subclass, + getdata, + getmask, + getmaskarray, + make_mask_descr, + mask_or, + masked, + masked_array, + nomask, + ones, + sort, + zeros, +) def issequence(seq): @@ -227,151 +245,93 @@ def masked_all_like(arr): #####-------------------------------------------------------------------------- #---- --- Standard functions --- #####-------------------------------------------------------------------------- -class _fromnxfunction: - """ - Defines a wrapper to adapt NumPy functions to masked arrays. - - An instance of `_fromnxfunction` can be called with the same parameters - as the wrapped NumPy function. The docstring of `newfunc` is adapted from - the wrapped function as well, see `getdoc`. - - This class should not be used directly. Instead, one of its extensions that - provides support for a specific type of input should be used. +def _fromnxfunction_function(_fromnxfunction): + """ + Decorator to wrap a "_fromnxfunction" function, wrapping a numpy function as a + masked array function, with proper docstring and name. Parameters ---------- - funcname : str - The name of the function to be adapted. The function should be - in the NumPy namespace (i.e. ``np.funcname``). + _fromnxfunction : ({params}) -> ndarray, {params}) -> masked_array + Wrapper function that calls the wrapped numpy function - """ - - def __init__(self, funcname): - self.__name__ = funcname - self.__qualname__ = funcname - self.__doc__ = self.getdoc() + Returns + ------- + decorator : (f: ({params}) -> ndarray) -> ({params}) -> masked_array + Function that accepts a numpy function and returns a masked array function - def getdoc(self): - """ - Retrieve the docstring and signature from the function. 
+ """ + def decorator(npfunc, /): + def wrapper(*args, **kwargs): + return _fromnxfunction(npfunc, *args, **kwargs) - The ``__doc__`` attribute of the function is used as the docstring for - the new masked array version of the function. A note on application - of the function to the mask is appended. + functools.update_wrapper(wrapper, npfunc, assigned=("__name__", "__qualname__")) + wrapper.__doc__ = ma.doc_note( + npfunc.__doc__, + "The function is applied to both the ``_data`` and the ``_mask``, if any.", + ) + return wrapper - Parameters - ---------- - None + return decorator - """ - npfunc = getattr(np, self.__name__, None) - doc = getattr(npfunc, '__doc__', None) - if doc: - sig = ma.get_object_signature(npfunc) - doc = ma.doc_note(doc, "The function is applied to both the _data " - "and the _mask, if any.") - if sig: - sig = self.__name__ + sig + "\n\n" - return sig + doc - return - def __call__(self, *args, **params): - pass +@_fromnxfunction_function +def _fromnxfunction_single(npfunc, a, /, *args, **kwargs): + """ + Wraps a NumPy function that can be called with a single array argument followed by + auxiliary args that are passed verbatim for both the data and mask calls. + """ + return masked_array( + data=npfunc(np.asarray(a), *args, **kwargs), + mask=npfunc(getmaskarray(a), *args, **kwargs), + ) -class _fromnxfunction_single(_fromnxfunction): +@_fromnxfunction_function +def _fromnxfunction_seq(npfunc, arys, /, *args, **kwargs): """ - A version of `_fromnxfunction` that is called with a single array - argument followed by auxiliary args that are passed verbatim for - both the data and mask calls. + Wraps a NumPy function that can be called with a single sequence of arrays followed + by auxiliary args that are passed verbatim for both the data and mask calls. 
""" - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - if isinstance(x, ndarray): - _d = func(x.__array__(), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - else: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_seq(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with a single sequence - of arrays followed by auxiliary args that are passed verbatim for - both the data and mask calls. - """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - _d = func(tuple([np.asarray(a) for a in x]), *args, **params) - _m = func(tuple([getmaskarray(a) for a in x]), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_args(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. The first non-array-like input marks the beginning of the - arguments that are passed verbatim for both the data and mask calls. - Array arguments are processed independently and the results are - returned in a list. If only one array is found, the return value is - just the processed array instead of a list. - """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - arrays = [] - args = list(args) - while len(args) > 0 and issequence(args[0]): - arrays.append(args.pop(0)) - res = [] - for x in arrays: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - res.append(masked_array(_d, mask=_m)) - if len(arrays) == 1: - return res[0] - return res - - -class _fromnxfunction_allargs(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. Similar to `_fromnxfunction_args` except that all args - are converted to arrays even if they are not so already. 
This makes - it possible to process scalars as 1-D arrays. Only keyword arguments - are passed through verbatim for the data and mask calls. Arrays - arguments are processed independently and the results are returned - in a list. If only one arg is present, the return value is just the - processed array instead of a list. - """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - res = [] - for x in args: - _d = func(np.asarray(x), **params) - _m = func(getmaskarray(x), **params) - res.append(masked_array(_d, mask=_m)) - if len(args) == 1: - return res[0] - return res - - -atleast_1d = _fromnxfunction_allargs('atleast_1d') -atleast_2d = _fromnxfunction_allargs('atleast_2d') -atleast_3d = _fromnxfunction_allargs('atleast_3d') - -vstack = row_stack = _fromnxfunction_seq('vstack') -hstack = _fromnxfunction_seq('hstack') -column_stack = _fromnxfunction_seq('column_stack') -dstack = _fromnxfunction_seq('dstack') -stack = _fromnxfunction_seq('stack') - -hsplit = _fromnxfunction_single('hsplit') - -diagflat = _fromnxfunction_single('diagflat') + return masked_array( + data=npfunc(tuple(np.asarray(a) for a in arys), *args, **kwargs), + mask=npfunc(tuple(getmaskarray(a) for a in arys), *args, **kwargs), + ) + +@_fromnxfunction_function +def _fromnxfunction_allargs(npfunc, /, *arys, **kwargs): + """ + Wraps a NumPy function that can be called with multiple array arguments. + All args are converted to arrays even if they are not so already. + This makes it possible to process scalars as 1-D arrays. + Only keyword arguments are passed through verbatim for the data and mask calls. + Arrays arguments are processed independently and the results are returned in a list. + If only one arg is present, the return value is just the processed array instead of + a list. 
+ """ + out = tuple( + masked_array( + data=npfunc(np.asarray(a), **kwargs), + mask=npfunc(getmaskarray(a), **kwargs), + ) + for a in arys + ) + return out[0] if len(out) == 1 else out + + +atleast_1d = _fromnxfunction_allargs(np.atleast_1d) +atleast_2d = _fromnxfunction_allargs(np.atleast_2d) +atleast_3d = _fromnxfunction_allargs(np.atleast_3d) + +vstack = row_stack = _fromnxfunction_seq(np.vstack) +hstack = _fromnxfunction_seq(np.hstack) +column_stack = _fromnxfunction_seq(np.column_stack) +dstack = _fromnxfunction_seq(np.dstack) +stack = _fromnxfunction_seq(np.stack) + +hsplit = _fromnxfunction_single(np.hsplit) +diagflat = _fromnxfunction_single(np.diagflat) #####-------------------------------------------------------------------------- @@ -467,6 +427,8 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): result = asarray(outarr, dtype=max_dtypes) result.fill_value = ma.default_fill_value(result) return result + + apply_along_axis.__doc__ = np.apply_along_axis.__doc__ @@ -698,7 +660,7 @@ def average(a, axis=None, weights=None, returned=False, *, for ax, s in enumerate(a.shape))) if m is not nomask: - wgt = wgt*(~a.mask) + wgt = wgt * (~a.mask) wgt.mask |= a.mask scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) @@ -845,9 +807,9 @@ def _median(a, axis=None, out=None, overwrite_input=False): # duplicate high if odd number of elements so mean does nothing odd = counts % 2 == 1 - l = np.where(odd, h, h-1) + l = np.where(odd, h, h - 1) - lh = np.concatenate([l,h], axis=axis) + lh = np.concatenate([l, h], axis=axis) # get low and high median low_high = np.take_along_axis(asorted, lh, axis=axis) @@ -930,7 +892,7 @@ def compress_nd(x, axis=None): data = x._data for ax in axis: axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) - data = data[(slice(None),)*ax + (~m.any(axis=axes),)] + data = data[(slice(None),) * ax + (~m.any(axis=axes),)] return data @@ -1427,14 +1389,13 @@ def in1d(ar1, ar2, assume_unique=False, invert=False): Test whether 
each element of an array is also present in a second array. - The output is always a masked array. See `numpy.in1d` for more details. + The output is always a masked array. We recommend using :func:`isin` instead of `in1d` for new code. See Also -------- isin : Version of this function that preserves the shape of ar1. - numpy.in1d : Equivalent function for ndarrays. Examples -------- @@ -1711,8 +1672,8 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): return result -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, - ddof=np._NoValue): +def corrcoef(x, y=None, rowvar=True, allow_masked=True, + ): """ Return Pearson product-moment correlation coefficients. @@ -1733,32 +1694,17 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 allow_masked : bool, optional If True, masked values are propagated pair-wise: if a value is masked in `x`, the corresponding value is masked in `y`. If False, raises an exception. Because `bias` is deprecated, this argument needs to be treated as keyword only to avoid a warning. - ddof : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 See Also -------- numpy.corrcoef : Equivalent function in top-level NumPy module. cov : Estimate the covariance matrix. - Notes - ----- - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. 
- Examples -------- >>> import numpy as np @@ -1773,10 +1719,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, dtype=float64) """ - msg = 'bias and ddof have no effect and are deprecated' - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn(msg, DeprecationWarning, stacklevel=2) # Estimate the covariance matrix. corr = cov(x, y, rowvar, allow_masked=allow_masked) # The non-masked version returns a masked value for a scalar. @@ -1847,6 +1789,7 @@ class mr_class(MAxisConcatenator): def __init__(self): MAxisConcatenator.__init__(self, 0) + mr_ = mr_class() @@ -2027,8 +1970,8 @@ def notmasked_edges(a, axis=None): return flatnotmasked_edges(a) m = getmaskarray(a) idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) - return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), - tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] + return [tuple(idx[i].min(axis).compressed() for i in range(a.ndim)), + tuple(idx[i].max(axis).compressed() for i in range(a.ndim)), ] def flatnotmasked_contiguous(a): @@ -2144,7 +2087,7 @@ def notmasked_contiguous(a, axis=None): >>> np.ma.notmasked_contiguous(ma, axis=1) [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] - """ + """ # noqa: E501 a = asarray(a) nd = a.ndim if nd > 2: @@ -2281,6 +2224,7 @@ def vander(x, n=None): _vander[m] = 0 return _vander + vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) @@ -2318,4 +2262,5 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): else: return np.polyfit(x, y, deg, rcond, full, w, cov) + polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) diff --git a/blimgui/dist64/numpy/ma/extras.pyi b/blimgui/dist64/numpy/ma/extras.pyi index 942822f..a03a198 100644 --- a/blimgui/dist64/numpy/ma/extras.pyi +++ b/blimgui/dist64/numpy/ma/extras.pyi @@ -1,6 +1,17 @@ from _typeshed import Incomplete +from 
collections.abc import Sequence +from typing import SupportsIndex, TypeAlias, TypeVar, overload import numpy as np +from numpy import _CastingKind +from numpy._typing import ( + ArrayLike, + DTypeLike, + _AnyShape, + _ArrayLike, + _DTypeLike, + _ShapeLike, +) from numpy.lib._function_base_impl import average from numpy.lib._index_tricks_impl import AxisConcatenator @@ -55,80 +66,232 @@ __all__ = [ "vstack", ] -def count_masked(arr, axis=...): ... -def masked_all(shape, dtype = ...): ... -def masked_all_like(arr): ... +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) +_MArrayT = TypeVar("_MArrayT", bound=MaskedArray) + +_MArray: TypeAlias = MaskedArray[_AnyShape, np.dtype[_ScalarT]] + +### + +# keep in sync with `numpy._core.shape_base.atleast_1d` +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +@overload +def atleast_1d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... +@overload +def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.atleast_2d` +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +@overload +def atleast_2d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... 
+@overload +def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.atleast_2d` +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +@overload +def atleast_3d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... +@overload +def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... -class _fromnxfunction: - __name__: Incomplete - __doc__: Incomplete - def __init__(self, funcname) -> None: ... - def getdoc(self): ... - def __call__(self, *args, **params): ... +# keep in sync with `numpy._core.shape_base.vstack` +@overload +def vstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... -class _fromnxfunction_single(_fromnxfunction): - def __call__(self, x, *args, **params): ... +row_stack = vstack -class _fromnxfunction_seq(_fromnxfunction): - def __call__(self, x, *args, **params): ... 
+# keep in sync with `numpy._core.shape_base.hstack` +@overload +def hstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... -class _fromnxfunction_allargs(_fromnxfunction): - def __call__(self, *args, **params): ... +# keep in sync with `numpy._core.shape_base_impl.column_stack` +@overload +def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... -atleast_1d: _fromnxfunction_allargs -atleast_2d: _fromnxfunction_allargs -atleast_3d: _fromnxfunction_allargs +# keep in sync with `numpy._core.shape_base_impl.dstack` +@overload +def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... -vstack: _fromnxfunction_seq -row_stack: _fromnxfunction_seq -hstack: _fromnxfunction_seq -column_stack: _fromnxfunction_seq -dstack: _fromnxfunction_seq -stack: _fromnxfunction_seq +# keep in sync with `numpy._core.shape_base.stack` +@overload +def stack( + arrays: Sequence[_ArrayLike[_ScalarT]], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... 
+@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex, + out: _MArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _MArrayT: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: _MArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _MArrayT: ... -hsplit: _fromnxfunction_single -diagflat: _fromnxfunction_single +# keep in sync with `numpy._core.shape_base_impl.hsplit` +@overload +def hsplit(ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[_ScalarT]]: ... +@overload +def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ... + +# keep in sync with `numpy._core.twodim_base_impl.hsplit` +@overload +def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ... +@overload +def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... + +# TODO: everything below + +def count_masked(arr, axis=None): ... +def masked_all(shape, dtype=float): ... # noqa: PYI014 +def masked_all_like(arr): ... def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... -def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... -def compress_nd(x, axis=...): ... -def compress_rowcols(x, axis=...): ... +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... +def compress_nd(x, axis=None): ... +def compress_rowcols(x, axis=None): ... def compress_rows(a): ... def compress_cols(a): ... -def mask_rows(a, axis = ...): ... -def mask_cols(a, axis = ...): ... -def ediff1d(arr, to_end=..., to_begin=...): ... -def unique(ar1, return_index=..., return_inverse=...): ... 
-def intersect1d(ar1, ar2, assume_unique=...): ... -def setxor1d(ar1, ar2, assume_unique=...): ... -def in1d(ar1, ar2, assume_unique=..., invert=...): ... -def isin(element, test_elements, assume_unique=..., invert=...): ... +def mask_rows(a, axis=...): ... +def mask_cols(a, axis=...): ... +def ediff1d(arr, to_end=None, to_begin=None): ... +def unique(ar1, return_index=False, return_inverse=False): ... +def intersect1d(ar1, ar2, assume_unique=False): ... +def setxor1d(ar1, ar2, assume_unique=False): ... +def in1d(ar1, ar2, assume_unique=False, invert=False): ... +def isin(element, test_elements, assume_unique=False, invert=False): ... def union1d(ar1, ar2): ... -def setdiff1d(ar1, ar2, assume_unique=...): ... -def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... -def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ... +def setdiff1d(ar1, ar2, assume_unique=False): ... +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... +def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... class MAxisConcatenator(AxisConcatenator): + __slots__ = () + @staticmethod def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @classmethod def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): + __slots__ = () + def __init__(self) -> None: ... mr_: mr_class -def ndenumerate(a, compressed=...): ... +def ndenumerate(a, compressed=True): ... def flatnotmasked_edges(a): ... -def notmasked_edges(a, axis=...): ... +def notmasked_edges(a, axis=None): ... def flatnotmasked_contiguous(a): ... -def notmasked_contiguous(a, axis=...): ... +def notmasked_contiguous(a, axis=None): ... def clump_unmasked(a): ... def clump_masked(a): ... -def vander(x, n=...): ... 
-def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... +def vander(x, n=None): ... +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... # def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... diff --git a/blimgui/dist64/numpy/ma/mrecords.py b/blimgui/dist64/numpy/ma/mrecords.py index 7fdbe23..c37767f 100644 --- a/blimgui/dist64/numpy/ma/mrecords.py +++ b/blimgui/dist64/numpy/ma/mrecords.py @@ -18,7 +18,6 @@ import numpy as np import numpy.ma as ma - _byteorderconv = np._core.records._byteorderconv @@ -42,7 +41,7 @@ def _checknames(descr, names=None): """ ndescr = len(descr) - default_names = ['f%i' % i for i in range(ndescr)] + default_names = [f'f{i}' for i in range(ndescr)] if names is None: new_names = default_names else: @@ -117,9 +116,9 @@ def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, elif nm == nd: mask = np.reshape(mask, self.shape) else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." 
- raise ma.MAError(msg % (nd, nm)) + msg = (f"Mask and data not compatible: data size is {nd}," + " mask size is {nm}.") + raise ma.MAError(msg) if not keep_mask: self.__setmask__(mask) self._sharedmask = True @@ -150,7 +149,6 @@ def __array_finalize__(self, obj): self._update_from(obj) if _dict['_baseclass'] == np.ndarray: _dict['_baseclass'] = np.recarray - return @property def _data(self): @@ -343,7 +341,7 @@ def __repr__(self): """ _names = self.dtype.names - fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) + fmt = f"%{max(len(n) for n in _names) + 4}s : %s" reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] reprstr.insert(0, 'masked_records(') reprstr.extend([fmt % (' fill_value', self.fill_value), @@ -484,6 +482,7 @@ def _mrreconstruct(subtype, baseclass, baseshape, basetype,): _mask = np.ndarray.__new__(np.ndarray, baseshape, 'b1') return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + mrecarray = MaskedRecords @@ -658,8 +657,7 @@ def openfile(fname): def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', - varnames=None, vartypes=None, - *, delimitor=np._NoValue): # backwards compatibility + varnames=None, vartypes=None): """ Creates a mrecarray from data stored in the file `filename`. @@ -683,16 +681,6 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', Ultra simple: the varnames are in the header, one line""" - if delimitor is not np._NoValue: - if delimiter is not None: - raise TypeError("fromtextfile() got multiple values for argument " - "'delimiter'") - # NumPy 1.22.0, 2021-09-23 - warnings.warn("The 'delimitor' keyword argument of " - "numpy.ma.mrecords.fromtextfile() is deprecated " - "since NumPy 1.22.0, use 'delimiter' instead.", - DeprecationWarning, stacklevel=2) - delimiter = delimitor # Try to open the file. 
ftext = openfile(fname) @@ -719,9 +707,9 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', else: vartypes = [np.dtype(v) for v in vartypes] if len(vartypes) != nfields: - msg = "Attempting to %i dtypes for %i fields!" + msg = f"Attempting to {len(vartypes)} dtypes for {nfields} fields!" msg += " Reverting to default." - warnings.warn(msg % (len(vartypes), nfields), stacklevel=2) + warnings.warn(msg, stacklevel=2) vartypes = _guessvartypes(_variables[0]) # Construct the descriptor. @@ -748,7 +736,7 @@ def addfield(mrecord, newfield, newfieldname=None): _data = mrecord._data _mask = mrecord._mask if newfieldname is None or newfieldname in reserved_fields: - newfieldname = 'f%i' % len(_data.dtype) + newfieldname = f'f{len(_data.dtype)}' newfield = ma.array(newfield) # Get the new data. # Create a new empty recarray diff --git a/blimgui/dist64/numpy/ma/mrecords.pyi b/blimgui/dist64/numpy/ma/mrecords.pyi index 232b832..c11df1d 100644 --- a/blimgui/dist64/numpy/ma/mrecords.pyi +++ b/blimgui/dist64/numpy/ma/mrecords.pyi @@ -1,7 +1,10 @@ -from typing import Any, TypeVar +from typing import Any, Generic +from typing_extensions import TypeVar -from numpy import dtype -from . 
import MaskedArray +import numpy as np +from numpy._typing import _AnyShape + +from .core import MaskedArray __all__ = [ "MaskedRecords", @@ -12,10 +15,10 @@ __all__ = [ "addfield", ] -_ShapeType_co = TypeVar("_ShapeType_co", covariant=True, bound=tuple[int, ...]) -_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -class MaskedRecords(MaskedArray[_ShapeType_co, _DType_co]): +class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): def __new__( cls, shape, @@ -47,49 +50,47 @@ class MaskedRecords(MaskedArray[_ShapeType_co, _DType_co]): def __setattr__(self, attr, val): ... def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... - def view(self, dtype=..., type=...): ... + def view(self, dtype=None, type=None): ... def harden_mask(self): ... def soften_mask(self): ... def copy(self): ... - def tolist(self, fill_value=...): ... + def tolist(self, fill_value=None): ... def __reduce__(self): ... mrecarray = MaskedRecords def fromarrays( arraylist, - dtype=..., - shape=..., - formats=..., - names=..., - titles=..., - aligned=..., - byteorder=..., - fill_value=..., + dtype=None, + shape=None, + formats=None, + names=None, + titles=None, + aligned=False, + byteorder=None, + fill_value=None, ): ... def fromrecords( reclist, - dtype=..., - shape=..., - formats=..., - names=..., - titles=..., - aligned=..., - byteorder=..., - fill_value=..., + dtype=None, + shape=None, + formats=None, + names=None, + titles=None, + aligned=False, + byteorder=None, + fill_value=None, mask=..., ): ... 
def fromtextfile( fname, - delimiter=..., - commentchar=..., - missingchar=..., - varnames=..., - vartypes=..., - # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 - # delimitor=..., + delimiter=None, + commentchar="#", + missingchar="", + varnames=None, + vartypes=None, ): ... -def addfield(mrecord, newfield, newfieldname=...): ... +def addfield(mrecord, newfield, newfieldname=None): ... diff --git a/blimgui/dist64/numpy/ma/tests/test_core.py b/blimgui/dist64/numpy/ma/tests/test_core.py index a907df5..40fec15 100644 --- a/blimgui/dist64/numpy/ma/tests/test_core.py +++ b/blimgui/dist64/numpy/ma/tests/test_core.py @@ -1,4 +1,3 @@ -# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant @@ -6,71 +5,161 @@ """ __author__ = "Pierre GF Gerard-Marchant" -import sys -import warnings import copy -import operator +import inspect import itertools -import textwrap +import operator import pickle +import sys +import textwrap +import warnings from functools import reduce import pytest import numpy as np -import numpy.ma.core import numpy._core.fromnumeric as fromnumeric import numpy._core.umath as umath -from numpy.exceptions import AxisError -from numpy.testing import ( - assert_raises, assert_warns, suppress_warnings, IS_WASM, temppath - ) -from numpy.testing._private.utils import requires_memory +import numpy.ma.core from numpy import ndarray from numpy._utils import asbytes -from numpy.ma.testutils import ( - assert_, assert_array_equal, assert_equal, assert_almost_equal, - assert_equal_records, fail_if_equal, assert_not_equal, - assert_mask_equal - ) +from numpy.exceptions import AxisError from numpy.ma.core import ( - MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, - allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2, - arcsin, arctan, argsort, array, asarray, choose, concatenate, - conjugate, cos, cosh, count, default_fill_value, diag, divide, 
doc_note, - empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid, - flatten_structured_array, fromflex, getmask, getmaskarray, greater, - greater_equal, identity, inner, isMaskedArray, less, less_equal, log, - log10, make_mask, make_mask_descr, mask_or, masked, masked_array, - masked_equal, masked_greater, masked_greater_equal, masked_inside, - masked_less, masked_less_equal, masked_not_equal, masked_outside, - masked_print_option, masked_values, masked_where, max, maximum, - maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, - mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put, - putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, - sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like, - ) + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + angle, + anom, + arange, + arccos, + arccosh, + arcsin, + arctan, + arctan2, + argsort, + array, + asarray, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + default_fill_value, + diag, + divide, + empty, + empty_like, + equal, + exp, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + fromflex, + getmask, + getmaskarray, + greater, + greater_equal, + identity, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + make_mask_descr, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + nomask, + not_equal, + ones, + ones_like, + outer, + power, + product, + put, + putmask, + ravel, + repeat, + reshape, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + take, + tan, + tanh, + transpose, + 
where, + zeros, + zeros_like, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_equal_records, + assert_mask_equal, + assert_not_equal, + fail_if_equal, +) +from numpy.testing import IS_WASM, assert_raises, temppath +from numpy.testing._private.utils import requires_memory pi = np.pi -suppress_copy_mask_on_assignment = suppress_warnings() -suppress_copy_mask_on_assignment.filter( - numpy.ma.core.MaskedArrayFutureWarning, - "setting an item on a masked array which has a shared mask will not copy") - - # For parametrized numeric testing num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] num_ids = [dt_.char for dt_ in num_dts] - +WARNING_MESSAGE = ("setting an item on a masked array which has a shared " + "mask will not copy") +WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning" class TestMaskedArray: # Base test class for MaskedArrays. - def setup_method(self): + # message for warning filters + def _create_data(self): # Base data definition. - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] @@ -81,7 +170,7 @@ def setup_method(self): zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + return x, y, a10, m1, m2, xm, ym, z, zm, xf def test_basicattributes(self): # Tests some basic array attributes. @@ -107,7 +196,7 @@ def test_basic0d(self): def test_basic1d(self): # Test of basic array creation and properties in 1 dimension. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, _, _, m1, _, xm, ym, z, zm, xf = self._create_data() assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_((xm - ym).filled(0).any()) @@ -117,35 +206,35 @@ def test_basic1d(self): assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) assert_equal(zm.dtype, z.dtype) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) assert_array_equal(xm, xf) assert_array_equal(filled(xm, 1.e20), xf) assert_array_equal(x, xm) def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, _, xm, ym, _, _, xf = self._create_data() for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) assert_equal(xm, xf) assert_equal(filled(xm, 1.e20), xf) assert_equal(x, xm) def test_concatenate_basic(self): # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() # basic concatenation assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) assert_equal(np.concatenate((x, y)), concatenate((x, y))) @@ -154,10 +243,15 @@ def test_concatenate_basic(self): def test_concatenate_alongaxis(self): # Tests concatenations. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, m2, xm, ym, z, _, xf = self._create_data() # Concatenation along an axis s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) + assert_equal(xm.mask, np.reshape(m1, s)) assert_equal(ym.mask, np.reshape(m2, s)) xmym = concatenate((xm, ym), 1) @@ -263,9 +357,9 @@ def __bool__(self): assert_array_equal(res.mask, [[True, False], [False, False]]) def test_creation_from_ndarray_with_padding(self): - x = np.array([('A', 0)], dtype={'names':['f0','f1'], - 'formats':['S4','i8'], - 'offsets':[0,8]}) + x = np.array([('A', 0)], dtype={'names': ['f0', 'f1'], + 'formats': ['S4', 'i8'], + 'offsets': [0, 8]}) array(x) # used to fail due to 'V' padding field in x.dtype.descr def test_unknown_keyword_parameter(self): @@ -273,7 +367,7 @@ def test_unknown_keyword_parameter(self): MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. def test_asarray(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm = self._create_data()[5] xm.fill_value = -9999 xm._hardmask = True xmm = asarray(xm) @@ -395,7 +489,7 @@ def test_setitem_no_warning(self): x[...] = value x[[0, 1, 2]] = value - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_copy(self): # Tests of some subtle points of copying and sizing. n = [0, 0, 1, 0, 0] @@ -414,9 +508,9 @@ def test_copy(self): y1a = array(y1) # Default for masked array is not to copy; see gh-10318. 
assert_(y1a._data.__array_interface__ == - y1._data.__array_interface__) + y1._data.__array_interface__) assert_(y1a._mask.__array_interface__ == - y1._mask.__array_interface__) + y1._mask.__array_interface__) y2 = array(x1, mask=m3) assert_(y2._data.__array_interface__ == x1.__array_interface__) @@ -476,8 +570,8 @@ def test_copy_0d(self): def test_copy_on_python_builtins(self): # Tests copy works on python builtins (issue#8019) - assert_(isMaskedArray(np.ma.copy([1,2,3]))) - assert_(isMaskedArray(np.ma.copy((1,2,3)))) + assert_(isMaskedArray(np.ma.copy([1, 2, 3]))) + assert_(isMaskedArray(np.ma.copy((1, 2, 3)))) def test_copy_immutable(self): # Tests that the copy method is immutable, GitHub issue #5247 @@ -515,7 +609,7 @@ def test_format(self): # assert_equal(format(masked, " <5"), "-- ") # Expect a FutureWarning for using format_spec with MaskedElement - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): with_format_string = format(masked, " >5") assert_equal(with_format_string, "--") @@ -554,7 +648,7 @@ def test_str_repr(self): # 2d arrays cause wrapping a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) - a[1,1] = np.ma.masked + a[1, 1] = np.ma.masked assert_equal( repr(a), textwrap.dedent(f'''\ @@ -673,8 +767,7 @@ def test_pickling_wstructured(self): def test_pickling_keepalignment(self): # Tests pickling w/ F_CONTIGUOUS arrays - a = arange(10) - a.shape = (-1, 2) + a = arange(10).reshape( (-1, 2)) b = a.T for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): test = pickle.loads(pickle.dumps(b, protocol=proto)) @@ -696,8 +789,9 @@ def test_topython(self): assert_equal(1.0, float(array([[1]]))) assert_raises(TypeError, float, array([1, 1])) - with suppress_warnings() as sup: - sup.filter(UserWarning, 'Warning: converting a masked element') + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', 'Warning: converting a masked element', UserWarning) assert_(np.isnan(float(array([1], mask=[1])))) a = array([1, 2, 3], mask=[1, 0, 0]) 
@@ -706,7 +800,7 @@ def test_topython(self): assert_(np.isnan(float(a[0]))) assert_raises(TypeError, int, a) assert_equal(int(a[-1]), 3) - assert_raises(MAError, lambda:int(a[0])) + assert_raises(MAError, lambda: int(a[0])) def test_oddfeatures_1(self): # Test of other odd features @@ -750,14 +844,17 @@ def test_oddfeatures_2(self): assert_(z[1] is not masked) assert_(z[2] is masked) - @suppress_copy_mask_on_assignment def test_oddfeatures_3(self): - # Tests some generic features - atest = array([10], mask=True) - btest = array([20]) - idx = atest.mask - atest[idx] = btest[idx] - assert_equal(atest, [20]) + msg = "setting an item on a masked array which has a shared mask will not copy" + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', msg, numpy.ma.core.MaskedArrayFutureWarning) + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) def test_filled_with_object_dtype(self): a = np.ma.masked_all(1, dtype='O') @@ -804,7 +901,7 @@ def test_filled_with_nested_dtype(self): assert_equal(test, control) # test if mask gets set correctly (see #6760) - Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))])) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2, 2))])) assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), ('f1', 'i1', (2, 2))], (2, 2))])) assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), @@ -828,7 +925,7 @@ def test_optinfo_propagation(self): assert_equal(x._optinfo['info'], '???') def test_optinfo_forward_propagation(self): - a = array([1,2,2,4]) + a = array([1, 2, 2, 4]) a._optinfo["key"] = "value" assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) @@ -840,7 +937,7 @@ def test_optinfo_forward_propagation(self): assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a / 
2)._optinfo["key"]) assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) - assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0, 0, 2]]._optinfo["key"]) assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) @@ -856,13 +953,13 @@ def test_fancy_printoptions(self): assert_equal(str(test), control) # Test 0-d array with multi-dimensional dtype - t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]], - 0.0), - mask = (False, [[True, False, True], - [False, False, True]], - False), - dtype = "int, (2,3)float, float") + t_2d0 = masked_array(data=(0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask=(False, [[True, False, True], + [False, False, True]], + False), + dtype="int, (2,3)float, float") control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" assert_equal(str(t_2d0), control) @@ -933,7 +1030,7 @@ def test_mvoid_getitem(self): assert_equal(f[1], 4) # exotic dtype - A = masked_array(data=[([0,1],)], + A = masked_array(data=[([0, 1],)], mask=[([True, False],)], dtype=[("A", ">i2", (2,))]) assert_equal(A[0]["A"], A["A"][0]) @@ -950,6 +1047,7 @@ def test_mvoid_iter(self): # w/ mask assert_equal(list(a[1]), [masked, 4]) + @pytest.mark.thread_unsafe(reason="masked_print_option.set_display global state") def test_mvoid_print(self): # Test printing a mvoid mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) @@ -967,39 +1065,40 @@ def test_mvoid_print(self): mx = array([(1,), (2,)], dtype=[('a', 'O')]) assert_equal(str(mx[0]), "(1,)") + @pytest.mark.thread_unsafe(reason="masked_print_option global state") def test_mvoid_multidim_print(self): # regression test for gh-6019 - t_ma = masked_array(data = [([1, 2, 3],)], - mask = [([False, True, False],)], - fill_value = ([999999, 999999, 999999],), - dtype = [('a', '= len(a))) # No mask test = take(a, mindices, mode='clip') 
@@ -3893,29 +4016,28 @@ def test_arraymethod_0d(self): def test_transpose_view(self): x = np.ma.array([[1, 2, 3], [4, 5, 6]]) - x[0,1] = np.ma.masked + x[0, 1] = np.ma.masked xt = x.T - xt[1,0] = 10 - xt[0,1] = np.ma.masked + xt[1, 0] = 10 + xt[0, 1] = np.ma.masked assert_equal(x.data, xt.T.data) assert_equal(x.mask, xt.T.mask) def test_diagonal_view(self): - x = np.ma.zeros((3,3)) - x[0,0] = 10 - x[1,1] = np.ma.masked - x[2,2] = 20 + x = np.ma.zeros((3, 3)) + x[0, 0] = 10 + x[1, 1] = np.ma.masked + x[2, 2] = 20 xd = x.diagonal() - x[1,1] = 15 + x[1, 1] = 15 assert_equal(xd.mask, x.diagonal().mask) assert_equal(xd.data, x.diagonal().data) class TestMaskedArrayMathMethods: - - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -3945,11 +4067,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_cumsumprod(self): # Tests cumsum & cumprod on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_equal(mXcp._data, mX.filled(0).cumsum(0)) mXcp = mX.cumsum(1) @@ -3983,7 +4105,7 @@ def test_cumsumprod_with_output(self): def test_ptp(self): # Tests ptp on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, m, mx, mX, _, _, _, _ = self._create_data() (n, m) = X.shape assert_equal(mx.ptp(), np.ptp(mx.compressed())) rows = np.zeros(n, float) @@ -4047,7 +4169,7 @@ def test_anom(self): def test_trace(self): # Tests trace on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, _, _, mX, _, _, _, _ = self._create_data() mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_almost_equal(mX.trace(), @@ -4056,13 +4178,13 @@ def test_trace(self): assert_equal(np.trace(mX), mX.trace()) # gh-5560 - arr = np.arange(2*4*4).reshape(2,4,4) + arr = np.arange(2 * 4 * 4).reshape(2, 4, 4) m_arr = np.ma.masked_array(arr, False) assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) def test_dot(self): # Tests dot on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, _, _, _, mx, mX, mXX, _, _, _ = self._create_data() fx = mx.filled(0) r = mx.dot(mx) assert_almost_equal(r.filled(0), fx.dot(fx)) @@ -4071,7 +4193,7 @@ def test_dot(self): fX = mX.filled(0) r = mX.dot(mX) assert_almost_equal(r.filled(0), fX.dot(fX)) - assert_(r.mask[1,3]) + assert_(r.mask[1, 3]) r1 = empty_like(r) mX.dot(mX, out=r1) assert_almost_equal(r, r1) @@ -4086,23 +4208,23 @@ def test_dot(self): def test_dot_shape_mismatch(self): # regression test - x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) - y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) - z = masked_array([[0,1],[3,3]]) + x = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + y = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + z = masked_array([[0, 1], [3, 3]]) x.dot(y, out=z) assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) assert_almost_equal(z.mask, [[0, 1], [0, 0]]) def test_varmean_nomask(self): # gh-5769 - foo = array([1,2,3,4], dtype='f8') - bar = array([1,2,3,4], dtype='f8') + foo = array([1, 2, 3, 4], dtype='f8') + bar = array([1, 2, 3, 4], dtype='f8') assert_equal(type(foo.mean()), np.float64) assert_equal(type(foo.var()), np.float64) - assert((foo.mean() == bar.mean()) is np.bool(True)) + assert (foo.mean() == bar.mean()) is np.bool(True) # check array type is preserved and out works - foo = array(np.arange(16).reshape((4,4)), dtype='f8') + foo = 
array(np.arange(16).reshape((4, 4)), dtype='f8') bar = empty(4, dtype='f4') assert_equal(type(foo.mean(axis=1)), MaskedArray) assert_equal(type(foo.var(axis=1)), MaskedArray) @@ -4111,7 +4233,7 @@ def test_varmean_nomask(self): def test_varstd(self): # Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_almost_equal(mX.std(axis=None, ddof=1), @@ -4131,7 +4253,7 @@ def test_varstd(self): assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_varstd_specialcases(self): # Test a special case for var nout = np.array(-1, dtype=float) @@ -4212,6 +4334,7 @@ def test_axis_methods_nomask(self): assert_equal(a.max(-1), [3, 6]) assert_equal(a.max(1), [3, 6]) + @pytest.mark.thread_unsafe(reason="crashes with low memory") @requires_memory(free_bytes=2 * 10000 * 1000 * 2) def test_mean_overflow(self): # Test overflow in masked arrays @@ -4262,7 +4385,7 @@ def test_diff_with_n_0(self): class TestMaskedArrayMathMethodsComplex: # Test class for miscellaneous MaskedArrays methods. - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -4292,11 +4415,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_varstd(self): # Tests var & std on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) @@ -4315,17 +4438,6 @@ def test_varstd(self): class TestMaskedArrayFunctions: # Test class for miscellaneous functions. - - def setup_method(self): - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - xm.set_fill_value(1e+20) - self.info = (xm, ym) - def test_masked_where_bool(self): x = [1, 2] y = masked_where(False, x) @@ -4539,7 +4651,7 @@ def test_power_with_broadcasting(self): @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_where(self): # Test the where function - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] @@ -4974,7 +5086,7 @@ class A(np.ndarray): assert_(type(test) is A) # Test that compress flattens - test = np.ma.compressed([[1],[2]]) + test = np.ma.compressed([[1], [2]]) assert_equal(test.ndim, 1) test = np.ma.compressed([[[[[1]]]]]) assert_equal(test.ndim, 1) @@ -5030,8 +5142,7 @@ def test_convolve(self): class TestMaskedFields: - - def setup_method(self): + def _create_data(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] @@ -5039,11 +5150,12 @@ def setup_method(self): mdtype = [('a', bool), ('b', bool), ('c', bool)] mask = [0, 1, 0, 0, 1] base = array(list(zip(ilist, flist, slist)), 
mask=mask, dtype=ddtype) - self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) + return {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} def test_set_records_masks(self): - base = self.data['base'] - mdtype = self.data['mdtype'] + data = self._create_data() + base = data['base'] + mdtype = data['mdtype'] # Set w/ nomask or masked base.mask = nomask assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) @@ -5062,7 +5174,7 @@ def test_set_records_masks(self): def test_set_record_element(self): # Check setting an element of a record) - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = (pi, pi, 'pi') @@ -5077,7 +5189,7 @@ def test_set_record_element(self): [b'pi', b'two', b'three', b'four', b'five']) def test_set_record_slice(self): - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[:3] = (pi, pi, 'pi') @@ -5093,7 +5205,7 @@ def test_set_record_slice(self): def test_mask_element(self): "Check record access" - base = self.data['base'] + base = self._create_data()['base'] base[0] = masked for n in ('a', 'b', 'c'): @@ -5141,8 +5253,8 @@ def _test_index(i): assert_equal_records(a[i]._mask, a._mask[i]) assert_equal(type(a[i, ...]), MaskedArray) - assert_equal_records(a[i,...]._data, a._data[i,...]) - assert_equal_records(a[i,...]._mask, a._mask[i,...]) + assert_equal_records(a[i, ...]._data, a._data[i, ...]) + assert_equal_records(a[i, ...]._mask, a._mask[i, ...]) _test_index(1) # No mask _test_index(0) # One element masked @@ -5186,9 +5298,10 @@ def test_setitem_scalar(self): assert_array_equal(arr.mask, [True, False, False]) def test_element_len(self): + data = self._create_data() # check that len() works for mvoid (Github issue #576) - for rec in self.data['base']: - assert_equal(len(rec), len(self.data['ddtype'])) + for rec in data['base']: + assert_equal(len(rec), 
len(data['ddtype'])) class TestMaskedObjectArray: @@ -5203,30 +5316,30 @@ def test_getitem(self): assert_(arr[0] is a0) assert_(arr[1] is a1) - assert_(isinstance(arr[0,...], MaskedArray)) - assert_(isinstance(arr[1,...], MaskedArray)) - assert_(arr[0,...][()] is a0) - assert_(arr[1,...][()] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_(arr[0, ...][()] is a0) + assert_(arr[1, ...][()] is a1) arr[0] = np.ma.masked assert_(arr[1] is a1) - assert_(isinstance(arr[0,...], MaskedArray)) - assert_(isinstance(arr[1,...], MaskedArray)) - assert_equal(arr[0,...].mask, True) - assert_(arr[1,...][()] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_equal(arr[0, ...].mask, True) + assert_(arr[1, ...][()] is a1) # gh-5962 - object arrays of arrays do something special assert_equal(arr[0].data, a0) assert_equal(arr[0].mask, True) - assert_equal(arr[0,...][()].data, a0) - assert_equal(arr[0,...][()].mask, True) + assert_equal(arr[0, ...][()].data, a0) + assert_equal(arr[0, ...][()].mask, True) def test_nested_ma(self): arr = np.ma.array([None, None]) # set the first object to be an unmasked masked constant. A little fiddly - arr[0,...] = np.array([np.ma.masked], object)[0,...] + arr[0, ...] = np.array([np.ma.masked], object)[0, ...] 
# check the above line did what we were aiming for assert_(arr.data[0] is np.ma.masked) @@ -5240,31 +5353,30 @@ def test_nested_ma(self): class TestMaskedView: - - def setup_method(self): + def _create_data(self): iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) - self.data = (data, a, controlmask) + return data, a, controlmask def test_view_to_nothing(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view() assert_(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) def test_view_to_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view(np.ndarray) assert_(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) def test_view_to_simple_dtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view(float) assert_(isinstance(test, MaskedArray)) @@ -5272,7 +5384,7 @@ def test_view_to_simple_dtype(self): assert_equal(test.mask, controlmask) def test_view_to_flexible_dtype(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view([('A', float), ('B', float)]) assert_equal(test.mask.dtype.names, ('A', 'B')) @@ -5292,7 +5404,7 @@ def test_view_to_flexible_dtype(self): assert_equal(test['B'], a['b'][-1]) def test_view_to_subdtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view((float, 2)) assert_(isinstance(test, MaskedArray)) @@ -5309,7 +5421,7 @@ def test_view_to_subdtype(self): assert_equal(test, data[-1]) def test_view_to_dtype_and_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view((float, 2), np.recarray) assert_equal(test, 
data) @@ -5321,10 +5433,10 @@ class TestOptionalArgs: def test_ndarrayfuncs(self): # test axis arg behaves the same as ndarray (including multiple axes) - d = np.arange(24.0).reshape((2,3,4)) - m = np.zeros(24, dtype=bool).reshape((2,3,4)) + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) # mask out last element of last dimension - m[:,:,-1] = True + m[:, :, -1] = True a = np.ma.array(d, mask=m) def testaxis(f, a, d): @@ -5332,9 +5444,9 @@ def testaxis(f, a, d): ma_f = np.ma.__getattribute__(f) # test axis arg - assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1)) - assert_equal(ma_f(a, axis=(0,1))[...,:-1], - numpy_f(d[...,:-1], axis=(0,1))) + assert_equal(ma_f(a, axis=1)[..., :-1], numpy_f(d[..., :-1], axis=1)) + assert_equal(ma_f(a, axis=(0, 1))[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1))) def testkeepdims(f, a, d): numpy_f = numpy.__getattribute__(f) @@ -5347,10 +5459,10 @@ def testkeepdims(f, a, d): numpy_f(d, keepdims=False).shape) # test both at once - assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1], - numpy_f(d[...,:-1], axis=1, keepdims=True)) - assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1], - numpy_f(d[...,:-1], axis=(0,1), keepdims=True)) + assert_equal(ma_f(a, axis=1, keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0, 1), keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1), keepdims=True)) for f in ['sum', 'prod', 'mean', 'var', 'std']: testaxis(f, a, d) @@ -5359,7 +5471,7 @@ def testkeepdims(f, a, d): for f in ['min', 'max']: testaxis(f, a, d) - d = (np.arange(24).reshape((2,3,4))%2 == 0) + d = (np.arange(24).reshape((2, 3, 4)) % 2 == 0) a = np.ma.array(d, mask=m) for f in ['all', 'any']: testaxis(f, a, d) @@ -5368,33 +5480,33 @@ def testkeepdims(f, a, d): def test_count(self): # test np.ma.count specially - d = np.arange(24.0).reshape((2,3,4)) - m = np.zeros(24, dtype=bool).reshape((2,3,4)) - m[:,0,:] = True + d = 
np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) + m[:, 0, :] = True a = np.ma.array(d, mask=m) assert_equal(count(a), 16) - assert_equal(count(a, axis=1), 2*ones((2,4))) - assert_equal(count(a, axis=(0,1)), 4*ones((4,))) - assert_equal(count(a, keepdims=True), 16*ones((1,1,1))) - assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4))) - assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4))) - assert_equal(count(a, axis=-2), 2*ones((2,4))) - assert_raises(ValueError, count, a, axis=(1,1)) + assert_equal(count(a, axis=1), 2 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 4 * ones((4,))) + assert_equal(count(a, keepdims=True), 16 * ones((1, 1, 1))) + assert_equal(count(a, axis=1, keepdims=True), 2 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 4 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 2 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) assert_raises(AxisError, count, a, axis=3) # check the 'nomask' path a = np.ma.array(d, mask=nomask) assert_equal(count(a), 24) - assert_equal(count(a, axis=1), 3*ones((2,4))) - assert_equal(count(a, axis=(0,1)), 6*ones((4,))) - assert_equal(count(a, keepdims=True), 24*ones((1,1,1))) + assert_equal(count(a, axis=1), 3 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 6 * ones((4,))) + assert_equal(count(a, keepdims=True), 24 * ones((1, 1, 1))) assert_equal(np.ndim(count(a, keepdims=True)), 3) - assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4))) - assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4))) - assert_equal(count(a, axis=-2), 3*ones((2,4))) - assert_raises(ValueError, count, a, axis=(1,1)) + assert_equal(count(a, axis=1, keepdims=True), 3 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 6 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 3 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) assert_raises(AxisError, count, a, axis=3) # check the 
'masked' singleton @@ -5472,7 +5584,7 @@ def test_deepcopy(self): def test_immutable(self): orig = np.ma.masked assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) - assert_raises(ValueError,operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.data, (), 1) assert_raises(ValueError, operator.setitem, orig.mask, (), False) view = np.ma.masked.view(np.ma.MaskedArray) @@ -5487,7 +5599,7 @@ def test_coercion_int(self): def test_coercion_float(self): a_f = np.zeros((), float) - assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + pytest.warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) assert_(np.isnan(a_f[()])) @pytest.mark.xfail(reason="See gh-9750") @@ -5542,6 +5654,7 @@ def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) + def test_masked_array_no_copy(): # check nomask array is updated in place a = np.ma.array([1, 2, 3, 4]) @@ -5556,9 +5669,10 @@ def test_masked_array_no_copy(): _ = np.ma.masked_invalid(a, copy=False) assert_array_equal(a.mask, [True, False, False, False, False]) + def test_append_masked_array(): - a = np.ma.masked_equal([1,2,3], value=2) - b = np.ma.masked_equal([4,3,2], value=2) + a = np.ma.masked_equal([1, 2, 3], value=2) + b = np.ma.masked_equal([4, 3, 2], value=2) result = np.ma.append(a, b) expected_data = [1, 2, 3, 4, 3, 2] @@ -5566,8 +5680,8 @@ def test_append_masked_array(): assert_array_equal(result.data, expected_data) assert_array_equal(result.mask, expected_mask) - a = np.ma.masked_all((2,2)) - b = np.ma.ones((3,1)) + a = np.ma.masked_all((2, 2)) + b = np.ma.ones((3, 1)) result = np.ma.append(a, b) expected_data = [1] * 3 @@ -5581,24 +5695,84 @@ def test_append_masked_array(): def test_append_masked_array_along_axis(): - a = np.ma.masked_equal([1,2,3], value=2) + a = np.ma.masked_equal([1, 2, 3], value=2) b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) # When `axis` is specified, `values` must have 
the correct shape. assert_raises(ValueError, np.ma.append, a, b, axis=0) - result = np.ma.append(a[np.newaxis,:], b, axis=0) + result = np.ma.append(a[np.newaxis, :], b, axis=0) expected = np.ma.arange(1, 10) expected[[1, 6]] = np.ma.masked - expected = expected.reshape((3,3)) + expected = expected.reshape((3, 3)) assert_array_equal(result.data, expected.data) assert_array_equal(result.mask, expected.mask) + def test_default_fill_value_complex(): # regression test for Python 3, where 'unicode' was not defined assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) +def test_string_dtype_fill_value_on_construction(): + # Regression test for gh-29421: allow string fill_value on StringDType masked arrays + dt = np.dtypes.StringDType() + data = np.array(["A", "test", "variable", ""], dtype=dt) + mask = [True, False, True, True] + # Prior to the fix, this would TypeError; now it should succeed + arr = np.ma.MaskedArray(data, mask=mask, fill_value="FILL", dtype=dt) + assert isinstance(arr.fill_value, str) + assert arr.fill_value == "FILL" + filled = arr.filled() + # Masked positions should be replaced by 'FILL' + assert filled.tolist() == ["FILL", "test", "FILL", "FILL"] + + +def test_string_dtype_default_fill_value(): + # Regression test for gh-29421: default fill_value for StringDType is 'N/A' + dt = np.dtypes.StringDType() + data = np.array(['x', 'y', 'z'], dtype=dt) + # no fill_value passed → uses default_fill_value internally + arr = np.ma.MaskedArray(data, mask=[True, False, True], dtype=dt) + # ensure it’s stored as a Python str and equals the expected default + assert isinstance(arr.fill_value, str) + assert arr.fill_value == 'N/A' + # masked slots should be replaced by that default + assert arr.filled().tolist() == ['N/A', 'y', 'N/A'] + + +def test_string_dtype_fill_value_persists_through_slice(): + # Regression test for gh-29421: .fill_value survives slicing/viewing + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ['a', 'b', 'c'], + mask=[True, False, 
True], + dtype=dt + ) + arr.fill_value = 'Z' + # slice triggers __array_finalize__ + sub = arr[1:] + # the slice should carry the same fill_value and behavior + assert isinstance(sub.fill_value, str) + assert sub.fill_value == 'Z' + assert sub.filled().tolist() == ['b', 'Z'] + + +def test_setting_fill_value_attribute(): + # Regression test for gh-29421: setting .fill_value post-construction works too + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ["x", "longstring", "mid"], mask=[False, True, False], dtype=dt + ) + # Setting the attribute should not raise + arr.fill_value = "Z" + assert arr.fill_value == "Z" + # And filled() should use the new fill_value + assert arr.filled()[0] == "x" + assert arr.filled()[1] == "Z" + assert arr.filled()[2] == "mid" + + def test_ufunc_with_output(): # check that giving an output argument always returns that output. # Regression test for gh-8416. @@ -5610,9 +5784,9 @@ def test_ufunc_with_output(): def test_ufunc_with_out_varied(): """ Test that masked arrays are immune to gh-10459 """ # the mask of the output should not affect the result, however it is passed - a = array([ 1, 2, 3], mask=[1, 0, 0]) - b = array([10, 20, 30], mask=[1, 0, 0]) - out = array([ 0, 0, 0], mask=[0, 0, 1]) + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) expected = array([11, 22, 33], mask=[1, 0, 0]) out_pos = out.copy() @@ -5704,8 +5878,9 @@ def test_mask_shape_assignment_does_not_break_masked(): b.shape = (1,) assert_equal(a.mask.shape, ()) + @pytest.mark.skipif(sys.flags.optimize > 1, - reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") # noqa: E501 def test_doc_note(): def method(self): """This docstring @@ -5789,3 +5964,45 @@ def test_uint_fill_value_and_filled(): # And this ensures things like filled work: np.testing.assert_array_equal( a.filled(), 
np.array([999999, 1]).astype("uint16"), strict=True) + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + (np.ma.nonzero, "(a)"), + (np.ma.anomalies, "(a, axis=None, dtype=None)"), + (np.ma.cumsum, "(a, axis=None, dtype=None, out=None)"), + (np.ma.compress, "(condition, a, axis=None, out=None)"), + ] +) +def test_frommethod_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + ( + np.ma.empty, + ( + "(shape, dtype=None, order='C', *, device=None, like=None, " + "fill_value=None, hardmask=False)" + ), + ), + ( + np.ma.empty_like, + ( + "(prototype, /, dtype=None, order='K', subok=True, shape=None, *, " + "device=None)" + ), + ), + (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), + ( + np.ma.identity, + "(n, dtype=None, *, like=None, fill_value=None, hardmask=False)", + ), + ] +) +def test_convert2ma_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + assert fn.__module__ == 'numpy.ma.core' diff --git a/blimgui/dist64/numpy/ma/tests/test_deprecations.py b/blimgui/dist64/numpy/ma/tests/test_deprecations.py index f7b44a2..2d55e5e 100644 --- a/blimgui/dist64/numpy/ma/tests/test_deprecations.py +++ b/blimgui/dist64/numpy/ma/tests/test_deprecations.py @@ -2,12 +2,11 @@ """ import pytest + import numpy as np -from numpy.testing import assert_warns -from numpy.ma.testutils import assert_equal from numpy.ma.core import MaskedArrayFutureWarning -import io -import textwrap +from numpy.ma.testutils import assert_equal + class TestArgsort: """ gh-8701 """ @@ -20,7 +19,7 @@ def _test_base(self, argsort, cls): # argsort has a bad default for >1d arrays arr_2d = np.array([[1, 2], [3, 4]]).view(cls) - result = assert_warns( + result = pytest.warns( np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) assert_equal(result, argsort(arr_2d, axis=None)) @@ -50,10 +49,10 @@ def test_axis_default(self): ma_max = np.ma.maximum.reduce # check that the 
default axis is still None, but warns on 2d arrays - result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_max, data2d) assert_equal(result, ma_max(data2d, axis=None)) - result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_min, data2d) assert_equal(result, ma_min(data2d, axis=None)) # no warnings on 1d, as both new and old defaults are equivalent @@ -64,21 +63,3 @@ def test_axis_default(self): result = ma_max(data1d) assert_equal(result, ma_max(data1d, axis=None)) assert_equal(result, ma_max(data1d, axis=0)) - - -class TestFromtextfile: - def test_fromtextfile_delimitor(self): - # NumPy 1.22.0, 2021-09-23 - - textfile = io.StringIO(textwrap.dedent( - """ - A,B,C,D - 'string 1';1;1.0;'mixed column' - 'string 2';2;2.0; - 'string 3';3;3.0;123 - 'string 4';4;4.0;3.14 - """ - )) - - with pytest.warns(DeprecationWarning): - result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') diff --git a/blimgui/dist64/numpy/ma/tests/test_extras.py b/blimgui/dist64/numpy/ma/tests/test_extras.py index 9f77ae0..b3d1e63 100644 --- a/blimgui/dist64/numpy/ma/tests/test_extras.py +++ b/blimgui/dist64/numpy/ma/tests/test_extras.py @@ -1,36 +1,73 @@ -# pylint: disable-msg=W0611, W0612, W0511 """Tests suite for MaskedArray. 
Adapted from the original test_ma by Pierre Gerard-Marchant :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ -import warnings +import inspect import itertools + import pytest import numpy as np from numpy._core.numeric import normalize_axis_tuple -from numpy.testing import ( - assert_warns, suppress_warnings - ) -from numpy.ma.testutils import ( - assert_, assert_array_equal, assert_equal, assert_almost_equal - ) from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, - nomask, ones, zeros, count - ) + MaskedArray, + arange, + array, + count, + getmaskarray, + masked, + masked_array, + nomask, + ones, + shape, + zeros, +) from numpy.ma.extras import ( - atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, - median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, - ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, - mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, - notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, - diagflat, ndenumerate, stack, vstack, _covhelper - ) + _covhelper, + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + compress_nd, + compress_rowcols, + corrcoef, + cov, + diagflat, + dot, + ediff1d, + flatnotmasked_contiguous, + in1d, + intersect1d, + isin, + mask_rowcols, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vstack, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, +) class TestGeneric: @@ -314,8 +351,8 @@ def test_complex(self): # (Regression test for https://github.com/numpy/numpy/issues/2684) mask = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 
0]], dtype=bool) - a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], - [9j, 0+1j, 2+3j, 4+5j, 7+7j]], + a = masked_array([[0, 1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j], + [9j, 0 + 1j, 2 + 3j, 4 + 5j, 7 + 7j]], mask=mask) av = average(a) @@ -324,12 +361,12 @@ def test_complex(self): assert_almost_equal(av.imag, expected.imag) av0 = average(a, axis=0) - expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j + expected0 = average(a.real, axis=0) + average(a.imag, axis=0) * 1j assert_almost_equal(av0.real, expected0.real) assert_almost_equal(av0.imag, expected0.imag) av1 = average(a, axis=1) - expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j + expected1 = average(a.real, axis=1) + average(a.imag, axis=1) * 1j assert_almost_equal(av1.real, expected1.real) assert_almost_equal(av1.imag, expected1.imag) @@ -343,13 +380,13 @@ def test_complex(self): wav0 = average(a, weights=wts, axis=0) expected0 = (average(a.real, weights=wts, axis=0) + - average(a.imag, weights=wts, axis=0)*1j) + average(a.imag, weights=wts, axis=0) * 1j) assert_almost_equal(wav0.real, expected0.real) assert_almost_equal(wav0.imag, expected0.imag) wav1 = average(a, weights=wts, axis=1) expected1 = (average(a.real, weights=wts, axis=1) + - average(a.imag, weights=wts, axis=1)*1j) + average(a.imag, weights=wts, axis=1) * 1j) assert_almost_equal(wav1.real, expected1.real) assert_almost_equal(wav1.imag, expected1.imag) @@ -450,8 +487,8 @@ def test_2d(self): assert_array_equal(d.mask, np.r_['1', m_1, m_2]) d = mr_[b_1, b_2] assert_(d.shape == (10, 5)) - assert_array_equal(d[:5,:], b_1) - assert_array_equal(d[5:,:], b_2) + assert_array_equal(d[:5, :], b_1) + assert_array_equal(d[5:, :], b_2) assert_array_equal(d.mask, np.r_[m_1, m_2]) def test_masked_constant(self): @@ -538,9 +575,9 @@ class TestCompressFunctions: def test_compress_nd(self): # Tests compress_nd - x = np.array(list(range(3*4*5))).reshape(3, 4, 5) - m = np.zeros((3,4,5)).astype(bool) - m[1,1,1] = True + x = np.array(list(range(3 * 4 
* 5))).reshape(3, 4, 5) + m = np.zeros((3, 4, 5)).astype(bool) + m[1, 1, 1] = True x = array(x, mask=m) # axis=None @@ -708,7 +745,7 @@ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): res = func(x, axis=axis) assert_equal(res, mask_rowcols(x, rowcols_axis)) @@ -856,7 +893,7 @@ def test_3d_kwargs(self): a = arange(12).reshape(2, 2, 3) def myfunc(b, offset=0): - return b[1+offset] + return b[1 + offset] xa = apply_along_axis(myfunc, 2, a, offset=1) assert_equal(xa, [[2, 5], [8, 11]]) @@ -921,11 +958,11 @@ def test_non_masked(self): def test_docstring_examples(self): "test the examples given in the docstring of ma.median" - x = array(np.arange(8), mask=[0]*4 + [1]*4) + x = array(np.arange(8), mask=[0] * 4 + [1] * 4) assert_equal(np.ma.median(x), 1.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + x = array(np.arange(10).reshape(2, 5), mask=[0] * 6 + [1] * 4) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) @@ -974,38 +1011,38 @@ def test_masked_1d(self): assert_equal(np.ma.median(x), 2.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(5), mask=[0,1,0,0,0]) + x = array(np.arange(5), mask=[0, 1, 0, 0, 0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(5), mask=[0,1,1,1,1]) + x = array(np.arange(5), mask=[0, 1, 1, 1, 1]) assert_equal(np.ma.median(x), 0.) 
assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # integer - x = array(np.arange(5), mask=[0,1,1,0,0]) + x = array(np.arange(5), mask=[0, 1, 1, 0, 0]) assert_equal(np.ma.median(x), 3.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # float - x = array(np.arange(5.), mask=[0,1,1,0,0]) + x = array(np.arange(5.), mask=[0, 1, 1, 0, 0]) assert_equal(np.ma.median(x), 3.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # integer - x = array(np.arange(6), mask=[0,1,1,1,1,0]) + x = array(np.arange(6), mask=[0, 1, 1, 1, 1, 0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # float - x = array(np.arange(6.), mask=[0,1,1,1,1,0]) + x = array(np.arange(6.), mask=[0, 1, 1, 1, 1, 0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) def test_1d_shape_consistency(self): - assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape, - np.ma.median(array([1,2,3],mask=[0,1,0])).shape ) + assert_equal(np.ma.median(array([1, 2, 3], mask=[0, 0, 0])).shape, + np.ma.median(array([1, 2, 3], mask=[0, 1, 0])).shape) def test_2d(self): # Tests median w/ 2D @@ -1040,7 +1077,7 @@ def test_3d(self): x = np.ma.arange(24).reshape(3, 4, 2) x[x % 3 == 0] = masked assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) + x = x.reshape((4, 3, 2)) assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) x = np.ma.arange(24).reshape(4, 3, 2) x[x % 5 == 0] = masked @@ -1073,11 +1110,11 @@ def test_out(self): out = masked_array(np.ones(10)) r = median(x, axis=1, out=out) if v == 30: - e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3, + e = masked_array([0.] * 3 + [10, 13, 16, 19] + [0.] 
* 3, mask=[True] * 3 + [False] * 4 + [True] * 3) else: - e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3, - mask=[True]*3 + [False]*4 + [True]*3) + e = masked_array([0.] * 3 + [13.5, 17.5, 21.5, 25.5] + [0.] * 3, + mask=[True] * 3 + [False] * 4 + [True] * 3) assert_equal(r, e) assert_(r is out) assert_(type(r) is MaskedArray) @@ -1205,10 +1242,10 @@ def test_ambigous_fill(self): def test_special(self): for inf in [np.inf, -np.inf]: - a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.array([[inf, np.nan], [np.nan, np.nan]]) a = np.ma.masked_array(a, mask=np.isnan(a)) - assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) - assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) assert_equal(np.ma.median(a), inf) a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) @@ -1237,7 +1274,7 @@ def test_special(self): assert_equal(np.ma.median(a), -2.5) assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) - for i in range(0, 10): + for i in range(10): for j in range(1, 10): a = np.array([([np.nan] * i) + ([inf] * j)] * 2) a = np.ma.masked_array(a, mask=np.isnan(a)) @@ -1249,19 +1286,14 @@ def test_special(self): def test_empty(self): # empty arrays a = np.ma.masked_array(np.array([], dtype=float)) - with suppress_warnings() as w: - w.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # multiple dimensions a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) # no axis - with suppress_warnings() as w: - w.record(RuntimeWarning) - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # axis 0 and 1 b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) @@ -1270,10 +1302,8 @@ def test_empty(self): # axis 2 b = 
np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_equal(np.ma.median(a, axis=2), b) - assert_(w[0].category is RuntimeWarning) def test_object(self): o = np.ma.masked_array(np.arange(7.)) @@ -1284,11 +1314,11 @@ def test_object(self): class TestCov: - def setup_method(self): - self.data = array(np.random.rand(12)) + def _create_data(self): + return array(np.random.rand(12)) def test_covhelper(self): - x = self.data + x = self._create_data() # Test not mask output type is a float. assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) @@ -1309,7 +1339,7 @@ def test_covhelper(self): def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values - x = self.data + x = self._create_data() assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -1317,7 +1347,7 @@ def test_1d_without_missing(self): def test_2d_without_missing(self): # Test cov on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) + x = self._create_data().reshape(3, 4) assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -1325,7 +1355,7 @@ def test_2d_without_missing(self): def test_1d_with_missing(self): # Test cov 1 1D variable w/missing values - x = self.data + x = self._create_data() x[-1] = masked x -= x.mean() nx = x.compressed() @@ -1349,7 +1379,7 @@ def test_1d_with_missing(self): def test_2d_with_missing(self): # Test cov on 2D variable w/ missing value - x = self.data + x = self._create_data() x[-1] = masked x = x.reshape(3, 4) valid = np.logical_not(getmaskarray(x)).astype(int) @@ -1371,74 +1401,33 @@ def 
test_2d_with_missing(self): class TestCorrcoef: - def setup_method(self): - self.data = array(np.random.rand(12)) - self.data2 = array(np.random.rand(12)) - - def test_ddof(self): - # ddof raises DeprecationWarning - x, y = self.data, self.data2 - expected = np.corrcoef(x) - expected2 = np.corrcoef(x, y) - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof has no or negligible effect on the function - assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) - assert_almost_equal(corrcoef(x, ddof=-1), expected) - assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) - assert_almost_equal(corrcoef(x, ddof=3), expected) - assert_almost_equal(corrcoef(x, y, ddof=3), expected2) - - def test_bias(self): - x, y = self.data, self.data2 - expected = np.corrcoef(x) - # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, y, True, False) - assert_warns(DeprecationWarning, corrcoef, x, y, True, True) - assert_warns(DeprecationWarning, corrcoef, x, bias=False) - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(x, bias=1), expected) + def _create_data(self): + data = array(np.random.rand(12)) + data2 = array(np.random.rand(12)) + return data, data2 def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values - x = self.data + x = self._create_data()[0] assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) def test_2d_without_missing(self): # Test 
corrcoef on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) + x = self._create_data()[0].reshape(3, 4) assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) def test_1d_with_missing(self): # Test corrcoef 1 1D variable w/missing values - x = self.data + x = self._create_data()[0] x[-1] = masked x -= x.mean() nx = x.compressed() - assert_almost_equal(np.corrcoef(nx), corrcoef(x)) assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) try: corrcoef(x, allow_masked=False) except ValueError: @@ -1448,36 +1437,20 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), corrcoef(x, x[::-1], rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof and bias have no or negligible effect on the function - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], bias=1)) - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], ddof=2)) def test_2d_with_missing(self): # Test corrcoef on 2D variable w/ missing value - x = self.data + x = self._create_data()[0] x[-1] = masked x = x.reshape(3, 4) test = corrcoef(x) control = np.corrcoef(x) assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof and bias have no or negligible effect on the function - 
assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], - control[:-1, :-1]) class TestPolynomial: - # + def test_polyfit(self): # Tests polyfit # On ndarrays @@ -1502,7 +1475,7 @@ def test_polyfit(self): assert_almost_equal(a, a_) # (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) # @@ -1522,14 +1495,14 @@ def test_polyfit_with_masked_NaNs(self): y = np.random.rand(20).reshape(-1, 2) x[0] = np.nan - y[-1,-1] = np.nan + y[-1, -1] = np.nan x = x.view(MaskedArray) y = y.view(MaskedArray) x[0] = masked - y[-1,-1] = masked + y[-1, -1] = masked (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) @@ -1717,7 +1690,7 @@ def test_isin(self): c = isin(a, b) assert_(isinstance(c, MaskedArray)) assert_array_equal(c, ec) - #compare results of np.isin to ma.isin + # compare results of np.isin to ma.isin d = np.isin(a, b[~b.mask]) & ~a.mask assert_array_equal(c, d) @@ -1839,6 +1812,18 @@ def test_shape_scalar(self): assert_equal(b.shape, (1, 1)) assert_equal(b.mask.shape, b.data.shape) + @pytest.mark.parametrize("fn", [atleast_1d, vstack, diagflat]) + def test_inspect_signature(self, fn): + name = fn.__name__ + assert getattr(np.ma, name) is fn + + assert fn.__module__ == "numpy.ma.extras" + + wrapped = getattr(np, fn.__name__) + sig_wrapped = inspect.signature(wrapped) + sig = inspect.signature(fn) + assert sig == sig_wrapped + class TestNDEnumerate: diff --git a/blimgui/dist64/numpy/ma/tests/test_mrecords.py 
b/blimgui/dist64/numpy/ma/tests/test_mrecords.py index 201b603..da184d5 100644 --- a/blimgui/dist64/numpy/ma/tests/test_mrecords.py +++ b/blimgui/dist64/numpy/ma/tests/test_mrecords.py @@ -1,4 +1,3 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for mrecords. :author: Pierre Gerard-Marchant @@ -9,19 +8,22 @@ import numpy as np import numpy.ma as ma -from numpy.ma import masked, nomask -from numpy.testing import temppath from numpy._core.records import ( - recarray, fromrecords as recfromrecords, fromarrays as recfromarrays - ) + fromarrays as recfromarrays, + fromrecords as recfromrecords, + recarray, +) +from numpy.ma import masked, nomask from numpy.ma.mrecords import ( - MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, - addfield - ) -from numpy.ma.testutils import ( - assert_, assert_equal, - assert_equal_records, - ) + MaskedRecords, + addfield, + fromarrays, + fromrecords, + fromtextfile, + mrecarray, +) +from numpy.ma.testutils import assert_, assert_equal, assert_equal_records +from numpy.testing import temppath class TestMRecords: @@ -97,10 +99,10 @@ def test_set_fields(self): assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) # Change the elements, and the mask will follow mbase.a = 1 - assert_equal(mbase['a']._data, [1]*5) - assert_equal(ma.getmaskarray(mbase['a']), [0]*5) + assert_equal(mbase['a']._data, [1] * 5) + assert_equal(ma.getmaskarray(mbase['a']), [0] * 5) # Use to be _mask, now it's recordmask - assert_equal(mbase.recordmask, [False]*5) + assert_equal(mbase.recordmask, [False] * 5) assert_equal(mbase._mask.tolist(), np.array([(0, 0, 0), (0, 1, 1), @@ -111,10 +113,10 @@ def test_set_fields(self): # Set a field to mask ........................ mbase.c = masked # Use to be mask, and now it's still mask ! 
- assert_equal(mbase.c.mask, [1]*5) - assert_equal(mbase.c.recordmask, [1]*5) - assert_equal(ma.getmaskarray(mbase['c']), [1]*5) - assert_equal(ma.getdata(mbase['c']), [b'N/A']*5) + assert_equal(mbase.c.mask, [1] * 5) + assert_equal(mbase.c.recordmask, [1] * 5) + assert_equal(ma.getmaskarray(mbase['c']), [1] * 5) + assert_equal(ma.getdata(mbase['c']), [b'N/A'] * 5) assert_equal(mbase._mask.tolist(), np.array([(0, 0, 1), (0, 1, 1), @@ -160,16 +162,16 @@ def test_set_mask(self): mbase = base.view(mrecarray) # Set the mask to True ....................... mbase.mask = masked - assert_equal(ma.getmaskarray(mbase['b']), [1]*5) + assert_equal(ma.getmaskarray(mbase['b']), [1] * 5) assert_equal(mbase['a']._mask, mbase['b']._mask) assert_equal(mbase['a']._mask, mbase['c']._mask) assert_equal(mbase._mask.tolist(), - np.array([(1, 1, 1)]*5, dtype=bool)) + np.array([(1, 1, 1)] * 5, dtype=bool)) # Delete the mask ............................ mbase.mask = nomask - assert_equal(ma.getmaskarray(mbase['c']), [0]*5) + assert_equal(ma.getmaskarray(mbase['c']), [0] * 5) assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 0)]*5, dtype=bool)) + np.array([(0, 0, 0)] * 5, dtype=bool)) def test_set_mask_fromarray(self): base = self.base.copy() @@ -348,24 +350,24 @@ def test_exotic_formats(self): class TestView: - def setup_method(self): - (a, b) = (np.arange(10), np.random.rand(10)) + def _create_data(self): + a, b = (np.arange(10), np.random.rand(10)) ndtype = [('a', float), ('b', float)] arr = np.array(list(zip(a, b)), dtype=ndtype) mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.)) mrec.mask[3] = (False, True) - self.data = (mrec, a, b, arr) + return mrec, a, b, arr def test_view_by_itself(self): - (mrec, a, b, arr) = self.data + mrec = self._create_data()[0] test = mrec.view() assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, mrec) assert_equal_records(test._mask, mrec._mask) def test_view_simple_dtype(self): - (mrec, a, b, arr) = self.data + mrec, a, b, 
_ = self._create_data() ntype = (float, 2) test = mrec.view(ntype) assert_(isinstance(test, ma.MaskedArray)) @@ -373,7 +375,7 @@ def test_view_simple_dtype(self): assert_(test[3, 1] is ma.masked) def test_view_flexible_type(self): - (mrec, a, b, arr) = self.data + mrec, _, _, arr = self._create_data() alttype = [('A', float), ('B', float)] test = mrec.view(alttype) assert_(isinstance(test, MaskedRecords)) @@ -411,14 +413,14 @@ def test_fromarrays(self): def test_fromrecords(self): # Test construction from records. (mrec, nrec, ddtype) = self.data - #...... + # ...... palist = [(1, 'abc', 3.7000002861022949, 0), (2, 'xy', 6.6999998092651367, 1), (0, ' ', 0.40000000596046448, 0)] pa = recfromrecords(palist, names='c1, c2, c3, c4') mpa = fromrecords(palist, names='c1, c2, c3, c4') assert_equal_records(pa, mpa) - #..... + # ..... _mrec = fromrecords(nrec) assert_equal(_mrec.dtype, mrec.dtype) for field in _mrec.dtype.names: diff --git a/blimgui/dist64/numpy/ma/tests/test_old_ma.py b/blimgui/dist64/numpy/ma/tests/test_old_ma.py index 33ce54c..8586bbc 100644 --- a/blimgui/dist64/numpy/ma/tests/test_old_ma.py +++ b/blimgui/dist64/numpy/ma/tests/test_old_ma.py @@ -1,27 +1,89 @@ -from functools import reduce import pickle +from functools import reduce import pytest import numpy as np -import numpy._core.umath as umath import numpy._core.fromnumeric as fromnumeric -from numpy.testing import ( - assert_, assert_raises, assert_equal, - ) +import numpy._core.umath as umath from numpy.ma import ( - MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue, - arange, arccos, arcsin, arctan, arctan2, array, average, choose, - concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled, - getmask, greater, greater_equal, inner, isMaskedArray, less, - less_equal, log, log10, make_mask, masked, masked_array, masked_equal, - masked_greater, masked_greater_equal, masked_inside, masked_less, - masked_less_equal, masked_not_equal, masked_outside, - 
masked_print_option, masked_values, masked_where, maximum, minimum, - multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel, - repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum, - take, tan, tanh, transpose, where, zeros, - ) + MaskedArray, + MaskType, + absolute, + add, + all, + allclose, + allequal, + alltrue, + arange, + arccos, + arcsin, + arctan, + arctan2, + array, + average, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + divide, + equal, + exp, + filled, + getmask, + greater, + greater_equal, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + maximum, + minimum, + multiply, + nomask, + nonzero, + not_equal, + ones, + outer, + product, + put, + ravel, + repeat, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + take, + tan, + tanh, + transpose, + where, + zeros, +) +from numpy.testing import assert_, assert_equal, assert_raises pi = np.pi @@ -35,8 +97,8 @@ def eq(v, w, msg=''): class TestMa: - def setup_method(self): - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + def _create_data(self): + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] @@ -48,18 +110,18 @@ def setup_method(self): xf = np.where(m1, 1e+20, x) s = x.shape xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) + return x, y, a10, m1, m2, xm, ym, z, zm, xf, s def test_testBasic1d(self): # Test of basic array creation and properties in 1 dimension. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, _, _, m1, _, xm, _, _, _, xf, s = self._create_data() assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) assert_(eq(xm, xf)) assert_(eq(filled(xm, 1.e20), xf)) assert_(eq(x, xm)) @@ -67,7 +129,7 @@ def test_testBasic1d(self): @pytest.mark.parametrize("s", [(4, 3), (6, 2)]) def test_testBasic2d(self, s): # Test of basic array creation and properties in 2 dimensions. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, m1, _, xm, ym, _, _, xf, s = self._create_data() x.shape = s y.shape = s xm.shape = s @@ -86,7 +148,7 @@ def test_testBasic2d(self, s): def test_testArithmetic(self): # Test of basic arithmetic. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, a10, _, _, xm, ym, _, _, xf, s = self._create_data() a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_(eq(a2d * a2d, a2d * a2dm)) @@ -130,7 +192,7 @@ def test_testMixedArithmetic(self): def test_testUfuncs1(self): # Test various functions such as sin, cos. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, _, _, xm, ym, z, zm, _, _ = self._create_data() assert_(eq(np.cos(x), cos(xm))) assert_(eq(np.cosh(x), cosh(xm))) assert_(eq(np.sin(x), sin(xm))) @@ -176,7 +238,7 @@ def test_xtestCount(self): def test_testMinMax(self): # Test minimum and maximum. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, _, _, _, _, xm, _, _, _, _, _ = self._create_data() xr = np.ravel(x) # max doesn't work if shaped xmr = ravel(xm) @@ -186,7 +248,7 @@ def test_testMinMax(self): def test_testAddSumProd(self): # Test add, sum, product. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, _, _, xm, ym, _, _, _, s = self._create_data() assert_(eq(np.add.reduce(x), add.reduce(x))) assert_(eq(np.add.accumulate(x), add.accumulate(x))) assert_(eq(4, sum(array(4), axis=0))) @@ -355,7 +417,7 @@ def test_testPut2(self): assert_(eq(x, [0, 1, 10, 40, 4])) def test_testMaPut(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + _, _, _, _, _, _, ym, _, zm, _, _ = self._create_data() m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] i = np.nonzero(m)[0] put(ym, i, zm) @@ -594,12 +656,12 @@ def test_testAverage2(self): np.add.reduce(np.arange(6)) * 3. / 12.)) assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) assert_(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0)*2.0])) + [average(x, axis=0), average(x, axis=0) * 2.0])) assert_(allclose(average(y, None, weights=w2), 20. / 6.)) assert_(allclose(average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])) assert_(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0)*2.0])) + [average(x, axis=0), average(x, axis=0) * 2.0])) m1 = zeros(6) m2 = [0, 0, 1, 1, 0, 0] m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] @@ -651,7 +713,7 @@ def test_testToPython(self): def test_testScalarArithmetic(self): xm = array(0, mask=1) - #TODO FIXME: Find out what the following raises a warning in r8247 + # TODO FIXME: Find out what the following raises a warning in r8247 with np.errstate(divide='ignore'): assert_((1 / array(0)).mask) assert_((1 + xm).mask) @@ -715,8 +777,9 @@ def test_assignment_by_condition_2(self): class TestUfuncs: - def setup_method(self): - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + + def _create_data(self): + return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) def test_testUfuncRegression(self): @@ -745,7 +808,7 @@ def test_testUfuncRegression(self): except AttributeError: uf = getattr(fromnumeric, f) mf = 
getattr(np.ma, f) - args = self.d[:uf.nin] + args = self._create_data()[:uf.nin] with np.errstate(): if f in f_invalid_ignore: np.seterr(invalid='ignore') @@ -757,7 +820,7 @@ def test_testUfuncRegression(self): assert_(eqmask(ur.mask, mr.mask)) def test_reduce(self): - a = self.d[0] + a = self._create_data()[0] assert_(not alltrue(a, axis=0)) assert_(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) @@ -781,7 +844,7 @@ def test_nonzero(self): class TestArrayMethods: - def setup_method(self): + def _create_data(self): x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, @@ -801,10 +864,10 @@ def setup_method(self): mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX) + return x, X, XX, m, mx, mX, mXX def test_trace(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, X, _, _, _, mX, _ = self._create_data() mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_(eq(mX.trace(), @@ -812,15 +875,15 @@ def test_trace(self): axis=0))) def test_clip(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + x, _, _, _, mx, _, _ = self._create_data() clipped = mx.clip(2, 8) assert_(eq(clipped.mask, mx.mask)) assert_(eq(clipped._data, x.clip(2, 8))) assert_(eq(clipped._data, mx._data.clip(2, 8))) def test_ptp(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - (n, m) = X.shape + _, X, _, m, mx, mX, _ = self._create_data() + n, m = X.shape # print(type(mx), mx.compressed()) # raise Exception() assert_equal(mx.ptp(), np.ptp(mx.compressed())) @@ -834,28 +897,28 @@ def test_ptp(self): assert_(eq(mX.ptp(1), rows)) def test_swapaxes(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, _, _, _, _, mX, mXX = self._create_data() mXswapped = mX.swapaxes(0, 1) assert_(eq(mXswapped[-1], mX[:, -1])) mXXswapped = mXX.swapaxes(0, 2) assert_equal(mXXswapped.shape, (2, 2, 3, 3)) def test_cumprod(self): - (x, X, 
XX, m, mx, mX, mXX,) = self.d + mX = self._create_data()[5] mXcp = mX.cumprod(0) assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) mXcp = mX.cumprod(1) assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) def test_cumsum(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) mXcp = mX.cumsum(1) assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) def test_varstd(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, X, XX, _, _, mX, mXX = self._create_data() assert_(eq(mX.var(axis=None), mX.compressed().var())) assert_(eq(mX.std(axis=None), mX.compressed().std())) assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) diff --git a/blimgui/dist64/numpy/ma/tests/test_regression.py b/blimgui/dist64/numpy/ma/tests/test_regression.py index f72f1b5..23318a5 100644 --- a/blimgui/dist64/numpy/ma/tests/test_regression.py +++ b/blimgui/dist64/numpy/ma/tests/test_regression.py @@ -1,7 +1,5 @@ import numpy as np -from numpy.testing import ( - assert_, assert_array_equal, assert_allclose, suppress_warnings - ) +from numpy.testing import assert_, assert_array_equal class TestRegression: @@ -17,19 +15,19 @@ def test_masked_array(self): def test_mem_masked_where(self): # Ticket #62 - from numpy.ma import masked_where, MaskType + from numpy.ma import MaskType, masked_where a = np.zeros((1, 1)) b = np.zeros(a.shape, MaskType) c = masked_where(b, a) - a-c + a - c def test_masked_array_multiply(self): # Ticket #254 a = np.ma.zeros((4, 1)) a[2, 0] = np.ma.masked b = np.zeros((4, 2)) - a*b - b*a + a * b + b * a def test_masked_array_repeat(self): # Ticket #271 @@ -59,18 +57,6 @@ def test_var_sets_maskedarray_scalar(self): a.var(out=mout) assert_(mout._data == 0) - def test_ddof_corrcoef(self): - # See gh-3336 - x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) - y = np.array([2, 2.5, 3.1, 3, 5]) - # this test can be removed after deprecation. 
- with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - r0 = np.ma.corrcoef(x, y, ddof=0) - r1 = np.ma.corrcoef(x, y, ddof=1) - # ddof should not have an effect (it gets cancelled out) - assert_allclose(r0.data, r1.data) - def test_mask_not_backmangled(self): # See gh-10314. Test case taken from gh-3140. a = np.ma.MaskedArray([1., 2.], mask=[False, False]) @@ -87,7 +73,7 @@ def test_empty_list_on_structured(self): assert_array_equal(ma[[]], ma[:0]) def test_masked_array_tobytes_fortran(self): - ma = np.ma.arange(4).reshape((2,2)) + ma = np.ma.arange(4).reshape((2, 2)) assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes()) def test_structured_array(self): diff --git a/blimgui/dist64/numpy/ma/tests/test_subclassing.py b/blimgui/dist64/numpy/ma/tests/test_subclassing.py index 86673e3..efa08f6 100644 --- a/blimgui/dist64/numpy/ma/tests/test_subclassing.py +++ b/blimgui/dist64/numpy/ma/tests/test_subclassing.py @@ -1,19 +1,28 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ import numpy as np from numpy.lib.mixins import NDArrayOperatorsMixin -from numpy.testing import assert_, assert_raises -from numpy.ma.testutils import assert_equal from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, log, add, hypot, - divide, asarray, asanyarray, nomask - ) + MaskedArray, + add, + arange, + array, + asanyarray, + asarray, + divide, + hypot, + log, + masked, + masked_array, + nomask, +) +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises + # from numpy.ma.core import ( def assert_startswith(a, b): @@ -23,7 +32,7 @@ def assert_startswith(a, b): class SubArray(np.ndarray): # Defines a generic np.ndarray subclass, that stores some metadata # in the dictionary `info`. 
- def __new__(cls,arr,info={}): + def __new__(cls, arr, info={}): x = np.asanyarray(arr).view(cls) x.info = info.copy() return x @@ -31,7 +40,6 @@ def __new__(cls,arr,info={}): def __array_finalize__(self, obj): super().__array_finalize__(obj) self.info = getattr(obj, 'info', {}).copy() - return def __add__(self, other): result = super().__add__(other) @@ -69,6 +77,7 @@ def _series(self): _view._sharedmask = False return _view + msubarray = MSubArray @@ -179,10 +188,10 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): class TestSubclassing: # Test suite for masked subclasses of ndarray. - def setup_method(self): + def _create_data(self): x = np.arange(5, dtype='float') mx = msubarray(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) + return x, mx def test_data_subclassing(self): # Tests whether the subclass is kept. @@ -196,24 +205,24 @@ def test_data_subclassing(self): def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray - (x, mx) = self.data + mx = self._create_data()[1] assert_(isinstance(mx._data, subarray)) def test_masked_unary_operations(self): # Tests masked_unary_operation - (x, mx) = self.data + x, mx = self._create_data() with np.errstate(divide='ignore'): assert_(isinstance(log(mx), msubarray)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() # Result should be a msubarray assert_(isinstance(add(mx, mx), msubarray)) assert_(isinstance(add(mx, x), msubarray)) # Result should work - assert_equal(add(mx, x), mx+x) + assert_equal(add(mx, x), mx + x) assert_(isinstance(add(mx, mx)._data, subarray)) assert_(isinstance(add.outer(mx, mx), msubarray)) assert_(isinstance(hypot(mx, mx), msubarray)) @@ -221,24 +230,24 @@ def test_masked_binary_operations(self): def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() xmx = 
masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), msubarray)) assert_(isinstance(divide(mx, x), msubarray)) assert_equal(divide(mx, mx), divide(xmx, xmx)) def test_attributepropagation(self): - x = array(arange(5), mask=[0]+[1]*4) + x = array(arange(5), mask=[0] + [1] * 4) my = masked_array(subarray(x)) ym = msubarray(x) # - z = (my+1) + z = (my + 1) assert_(isinstance(z, MaskedArray)) assert_(not isinstance(z, MSubArray)) assert_(isinstance(z._data, SubArray)) assert_equal(z._data.info, {}) # - z = (ym+1) + z = (ym + 1) assert_(isinstance(z, MaskedArray)) assert_(isinstance(z, MSubArray)) assert_(isinstance(z._data, SubArray)) @@ -255,7 +264,7 @@ def test_attributepropagation(self): ym._series._set_mask([0, 0, 0, 0, 1]) assert_equal(ym._mask, [0, 0, 0, 0, 1]) # - xsub = subarray(x, info={'name':'x'}) + xsub = subarray(x, info={'name': 'x'}) mxsub = masked_array(xsub) assert_(hasattr(mxsub, 'info')) assert_equal(mxsub.info, xsub.info) @@ -265,7 +274,7 @@ def test_subclasspreservation(self): x = np.arange(5) m = [0, 0, 1, 0, 0] xinfo = list(zip(x, m)) - xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) + xsub = MSubArray(x, mask=m, info={'xsub': xinfo}) # mxsub = masked_array(xsub, subok=False) assert_(not isinstance(mxsub, MSubArray)) @@ -295,14 +304,14 @@ def test_subclass_items(self): # getter should return a ComplicatedSubArray, even for single item # first check we wrote ComplicatedSubArray correctly assert_(isinstance(xcsub[1], ComplicatedSubArray)) - assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1, ...], ComplicatedSubArray)) assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) # now that it propagates inside the MaskedArray assert_(isinstance(mxcsub[1], ComplicatedSubArray)) - assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1, ...].data, ComplicatedSubArray)) assert_(mxcsub[0] is masked) - assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) + 
assert_(isinstance(mxcsub[0, ...].data, ComplicatedSubArray)) assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) # also for flattened version (which goes via MaskedIterator) @@ -329,8 +338,8 @@ def test_subclass_nomask_items(self): xcsub = ComplicatedSubArray(x) mxcsub_nomask = masked_array(xcsub) - assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) - assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[1, ...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0, ...].data, ComplicatedSubArray)) assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) @@ -363,8 +372,8 @@ def test_subclass_str(self): def test_pure_subclass_info_preservation(self): # Test that ufuncs and methods conserve extra information consistently; # see gh-7122. - arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6]) - arr2 = SubMaskedArray(data=[0,1,2,3,4,5]) + arr1 = SubMaskedArray('test', data=[1, 2, 3, 4, 5, 6]) + arr2 = SubMaskedArray(data=[0, 1, 2, 3, 4, 5]) diff1 = np.subtract(arr1, arr2) assert_('info' in diff1._optinfo) assert_(diff1._optinfo['info'] == 'test') @@ -418,20 +427,20 @@ def test_array_no_inheritance(): class TestClassWrapping: # Test suite for classes that wrap MaskedArrays - def setup_method(self): + def _create_data(self): m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) wm = WrappedArray(m) - self.data = (m, wm) + return m, wm def test_masked_unary_operations(self): # Tests masked_unary_operation - (m, wm) = self.data + wm = self._create_data()[1] with np.errstate(divide='ignore'): assert_(isinstance(np.log(wm), WrappedArray)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (m, wm) = self.data + m, wm = self._create_data() # Result should be a WrappedArray assert_(isinstance(np.add(wm, wm), WrappedArray)) assert_(isinstance(np.add(m, wm), WrappedArray)) diff --git 
a/blimgui/dist64/numpy/ma/testutils.py b/blimgui/dist64/numpy/ma/testutils.py index 36a6765..2c72a53 100644 --- a/blimgui/dist64/numpy/ma/testutils.py +++ b/blimgui/dist64/numpy/ma/testutils.py @@ -2,20 +2,23 @@ :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ """ import operator import numpy as np -from numpy import ndarray import numpy._core.umath as umath import numpy.testing -from numpy.testing import ( - assert_, assert_allclose, assert_array_almost_equal_nulp, - assert_raises, build_err_msg - ) -from .core import mask_or, getmask, masked_array, nomask, masked, filled +from numpy import ndarray +from numpy.testing import ( # noqa: F401 + assert_, + assert_allclose, + assert_array_almost_equal_nulp, + assert_raises, + build_err_msg, +) + +from .core import filled, getmask, mask_or, masked, masked_array, nomask __all__masked = [ 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal', @@ -29,13 +32,14 @@ # have mistakenly included them from this file. SciPy is one. That is # unfortunate, as some of these functions are not intended to work with # masked arrays. But there was no way to tell before. 
-from unittest import TestCase +from unittest import TestCase # noqa: F401 + __some__from_testing = [ 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp', 'assert_raises' ] -__all__ = __all__masked + __some__from_testing +__all__ = __all__masked + __some__from_testing # noqa: PLE0605 def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8): @@ -91,7 +95,6 @@ def _assert_equal_on_sequences(actual, desired, err_msg=''): assert_equal(len(actual), len(desired), err_msg) for k in range(len(desired)): assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') - return def assert_equal_records(a, b): @@ -106,7 +109,6 @@ def assert_equal_records(a, b): (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) if not (af is masked) and not (bf is masked): assert_equal(operator.getitem(a, f), operator.getitem(b, f)) - return def assert_equal(actual, desired, err_msg=''): @@ -119,7 +121,7 @@ def assert_equal(actual, desired, err_msg=''): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(f"{k} not in {actual}") assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') @@ -157,7 +159,7 @@ def fail_if_equal(actual, desired, err_msg='',): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) fail_if_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') diff --git a/blimgui/dist64/numpy/ma/testutils.pyi b/blimgui/dist64/numpy/ma/testutils.pyi new file mode 100644 index 0000000..92b843b --- /dev/null +++ b/blimgui/dist64/numpy/ma/testutils.pyi @@ -0,0 +1,69 @@ +import numpy as np +from numpy._typing import NDArray +from numpy.testing import ( + TestCase, + assert_, + assert_allclose, + assert_array_almost_equal_nulp, + 
assert_raises, +) +from numpy.testing._private.utils import _ComparisonFunc + +__all__ = [ + "TestCase", + "almost", + "approx", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_approx_equal", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_close", + "assert_equal", + "assert_equal_records", + "assert_mask_equal", + "assert_not_equal", + "assert_raises", + "fail_if_array_equal", +] + +def approx( + a: object, b: object, fill_value: bool = True, rtol: float = 1e-5, atol: float = 1e-8 +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +def almost(a: object, b: object, decimal: int = 6, fill_value: bool = True) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... + +# +def assert_equal_records(a: NDArray[np.void], b: NDArray[np.void]) -> None: ... +def assert_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def fail_if_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def assert_almost_equal( + actual: object, desired: object, decimal: int = 7, err_msg: str = "", verbose: bool = True +) -> None: ... + +# +def assert_array_compare( + comparison: _ComparisonFunc, + x: object, + y: object, + err_msg: str = "", + verbose: bool = True, + header: str = "", + fill_value: bool = True, +) -> None: ... +def assert_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def fail_if_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_array_approx_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_almost_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_less(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... 
+def assert_mask_equal(m1: object, m2: object, err_msg: str = "") -> None: ... + +assert_not_equal = fail_if_equal +assert_close = assert_almost_equal diff --git a/blimgui/dist64/numpy/ma/timer_comparison.py b/blimgui/dist64/numpy/ma/timer_comparison.py deleted file mode 100644 index 33fbbb5..0000000 --- a/blimgui/dist64/numpy/ma/timer_comparison.py +++ /dev/null @@ -1,442 +0,0 @@ -import functools -import timeit - -import numpy as np -import numpy._core.fromnumeric as fromnumeric - -from numpy.testing import build_err_msg - - -pi = np.pi - -class ModuleTester: - def __init__(self, module): - self.module = module - self.allequal = module.allequal - self.arange = module.arange - self.array = module.array - self.concatenate = module.concatenate - self.count = module.count - self.equal = module.equal - self.filled = module.filled - self.getmask = module.getmask - self.getmaskarray = module.getmaskarray - self.id = id - self.inner = module.inner - self.make_mask = module.make_mask - self.masked = module.masked - self.masked_array = module.masked_array - self.masked_values = module.masked_values - self.mask_or = module.mask_or - self.nomask = module.nomask - self.ones = module.ones - self.outer = module.outer - self.repeat = module.repeat - self.resize = module.resize - self.sort = module.sort - self.take = module.take - self.transpose = module.transpose - self.zeros = module.zeros - self.MaskType = module.MaskType - try: - self.umath = module.umath - except AttributeError: - self.umath = module.core.umath - self.testnames = [] - - def assert_array_compare(self, comparison, x, y, err_msg='', header='', - fill_value=True): - """ - Assert that a comparison of two masked arrays is satisfied elementwise. 
- - """ - xf = self.filled(x) - yf = self.filled(y) - m = self.mask_or(self.getmask(x), self.getmask(y)) - - x = self.filled(self.masked_array(xf, mask=m), fill_value) - y = self.filled(self.masked_array(yf, mask=m), fill_value) - if (x.dtype.char != "O"): - x = x.astype(np.float64) - if isinstance(x, np.ndarray) and x.size > 1: - x[np.isnan(x)] = 0 - elif np.isnan(x): - x = 0 - if (y.dtype.char != "O"): - y = y.astype(np.float64) - if isinstance(y, np.ndarray) and y.size > 1: - y[np.isnan(y)] = 0 - elif np.isnan(y): - y = 0 - try: - cond = (x.shape == () or y.shape == ()) or x.shape == y.shape - if not cond: - msg = build_err_msg([x, y], - err_msg - + f'\n(shapes {x.shape}, {y.shape} mismatch)', - header=header, - names=('x', 'y')) - assert cond, msg - val = comparison(x, y) - if m is not self.nomask and fill_value: - val = self.masked_array(val, mask=m) - if isinstance(val, bool): - cond = val - reduced = [0] - else: - reduced = val.ravel() - cond = reduced.all() - reduced = reduced.tolist() - if not cond: - match = 100-100.0*reduced.count(1)/len(reduced) - msg = build_err_msg([x, y], - err_msg - + '\n(mismatch %s%%)' % (match,), - header=header, - names=('x', 'y')) - assert cond, msg - except ValueError as e: - msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) - raise ValueError(msg) from e - - def assert_array_equal(self, x, y, err_msg=''): - """ - Checks the elementwise equality of two masked arrays. 
- - """ - self.assert_array_compare(self.equal, x, y, err_msg=err_msg, - header='Arrays are not equal') - - @np.errstate(all='ignore') - def test_0(self): - """ - Tests creation - - """ - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - xm = self.masked_array(x, mask=m) - xm[0] - - @np.errstate(all='ignore') - def test_1(self): - """ - Tests creation - - """ - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = self.masked_array(x, mask=m1) - ym = self.masked_array(y, mask=m2) - xf = np.where(m1, 1.e+20, x) - xm.set_fill_value(1.e+20) - - assert (xm-ym).filled(0).any() - s = x.shape - assert xm.size == functools.reduce(lambda x, y: x*y, s) - assert self.count(xm) == len(m1) - functools.reduce(lambda x, y: x+y, m1) - - for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - assert self.count(xm) == len(m1) - functools.reduce(lambda x, y: x+y, m1) - - @np.errstate(all='ignore') - def test_2(self): - """ - Tests conversions and indexing. - - """ - x1 = np.array([1, 2, 4, 3]) - x2 = self.array(x1, mask=[1, 0, 0, 0]) - x3 = self.array(x1, mask=[0, 1, 0, 1]) - x4 = self.array(x1) - # test conversion to strings, no errors - str(x2) - repr(x2) - # tests of indexing - assert type(x2[1]) is type(x1[1]) - assert x1[1] == x2[1] - x1[2] = 9 - x2[2] = 9 - self.assert_array_equal(x1, x2) - x1[1:3] = 99 - x2[1:3] = 99 - x2[1] = self.masked - x2[1:3] = self.masked - x2[:] = x1 - x2[1] = self.masked - x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - x1 = np.arange(5)*1.0 - x2 = self.masked_values(x1, 3.0) - x1 = self.array([1, 'hello', 2, 3], object) - x2 = np.array([1, 'hello', 2, 3], object) - # check that no error occurs. 
- x1[1] - x2[1] - assert x1[1:1].shape == (0,) - # Tests copy-size - n = [0, 0, 1, 0, 0] - m = self.make_mask(n) - m2 = self.make_mask(m) - assert(m is m2) - m3 = self.make_mask(m, copy=1) - assert(m is not m3) - - @np.errstate(all='ignore') - def test_3(self): - """ - Tests resize/repeat - - """ - x4 = self.arange(4) - x4[2] = self.masked - y4 = self.resize(x4, (8,)) - assert self.allequal(self.concatenate([x4, x4]), y4) - assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) - y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) - self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) - y6 = self.repeat(x4, 2, axis=0) - assert self.allequal(y5, y6) - y7 = x4.repeat((2, 2, 2, 2), axis=0) - assert self.allequal(y5, y7) - y8 = x4.repeat(2, 0) - assert self.allequal(y5, y8) - - @np.errstate(all='ignore') - def test_4(self): - """ - Test of take, transpose, inner, outer products. - - """ - x = self.arange(24) - y = np.arange(24) - x[5:6] = self.masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) - assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) - assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), - self.inner(x, y)) - assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), - self.outer(x, y)) - y = self.array(['abc', 1, 'def', 2, 3], object) - y[2] = self.masked - t = self.take(y, [0, 3, 4]) - assert t[0] == 'abc' - assert t[1] == 2 - assert t[2] == 3 - - @np.errstate(all='ignore') - def test_5(self): - """ - Tests inplace w/ scalar - - """ - x = self.arange(10) - y = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x += 1 - assert self.allequal(x, y+1) - xm += 1 - assert self.allequal(xm, y+1) - - x = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x -= 1 - assert self.allequal(x, y-1) - xm -= 1 - assert self.allequal(xm, y-1) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = 
self.masked - x *= 2.0 - assert self.allequal(x, y*2) - xm *= 2.0 - assert self.allequal(xm, y*2) - - x = self.arange(10)*2 - xm = self.arange(10)*2 - xm[2] = self.masked - x /= 2 - assert self.allequal(x, y) - xm /= 2 - assert self.allequal(xm, y) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = self.masked - x /= 2.0 - assert self.allequal(x, y/2.0) - xm /= self.arange(10) - self.assert_array_equal(xm, self.ones((10,))) - - x = self.arange(10).astype(np.float64) - xm = self.arange(10) - xm[2] = self.masked - x += 1. - assert self.allequal(x, y + 1.) - - @np.errstate(all='ignore') - def test_6(self): - """ - Tests inplace w/ array - - """ - x = self.arange(10, dtype=np.float64) - y = self.arange(10) - xm = self.arange(10, dtype=np.float64) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=np.float64) - a[-1] = self.masked - x += a - xm += a - assert self.allequal(x, y+a) - assert self.allequal(xm, y+a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=np.float64) - xm = self.arange(10, dtype=np.float64) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=np.float64) - a[-1] = self.masked - x -= a - xm -= a - assert self.allequal(x, y-a) - assert self.allequal(xm, y-a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=np.float64) - xm = self.arange(10, dtype=np.float64) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=np.float64) - a[-1] = self.masked - x *= a - xm *= a - assert self.allequal(x, y*a) - assert self.allequal(xm, y*a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=np.float64) - xm = self.arange(10, dtype=np.float64) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=np.float64) - a[-1] = self.masked - x /= a - xm /= a - - @np.errstate(all='ignore') - def test_7(self): - "Tests ufunc" - d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), - self.array([1.0, 0, -1, 
pi/2]*2, mask=[1, 0]+[0]*6),) - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', -# 'sin', 'cos', 'tan', -# 'arcsin', 'arccos', 'arctan', -# 'sinh', 'cosh', 'tanh', -# 'arcsinh', -# 'arccosh', -# 'arctanh', -# 'absolute', 'fabs', 'negative', -# # 'nonzero', 'around', -# 'floor', 'ceil', -# # 'sometrue', 'alltrue', -# 'logical_not', -# 'add', 'subtract', 'multiply', -# 'divide', 'true_divide', 'floor_divide', -# 'remainder', 'fmod', 'hypot', 'arctan2', -# 'equal', 'not_equal', 'less_equal', 'greater_equal', -# 'less', 'greater', -# 'logical_and', 'logical_or', 'logical_xor', - ]: - try: - uf = getattr(self.umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(self.module, f) - args = d[:uf.nin] - ur = uf(*args) - mr = mf(*args) - self.assert_array_equal(ur.filled(0), mr.filled(0), f) - self.assert_array_equal(ur._mask, mr._mask) - - @np.errstate(all='ignore') - def test_99(self): - # test average - ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - self.assert_array_equal(2.0, self.average(ott, axis=0)) - self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) - result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) - self.assert_array_equal(2.0, result) - assert(wts == 4.0) - ott[:] = self.masked - assert(self.average(ott, axis=0) is self.masked) - ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - ott = ott.reshape(2, 2) - ott[:, 1] = self.masked - self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) - assert(self.average(ott, axis=1)[0] is self.masked) - self.assert_array_equal([2., 0.], self.average(ott, axis=0)) - result, wts = self.average(ott, axis=0, returned=1) - self.assert_array_equal(wts, [1., 0.]) - w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = self.arange(6) - self.assert_array_equal(self.average(x, axis=0), 2.5) - self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) - y = self.array([self.arange(6), 2.0*self.arange(6)]) - 
self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) - self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) - self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) - self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) - self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) - self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) - m1 = self.zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = self.ones(6) - m5 = [0, 1, 1, 1, 1, 1] - self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) - self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) - z = self.masked_array(y, m3) - self.assert_array_equal(self.average(z, None), 20./6.) 
- self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) - self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) - self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) - - @np.errstate(all='ignore') - def test_A(self): - x = self.arange(24) - x[5:6] = self.masked - x = x.reshape(2, 3, 4) - - -if __name__ == '__main__': - setup_base = ("from __main__ import ModuleTester \n" - "import numpy\n" - "tester = ModuleTester(module)\n") - setup_cur = "import numpy.ma.core as module\n" + setup_base - (nrepeat, nloop) = (10, 10) - - for i in range(1, 8): - func = 'tester.test_%i()' % i - cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) - cur = np.sort(cur) - print("#%i" % i + 50*'.') - print(eval("ModuleTester.test_%i.__doc__" % i)) - print(f'core_current : {cur[0]:.3f} - {cur[1]:.3f}') diff --git a/blimgui/dist64/numpy/matlib.py b/blimgui/dist64/numpy/matlib.py index f96582f..f7a9e33 100644 --- a/blimgui/dist64/numpy/matlib.py +++ b/blimgui/dist64/numpy/matlib.py @@ -10,16 +10,17 @@ PendingDeprecationWarning, stacklevel=2) import numpy as np -from numpy.matrixlib.defmatrix import matrix, asmatrix + # Matlib.py contains all functions in the numpy namespace with a few # replacements. See doc/source/reference/routines.matlib.rst for details. # Need * as we're copying the numpy namespace. from numpy import * # noqa: F403 +from numpy.matrixlib.defmatrix import asmatrix, matrix __version__ = np.__version__ -__all__ = np.__all__[:] # copy numpy namespace -__all__ += ['rand', 'randn', 'repmat'] +__all__ = ['rand', 'randn', 'repmat'] +__all__ += np.__all__ def empty(shape, dtype=None, order='C'): """Return a new matrix of given shape and type, without initializing entries. @@ -151,7 +152,7 @@ def zeros(shape, dtype=None, order='C'): a.fill(0) return a -def identity(n,dtype=None): +def identity(n, dtype=None): """ Returns the square identity matrix of given size. 
@@ -182,12 +183,12 @@ def identity(n,dtype=None): [0, 0, 1]]) """ - a = array([1]+n*[0], dtype=dtype) + a = array([1] + n * [0], dtype=dtype) b = empty((n, n), dtype=dtype) b.flat = a return b -def eye(n,M=None, k=0, dtype=float, order='C'): +def eye(n, M=None, k=0, dtype=float, order='C'): """ Return a matrix with ones on the diagonal and zeros elsewhere. diff --git a/blimgui/dist64/numpy/matlib.pyi b/blimgui/dist64/numpy/matlib.pyi index 446de9f..8618b4d 100644 --- a/blimgui/dist64/numpy/matlib.pyi +++ b/blimgui/dist64/numpy/matlib.pyi @@ -2,9 +2,7 @@ from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt - -# ruff: noqa: F401 -from numpy import ( +from numpy import ( # noqa: F401 False_, ScalarType, True_, @@ -224,7 +222,6 @@ from numpy import ( i0, iinfo, imag, - in1d, index_exp, indices, inexact, @@ -453,7 +450,6 @@ from numpy import ( trace, transpose, trapezoid, - trapz, tri, tril, tril_indices, @@ -509,19 +505,17 @@ __all__ += np.__all__ ### -_T = TypeVar("_T", bound=np.generic) -_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] _Order: TypeAlias = Literal["C", "F"] ### -# ruff: noqa: F811 - # @overload def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -529,7 +523,7 @@ def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... 
@overload -def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -537,7 +531,7 @@ def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C" @overload def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -545,7 +539,7 @@ def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... @overload -def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... @overload def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... @@ -559,11 +553,11 @@ def eye( order: _Order = "C", ) -> _Matrix[np.float64]: ... @overload -def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload -def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... 
@overload -def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... # @overload @@ -579,8 +573,8 @@ def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... # @overload -def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... +def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... @overload -def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... @overload def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/blimgui/dist64/numpy/matrixlib/__init__.py b/blimgui/dist64/numpy/matrixlib/__init__.py index 51e1cab..e376271 100644 --- a/blimgui/dist64/numpy/matrixlib/__init__.py +++ b/blimgui/dist64/numpy/matrixlib/__init__.py @@ -7,5 +7,6 @@ __all__ = defmatrix.__all__ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/blimgui/dist64/numpy/matrixlib/__init__.pyi b/blimgui/dist64/numpy/matrixlib/__init__.pyi index bc36bc0..cb5a842 100644 --- a/blimgui/dist64/numpy/matrixlib/__init__.pyi +++ b/blimgui/dist64/numpy/matrixlib/__init__.pyi @@ -1,4 +1,3 @@ -from numpy import matrix -from .defmatrix import bmat, asmatrix +from .defmatrix import asmatrix, bmat, matrix __all__ = ["matrix", "bmat", "asmatrix"] diff --git a/blimgui/dist64/numpy/matrixlib/defmatrix.py b/blimgui/dist64/numpy/matrixlib/defmatrix.py index 9984771..62312a8 100644 --- a/blimgui/dist64/numpy/matrixlib/defmatrix.py +++ b/blimgui/dist64/numpy/matrixlib/defmatrix.py @@ -1,12 +1,13 @@ __all__ = ['matrix', 'bmat', 'asmatrix'] +import ast import sys import warnings -import ast -from .._utils import set_module import numpy._core.numeric as N from numpy._core.numeric import concatenate, isscalar +from numpy._utils 
import set_module + # While not in __all__, matrix_power used to be defined here, so we import # it for backward compatibility. from numpy.linalg import matrix_power @@ -114,6 +115,7 @@ class matrix(N.ndarray): """ __array_priority__ = 10.0 + def __new__(subtype, data, dtype=None, copy=True): warnings.warn('the matrix subclass is not the recommended way to ' 'represent matrices or deal with linear algebra (see ' @@ -177,7 +179,7 @@ def __array_finalize__(self, obj): if (ndim == 2): return if (ndim > 2): - newshape = tuple([x for x in self.shape if x > 1]) + newshape = tuple(x for x in self.shape if x > 1) ndim = len(newshape) if ndim == 2: self.shape = newshape @@ -219,10 +221,10 @@ def __getitem__(self, index): return out def __mul__(self, other): - if isinstance(other, (N.ndarray, list, tuple)) : + if isinstance(other, (N.ndarray, list, tuple)): # This promotes 1-D vectors to row vectors return N.dot(self, asmatrix(other)) - if isscalar(other) or not hasattr(other, '__rmul__') : + if isscalar(other) or not hasattr(other, '__rmul__'): return N.dot(self, other) return NotImplemented @@ -249,9 +251,9 @@ def _align(self, axis): """ if axis is None: return self[0, 0] - elif axis==0: + elif axis == 0: return self - elif axis==1: + elif axis == 1: return self.transpose() else: raise ValueError("unsupported axis") @@ -324,7 +326,6 @@ def sum(self, axis=None, dtype=None, out=None): """ return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) - # To update docstring from array to matrix... def squeeze(self, axis=None): """ @@ -377,7 +378,6 @@ def squeeze(self, axis=None): """ return N.ndarray.squeeze(self, axis=axis) - # To update docstring from array to matrix... 
def flatten(self, order='C'): """ @@ -482,7 +482,8 @@ def std(self, axis=None, dtype=None, out=None, ddof=0): [ 1.11803399]]) """ - return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + return N.ndarray.std(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) def var(self, axis=None, dtype=None, out=None, ddof=0): """ @@ -516,7 +517,8 @@ def var(self, axis=None, dtype=None, out=None, ddof=0): [1.25]]) """ - return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + return N.ndarray.var(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) def prod(self, axis=None, dtype=None, out=None): """ @@ -796,7 +798,7 @@ def ptp(self, axis=None, out=None): return N.ptp(self, axis, out)._align(axis) @property - def I(self): + def I(self): # noqa: E743 """ Returns the (multiplicative) inverse of invertible `self`. @@ -899,7 +901,6 @@ def A1(self): """ return self.__array__().ravel() - def ravel(self, order='C'): """ Return a flattened matrix. 
diff --git a/blimgui/dist64/numpy/matrixlib/defmatrix.pyi b/blimgui/dist64/numpy/matrixlib/defmatrix.pyi index fbec8b1..825ab89 100644 --- a/blimgui/dist64/numpy/matrixlib/defmatrix.pyi +++ b/blimgui/dist64/numpy/matrixlib/defmatrix.pyi @@ -1,17 +1,218 @@ +from _typeshed import Incomplete from collections.abc import Mapping, Sequence -from typing import Any +from types import EllipsisType +from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, TypeAlias, overload +from typing_extensions import TypeVar -from numpy import matrix -from numpy._typing import ArrayLike, DTypeLike, NDArray +import numpy as np +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLikeInt_co, + _NestedSequence, + _ShapeLike, +) __all__ = ["asmatrix", "bmat", "matrix"] +_T = TypeVar("_T") +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +_2D: TypeAlias = tuple[int, int] +_Matrix: TypeAlias = matrix[_2D, np.dtype[_ScalarT]] +_ToIndex1: TypeAlias = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None +_ToIndex2: TypeAlias = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] + +class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] + + def __new__( + subtype, # pyright: ignore[reportSelfClsParameterName] + data: ArrayLike, + dtype: DTypeLike | None = None, + copy: bool = True, + ) -> _Matrix[Incomplete]: ... + + # + @overload # type: ignore[override] + def __getitem__( + self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / + ) -> Incomplete: ... 
+ @overload + def __getitem__(self, key: _ToIndex1 | _ToIndex2, /) -> matrix[_2D, _DTypeT_co]: ... + @overload + def __getitem__(self: _Matrix[np.void], key: str, /) -> _Matrix[Incomplete]: ... + @overload + def __getitem__(self: _Matrix[np.void], key: list[str], /) -> matrix[_2D, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rmul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `prod` and `mean` + @overload # type: ignore[override] + def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `mean` + @overload # type: ignore[override] + def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... 
+ @overload + def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `prod` + @overload # type: ignore[override] + def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `var` + @overload # type: ignore[override] + def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... + @overload + def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def std( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `std` + @overload # type: ignore[override] + def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... + @overload + def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... 
+ @overload + def var( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `all` + @overload # type: ignore[override] + def any(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... + @overload + def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `any` + @overload # type: ignore[override] + def all(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... + @overload + def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `min` and `ptp` + @overload # type: ignore[override] + def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `ptp` + @overload # type: ignore[override] + def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... 
+ @overload + def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `min` + @overload + def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmin` + @overload # type: ignore[override] + def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + @overload + def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... + @overload + def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + @overload + def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmax` + @overload # type: ignore[override] + def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... + @overload + def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + @overload + def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # the second overload handles the (rare) case that the matrix is not 2-d + @overload + def tolist(self: _Matrix[np.generic[_T]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload + def tolist(self) -> Incomplete: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + + # these three methods will at least return a `2-d` array of shape (1, n) + def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... + def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # matrix.T is inherited from _ScalarOrArrayCommon + def getT(self) -> Self: ... + @property + def I(self) -> _Matrix[Incomplete]: ... # noqa: E743 + def getI(self) -> _Matrix[Incomplete]: ... + @property + def A(self) -> np.ndarray[_2D, _DTypeT_co]: ... + def getA(self) -> np.ndarray[_2D, _DTypeT_co]: ... + @property + def A1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ... + def getA1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ... + @property + def H(self) -> matrix[_2D, _DTypeT_co]: ... + def getH(self) -> matrix[_2D, _DTypeT_co]: ... + def bmat( obj: str | Sequence[ArrayLike] | NDArray[Any], - ldict: None | Mapping[str, Any] = ..., - gdict: None | Mapping[str, Any] = ..., -) -> matrix[tuple[int, int], Any]: ... + ldict: Mapping[str, Any] | None = None, + gdict: Mapping[str, Any] | None = None, +) -> _Matrix[Incomplete]: ... -def asmatrix( - data: ArrayLike, dtype: DTypeLike = ... -) -> matrix[tuple[int, int], Any]: ... +def asmatrix(data: ArrayLike, dtype: DTypeLike | None = None) -> _Matrix[Incomplete]: ... 
diff --git a/blimgui/dist64/numpy/matrixlib/tests/test_defmatrix.py b/blimgui/dist64/numpy/matrixlib/tests/test_defmatrix.py index 026f995..ef4d77d 100644 --- a/blimgui/dist64/numpy/matrixlib/tests/test_defmatrix.py +++ b/blimgui/dist64/numpy/matrixlib/tests/test_defmatrix.py @@ -1,12 +1,17 @@ import collections.abc import numpy as np -from numpy import matrix, asmatrix, bmat -from numpy.testing import ( - assert_, assert_equal, assert_almost_equal, assert_array_equal, - assert_array_almost_equal, assert_raises - ) +from numpy import asmatrix, bmat, matrix from numpy.linalg import matrix_power +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + class TestCtor: def test_basic(self): @@ -47,11 +52,11 @@ def test_bmat_nondefault_str(self): [5, 6, 1, 2], [7, 8, 3, 4]]) assert_(np.all(bmat("A,A;A,A") == Aresult)) - assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) - assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) + assert_(np.all(bmat("A,A;A,A", ldict={'A': B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A': B}) assert_( - np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) - b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) + np.all(bmat("A,A;A,A", ldict={'A': A}, gdict={'A': B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A': A, 'B': B}, gdict={'C': B, 'D': A}) assert_(np.all(b2 == mixresult)) @@ -132,7 +137,7 @@ def test_basic(self): assert_(np.all(np.array(np.transpose(A) == mA.H))) assert_(np.all(A == mA.A)) - B = A + 2j*A + B = A + 2j * A mB = matrix(B) assert_(np.allclose(linalg.inv(B), mB.I)) assert_(np.all(np.array(np.transpose(B) == mB.T))) @@ -149,9 +154,9 @@ def test_comparisons(self): A = np.arange(100).reshape(10, 10) mA = matrix(A) mB = matrix(A) + 0.1 - assert_(np.all(mB == A+0.1)) - assert_(np.all(mB == matrix(A+0.1))) - assert_(not np.any(mB == matrix(A-0.1))) + assert_(np.all(mB == A + 
0.1)) + assert_(np.all(mB == matrix(A + 0.1))) + assert_(not np.any(mB == matrix(A - 0.1))) assert_(np.all(mA < mB)) assert_(np.all(mA <= mB)) assert_(np.all(mA <= mA)) @@ -199,7 +204,7 @@ def test_basic(self): mB = mB + O assert_(mB.dtype.type == np.float64) assert_(np.all(mA != mB)) - assert_(np.all(mB == mA+0.1)) + assert_(np.all(mB == mA + 0.1)) mC = mA.copy() O = np.ones((10, 10), np.complex128) @@ -228,11 +233,11 @@ def test_basic(self): assert_(np.allclose((mA * mA).A, np.dot(A, A))) assert_(np.allclose((mA + mA).A, (A + A))) - assert_(np.allclose((3*mA).A, (3*A))) + assert_(np.allclose((3 * mA).A, (3 * A))) mA2 = matrix(A) mA2 *= 3 - assert_(np.allclose(mA2.A, 3*A)) + assert_(np.allclose(mA2.A, 3 * A)) def test_pow(self): """Test raising a matrix to an integer power works as expected.""" @@ -264,7 +269,7 @@ def test_notimplemented(self): # __mul__ with something not a list, ndarray, tuple, or scalar with assert_raises(TypeError): - A*object() + A * object() class TestMatrixReturn: @@ -283,10 +288,10 @@ def test_instance_methods(self): 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', 'searchsorted', 'setflags', 'setfield', 'sort', - 'partition', 'argpartition', 'newbyteorder', 'to_device', - 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', + 'partition', 'argpartition', 'to_device', + 'take', 'tofile', 'tolist', 'tobytes', 'all', 'any', 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', - 'prod', 'std', 'ctypes', 'itemset', 'bitwise_count', + 'prod', 'std', 'ctypes', 'bitwise_count', ] for attrib in dir(a): if attrib.startswith('_') or attrib in excluded_methods: @@ -296,12 +301,9 @@ def test_instance_methods(self): # reset contents of a a.astype('f8') a.fill(1.0) - if attrib in methodargs: - args = methodargs[attrib] - else: - args = () + args = methodargs.get(attrib, ()) b = f(*args) - assert_(type(b) is matrix, "%s" % attrib) + assert_(type(b) is matrix, 
f"{attrib}") assert_(type(a.real) is matrix) assert_(type(a.imag) is matrix) c, d = matrix([0.0]).nonzero() @@ -342,10 +344,10 @@ def test_fancy_indexing(self): assert_equal(x, matrix([[3, 4, 3]])) x = a[[1, 0]] assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[3, 4], [1, 2]])) + assert_equal(x, matrix([[3, 4], [1, 2]])) x = a[[[1], [0]], [[1, 0], [0, 1]]] assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[4, 3], [1, 2]])) + assert_equal(x, matrix([[4, 3], [1, 2]])) def test_matrix_element(self): x = matrix([[1, 2, 3], [4, 5, 6]]) @@ -365,8 +367,8 @@ def test_scalar_indexing(self): def test_row_column_indexing(self): x = asmatrix(np.eye(2)) - assert_array_equal(x[0,:], [[1, 0]]) - assert_array_equal(x[1,:], [[0, 1]]) + assert_array_equal(x[0, :], [[1, 0]]) + assert_array_equal(x[1, :], [[0, 1]]) assert_array_equal(x[:, 0], [[1], [0]]) assert_array_equal(x[:, 1], [[0], [1]]) @@ -375,14 +377,14 @@ def test_boolean_indexing(self): A.shape = (3, 2) x = asmatrix(A) assert_array_equal(x[:, np.array([True, False])], x[:, 0]) - assert_array_equal(x[np.array([True, False, False]),:], x[0,:]) + assert_array_equal(x[np.array([True, False, False]), :], x[0, :]) def test_list_indexing(self): A = np.arange(6) A.shape = (3, 2) x = asmatrix(A) assert_array_equal(x[:, [1, 0]], x[:, ::-1]) - assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) + assert_array_equal(x[[2, 1, 0], :], x[::-1, :]) class TestPower: diff --git a/blimgui/dist64/numpy/matrixlib/tests/test_interaction.py b/blimgui/dist64/numpy/matrixlib/tests/test_interaction.py index fa6067b..2386cfb 100644 --- a/blimgui/dist64/numpy/matrixlib/tests/test_interaction.py +++ b/blimgui/dist64/numpy/matrixlib/tests/test_interaction.py @@ -2,15 +2,21 @@ Note that tests with MaskedArray and linalg are done in separate files. 
""" -import pytest - import textwrap import warnings +import pytest + import numpy as np -from numpy.testing import (assert_, assert_equal, assert_raises, - assert_raises_regex, assert_array_equal, - assert_almost_equal, assert_array_almost_equal) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) def test_fancy_indexing(): @@ -225,7 +231,7 @@ def test_nanfunctions_matrices_general(): assert_(res.shape == (3, 3)) res = f(mat) assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3*3)) + assert_(res.shape == (1, 3 * 3)) def test_average_matrix(): @@ -238,7 +244,7 @@ def test_average_matrix(): r = np.average(a, axis=0, weights=w) assert_equal(type(r), np.matrix) - assert_equal(r, [[2.5, 10.0/3]]) + assert_equal(r, [[2.5, 10.0 / 3]]) def test_dot_matrix(): @@ -255,8 +261,8 @@ def test_dot_matrix(): def test_ediff1d_matrix(): # 2018-04-29: moved here from core.tests.test_arraysetops. 
- assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix)) - assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix)) + assert isinstance(np.ediff1d(np.matrix(1)), np.matrix) + assert isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix) def test_apply_along_axis_matrix(): diff --git a/blimgui/dist64/numpy/matrixlib/tests/test_masked_matrix.py b/blimgui/dist64/numpy/matrixlib/tests/test_masked_matrix.py index 753c301..853b341 100644 --- a/blimgui/dist64/numpy/matrixlib/tests/test_masked_matrix.py +++ b/blimgui/dist64/numpy/matrixlib/tests/test_masked_matrix.py @@ -1,13 +1,22 @@ import pickle import numpy as np -from numpy.testing import assert_warns -from numpy.ma.testutils import (assert_, assert_equal, assert_raises, - assert_array_equal) -from numpy.ma.core import (masked_array, masked_values, masked, allequal, - MaskType, getmask, MaskedArray, nomask, - log, add, hypot, divide) +from numpy.ma.core import ( + MaskedArray, + MaskType, + add, + allequal, + divide, + getmask, + hypot, + log, + masked, + masked_array, + masked_values, + nomask, +) from numpy.ma.extras import mr_ +from numpy.ma.testutils import assert_, assert_array_equal, assert_equal, assert_raises class MMatrix(MaskedArray, np.matrix,): @@ -20,7 +29,6 @@ def __new__(cls, data, mask=nomask): def __array_finalize__(self, obj): np.matrix.__array_finalize__(self, obj) MaskedArray.__array_finalize__(self, obj) - return @property def _series(self): @@ -108,7 +116,7 @@ def test_flat(self): # Test setting test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) testflat = test.flat - testflat[:] = testflat[[2, 1, 0]] + testflat[:] = testflat[np.array([2, 1, 0])] assert_equal(test, control) testflat[0] = 9 # test that matrices keep the correct shape (#4615) @@ -174,40 +182,40 @@ def test_view(self): class TestSubclassing: # Test suite for masked subclasses of ndarray. 
- def setup_method(self): + def _create_data(self): x = np.arange(5, dtype='float') mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) + return x, mx def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray - (x, mx) = self.data + mx = self._create_data()[1] assert_(isinstance(mx._data, np.matrix)) def test_masked_unary_operations(self): # Tests masked_unary_operation - (x, mx) = self.data + x, mx = self._create_data() with np.errstate(divide='ignore'): assert_(isinstance(log(mx), MMatrix)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() # Result should be a MMatrix assert_(isinstance(add(mx, mx), MMatrix)) assert_(isinstance(add(mx, x), MMatrix)) # Result should work - assert_equal(add(mx, x), mx+x) + assert_equal(add(mx, x), mx + x) assert_(isinstance(add(mx, mx)._data, np.matrix)) - with assert_warns(DeprecationWarning): - assert_(isinstance(add.outer(mx, mx), MMatrix)) + with assert_raises(TypeError): + add.outer(mx, mx) assert_(isinstance(hypot(mx, mx), MMatrix)) assert_(isinstance(hypot(mx, x), MMatrix)) def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() xmx = masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), MMatrix)) assert_(isinstance(divide(mx, x), MMatrix)) diff --git a/blimgui/dist64/numpy/matrixlib/tests/test_matrix_linalg.py b/blimgui/dist64/numpy/matrixlib/tests/test_matrix_linalg.py index abfa018..b86f74f 100644 --- a/blimgui/dist64/numpy/matrixlib/tests/test_matrix_linalg.py +++ b/blimgui/dist64/numpy/matrixlib/tests/test_matrix_linalg.py @@ -1,12 +1,26 @@ """ Test functions for linalg module using the matrix class.""" -import numpy as np +import pytest +import numpy as np from numpy.linalg.tests.test_linalg import ( - LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase, - _TestNorm2D, 
_TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, - SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, - PinvCases, DetCases, LstsqCases) - + CondCases, + DetCases, + EigCases, + EigvalsCases, + InvCases, + LinalgCase, + LinalgTestCase, + LstsqCases, + PinvCases, + SolveCases, + SVDCases, + TestQR as _TestQR, + _TestNorm2D, + _TestNormDoubleBase, + _TestNormInt64Base, + _TestNormSingleBase, + apply_tag, +) CASES = [] @@ -69,6 +83,9 @@ class TestDetMatrix(DetCases, MatrixTestCase): pass +@pytest.mark.thread_unsafe( + reason="residuals not calculated properly for square tests (gh-29851)" +) class TestLstsqMatrix(LstsqCases, MatrixTestCase): pass diff --git a/blimgui/dist64/numpy/matrixlib/tests/test_multiarray.py b/blimgui/dist64/numpy/matrixlib/tests/test_multiarray.py index 71198be..5ce4300 100644 --- a/blimgui/dist64/numpy/matrixlib/tests/test_multiarray.py +++ b/blimgui/dist64/numpy/matrixlib/tests/test_multiarray.py @@ -1,5 +1,6 @@ import numpy as np -from numpy.testing import assert_, assert_equal, assert_array_equal +from numpy.testing import assert_, assert_array_equal, assert_equal + class TestView: def test_type(self): diff --git a/blimgui/dist64/numpy/matrixlib/tests/test_numeric.py b/blimgui/dist64/numpy/matrixlib/tests/test_numeric.py index 77525af..94a46d8 100644 --- a/blimgui/dist64/numpy/matrixlib/tests/test_numeric.py +++ b/blimgui/dist64/numpy/matrixlib/tests/test_numeric.py @@ -1,14 +1,15 @@ import numpy as np from numpy.testing import assert_equal + class TestDot: def test_matscalar(self): b1 = np.matrix(np.ones((3, 3), dtype=complex)) - assert_equal(b1*1.0, b1) + assert_equal(b1 * 1.0, b1) def test_diagonal(): - b1 = np.matrix([[1,2],[3,4]]) + b1 = np.matrix([[1, 2], [3, 4]]) diag_b1 = np.matrix([[1, 4]]) array_b1 = np.array([1, 4]) diff --git a/blimgui/dist64/numpy/matrixlib/tests/test_regression.py b/blimgui/dist64/numpy/matrixlib/tests/test_regression.py index 092bc7c..9ac71b8 100644 --- 
a/blimgui/dist64/numpy/matrixlib/tests/test_regression.py +++ b/blimgui/dist64/numpy/matrixlib/tests/test_regression.py @@ -20,7 +20,7 @@ def test_matrix_properties(self): def test_matrix_multiply_by_1d_vector(self): # Ticket #473 def mul(): - np.asmatrix(np.eye(2))*np.ones(2) + np.asmatrix(np.eye(2)) * np.ones(2) assert_raises(ValueError, mul) diff --git a/blimgui/dist64/numpy/polynomial/__init__.py b/blimgui/dist64/numpy/polynomial/__init__.py index 301e176..a46e16c 100644 --- a/blimgui/dist64/numpy/polynomial/__init__.py +++ b/blimgui/dist64/numpy/polynomial/__init__.py @@ -69,7 +69,6 @@ - ``Poly.window`` -- Default window - ``Poly.basis_name`` -- String used to represent the basis - ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed -- ``Poly.nickname`` -- String used in printing Creation -------- @@ -115,14 +114,14 @@ - ``p.truncate(size)`` -- Truncate ``p`` to given size """ -from .polynomial import Polynomial from .chebyshev import Chebyshev -from .legendre import Legendre from .hermite import Hermite from .hermite_e import HermiteE from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial -__all__ = [ +__all__ = [ # noqa: F822 "set_default_printstyle", "polynomial", "Polynomial", "chebyshev", "Chebyshev", @@ -183,5 +182,6 @@ def set_default_printstyle(style): from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/blimgui/dist64/numpy/polynomial/__init__.pyi b/blimgui/dist64/numpy/polynomial/__init__.pyi index 681d0fa..ec08b12 100644 --- a/blimgui/dist64/numpy/polynomial/__init__.pyi +++ b/blimgui/dist64/numpy/polynomial/__init__.pyi @@ -1,24 +1,31 @@ from typing import Final, Literal -from .polynomial import Polynomial +from . 
import chebyshev, hermite, hermite_e, laguerre, legendre, polynomial from .chebyshev import Chebyshev -from .legendre import Legendre from .hermite import Hermite from .hermite_e import HermiteE from .laguerre import Laguerre -from . import polynomial, chebyshev, legendre, hermite, hermite_e, laguerre +from .legendre import Legendre +from .polynomial import Polynomial __all__ = [ "set_default_printstyle", - "polynomial", "Polynomial", - "chebyshev", "Chebyshev", - "legendre", "Legendre", - "hermite", "Hermite", - "hermite_e", "HermiteE", - "laguerre", "Laguerre", + "polynomial", + "Polynomial", + "chebyshev", + "Chebyshev", + "legendre", + "Legendre", + "hermite", + "Hermite", + "hermite_e", + "HermiteE", + "laguerre", + "Laguerre", ] def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... from numpy._pytesttester import PytestTester as _PytestTester + test: Final[_PytestTester] diff --git a/blimgui/dist64/numpy/polynomial/_polybase.py b/blimgui/dist64/numpy/polynomial/_polybase.py index 5e1950f..08a82b1 100644 --- a/blimgui/dist64/numpy/polynomial/_polybase.py +++ b/blimgui/dist64/numpy/polynomial/_polybase.py @@ -6,12 +6,13 @@ abc module from the stdlib, hence it is only available for Python >= 2.6. """ -import os import abc import numbers -from typing import Callable +import os +from collections.abc import Callable import numpy as np + from . import polyutils as pu __all__ = ['ABCPolyBase'] @@ -199,12 +200,10 @@ def has_samecoef(self, other): True if the coefficients are the same, False otherwise. """ - if len(self.coef) != len(other.coef): - return False - elif not np.all(self.coef == other.coef): - return False - else: - return True + return ( + len(self.coef) == len(other.coef) + and np.all(self.coef == other.coef) + ) def has_samedomain(self, other): """Check if domains match. 
@@ -432,7 +431,7 @@ def _repr_latex_term(cls, i, arg_str, needs_parens): def _repr_latex_scalar(x, parens=False): # TODO: we're stuck with disabling math formatting until we handle # exponents in this function - return r'\text{{{}}}'.format(pu.format_float(x, parens=parens)) + return fr'\text{{{pu.format_float(x, parens=parens)}}}' def _format_term(self, scalar_format: Callable, off: float, scale: float): """ Format a single term in the expansion """ @@ -493,8 +492,6 @@ def _repr_latex_(self): return rf"${self.symbol} \mapsto {body}$" - - # Pickle and copy def __getstate__(self): @@ -615,10 +612,6 @@ def __rmul__(self, other): return NotImplemented return self.__class__(coef, self.domain, self.window, self.symbol) - def __rdiv__(self, other): - # set to __floordiv__ /. - return self.__rfloordiv__(other) - def __rtruediv__(self, other): # An instance of ABCPolyBase is not considered a # Number. @@ -687,6 +680,7 @@ def degree(self): Create a polynomial object for ``1 + 7*x + 4*x**2``: + >>> np.polynomial.set_default_printstyle("unicode") >>> poly = np.polynomial.Polynomial([1, 7, 4]) >>> print(poly) 1.0 + 7.0·x + 4.0·x² @@ -877,8 +871,8 @@ def integ(self, m=1, k=[], lbnd=None): if lbnd is None: lbnd = 0 else: - lbnd = off + scl*lbnd - coef = self._int(self.coef, m, k, lbnd, 1./scl) + lbnd = off + scl * lbnd + coef = self._int(self.coef, m, k, lbnd, 1. / scl) return self.__class__(coef, self.domain, self.window, self.symbol) def deriv(self, m=1): @@ -1022,7 +1016,7 @@ class domain in NumPy 1.4 and ``None`` in later versions. 
if domain[0] == domain[1]: domain[0] -= 1 domain[1] += 1 - elif type(domain) is list and len(domain) == 0: + elif isinstance(domain, list) and len(domain) == 0: domain = cls.domain if window is None: @@ -1070,7 +1064,7 @@ def fromroots(cls, roots, domain=[], window=None, symbol='x'): [roots] = pu.as_series([roots], trim=False) if domain is None: domain = pu.getdomain(roots) - elif type(domain) is list and len(domain) == 0: + elif isinstance(domain, list) and len(domain) == 0: domain = cls.domain if window is None: @@ -1078,7 +1072,7 @@ def fromroots(cls, roots, domain=[], window=None, symbol='x'): deg = len(roots) off, scl = pu.mapparms(domain, window) - rnew = off + scl*roots + rnew = off + scl * roots coef = cls._fromroots(rnew) / scl**deg return cls(coef, domain=domain, window=window, symbol=symbol) @@ -1154,7 +1148,7 @@ def basis(cls, deg, domain=None, window=None, symbol='x'): if ideg != deg or ideg < 0: raise ValueError("deg must be non-negative integer") - return cls([0]*ideg + [1], domain, window, symbol) + return cls([0] * ideg + [1], domain, window, symbol) @classmethod def cast(cls, series, domain=None, window=None): diff --git a/blimgui/dist64/numpy/polynomial/_polybase.pyi b/blimgui/dist64/numpy/polynomial/_polybase.pyi index ccef24a..e8a4aa5 100644 --- a/blimgui/dist64/numpy/polynomial/_polybase.pyi +++ b/blimgui/dist64/numpy/polynomial/_polybase.pyi @@ -1,287 +1,262 @@ import abc import decimal -import numbers -from collections.abc import Iterator, Mapping, Sequence +from collections.abc import Iterator, Sequence from typing import ( Any, ClassVar, - Final, Generic, Literal, + Self, SupportsIndex, TypeAlias, - TypeGuard, overload, ) +from typing_extensions import TypeIs, TypeVar import numpy as np import numpy.typing as npt from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, _FloatLike_co, _NumberLike_co, - - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, ) from ._polytypes import ( _AnyInt, - _CoefLike_co, - _Array2, - _Tuple2, - 
- _Series, + _ArrayLikeCoef_co, + _ArrayLikeCoefObject_co, + _CoefLike_co, _CoefSeries, - - _SeriesLikeInt_co, + _Series, _SeriesLikeCoef_co, - - _ArrayLikeCoefObject_co, - _ArrayLikeCoef_co, + _SeriesLikeInt_co, + _Tuple2, ) -from typing_extensions import LiteralString, TypeVar - - -__all__: Final[Sequence[str]] = ("ABCPolyBase",) - - -_NameCo = TypeVar("_NameCo", bound=LiteralString | None, covariant=True, default=LiteralString | None) -_Self = TypeVar("_Self") -_Other = TypeVar("_Other", bound=ABCPolyBase) +__all__ = ["ABCPolyBase"] +_NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True) +_PolyT = TypeVar("_PolyT", bound=ABCPolyBase) _AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co -_Hundred: TypeAlias = Literal[100] +class ABCPolyBase(Generic[_NameT_co], abc.ABC): + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __array_ufunc__: ClassVar[None] = None + maxpower: ClassVar[Literal[100]] = 100 -class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): - __hash__: ClassVar[None] # type: ignore[assignment] - __array_ufunc__: ClassVar[None] + _superscript_mapping: ClassVar[dict[int, str]] = ... + _subscript_mapping: ClassVar[dict[int, str]] = ... + _use_unicode: ClassVar[bool] = ... - maxpower: ClassVar[_Hundred] - _superscript_mapping: ClassVar[Mapping[int, str]] - _subscript_mapping: ClassVar[Mapping[int, str]] - _use_unicode: ClassVar[bool] + _symbol: str + @property + def symbol(self, /) -> str: ... + @property + @abc.abstractmethod + def domain(self) -> _Array2[np.float64 | Any]: ... + @property + @abc.abstractmethod + def window(self) -> _Array2[np.float64 | Any]: ... + @property + @abc.abstractmethod + def basis_name(self) -> _NameT_co: ... 
- basis_name: _NameCo coef: _CoefSeries - domain: _Array2[np.inexact[Any] | np.object_] - window: _Array2[np.inexact[Any] | np.object_] - - _symbol: LiteralString - @property - def symbol(self, /) -> LiteralString: ... def __init__( self, /, coef: _SeriesLikeCoef_co, - domain: None | _SeriesLikeCoef_co = ..., - window: None | _SeriesLikeCoef_co = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> None: ... + # @overload - def __call__(self, /, arg: _Other) -> _Other: ... - # TODO: Once `_ShapeType@ndarray` is covariant and bounded (see #26081), - # additionally include 0-d arrays as input types with scalar return type. + def __call__(self, /, arg: _PolyT) -> _PolyT: ... @overload - def __call__( - self, - /, - arg: _FloatLike_co | decimal.Decimal | numbers.Real | np.object_, - ) -> np.float64 | np.complex128: ... + def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ... @overload - def __call__( - self, - /, - arg: _NumberLike_co | numbers.Complex, - ) -> np.complex128: ... + def __call__(self, /, arg: _NumberLike_co) -> np.complex128 | Any: ... @overload - def __call__(self, /, arg: _ArrayLikeFloat_co) -> ( - npt.NDArray[np.float64] - | npt.NDArray[np.complex128] - | npt.NDArray[np.object_] - ): ... + def __call__(self, /, arg: _ArrayLikeFloat_co) -> npt.NDArray[np.float64 | Any]: ... @overload - def __call__( - self, - /, - arg: _ArrayLikeComplex_co, - ) -> npt.NDArray[np.complex128] | npt.NDArray[np.object_]: ... + def __call__(self, /, arg: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128 | Any]: ... @overload - def __call__( - self, - /, - arg: _ArrayLikeCoefObject_co, - ) -> npt.NDArray[np.object_]: ... - - def __format__(self, fmt_str: str, /) -> str: ... - def __eq__(self, x: object, /) -> bool: ... - def __ne__(self, x: object, /) -> bool: ... - def __neg__(self: _Self, /) -> _Self: ... - def __pos__(self: _Self, /) -> _Self: ... 
- def __add__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __sub__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __mul__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __truediv__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __floordiv__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __mod__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __divmod__(self: _Self, x: _AnyOther, /) -> _Tuple2[_Self]: ... - def __pow__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __radd__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __rsub__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __rmul__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __rtruediv__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __rfloordiv__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __rmod__(self: _Self, x: _AnyOther, /) -> _Self: ... - def __rdivmod__(self: _Self, x: _AnyOther, /) -> _Tuple2[_Self]: ... + def __call__(self, /, arg: _ArrayLikeCoefObject_co) -> npt.NDArray[np.object_]: ... + + # unary ops + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + + # binary ops + def __add__(self, x: _AnyOther, /) -> Self: ... + def __sub__(self, x: _AnyOther, /) -> Self: ... + def __mul__(self, x: _AnyOther, /) -> Self: ... + def __pow__(self, x: _AnyOther, /) -> Self: ... + def __truediv__(self, x: _AnyOther, /) -> Self: ... + def __floordiv__(self, x: _AnyOther, /) -> Self: ... + def __mod__(self, x: _AnyOther, /) -> Self: ... + def __divmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + + # reflected binary ops + def __radd__(self, x: _AnyOther, /) -> Self: ... + def __rsub__(self, x: _AnyOther, /) -> Self: ... + def __rmul__(self, x: _AnyOther, /) -> Self: ... + def __rtruediv__(self, x: _AnyOther, /) -> Self: ... + def __rfloordiv__(self, x: _AnyOther, /) -> Self: ... + def __rmod__(self, x: _AnyOther, /) -> Self: ... + def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + + # iterable and sized def __len__(self, /) -> int: ... 
- def __iter__(self, /) -> Iterator[np.inexact[Any] | object]: ... + def __iter__(self, /) -> Iterator[np.float64 | Any]: ... + + # pickling def __getstate__(self, /) -> dict[str, Any]: ... def __setstate__(self, dict: dict[str, Any], /) -> None: ... + # def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... - @overload - def has_sametype(self: _Self, /, other: ABCPolyBase) -> TypeGuard[_Self]: ... - @overload - def has_sametype(self, /, other: object) -> Literal[False]: ... + def has_sametype(self, /, other: object) -> TypeIs[Self]: ... - def copy(self: _Self, /) -> _Self: ... + # + def copy(self, /) -> Self: ... def degree(self, /) -> int: ... - def cutdeg(self: _Self, /) -> _Self: ... - def trim(self: _Self, /, tol: _FloatLike_co = ...) -> _Self: ... - def truncate(self: _Self, /, size: _AnyInt) -> _Self: ... + def cutdeg(self, /, deg: int) -> Self: ... + def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... + def truncate(self, /, size: _AnyInt) -> Self: ... + # @overload def convert( self, - domain: None | _SeriesLikeCoef_co, - kind: type[_Other], /, - window: None | _SeriesLikeCoef_co = ..., - ) -> _Other: ... + domain: _SeriesLikeCoef_co | None, + kind: type[_PolyT], + window: _SeriesLikeCoef_co | None = None, + ) -> _PolyT: ... @overload def convert( self, /, - domain: None | _SeriesLikeCoef_co = ..., + domain: _SeriesLikeCoef_co | None = None, *, - kind: type[_Other], - window: None | _SeriesLikeCoef_co = ..., - ) -> _Other: ... + kind: type[_PolyT], + window: _SeriesLikeCoef_co | None = None, + ) -> _PolyT: ... @overload def convert( - self: _Self, + self, /, - domain: None | _SeriesLikeCoef_co = ..., - kind: None | type[_Self] = ..., - window: None | _SeriesLikeCoef_co = ..., - ) -> _Self: ... + domain: _SeriesLikeCoef_co | None = None, + kind: None = None, + window: _SeriesLikeCoef_co | None = None, + ) -> Self: ... 
+ # def mapparms(self, /) -> _Tuple2[Any]: ... - def integ( - self: _Self, /, - m: SupportsIndex = ..., - k: _CoefLike_co | _SeriesLikeCoef_co = ..., - lbnd: None | _CoefLike_co = ..., - ) -> _Self: ... - - def deriv(self: _Self, /, m: SupportsIndex = ...) -> _Self: ... - + self, + /, + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co | None = None, + ) -> Self: ... + def deriv(self, /, m: SupportsIndex = 1) -> Self: ... def roots(self, /) -> _CoefSeries: ... - def linspace( - self, /, - n: SupportsIndex = ..., - domain: None | _SeriesLikeCoef_co = ..., + self, + /, + n: SupportsIndex = 100, + domain: _SeriesLikeCoef_co | None = None, ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... + # @overload @classmethod def fit( - cls: type[_Self], /, + cls, x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: int | _SeriesLikeInt_co, - domain: None | _SeriesLikeCoef_co = ..., - rcond: _FloatLike_co = ..., - full: Literal[False] = ..., - w: None | _SeriesLikeCoef_co = ..., - window: None | _SeriesLikeCoef_co = ..., - symbol: str = ..., - ) -> _Self: ... + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... @overload @classmethod def fit( - cls: type[_Self], /, + cls, x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: int | _SeriesLikeInt_co, - domain: None | _SeriesLikeCoef_co = ..., - rcond: _FloatLike_co = ..., + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, *, full: Literal[True], - w: None | _SeriesLikeCoef_co = ..., - window: None | _SeriesLikeCoef_co = ..., - symbol: str = ..., - ) -> tuple[_Self, Sequence[np.inexact[Any] | np.int32]]: ... + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... 
@overload @classmethod def fit( - cls: type[_Self], + cls, x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: int | _SeriesLikeInt_co, - domain: None | _SeriesLikeCoef_co, + domain: _SeriesLikeCoef_co | None, rcond: _FloatLike_co, - full: Literal[True], /, - w: None | _SeriesLikeCoef_co = ..., - window: None | _SeriesLikeCoef_co = ..., - symbol: str = ..., - ) -> tuple[_Self, Sequence[np.inexact[Any] | np.int32]]: ... + full: Literal[True], + /, + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... + # @classmethod def fromroots( - cls: type[_Self], /, + cls, roots: _ArrayLikeCoef_co, - domain: None | _SeriesLikeCoef_co = ..., - window: None | _SeriesLikeCoef_co = ..., - symbol: str = ..., - ) -> _Self: ... - + domain: _SeriesLikeCoef_co | None = [], + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... @classmethod def identity( - cls: type[_Self], /, - domain: None | _SeriesLikeCoef_co = ..., - window: None | _SeriesLikeCoef_co = ..., - symbol: str = ..., - ) -> _Self: ... - + cls, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... @classmethod def basis( - cls: type[_Self], /, + cls, deg: _AnyInt, - domain: None | _SeriesLikeCoef_co = ..., - window: None | _SeriesLikeCoef_co = ..., - symbol: str = ..., - ) -> _Self: ... - + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... @classmethod def cast( - cls: type[_Self], /, + cls, series: ABCPolyBase, - domain: None | _SeriesLikeCoef_co = ..., - window: None | _SeriesLikeCoef_co = ..., - ) -> _Self: ... - + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + ) -> Self: ... @classmethod def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... 
- @staticmethod - def _str_term_ascii(i: str, arg_str: str) -> str: ... - @staticmethod - def _repr_latex_term(i: str, arg_str: str, needs_parens: bool) -> str: ... + @classmethod + def _str_term_ascii(cls, /, i: str, arg_str: str) -> str: ... + @classmethod + def _repr_latex_term(cls, /, i: str, arg_str: str, needs_parens: bool) -> str: ... diff --git a/blimgui/dist64/numpy/polynomial/_polytypes.pyi b/blimgui/dist64/numpy/polynomial/_polytypes.pyi index 6d3297c..10a92d1 100644 --- a/blimgui/dist64/numpy/polynomial/_polytypes.pyi +++ b/blimgui/dist64/numpy/polynomial/_polytypes.pyi @@ -1,12 +1,16 @@ -from collections.abc import Callable, Sequence +# ruff: noqa: PYI046 + +from collections.abc import Sequence from typing import ( Any, Literal, NoReturn, Protocol, + Self, SupportsIndex, SupportsInt, TypeAlias, + TypeVar, overload, type_check_only, ) @@ -14,62 +18,53 @@ from typing import ( import numpy as np import numpy.typing as npt from numpy._typing import ( + _ArrayLikeComplex_co, # array-likes _ArrayLikeFloat_co, - _ArrayLikeComplex_co, _ArrayLikeNumber_co, - _ArrayLikeObject_co, - _NestedSequence, - _SupportsArray, - + _ComplexLike_co, + _FloatLike_co, # scalar-likes _IntLike_co, - _FloatLike_co, - _ComplexLike_co, + _NestedSequence, _NumberLike_co, + _SupportsArray, ) -from typing_extensions import LiteralString, TypeVar - - _T = TypeVar("_T") _T_contra = TypeVar("_T_contra", contravariant=True) -_Self = TypeVar("_Self") -_SCT = TypeVar("_SCT", bound=np.number[Any] | np.bool | np.object_) +_ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) # compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase @type_check_only class _SupportsCoefOps(Protocol[_T_contra]): def __eq__(self, x: object, /) -> bool: ... def __ne__(self, x: object, /) -> bool: ... - - def __neg__(self: _Self, /) -> _Self: ... - def __pos__(self: _Self, /) -> _Self: ... - - def __add__(self: _Self, x: _T_contra, /) -> _Self: ... 
- def __sub__(self: _Self, x: _T_contra, /) -> _Self: ... - def __mul__(self: _Self, x: _T_contra, /) -> _Self: ... - def __pow__(self: _Self, x: _T_contra, /) -> _Self | float: ... - - def __radd__(self: _Self, x: _T_contra, /) -> _Self: ... - def __rsub__(self: _Self, x: _T_contra, /) -> _Self: ... - def __rmul__(self: _Self, x: _T_contra, /) -> _Self: ... - -_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] - -_FloatSeries: TypeAlias = _Series[np.floating[Any]] -_ComplexSeries: TypeAlias = _Series[np.complexfloating[Any, Any]] + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __add__(self, x: _T_contra, /) -> Self: ... + def __sub__(self, x: _T_contra, /) -> Self: ... + def __mul__(self, x: _T_contra, /) -> Self: ... + def __pow__(self, x: _T_contra, /) -> Self | float: ... + def __radd__(self, x: _T_contra, /) -> Self: ... + def __rsub__(self, x: _T_contra, /) -> Self: ... + def __rmul__(self, x: _T_contra, /) -> Self: ... + +_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_FloatSeries: TypeAlias = _Series[np.floating] +_ComplexSeries: TypeAlias = _Series[np.complexfloating] _ObjectSeries: TypeAlias = _Series[np.object_] -_CoefSeries: TypeAlias = _Series[np.inexact[Any] | np.object_] +_CoefSeries: TypeAlias = _Series[np.inexact | np.object_] -_FloatArray: TypeAlias = npt.NDArray[np.floating[Any]] -_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] +_FloatArray: TypeAlias = npt.NDArray[np.floating] +_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating] _ObjectArray: TypeAlias = npt.NDArray[np.object_] -_CoefArray: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] +_CoefArray: TypeAlias = npt.NDArray[np.inexact | np.object_] _Tuple2: TypeAlias = tuple[_T, _T] -_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] -_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] +_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_ScalarT]] +_Array2: 
TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_ScalarT]] _AnyInt: TypeAlias = SupportsInt | SupportsIndex @@ -77,76 +72,33 @@ _CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] _CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. -_SeriesLikeBool_co: TypeAlias = ( - _SupportsArray[np.dtype[np.bool]] - | Sequence[bool | np.bool] -) -_SeriesLikeInt_co: TypeAlias = ( - _SupportsArray[np.dtype[np.integer[Any] | np.bool]] - | Sequence[_IntLike_co] -) -_SeriesLikeFloat_co: TypeAlias = ( - _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any] | np.bool]] - | Sequence[_FloatLike_co] -) -_SeriesLikeComplex_co: TypeAlias = ( - _SupportsArray[np.dtype[np.inexact[Any] | np.integer[Any] | np.bool]] - | Sequence[_ComplexLike_co] -) -_SeriesLikeObject_co: TypeAlias = ( - _SupportsArray[np.dtype[np.object_]] - | Sequence[_CoefObjectLike_co] -) -_SeriesLikeCoef_co: TypeAlias = ( - _SupportsArray[np.dtype[np.number[Any] | np.bool | np.object_]] - | Sequence[_CoefLike_co] -) +_SeriesLikeBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +_SeriesLikeInt_co: TypeAlias = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +_SeriesLikeFloat_co: TypeAlias = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +_SeriesLikeComplex_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +_SeriesLikeObject_co: TypeAlias = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +_SeriesLikeCoef_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool | np.object_]] | Sequence[_CoefLike_co] -_ArrayLikeCoefObject_co: TypeAlias = ( - _CoefObjectLike_co - | _SeriesLikeObject_co - | _NestedSequence[_SeriesLikeObject_co] -) -_ArrayLikeCoef_co: TypeAlias = ( - npt.NDArray[np.number[Any] | np.bool | np.object_] - | _ArrayLikeNumber_co - | _ArrayLikeCoefObject_co 
-) - -_Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True, default=LiteralString) - -@type_check_only -class _Named(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... +_ArrayLikeCoefObject_co: TypeAlias = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +_ArrayLikeCoef_co: TypeAlias = npt.NDArray[np.number | np.bool | np.object_] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co -_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] +_Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @type_check_only -class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): +class _FuncLine(Protocol): @overload - def __call__(self, /, off: _SCT, scl: _SCT) -> _Line[_SCT]: ... + def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... @overload - def __call__(self, /, off: int, scl: int) -> _Line[np.int_] : ... + def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... @overload def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... @overload - def __call__( - self, - /, - off: complex, - scl: complex, - ) -> _Line[np.complex128]: ... + def __call__(self, /, off: complex, scl: complex) -> _Line[np.complex128]: ... @overload - def __call__( - self, - /, - off: _SupportsCoefOps[Any], - scl: _SupportsCoefOps[Any], - ) -> _Line[np.object_]: ... + def __call__(self, /, off: _SupportsCoefOps[Any], scl: _SupportsCoefOps[Any]) -> _Line[np.object_]: ... @type_check_only -class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncFromRoots(Protocol): @overload def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -155,38 +107,18 @@ class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... 
@type_check_only -class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): +class _FuncBinOp(Protocol): @overload - def __call__( - self, - /, - c1: _SeriesLikeBool_co, - c2: _SeriesLikeBool_co, - ) -> NoReturn: ... + def __call__(self, /, c1: _SeriesLikeBool_co, c2: _SeriesLikeBool_co) -> NoReturn: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeFloat_co, - c2: _SeriesLikeFloat_co, - ) -> _FloatSeries: ... + def __call__(self, /, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeComplex_co, - c2: _SeriesLikeComplex_co, - ) -> _ComplexSeries: ... + def __call__(self, /, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, - ) -> _ObjectSeries: ... + def __call__(self, /, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): +class _FuncUnOp(Protocol): @overload def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -195,7 +127,7 @@ class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPoly2Ortho(Protocol): @overload def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -204,253 +136,112 @@ class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPow(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - pow: _IntLike_co, - maxpower: None | _IntLike_co = ..., - ) -> _FloatSeries: ... + def __call__(self, /, c: _SeriesLikeFloat_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) 
-> _FloatSeries: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - pow: _IntLike_co, - maxpower: None | _IntLike_co = ..., - ) -> _ComplexSeries: ... + def __call__(self, /, c: _SeriesLikeComplex_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ComplexSeries: ... @overload - def __call__( - self, - /, - c: _SeriesLikeCoef_co, - pow: _IntLike_co, - maxpower: None | _IntLike_co = ..., - ) -> _ObjectSeries: ... + def __call__(self, /, c: _SeriesLikeCoef_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ObjectSeries: ... @type_check_only -class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): +class _FuncDer(Protocol): @overload def __call__( self, /, c: _ArrayLikeFloat_co, - m: SupportsIndex = ..., - scl: _FloatLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, ) -> _FloatArray: ... @overload def __call__( self, /, c: _ArrayLikeComplex_co, - m: SupportsIndex = ..., - scl: _ComplexLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, ) -> _ComplexArray: ... @overload def __call__( self, /, c: _ArrayLikeCoef_co, - m: SupportsIndex = ..., - scl: _CoefLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, ) -> _ObjectArray: ... @type_check_only -class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): +class _FuncInteg(Protocol): @overload def __call__( self, /, c: _ArrayLikeFloat_co, - m: SupportsIndex = ..., - k: _FloatLike_co | _SeriesLikeFloat_co = ..., - lbnd: _FloatLike_co = ..., - scl: _FloatLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _FloatLike_co | _SeriesLikeFloat_co = [], + lbnd: _FloatLike_co = 0, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, ) -> _FloatArray: ... 
@overload def __call__( self, /, c: _ArrayLikeComplex_co, - m: SupportsIndex = ..., - k: _ComplexLike_co | _SeriesLikeComplex_co = ..., - lbnd: _ComplexLike_co = ..., - scl: _ComplexLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _ComplexLike_co | _SeriesLikeComplex_co = [], + lbnd: _ComplexLike_co = 0, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, ) -> _ComplexArray: ... @overload def __call__( self, /, c: _ArrayLikeCoef_co, - m: SupportsIndex = ..., - k: _CoefLike_co | _SeriesLikeCoef_co = ..., - lbnd: _CoefLike_co = ..., - scl: _CoefLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co = 0, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, ) -> _ObjectArray: ... @type_check_only -class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal(Protocol): @overload - def __call__( - self, - /, - x: _FloatLike_co, - r: _FloatLike_co, - tensor: bool = ..., - ) -> np.floating[Any]: ... + def __call__(self, /, x: _FloatLike_co, c: _SeriesLikeFloat_co, tensor: bool = True) -> np.floating: ... @overload - def __call__( - self, - /, - x: _NumberLike_co, - r: _NumberLike_co, - tensor: bool = ..., - ) -> np.complexfloating[Any, Any]: ... + def __call__(self, /, x: _NumberLike_co, c: _SeriesLikeComplex_co, tensor: bool = True) -> np.complexfloating: ... @overload - def __call__( - self, - /, - x: _FloatLike_co | _ArrayLikeFloat_co, - r: _ArrayLikeFloat_co, - tensor: bool = ..., - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, tensor: bool = True) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _NumberLike_co | _ArrayLikeComplex_co, - r: _ArrayLikeComplex_co, - tensor: bool = ..., - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, tensor: bool = True) -> _ComplexArray: ... 
@overload - def __call__( - self, - /, - x: _CoefLike_co | _ArrayLikeCoef_co, - r: _ArrayLikeCoef_co, - tensor: bool = ..., - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - x: _CoefLike_co, - r: _CoefLike_co, - tensor: bool = ..., - ) -> _SupportsCoefOps[Any]: ... - -@type_check_only -class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - x: _FloatLike_co, - c: _SeriesLikeFloat_co, - tensor: bool = ..., - ) -> np.floating[Any]: ... - @overload - def __call__( - self, - /, - x: _NumberLike_co, - c: _SeriesLikeComplex_co, - tensor: bool = ..., - ) -> np.complexfloating[Any, Any]: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - c: _ArrayLikeFloat_co, - tensor: bool = ..., - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - c: _ArrayLikeComplex_co, - tensor: bool = ..., - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - c: _ArrayLikeCoef_co, - tensor: bool = ..., - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, tensor: bool = True) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co, - c: _SeriesLikeObject_co, - tensor: bool = ..., - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _CoefLike_co, c: _SeriesLikeObject_co, tensor: bool = True) -> _SupportsCoefOps[Any]: ... @type_check_only -class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal2D(Protocol): @overload - def __call__( - self, - /, - x: _FloatLike_co, - y: _FloatLike_co, - c: _SeriesLikeFloat_co, - ) -> np.floating[Any]: ... + def __call__(self, /, x: _FloatLike_co, y: _FloatLike_co, c: _SeriesLikeFloat_co) -> np.floating: ... @overload - def __call__( - self, - /, - x: _NumberLike_co, - y: _NumberLike_co, - c: _SeriesLikeComplex_co, - ) -> np.complexfloating[Any, Any]: ... 
+ def __call__(self, /, x: _NumberLike_co, y: _NumberLike_co, c: _SeriesLikeComplex_co) -> np.complexfloating: ... @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - c: _ArrayLikeFloat_co, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - c: _ArrayLikeComplex_co, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - y: _ArrayLikeCoef_co, - c: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co, - y: _CoefLike_co, - c: _SeriesLikeCoef_co, - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _CoefLike_co, y: _CoefLike_co, c: _SeriesLikeCoef_co) -> _SupportsCoefOps[Any]: ... @type_check_only -class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal3D(Protocol): @overload def __call__( self, @@ -458,8 +249,8 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): x: _FloatLike_co, y: _FloatLike_co, z: _FloatLike_co, - c: _SeriesLikeFloat_co - ) -> np.floating[Any]: ... + c: _SeriesLikeFloat_co, + ) -> np.floating: ... @overload def __call__( self, @@ -468,7 +259,7 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): y: _NumberLike_co, z: _NumberLike_co, c: _SeriesLikeComplex_co, - ) -> np.complexfloating[Any, Any]: ... + ) -> np.complexfloating: ... @overload def __call__( self, @@ -506,132 +297,32 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): c: _SeriesLikeCoef_co, ) -> _SupportsCoefOps[Any]: ... 
-_AnyValF: TypeAlias = Callable[ - [npt.ArrayLike, npt.ArrayLike, bool], - _CoefArray, -] - @type_check_only -class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander(Protocol): @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeFloat_co, - /, - *args: _FloatLike_co, - ) -> np.floating[Any]: ... + def __call__(self, /, x: _ArrayLikeFloat_co, deg: SupportsIndex) -> _FloatArray: ... @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeComplex_co, - /, - *args: _NumberLike_co, - ) -> np.complexfloating[Any, Any]: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeFloat_co, - /, - *args: _ArrayLikeFloat_co, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, deg: SupportsIndex) -> _ComplexArray: ... @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeComplex_co, - /, - *args: _ArrayLikeComplex_co, - ) -> _ComplexArray: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeObject_co, - /, - *args: _CoefObjectLike_co, - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _ArrayLikeCoef_co, deg: SupportsIndex) -> _ObjectArray: ... @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeCoef_co, - /, - *args: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... - -@type_check_only -class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - deg: SupportsIndex, - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - deg: SupportsIndex, - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - deg: SupportsIndex, - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - x: npt.ArrayLike, - deg: SupportsIndex, - ) -> _CoefArray: ... + def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... 
_AnyDegrees: TypeAlias = Sequence[SupportsIndex] @type_check_only -class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander2D(Protocol): @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - deg: _AnyDegrees, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: _AnyDegrees) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - deg: _AnyDegrees, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: _AnyDegrees) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - y: _ArrayLikeCoef_co, - deg: _AnyDegrees, - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, deg: _AnyDegrees) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: npt.ArrayLike, - y: npt.ArrayLike, - deg: _AnyDegrees, - ) -> _CoefArray: ... + def __call__(self, /, x: npt.ArrayLike, y: npt.ArrayLike, deg: _AnyDegrees) -> _CoefArray: ... @type_check_only -class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander3D(Protocol): @overload def __call__( self, @@ -669,53 +360,10 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): deg: _AnyDegrees, ) -> _CoefArray: ... -# keep in sync with the broadest overload of `._FuncVander` -_AnyFuncVander: TypeAlias = Callable[ - [npt.ArrayLike, SupportsIndex], - _CoefArray, -] - -@type_check_only -class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[_ArrayLikeFloat_co], - degrees: Sequence[SupportsIndex], - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[_ArrayLikeComplex_co], - degrees: Sequence[SupportsIndex], - ) -> _ComplexArray: ... 
- @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[ - _ArrayLikeObject_co | _ArrayLikeComplex_co, - ], - degrees: Sequence[SupportsIndex], - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[npt.ArrayLike], - degrees: Sequence[SupportsIndex], - ) -> _CoefArray: ... - -_FullFitResult: TypeAlias = Sequence[np.inexact[Any] | np.int32] +_FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] @type_check_only -class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): +class _FuncFit(Protocol): @overload def __call__( self, @@ -723,9 +371,9 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, deg: int | _SeriesLikeInt_co, - rcond: None | float = ..., - full: Literal[False] = ..., - w: None | _SeriesLikeFloat_co = ..., + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., ) -> _FloatArray: ... @overload def __call__( @@ -733,10 +381,10 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, deg: int | _SeriesLikeInt_co, - rcond: None | float, + rcond: float | None, full: Literal[True], /, - w: None | _SeriesLikeFloat_co = ..., + w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_FloatArray, _FullFitResult]: ... @overload def __call__( @@ -745,12 +393,11 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, deg: int | _SeriesLikeInt_co, - rcond: None | float = ..., + rcond: float | None = ..., *, full: Literal[True], - w: None | _SeriesLikeFloat_co = ..., + w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_FloatArray, _FullFitResult]: ... 
- @overload def __call__( self, @@ -758,9 +405,9 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeComplex_co, y: _ArrayLikeComplex_co, deg: int | _SeriesLikeInt_co, - rcond: None | float = ..., - full: Literal[False] = ..., - w: None | _SeriesLikeFloat_co = ..., + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., ) -> _ComplexArray: ... @overload def __call__( @@ -768,10 +415,10 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeComplex_co, y: _ArrayLikeComplex_co, deg: int | _SeriesLikeInt_co, - rcond: None | float, + rcond: float | None, full: Literal[True], /, - w: None | _SeriesLikeFloat_co = ..., + w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_ComplexArray, _FullFitResult]: ... @overload def __call__( @@ -780,12 +427,11 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeComplex_co, y: _ArrayLikeComplex_co, deg: int | _SeriesLikeInt_co, - rcond: None | float = ..., + rcond: float | None = ..., *, full: Literal[True], - w: None | _SeriesLikeFloat_co = ..., + w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_ComplexArray, _FullFitResult]: ... - @overload def __call__( self, @@ -793,9 +439,9 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeComplex_co, y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, - rcond: None | float = ..., - full: Literal[False] = ..., - w: None | _SeriesLikeFloat_co = ..., + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., ) -> _ObjectArray: ... @overload def __call__( @@ -803,10 +449,10 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeComplex_co, y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, - rcond: None | float, + rcond: float | None, full: Literal[True], /, - w: None | _SeriesLikeFloat_co = ..., + w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_ObjectArray, _FullFitResult]: ... 
@overload def __call__( @@ -815,74 +461,41 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): x: _SeriesLikeComplex_co, y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, - rcond: None | float = ..., + rcond: float | None = ..., *, full: Literal[True], - w: None | _SeriesLikeFloat_co = ..., + w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_ObjectArray, _FullFitResult]: ... @type_check_only -class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncRoots(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - ) -> _Series[np.float64]: ... + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Series[np.float64]: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - ) -> _Series[np.complex128]: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Series[np.complex128]: ... @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... - -_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] +_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] @type_check_only -class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): +class _FuncCompanion(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - ) -> _Companion[np.float64]: ... + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Companion[np.float64]: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - ) -> _Companion[np.complex128]: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Companion[np.complex128]: ... @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... @type_check_only -class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): - def __call__( - self, - /, - deg: SupportsIndex, - ) -> _Tuple2[_Series[np.float64]]: ... +class _FuncGauss(Protocol): + def __call__(self, /, deg: SupportsIndex) -> _Tuple2[_Series[np.float64]]: ... 
@type_check_only -class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): +class _FuncWeight(Protocol): @overload - def __call__( - self, - /, - c: _ArrayLikeFloat_co, - ) -> npt.NDArray[np.float64]: ... + def __call__(self, /, x: _ArrayLikeFloat_co) -> npt.NDArray[np.float64]: ... @overload - def __call__( - self, - /, - c: _ArrayLikeComplex_co, - ) -> npt.NDArray[np.complex128]: ... + def __call__(self, /, x: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128]: ... @overload - def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... - -@type_check_only -class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): - def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... + def __call__(self, /, x: _ArrayLikeCoef_co) -> _ObjectArray: ... diff --git a/blimgui/dist64/numpy/polynomial/chebyshev.py b/blimgui/dist64/numpy/polynomial/chebyshev.py index e6d7a63..b804494 100644 --- a/blimgui/dist64/numpy/polynomial/chebyshev.py +++ b/blimgui/dist64/numpy/polynomial/chebyshev.py @@ -106,10 +106,8 @@ Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) -""" +""" # noqa: E501 import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -150,8 +148,8 @@ def _cseries_to_zseries(c): """ n = c.size - zs = np.zeros(2*n-1, dtype=c.dtype) - zs[n-1:] = c/2 + zs = np.zeros(2 * n - 1, dtype=c.dtype) + zs[n - 1:] = c / 2 return zs + zs[::-1] @@ -174,8 +172,8 @@ def _zseries_to_cseries(zs): Chebyshev coefficients, ordered from low to high. 
""" - n = (zs.size + 1)//2 - c = zs[n-1:].copy() + n = (zs.size + 1) // 2 + c = zs[n - 1:].copy() c[1:n] *= 2 return c @@ -246,9 +244,9 @@ def _zseries_div(z1, z2): lc2 = len(z2) if lc2 == 1: z1 /= z2 - return z1, z1[:1]*0 + return z1, z1[:1] * 0 elif lc1 < lc2: - return z1[:1]*0, z1 + return z1[:1] * 0, z1 else: dlen = lc1 - lc2 scl = z2[0] @@ -260,17 +258,17 @@ def _zseries_div(z1, z2): r = z1[i] quo[i] = z1[i] quo[dlen - i] = r - tmp = r*z2 - z1[i:i+lc2] -= tmp - z1[j:j+lc2] -= tmp + tmp = r * z2 + z1[i:i + lc2] -= tmp + z1[j:j + lc2] -= tmp i += 1 j -= 1 r = z1[i] quo[i] = r - tmp = r*z2 - z1[i:i+lc2] -= tmp + tmp = r * z2 + z1[i:i + lc2] -= tmp quo /= scl - rem = z1[i+1:i-1+lc2].copy() + rem = z1[i + 1:i - 1 + lc2].copy() return quo, rem @@ -299,9 +297,9 @@ def _zseries_der(zs): division. """ - n = len(zs)//2 + n = len(zs) // 2 ns = np.array([-1, 0, 1], dtype=zs.dtype) - zs *= np.arange(-n, n+1)*2 + zs *= np.arange(-n, n + 1) * 2 d, r = _zseries_div(zs, ns) return d @@ -330,12 +328,12 @@ def _zseries_int(zs): dividing the resulting zs by two. 
""" - n = 1 + len(zs)//2 + n = 1 + len(zs) // 2 ns = np.array([-1, 0, 1], dtype=zs.dtype) zs = _zseries_mul(zs, ns) - div = np.arange(-n, n+1)*2 + div = np.arange(-n, n + 1) * 2 zs[:n] /= div[:n] - zs[n+1:] /= div[n+1:] + zs[n + 1:] /= div[n + 1:] zs[n] = 0 return zs @@ -438,7 +436,7 @@ def cheb2poly(c): array([-2., -8., 4., 12.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -451,7 +449,7 @@ def cheb2poly(c): for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], c1) - c1 = polyadd(tmp, polymulx(c1)*2) + c1 = polyadd(tmp, polymulx(c1) * 2) return polyadd(c0, polymulx(c1)) @@ -688,10 +686,10 @@ def chebmulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 + prd[0] = c[0] * 0 prd[1] = c[0] if len(c) > 1: - tmp = c[1:]/2 + tmp = c[1:] / 2 prd[2:] = tmp prd[0:-2] += tmp return prd @@ -801,9 +799,9 @@ def chebdiv(c1, c2): lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: - return c1[:1]*0, c1 + return c1[:1] * 0, c1 elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 + return c1 / c2[-1], c1[:1] * 0 else: z1 = _cseries_to_zseries(c1) z2 = _cseries_to_zseries(c2) @@ -936,7 +934,7 @@ def chebder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -944,17 +942,17 @@ def chebder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 2, -1): - der[j - 1] = (2*j)*c[j] - c[j - 2] += (j*c[j])/(j - 2) + der[j - 1] = (2 * j) * c[j] + c[j - 2] += (j * c[j]) / (j - 2) if n > 1: - der[1] = 4*c[2] + der[1] = 4 * c[2] der[0] = c[1] c = der c = np.moveaxis(c, 0, iaxis) @@ -1059,13 +1057,13 
@@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -1073,13 +1071,13 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 + tmp[0] = c[0] * 0 tmp[1] = c[0] if n > 1: - tmp[2] = c[1]/4 + tmp[2] = c[1] / 4 for j in range(2, n): - tmp[j + 1] = c[j]/(2*(j + 1)) - tmp[j - 1] -= c[j]/(2*(j - 1)) + tmp[j + 1] = c[j] / (2 * (j + 1)) + tmp[j - 1] -= c[j] / (2 * (j - 1)) tmp[0] += k[i] - chebval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) @@ -1149,7 +1147,7 @@ def chebval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) if len(c) == 1: c0 = c[0] @@ -1158,14 +1156,14 @@ def chebval(x, c, tensor=True): c0 = c[0] c1 = c[1] else: - x2 = 2*x + x2 = 2 * x c0 = c[-2] c1 = c[-1] for i in range(3, len(c) + 1): tmp = c0 c0 = c[-i] - c1 - c1 = tmp + c1*x2 - return c0 + c1*x + c1 = tmp + c1 * x2 + return c0 + c1 * x def chebval2d(x, y, c): @@ -1397,12 +1395,12 @@ def chebvander(x, deg): dtyp = x.dtype v = np.empty(dims, dtype=dtyp) # Use forward recursion to generate the entries. 
- v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: - x2 = 2*x + x2 = 2 * x v[1] = x for i in range(2, ideg + 1): - v[i] = v[i-1]*x2 - v[i-2] + v[i] = v[i - 1] * x2 - v[i - 2] return np.moveaxis(v, 0, -1) @@ -1651,17 +1649,17 @@ def chebcompanion(c): if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-c[0]/c[1]]]) + return np.array([[-c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] + scl = np.array([1.] + [np.sqrt(.5)] * (n - 1)) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] top[0] = np.sqrt(.5) - top[1:] = 1/2 + top[1:] = 1 / 2 bot[...] = top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * .5 return mat @@ -1717,11 +1715,11 @@ def chebroots(c): if len(c) < 2: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-c[0]/c[1]]) + return np.array([-c[0] / c[1]]) # rotated companion matrix reduces error - m = chebcompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = chebcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() return r @@ -1785,7 +1783,7 @@ def chebinterpolate(func, deg, args=()): m = chebvander(xcheb, deg) c = np.dot(m.T, yfunc) c[0] /= order - c[1:] /= 0.5*order + c[1:] /= 0.5 * order return c @@ -1826,8 +1824,8 @@ def chebgauss(deg): if ideg <= 0: raise ValueError("deg must be a positive integer") - x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg)) - w = np.ones(ideg)*(np.pi/ideg) + x = np.cos(np.pi * np.arange(1, 2 * ideg, 2) / (2.0 * ideg)) + w = np.ones(ideg) * (np.pi / ideg) return x, w @@ -1850,7 +1848,7 @@ def chebweight(x): w : ndarray The weight function at `x`. """ - w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) + w = 1. / (np.sqrt(1. + x) * np.sqrt(1. 
- x)) return w @@ -1881,7 +1879,7 @@ def chebpts1(npts): if _npts < 1: raise ValueError("npts must be >= 1") - x = 0.5 * np.pi / _npts * np.arange(-_npts+1, _npts+1, 2) + x = 0.5 * np.pi / _npts * np.arange(-_npts + 1, _npts + 1, 2) return np.sin(x) diff --git a/blimgui/dist64/numpy/polynomial/chebyshev.pyi b/blimgui/dist64/numpy/polynomial/chebyshev.pyi index 8169015..c8d5379 100644 --- a/blimgui/dist64/numpy/polynomial/chebyshev.pyi +++ b/blimgui/dist64/numpy/polynomial/chebyshev.pyi @@ -1,9 +1,12 @@ +from _typeshed import ConvertibleToInt from collections.abc import Callable, Iterable from typing import ( Any, + ClassVar, Concatenate, Final, Literal as L, + Self, TypeVar, overload, ) @@ -14,9 +17,7 @@ from numpy._typing import _IntLike_co from ._polybase import ABCPolyBase from ._polytypes import ( - _SeriesLikeCoef_co, _Array1, - _Series, _Array2, _CoefSeries, _FuncBinOp, @@ -29,17 +30,17 @@ from ._polytypes import ( _FuncLine, _FuncPoly2Ortho, _FuncPow, - _FuncPts, _FuncRoots, _FuncUnOp, _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, _FuncWeight, + _Series, + _SeriesLikeCoef_co, ) from .polyutils import trimcoef as chebtrim @@ -80,113 +81,100 @@ __all__ = [ "chebinterpolate", ] -_SCT = TypeVar("_SCT", bound=np.number[Any] | np.object_) -def _cseries_to_zseries(c: npt.NDArray[_SCT]) -> _Series[_SCT]: ... -def _zseries_to_cseries(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... -def _zseries_mul( - z1: npt.NDArray[_SCT], - z2: npt.NDArray[_SCT], -) -> _Series[_SCT]: ... -def _zseries_div( - z1: npt.NDArray[_SCT], - z2: npt.NDArray[_SCT], -) -> _Series[_SCT]: ... -def _zseries_der(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... -def _zseries_int(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... 
+_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +_CoefScalarT = TypeVar("_CoefScalarT", bound=np.number | np.bool | np.object_) -poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] -cheb2poly: _FuncUnOp[L["cheb2poly"]] +def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_mul(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_div(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -chebdomain: Final[_Array2[np.float64]] -chebzero: Final[_Array1[np.int_]] -chebone: Final[_Array1[np.int_]] -chebx: Final[_Array2[np.int_]] +poly2cheb: Final[_FuncPoly2Ortho] = ... +cheb2poly: Final[_FuncUnOp] = ... 
-chebline: _FuncLine[L["chebline"]] -chebfromroots: _FuncFromRoots[L["chebfromroots"]] -chebadd: _FuncBinOp[L["chebadd"]] -chebsub: _FuncBinOp[L["chebsub"]] -chebmulx: _FuncUnOp[L["chebmulx"]] -chebmul: _FuncBinOp[L["chebmul"]] -chebdiv: _FuncBinOp[L["chebdiv"]] -chebpow: _FuncPow[L["chebpow"]] -chebder: _FuncDer[L["chebder"]] -chebint: _FuncInteg[L["chebint"]] -chebval: _FuncVal[L["chebval"]] -chebval2d: _FuncVal2D[L["chebval2d"]] -chebval3d: _FuncVal3D[L["chebval3d"]] -chebvalfromroots: _FuncValFromRoots[L["chebvalfromroots"]] -chebgrid2d: _FuncVal2D[L["chebgrid2d"]] -chebgrid3d: _FuncVal3D[L["chebgrid3d"]] -chebvander: _FuncVander[L["chebvander"]] -chebvander2d: _FuncVander2D[L["chebvander2d"]] -chebvander3d: _FuncVander3D[L["chebvander3d"]] -chebfit: _FuncFit[L["chebfit"]] -chebcompanion: _FuncCompanion[L["chebcompanion"]] -chebroots: _FuncRoots[L["chebroots"]] -chebgauss: _FuncGauss[L["chebgauss"]] -chebweight: _FuncWeight[L["chebweight"]] -chebpts1: _FuncPts[L["chebpts1"]] -chebpts2: _FuncPts[L["chebpts2"]] +chebdomain: Final[_Array2[np.float64]] = ... +chebzero: Final[_Array1[np.int_]] = ... +chebone: Final[_Array1[np.int_]] = ... +chebx: Final[_Array2[np.int_]] = ... -# keep in sync with `Chebyshev.interpolate` -_RT = TypeVar("_RT", bound=np.number[Any] | np.bool | np.object_) +chebline: Final[_FuncLine] = ... +chebfromroots: Final[_FuncFromRoots] = ... +chebadd: Final[_FuncBinOp] = ... +chebsub: Final[_FuncBinOp] = ... +chebmulx: Final[_FuncUnOp] = ... +chebmul: Final[_FuncBinOp] = ... +chebdiv: Final[_FuncBinOp] = ... +chebpow: Final[_FuncPow] = ... +chebder: Final[_FuncDer] = ... +chebint: Final[_FuncInteg] = ... +chebval: Final[_FuncVal] = ... +chebval2d: Final[_FuncVal2D] = ... +chebval3d: Final[_FuncVal3D] = ... +chebgrid2d: Final[_FuncVal2D] = ... +chebgrid3d: Final[_FuncVal3D] = ... +chebvander: Final[_FuncVander] = ... +chebvander2d: Final[_FuncVander2D] = ... +chebvander3d: Final[_FuncVander3D] = ... +chebfit: Final[_FuncFit] = ... 
+chebcompanion: Final[_FuncCompanion] = ... +chebroots: Final[_FuncRoots] = ... +chebgauss: Final[_FuncGauss] = ... +chebweight: Final[_FuncWeight] = ... +def chebpts1(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +def chebpts2(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... + +# keep in sync with `Chebyshev.interpolate` (minus `domain` parameter) @overload def chebinterpolate( func: np.ufunc, deg: _IntLike_co, - args: tuple[()] = ..., + args: tuple[()] = (), ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload def chebinterpolate( - func: Callable[[npt.NDArray[np.float64]], _RT], + func: Callable[[npt.NDArray[np.float64]], _CoefScalarT], deg: _IntLike_co, - args: tuple[()] = ..., -) -> npt.NDArray[_RT]: ... + args: tuple[()] = (), +) -> npt.NDArray[_CoefScalarT]: ... @overload def chebinterpolate( - func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefScalarT], deg: _IntLike_co, args: Iterable[Any], -) -> npt.NDArray[_RT]: ... - -_Self = TypeVar("_Self", bound=object) +) -> npt.NDArray[_CoefScalarT]: ... class Chebyshev(ABCPolyBase[L["T"]]): + basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload @classmethod def interpolate( - cls: type[_Self], - /, + cls, func: Callable[[npt.NDArray[np.float64]], _CoefSeries], deg: _IntLike_co, - domain: None | _SeriesLikeCoef_co = ..., - args: tuple[()] = ..., - ) -> _Self: ... + domain: _SeriesLikeCoef_co | None = None, + args: tuple[()] = (), + ) -> Self: ... 
@overload @classmethod def interpolate( - cls: type[_Self], - /, - func: Callable[ - Concatenate[npt.NDArray[np.float64], ...], - _CoefSeries, - ], + cls, + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], deg: _IntLike_co, - domain: None | _SeriesLikeCoef_co = ..., + domain: _SeriesLikeCoef_co | None = None, *, args: Iterable[Any], - ) -> _Self: ... + ) -> Self: ... @overload @classmethod def interpolate( - cls: type[_Self], - func: Callable[ - Concatenate[npt.NDArray[np.float64], ...], - _CoefSeries, - ], + cls, + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], deg: _IntLike_co, - domain: None | _SeriesLikeCoef_co, + domain: _SeriesLikeCoef_co | None, args: Iterable[Any], - /, - ) -> _Self: ... + ) -> Self: ... diff --git a/blimgui/dist64/numpy/polynomial/hermite.py b/blimgui/dist64/numpy/polynomial/hermite.py index 0ac4a1e..24f0507 100644 --- a/blimgui/dist64/numpy/polynomial/hermite.py +++ b/blimgui/dist64/numpy/polynomial/hermite.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -177,7 +175,7 @@ def herm2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -192,9 +190,9 @@ def herm2poly(c): # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 - c0 = polysub(c[i - 2], c1*(2*(i - 1))) - c1 = polyadd(tmp, polymulx(c1)*2) - return polyadd(c0, polymulx(c1)*2) + c0 = polysub(c[i - 2], c1 * (2 * (i - 1))) + c1 = polyadd(tmp, polymulx(c1) * 2) + return polyadd(c0, polymulx(c1) * 2) # @@ -212,7 +210,7 @@ def herm2poly(c): hermone = np.array([1]) # Hermite coefficients representing the identity x. 
-hermx = np.array([0, 1/2]) +hermx = np.array([0, 1 / 2]) def hermline(off, scl): @@ -250,7 +248,7 @@ def hermline(off, scl): """ if scl != 0: - return np.array([off, scl/2]) + return np.array([off, scl / 2]) else: return np.array([off]) @@ -436,11 +434,11 @@ def hermmulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0]/2 + prd[0] = c[0] * 0 + prd[1] = c[0] / 2 for i in range(1, len(c)): - prd[i + 1] = c[i]/2 - prd[i - 1] += c[i]*i + prd[i + 1] = c[i] / 2 + prd[i - 1] += c[i] * i return prd @@ -493,21 +491,21 @@ def hermmul(c1, c2): xs = c2 if len(c) == 1: - c0 = c[0]*xs + c0 = c[0] * xs c1 = 0 elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs + c0 = c[0] * xs + c1 = c[1] * xs else: nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs + c0 = c[-2] * xs + c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) - c1 = hermadd(tmp, hermmulx(c1)*2) - return hermadd(c0, hermmulx(c1)*2) + c0 = hermsub(c[-i] * xs, c1 * (2 * (nd - 1))) + c1 = hermadd(tmp, hermmulx(c1) * 2) + return hermadd(c0, hermmulx(c1) * 2) def hermdiv(c1, c2): @@ -655,7 +653,7 @@ def hermder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -663,14 +661,14 @@ def hermder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 0, -1): - der[j - 1] = (2*j)*c[j] + der[j - 1] = (2 * j) * c[j] c = der c = np.moveaxis(c, 0, iaxis) return c @@ -772,13 +770,13 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = 
normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -786,10 +784,10 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0]/2 + tmp[0] = c[0] * 0 + tmp[1] = c[0] / 2 for j in range(1, n): - tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[j + 1] = c[j] / (2 * (j + 1)) tmp[0] += k[i] - hermval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) @@ -798,7 +796,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermval(x, c, tensor=True): """ - Evaluate an Hermite series at points x. + Evaluate a Hermite series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -869,9 +867,9 @@ def hermval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) - x2 = x*2 + x2 = x * 2 if len(c) == 1: c0 = c[0] c1 = 0 @@ -885,9 +883,9 @@ def hermval(x, c, tensor=True): for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = c[-i] - c1*(2*(nd - 1)) - c1 = tmp + c1*x2 - return c0 + c1*x2 + c0 = c[-i] - c1 * (2 * (nd - 1)) + c1 = tmp + c1 * x2 + return c0 + c1 * x2 def hermval2d(x, y, c): @@ -1175,12 +1173,12 @@ def hermvander(x, deg): dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: - x2 = x*2 + x2 = x * 2 v[1] = x2 for i in range(2, ideg + 1): - v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) + v[i] = (v[i - 1] * x2 - v[i - 2] * (2 * (i - 1))) return np.moveaxis(v, 0, -1) @@ -1441,7 +1439,7 @@ def hermcompanion(c): """Return the scaled companion matrix of c. 
The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Hermite basis polynomial. This provides + symmetric when `c` is a Hermite basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1470,17 +1468,17 @@ def hermcompanion(c): if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-.5*c[0]/c[1]]]) + return np.array([[-.5 * c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1)))) + scl = np.hstack((1., 1. / np.sqrt(2. * np.arange(n - 1, 0, -1)))) scl = np.multiply.accumulate(scl)[::-1] - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(.5*np.arange(1, n)) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.sqrt(.5 * np.arange(1, n)) bot[...] = top - mat[:, -1] -= scl*c[:-1]/(2.0*c[-1]) + mat[:, -1] -= scl * c[:-1] / (2.0 * c[-1]) return mat @@ -1539,11 +1537,11 @@ def hermroots(c): if len(c) <= 1: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-.5*c[0]/c[1]]) + return np.array([-.5 * c[0] / c[1]]) # rotated companion matrix reduces error - m = hermcompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = hermcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() return r @@ -1576,17 +1574,17 @@ def _normed_hermite_n(x, n): """ if n == 0: - return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi))) + return np.full(x.shape, 1 / np.sqrt(np.sqrt(np.pi))) c0 = 0. - c1 = 1./np.sqrt(np.sqrt(np.pi)) + c1 = 1. / np.sqrt(np.sqrt(np.pi)) nd = float(n) for i in range(n - 1): tmp = c0 - c0 = -c1*np.sqrt((nd - 1.)/nd) - c1 = tmp + c1*x*np.sqrt(2./nd) + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(2. 
/ nd) nd = nd - 1.0 - return c0 + c1*x*np.sqrt(2) + return c0 + c1 * x * np.sqrt(2) def hermgauss(deg): @@ -1634,24 +1632,24 @@ def hermgauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1], dtype=np.float64) + c = np.array([0] * deg + [1], dtype=np.float64) m = hermcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_n(x, ideg) - df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg) - x -= dy/df + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2 * ideg) + x -= dy / df # compute the weights. We scale the factor to avoid possible numerical # overflow. fm = _normed_hermite_n(x, ideg - 1) fm /= np.abs(fm).max() - w = 1/(fm * fm) + w = 1 / (fm * fm) # for Hermite we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 # scale w to get the right value w *= np.sqrt(np.pi) / w.sum() @@ -1695,7 +1693,7 @@ def hermweight(x): # class Hermite(ABCPolyBase): - """An Hermite series class. + """A Hermite series class. 
The Hermite class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the diff --git a/blimgui/dist64/numpy/polynomial/hermite.pyi b/blimgui/dist64/numpy/polynomial/hermite.pyi index d4bfc91..6d14cf8 100644 --- a/blimgui/dist64/numpy/polynomial/hermite.pyi +++ b/blimgui/dist64/numpy/polynomial/hermite.pyi @@ -1,6 +1,7 @@ -from typing import Any, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np +from numpy._typing import _Shape from ._polybase import ABCPolyBase from ._polytypes import ( @@ -21,7 +22,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,44 +63,44 @@ __all__ = [ "hermweight", ] -poly2herm: _FuncPoly2Ortho[L["poly2herm"]] -herm2poly: _FuncUnOp[L["herm2poly"]] +_ShapeT = TypeVar("_ShapeT", bound=_Shape) -hermdomain: Final[_Array2[np.float64]] -hermzero: Final[_Array1[np.int_]] -hermone: Final[_Array1[np.int_]] -hermx: Final[_Array2[np.int_]] +poly2herm: Final[_FuncPoly2Ortho] = ... +herm2poly: Final[_FuncUnOp] = ... 
-hermline: _FuncLine[L["hermline"]] -hermfromroots: _FuncFromRoots[L["hermfromroots"]] -hermadd: _FuncBinOp[L["hermadd"]] -hermsub: _FuncBinOp[L["hermsub"]] -hermmulx: _FuncUnOp[L["hermmulx"]] -hermmul: _FuncBinOp[L["hermmul"]] -hermdiv: _FuncBinOp[L["hermdiv"]] -hermpow: _FuncPow[L["hermpow"]] -hermder: _FuncDer[L["hermder"]] -hermint: _FuncInteg[L["hermint"]] -hermval: _FuncVal[L["hermval"]] -hermval2d: _FuncVal2D[L["hermval2d"]] -hermval3d: _FuncVal3D[L["hermval3d"]] -hermvalfromroots: _FuncValFromRoots[L["hermvalfromroots"]] -hermgrid2d: _FuncVal2D[L["hermgrid2d"]] -hermgrid3d: _FuncVal3D[L["hermgrid3d"]] -hermvander: _FuncVander[L["hermvander"]] -hermvander2d: _FuncVander2D[L["hermvander2d"]] -hermvander3d: _FuncVander3D[L["hermvander3d"]] -hermfit: _FuncFit[L["hermfit"]] -hermcompanion: _FuncCompanion[L["hermcompanion"]] -hermroots: _FuncRoots[L["hermroots"]] +hermdomain: Final[_Array2[np.float64]] = ... +hermzero: Final[_Array1[np.int_]] = ... +hermone: Final[_Array1[np.int_]] = ... +hermx: Final[_Array2[np.int_]] = ... -_ND = TypeVar("_ND", bound=Any) -def _normed_hermite_n( - x: np.ndarray[_ND, np.dtype[np.float64]], - n: int | np.intp, -) -> np.ndarray[_ND, np.dtype[np.float64]]: ... +hermline: Final[_FuncLine] = ... +hermfromroots: Final[_FuncFromRoots] = ... +hermadd: Final[_FuncBinOp] = ... +hermsub: Final[_FuncBinOp] = ... +hermmulx: Final[_FuncUnOp] = ... +hermmul: Final[_FuncBinOp] = ... +hermdiv: Final[_FuncBinOp] = ... +hermpow: Final[_FuncPow] = ... +hermder: Final[_FuncDer] = ... +hermint: Final[_FuncInteg] = ... +hermval: Final[_FuncVal] = ... +hermval2d: Final[_FuncVal2D] = ... +hermval3d: Final[_FuncVal3D] = ... +hermgrid2d: Final[_FuncVal2D] = ... +hermgrid3d: Final[_FuncVal3D] = ... +hermvander: Final[_FuncVander] = ... +hermvander2d: Final[_FuncVander2D] = ... +hermvander3d: Final[_FuncVander3D] = ... +hermfit: Final[_FuncFit] = ... +hermcompanion: Final[_FuncCompanion] = ... +hermroots: Final[_FuncRoots] = ... 
-hermgauss: _FuncGauss[L["hermgauss"]] -hermweight: _FuncWeight[L["hermweight"]] +def _normed_hermite_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... -class Hermite(ABCPolyBase[L["H"]]): ... +hermgauss: Final[_FuncGauss] = ... +hermweight: Final[_FuncWeight] = ... + +class Hermite(ABCPolyBase[L["H"]]): + basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/blimgui/dist64/numpy/polynomial/hermite_e.py b/blimgui/dist64/numpy/polynomial/hermite_e.py index 79fd4bf..fc4f26f 100644 --- a/blimgui/dist64/numpy/polynomial/hermite_e.py +++ b/blimgui/dist64/numpy/polynomial/hermite_e.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -179,7 +177,7 @@ def herme2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -193,7 +191,7 @@ def herme2poly(c): # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 - c0 = polysub(c[i - 2], c1*(i - 1)) + c0 = polysub(c[i - 2], c1 * (i - 1)) c1 = polyadd(tmp, polymulx(c1)) return polyadd(c0, polymulx(c1)) @@ -436,11 +434,11 @@ def hermemulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 + prd[0] = c[0] * 0 prd[1] = c[0] for i in range(1, len(c)): prd[i + 1] = c[i] - prd[i - 1] += c[i]*i + prd[i - 1] += c[i] * i return prd @@ -493,19 +491,19 @@ def hermemul(c1, c2): xs = c2 if len(c) == 1: - c0 = c[0]*xs + c0 = c[0] * xs c1 = 0 elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs + c0 = c[0] * xs + c1 = c[1] * xs else: nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs + c0 = c[-2] * xs + c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = hermesub(c[-i]*xs, c1*(nd - 1)) + c0 = hermesub(c[-i] * xs, c1 * (nd - 1)) c1 = hermeadd(tmp, hermemulx(c1)) return hermeadd(c0, hermemulx(c1)) @@ -653,7 +651,7 @@ def hermeder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -661,14 +659,14 @@ def hermeder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - return c[:1]*0 + return c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 0, -1): - der[j - 1] = j*c[j] + der[j - 1] = j * c[j] c = der c = np.moveaxis(c, 0, iaxis) return c @@ -770,13 +768,13 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise 
ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -784,10 +782,10 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 + tmp[0] = c[0] * 0 tmp[1] = c[0] for j in range(1, n): - tmp[j + 1] = c[j]/(j + 1) + tmp[j + 1] = c[j] / (j + 1) tmp[0] += k[i] - hermeval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) @@ -796,7 +794,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermeval(x, c, tensor=True): """ - Evaluate an HermiteE series at points x. + Evaluate a HermiteE series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -867,7 +865,7 @@ def hermeval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) if len(c) == 1: c0 = c[0] @@ -882,9 +880,9 @@ def hermeval(x, c, tensor=True): for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = c[-i] - c1*(nd - 1) - c1 = tmp + c1*x - return c0 + c1*x + c0 = c[-i] - c1 * (nd - 1) + c1 = tmp + c1 * x + return c0 + c1 * x def hermeval2d(x, y, c): @@ -1125,11 +1123,11 @@ def hermevander(x, deg): dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): - v[i] = (v[i-1]*x - v[i-2]*(i - 1)) + v[i] = (v[i - 1] * x - v[i - 2] * (i - 1)) return np.moveaxis(v, 0, -1) @@ -1367,7 +1365,7 @@ def hermecompanion(c): Return the scaled companion matrix of c. 
The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an HermiteE basis polynomial. This provides + symmetric when `c` is a HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1388,17 +1386,17 @@ def hermecompanion(c): if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-c[0]/c[1]]]) + return np.array([[-c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.hstack((1., 1. / np.sqrt(np.arange(n - 1, 0, -1)))) scl = np.multiply.accumulate(scl)[::-1] - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] top[...] = np.sqrt(np.arange(1, n)) bot[...] = top - mat[:, -1] -= scl*c[:-1]/c[-1] + mat[:, -1] -= scl * c[:-1] / c[-1] return mat @@ -1457,11 +1455,11 @@ def hermeroots(c): if len(c) <= 1: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-c[0]/c[1]]) + return np.array([-c[0] / c[1]]) # rotated companion matrix reduces error - m = hermecompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = hermecompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() return r @@ -1494,17 +1492,17 @@ def _normed_hermite_e_n(x, n): """ if n == 0: - return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi))) + return np.full(x.shape, 1 / np.sqrt(np.sqrt(2 * np.pi))) c0 = 0. - c1 = 1./np.sqrt(np.sqrt(2*np.pi)) + c1 = 1. / np.sqrt(np.sqrt(2 * np.pi)) nd = float(n) for i in range(n - 1): tmp = c0 - c0 = -c1*np.sqrt((nd - 1.)/nd) - c1 = tmp + c1*x*np.sqrt(1./nd) + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(1. 
/ nd) nd = nd - 1.0 - return c0 + c1*x + return c0 + c1 * x def hermegauss(deg): @@ -1546,27 +1544,27 @@ def hermegauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) + c = np.array([0] * deg + [1]) m = hermecompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_e_n(x, ideg) df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) - x -= dy/df + x -= dy / df # compute the weights. We scale the factor to avoid possible numerical # overflow. fm = _normed_hermite_e_n(x, ideg - 1) fm /= np.abs(fm).max() - w = 1/(fm * fm) + w = 1 / (fm * fm) # for Hermite_e we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 # scale w to get the right value - w *= np.sqrt(2*np.pi) / w.sum() + w *= np.sqrt(2 * np.pi) / w.sum() return x, w @@ -1588,7 +1586,7 @@ def hermeweight(x): w : ndarray The weight function at `x`. """ - w = np.exp(-.5*x**2) + w = np.exp(-.5 * x**2) return w @@ -1597,7 +1595,7 @@ def hermeweight(x): # class HermiteE(ABCPolyBase): - """An HermiteE series class. + """A HermiteE series class. 
The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the diff --git a/blimgui/dist64/numpy/polynomial/hermite_e.pyi b/blimgui/dist64/numpy/polynomial/hermite_e.pyi index 153c18f..547e559 100644 --- a/blimgui/dist64/numpy/polynomial/hermite_e.pyi +++ b/blimgui/dist64/numpy/polynomial/hermite_e.pyi @@ -1,6 +1,7 @@ -from typing import Any, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np +from numpy._typing import _Shape from ._polybase import ABCPolyBase from ._polytypes import ( @@ -21,7 +22,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,44 +63,44 @@ __all__ = [ "hermeweight", ] -poly2herme: _FuncPoly2Ortho[L["poly2herme"]] -herme2poly: _FuncUnOp[L["herme2poly"]] +_ShapeT = TypeVar("_ShapeT", bound=_Shape) -hermedomain: Final[_Array2[np.float64]] -hermezero: Final[_Array1[np.int_]] -hermeone: Final[_Array1[np.int_]] -hermex: Final[_Array2[np.int_]] +poly2herme: Final[_FuncPoly2Ortho] = ... +herme2poly: Final[_FuncUnOp] = ... 
-hermeline: _FuncLine[L["hermeline"]] -hermefromroots: _FuncFromRoots[L["hermefromroots"]] -hermeadd: _FuncBinOp[L["hermeadd"]] -hermesub: _FuncBinOp[L["hermesub"]] -hermemulx: _FuncUnOp[L["hermemulx"]] -hermemul: _FuncBinOp[L["hermemul"]] -hermediv: _FuncBinOp[L["hermediv"]] -hermepow: _FuncPow[L["hermepow"]] -hermeder: _FuncDer[L["hermeder"]] -hermeint: _FuncInteg[L["hermeint"]] -hermeval: _FuncVal[L["hermeval"]] -hermeval2d: _FuncVal2D[L["hermeval2d"]] -hermeval3d: _FuncVal3D[L["hermeval3d"]] -hermevalfromroots: _FuncValFromRoots[L["hermevalfromroots"]] -hermegrid2d: _FuncVal2D[L["hermegrid2d"]] -hermegrid3d: _FuncVal3D[L["hermegrid3d"]] -hermevander: _FuncVander[L["hermevander"]] -hermevander2d: _FuncVander2D[L["hermevander2d"]] -hermevander3d: _FuncVander3D[L["hermevander3d"]] -hermefit: _FuncFit[L["hermefit"]] -hermecompanion: _FuncCompanion[L["hermecompanion"]] -hermeroots: _FuncRoots[L["hermeroots"]] +hermedomain: Final[_Array2[np.float64]] = ... +hermezero: Final[_Array1[np.int_]] = ... +hermeone: Final[_Array1[np.int_]] = ... +hermex: Final[_Array2[np.int_]] = ... -_ND = TypeVar("_ND", bound=Any) -def _normed_hermite_e_n( - x: np.ndarray[_ND, np.dtype[np.float64]], - n: int | np.intp, -) -> np.ndarray[_ND, np.dtype[np.float64]]: ... +hermeline: Final[_FuncLine] = ... +hermefromroots: Final[_FuncFromRoots] = ... +hermeadd: Final[_FuncBinOp] = ... +hermesub: Final[_FuncBinOp] = ... +hermemulx: Final[_FuncUnOp] = ... +hermemul: Final[_FuncBinOp] = ... +hermediv: Final[_FuncBinOp] = ... +hermepow: Final[_FuncPow] = ... +hermeder: Final[_FuncDer] = ... +hermeint: Final[_FuncInteg] = ... +hermeval: Final[_FuncVal] = ... +hermeval2d: Final[_FuncVal2D] = ... +hermeval3d: Final[_FuncVal3D] = ... +hermegrid2d: Final[_FuncVal2D] = ... +hermegrid3d: Final[_FuncVal3D] = ... +hermevander: Final[_FuncVander] = ... +hermevander2d: Final[_FuncVander2D] = ... +hermevander3d: Final[_FuncVander3D] = ... +hermefit: Final[_FuncFit] = ... 
+hermecompanion: Final[_FuncCompanion] = ... +hermeroots: Final[_FuncRoots] = ... -hermegauss: _FuncGauss[L["hermegauss"]] -hermeweight: _FuncWeight[L["hermeweight"]] +def _normed_hermite_e_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... -class HermiteE(ABCPolyBase[L["He"]]): ... +hermegauss: Final[_FuncGauss] = ... +hermeweight: Final[_FuncWeight] = ... + +class HermiteE(ABCPolyBase[L["He"]]): + basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/blimgui/dist64/numpy/polynomial/laguerre.py b/blimgui/dist64/numpy/polynomial/laguerre.py index b7d7619..6c9b989 100644 --- a/blimgui/dist64/numpy/polynomial/laguerre.py +++ b/blimgui/dist64/numpy/polynomial/laguerre.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -177,7 +175,7 @@ def lag2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -189,8 +187,8 @@ def lag2poly(c): # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 - c0 = polysub(c[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i) + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, polysub((2 * i - 1) * c1, polymulx(c1)) / i) return polyadd(c0, polysub(c1, polymulx(c1))) @@ -434,9 +432,9 @@ def lagmulx(c): prd[0] = c[0] prd[1] = -c[0] for i in range(1, len(c)): - prd[i + 1] = -c[i]*(i + 1) - prd[i] += c[i]*(2*i + 1) - prd[i - 1] -= c[i]*i + prd[i + 1] = -c[i] * (i + 1) + prd[i] += c[i] * (2 * i + 1) + prd[i - 1] -= c[i] * i return prd @@ -489,20 +487,20 @@ def lagmul(c1, c2): xs = c2 if len(c) == 1: - c0 = c[0]*xs + c0 = c[0] * xs c1 = 0 elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs + c0 = c[0] * xs + c1 = c[1] * xs else: nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs + c0 = c[-2] * xs + c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) - c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) + c0 = lagsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = lagadd(tmp, lagsub((2 * nd - 1) * c1, lagmulx(c1)) / nd) return lagadd(c0, lagsub(c1, lagmulx(c1))) @@ -650,7 +648,7 @@ def lagder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -658,7 +656,7 @@ def lagder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 @@ -770,13 +768,13 @@ def lagint(c, m=1, 
k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -868,7 +866,7 @@ def lagval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) if len(c) == 1: c0 = c[0] @@ -883,9 +881,9 @@ def lagval(x, c, tensor=True): for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = c[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*((2*nd - 1) - x))/nd - return c0 + c1*(1 - x) + c0 = c[-i] - (c1 * (nd - 1)) / nd + c1 = tmp + (c1 * ((2 * nd - 1) - x)) / nd + return c0 + c1 * (1 - x) def lagval2d(x, y, c): @@ -1161,11 +1159,11 @@ def lagvander(x, deg): dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: v[1] = 1 - x for i in range(2, ideg + 1): - v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i + v[i] = (v[i - 1] * (2 * i - 1 - x) - v[i - 2] * (i - 1)) / i return np.moveaxis(v, 0, -1) @@ -1185,7 +1183,7 @@ def lagvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares @@ -1452,17 +1450,17 @@ def lagcompanion(c): if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[1 + c[0]/c[1]]]) + return np.array([[1 + c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - top = mat.reshape(-1)[1::n+1] - mid = mat.reshape(-1)[0::n+1] - bot = mat.reshape(-1)[n::n+1] + top = mat.reshape(-1)[1::n + 1] + mid = mat.reshape(-1)[0::n + 1] + bot = mat.reshape(-1)[n::n + 1] top[...] = -np.arange(1, n) - mid[...] = 2.*np.arange(n) + 1. + mid[...] = 2. * np.arange(n) + 1. bot[...] = top - mat[:, -1] += (c[:-1]/c[-1])*n + mat[:, -1] += (c[:-1] / c[-1]) * n return mat @@ -1521,11 +1519,11 @@ def lagroots(c): if len(c) <= 1: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([1 + c[0]/c[1]]) + return np.array([1 + c[0] / c[1]]) # rotated companion matrix reduces error - m = lagcompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = lagcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() return r @@ -1575,21 +1573,21 @@ def laggauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) + c = np.array([0] * deg + [1]) m = lagcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = lagval(x, c) df = lagval(x, lagder(c)) - x -= dy/df + x -= dy / df # compute the weights. We scale the factor to avoid possible numerical # overflow. 
fm = lagval(x, c[1:]) fm /= np.abs(fm).max() df /= np.abs(df).max() - w = 1/(fm * df) + w = 1 / (fm * df) # scale w to get the right value, 1 in this case w /= w.sum() diff --git a/blimgui/dist64/numpy/polynomial/laguerre.pyi b/blimgui/dist64/numpy/polynomial/laguerre.pyi index 709e7ca..b9e131a 100644 --- a/blimgui/dist64/numpy/polynomial/laguerre.pyi +++ b/blimgui/dist64/numpy/polynomial/laguerre.pyi @@ -1,4 +1,4 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,38 +62,39 @@ __all__ = [ "lagweight", ] -poly2lag: _FuncPoly2Ortho[L["poly2lag"]] -lag2poly: _FuncUnOp[L["lag2poly"]] +poly2lag: Final[_FuncPoly2Ortho] = ... +lag2poly: Final[_FuncUnOp] = ... -lagdomain: Final[_Array2[np.float64]] -lagzero: Final[_Array1[np.int_]] -lagone: Final[_Array1[np.int_]] -lagx: Final[_Array2[np.int_]] +lagdomain: Final[_Array2[np.float64]] = ... +lagzero: Final[_Array1[np.int_]] = ... +lagone: Final[_Array1[np.int_]] = ... +lagx: Final[_Array2[np.int_]] = ... 
-lagline: _FuncLine[L["lagline"]] -lagfromroots: _FuncFromRoots[L["lagfromroots"]] -lagadd: _FuncBinOp[L["lagadd"]] -lagsub: _FuncBinOp[L["lagsub"]] -lagmulx: _FuncUnOp[L["lagmulx"]] -lagmul: _FuncBinOp[L["lagmul"]] -lagdiv: _FuncBinOp[L["lagdiv"]] -lagpow: _FuncPow[L["lagpow"]] -lagder: _FuncDer[L["lagder"]] -lagint: _FuncInteg[L["lagint"]] -lagval: _FuncVal[L["lagval"]] -lagval2d: _FuncVal2D[L["lagval2d"]] -lagval3d: _FuncVal3D[L["lagval3d"]] -lagvalfromroots: _FuncValFromRoots[L["lagvalfromroots"]] -laggrid2d: _FuncVal2D[L["laggrid2d"]] -laggrid3d: _FuncVal3D[L["laggrid3d"]] -lagvander: _FuncVander[L["lagvander"]] -lagvander2d: _FuncVander2D[L["lagvander2d"]] -lagvander3d: _FuncVander3D[L["lagvander3d"]] -lagfit: _FuncFit[L["lagfit"]] -lagcompanion: _FuncCompanion[L["lagcompanion"]] -lagroots: _FuncRoots[L["lagroots"]] -laggauss: _FuncGauss[L["laggauss"]] -lagweight: _FuncWeight[L["lagweight"]] +lagline: Final[_FuncLine] = ... +lagfromroots: Final[_FuncFromRoots] = ... +lagadd: Final[_FuncBinOp] = ... +lagsub: Final[_FuncBinOp] = ... +lagmulx: Final[_FuncUnOp] = ... +lagmul: Final[_FuncBinOp] = ... +lagdiv: Final[_FuncBinOp] = ... +lagpow: Final[_FuncPow] = ... +lagder: Final[_FuncDer] = ... +lagint: Final[_FuncInteg] = ... +lagval: Final[_FuncVal] = ... +lagval2d: Final[_FuncVal2D] = ... +lagval3d: Final[_FuncVal3D] = ... +laggrid2d: Final[_FuncVal2D] = ... +laggrid3d: Final[_FuncVal3D] = ... +lagvander: Final[_FuncVander] = ... +lagvander2d: Final[_FuncVander2D] = ... +lagvander3d: Final[_FuncVander3D] = ... +lagfit: Final[_FuncFit] = ... +lagcompanion: Final[_FuncCompanion] = ... +lagroots: Final[_FuncRoots] = ... +laggauss: Final[_FuncGauss] = ... +lagweight: Final[_FuncWeight] = ... - -class Laguerre(ABCPolyBase[L["L"]]): ... +class Laguerre(ABCPolyBase[L["L"]]): + basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/blimgui/dist64/numpy/polynomial/legendre.py b/blimgui/dist64/numpy/polynomial/legendre.py index cfce892..e0dfcca 100644 --- a/blimgui/dist64/numpy/polynomial/legendre.py +++ b/blimgui/dist64/numpy/polynomial/legendre.py @@ -80,8 +80,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -191,7 +189,7 @@ def leg2poly(c): """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -203,8 +201,8 @@ def leg2poly(c): # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 - c0 = polysub(c[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, (polymulx(c1) * (2 * i - 1)) / i) return polyadd(c0, polymulx(c1)) @@ -452,14 +450,14 @@ def legmulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 + prd[0] = c[0] * 0 prd[1] = c[0] for i in range(1, len(c)): j = i + 1 k = i - 1 s = i + j - prd[j] = (c[i]*j)/s - prd[k] += (c[i]*i)/s + prd[j] = (c[i] * j) / s + prd[k] += (c[i] * i) / s return prd @@ -514,20 +512,20 @@ def legmul(c1, c2): xs = c2 if len(c) == 1: - c0 = c[0]*xs + c0 = c[0] * xs c1 = 0 elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs + c0 = c[0] * xs + c1 = c[1] * xs else: nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs + c0 = c[-2] * xs + c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) - c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) + c0 = legsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = legadd(tmp, (legmulx(c1) * (2 * nd - 1)) / nd) return legadd(c0, legmulx(c1)) @@ -676,7 +674,7 @@ def legder(c, m=1, scl=1, axis=0): iaxis = 
pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -684,17 +682,17 @@ def legder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 2, -1): - der[j - 1] = (2*j - 1)*c[j] + der[j - 1] = (2 * j - 1) * c[j] c[j - 2] += c[j] if n > 1: - der[1] = 3*c[2] + der[1] = 3 * c[2] der[0] = c[1] c = der c = np.moveaxis(c, 0, iaxis) @@ -799,13 +797,13 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -813,12 +811,12 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 + tmp[0] = c[0] * 0 tmp[1] = c[0] if n > 1: - tmp[2] = c[1]/3 + tmp[2] = c[1] / 3 for j in range(2, n): - t = c[j]/(2*j + 1) + t = c[j] / (2 * j + 1) tmp[j + 1] = t tmp[j - 1] -= t tmp[0] += k[i] - legval(lbnd, tmp) @@ -890,7 +888,7 @@ def legval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) if len(c) == 1: c0 = c[0] @@ -905,9 +903,9 @@ def legval(x, c, tensor=True): for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = c[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*x*(2*nd - 1))/nd - return c0 + c1*x + c0 = c[-i] - c1 * ((nd - 1) / nd) + c1 = tmp + 
c1 * x * ((2 * nd - 1) / nd) + return c0 + c1 * x def legval2d(x, y, c): @@ -1140,11 +1138,11 @@ def legvander(x, deg): v = np.empty(dims, dtype=dtyp) # Use forward recursion to generate the entries. This is not as accurate # as reverse recursion in this application but it is more efficient. - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): - v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i + v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i return np.moveaxis(v, 0, -1) @@ -1164,7 +1162,7 @@ def legvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares @@ -1395,16 +1393,16 @@ def legcompanion(c): if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-c[0]/c[1]]]) + return np.array([[-c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = 1./np.sqrt(2*np.arange(n) + 1) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] + scl = 1. / np.sqrt(2 * np.arange(n) + 1) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.arange(1, n) * scl[:n - 1] * scl[1:n] bot[...] 
= top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * (n / (2 * n - 1)) return mat @@ -1460,11 +1458,11 @@ def legroots(c): if len(c) < 2: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-c[0]/c[1]]) + return np.array([-c[0] / c[1]]) # rotated companion matrix reduces error - m = legcompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = legcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() return r @@ -1508,25 +1506,25 @@ def leggauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) + c = np.array([0] * deg + [1]) m = legcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = legval(x, c) df = legval(x, legder(c)) - x -= dy/df + x -= dy / df # compute the weights. We scale the factor to avoid possible numerical # overflow. fm = legval(x, c[1:]) fm /= np.abs(fm).max() df /= np.abs(df).max() - w = 1/(fm * df) + w = 1 / (fm * df) # for Legendre we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 # scale w to get the right value w *= 2. / w.sum() @@ -1552,7 +1550,7 @@ def legweight(x): w : ndarray The weight function at `x`. 
""" - w = x*0.0 + 1.0 + w = x * 0.0 + 1.0 return w # diff --git a/blimgui/dist64/numpy/polynomial/legendre.pyi b/blimgui/dist64/numpy/polynomial/legendre.pyi index d16b30c..4fdc233 100644 --- a/blimgui/dist64/numpy/polynomial/legendre.pyi +++ b/blimgui/dist64/numpy/polynomial/legendre.pyi @@ -1,4 +1,4 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,37 +62,39 @@ __all__ = [ "legweight", ] -poly2leg: _FuncPoly2Ortho[L["poly2leg"]] -leg2poly: _FuncUnOp[L["leg2poly"]] +poly2leg: Final[_FuncPoly2Ortho] = ... +leg2poly: Final[_FuncUnOp] = ... -legdomain: Final[_Array2[np.float64]] -legzero: Final[_Array1[np.int_]] -legone: Final[_Array1[np.int_]] -legx: Final[_Array2[np.int_]] +legdomain: Final[_Array2[np.float64]] = ... +legzero: Final[_Array1[np.int_]] = ... +legone: Final[_Array1[np.int_]] = ... +legx: Final[_Array2[np.int_]] = ... 
-legline: _FuncLine[L["legline"]] -legfromroots: _FuncFromRoots[L["legfromroots"]] -legadd: _FuncBinOp[L["legadd"]] -legsub: _FuncBinOp[L["legsub"]] -legmulx: _FuncUnOp[L["legmulx"]] -legmul: _FuncBinOp[L["legmul"]] -legdiv: _FuncBinOp[L["legdiv"]] -legpow: _FuncPow[L["legpow"]] -legder: _FuncDer[L["legder"]] -legint: _FuncInteg[L["legint"]] -legval: _FuncVal[L["legval"]] -legval2d: _FuncVal2D[L["legval2d"]] -legval3d: _FuncVal3D[L["legval3d"]] -legvalfromroots: _FuncValFromRoots[L["legvalfromroots"]] -leggrid2d: _FuncVal2D[L["leggrid2d"]] -leggrid3d: _FuncVal3D[L["leggrid3d"]] -legvander: _FuncVander[L["legvander"]] -legvander2d: _FuncVander2D[L["legvander2d"]] -legvander3d: _FuncVander3D[L["legvander3d"]] -legfit: _FuncFit[L["legfit"]] -legcompanion: _FuncCompanion[L["legcompanion"]] -legroots: _FuncRoots[L["legroots"]] -leggauss: _FuncGauss[L["leggauss"]] -legweight: _FuncWeight[L["legweight"]] +legline: Final[_FuncLine] = ... +legfromroots: Final[_FuncFromRoots] = ... +legadd: Final[_FuncBinOp] = ... +legsub: Final[_FuncBinOp] = ... +legmulx: Final[_FuncUnOp] = ... +legmul: Final[_FuncBinOp] = ... +legdiv: Final[_FuncBinOp] = ... +legpow: Final[_FuncPow] = ... +legder: Final[_FuncDer] = ... +legint: Final[_FuncInteg] = ... +legval: Final[_FuncVal] = ... +legval2d: Final[_FuncVal2D] = ... +legval3d: Final[_FuncVal3D] = ... +leggrid2d: Final[_FuncVal2D] = ... +leggrid3d: Final[_FuncVal3D] = ... +legvander: Final[_FuncVander] = ... +legvander2d: Final[_FuncVander2D] = ... +legvander3d: Final[_FuncVander3D] = ... +legfit: Final[_FuncFit] = ... +legcompanion: Final[_FuncCompanion] = ... +legroots: Final[_FuncRoots] = ... +leggauss: Final[_FuncGauss] = ... +legweight: Final[_FuncWeight] = ... -class Legendre(ABCPolyBase[L["P"]]): ... +class Legendre(ABCPolyBase[L["P"]]): + basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/blimgui/dist64/numpy/polynomial/polynomial.py b/blimgui/dist64/numpy/polynomial/polynomial.py index 3c2476a..89e22f3 100644 --- a/blimgui/dist64/numpy/polynomial/polynomial.py +++ b/blimgui/dist64/numpy/polynomial/polynomial.py @@ -81,8 +81,7 @@ 'polycompanion'] import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index +from numpy._core.overrides import array_function_dispatch as _array_function_dispatch from . import polyutils as pu from ._polybase import ABCPolyBase @@ -323,7 +322,7 @@ def polymulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 + prd[0] = c[0] * 0 prd[1:] = c return prd @@ -408,20 +407,20 @@ def polydiv(c1, c2): lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: - return c1[:1]*0, c1 + return c1[:1] * 0, c1 elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 + return c1 / c2[-1], c1[:1] * 0 else: dlen = lc1 - lc2 scl = c2[-1] - c2 = c2[:-1]/scl + c2 = c2[:-1] / scl i = dlen j = lc1 - 1 while i >= 0: - c1[i:j] -= c2*c1[j] + c1[i:j] -= c2 * c1[j] i -= 1 j -= 1 - return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) + return c1[j + 1:] / scl, pu.trimseq(c1[:j + 1]) def polypow(c, pow, maxpower=None): @@ -522,7 +521,7 @@ def polyder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -530,14 +529,14 @@ def polyder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=cdt) for j in range(n, 0, -1): - der[j - 1] = j*c[j] + der[j - 1] = j * c[j] c = der c = np.moveaxis(c, 0, iaxis) return c @@ -636,12 
+635,12 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) c = np.moveaxis(c, iaxis, 0) for i in range(cnt): n = len(c) @@ -650,10 +649,10 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) - tmp[0] = c[0]*0 + tmp[0] = c[0] * 0 tmp[1] = c[0] for j in range(1, n): - tmp[j + 1] = c[j]/(j + 1) + tmp[j + 1] = c[j] / (j + 1) tmp[0] += k[i] - polyval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) @@ -716,6 +715,10 @@ def polyval(x, c, tensor=True): ----- The evaluation uses Horner's method. + When using coefficients from polynomials created with ``Polynomial.fit()``, + use ``p(x)`` or ``polyval(x, p.convert().coef)`` to handle domain/window + scaling correctly, not ``polyval(x, p.coef)``. 
+ Examples -------- >>> import numpy as np @@ -747,11 +750,11 @@ def polyval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) - c0 = c[-1] + x*0 + c0 = c[-1] + x * 0 for i in range(2, len(c) + 1): - c0 = c[-i] + c0*x + c0 = c[-i] + c0 * x return c0 @@ -836,12 +839,18 @@ def polyvalfromroots(x, r, tensor=True): x = np.asarray(x) if isinstance(x, np.ndarray): if tensor: - r = r.reshape(r.shape + (1,)*x.ndim) + r = r.reshape(r.shape + (1,) * x.ndim) elif x.ndim >= r.ndim: raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) +def _polyval2d_dispatcher(x, y, c): + return (x, y, c) + +def _polygrid2d_dispatcher(x, y, c): + return (x, y, c) +@_array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -893,7 +902,7 @@ def polyval2d(x, y, c): """ return pu._valnd(polyval, c, x, y) - +@_array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. @@ -1121,11 +1130,11 @@ def polyvander(x, deg): dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): - v[i] = v[i-1]*x + v[i] = v[i - 1] * x return np.moveaxis(v, 0, -1) @@ -1469,13 +1478,13 @@ def polycompanion(c): if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-c[0]/c[1]]]) + return np.array([[-c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - bot = mat.reshape(-1)[n::n+1] + bot = mat.reshape(-1)[n::n + 1] bot[...] 
= 1 - mat[:, -1] -= c[:-1]/c[-1] + mat[:, -1] -= c[:-1] / c[-1] return mat @@ -1533,11 +1542,10 @@ def polyroots(c): if len(c) < 2: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-c[0]/c[1]]) + return np.array([-c[0] / c[1]]) - # rotated companion matrix reduces error - m = polycompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = polycompanion(c) + r = np.linalg.eigvals(m) r.sort() return r diff --git a/blimgui/dist64/numpy/polynomial/polynomial.pyi b/blimgui/dist64/numpy/polynomial/polynomial.pyi index 8b12430..738214c 100644 --- a/blimgui/dist64/numpy/polynomial/polynomial.pyi +++ b/blimgui/dist64/numpy/polynomial/polynomial.pyi @@ -1,12 +1,19 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, overload import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _FloatLike_co, + _NumberLike_co, +) + from ._polybase import ABCPolyBase from ._polytypes import ( _Array1, _Array2, - _FuncVal2D, - _FuncVal3D, + _ArrayLikeCoef_co, _FuncBinOp, _FuncCompanion, _FuncDer, @@ -18,10 +25,11 @@ from ._polytypes import ( _FuncRoots, _FuncUnOp, _FuncVal, + _FuncVal2D, + _FuncVal3D, _FuncVander, _FuncVander2D, _FuncVander3D, - _FuncValFromRoots, ) from .polyutils import trimcoef as polytrim @@ -56,32 +64,46 @@ __all__ = [ "polycompanion", ] -polydomain: Final[_Array2[np.float64]] -polyzero: Final[_Array1[np.int_]] -polyone: Final[_Array1[np.int_]] -polyx: Final[_Array2[np.int_]] +polydomain: Final[_Array2[np.float64]] = ... +polyzero: Final[_Array1[np.int_]] = ... +polyone: Final[_Array1[np.int_]] = ... +polyx: Final[_Array2[np.int_]] = ... + +polyline: Final[_FuncLine] = ... +polyfromroots: Final[_FuncFromRoots] = ... +polyadd: Final[_FuncBinOp] = ... +polysub: Final[_FuncBinOp] = ... +polymulx: Final[_FuncUnOp] = ... +polymul: Final[_FuncBinOp] = ... +polydiv: Final[_FuncBinOp] = ... +polypow: Final[_FuncPow] = ... +polyder: Final[_FuncDer] = ... 
+polyint: Final[_FuncInteg] = ... +polyval: Final[_FuncVal] = ... +polyval2d: Final[_FuncVal2D] = ... +polyval3d: Final[_FuncVal3D] = ... + +@overload +def polyvalfromroots(x: _FloatLike_co, r: _FloatLike_co, tensor: bool = True) -> np.float64 | Any: ... +@overload +def polyvalfromroots(x: _NumberLike_co, r: _NumberLike_co, tensor: bool = True) -> np.complex128 | Any: ... +@overload +def polyvalfromroots(x: _ArrayLikeFloat_co, r: _ArrayLikeFloat_co, tensor: bool = True) -> npt.NDArray[np.float64 | Any]: ... +@overload +def polyvalfromroots(x: _ArrayLikeNumber_co, r: _ArrayLikeNumber_co, tensor: bool = True) -> npt.NDArray[np.complex128 | Any]: ... +@overload +def polyvalfromroots(x: _ArrayLikeCoef_co, r: _ArrayLikeCoef_co, tensor: bool = True) -> npt.NDArray[np.object_ | Any]: ... -polyline: _FuncLine[L["Polyline"]] -polyfromroots: _FuncFromRoots[L["polyfromroots"]] -polyadd: _FuncBinOp[L["polyadd"]] -polysub: _FuncBinOp[L["polysub"]] -polymulx: _FuncUnOp[L["polymulx"]] -polymul: _FuncBinOp[L["polymul"]] -polydiv: _FuncBinOp[L["polydiv"]] -polypow: _FuncPow[L["polypow"]] -polyder: _FuncDer[L["polyder"]] -polyint: _FuncInteg[L["polyint"]] -polyval: _FuncVal[L["polyval"]] -polyval2d: _FuncVal2D[L["polyval2d"]] -polyval3d: _FuncVal3D[L["polyval3d"]] -polyvalfromroots: _FuncValFromRoots[L["polyvalfromroots"]] -polygrid2d: _FuncVal2D[L["polygrid2d"]] -polygrid3d: _FuncVal3D[L["polygrid3d"]] -polyvander: _FuncVander[L["polyvander"]] -polyvander2d: _FuncVander2D[L["polyvander2d"]] -polyvander3d: _FuncVander3D[L["polyvander3d"]] -polyfit: _FuncFit[L["polyfit"]] -polycompanion: _FuncCompanion[L["polycompanion"]] -polyroots: _FuncRoots[L["polyroots"]] +polygrid2d: Final[_FuncVal2D] = ... +polygrid3d: Final[_FuncVal3D] = ... +polyvander: Final[_FuncVander] = ... +polyvander2d: Final[_FuncVander2D] = ... +polyvander3d: Final[_FuncVander3D] = ... +polyfit: Final[_FuncFit] = ... +polycompanion: Final[_FuncCompanion] = ... +polyroots: Final[_FuncRoots] = ... 
-class Polynomial(ABCPolyBase[None]): ... +class Polynomial(ABCPolyBase[None]): + basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/blimgui/dist64/numpy/polynomial/polyutils.py b/blimgui/dist64/numpy/polynomial/polyutils.py index 2f375b2..3fb5c78 100644 --- a/blimgui/dist64/numpy/polynomial/polyutils.py +++ b/blimgui/dist64/numpy/polynomial/polyutils.py @@ -18,15 +18,12 @@ mapparms parameters of the linear map between domains. """ -import operator import functools +import operator import warnings import numpy as np -from numpy._core.multiarray import dragon4_positional, dragon4_scientific -from numpy.exceptions import RankWarning - __all__ = [ 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'format_float'] @@ -60,7 +57,7 @@ def trimseq(seq): for i in range(len(seq) - 1, -1, -1): if seq[i] != 0: break - return seq[:i+1] + return seq[:i + 1] def as_series(alist, trim=True): @@ -118,25 +115,28 @@ def as_series(alist, trim=True): for a in arrays: if a.size == 0: raise ValueError("Coefficient array is empty") - if any(a.ndim != 1 for a in arrays): - raise ValueError("Coefficient array is not 1-d") + if a.ndim != 1: + raise ValueError("Coefficient array is not 1-d") if trim: arrays = [trimseq(a) for a in arrays] - if any(a.dtype == np.dtype(object) for a in arrays): + try: + dtype = np.common_type(*arrays) + except Exception as e: + object_dtype = np.dtypes.ObjectDType() + has_one_object_type = False ret = [] for a in arrays: - if a.dtype != np.dtype(object): - tmp = np.empty(len(a), dtype=np.dtype(object)) + if a.dtype != object_dtype: + tmp = np.empty(len(a), dtype=object_dtype) tmp[:] = a[:] ret.append(tmp) else: + has_one_object_type = True ret.append(a.copy()) - else: - try: - dtype = np.common_type(*arrays) 
- except Exception as e: + if not has_one_object_type: raise ValueError("Coefficient arrays have no common type") from e + else: ret = [np.array(a, copy=True, dtype=dtype) for a in arrays] return ret @@ -187,7 +187,7 @@ def trimcoef(c, tol=0): [c] = as_series([c]) [ind] = np.nonzero(np.abs(c) > tol) if len(ind) == 0: - return c[:1]*0 + return c[:1] * 0 else: return c[:ind[-1] + 1].copy() @@ -281,8 +281,8 @@ def mapparms(old, new): """ oldlen = old[1] - old[0] newlen = new[1] - new[0] - off = (old[1]*new[0] - old[0]*new[1])/oldlen - scl = newlen/oldlen + off = (old[1] * new[0] - old[0] * new[1]) / oldlen + scl = newlen / oldlen return off, scl def mapdomain(x, old, new): @@ -352,7 +352,7 @@ def mapdomain(x, old, new): if type(x) not in (int, float, complex) and not isinstance(x, np.generic): x = np.asanyarray(x) off, scl = mapparms(old, new) - return off + scl*x + return off + scl * x def _nth_slice(i, ndim): @@ -405,7 +405,7 @@ def _vander_nd(vander_fs, points, degrees): ------- vander_nd : ndarray An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``. 
- """ + """ # noqa: E501 n_dims = len(vander_fs) if n_dims != len(points): raise ValueError( @@ -462,7 +462,7 @@ def _fromroots(line_f, mul_f, roots): n = len(p) while n > 1: m, r = divmod(n, 2) - tmp = [mul_f(p[i], p[i+m]) for i in range(m)] + tmp = [mul_f(p[i], p[i + m]) for i in range(m)] if r: tmp[0] = mul_f(tmp[0], p[-1]) p = tmp @@ -538,16 +538,16 @@ def _div(mul_f, c1, c2): lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: - return c1[:1]*0, c1 + return c1[:1] * 0, c1 elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 + return c1 / c2[-1], c1[:1] * 0 else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): - p = mul_f([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] + p = mul_f([0] * i + [1], c2) + q = rem[-1] / p[-1] + rem = rem[:-1] - q * p[:-1] quo[i] = q return quo, trimseq(rem) @@ -634,7 +634,7 @@ def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): # set rcond if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps + rcond = len(x) * np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. if issubclass(lhs.dtype.type, np.complexfloating): @@ -644,22 +644,22 @@ def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): scl[scl == 0] = 1 # Solve the least squares problem. 
- c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T + c, resids, rank, s = np.linalg.lstsq(lhs.T / scl, rhs.T, rcond) + c = (c.T / scl).T # Expand c to include non-fitted coefficients which are set to zero if deg.ndim > 0: if c.ndim == 2: - cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) else: - cc = np.zeros(lmax+1, dtype=c.dtype) + cc = np.zeros(lmax + 1, dtype=c.dtype) cc[deg] = c c = cc # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" - warnings.warn(msg, RankWarning, stacklevel=2) + warnings.warn(msg, np.exceptions.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] @@ -723,6 +723,8 @@ def _as_int(x, desc): def format_float(x, parens=False): + from numpy._core.multiarray import dragon4_positional, dragon4_scientific + if not np.issubdtype(type(x), np.floating): return str(x) @@ -736,7 +738,7 @@ def format_float(x, parens=False): exp_format = False if x != 0: a = np.abs(x) - if a >= 1.e8 or a < 10**min(0, -(opts['precision']-1)//2): + if a >= 1.e8 or a < 10**min(0, -(opts['precision'] - 1) // 2): exp_format = True trim, unique = '0', True diff --git a/blimgui/dist64/numpy/polynomial/polyutils.pyi b/blimgui/dist64/numpy/polynomial/polyutils.pyi index 06e29b9..857d833 100644 --- a/blimgui/dist64/numpy/polynomial/polyutils.pyi +++ b/blimgui/dist64/numpy/polynomial/polyutils.pyi @@ -1,384 +1,262 @@ from collections.abc import Callable, Iterable, Sequence from typing import ( - Any, Final, Literal, + Protocol, SupportsIndex, TypeAlias, TypeVar, overload, + type_check_only, ) import numpy as np import numpy.typing as npt from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, _FloatLike_co, _NumberLike_co, - - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, ) from ._polytypes import ( _AnyInt, - _CoefLike_co, - _Array2, - _Tuple2, - - _FloatSeries, + _ArrayLikeCoef_co, + 
_CoefArray, + _CoefLike_co, _CoefSeries, - _ComplexSeries, - _ObjectSeries, - _ComplexArray, + _ComplexSeries, _FloatArray, - _CoefArray, + _FloatSeries, + _FuncBinOp, _ObjectArray, - - _SeriesLikeInt_co, - _SeriesLikeFloat_co, - _SeriesLikeComplex_co, + _ObjectSeries, _SeriesLikeCoef_co, + _SeriesLikeComplex_co, + _SeriesLikeFloat_co, + _SeriesLikeInt_co, + _SeriesLikeObject_co, + _Tuple2, +) - _ArrayLikeCoef_co, +__all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] - _FuncBinOp, - _FuncValND, - _FuncVanderND, -) +_T = TypeVar("_T") +_SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co]) -__all__: Final[Sequence[str]] = [ - "as_series", - "format_float", - "getdomain", - "mapdomain", - "mapparms", - "trimcoef", - "trimseq", -] +_AnyLineF: TypeAlias = Callable[[float, float], _CoefArray] +_AnyMulF: TypeAlias = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] +_AnyVanderF: TypeAlias = Callable[[np.ndarray, int], _CoefArray] -_AnyLineF: TypeAlias = Callable[ - [_CoefLike_co, _CoefLike_co], - _CoefArray, -] -_AnyMulF: TypeAlias = Callable[ - [npt.ArrayLike, npt.ArrayLike], - _CoefArray, -] -_AnyVanderF: TypeAlias = Callable[ - [npt.ArrayLike, SupportsIndex], - _CoefArray, -] +@type_check_only +class _ValFunc(Protocol[_T]): + def __call__(self, x: np.ndarray, c: _T, /, *, tensor: bool = True) -> _T: ... + +### @overload -def as_series( - alist: npt.NDArray[np.integer[Any]] | _FloatArray, - trim: bool = ..., -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: _ComplexArray, - trim: bool = ..., -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: _ObjectArray, - trim: bool = ..., -) -> list[_ObjectSeries]: ... -@overload -def as_series( # type: ignore[overload-overlap] - alist: Iterable[_FloatArray | npt.NDArray[np.integer[Any]]], - trim: bool = ..., -) -> list[_FloatSeries]: ... 
-@overload -def as_series( - alist: Iterable[_ComplexArray], - trim: bool = ..., -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: Iterable[_ObjectArray], - trim: bool = ..., -) -> list[_ObjectSeries]: ... -@overload -def as_series( # type: ignore[overload-overlap] - alist: Iterable[_SeriesLikeFloat_co | float], - trim: bool = ..., -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: Iterable[_SeriesLikeComplex_co | complex], - trim: bool = ..., -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: Iterable[_SeriesLikeCoef_co | object], - trim: bool = ..., -) -> list[_ObjectSeries]: ... +def as_series(alist: npt.NDArray[np.integer] | _FloatArray, trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: _ComplexArray, trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: _ObjectArray, trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_FloatArray | npt.NDArray[np.integer]], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_ComplexArray], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_ObjectArray], trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeFloat_co | float], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... -_T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) -def trimseq(seq: _T_seq) -> _T_seq: ... +# +def trimseq(seq: _SeqT) -> _SeqT: ... +# @overload -def trimcoef( # type: ignore[overload-overlap] - c: npt.NDArray[np.integer[Any]] | _FloatArray, - tol: _FloatLike_co = ..., -) -> _FloatSeries: ... 
+def trimcoef(c: npt.NDArray[np.integer] | _FloatArray, tol: _FloatLike_co = 0) -> _FloatSeries: ... @overload -def trimcoef( - c: _ComplexArray, - tol: _FloatLike_co = ..., -) -> _ComplexSeries: ... +def trimcoef(c: _ComplexArray, tol: _FloatLike_co = 0) -> _ComplexSeries: ... @overload -def trimcoef( - c: _ObjectArray, - tol: _FloatLike_co = ..., -) -> _ObjectSeries: ... +def trimcoef(c: _ObjectArray, tol: _FloatLike_co = 0) -> _ObjectSeries: ... @overload -def trimcoef( # type: ignore[overload-overlap] - c: _SeriesLikeFloat_co | float, - tol: _FloatLike_co = ..., -) -> _FloatSeries: ... +def trimcoef(c: _SeriesLikeFloat_co | float, tol: _FloatLike_co = 0) -> _FloatSeries: ... @overload -def trimcoef( - c: _SeriesLikeComplex_co | complex, - tol: _FloatLike_co = ..., -) -> _ComplexSeries: ... +def trimcoef(c: _SeriesLikeComplex_co | complex, tol: _FloatLike_co = 0) -> _ComplexSeries: ... @overload -def trimcoef( - c: _SeriesLikeCoef_co | object, - tol: _FloatLike_co = ..., -) -> _ObjectSeries: ... +def trimcoef(c: _SeriesLikeCoef_co | object, tol: _FloatLike_co = 0) -> _ObjectSeries: ... +# @overload -def getdomain( # type: ignore[overload-overlap] - x: _FloatArray | npt.NDArray[np.integer[Any]], -) -> _Array2[np.float64]: ... +def getdomain(x: _FloatArray | npt.NDArray[np.integer]) -> _Array2[np.float64]: ... @overload -def getdomain( - x: _ComplexArray, -) -> _Array2[np.complex128]: ... +def getdomain(x: _ComplexArray) -> _Array2[np.complex128]: ... @overload -def getdomain( - x: _ObjectArray, -) -> _Array2[np.object_]: ... +def getdomain(x: _ObjectArray) -> _Array2[np.object_]: ... @overload -def getdomain( # type: ignore[overload-overlap] - x: _SeriesLikeFloat_co | float, -) -> _Array2[np.float64]: ... +def getdomain(x: _SeriesLikeFloat_co | float) -> _Array2[np.float64]: ... @overload -def getdomain( - x: _SeriesLikeComplex_co | complex, -) -> _Array2[np.complex128]: ... +def getdomain(x: _SeriesLikeComplex_co | complex) -> _Array2[np.complex128]: ... 
@overload -def getdomain( - x: _SeriesLikeCoef_co | object, -) -> _Array2[np.object_]: ... +def getdomain(x: _SeriesLikeCoef_co | object) -> _Array2[np.object_]: ... +# +@overload +def mapparms(old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer]) -> _Tuple2[np.floating]: ... +@overload +def mapparms(old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: npt.NDArray[np.object_ | np.number], new: npt.NDArray[np.object_ | np.number]) -> _Tuple2[object]: ... @overload -def mapparms( # type: ignore[overload-overlap] - old: npt.NDArray[np.floating[Any] | np.integer[Any]], - new: npt.NDArray[np.floating[Any] | np.integer[Any]], -) -> _Tuple2[np.floating[Any]]: ... -@overload -def mapparms( - old: npt.NDArray[np.number[Any]], - new: npt.NDArray[np.number[Any]], -) -> _Tuple2[np.complexfloating[Any, Any]]: ... -@overload -def mapparms( - old: npt.NDArray[np.object_ | np.number[Any]], - new: npt.NDArray[np.object_ | np.number[Any]], -) -> _Tuple2[object]: ... -@overload -def mapparms( # type: ignore[overload-overlap] - old: Sequence[float], - new: Sequence[float], -) -> _Tuple2[float]: ... -@overload -def mapparms( - old: Sequence[complex], - new: Sequence[complex], -) -> _Tuple2[complex]: ... -@overload -def mapparms( - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> _Tuple2[np.floating[Any]]: ... -@overload -def mapparms( - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> _Tuple2[np.complexfloating[Any, Any]]: ... -@overload -def mapparms( - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> _Tuple2[object]: ... +def mapparms(old: Sequence[float], new: Sequence[float]) -> _Tuple2[float]: ... +@overload +def mapparms(old: Sequence[complex], new: Sequence[complex]) -> _Tuple2[complex]: ... +@overload +def mapparms(old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _Tuple2[np.floating]: ... 
+@overload +def mapparms(old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _Tuple2[object]: ... +# +@overload +def mapdomain(x: _FloatLike_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> np.floating: ... @overload -def mapdomain( # type: ignore[overload-overlap] - x: _FloatLike_co, - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> np.floating[Any]: ... +def mapdomain(x: _NumberLike_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> np.complexfloating: ... @overload def mapdomain( - x: _NumberLike_co, - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> np.complexfloating[Any, Any]: ... -@overload -def mapdomain( # type: ignore[overload-overlap] - x: npt.NDArray[np.floating[Any] | np.integer[Any]], - old: npt.NDArray[np.floating[Any] | np.integer[Any]], - new: npt.NDArray[np.floating[Any] | np.integer[Any]], + x: npt.NDArray[np.floating | np.integer], + old: npt.NDArray[np.floating | np.integer], + new: npt.NDArray[np.floating | np.integer], ) -> _FloatSeries: ... @overload -def mapdomain( - x: npt.NDArray[np.number[Any]], - old: npt.NDArray[np.number[Any]], - new: npt.NDArray[np.number[Any]], -) -> _ComplexSeries: ... +def mapdomain(x: npt.NDArray[np.number], old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _ComplexSeries: ... @overload def mapdomain( - x: npt.NDArray[np.object_ | np.number[Any]], - old: npt.NDArray[np.object_ | np.number[Any]], - new: npt.NDArray[np.object_ | np.number[Any]], + x: npt.NDArray[np.object_ | np.number], + old: npt.NDArray[np.object_ | np.number], + new: npt.NDArray[np.object_ | np.number], ) -> _ObjectSeries: ... @overload -def mapdomain( # type: ignore[overload-overlap] - x: _SeriesLikeFloat_co, - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> _FloatSeries: ... 
+def mapdomain(x: _SeriesLikeFloat_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload -def mapdomain( - x: _SeriesLikeComplex_co, - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> _ComplexSeries: ... +def mapdomain(x: _SeriesLikeComplex_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload -def mapdomain( - x: _SeriesLikeCoef_co, - old:_SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> _ObjectSeries: ... +def mapdomain(x: _SeriesLikeCoef_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _ObjectSeries: ... @overload -def mapdomain( - x: _CoefLike_co, - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> object: ... +def mapdomain(x: _CoefLike_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> object: ... + +# +def _nth_slice(i: SupportsIndex, ndim: SupportsIndex) -> tuple[slice | None, ...]: ... -def _nth_slice( - i: SupportsIndex, - ndim: SupportsIndex, -) -> tuple[None | slice, ...]: ... +# keep in sync with `vander_nd_flat` +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... 
-_vander_nd: _FuncVanderND[Literal["_vander_nd"]] -_vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] +# keep in sync with `vander_nd` +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... # keep in sync with `._polytypes._FuncFromRoots` @overload -def _fromroots( # type: ignore[overload-overlap] - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeFloat_co, -) -> _FloatSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeComplex_co, -) -> _ComplexSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeCoef_co, -) -> _ObjectSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) -> _ObjectSeries: ... @overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeCoef_co, -) -> _CoefSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... + +# keep in sync with `_gridnd` +def _valnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... 
-_valnd: _FuncValND[Literal["_valnd"]] -_gridnd: _FuncValND[Literal["_gridnd"]] +# keep in sync with `_valnd` +def _gridnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... # keep in sync with `_polytypes._FuncBinOp` @overload -def _div( # type: ignore[overload-overlap] - mul_f: _AnyMulF, - c1: _SeriesLikeFloat_co, - c2: _SeriesLikeFloat_co, -) -> _Tuple2[_FloatSeries]: ... -@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeComplex_co, - c2: _SeriesLikeComplex_co, -) -> _Tuple2[_ComplexSeries]: ... -@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, -) -> _Tuple2[_ObjectSeries]: ... -@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, -) -> _Tuple2[_CoefSeries]: ... +def _div(mul_f: _AnyMulF, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _Tuple2[_FloatSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _Tuple2[_ComplexSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeObject_co, c2: _SeriesLikeObject_co) -> _Tuple2[_ObjectSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _Tuple2[_CoefSeries]: ... -_add: Final[_FuncBinOp] -_sub: Final[_FuncBinOp] +_add: Final[_FuncBinOp] = ... +_sub: Final[_FuncBinOp] = ... # keep in sync with `_polytypes._FuncPow` @overload -def _pow( # type: ignore[overload-overlap] - mul_f: _AnyMulF, - c: _SeriesLikeFloat_co, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., -) -> _FloatSeries: ... +def _pow(mul_f: _AnyMulF, c: _SeriesLikeFloat_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _FloatSeries: ... @overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeComplex_co, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., -) -> _ComplexSeries: ... -@overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeCoef_co, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., -) -> _ObjectSeries: ... 
+def _pow(mul_f: _AnyMulF, c: _SeriesLikeComplex_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ComplexSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeObject_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ObjectSeries: ... @overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeCoef_co, - pow: _AnyInt, - maxpower: None | _AnyInt = ..., -) -> _CoefSeries: ... +def _pow(mul_f: _AnyMulF, c: _SeriesLikeCoef_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _CoefSeries: ... # keep in sync with `_polytypes._FuncFit` @overload -def _fit( # type: ignore[overload-overlap] +def _fit( vander_f: _AnyVanderF, x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, deg: _SeriesLikeInt_co, - domain: None | _SeriesLikeFloat_co = ..., - rcond: None | _FloatLike_co = ..., - full: Literal[False] = ..., - w: None | _SeriesLikeFloat_co = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = None, ) -> _FloatArray: ... @overload def _fit( @@ -386,10 +264,9 @@ def _fit( x: _SeriesLikeComplex_co, y: _ArrayLikeComplex_co, deg: _SeriesLikeInt_co, - domain: None | _SeriesLikeComplex_co = ..., - rcond: None | _FloatLike_co = ..., - full: Literal[False] = ..., - w: None | _SeriesLikeComplex_co = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeComplex_co | None = None, ) -> _ComplexArray: ... @overload def _fit( @@ -397,10 +274,9 @@ def _fit( x: _SeriesLikeCoef_co, y: _ArrayLikeCoef_co, deg: _SeriesLikeInt_co, - domain: None | _SeriesLikeCoef_co = ..., - rcond: None | _FloatLike_co = ..., - full: Literal[False] = ..., - w: None | _SeriesLikeCoef_co = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, ) -> _CoefArray: ... 
@overload def _fit( @@ -408,24 +284,24 @@ def _fit( x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, - domain: None | _SeriesLikeCoef_co, - rcond: None | _FloatLike_co , + rcond: _FloatLike_co | None, full: Literal[True], - /, - w: None | _SeriesLikeCoef_co = ..., -) -> tuple[_CoefSeries, Sequence[np.inexact[Any] | np.int32]]: ... + w: _SeriesLikeCoef_co | None = None, +) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... @overload def _fit( vander_f: _AnyVanderF, x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, - domain: None | _SeriesLikeCoef_co = ..., - rcond: None | _FloatLike_co = ..., + rcond: _FloatLike_co | None = None, *, full: Literal[True], - w: None | _SeriesLikeCoef_co = ..., -) -> tuple[_CoefSeries, Sequence[np.inexact[Any] | np.int32]]: ... + w: _SeriesLikeCoef_co | None = None, +) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... +# def _as_int(x: SupportsIndex, desc: str) -> int: ... -def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... + +# +def format_float(x: _FloatLike_co, parens: bool = False) -> str: ... 
diff --git a/blimgui/dist64/numpy/polynomial/tests/test_chebyshev.py b/blimgui/dist64/numpy/polynomial/tests/test_chebyshev.py index a3f8b96..97b00d2 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_chebyshev.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_chebyshev.py @@ -6,14 +6,13 @@ import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises def trim(x): return cheb.chebtrim(x, tol=1e-6) + T0 = [1] T1 = [0, 1] T2 = [-1, 0, 2] @@ -32,15 +31,15 @@ class TestPrivate: def test__cseries_to_zseries(self): for i in range(5): - inp = np.array([2] + [1]*i, np.double) - tgt = np.array([.5]*i + [2] + [.5]*i, np.double) + inp = np.array([2] + [1] * i, np.double) + tgt = np.array([.5] * i + [2] + [.5] * i, np.double) res = cheb._cseries_to_zseries(inp) assert_equal(res, tgt) def test__zseries_to_cseries(self): for i in range(5): - inp = np.array([.5]*i + [2] + [.5]*i, np.double) - tgt = np.array([2] + [1]*i, np.double) + inp = np.array([.5] * i + [2] + [.5] * i, np.double) + tgt = np.array([2] + [1] * i, np.double) res = cheb._zseries_to_cseries(inp) assert_equal(res, tgt) @@ -69,7 +68,7 @@ def test_chebadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = cheb.chebadd([0]*i + [1], [0]*j + [1]) + res = cheb.chebadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebsub(self): @@ -79,15 +78,15 @@ def test_chebsub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = cheb.chebsub([0]*i + [1], [0]*j + [1]) + res = cheb.chebsub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebmulx(self): assert_equal(cheb.chebmulx([0]), [0]) assert_equal(cheb.chebmulx([1]), [0, 1]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + 
[.5, 0, .5] + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [.5, 0, .5] assert_equal(cheb.chebmulx(ser), tgt) def test_chebmul(self): @@ -97,15 +96,15 @@ def test_chebmul(self): tgt = np.zeros(i + j + 1) tgt[i + j] += .5 tgt[abs(i - j)] += .5 - res = cheb.chebmul([0]*i + [1], [0]*j + [1]) + res = cheb.chebmul([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebdiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = cheb.chebadd(ci, cj) quo, rem = cheb.chebdiv(tgt, ci) res = cheb.chebadd(cheb.chebmul(quo, ci), rem) @@ -116,7 +115,7 @@ def test_chebpow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) + tgt = reduce(cheb.chebmul, [c] * j, np.array([1])) res = cheb.chebpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -128,25 +127,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_chebval(self): - #check empty input + # check empty input assert_equal(cheb.chebval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Tlist] for i in range(10): msg = f"At i={i}" tgt = y[i] - res = cheb.chebval(x, [0]*i + [1]) + res = cheb.chebval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(cheb.chebval(x, [1]).shape, dims) assert_equal(cheb.chebval(x, [1, 0]).shape, dims) @@ -156,15 +155,15 @@ def test_chebval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) - #test values - 
tgt = y1*y2 + # test values + tgt = y1 * y2 res = cheb.chebval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -173,15 +172,15 @@ def test_chebval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = cheb.chebval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -190,29 +189,29 @@ def test_chebgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = cheb.chebgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_chebgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = cheb.chebgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -228,15 +227,15 @@ def test_chebint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = cheb.chebint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i]) res = cheb.cheb2poly(chebint) @@ -245,7 +244,7 @@ def 
test_chebint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) assert_almost_equal(cheb.chebval(-1, chebint), i) @@ -253,8 +252,8 @@ def test_chebint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) res = cheb.cheb2poly(chebint) @@ -263,7 +262,7 @@ def test_chebint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1) @@ -273,7 +272,7 @@ def test_chebint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1, k=[k]) @@ -283,7 +282,7 @@ def test_chebint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) @@ -293,7 +292,7 @@ def test_chebint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) @@ -326,21 +325,21 @@ def test_chebder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = cheb.chebder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = cheb.chebder(cheb.chebint(tgt, 
m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -359,7 +358,7 @@ def test_chebder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_chebvander(self): # check for 1d x @@ -367,7 +366,7 @@ def test_chebvander(self): v = cheb.chebvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], cheb.chebval(x, coef)) # check for 2d x @@ -375,7 +374,7 @@ def test_chebvander(self): v = cheb.chebvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], cheb.chebval(x, coef)) def test_chebvander2d(self): @@ -409,7 +408,7 @@ class TestFitting: def test_chebfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -501,8 +500,8 @@ def powx(x, p): return x**p x = np.linspace(-1, 1, 10) - for deg in range(0, 10): - for p in range(0, deg + 1): + for deg in range(10): + for p in range(deg + 1): c = cheb.chebinterpolate(powx, deg, (p,)) assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) @@ -515,7 +514,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(cheb.chebcompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -532,7 +531,7 @@ def test_100(self): # functions like Laguerre can be very confusing. 
v = cheb.chebvander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) @@ -547,9 +546,9 @@ def test_chebfromroots(self): res = cheb.chebfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = [0]*i + [1] - res = cheb.chebfromroots(roots)*2**(i-1) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + tgt = [0] * i + [1] + res = cheb.chebfromroots(roots) * 2**(i - 1) assert_almost_equal(trim(res), trim(tgt)) def test_chebroots(self): @@ -576,24 +575,24 @@ def test_chebline(self): def test_cheb2poly(self): for i in range(10): - assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) + assert_almost_equal(cheb.cheb2poly([0] * i + [1]), Tlist[i]) def test_poly2cheb(self): for i in range(10): - assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(-1, 1, 11)[1:-1] - tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) + tgt = 1. 
/ (np.sqrt(1 + x) * np.sqrt(1 - x)) res = cheb.chebweight(x) assert_almost_equal(res, tgt) def test_chebpts1(self): - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebpts1, 1.5) assert_raises(ValueError, cheb.chebpts1, 0) - #test points + # test points tgt = [0] assert_almost_equal(cheb.chebpts1(1), tgt) tgt = [-0.70710678118654746, 0.70710678118654746] @@ -604,11 +603,11 @@ def test_chebpts1(self): assert_almost_equal(cheb.chebpts1(4), tgt) def test_chebpts2(self): - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebpts2, 1.5) assert_raises(ValueError, cheb.chebpts2, 1) - #test points + # test points tgt = [-1, 1] assert_almost_equal(cheb.chebpts2(2), tgt) tgt = [-1, 0, 1] diff --git a/blimgui/dist64/numpy/polynomial/tests/test_classes.py b/blimgui/dist64/numpy/polynomial/tests/test_classes.py index 48f370a..2045da4 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_classes.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_classes.py @@ -7,13 +7,18 @@ from numbers import Number import pytest + import numpy as np -from numpy.polynomial import ( - Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) from numpy.exceptions import RankWarning +from numpy.polynomial import ( + Chebyshev, + Hermite, + HermiteE, + Laguerre, + Legendre, + Polynomial, +) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises # # fixtures @@ -29,6 +34,7 @@ def Poly(request): return request.param + # # helper functions # @@ -57,12 +63,12 @@ def test_conversion(Poly1, Poly2): x = np.linspace(0, 1, 10) coef = random((3,)) - d1 = Poly1.domain + random((2,))*.25 - w1 = Poly1.window + random((2,))*.25 + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 p1 = Poly1(coef, domain=d1, window=w1) - d2 = Poly2.domain + random((2,))*.25 - w2 = Poly2.window + random((2,))*.25 + d2 = Poly2.domain 
+ random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 p2 = p1.convert(kind=Poly2, domain=d2, window=w2) assert_almost_equal(p2.domain, d2) @@ -74,12 +80,12 @@ def test_cast(Poly1, Poly2): x = np.linspace(0, 1, 10) coef = random((3,)) - d1 = Poly1.domain + random((2,))*.25 - w1 = Poly1.window + random((2,))*.25 + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 p1 = Poly1(coef, domain=d1, window=w1) - d2 = Poly2.domain + random((2,))*.25 - w2 = Poly2.window + random((2,))*.25 + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 p2 = Poly2.cast(p1, domain=d2, window=w2) assert_almost_equal(p2.domain, d2) @@ -93,8 +99,8 @@ def test_cast(Poly1, Poly2): def test_identity(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 x = np.linspace(d[0], d[1], 11) p = Poly.identity(domain=d, window=w) assert_equal(p.domain, d) @@ -103,19 +109,19 @@ def test_identity(Poly): def test_basis(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 p = Poly.basis(5, domain=d, window=w) assert_equal(p.domain, d) assert_equal(p.window, w) - assert_equal(p.coef, [0]*5 + [1]) + assert_equal(p.coef, [0] * 5 + [1]) def test_fromroots(Poly): # check that requested roots are zeros of a polynomial # of correct degree, domain, and window. 
- d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 r = random((5,)) p1 = Poly.fromroots(r, domain=d, window=w) assert_equal(p1.degree(), len(r)) @@ -144,7 +150,7 @@ def test_bad_conditioned_fit(Poly): def test_fit(Poly): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) x = np.linspace(0, 3) y = f(x) @@ -155,8 +161,8 @@ def f(x): assert_equal(p.degree(), 3) # check with given domains and window - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 p = Poly.fit(x, y, 3, domain=d, window=w) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, d) @@ -176,7 +182,7 @@ def f(x): # check that fit accepts weights. w = np.zeros_like(x) - z = y + random(y.shape)*.25 + z = y + random(y.shape) * .25 w[::2] = 1 p1 = Poly.fit(x[::2], z[::2], 3) p2 = Poly.fit(x, z, 3, w=w) @@ -291,7 +297,7 @@ def test_floordiv(Poly): assert_poly_almost_equal(p4 // np.array(c2), p1) assert_poly_almost_equal(np.array(c4) // p2, p1) assert_poly_almost_equal(2 // p2, Poly([0])) - assert_poly_almost_equal(p2 // 2, 0.5*p2) + assert_poly_almost_equal(p2 // 2, 0.5 * p2) assert_raises( TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) assert_raises( @@ -305,7 +311,7 @@ def test_floordiv(Poly): def test_truediv(Poly): # true division is valid only if the denominator is a Number and # not a python bool. 
- p1 = Poly([1,2,3]) + p1 = Poly([1, 2, 3]) p2 = p1 * 5 for stype in np.ScalarType: @@ -322,7 +328,7 @@ def test_truediv(Poly): s = stype(5, 0) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) - for s in [tuple(), list(), dict(), bool(), np.array([1])]: + for s in [(), [], {}, False, np.array([1])]: assert_raises(TypeError, op.truediv, p2, s) assert_raises(TypeError, op.truediv, s, p2) for ptype in classes: @@ -388,7 +394,7 @@ def test_divmod(Poly): assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p2, 2) - assert_poly_almost_equal(quo, 0.5*p2) + assert_poly_almost_equal(quo, 0.5 * p2) assert_poly_almost_equal(rem, Poly([0])) quo, rem = divmod(2, p2) assert_poly_almost_equal(quo, Poly([0])) @@ -430,26 +436,26 @@ def test_copy(Poly): def test_integ(Poly): P = Polynomial # Check defaults - p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) p1 = P.cast(p0.integ()) p2 = P.cast(p0.integ(2)) assert_poly_almost_equal(p1, P([0, 2, 3, 4])) assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) # Check with k - p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) p1 = P.cast(p0.integ(k=1)) p2 = P.cast(p0.integ(2, k=[1, 1])) assert_poly_almost_equal(p1, P([1, 2, 3, 4])) assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) # Check with lbnd - p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) p1 = P.cast(p0.integ(lbnd=1)) p2 = P.cast(p0.integ(2, lbnd=1)) assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) # Check scaling - d = 2*Poly.domain - p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d) + d = 2 * Poly.domain + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]), domain=d) p1 = P.cast(p0.integ()) p2 = P.cast(p0.integ(2)) assert_poly_almost_equal(p1, P([0, 2, 3, 4])) @@ -459,8 +465,8 @@ def test_integ(Poly): def test_deriv(Poly): # Check that the derivative is the inverse of integration. 
It is # assumes that the integration has been checked elsewhere. - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 p1 = Poly([1, 2, 3], domain=d, window=w) p2 = p1.integ(2, k=[1, 2]) p3 = p1.integ(1, k=[1]) @@ -475,8 +481,8 @@ def test_deriv(Poly): def test_linspace(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 p = Poly([1, 2, 3], domain=d, window=w) # check default domain xtgt = np.linspace(d[0], d[1], 20) @@ -493,8 +499,8 @@ def test_linspace(Poly): def test_pow(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 tgt = Poly([1], domain=d, window=w) tst = Poly([1, 2, 3], domain=d, window=w) for i in range(5): @@ -518,7 +524,7 @@ def test_call(Poly): # Check defaults p = Poly.cast(P([1, 2, 3])) - tgt = 1 + x*(2 + 3*x) + tgt = 1 + x * (2 + 3 * x) res = p(x) assert_almost_equal(res, tgt) @@ -565,7 +571,7 @@ def test_mapparms(Poly): p = Poly([1], domain=d, window=w) assert_almost_equal([0, 1], p.mapparms()) # - w = 2*d + 1 + w = 2 * d + 1 p = Poly([1], domain=d, window=w) assert_almost_equal([1, 2], p.mapparms()) @@ -601,7 +607,7 @@ def powx(x, p): return x**p x = np.linspace(0, 2, 10) - for deg in range(0, 10): - for t in range(0, deg + 1): + for deg in range(10): + for t in range(deg + 1): p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) assert_almost_equal(p(x), powx(x, t), decimal=11) diff --git a/blimgui/dist64/numpy/polynomial/tests/test_hermite.py b/blimgui/dist64/numpy/polynomial/tests/test_hermite.py index 37a5540..8b8d2ca 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_hermite.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_hermite.py @@ -6,9 +6,7 @@ import numpy as np import numpy.polynomial.hermite as herm from 
numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises H0 = np.array([1]) H1 = np.array([0, 2]) @@ -53,7 +51,7 @@ def test_hermadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = herm.hermadd([0]*i + [1], [0]*j + [1]) + res = herm.hermadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermsub(self): @@ -63,37 +61,37 @@ def test_hermsub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = herm.hermsub([0]*i + [1], [0]*j + [1]) + res = herm.hermsub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermmulx(self): assert_equal(herm.hermmulx([0]), [0]) assert_equal(herm.hermmulx([1]), [0, .5]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, .5] + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, .5] assert_equal(herm.hermmulx(ser), tgt) def test_hermmul(self): # check values of result for i in range(5): - pol1 = [0]*i + [1] + pol1 = [0] * i + [1] val1 = herm.hermval(self.x, pol1) for j in range(5): msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] + pol2 = [0] * j + [1] val2 = herm.hermval(self.x, pol2) pol3 = herm.hermmul(pol1, pol2) val3 = herm.hermval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) def test_hermdiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = herm.hermadd(ci, cj) quo, rem = herm.hermdiv(tgt, ci) res = herm.hermadd(herm.hermmul(quo, ci), rem) @@ -104,7 +102,7 @@ def test_hermpow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(herm.hermmul, [c]*j, np.array([1])) + tgt = reduce(herm.hermmul, [c] * j, 
np.array([1])) res = herm.hermpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -116,25 +114,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_hermval(self): - #check empty input + # check empty input assert_equal(herm.hermval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Hlist] for i in range(10): msg = f"At i={i}" tgt = y[i] - res = herm.hermval(x, [0]*i + [1]) + res = herm.hermval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(herm.hermval(x, [1]).shape, dims) assert_equal(herm.hermval(x, [1, 0]).shape, dims) @@ -144,15 +142,15 @@ def test_hermval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = herm.hermval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -161,15 +159,15 @@ def test_hermval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = herm.hermval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -178,29 +176,29 @@ def test_hermgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = 
herm.hermgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_hermgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herm.hermgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -216,15 +214,15 @@ def test_hermint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = herm.hermint([0], m=i, k=k) assert_almost_equal(res, [0, .5]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i]) res = herm.herm2poly(hermint) @@ -233,7 +231,7 @@ def test_hermint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) assert_almost_equal(herm.hermval(-1, hermint), i) @@ -241,8 +239,8 @@ def test_hermint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) res = herm.herm2poly(hermint) @@ -251,7 +249,7 @@ def test_hermint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in 
range(j): tgt = herm.hermint(tgt, m=1) @@ -261,7 +259,7 @@ def test_hermint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k]) @@ -271,7 +269,7 @@ def test_hermint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) @@ -281,7 +279,7 @@ def test_hermint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k], scl=2) @@ -314,21 +312,21 @@ def test_hermder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herm.hermder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herm.hermder(herm.hermint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -347,7 +345,7 @@ def test_hermder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_hermvander(self): # check for 1d x @@ -355,7 +353,7 @@ def test_hermvander(self): v = herm.hermvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], herm.hermval(x, coef)) # check for 2d x @@ -363,7 +361,7 @@ def test_hermvander(self): v = herm.hermvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in 
range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], herm.hermval(x, coef)) def test_hermvander2d(self): @@ -397,7 +395,7 @@ class TestFitting: def test_hermfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -478,7 +476,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(herm.hermcompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -495,7 +493,7 @@ def test_100(self): # functions like Laguerre can be very confusing. v = herm.hermvander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) @@ -510,7 +508,7 @@ def test_hermfromroots(self): res = herm.hermfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) pol = herm.hermfromroots(roots) res = herm.hermval(roots, pol) tgt = 0 @@ -542,11 +540,11 @@ def test_hermline(self): def test_herm2poly(self): for i in range(10): - assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) + assert_almost_equal(herm.herm2poly([0] * i + [1]), Hlist[i]) def test_poly2herm(self): for i in range(10): - assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) + assert_almost_equal(herm.poly2herm(Hlist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(-5, 5, 11) diff --git a/blimgui/dist64/numpy/polynomial/tests/test_hermite_e.py b/blimgui/dist64/numpy/polynomial/tests/test_hermite_e.py index 5b92f01..4560125 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_hermite_e.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_hermite_e.py @@ -6,9 +6,7 @@ import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - 
assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises He0 = np.array([1]) He1 = np.array([0, 1]) @@ -53,7 +51,7 @@ def test_hermeadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = herme.hermeadd([0]*i + [1], [0]*j + [1]) + res = herme.hermeadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermesub(self): @@ -63,37 +61,37 @@ def test_hermesub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = herme.hermesub([0]*i + [1], [0]*j + [1]) + res = herme.hermesub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermemulx(self): assert_equal(herme.hermemulx([0]), [0]) assert_equal(herme.hermemulx([1]), [0, 1]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, 1] + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, 1] assert_equal(herme.hermemulx(ser), tgt) def test_hermemul(self): # check values of result for i in range(5): - pol1 = [0]*i + [1] + pol1 = [0] * i + [1] val1 = herme.hermeval(self.x, pol1) for j in range(5): msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] + pol2 = [0] * j + [1] val2 = herme.hermeval(self.x, pol2) pol3 = herme.hermemul(pol1, pol2) val3 = herme.hermeval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) def test_hermediv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = herme.hermeadd(ci, cj) quo, rem = herme.hermediv(tgt, ci) res = herme.hermeadd(herme.hermemul(quo, ci), rem) @@ -104,7 +102,7 @@ def test_hermepow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(herme.hermemul, [c]*j, np.array([1])) + tgt = reduce(herme.hermemul, [c] * j, np.array([1])) res = 
herme.hermepow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -116,25 +114,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_hermeval(self): - #check empty input + # check empty input assert_equal(herme.hermeval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Helist] for i in range(10): msg = f"At i={i}" tgt = y[i] - res = herme.hermeval(x, [0]*i + [1]) + res = herme.hermeval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(herme.hermeval(x, [1]).shape, dims) assert_equal(herme.hermeval(x, [1, 0]).shape, dims) @@ -144,15 +142,15 @@ def test_hermeval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = herme.hermeval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermeval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -161,15 +159,15 @@ def test_hermeval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = herme.hermeval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermeval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -178,29 +176,29 @@ def test_hermegrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = 
herme.hermegrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermegrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_hermegrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herme.hermegrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermegrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -216,15 +214,15 @@ def test_hermeint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = herme.hermeint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i]) res = herme.herme2poly(hermeint) @@ -233,7 +231,7 @@ def test_hermeint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) assert_almost_equal(herme.hermeval(-1, hermeint), i) @@ -241,8 +239,8 @@ def test_hermeint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) res = herme.herme2poly(hermeint) @@ -251,7 +249,7 @@ def test_hermeint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = 
[0] * i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1) @@ -261,7 +259,7 @@ def test_hermeint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k]) @@ -271,7 +269,7 @@ def test_hermeint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) @@ -281,7 +279,7 @@ def test_hermeint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) @@ -314,21 +312,21 @@ def test_hermeder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herme.hermeder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herme.hermeder( herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -348,7 +346,7 @@ def test_hermeder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_hermevander(self): # check for 1d x @@ -356,7 +354,7 @@ def test_hermevander(self): v = herme.hermevander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], herme.hermeval(x, coef)) # check for 2d x @@ -364,7 +362,7 @@ def test_hermevander(self): v 
= herme.hermevander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], herme.hermeval(x, coef)) def test_hermevander2d(self): @@ -398,7 +396,7 @@ class TestFitting: def test_hermefit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -479,7 +477,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(herme.hermecompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -496,12 +494,12 @@ def test_100(self): # functions like Laguerre can be very confusing. v = herme.hermevander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct - tgt = np.sqrt(2*np.pi) + tgt = np.sqrt(2 * np.pi) assert_almost_equal(w.sum(), tgt) @@ -511,7 +509,7 @@ def test_hermefromroots(self): res = herme.hermefromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) pol = herme.hermefromroots(roots) res = herme.hermeval(roots, pol) tgt = 0 @@ -543,14 +541,14 @@ def test_hermeline(self): def test_herme2poly(self): for i in range(10): - assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) + assert_almost_equal(herme.herme2poly([0] * i + [1]), Helist[i]) def test_poly2herme(self): for i in range(10): - assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) + assert_almost_equal(herme.poly2herme(Helist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(-5, 5, 11) - tgt = np.exp(-.5*x**2) + tgt = np.exp(-.5 * x**2) res = herme.hermeweight(x) assert_almost_equal(res, tgt) diff --git a/blimgui/dist64/numpy/polynomial/tests/test_laguerre.py 
b/blimgui/dist64/numpy/polynomial/tests/test_laguerre.py index ca6188a..7cc017c 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_laguerre.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_laguerre.py @@ -6,17 +6,15 @@ import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - -L0 = np.array([1])/1 -L1 = np.array([1, -1])/1 -L2 = np.array([2, -4, 1])/2 -L3 = np.array([6, -18, 9, -1])/6 -L4 = np.array([24, -96, 72, -16, 1])/24 -L5 = np.array([120, -600, 600, -200, 25, -1])/120 -L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + +L0 = np.array([1]) / 1 +L1 = np.array([1, -1]) / 1 +L2 = np.array([2, -4, 1]) / 2 +L3 = np.array([6, -18, 9, -1]) / 6 +L4 = np.array([24, -96, 72, -16, 1]) / 24 +L5 = np.array([120, -600, 600, -200, 25, -1]) / 120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1]) / 720 Llist = [L0, L1, L2, L3, L4, L5, L6] @@ -50,7 +48,7 @@ def test_lagadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = lag.lagadd([0]*i + [1], [0]*j + [1]) + res = lag.lagadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_lagsub(self): @@ -60,37 +58,37 @@ def test_lagsub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = lag.lagsub([0]*i + [1], [0]*j + [1]) + res = lag.lagsub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_lagmulx(self): assert_equal(lag.lagmulx([0]), [0]) assert_equal(lag.lagmulx([1]), [1, -1]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [-i, 2 * i + 1, -(i + 1)] assert_almost_equal(lag.lagmulx(ser), tgt) def test_lagmul(self): # check values of result for i in range(5): - pol1 = [0]*i + [1] + pol1 = [0] * i 
+ [1] val1 = lag.lagval(self.x, pol1) for j in range(5): msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] + pol2 = [0] * j + [1] val2 = lag.lagval(self.x, pol2) pol3 = lag.lagmul(pol1, pol2) val3 = lag.lagval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) def test_lagdiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = lag.lagadd(ci, cj) quo, rem = lag.lagdiv(tgt, ci) res = lag.lagadd(lag.lagmul(quo, ci), rem) @@ -101,7 +99,7 @@ def test_lagpow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(lag.lagmul, [c]*j, np.array([1])) + tgt = reduce(lag.lagmul, [c] * j, np.array([1])) res = lag.lagpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -113,25 +111,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_lagval(self): - #check empty input + # check empty input assert_equal(lag.lagval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(7): msg = f"At i={i}" tgt = y[i] - res = lag.lagval(x, [0]*i + [1]) + res = lag.lagval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(lag.lagval(x, [1]).shape, dims) assert_equal(lag.lagval(x, [1, 0]).shape, dims) @@ -141,15 +139,15 @@ def test_lagval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = 
lag.lagval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.lagval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -158,15 +156,15 @@ def test_lagval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = lag.lagval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.lagval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -175,29 +173,29 @@ def test_laggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = lag.laggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.laggrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_laggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = lag.laggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.laggrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -213,15 +211,15 @@ def test_lagint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = lag.lagint([0], m=i, k=k) assert_almost_equal(res, [1, -1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i]) res = lag.lag2poly(lagint) @@ -230,7 +228,7 @@ def test_lagint(self): # check single integration with integration constant and lbnd for i 
in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) assert_almost_equal(lag.lagval(-1, lagint), i) @@ -238,8 +236,8 @@ def test_lagint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) res = lag.lag2poly(lagint) @@ -248,7 +246,7 @@ def test_lagint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1) @@ -258,7 +256,7 @@ def test_lagint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k]) @@ -268,7 +266,7 @@ def test_lagint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) @@ -278,7 +276,7 @@ def test_lagint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k], scl=2) @@ -311,21 +309,21 @@ def test_lagder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = lag.lagder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = lag.lagder(lag.lagint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt 
= [0]*i + [1] + tgt = [0] * i + [1] res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -344,7 +342,7 @@ def test_lagder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_lagvander(self): # check for 1d x @@ -352,7 +350,7 @@ def test_lagvander(self): v = lag.lagvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], lag.lagval(x, coef)) # check for 2d x @@ -360,7 +358,7 @@ def test_lagvander(self): v = lag.lagvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], lag.lagval(x, coef)) def test_lagvander2d(self): @@ -394,7 +392,7 @@ class TestFitting: def test_lagfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) # Test exceptions assert_raises(ValueError, lag.lagfit, [1], [1], -1) @@ -460,7 +458,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(lag.lagcompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -477,7 +475,7 @@ def test_100(self): # functions like Laguerre can be very confusing. 
v = lag.lagvander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) @@ -492,7 +490,7 @@ def test_lagfromroots(self): res = lag.lagfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) pol = lag.lagfromroots(roots) res = lag.lagval(roots, pol) tgt = 0 @@ -524,11 +522,11 @@ def test_lagline(self): def test_lag2poly(self): for i in range(7): - assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) + assert_almost_equal(lag.lag2poly([0] * i + [1]), Llist[i]) def test_poly2lag(self): for i in range(7): - assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) + assert_almost_equal(lag.poly2lag(Llist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(0, 10, 11) diff --git a/blimgui/dist64/numpy/polynomial/tests/test_legendre.py b/blimgui/dist64/numpy/polynomial/tests/test_legendre.py index f60dd1c..20b100a 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_legendre.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_legendre.py @@ -6,20 +6,18 @@ import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) L1 = np.array([0, 1]) -L2 = np.array([-1, 0, 3])/2 -L3 = np.array([0, -3, 0, 5])/2 -L4 = np.array([3, 0, -30, 0, 35])/8 -L5 = np.array([0, 15, 0, -70, 0, 63])/8 -L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 -L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 -L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 -L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 +L2 = np.array([-1, 0, 3]) / 2 +L3 = np.array([0, -3, 0, 5]) / 2 +L4 = np.array([3, 
0, -30, 0, 35]) / 8 +L5 = np.array([0, 15, 0, -70, 0, 63]) / 8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231]) / 16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429]) / 16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435]) / 128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155]) / 128 Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] @@ -53,7 +51,7 @@ def test_legadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = leg.legadd([0]*i + [1], [0]*j + [1]) + res = leg.legadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_legsub(self): @@ -63,38 +61,38 @@ def test_legsub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = leg.legsub([0]*i + [1], [0]*j + [1]) + res = leg.legsub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_legmulx(self): assert_equal(leg.legmulx([0]), [0]) assert_equal(leg.legmulx([1]), [0, 1]) for i in range(1, 5): - tmp = 2*i + 1 - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] + tmp = 2 * i + 1 + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i / tmp, 0, (i + 1) / tmp] assert_equal(leg.legmulx(ser), tgt) def test_legmul(self): # check values of result for i in range(5): - pol1 = [0]*i + [1] + pol1 = [0] * i + [1] val1 = leg.legval(self.x, pol1) for j in range(5): msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] + pol2 = [0] * j + [1] val2 = leg.legval(self.x, pol2) pol3 = leg.legmul(pol1, pol2) val3 = leg.legval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) def test_legdiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = leg.legadd(ci, cj) quo, rem = leg.legdiv(tgt, ci) res = leg.legadd(leg.legmul(quo, ci), rem) @@ -105,7 +103,7 @@ def test_legpow(self): for j in range(5): msg = f"At i={i}, 
j={j}" c = np.arange(i + 1) - tgt = reduce(leg.legmul, [c]*j, np.array([1])) + tgt = reduce(leg.legmul, [c] * j, np.array([1])) res = leg.legpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -117,25 +115,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_legval(self): - #check empty input + # check empty input assert_equal(leg.legval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(10): msg = f"At i={i}" tgt = y[i] - res = leg.legval(x, [0]*i + [1]) + res = leg.legval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(leg.legval(x, [1]).shape, dims) assert_equal(leg.legval(x, [1, 0]).shape, dims) @@ -145,15 +143,15 @@ def test_legval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = leg.legval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.legval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -162,15 +160,15 @@ def test_legval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = leg.legval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -179,29 +177,29 @@ def test_leggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - 
#test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = leg.leggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.leggrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_leggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = leg.leggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.leggrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -217,15 +215,15 @@ def test_legint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = leg.legint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i]) res = leg.leg2poly(legint) @@ -234,7 +232,7 @@ def test_legint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) assert_almost_equal(leg.legval(-1, legint), i) @@ -242,8 +240,8 @@ def test_legint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i], scl=2) res = leg.leg2poly(legint) @@ -252,7 +250,7 @@ def test_legint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = 
pol[:] for k in range(j): tgt = leg.legint(tgt, m=1) @@ -262,7 +260,7 @@ def test_legint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k]) @@ -272,7 +270,7 @@ def test_legint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) @@ -282,7 +280,7 @@ def test_legint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k], scl=2) @@ -318,21 +316,21 @@ def test_legder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = leg.legder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = leg.legder(leg.legint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -354,7 +352,7 @@ def test_legder_orderhigherthancoeff(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_legvander(self): # check for 1d x @@ -362,7 +360,7 @@ def test_legvander(self): v = leg.legvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], leg.legval(x, coef)) # check for 2d x @@ -370,7 +368,7 @@ def test_legvander(self): v = leg.legvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in 
range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], leg.legval(x, coef)) def test_legvander2d(self): @@ -407,7 +405,7 @@ class TestFitting: def test_legfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -488,7 +486,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(leg.legcompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -505,7 +503,7 @@ def test_100(self): # functions like Laguerre can be very confusing. v = leg.legvander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) @@ -520,7 +518,7 @@ def test_legfromroots(self): res = leg.legfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) pol = leg.legfromroots(roots) res = leg.legval(roots, pol) tgt = 0 @@ -555,11 +553,11 @@ def test_legline_zeroscl(self): def test_leg2poly(self): for i in range(10): - assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) + assert_almost_equal(leg.leg2poly([0] * i + [1]), Llist[i]) def test_poly2leg(self): for i in range(10): - assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) + assert_almost_equal(leg.poly2leg(Llist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(-1, 1, 11) diff --git a/blimgui/dist64/numpy/polynomial/tests/test_polynomial.py b/blimgui/dist64/numpy/polynomial/tests/test_polynomial.py index 3a232f7..80e8abf 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_polynomial.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_polynomial.py @@ -1,21 +1,29 @@ """Tests for polynomial module. 
""" -from functools import reduce +import pickle +from copy import deepcopy from fractions import Fraction +from functools import reduce + +import pytest + import numpy as np import numpy.polynomial.polynomial as poly -import numpy.polynomial.polyutils as pu -import pickle -from copy import deepcopy from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - assert_array_equal, assert_raises_regex, assert_warns) + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) def trim(x): return poly.polytrim(x, tol=1e-6) + T0 = [1] T1 = [0, 1] T2 = [-1, 0, 2] @@ -63,7 +71,7 @@ def test_polyadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = poly.polyadd([0]*i + [1], [0]*j + [1]) + res = poly.polyadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polysub(self): @@ -73,15 +81,15 @@ def test_polysub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = poly.polysub([0]*i + [1], [0]*j + [1]) + res = poly.polysub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polymulx(self): assert_equal(poly.polymulx([0]), [0]) assert_equal(poly.polymulx([1]), [0, 1]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i + 1) + [1] + ser = [0] * i + [1] + tgt = [0] * (i + 1) + [1] assert_equal(poly.polymulx(ser), tgt) def test_polymul(self): @@ -90,7 +98,7 @@ def test_polymul(self): msg = f"At i={i}, j={j}" tgt = np.zeros(i + j + 1) tgt[i + j] += 1 - res = poly.polymul([0]*i + [1], [0]*j + [1]) + res = poly.polymul([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polydiv(self): @@ -107,8 +115,8 @@ def test_polydiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1, 2] - cj = [0]*j + [1, 2] + ci = [0] * i + [1, 2] + cj = [0] * j + [1, 2] tgt = poly.polyadd(ci, cj) quo, rem = poly.polydiv(tgt, ci) res = 
poly.polyadd(poly.polymul(quo, ci), rem) @@ -119,7 +127,7 @@ def test_polypow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(poly.polymul, [c]*j, np.array([1])) + tgt = reduce(poly.polymul, [c] * j, np.array([1])) res = poly.polypow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -150,39 +158,39 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = poly.polyval(x, [1., 2., 3.]) def test_polyval(self): - #check empty input + # check empty input assert_equal(poly.polyval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(5): tgt = y[i] - res = poly.polyval(x, [0]*i + [1]) + res = poly.polyval(x, [0] * i + [1]) assert_almost_equal(res, tgt) - tgt = x*(x**2 - 1) + tgt = x * (x**2 - 1) res = poly.polyval(x, [0, -1, 0, 1]) assert_almost_equal(res, tgt) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(poly.polyval(x, [1]).shape, dims) assert_equal(poly.polyval(x, [1, 0]).shape, dims) assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) - #check masked arrays are processed correctly + # check masked arrays are processed correctly mask = [False, True, False] mx = np.ma.array([1, 2, 3], mask=mask) res = np.polyval([7, 5, 3], mx) assert_array_equal(res.mask, mask) - #check subtypes of ndarray are preserved + # check subtypes of ndarray are preserved class C(np.ndarray): pass @@ -212,15 +220,15 @@ def test_polyvalfromroots(self): y = [x**i for i in range(5)] for i in range(1, 5): tgt = y[i] - res = poly.polyvalfromroots(x, [0]*i) + res = poly.polyvalfromroots(x, [0] * i) assert_almost_equal(res, tgt) - tgt = x*(x - 1)*(x + 1) + tgt = x * (x - 1) * (x + 1) res = poly.polyvalfromroots(x, [-1, 0, 1]) assert_almost_equal(res, 
tgt) # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) @@ -245,7 +253,7 @@ def test_polyvalfromroots(self): assert_equal(res, tgt) # check tensor=True - x = np.vstack([x, 2*x]) + x = np.vstack([x, 2 * x]) res = poly.polyvalfromroots(x, r, tensor=True) tgt = np.empty(r.shape[1:] + x.shape) for ii in range(r.shape[1]): @@ -257,16 +265,16 @@ def test_polyval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = poly.polyval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polyval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -275,16 +283,16 @@ def test_polyval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = poly.polyval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -293,29 +301,29 @@ def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = poly.polygrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polygrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_polygrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = poly.polygrid3d(x1, x2, x3, self.c3d) 
assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polygrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -332,37 +340,37 @@ def test_polyint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = poly.polyint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] res = poly.polyint(pol, m=1, k=[i]) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] res = poly.polyint(pol, m=1, k=[i], lbnd=-1) assert_almost_equal(poly.polyval(-1, res), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] res = poly.polyint(pol, m=1, k=[i], scl=2) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1) @@ -372,7 +380,7 @@ def test_polyint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k]) @@ -382,7 +390,7 @@ def test_polyint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) @@ -392,7 +400,7 @@ def test_polyint(self): # check multiple integrations with scaling for i in range(5): for j in 
range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k], scl=2) @@ -425,21 +433,21 @@ def test_polyder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = poly.polyder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = poly.polyder(poly.polyint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -458,7 +466,7 @@ def test_polyder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_polyvander(self): # check for 1d x @@ -466,7 +474,7 @@ def test_polyvander(self): v = poly.polyvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], poly.polyval(x, coef)) # check for 2d x @@ -474,7 +482,7 @@ def test_polyvander(self): v = poly.polyvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], poly.polyval(x, coef)) def test_polyvander2d(self): @@ -516,7 +524,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(poly.polycompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -529,9 +537,9 @@ def test_polyfromroots(self): res = poly.polyfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) tgt = Tlist[i] - res = 
poly.polyfromroots(roots)*2**(i-1) + res = poly.polyfromroots(roots) * 2**(i - 1) assert_almost_equal(trim(res), trim(tgt)) def test_polyroots(self): @@ -542,9 +550,23 @@ def test_polyroots(self): res = poly.polyroots(poly.polyfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) + # Testing for larger root values + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. + assert_almost_equal(res, tgt, 15 - int(np.log10(i))) + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1.01, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + def test_polyfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -634,7 +656,7 @@ def test_fit_degenerate_domain(self): assert_equal(p.coef, [2.]) p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) assert_almost_equal(p.coef, [2.05]) - with assert_warns(pu.RankWarning): + with pytest.warns(np.exceptions.RankWarning): p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) def test_result_type(self): @@ -645,3 +667,25 @@ def test_result_type(self): arr = np.polydiv(1, np.float32(1)) assert_equal(arr[0].dtype, np.float64) + +class ArrayFunctionInterceptor: + def __init__(self): + self.called = False + + def __array_function__(self, func, types, args, kwargs): + self.called = True + return "intercepted" + +def test_polyval2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polyval2d(x, y, c) + assert result == "intercepted" + +def test_polygrid2d_array_function_hook(): + x = 
ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polygrid2d(x, y, c) + assert result == "intercepted" diff --git a/blimgui/dist64/numpy/polynomial/tests/test_polyutils.py b/blimgui/dist64/numpy/polynomial/tests/test_polyutils.py index 176353a..34f0c42 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_polyutils.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_polyutils.py @@ -3,9 +3,7 @@ """ import numpy as np import numpy.polynomial.polyutils as pu -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises class TestMisc: diff --git a/blimgui/dist64/numpy/polynomial/tests/test_printing.py b/blimgui/dist64/numpy/polynomial/tests/test_printing.py index 8c64420..43c3a12 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_printing.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_printing.py @@ -1,12 +1,14 @@ -from math import nan, inf -import pytest -from numpy._core import array, arange, printoptions -import numpy.polynomial as poly -from numpy.testing import assert_equal, assert_ +from decimal import Decimal # For testing polynomial printing with object arrays from fractions import Fraction -from decimal import Decimal +from math import inf, nan + +import pytest + +import numpy.polynomial as poly +from numpy._core import arange, array, printoptions +from numpy.testing import assert_, assert_equal class TestStrUnicodeSuperSubscripts: @@ -244,6 +246,7 @@ def test_linewidth_printoption(self, lw, tgt): assert_(len(line) < lw) +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_set_default_printoptions(): p = poly.Polynomial([1, 2, 3]) c = poly.Chebyshev([1, 2, 3]) @@ -257,9 +260,10 @@ def test_set_default_printoptions(): poly.set_default_printstyle('invalid_input') 
+@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_complex_coefficients(): """Test both numpy and built-in complex.""" - coefs = [0+1j, 1+1j, -2+2j, 3+0j] + coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j] # numpy complex p1 = poly.Polynomial(coefs) # Python complex @@ -413,7 +417,7 @@ def test_simple_polynomial(self): # translated input p = poly.Polynomial([1, 2, 3], domain=[-2, 0]) assert_equal(self.as_latex(p), - r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') # noqa: E501 # scaled input p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5]) @@ -423,7 +427,7 @@ def test_simple_polynomial(self): # affine input p = poly.Polynomial([1, 2, 3], domain=[-1, 0]) assert_equal(self.as_latex(p), - r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') # noqa: E501 def test_basis_func(self): p = poly.Chebyshev([1, 2, 3]) @@ -432,7 +436,7 @@ def test_basis_func(self): # affine input - check no surplus parens are added p = poly.Chebyshev([1, 2, 3], domain=[-1, 0]) assert_equal(self.as_latex(p), - r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') # noqa: E501 def test_multichar_basis_func(self): p = poly.HermiteE([1, 2, 3]) @@ -480,6 +484,7 @@ def test_numeric_object_coefficients(self): p = poly.Polynomial(coefs) assert_equal(self.as_latex(p), '$x \\mapsto 1/2 + 1\\,x$') + SWITCH_TO_EXP = ( '1.0 + (1.0e-01) x + (1.0e-02) x**2', '1.2 + (1.2e-01) x + (1.2e-02) x**2', @@ -505,7 +510,7 @@ def use_ascii(self): poly.set_default_printstyle('ascii') def test_str(self): - p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9]) + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) 
assert_equal(str(p), '0.5 + 0.14285714 x + 14285714.28571429 x**2 ' '+ (1.42857143e+08) x**3') @@ -514,7 +519,7 @@ def test_str(self): '+ (1.429e+08) x**3') def test_latex(self): - p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9]) + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) assert_equal(p._repr_latex_(), r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + ' r'\text{14285714.28571429}\,x^{2} + ' @@ -526,7 +531,7 @@ def test_latex(self): r'\text{14285714.286}\,x^{2} + \text{(1.429e+08)}\,x^{3}$') def test_fixed(self): - p = poly.Polynomial([1/2]) + p = poly.Polynomial([1 / 2]) assert_equal(str(p), '0.5') with printoptions(floatmode='fixed'): @@ -538,14 +543,14 @@ def test_fixed(self): def test_switch_to_exp(self): for i, s in enumerate(SWITCH_TO_EXP): with printoptions(precision=i): - p = poly.Polynomial([1.23456789*10**-i - for i in range(i//2+3)]) + p = poly.Polynomial([1.23456789 * 10**-i + for i in range(i // 2 + 3)]) assert str(p).replace('\n', ' ') == s def test_non_finite(self): p = poly.Polynomial([nan, inf]) assert str(p) == 'nan + inf x' - assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' + assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' # noqa: RUF027 with printoptions(nanstr='NAN', infstr='INF'): assert str(p) == 'NAN + INF x' assert p._repr_latex_() == \ diff --git a/blimgui/dist64/numpy/polynomial/tests/test_symbol.py b/blimgui/dist64/numpy/polynomial/tests/test_symbol.py index be64e96..50bea29 100644 --- a/blimgui/dist64/numpy/polynomial/tests/test_symbol.py +++ b/blimgui/dist64/numpy/polynomial/tests/test_symbol.py @@ -3,9 +3,10 @@ """ import pytest + import numpy.polynomial as poly from numpy._core import array -from numpy.testing import assert_equal, assert_raises, assert_ +from numpy.testing import assert_, assert_equal, assert_raises class TestInit: @@ -195,7 +196,7 @@ def test_composition(): def test_fit(): - x, y = (range(10),)*2 + x, y = (range(10),) * 2 p = poly.Polynomial.fit(x, 
y, deg=1, symbol='z') assert_equal(p.symbol, 'z') diff --git a/blimgui/dist64/numpy/random/__init__.py b/blimgui/dist64/numpy/random/__init__.py index 45e1557..370803f 100644 --- a/blimgui/dist64/numpy/random/__init__.py +++ b/blimgui/dist64/numpy/random/__init__.py @@ -177,16 +177,13 @@ ] # add these for module-freeze analysis (like PyInstaller) -from . import _pickle -from . import _common -from . import _bounded_integers - +from . import _bounded_integers, _common, _pickle from ._generator import Generator, default_rng -from .bit_generator import SeedSequence, BitGenerator from ._mt19937 import MT19937 from ._pcg64 import PCG64, PCG64DXSM from ._philox import Philox from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence from .mtrand import * __all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', @@ -211,5 +208,6 @@ def __RandomState_ctor(): from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/blimgui/dist64/numpy/random/__init__.pyi b/blimgui/dist64/numpy/random/__init__.pyi index 3b438ee..a5ec69a 100644 --- a/blimgui/dist64/numpy/random/__init__.pyi +++ b/blimgui/dist64/numpy/random/__init__.pyi @@ -1,11 +1,9 @@ -from ._generator import Generator -from ._generator import default_rng +from ._generator import Generator, default_rng from ._mt19937 import MT19937 from ._pcg64 import PCG64, PCG64DXSM from ._philox import Philox from ._sfc64 import SFC64 -from .bit_generator import BitGenerator -from .bit_generator import SeedSequence +from .bit_generator import BitGenerator, SeedSequence from .mtrand import ( RandomState, beta, diff --git a/blimgui/dist64/numpy/random/_bounded_integers.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/_bounded_integers.cp314-win_amd64.lib similarity index 63% rename from blimgui/dist64/numpy/random/_bounded_integers.cp313-win_amd64.lib rename to blimgui/dist64/numpy/random/_bounded_integers.cp314-win_amd64.lib index d7e97d4..5a94c29 100644 
Binary files a/blimgui/dist64/numpy/random/_bounded_integers.cp313-win_amd64.lib and b/blimgui/dist64/numpy/random/_bounded_integers.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/_bounded_integers.pyi b/blimgui/dist64/numpy/random/_bounded_integers.pyi new file mode 100644 index 0000000..c9c2ef6 --- /dev/null +++ b/blimgui/dist64/numpy/random/_bounded_integers.pyi @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/blimgui/dist64/numpy/random/_common.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/_common.cp313-win_amd64.lib deleted file mode 100644 index 5c05029..0000000 Binary files a/blimgui/dist64/numpy/random/_common.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/random/_common.cp314-win_amd64.lib b/blimgui/dist64/numpy/random/_common.cp314-win_amd64.lib new file mode 100644 index 0000000..09acd1e Binary files /dev/null and b/blimgui/dist64/numpy/random/_common.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/_common.pxd b/blimgui/dist64/numpy/random/_common.pxd index 499acf7..5593f27 100644 --- a/blimgui/dist64/numpy/random/_common.pxd +++ b/blimgui/dist64/numpy/random/_common.pxd @@ -26,12 +26,15 @@ cdef enum ConstraintType: LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG ctypedef ConstraintType constraint_type +ctypedef fused double_or_int64: + double + int64_t cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) cdef object prepare_cffi(bitgen_t *bitgen) cdef object prepare_ctypes(bitgen_t *bitgen) -cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_constraint(double_or_int64 val, object name, constraint_type cons) except -1 cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 cdef extern from "include/aligned_malloc.h": diff --git a/blimgui/dist64/numpy/random/_common.pyi 
b/blimgui/dist64/numpy/random/_common.pyi new file mode 100644 index 0000000..b667fd1 --- /dev/null +++ b/blimgui/dist64/numpy/random/_common.pyi @@ -0,0 +1,16 @@ +from collections.abc import Callable +from typing import Any, NamedTuple, TypeAlias + +import numpy as np + +__all__: list[str] = ["interface"] + +_CDataVoidPointer: TypeAlias = Any + +class interface(NamedTuple): + state_address: int + state: _CDataVoidPointer + next_uint64: Callable[..., np.uint64] + next_uint32: Callable[..., np.uint32] + next_double: Callable[..., np.float64] + bit_generator: _CDataVoidPointer diff --git a/blimgui/dist64/numpy/random/_examples/cffi/extending.py b/blimgui/dist64/numpy/random/_examples/cffi/extending.py index b8aca71..dc9921d 100644 --- a/blimgui/dist64/numpy/random/_examples/cffi/extending.py +++ b/blimgui/dist64/numpy/random/_examples/cffi/extending.py @@ -2,9 +2,13 @@ Use cffi to access any of the underlying C functions from distributions.h """ import os -import numpy as np + import cffi + +import numpy as np + from .parse import parse_distributions_h + ffi = cffi.FFI() inc_dir = os.path.join(np.get_include(), 'numpy') diff --git a/blimgui/dist64/numpy/random/_examples/cffi/parse.py b/blimgui/dist64/numpy/random/_examples/cffi/parse.py index 2616f5b..439b0e6 100644 --- a/blimgui/dist64/numpy/random/_examples/cffi/parse.py +++ b/blimgui/dist64/numpy/random/_examples/cffi/parse.py @@ -51,4 +51,3 @@ def parse_distributions_h(ffi, inc_dir): line = line.replace('RAND_INT_TYPE', 'int64_t') s.append(line) ffi.cdef('\n'.join(s)) - diff --git a/blimgui/dist64/numpy/random/_examples/cython/extending_distributions.pyx b/blimgui/dist64/numpy/random/_examples/cython/extending_distributions.pyx index dd05b2e..6139f3e 100644 --- a/blimgui/dist64/numpy/random/_examples/cython/extending_distributions.pyx +++ b/blimgui/dist64/numpy/random/_examples/cython/extending_distributions.pyx @@ -12,6 +12,8 @@ from numpy.random import PCG64 from numpy.random.c_distributions cimport ( 
random_standard_uniform_fill, random_standard_uniform_fill_f) +np.import_array() + @cython.boundscheck(False) @cython.wraparound(False) @@ -90,7 +92,6 @@ def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The default dtype value is 'd' """ - cdef Py_ssize_t i cdef bitgen_t *rng cdef const char *capsule_name = "BitGenerator" cdef np.ndarray randoms diff --git a/blimgui/dist64/numpy/random/_examples/numba/extending.py b/blimgui/dist64/numpy/random/_examples/numba/extending.py index ef1a002..92b7359 100644 --- a/blimgui/dist64/numpy/random/_examples/numba/extending.py +++ b/blimgui/dist64/numpy/random/_examples/numba/extending.py @@ -1,8 +1,9 @@ -import numpy as np +from timeit import timeit + import numba as nb +import numpy as np from numpy.random import PCG64 -from timeit import timeit bit_gen = PCG64() next_d = bit_gen.cffi.next_double @@ -24,6 +25,7 @@ def normals(n, state): out[2 * i + 1] = f * x2 return out + # Compile using Numba normalsj = nb.jit(normals, nopython=True) # Must use state address not state with numba @@ -32,11 +34,13 @@ def normals(n, state): def numbacall(): return normalsj(n, state_addr) + rg = np.random.Generator(PCG64()) def numpycall(): return rg.normal(size=n) + # Check that the functions work r1 = numbacall() r2 = numpycall() @@ -80,5 +84,3 @@ def bounded_uints(lb, ub, n, state): bounded_uints(323, 2394691, 10000000, ctypes_state.value) - - diff --git a/blimgui/dist64/numpy/random/_examples/numba/extending_distributions.py b/blimgui/dist64/numpy/random/_examples/numba/extending_distributions.py index 2a2eb43..6051f5c 100644 --- a/blimgui/dist64/numpy/random/_examples/numba/extending_distributions.py +++ b/blimgui/dist64/numpy/random/_examples/numba/extending_distributions.py @@ -27,9 +27,9 @@ import os import numba as nb -import numpy as np from cffi import FFI +import numpy as np from numpy.random import PCG64 ffi = FFI() diff --git 
a/blimgui/dist64/numpy/random/_generator.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/_generator.cp314-win_amd64.lib similarity index 67% rename from blimgui/dist64/numpy/random/_generator.cp313-win_amd64.lib rename to blimgui/dist64/numpy/random/_generator.cp314-win_amd64.lib index d658fc1..73c9d2c 100644 Binary files a/blimgui/dist64/numpy/random/_generator.cp313-win_amd64.lib and b/blimgui/dist64/numpy/random/_generator.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/_generator.pyi b/blimgui/dist64/numpy/random/_generator.pyi index 0b345f7..329614f 100644 --- a/blimgui/dist64/numpy/random/_generator.pyi +++ b/blimgui/dist64/numpy/random/_generator.pyi @@ -1,4 +1,4 @@ -from collections.abc import Callable +from collections.abc import Callable, MutableSequence from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np @@ -68,134 +68,134 @@ class Generator: @overload def standard_normal( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... @overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_normal( # type: ignore[misc] self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: None | NDArray[float32] = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: None | NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload - def permutation(self, x: int, axis: int = ...) 
-> NDArray[int64]: ... + def permutation(self, x: int, axis: int = 0) -> NDArray[int64]: ... @overload - def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ... + def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ... @overload def standard_exponential( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = ..., - out: None = ..., + method: Literal["zig", "inv"] = "zig", + out: None = None, ) -> float: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, *, - method: Literal["zig", "inv"] = ..., - out: None | NDArray[float64] = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - method: Literal["zig", "inv"] = ..., - out: None | NDArray[float32] = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = ..., - out: None | NDArray[float64] = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... 
@overload def random( self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, *, - out: None | NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: None | NDArray[float32] = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: None | NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def beta( self, a: _FloatLike_co, b: _FloatLike_co, - size: None = ..., + size: None = None, ) -> float: ... # type: ignore[misc] @overload def beta( self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - size: None | _ShapeLike = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload - def exponential(self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> NDArray[float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... # @overload @@ -462,7 +462,7 @@ class Generator: low: int, high: int | None = None, size: None = None, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., endpoint: bool = False, ) -> Any: ... @overload @@ -471,7 +471,7 @@ class Generator: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., endpoint: bool = False, ) -> NDArray[Any]: ... 
@@ -481,135 +481,136 @@ class Generator: def choice( self, a: int, - size: None = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - axis: int = ..., - shuffle: bool = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> int: ... @overload def choice( self, a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - axis: int = ..., - shuffle: bool = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> NDArray[int64]: ... @overload def choice( self, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - axis: int = ..., - shuffle: bool = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> Any: ... @overload def choice( self, a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - axis: int = ..., - shuffle: bool = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> NDArray[Any]: ... @overload def uniform( self, - low: _FloatLike_co = ..., - high: _FloatLike_co = ..., - size: None = ..., + low: _FloatLike_co = 0.0, + high: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def uniform( self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def normal( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... 
# type: ignore[misc] @overload def normal( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( # type: ignore[misc] self, shape: _FloatLike_co, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: None | NDArray[float32] = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: None | NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def gamma( - self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ... + self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def gamma( self, shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def f( - self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ... + self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None ) -> float: ... 
# type: ignore[misc] @overload def f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, - size: None | _ShapeLike = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_f( self, dfnum: _FloatLike_co, dfden: _FloatLike_co, - nonc: _FloatLike_co, size: None = ... + nonc: _FloatLike_co, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def noncentral_f( @@ -617,140 +618,140 @@ class Generator: dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def chisquare( - self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_chisquare( - self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ... + self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: None | _ShapeLike = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... + self, df: _ArrayLikeFloat_co, size: None = None ) -> NDArray[float64]: ... @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... 
@overload def vonmises( - self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ... + self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def vonmises( self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, - size: None | _ShapeLike = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def pareto( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def weibull( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def power(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def power( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... 
@overload def laplace( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def laplace( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def gumbel( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def gumbel( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def logistic( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def logistic( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def lognormal( self, - mean: _FloatLike_co = ..., - sigma: _FloatLike_co = ..., - size: None = ..., + mean: _FloatLike_co = 0.0, + sigma: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def lognormal( self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... 
@overload - def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def wald( - self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ... + self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def wald( self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, - size: None | _ShapeLike = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def triangular( @@ -758,7 +759,7 @@ class Generator: left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, - size: None = ..., + size: None = None, ) -> float: ... # type: ignore[misc] @overload def triangular( @@ -766,46 +767,46 @@ class Generator: left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload def negative_binomial( - self, n: _FloatLike_co, p: _FloatLike_co, size: None = ... + self, n: _FloatLike_co, p: _FloatLike_co, size: None = None ) -> int: ... # type: ignore[misc] @overload def negative_binomial( self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, - size: None | _ShapeLike = ... 
+ size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] + def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... # type: ignore[misc] @overload def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def zipf( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def geometric( - self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = ... + self, ngood: int, nbad: int, nsample: int, size: None = None ) -> int: ... # type: ignore[misc] @overload def hypergeometric( @@ -813,44 +814,49 @@ class Generator: ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[int64]: ... @overload - def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def logseries( - self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... 
def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, *, - method: Literal["svd", "eigh", "cholesky"] = ..., + method: Literal["svd", "eigh", "cholesky"] = "svd", ) -> NDArray[float64]: ... def multinomial( self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, - size: None | _ShapeLike = ... + size: _ShapeLike | None = None ) -> NDArray[int64]: ... def multivariate_hypergeometric( self, colors: _ArrayLikeInt_co, nsample: int, - size: None | _ShapeLike = ..., - method: Literal["marginals", "count"] = ..., + size: _ShapeLike | None = None, + method: Literal["marginals", "count"] = "marginals", ) -> NDArray[int64]: ... def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... def permuted( - self, x: ArrayLike, *, axis: None | int = ..., out: None | NDArray[Any] = ... + self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None ) -> NDArray[Any]: ... - def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... + + # axis must be 0 for MutableSequence + @overload + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... + @overload + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... def default_rng( - seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState = ... + seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None ) -> Generator: ... 
diff --git a/blimgui/dist64/numpy/random/_mt19937.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/_mt19937.cp313-win_amd64.lib deleted file mode 100644 index acfa802..0000000 Binary files a/blimgui/dist64/numpy/random/_mt19937.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/random/_mt19937.cp314-win_amd64.lib b/blimgui/dist64/numpy/random/_mt19937.cp314-win_amd64.lib new file mode 100644 index 0000000..196cae9 Binary files /dev/null and b/blimgui/dist64/numpy/random/_mt19937.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/_mt19937.pyi b/blimgui/dist64/numpy/random/_mt19937.pyi index 4e0b68b..1c16616 100644 --- a/blimgui/dist64/numpy/random/_mt19937.pyi +++ b/blimgui/dist64/numpy/random/_mt19937.pyi @@ -1,9 +1,11 @@ from typing import TypedDict, type_check_only from numpy import uint32 -from numpy.typing import NDArray -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +__all__ = ["MT19937"] @type_check_only class _MT19937Internal(TypedDict): @@ -16,10 +18,10 @@ class _MT19937State(TypedDict): state: _MT19937Internal class MT19937(BitGenerator): - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... - def jumped(self, jumps: int = ...) -> MT19937: ... - @property + def jumped(self, jumps: int = 1) -> MT19937: ... + @property # type: ignore[override] def state(self) -> _MT19937State: ... @state.setter def state(self, value: _MT19937State) -> None: ... 
diff --git a/blimgui/dist64/numpy/random/_pcg64.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/_pcg64.cp313-win_amd64.lib deleted file mode 100644 index 30e4113..0000000 Binary files a/blimgui/dist64/numpy/random/_pcg64.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/random/_pcg64.cp314-win_amd64.lib b/blimgui/dist64/numpy/random/_pcg64.cp314-win_amd64.lib new file mode 100644 index 0000000..f4d723c Binary files /dev/null and b/blimgui/dist64/numpy/random/_pcg64.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/_pcg64.pyi b/blimgui/dist64/numpy/random/_pcg64.pyi index 62de5cd..b32adc3 100644 --- a/blimgui/dist64/numpy/random/_pcg64.pyi +++ b/blimgui/dist64/numpy/random/_pcg64.pyi @@ -1,7 +1,9 @@ from typing import TypedDict, type_check_only -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +__all__ = ["PCG64"] @type_check_only class _PCG64Internal(TypedDict): @@ -16,9 +18,9 @@ class _PCG64State(TypedDict): uinteger: int class PCG64(BitGenerator): - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64: ... - @property + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = 1) -> PCG64: ... + @property # type: ignore[override] def state( self, ) -> _PCG64State: ... @@ -30,15 +32,10 @@ class PCG64(BitGenerator): def advance(self, delta: int) -> PCG64: ... class PCG64DXSM(BitGenerator): - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64DXSM: ... - @property - def state( - self, - ) -> _PCG64State: ... + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = 1) -> PCG64DXSM: ... 
+ @property # type: ignore[override] + def state(self) -> _PCG64State: ... @state.setter - def state( - self, - value: _PCG64State, - ) -> None: ... + def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/blimgui/dist64/numpy/random/_philox.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/_philox.cp313-win_amd64.lib deleted file mode 100644 index c76e635..0000000 Binary files a/blimgui/dist64/numpy/random/_philox.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/random/_philox.cp314-win_amd64.lib b/blimgui/dist64/numpy/random/_philox.cp314-win_amd64.lib new file mode 100644 index 0000000..0f37664 Binary files /dev/null and b/blimgui/dist64/numpy/random/_philox.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/_philox.pyi b/blimgui/dist64/numpy/random/_philox.pyi index d44d3fe..2ab0cb3 100644 --- a/blimgui/dist64/numpy/random/_philox.pyi +++ b/blimgui/dist64/numpy/random/_philox.pyi @@ -1,9 +1,11 @@ from typing import TypedDict, type_check_only from numpy import uint64 -from numpy.typing import NDArray -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +__all__ = ["Philox"] @type_check_only class _PhiloxInternal(TypedDict): @@ -22,18 +24,13 @@ class _PhiloxState(TypedDict): class Philox(BitGenerator): def __init__( self, - seed: None | _ArrayLikeInt_co | SeedSequence = ..., - counter: None | _ArrayLikeInt_co = ..., - key: None | _ArrayLikeInt_co = ..., + seed: _ArrayLikeInt_co | SeedSequence | None = ..., + counter: _ArrayLikeInt_co | None = ..., + key: _ArrayLikeInt_co | None = ..., ) -> None: ... - @property - def state( - self, - ) -> _PhiloxState: ... + @property # type: ignore[override] + def state(self) -> _PhiloxState: ... @state.setter - def state( - self, - value: _PhiloxState, - ) -> None: ... 
- def jumped(self, jumps: int = ...) -> Philox: ... + def state(self, value: _PhiloxState) -> None: ... + def jumped(self, jumps: int = 1) -> Philox: ... def advance(self, delta: int) -> Philox: ... diff --git a/blimgui/dist64/numpy/random/_pickle.py b/blimgui/dist64/numpy/random/_pickle.py index c965ead..8d10e51 100644 --- a/blimgui/dist64/numpy/random/_pickle.py +++ b/blimgui/dist64/numpy/random/_pickle.py @@ -1,11 +1,10 @@ -from .bit_generator import BitGenerator -from .mtrand import RandomState -from ._philox import Philox -from ._pcg64 import PCG64, PCG64DXSM -from ._sfc64 import SFC64 - from ._generator import Generator from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator +from .mtrand import RandomState BitGenerators = {'MT19937': MT19937, 'PCG64': PCG64, diff --git a/blimgui/dist64/numpy/random/_pickle.pyi b/blimgui/dist64/numpy/random/_pickle.pyi index caa07e1..6e383c5 100644 --- a/blimgui/dist64/numpy/random/_pickle.pyi +++ b/blimgui/dist64/numpy/random/_pickle.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Final, Literal, TypeVar, TypedDict, overload, type_check_only +from typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only from numpy.random._generator import Generator from numpy.random._mt19937 import MT19937 diff --git a/blimgui/dist64/numpy/random/_sfc64.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/_sfc64.cp313-win_amd64.lib deleted file mode 100644 index 37e1c4c..0000000 Binary files a/blimgui/dist64/numpy/random/_sfc64.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/random/_sfc64.cp314-win_amd64.lib b/blimgui/dist64/numpy/random/_sfc64.cp314-win_amd64.lib new file mode 100644 index 0000000..e5a7bef Binary files /dev/null and b/blimgui/dist64/numpy/random/_sfc64.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/_sfc64.pyi 
b/blimgui/dist64/numpy/random/_sfc64.pyi index 8e6e485..9449d22 100644 --- a/blimgui/dist64/numpy/random/_sfc64.pyi +++ b/blimgui/dist64/numpy/random/_sfc64.pyi @@ -1,8 +1,10 @@ from typing import TypedDict, type_check_only from numpy import uint64 -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import NDArray, _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +__all__ = ["SFC64"] @type_check_only class _SFC64Internal(TypedDict): @@ -16,13 +18,8 @@ class _SFC64State(TypedDict): uinteger: int class SFC64(BitGenerator): - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - @property - def state( - self, - ) -> _SFC64State: ... + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + @property # type: ignore[override] + def state(self) -> _SFC64State: ... @state.setter - def state( - self, - value: _SFC64State, - ) -> None: ... + def state(self, value: _SFC64State) -> None: ... 
diff --git a/blimgui/dist64/numpy/random/bit_generator.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/bit_generator.cp313-win_amd64.lib deleted file mode 100644 index e97ac7c..0000000 Binary files a/blimgui/dist64/numpy/random/bit_generator.cp313-win_amd64.lib and /dev/null differ diff --git a/blimgui/dist64/numpy/random/bit_generator.cp314-win_amd64.lib b/blimgui/dist64/numpy/random/bit_generator.cp314-win_amd64.lib new file mode 100644 index 0000000..7313ebc Binary files /dev/null and b/blimgui/dist64/numpy/random/bit_generator.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/bit_generator.pxd b/blimgui/dist64/numpy/random/bit_generator.pxd index 0b1d793..5a7aadf 100644 --- a/blimgui/dist64/numpy/random/bit_generator.pxd +++ b/blimgui/dist64/numpy/random/bit_generator.pxd @@ -31,5 +31,10 @@ cdef class SeedSequence(): np.ndarray[np.npy_uint32, ndim=1] entropy_array) cdef get_assembled_entropy(self) -cdef class SeedlessSequence(): +cdef class SeedlessSeedSequence: + pass + +# NOTE: This has no implementation and should not be used. It purely exists for +# backwards compatibility, see https://github.com/scipy/scipy/issues/24215. 
+cdef class SeedlessSequence: pass diff --git a/blimgui/dist64/numpy/random/bit_generator.pyi b/blimgui/dist64/numpy/random/bit_generator.pyi index 19434be..e1e6942 100644 --- a/blimgui/dist64/numpy/random/bit_generator.pyi +++ b/blimgui/dist64/numpy/random/bit_generator.pyi @@ -1,13 +1,29 @@ import abc +from _typeshed import Incomplete from collections.abc import Callable, Mapping, Sequence from threading import Lock -from typing import Any, ClassVar, Literal, NamedTuple, TypeAlias, TypedDict, overload, type_check_only - -from _typeshed import Incomplete -from typing_extensions import CapsuleType, Self +from typing import ( + Any, + ClassVar, + Literal, + NamedTuple, + Self, + TypeAlias, + TypedDict, + overload, + type_check_only, +) +from typing_extensions import CapsuleType import numpy as np -from numpy._typing import NDArray, _ArrayLikeInt_co, _DTypeLike, _ShapeLike, _UInt32Codes, _UInt64Codes +from numpy._typing import ( + NDArray, + _ArrayLikeInt_co, + _DTypeLike, + _ShapeLike, + _UInt32Codes, + _UInt64Codes, +) __all__ = ["BitGenerator", "SeedSequence"] diff --git a/blimgui/dist64/numpy/random/mtrand.cp313-win_amd64.lib b/blimgui/dist64/numpy/random/mtrand.cp314-win_amd64.lib similarity index 68% rename from blimgui/dist64/numpy/random/mtrand.cp313-win_amd64.lib rename to blimgui/dist64/numpy/random/mtrand.cp314-win_amd64.lib index 3a87fd8..9c0a5b8 100644 Binary files a/blimgui/dist64/numpy/random/mtrand.cp313-win_amd64.lib and b/blimgui/dist64/numpy/random/mtrand.cp314-win_amd64.lib differ diff --git a/blimgui/dist64/numpy/random/mtrand.pyi b/blimgui/dist64/numpy/random/mtrand.pyi index a87dccf..9274005 100644 --- a/blimgui/dist64/numpy/random/mtrand.pyi +++ b/blimgui/dist64/numpy/random/mtrand.pyi @@ -1,6 +1,6 @@ import builtins from collections.abc import Callable -from typing import Any, overload, Literal +from typing import Any, Literal, overload import numpy as np from numpy import ( @@ -12,14 +12,13 @@ from numpy import ( int64, int_, long, + uint, 
uint8, uint16, uint32, uint64, - uint, ulong, ) -from numpy.random.bit_generator import BitGenerator from numpy._typing import ( ArrayLike, NDArray, @@ -41,52 +40,111 @@ from numpy._typing import ( _UIntCodes, _ULongCodes, ) +from numpy.random.bit_generator import BitGenerator +__all__ = [ + "RandomState", + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_bit_generator", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_bit_generator", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", +] class RandomState: _bit_generator: BitGenerator - def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ... + def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... - def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 + def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... @overload - def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... 
+ def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... @overload def get_state( - self, legacy: Literal[True] = ... + self, legacy: Literal[True] = True ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... def set_state( self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] ) -> None: ... @overload - def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + def random_sample(self, size: None = None) -> float: ... # type: ignore[misc] @overload def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def random(self, size: None = ...) -> float: ... # type: ignore[misc] + def random(self, size: None = None) -> float: ... # type: ignore[misc] @overload def random(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + def beta(self, a: float, b: float, size: None = None) -> float: ... # type: ignore[misc] @overload def beta( - self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_exponential(self, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def tomaxint(self, size: None = ...) -> int: ... 
# type: ignore[misc] + def tomaxint(self, size: None = None) -> int: ... # type: ignore[misc] @overload # Generates long values, but stores it in a 64bit int: def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... @@ -94,265 +152,267 @@ class RandomState: def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., + high: int | None = None, + size: None = None, ) -> int: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[bool] = ..., ) -> bool: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[np.bool] = ..., ) -> np.bool: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[int] = ..., ) -> int: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> uint8: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> uint16: ... 
@overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> uint32: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 ) -> uint: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> ulong: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> uint64: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> int8: ... 
@overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> int16: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> int32: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 ) -> int_: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> long: ... @overload def randint( # type: ignore[misc] self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + high: int | None = None, + size: None = None, + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 ) -> int64: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: _DTypeLikeBool = ..., ) -> NDArray[np.bool]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> NDArray[int8]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> NDArray[int16]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> NDArray[int32]: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 ) -> NDArray[int64]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> NDArray[uint8]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> NDArray[uint16]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> NDArray[uint32]: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> NDArray[uint64]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> NDArray[long]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> NDArray[ulong]: ... def bytes(self, length: int) -> builtins.bytes: ... @overload def choice( self, a: int, - size: None = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> int: ... @overload def choice( self, a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[long]: ... 
@overload def choice( self, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def choice( self, a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[Any]: ... @overload - def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def uniform( + self, low: float = 0.0, high: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] @overload def uniform( self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def rand(self) -> float: ... @@ -363,233 +423,276 @@ class RandomState: @overload def randn(self, *args: int) -> NDArray[float64]: ... @overload - def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc] + def random_integers( + self, low: int, high: int | None = None, size: None = None + ) -> int: ... # type: ignore[misc] @overload def random_integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_normal(self, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_normal( # type: ignore[misc] - self, size: _ShapeLike = ... + self, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def normal(self, loc: float = ..., scale: float = ..., size: None = ...) 
-> float: ... # type: ignore[misc] + def normal( + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] @overload def normal( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( # type: ignore[misc] self, shape: float, - size: None = ..., + size: None = None, ) -> float: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def gamma( self, shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... # type: ignore[misc] @overload def f( - self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + def noncentral_f( + self, dfnum: float, dfden: float, nonc: float, size: None = None + ) -> float: ... 
# type: ignore[misc] @overload def noncentral_f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + def chisquare(self, df: float, size: None = None) -> float: ... # type: ignore[misc] @overload def chisquare( - self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + def noncentral_chisquare( + self, df: float, nonc: float, size: None = None + ) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( - self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + def standard_t(self, df: float, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... + self, df: _ArrayLikeFloat_co, size: None = None ) -> NDArray[float64]: ... @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... # type: ignore[misc] @overload def vonmises( - self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... 
+ self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def pareto(self, a: float, size: None = None) -> float: ... # type: ignore[misc] @overload def pareto( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def weibull(self, a: float, size: None = None) -> float: ... # type: ignore[misc] @overload def weibull( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def power(self, a: float, size: None = None) -> float: ... # type: ignore[misc] @overload def power( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... @overload - def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def laplace( + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... 
# type: ignore[misc] @overload def laplace( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gumbel( + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] @overload def gumbel( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def logistic( + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] @overload def logistic( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def lognormal( + self, mean: float = 0.0, sigma: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] @overload def lognormal( self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... 
# type: ignore[misc] @overload def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + def wald(self, mean: float, scale: float, size: None = None) -> float: ... # type: ignore[misc] @overload def wald( - self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc] + def triangular( + self, left: float, mode: float, right: float, size: None = None + ) -> float: ... # type: ignore[misc] @overload def triangular( self, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def binomial( + self, n: int, p: float, size: None = None + ) -> int: ... # type: ignore[misc] @overload def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def negative_binomial( + self, n: float, p: float, size: None = None + ) -> int: ... # type: ignore[misc] @overload def negative_binomial( - self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: _ShapeLike | None = None ) -> NDArray[long]: ... 
@overload - def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] + def poisson( + self, lam: float = 1.0, size: None = None + ) -> int: ... # type: ignore[misc] @overload def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + def zipf(self, a: float, size: None = None) -> int: ... # type: ignore[misc] @overload def zipf( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def geometric(self, p: float, size: None = None) -> int: ... # type: ignore[misc] @overload def geometric( - self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = None + ) -> int: ... # type: ignore[misc] @overload def hypergeometric( self, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: None | _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def logseries(self, p: float, size: None = None) -> int: ... # type: ignore[misc] @overload def logseries( - self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... 
def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, ) -> NDArray[float64]: ... def multinomial( - self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: _ShapeLike | None = None ) -> NDArray[long]: ... def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... def shuffle(self, x: ArrayLike) -> None: ... @overload @@ -651,8 +754,6 @@ zipf = _rand.zipf sample = _rand.random_sample ranf = _rand.random_sample -def set_bit_generator(bitgen: BitGenerator) -> None: - ... +def set_bit_generator(bitgen: BitGenerator) -> None: ... -def get_bit_generator() -> BitGenerator: - ... +def get_bit_generator() -> BitGenerator: ... 
diff --git a/blimgui/dist64/numpy/random/tests/test_direct.py b/blimgui/dist64/numpy/random/tests/test_direct.py index 8993a43..b6f188f 100644 --- a/blimgui/dist64/numpy/random/tests/test_direct.py +++ b/blimgui/dist64/numpy/random/tests/test_direct.py @@ -1,17 +1,28 @@ import os -from os.path import join import sys +from os.path import join -import numpy as np -from numpy.testing import (assert_equal, assert_allclose, assert_array_equal, - assert_raises) import pytest +import numpy as np from numpy.random import ( - Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence, - SFC64, default_rng + MT19937, + PCG64, + PCG64DXSM, + SFC64, + Generator, + Philox, + RandomState, + SeedSequence, + default_rng, ) from numpy.random._common import interface +from numpy.testing import ( + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) try: import cffi # noqa: F401 @@ -130,9 +141,11 @@ def gauss_from_uint(x, n, bits): def test_seedsequence(): - from numpy.random.bit_generator import (ISeedSequence, - ISpawnableSeedSequence, - SeedlessSeedSequence) + from numpy.random.bit_generator import ( + ISeedSequence, + ISpawnableSeedSequence, + SeedlessSeedSequence, + ) s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6) s1.spawn(10) @@ -432,7 +445,6 @@ def test_advange_large(self): assert state["state"] == advanced_state - class TestPCG64DXSM(Base): @classmethod def setup_class(cls): @@ -560,6 +572,9 @@ def test_passthrough(self): assert rg2 is rg assert rg2.bit_generator is bg + @pytest.mark.thread_unsafe( + reason="np.random.set_bit_generator affects global state" + ) def test_coercion_RandomState_Generator(self): # use default_rng to coerce RandomState to Generator rs = RandomState(1234) diff --git a/blimgui/dist64/numpy/random/tests/test_extending.py b/blimgui/dist64/numpy/random/tests/test_extending.py index bede8a7..efb1241 100644 --- a/blimgui/dist64/numpy/random/tests/test_extending.py +++ 
b/blimgui/dist64/numpy/random/tests/test_extending.py @@ -1,15 +1,15 @@ -from importlib.util import spec_from_file_location, module_from_spec import os -import pytest import shutil import subprocess import sys import sysconfig import warnings +from importlib.util import module_from_spec, spec_from_file_location -import numpy as np -from numpy.testing import IS_WASM, IS_EDITABLE +import pytest +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM try: import cffi @@ -54,7 +54,13 @@ ) @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") @pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='Meson unable to find MSVC linker on win-arm64') @pytest.mark.slow +@pytest.mark.thread_unsafe( + reason="building cython code in a subprocess doesn't make sense to do in many " + "threads and sometimes crashes" +) def test_cython(tmp_path): import glob # build the examples in a temporary directory @@ -94,8 +100,7 @@ def test_cython(tmp_path): if txt_to_find in line: break else: - assert False, ("Could not find '{}' in C file, " - "wrong pxd used".format(txt_to_find)) + assert False, f"Could not find '{txt_to_find}' in C file, wrong pxd used" # import without adding the directory to sys.path suffix = sysconfig.get_config_var('EXT_SUFFIX') diff --git a/blimgui/dist64/numpy/random/tests/test_generator_mt19937.py b/blimgui/dist64/numpy/random/tests/test_generator_mt19937.py index 56339ae..92a57ba 100644 --- a/blimgui/dist64/numpy/random/tests/test_generator_mt19937.py +++ b/blimgui/dist64/numpy/random/tests/test_generator_mt19937.py @@ -1,18 +1,24 @@ +import hashlib import os.path import sys -import hashlib +import warnings import pytest import numpy as np from numpy.exceptions import AxisError from numpy.linalg import LinAlgError +from numpy.random import MT19937, Generator, RandomState, SeedSequence from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_allclose, 
- assert_warns, assert_no_warnings, assert_array_equal, - assert_array_almost_equal, suppress_warnings, IS_WASM) - -from numpy.random import Generator, MT19937, SeedSequence, RandomState + IS_WASM, + assert_, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) random = Generator(MT19937()) @@ -20,20 +26,20 @@ { "seed": 0, "steps": 10, - "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, - "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, + "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, # noqa: E501 + "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, # noqa: E501 }, { - "seed":384908324, - "steps":312, - "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, - "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, + "seed": 384908324, + "steps": 312, + "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, # noqa: E501 + "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, # noqa: E501 }, { "seed": [839438204, 980239840, 859048019, 821], "steps": 511, - "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, - "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, + "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, # noqa: E501 + "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, # noqa: E501 }, ] @@ -93,6 +99,24 @@ def test_p_is_nan(self): # Issue #4571. 
assert_raises(ValueError, random.binomial, 1, np.nan) + def test_p_extremely_small(self): + n = 50000000000 + p = 5e-17 + sample_size = 20000000 + x = random.binomial(n, p, size=sample_size) + sample_mean = x.mean() + expected_mean = n * p + sigma = np.sqrt(n * p * (1 - p) / sample_size) + # Note: the parameters were chosen so that expected_mean - 6*sigma + # is a positive value. The first `assert` below validates that + # assumption (in case someone edits the parameters in the future). + # The second `assert` is the actual test. + low_bound = expected_mean - 6 * sigma + assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" + test_msg = (f"sample mean {sample_mean} deviates from the expected mean " + f"{expected_mean} by more than 6*sigma") + assert abs(expected_mean - sample_mean) < 6 * sigma, test_msg + class TestMultinomial: def test_basic(self): @@ -151,8 +175,7 @@ def test_multinomial_pvals_float32(self): class TestMultivariateHypergeometric: - def setup_method(self): - self.seed = 8675309 + seed = 8675309 def test_argument_validation(self): # Error cases... 
@@ -215,7 +238,7 @@ def test_edge_cases(self, method): x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, method=method) - assert_array_equal(x, [[3, 4, 5]]*3) + assert_array_equal(x, [[3, 4, 5]] * 3) # Cases for nsample: # nsample < 10 @@ -284,37 +307,40 @@ def test_repeatability3(self): class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.rg = Generator(MT19937(self.seed)) - self.bit_generator = self.rg.bit_generator - self.state = self.bit_generator.state - self.legacy_state = (self.state['bit_generator'], - self.state['state']['key'], - self.state['state']['pos']) + def _create_rng(self): + seed = 1234567890 + rg = Generator(MT19937(seed)) + bit_generator = rg.bit_generator + state = bit_generator.state + legacy_state = (state['bit_generator'], + state['state']['key'], + state['state']['pos']) + return rg, bit_generator, state def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.rg.standard_normal(size=3) - self.bit_generator.state = self.state - new = self.rg.standard_normal(size=3) + rg, bit_generator, state = self._create_rng() + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. - - self.rg.standard_normal() - state = self.bit_generator.state - old = self.rg.standard_normal(size=3) - self.bit_generator.state = state - new = self.rg.standard_normal(size=3) + rg, bit_generator, state = self._create_rng() + rg.standard_normal() + state = bit_generator.state + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) assert_(np.all(old == new)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. 
- self.rg.negative_binomial(0.5, 0.5) + rg, _, _ = self._create_rng() + rg.negative_binomial(0.5, 0.5) class TestIntegers: @@ -349,7 +375,7 @@ def test_bounds_checking(self, endpoint): endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, 1, [0], endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, [ubnd+1], [ubnd], + assert_raises(ValueError, self.rfunc, [ubnd + 1], [ubnd], endpoint=endpoint, dtype=dt) def test_bounds_checking_array(self, endpoint): @@ -494,15 +520,15 @@ def test_repeatability(self, endpoint): # We use a sha256 hash of generated sequences of 1000 samples # in the range [0, 6) for all but bool, where the range # is [0, 2). Hashes are for little endian numbers. - tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', - 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', - 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', - 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', - 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', - 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', - 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', - 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', - 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} + tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', # noqa: E501 + 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', # noqa: E501 + 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 
'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} # noqa: E501 for dt in self.itype[1:]: random = Generator(MT19937(1234)) @@ -589,12 +615,12 @@ def test_repeatability_32bit_boundary_broadcasting(self): def test_int64_uint64_broadcast_exceptions(self, endpoint): configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)), np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0), - (-2**63-1, -2**63-1))} + (-2**63 - 1, -2**63 - 1))} for dtype in configs: for config in configs[dtype]: low, high = config high = high - endpoint - low_a = np.array([[low]*10]) + low_a = np.array([[low] * 10]) high_a = np.array([high] * 10) assert_raises(ValueError, random.integers, low, high, endpoint=endpoint, dtype=dtype) @@ -605,7 +631,7 @@ def test_int64_uint64_broadcast_exceptions(self, endpoint): assert_raises(ValueError, random.integers, low_a, high_a, endpoint=endpoint, dtype=dtype) - low_o = np.array([[low]*10], dtype=object) + low_o = np.array([[low] * 10], dtype=object) high_o = np.array([high] * 10, dtype=object) assert_raises(ValueError, random.integers, low_o, high, endpoint=endpoint, dtype=dtype) @@ -712,9 +738,7 @@ def test_integers_small_dtype_chisquared(self, sample_size, high, class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_integers(self): random = Generator(MT19937(self.seed)) @@ -733,7 +757,7 @@ def test_integers_masked(self): def test_integers_closed(self): random = Generator(MT19937(self.seed)) actual = random.integers(-99, 99, size=(3, 2), endpoint=True) - desired = np.array([[-80, -56], [ 41, 38], [-83, -15]]) + desired = np.array([[-80, -56], [41, 38], [-83, -15]]) assert_array_equal(actual, desired) def 
test_integers_max_int(self): @@ -763,7 +787,7 @@ def test_random(self): def test_random_float(self): random = Generator(MT19937(self.seed)) actual = random.random((3, 2)) - desired = np.array([[0.0969992 , 0.70751746], + desired = np.array([[0.0969992 , 0.70751746], # noqa: E203 [0.08436483, 0.76773121], [0.66506902, 0.71548719]]) assert_array_almost_equal(actual, desired, decimal=7) @@ -867,7 +891,7 @@ def test_choice_return_shape(self): assert_(random.choice(arr, replace=True) is a) # Check 0-d array - s = tuple() + s = () assert_(not np.isscalar(random.choice(2, s, replace=True))) assert_(not np.isscalar(random.choice(2, s, replace=False))) assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) @@ -1179,10 +1203,10 @@ def test_dirichlet(self): alpha = np.array([51.72840233779265162, 39.74494232180943953]) actual = random.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.5439892869558927, 0.45601071304410745], - [0.5588917345860708, 0.4411082654139292 ]], + [0.5588917345860708, 0.4411082654139292 ]], # noqa: E202 [[0.5632074165063435, 0.43679258349365657], [0.54862581112627, 0.45137418887373015]], - [[0.49961831357047226, 0.5003816864295278 ], + [[0.49961831357047226, 0.5003816864295278 ], # noqa: E202 [0.52374806183482, 0.47625193816517997]]]) assert_array_almost_equal(actual, desired, decimal=15) bad_alpha = np.array([5.4e-01, -1.0e-16]) @@ -1242,9 +1266,10 @@ def test_dirichlet_small_alpha(self): assert_array_almost_equal(actual, expected, decimal=15) @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_dirichlet_moderately_small_alpha(self): # Use alpha.max() < 0.1 to trigger stick breaking code path - alpha = np.array([0.02, 0.04]) + alpha = np.array([0.02, 0.04, 0.03]) exact_mean = alpha / alpha.sum() random = Generator(MT19937(self.seed)) sample = random.dirichlet(alpha, size=20000000) @@ -1275,7 +1300,7 @@ def test_exponential(self): actual = random.exponential(1.1234, size=(3, 2)) desired = 
np.array([[0.098845481066258, 1.560752510746964], [0.075730916041636, 1.769098974710777], - [1.488602544592235, 2.49684815275751 ]]) + [1.488602544592235, 2.49684815275751 ]]) # noqa: E202 assert_array_almost_equal(actual, desired, decimal=15) def test_exponential_0(self): @@ -1286,14 +1311,14 @@ def test_f(self): random = Generator(MT19937(self.seed)) actual = random.f(12, 77, size=(3, 2)) desired = np.array([[0.461720027077085, 1.100441958872451], - [1.100337455217484, 0.91421736740018 ], + [1.100337455217484, 0.91421736740018 ], # noqa: E202 [0.500811891303113, 0.826802454552058]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): random = Generator(MT19937(self.seed)) actual = random.gamma(5, 3, size=(3, 2)) - desired = np.array([[ 5.03850858902096, 7.9228656732049 ], + desired = np.array([[ 5.03850858902096, 7.9228656732049 ], # noqa: E202 [18.73983605132985, 19.57961681699238], [18.17897755150825, 18.17653912505234]]) assert_array_almost_equal(actual, desired, decimal=14) @@ -1373,7 +1398,7 @@ def test_logistic(self): random = Generator(MT19937(self.seed)) actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[-4.338584631510999, 1.890171436749954], - [-4.64547787337966 , 2.514545562919217], + [-4.64547787337966 , 2.514545562919217], # noqa: E203 [ 1.495389489198666, 1.967827627577474]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1433,12 +1458,12 @@ def test_multivariate_normal(self, method): cov = [[1, 0], [0, 1]] size = (3, 2) actual = random.multivariate_normal(mean, cov, size, method=method) - desired = np.array([[[-1.747478062846581, 11.25613495182354 ], - [-0.9967333370066214, 10.342002097029821 ]], - [[ 0.7850019631242964, 11.181113712443013 ], - [ 0.8901349653255224, 8.873825399642492 ]], - [[ 0.7130260107430003, 9.551628690083056 ], - [ 0.7127098726541128, 11.991709234143173 ]]]) + desired = np.array([[[-1.747478062846581, 11.25613495182354 ], # noqa: E202 + 
[-0.9967333370066214, 10.342002097029821]], + [[ 0.7850019631242964, 11.181113712443013], + [ 0.8901349653255224, 8.873825399642492]], + [[ 0.7130260107430003, 9.551628690083056], + [ 0.7127098726541128, 11.991709234143173]]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1456,8 +1481,8 @@ def test_multivariate_normal(self, method): # Check that non positive-semidefinite covariance warns with # RuntimeWarning cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov, method='eigh') assert_raises(LinAlgError, random.multivariate_normal, mean, cov, method='cholesky') @@ -1484,10 +1509,9 @@ def test_multivariate_normal(self, method): method='cholesky') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter("error") random.multivariate_normal(mean, cov, method=method) - w = sup.record(RuntimeWarning) - assert len(w) == 0 mu = np.zeros(2) cov = np.eye(2) @@ -1500,7 +1524,7 @@ def test_multivariate_normal(self, method): assert_raises(ValueError, random.multivariate_normal, mu, np.eye(3)) - @pytest.mark.parametrize('mean, cov', [([0], [[1+1j]]), ([0j], [[1]])]) + @pytest.mark.parametrize('mean, cov', [([0], [[1 + 1j]]), ([0j], [[1]])]) def test_multivariate_normal_disallow_complex(self, mean, cov): random = Generator(MT19937(self.seed)) with pytest.raises(TypeError, match="must not be complex"): @@ -1550,7 +1574,7 @@ def test_noncentral_chisquare(self): actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[ 1.70561552362133, 15.97378184942111], [13.71483425173724, 20.17859633310629], - [11.3615477156643 , 3.67891108738029]]) + [11.3615477156643 , 3.67891108738029]]) # noqa: E203 
assert_array_almost_equal(actual, desired, decimal=14) actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) @@ -1570,8 +1594,8 @@ def test_noncentral_f(self): random = Generator(MT19937(self.seed)) actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) - desired = np.array([[0.060310671139 , 0.23866058175939], - [0.86860246709073, 0.2668510459738 ], + desired = np.array([[0.060310671139 , 0.23866058175939], # noqa: E203 + [0.86860246709073, 0.2668510459738 ], # noqa: E202 [0.23375780078364, 1.88922102885943]]) assert_array_almost_equal(actual, desired, decimal=14) @@ -1668,7 +1692,7 @@ def test_standard_gamma(self): random = Generator(MT19937(self.seed)) actual = random.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[0.62970724056362, 1.22379851271008], - [3.899412530884 , 4.12479964250139], + [3.899412530884 , 4.12479964250139], # noqa: E203 [3.74994102464584, 3.74929307690815]]) assert_array_almost_equal(actual, desired, decimal=14) @@ -1681,8 +1705,8 @@ def test_standard_gammma_scalar_float(self): def test_standard_gamma_float(self): random = Generator(MT19937(self.seed)) actual = random.standard_gamma(shape=3, size=(3, 2)) - desired = np.array([[0.62971, 1.2238 ], - [3.89941, 4.1248 ], + desired = np.array([[0.62971, 1.2238], + [3.89941, 4.1248], [3.74994, 3.74929]]) assert_array_almost_equal(actual, desired, decimal=5) @@ -1717,7 +1741,7 @@ def test_standard_gamma_0(self): def test_standard_normal(self): random = Generator(MT19937(self.seed)) actual = random.standard_normal(size=(3, 2)) - desired = np.array([[-1.870934851846581, 1.25613495182354 ], + desired = np.array([[-1.870934851846581, 1.25613495182354 ], # noqa: E202 [-1.120190126006621, 0.342002097029821], [ 0.661545174124296, 1.181113712443012]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1728,7 +1752,7 @@ def test_standard_normal_unsupported_type(self): def test_standard_t(self): random = Generator(MT19937(self.seed)) actual = 
random.standard_t(df=10, size=(3, 2)) - desired = np.array([[-1.484666193042647, 0.30597891831161 ], + desired = np.array([[-1.484666193042647, 0.30597891831161], [ 1.056684299648085, -0.407312602088507], [ 0.130704414281157, -2.038053410490321]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1737,7 +1761,7 @@ def test_triangular(self): random = Generator(MT19937(self.seed)) actual = random.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) - desired = np.array([[ 7.86664070590917, 13.6313848513185 ], + desired = np.array([[ 7.86664070590917, 13.6313848513185 ], # noqa: E202 [ 7.68152445215983, 14.36169131136546], [13.16105603911429, 13.72341621856971]]) assert_array_almost_equal(actual, desired, decimal=14) @@ -1745,7 +1769,7 @@ def test_triangular(self): def test_uniform(self): random = Generator(MT19937(self.seed)) actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) - desired = np.array([[2.13306255040998 , 7.816987531021207], + desired = np.array([[2.13306255040998 , 7.816987531021207], # noqa: E203 [2.015436610109887, 8.377577533009589], [7.421792588856135, 7.891185744455209]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1779,7 +1803,7 @@ def test_uniform_neg_range(self): func = random.uniform assert_raises(ValueError, func, 2, 1) assert_raises(ValueError, func, [1, 2], [1, 1]) - assert_raises(ValueError, func, [[0, 1],[2, 3]], 2) + assert_raises(ValueError, func, [[0, 1], [2, 3]], 2) def test_scalar_exception_propagation(self): # Tests that exceptions are correctly propagated in distributions @@ -1846,11 +1870,16 @@ def test_vonmises_large_kappa_range(self, mu, kappa): def test_wald(self): random = Generator(MT19937(self.seed)) actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) - desired = np.array([[0.26871721804551, 3.2233942732115 ], + desired = np.array([[0.26871721804551, 3.2233942732115 ], # noqa: E202 [2.20328374987066, 2.40958405189353], [2.07093587449261, 0.73073890064369]]) 
assert_array_almost_equal(actual, desired, decimal=14) + def test_wald_nonnegative(self): + random = Generator(MT19937(self.seed)) + samples = random.wald(mean=1e9, scale=2.25, size=1000) + assert_(np.all(samples >= 0.0)) + def test_weibull(self): random = Generator(MT19937(self.seed)) actual = random.weibull(a=1.23, size=(3, 2)) @@ -1876,8 +1905,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 + seed = 123456789 def test_uniform(self): random = Generator(MT19937(self.seed)) @@ -1899,7 +1927,7 @@ def test_normal(self): scale = [1] bad_scale = [-1] random = Generator(MT19937(self.seed)) - desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) + desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) random = Generator(MT19937(self.seed)) actual = random.normal(loc * 3, scale) @@ -2094,7 +2122,7 @@ def test_vonmises(self): def test_pareto(self): a = [1] bad_a = [-1] - desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013]) + desired = np.array([0.95905052946317, 0.2383810889437, 1.04988745750013]) random = Generator(MT19937(self.seed)) actual = random.pareto(a * 3) @@ -2367,16 +2395,16 @@ def test_hypergeometric(self): assert_array_equal(actual, desired) assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample) assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) # noqa: E501 + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) # noqa: E501 random = Generator(MT19937(self.seed)) actual = random.hypergeometric(ngood, nbad * 3, nsample) 
assert_array_equal(actual, desired) assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample) assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) # noqa: E501 + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) # noqa: E501 random = Generator(MT19937(self.seed)) hypergeom = random.hypergeometric @@ -2450,7 +2478,7 @@ def test_multinomial_pval_broadcast(self, n): random = Generator(MT19937(self.seed)) pvals = np.array([1 / 4] * 4) actual = random.multinomial(n, pvals) - n_shape = tuple() if isinstance(n, int) else n.shape + n_shape = () if isinstance(n, int) else n.shape expected_shape = n_shape + (4,) assert actual.shape == expected_shape pvals = np.vstack([pvals, pvals]) @@ -2489,8 +2517,7 @@ def test_empty_outputs(self): @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -2535,13 +2562,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, @@ -2556,11 +2581,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = 
func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, @@ -2576,18 +2602,19 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_integers(self, endpoint): + _, _, _, tgtShape = self._create_arrays() itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = random.integers @@ -2596,27 +2623,28 @@ def test_integers(self, endpoint): for dt in itype: out = func(low, high, endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low[0], high, endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low, high[0], endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) 
+ assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) @pytest.mark.parametrize("config", JUMP_TEST_DATA) @@ -2780,8 +2808,8 @@ def test_pickle_preserves_seed_sequence(): @pytest.mark.parametrize("version", [121, 126]) def test_legacy_pickle(version): # Pickling format was changes in 1.22.x and in 2.0.x - import pickle import gzip + import pickle base_path = os.path.split(os.path.abspath(__file__))[0] pkl_file = os.path.join( diff --git a/blimgui/dist64/numpy/random/tests/test_generator_mt19937_regressions.py b/blimgui/dist64/numpy/random/tests/test_generator_mt19937_regressions.py index 5067461..bec962a 100644 --- a/blimgui/dist64/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/blimgui/dist64/numpy/random/tests/test_generator_mt19937_regressions.py @@ -1,34 +1,37 @@ -from numpy.testing import (assert_, assert_array_equal) -import numpy as np import pytest -from numpy.random import Generator, MT19937 + +import numpy as np +from numpy.random import MT19937, Generator +from numpy.testing import assert_, assert_array_equal class TestRegression: - - def setup_method(self): - self.mt19937 = Generator(MT19937(121263137472525314065)) + def _create_generator(self): + return Generator(MT19937(121263137472525314065)) def test_vonmises_range(self): # Make sure generated random variables are in [-pi, pi]. # Regression test for ticket #986. 
+ mt19937 = self._create_generator() for mu in np.linspace(-7., 7., 5): - r = self.mt19937.vonmises(mu, 1, 50) + r = mt19937.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 - assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0)) + mt19937 = self._create_generator() + assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0)) # Test for ticket #5623 args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems - assert_(self.mt19937.hypergeometric(*args) > 0) + assert_(mt19937.hypergeometric(*args) > 0) def test_logseries_convergence(self): # Test for ticket #923 + mt19937 = self._create_generator() N = 1000 - rvsn = self.mt19937.logseries(0.8, size=N) + rvsn = mt19937.logseries(0.8, size=N) # these two frequency counts should be close to theoretical # numbers with this large sample # theoretical large N result is 0.49706795 @@ -59,40 +62,45 @@ def test_call_within_randomstate(self): mt19937 = Generator(MT19937(i)) m = Generator(MT19937(4321)) # If m.state is not honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) def test_multivariate_normal_size_types(self): # Test for multivariate_normal issue with 'size' argument. # Check that the multivariate_normal size argument can be a # numpy integer. 
- self.mt19937.multivariate_normal([0], [[0]], size=1) - self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) - self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) + mt19937 = self._create_generator() + mt19937.multivariate_normal([0], [[0]], size=1) + mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) + mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) def test_beta_small_parameters(self): # Test that beta with small a and b parameters does not produce # NaNs due to roundoff errors causing 0 / 0, gh-5851 - x = self.mt19937.beta(0.0001, 0.0001, size=100) + mt19937 = self._create_generator() + x = mt19937.beta(0.0001, 0.0001, size=100) assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') def test_beta_very_small_parameters(self): # gh-24203: beta would hang with very small parameters. - self.mt19937.beta(1e-49, 1e-40) + mt19937 = self._create_generator() + mt19937.beta(1e-49, 1e-40) def test_beta_ridiculously_small_parameters(self): # gh-24266: beta would generate nan when the parameters # were subnormal or a small multiple of the smallest normal. + mt19937 = self._create_generator() tiny = np.finfo(1.0).tiny - x = self.mt19937.beta(tiny/32, tiny/40, size=50) + x = mt19937.beta(tiny / 32, tiny / 40, size=50) assert not np.any(np.isnan(x)) def test_beta_expected_zero_frequency(self): # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta # would generate too many zeros. 
+ mt19937 = self._create_generator() a = 0.0025 b = 0.0025 n = 1000000 - x = self.mt19937.beta(a, b, size=n) + x = mt19937.beta(a, b, size=n) nzeros = np.count_nonzero(x == 0) # beta CDF at x = np.finfo(np.double).smallest_subnormal/2 # is p = 0.0776169083131899, e.g, @@ -107,30 +115,32 @@ def test_beta_expected_zero_frequency(self): # exprected_freq = float(n*p) # expected_freq = 77616.90831318991 - assert 0.95*expected_freq < nzeros < 1.05*expected_freq + assert 0.95 * expected_freq < nzeros < 1.05 * expected_freq def test_choice_sum_of_probs_tolerance(self): # The sum of probs should be 1.0 with some tolerance. # For low precision dtypes the tolerance was too tight. # See numpy github issue 6123. + mt19937 = self._create_generator() a = [1, 2, 3] counts = [4, 4, 2] for dt in np.float16, np.float32, np.float64: probs = np.array(counts, dtype=dt) / sum(counts) - c = self.mt19937.choice(a, p=probs) + c = mt19937.choice(a, p=probs) assert_(c in a) with pytest.raises(ValueError): - self.mt19937.choice(a, p=probs*0.9) + mt19937.choice(a, p=probs * 0.9) def test_shuffle_of_array_of_different_length_strings(self): # Test that permuting an array of different length strings # will not cause a segfault on garbage collection # Tests gh-7710 + mt19937 = self._create_generator() a = np.array(['a', 'a' * 1000]) for _ in range(100): - self.mt19937.shuffle(a) + mt19937.shuffle(a) # Force Garbage Collection - should not segfault. import gc @@ -140,10 +150,11 @@ def test_shuffle_of_array_of_objects(self): # Test that permuting an array of objects will not cause # a segfault on garbage collection. # See gh-7719 + mt19937 = self._create_generator() a = np.array([np.arange(1), np.arange(4)], dtype=object) for _ in range(1000): - self.mt19937.shuffle(a) + mt19937.shuffle(a) # Force Garbage Collection - should not segfault. 
import gc @@ -173,10 +184,11 @@ def __array__(self, dtype=None, copy=None): assert_array_equal(m.__array__(), np.arange(5)) def test_gamma_0(self): - assert self.mt19937.standard_gamma(0.0) == 0.0 - assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0) + mt19937 = self._create_generator() + assert mt19937.standard_gamma(0.0) == 0.0 + assert_array_equal(mt19937.standard_gamma([0.0]), 0.0) - actual = self.mt19937.standard_gamma([0.0], dtype='float') + actual = mt19937.standard_gamma([0.0], dtype='float') expected = np.array([0.], dtype=np.float32) assert_array_equal(actual, expected) @@ -184,23 +196,26 @@ def test_geometric_tiny_prob(self): # Regression test for gh-17007. # When p = 1e-30, the probability that a sample will exceed 2**63-1 # is 0.9999999999907766, so we expect the result to be all 2**63-1. - assert_array_equal(self.mt19937.geometric(p=1e-30, size=3), + mt19937 = self._create_generator() + assert_array_equal(mt19937.geometric(p=1e-30, size=3), np.iinfo(np.int64).max) def test_zipf_large_parameter(self): # Regression test for part of gh-9829: a call such as rng.zipf(10000) # would hang. + mt19937 = self._create_generator() n = 8 - sample = self.mt19937.zipf(10000, size=n) + sample = mt19937.zipf(10000, size=n) assert_array_equal(sample, np.ones(n, dtype=np.int64)) def test_zipf_a_near_1(self): # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001) # would hang. + mt19937 = self._create_generator() n = 100000 - sample = self.mt19937.zipf(1.0000000000001, size=n) + sample = mt19937.zipf(1.0000000000001, size=n) # Not much of a test, but let's do something more than verify that # it doesn't hang. Certainly for a monotonically decreasing # discrete distribution truncated to signed 64 bit integers, more # than half should be less than 2**62. 
- assert np.count_nonzero(sample < 2**62) > n/2 + assert np.count_nonzero(sample < 2**62) > n / 2 diff --git a/blimgui/dist64/numpy/random/tests/test_random.py b/blimgui/dist64/numpy/random/tests/test_random.py index b893aec..1702d6f 100644 --- a/blimgui/dist64/numpy/random/tests/test_random.py +++ b/blimgui/dist64/numpy/random/tests/test_random.py @@ -1,15 +1,19 @@ +import sys import warnings import pytest import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_warns, - assert_no_warnings, assert_array_equal, assert_array_almost_equal, - suppress_warnings, IS_WASM - ) from numpy import random -import sys +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) class TestSeed: @@ -101,101 +105,109 @@ def test_multidimensional_pvals(self): class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.prng = random.RandomState(self.seed) - self.state = self.prng.get_state() + def _create_rng(self): + seed = 1234567890 + prng = random.RandomState(seed) + state = prng.get_state() + return prng, state def test_basic(self): - old = self.prng.tomaxint(16) - self.prng.set_state(self.state) - new = self.prng.tomaxint(16) + prng, state = self._create_rng() + old = prng.tomaxint(16) + prng.set_state(state) + new = prng.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.prng.standard_normal(size=3) - self.prng.set_state(self.state) - new = self.prng.standard_normal(size=3) + prng, state = self._create_rng() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. 
- - self.prng.standard_normal() - state = self.prng.get_state() - old = self.prng.standard_normal(size=3) - self.prng.set_state(state) - new = self.prng.standard_normal(size=3) + prng, state = self._create_rng() + prng.standard_normal() + state = prng.get_state() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.prng.standard_normal(size=16) - self.prng.set_state(old_state) - x2 = self.prng.standard_normal(size=16) - self.prng.set_state(self.state) - x3 = self.prng.standard_normal(size=16) + prng, state = self._create_rng() + old_state = state[:-2] + x1 = prng.standard_normal(size=16) + prng.set_state(old_state) + x2 = prng.standard_normal(size=16) + prng.set_state(state) + x3 = prng.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. 
- self.prng.negative_binomial(0.5, 0.5) + prng, _ = self._create_rng() + prng.negative_binomial(0.5, 0.5) def test_set_invalid_state(self): # gh-25402 + prng, _ = self._create_rng() with pytest.raises(IndexError): - self.prng.set_state(()) + prng.set_state(()) class TestRandint: - rfunc = np.random.randint - # valid integer/boolean types itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) + rng = random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) def test_bounds_checking(self): + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) - tgt = (lbnd + ubnd)//2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + tgt = (lbnd + ubnd) // 2 + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): 
# Test for ticket #1690 + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 try: - self.rfunc(lbnd, ubnd, dtype=dt) + rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " @@ -203,15 +215,15 @@ def test_full_range(self): def test_in_bounds_fuzz(self): # Don't use fixed seed - np.random.seed() + rng = random.RandomState() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) @@ -221,31 +233,31 @@ def test_repeatability(self): # We use a sha256 hash of generated sequences of 1000 samples # in the range [0, 6) for all but bool, where the range # is [0, 2). Hashes are for little endian numbers. 
- tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', - 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', - 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', - 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', - 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', - 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', - 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', - 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', - 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 for dt in self.itype[1:]: - np.random.seed(1234) + rng = random.RandomState(1234) # view as little endian for hash if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) + val = rng.randint(0, 6, size=1000, dtype=dt) else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() res = hashlib.sha256(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == 
res) # bools do not depend on endianness - np.random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) @@ -274,11 +286,12 @@ def test_int64_uint64_corner_case(self): def test_respect_dtype_singleton(self): # See gh-7203 + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int): @@ -287,7 +300,7 @@ def test_respect_dtype_singleton(self): ubnd = 2 if dt is bool else np.iinfo("long").max + 1 # gh-7284: Ensure that we get Python data types - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) assert_equal(type(sample), dt) @@ -295,40 +308,36 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): - np.random.seed(self.seed) - actual = np.random.rand(3, 2) + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): - np.random.seed(self.seed) - actual = np.random.randn(3, 2) + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def 
test_randint(self): - np.random.seed(self.seed) - actual = np.random.randint(-99, 99, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): - np.random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = np.random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) @@ -340,11 +349,9 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = np.random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) @@ -364,41 +371,41 @@ def test_random_integers_deprecated(self): np.iinfo('l').max, np.iinfo('l').max) def test_random(self): - np.random.seed(self.seed) - actual = np.random.random((3, 2)) + rng = random.RandomState(self.seed) + actual = rng.random((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_choice_uniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + rng = random.RandomState(self.seed) + 
actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False, + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): - np.random.seed(self.seed) - actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) @@ -436,7 +443,7 @@ def test_choice_return_shape(self): assert_(np.random.choice(arr, replace=True) is a) # Check 0-d array - s = tuple() + s = () assert_(not np.isscalar(np.random.choice(2, s, replace=True))) assert_(not np.isscalar(np.random.choice(2, s, replace=False))) assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) @@ -473,8 +480,8 @@ def test_choice_nan_probabilities(self): assert_raises(ValueError, np.random.choice, a, p=p) def test_bytes(self): - np.random.seed(self.seed) - actual = np.random.bytes(10) + rng = random.RandomState(self.seed) + actual = rng.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) @@ -497,9 +504,9 @@ def test_shuffle(self): # gh-4270 lambda x: np.asarray([(i, i) for i in x], [("a", object), ("b", np.int32)])]: - np.random.seed(self.seed) + rng = random.RandomState(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - np.random.shuffle(alist) + rng.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) 
assert_array_equal(actual, desired) @@ -559,11 +566,11 @@ def test_shuffle_memoryview(self): # gh-18273 # allow graceful handling of memoryviews # (treat the same as arrays) - np.random.seed(self.seed) + rng = random.RandomState(self.seed) a = np.arange(5).data - np.random.shuffle(a) + rng.shuffle(a) assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) - rng = np.random.RandomState(self.seed) + rng = random.RandomState(self.seed) rng.shuffle(a) assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) rng = np.random.default_rng(self.seed) @@ -577,8 +584,8 @@ def test_shuffle_not_writeable(self): np.random.shuffle(a) def test_beta(self): - np.random.seed(self.seed) - actual = np.random.beta(.1, .9, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) desired = np.array( [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], @@ -586,25 +593,25 @@ def test_beta(self): assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): - np.random.seed(self.seed) - actual = np.random.binomial(100, .456, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.binomial(100, .456, size=(3, 2)) desired = np.array([[37, 43], [42, 48], [46, 45]]) assert_array_equal(actual, desired) def test_chisquare(self): - np.random.seed(self.seed) - actual = np.random.chisquare(50, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + actual = rng.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.54539444573611562, 0.45460555426388438], 
[0.62345816822039413, 0.37654183177960598]], [[0.55206000085785778, 0.44793999914214233], @@ -637,8 +644,8 @@ def test_dirichlet_bad_alpha(self): assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) def test_exponential(self): - np.random.seed(self.seed) - actual = np.random.exponential(1.1234, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) @@ -649,16 +656,16 @@ def test_exponential_0(self): assert_raises(ValueError, np.random.exponential, scale=-0.) def test_f(self): - np.random.seed(self.seed) - actual = np.random.f(12, 77, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): - np.random.seed(self.seed) - actual = np.random.gamma(5, 3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) @@ -669,16 +676,16 @@ def test_gamma_0(self): assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) 
def test_geometric(self): - np.random.seed(self.seed) - actual = np.random.geometric(.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) desired = np.array([[8, 7], [17, 17], [5, 12]]) assert_array_equal(actual, desired) def test_gumbel(self): - np.random.seed(self.seed) - actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) @@ -689,34 +696,34 @@ def test_gumbel_0(self): assert_raises(ValueError, np.random.gumbel, scale=-0.) def test_hypergeometric(self): - np.random.seed(self.seed) - actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10, 5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 - actual = np.random.hypergeometric(5, 0, 3, size=4) + actual = rng.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(15, 0, 12, size=4) + actual = rng.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 - actual = np.random.hypergeometric(0, 5, 3, size=4) + actual = rng.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(0, 15, 12, size=4) + actual = rng.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): - np.random.seed(self.seed) - actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) 
desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) @@ -727,16 +734,16 @@ def test_laplace_0(self): assert_raises(ValueError, np.random.laplace, scale=-0.) def test_logistic(self): - np.random.seed(self.seed) - actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): - np.random.seed(self.seed) - actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) @@ -747,16 +754,16 @@ def test_lognormal_0(self): assert_raises(ValueError, np.random.lognormal, sigma=-0.) def test_logseries(self): - np.random.seed(self.seed) - actual = np.random.logseries(p=.923456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) assert_array_equal(actual, desired) def test_multinomial(self): - np.random.seed(self.seed) - actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], @@ -766,11 +773,11 @@ def test_multinomial(self): assert_array_equal(actual, desired) def test_multivariate_normal(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) - actual = np.random.multivariate_normal(mean, cov, size) + actual = rng.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], @@ -781,7 +788,7 @@ def test_multivariate_normal(self): assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning - actual = np.random.multivariate_normal(mean, cov) + actual = rng.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) @@ -789,54 +796,53 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(np.random.multivariate_normal, mean, cov, + assert_no_warnings(rng.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, np.random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: - np.random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 + with warnings.catch_warnings(): + warnings.simplefilter('error') + rng.multivariate_normal(mean, cov) def test_negative_binomial(self): - 
np.random.seed(self.seed) - actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) assert_array_equal(actual, desired) def test_noncentral_chisquare(self): - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) - actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): - np.random.seed(self.seed) - actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], @@ -844,8 +850,8 @@ def test_noncentral_f(self): assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): - np.random.seed(self.seed) - actual = 
np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) @@ -856,8 +862,8 @@ def test_normal_0(self): assert_raises(ValueError, np.random.normal, scale=-0.) def test_pareto(self): - np.random.seed(self.seed) - actual = np.random.pareto(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], @@ -871,8 +877,8 @@ def test_pareto(self): np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): - np.random.seed(self.seed) - actual = np.random.poisson(lam=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) @@ -882,21 +888,21 @@ def test_poisson_exceptions(self): lambig = np.iinfo('l').max lamneg = -1 assert_raises(ValueError, np.random.poisson, lamneg) - assert_raises(ValueError, np.random.poisson, [lamneg]*10) + assert_raises(ValueError, np.random.poisson, [lamneg] * 10) assert_raises(ValueError, np.random.poisson, lambig) - assert_raises(ValueError, np.random.poisson, [lambig]*10) + assert_raises(ValueError, np.random.poisson, [lambig] * 10) def test_power(self): - np.random.seed(self.seed) - actual = np.random.power(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): - np.random.seed(self.seed) - actual = 
np.random.rayleigh(scale=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) @@ -907,24 +913,24 @@ def test_rayleigh_0(self): assert_raises(ValueError, np.random.rayleigh, scale=-0.) def test_standard_cauchy(self): - np.random.seed(self.seed) - actual = np.random.standard_cauchy(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): - np.random.seed(self.seed) - actual = np.random.standard_exponential(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): - np.random.seed(self.seed) - actual = np.random.standard_gamma(shape=3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) @@ -935,24 +941,24 @@ def test_standard_gamma_0(self): assert_raises(ValueError, np.random.standard_gamma, shape=-0.) 
def test_standard_normal(self): - np.random.seed(self.seed) - actual = np.random.standard_normal(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): - np.random.seed(self.seed) - actual = np.random.standard_t(df=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): - np.random.seed(self.seed) - actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], @@ -960,8 +966,8 @@ def test_triangular(self): assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): - np.random.seed(self.seed) - actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) @@ -1008,8 +1014,8 @@ def __int__(self): assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): - np.random.seed(self.seed) - actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[2.28567572673902042, 
2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) @@ -1022,16 +1028,16 @@ def test_vonmises_small(self): np.testing.assert_(np.isfinite(r).all()) def test_wald(self): - np.random.seed(self.seed) - actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): - np.random.seed(self.seed) - actual = np.random.weibull(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) @@ -1043,8 +1049,8 @@ def test_weibull_0(self): assert_raises(ValueError, np.random.weibull, a=-0.) 
def test_zipf(self): - np.random.seed(self.seed) - actual = np.random.zipf(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) desired = np.array([[66, 29], [1, 1], [3, 13]]) @@ -1054,11 +1060,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 - - def setSeed(self): - np.random.seed(self.seed) + seed = 123456789 # TODO: Include test for randint once it can broadcast # Can steal the test written in PR #6938 @@ -1066,129 +1068,122 @@ def setSeed(self): def test_uniform(self): low = [0] high = [1] - uniform = np.random.uniform desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.setSeed() - actual = uniform(low * 3, high) + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) - self.setSeed() - actual = uniform(low, high * 3) + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] - normal = np.random.normal desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335]) - self.setSeed() - actual = normal(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) - self.setSeed() - actual = normal(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] 
- beta = np.random.beta desired = np.array([0.19843558305989056, 0.075230336409423643, 0.24976865978980844]) - self.setSeed() - actual = beta(a * 3, b) + rng = random.RandomState(self.seed) + actual = rng.beta(a * 3, b) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) - self.setSeed() - actual = beta(a, b * 3) + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) + assert_raises(ValueError, rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) def test_exponential(self): scale = [1] bad_scale = [-1] - exponential = np.random.exponential desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = exponential(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) + assert_raises(ValueError, rng.exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] - std_gamma = np.random.standard_gamma desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = std_gamma(shape * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] - gamma = np.random.gamma desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359]) - 
self.setSeed() - actual = gamma(shape * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) - self.setSeed() - actual = gamma(shape, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] - f = np.random.f desired = np.array([0.80038951638264799, 0.86768719635363512, 2.7251095168386801]) - self.setSeed() - actual = f(dfnum * 3, dfden) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) - self.setSeed() - actual = f(dfnum, dfden * 3) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] @@ -1197,256 +1192,242 @@ def test_noncentral_f(self): bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] - nonc_f = np.random.noncentral_f desired = 
np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545]) - self.setSeed() - actual = nonc_f(dfnum * 3, dfden, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) - self.setSeed() - actual = nonc_f(dfnum, dfden * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) - self.setSeed() - actual = nonc_f(dfnum, dfden, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): - self.setSeed() + rng = 
random.RandomState(self.seed) desired = np.array([6.869638627492048, 0.785880199263955]) - actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + actual = rng.noncentral_f(0.9, 0.9, 2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] - chisquare = np.random.chisquare desired = np.array([0.57022801133088286, 0.51947702108840776, 0.1320969254923558]) - self.setSeed() - actual = chisquare(df * 3) + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) + assert_raises(ValueError, rng.chisquare, bad_df * 3) def test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] - nonc_chi = np.random.noncentral_chisquare desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564]) - self.setSeed() - actual = nonc_chi(df * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) - self.setSeed() - actual = nonc_chi(df, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] - t = np.random.standard_t desired = np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273]) - self.setSeed() - actual = t(df * 3) 
+ rng = random.RandomState(self.seed) + actual = rng.standard_t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, rng.standard_t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] - vonmises = np.random.vonmises desired = np.array([2.9883443664201312, -2.7064099483995943, -1.8672476700665914]) - self.setSeed() - actual = vonmises(mu * 3, kappa) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) - self.setSeed() - actual = vonmises(mu, kappa * 3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] - pareto = np.random.pareto desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547]) - self.setSeed() - actual = pareto(a * 3) + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, rng.pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] - weibull = np.random.weibull desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = weibull(a * 3) + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, rng.weibull, bad_a * 3) def test_power(self): a = [1] bad_a = [-1] - power = np.random.power desired = np.array([0.53283302478975902, 0.53413660089041659, 
0.50955303552646702]) - self.setSeed() - actual = power(a * 3) + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, rng.power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] - laplace = np.random.laplace desired = np.array([0.067921356028507157, 0.070715642226971326, 0.019290950698972624]) - self.setSeed() - actual = laplace(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) - self.setSeed() - actual = laplace(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] - gumbel = np.random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) - self.setSeed() - actual = gumbel(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) - self.setSeed() - actual = gumbel(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) + assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] - logistic = np.random.logistic desired = np.array([0.13152135837586171, 0.13675915696285773, 
0.038216792802833396]) - self.setSeed() - actual = logistic(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) - self.setSeed() - actual = logistic(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] - lognormal = np.random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) - self.setSeed() - actual = lognormal(mean * 3, sigma) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) - self.setSeed() - actual = lognormal(mean, sigma * 3) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] - rayleigh = np.random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) - self.setSeed() - actual = rayleigh(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] - wald = np.random.wald 
desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) - self.setSeed() - actual = wald(mean * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) - self.setSeed() - actual = wald(mean, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) def test_triangular(self): left = [1] @@ -1455,33 +1436,32 @@ def test_triangular(self): bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 - triangular = np.random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) - self.setSeed() - actual = triangular(left * 3, mode, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, 
bad_mode_two, right) - self.setSeed() - actual = triangular(left, mode * 3, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, right) - self.setSeed() - actual = triangular(left, mode, right * 3) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, right * 3) def test_binomial(self): @@ -1490,22 +1470,21 @@ def test_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - binom = np.random.binomial desired = np.array([1, 1, 1]) - self.setSeed() - actual = binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) - self.setSeed() 
- actual = binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] @@ -1513,22 +1492,21 @@ def test_negative_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - neg_binom = np.random.negative_binomial desired = np.array([1, 0, 1]) - self.setSeed() - actual = neg_binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) - self.setSeed() - actual = neg_binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) def test_poisson(self): max_lam = np.random.RandomState()._poisson_lam_max @@ -1536,41 +1514,38 @@ def test_poisson(self): lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] - poisson = np.random.poisson desired = np.array([1, 1, 0]) - 
self.setSeed() - actual = poisson(lam * 3) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] - zipf = np.random.zipf desired = np.array([2, 2, 1]) - self.setSeed() - actual = zipf(a * 3) + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) + assert_raises(ValueError, rng.zipf, bad_a * 3) with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] - geom = np.random.geometric desired = np.array([2, 2, 2]) - self.setSeed() - actual = geom(p * 3) + rng = random.RandomState(self.seed) + actual = rng.geometric(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] @@ -1580,52 +1555,49 @@ def test_hypergeometric(self): bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] - hypergeom = np.random.hypergeometric desired = np.array([1, 1, 1]) - self.setSeed() - actual = hypergeom(ngood * 3, nbad, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - 
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) - self.setSeed() - actual = hypergeom(ngood, nbad * 3, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) - self.setSeed() - actual = hypergeom(ngood, nbad, nsample * 3) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + 
assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] - logseries = np.random.logseries desired = np.array([1, 1, 1]) - self.setSeed() - actual = logseries(p * 3) + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1661,19 +1633,17 @@ def gen_random(state, out): def test_multinomial(self): def gen_random(state, out): - out[...] = state.multinomial(10, [1/6.]*6, size=10000) + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) self.check_function(gen_random, sz=(10000, 6)) # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (np.random.exponential, np.random.standard_gamma, np.random.chisquare, np.random.standard_t, np.random.pareto, np.random.weibull, @@ -1688,11 +1658,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (np.random.uniform, np.random.normal, np.random.beta, np.random.gamma, np.random.f, np.random.noncentral_chisquare, @@ -1708,18 +1679,19 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_randint(self): + _, _, _, tgtShape = self._create_arrays() itype = [bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = np.random.randint @@ -1728,24 +1700,25 @@ def test_randint(self): for dt in itype: out = func(low, high, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low[0], high, dtype=dt) - assert_equal(out.shape, self.tgtShape) + 
assert_equal(out.shape, tgtShape) out = func(low, high[0], dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [np.random.noncentral_f, np.random.triangular, np.random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) diff --git a/blimgui/dist64/numpy/random/tests/test_randomstate.py b/blimgui/dist64/numpy/random/tests/test_randomstate.py index 2588253..fefce6f 100644 --- a/blimgui/dist64/numpy/random/tests/test_randomstate.py +++ b/blimgui/dist64/numpy/random/tests/test_randomstate.py @@ -3,16 +3,20 @@ import sys import warnings -import numpy as np import pytest -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_warns, - assert_no_warnings, assert_array_equal, assert_array_almost_equal, - suppress_warnings, IS_WASM - ) -from numpy.random import MT19937, PCG64 +import numpy as np from numpy import random +from numpy.random import MT19937, PCG64 +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) INT_FUNCS = {'binomial': (100.0, 0.6), 'geometric': (.5,), @@ -26,24 +30,24 @@ if np.iinfo(np.long).max < 2**32: # Windows and some 32-bit platforms, e.g., ARM - INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', - 'logseries': 
'23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', - 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', - 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', - 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', - 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', - 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', - 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', + INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', # noqa: E501 + 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', # noqa: E501 + 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', # noqa: E501 + 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', # noqa: E501 + 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', # noqa: E501 + 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', # noqa: E501 + 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', # noqa: E501 + 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', # noqa: E501 } else: - INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', - 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', - 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', - 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', - 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', - 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', - 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', - 'zipf': 
'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', + INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', # noqa: E501 + 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', # noqa: E501 + 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', # noqa: E501 + 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', # noqa: E501 + 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', # noqa: E501 + 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', # noqa: E501 + 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', # noqa: E501 + 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', # noqa: E501 } @@ -169,10 +173,10 @@ def test_p_non_contiguous(self): p = np.arange(15.) p /= np.sum(p[1::3]) pvals = p[1::3] - random.seed(1432985819) - non_contig = random.multinomial(100, pvals=pvals) - random.seed(1432985819) - contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + rng = random.RandomState(1432985819) + non_contig = rng.multinomial(100, pvals=pvals) + rng = random.RandomState(1432985819) + contig = rng.multinomial(100, pvals=np.ascontiguousarray(pvals)) assert_array_equal(non_contig, contig) def test_multinomial_pvals_float32(self): @@ -187,136 +191,146 @@ def test_multinomial_n_float(self): # Non-index integer types should gracefully truncate floats random.multinomial(100.5, [0.2, 0.8]) + class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.random_state = random.RandomState(self.seed) - self.state = self.random_state.get_state() + def _create_state(self): + seed = 1234567890 + random_state = random.RandomState(seed) + state = random_state.get_state() + return random_state, state def test_basic(self): - old = self.random_state.tomaxint(16) - 
self.random_state.set_state(self.state) - new = self.random_state.tomaxint(16) + random_state, state = self._create_state() + old = random_state.tomaxint(16) + random_state.set_state(state) + new = random_state.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(self.state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. - - self.random_state.standard_normal() - state = self.random_state.get_state() - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + random_state.standard_normal() + state = random_state.get_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. 
- old_state = self.state[:-2] - x1 = self.random_state.standard_normal(size=16) - self.random_state.set_state(old_state) - x2 = self.random_state.standard_normal(size=16) - self.random_state.set_state(self.state) - x3 = self.random_state.standard_normal(size=16) + random_state, state = self._create_state() + old_state = state[:-2] + x1 = random_state.standard_normal(size=16) + random_state.set_state(old_state) + x2 = random_state.standard_normal(size=16) + random_state.set_state(state) + x3 = random_state.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. - self.random_state.negative_binomial(0.5, 0.5) + random_state, _ = self._create_state() + random_state.negative_binomial(0.5, 0.5) def test_get_state_warning(self): rs = random.RandomState(PCG64()) - with suppress_warnings() as sup: - w = sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): state = rs.get_state() - assert_(len(w) == 1) - assert isinstance(state, dict) - assert state['bit_generator'] == 'PCG64' + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' def test_invalid_legacy_state_setting(self): - state = self.random_state.get_state() + random_state, state = self._create_state() + state = random_state.get_state() new_state = ('Unknown', ) + state[1:] - assert_raises(ValueError, self.random_state.set_state, new_state) - assert_raises(TypeError, self.random_state.set_state, + assert_raises(ValueError, random_state.set_state, new_state) + assert_raises(TypeError, random_state.set_state, np.array(new_state, dtype=object)) - state = self.random_state.get_state(legacy=False) + state = random_state.get_state(legacy=False) del state['bit_generator'] - assert_raises(ValueError, self.random_state.set_state, state) + assert_raises(ValueError, random_state.set_state, state) def test_pickle(self): - self.random_state.seed(0) - 
self.random_state.random_sample(100) - self.random_state.standard_normal() - pickled = self.random_state.get_state(legacy=False) + random_state, _ = self._create_state() + random_state.seed(0) + random_state.random_sample(100) + random_state.standard_normal() + pickled = random_state.get_state(legacy=False) assert_equal(pickled['has_gauss'], 1) - rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + rs_unpick = pickle.loads(pickle.dumps(random_state)) unpickled = rs_unpick.get_state(legacy=False) assert_mt19937_state_equal(pickled, unpickled) def test_state_setting(self): - attr_state = self.random_state.__getstate__() - self.random_state.standard_normal() - self.random_state.__setstate__(attr_state) - state = self.random_state.get_state(legacy=False) + random_state, state = self._create_state() + attr_state = random_state.__getstate__() + random_state.standard_normal() + random_state.__setstate__(attr_state) + state = random_state.get_state(legacy=False) assert_mt19937_state_equal(attr_state, state) def test_repr(self): - assert repr(self.random_state).startswith('RandomState(MT19937)') + random_state, _ = self._create_state() + assert repr(random_state).startswith('RandomState(MT19937)') class TestRandint: - rfunc = random.randint - # valid integer/boolean types itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) + rng = np.random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) def test_bounds_checking(self): + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, 
dtype=dt) + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) - tgt = (lbnd + ubnd)//2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + tgt = (lbnd + ubnd) // 2 + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): # Test for ticket #1690 + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 try: - self.rfunc(lbnd, ubnd, dtype=dt) + rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " @@ -324,15 +338,15 @@ def test_full_range(self): def test_in_bounds_fuzz(self): # Don't use fixed seed - random.seed() + rng = np.random.RandomState() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) @@ -341,31 +355,31 @@ def test_repeatability(self): # We use a sha256 hash of generated sequences of 1000 samples # in the range [0, 6) for all but 
bool, where the range # is [0, 2). Hashes are for little endian numbers. - tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', - 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', - 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', - 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', - 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', - 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', - 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', - 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', - 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 for dt in self.itype[1:]: - random.seed(1234) + rng = random.RandomState(1234) # view as little endian for hash if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) + val = rng.randint(0, 6, size=1000, dtype=dt) else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() res = 
hashlib.sha256(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == res) # bools do not depend on endianness - random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) @@ -388,8 +402,8 @@ def test_repeatability_32bit_boundary_broadcasting(self): [2978368172, 764731833, 2282559898], [ 105711276, 720447391, 3596512484]]]) for size in [None, (5, 3, 3)]: - random.seed(12345) - x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + rng = random.RandomState(12345) + x = rng.randint([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], size=size) assert_array_equal(x, desired if size is not None else desired[0]) @@ -418,11 +432,13 @@ def test_int64_uint64_corner_case(self): def test_respect_dtype_singleton(self): # See gh-7203 + rng = np.random.RandomState() + for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int): @@ -433,7 +449,7 @@ def test_respect_dtype_singleton(self): lbnd = 0 if dt is bool else np.iinfo(op_dtype).min ubnd = 2 if dt is bool else np.iinfo(op_dtype).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) assert_equal(type(sample), dt) @@ -441,64 +457,57 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): - random.seed(self.seed) - actual = random.rand(3, 2) + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) desired = np.array([[0.61879477158567997, 
0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rand_singleton(self): - random.seed(self.seed) - actual = random.rand() + rng = random.RandomState(self.seed) + actual = rng.rand() desired = 0.61879477158567997 assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): - random.seed(self.seed) - actual = random.randn(3, 2) + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) - random.seed(self.seed) - actual = random.randn() + rng = random.RandomState(self.seed) + actual = rng.randn() assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_randint(self): - random.seed(self.seed) - actual = random.randint(-99, 99, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): - random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) - random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(198, size=(3, 2)) - assert_(len(w) == 1) + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(198, size=(3, 2)) assert_array_equal(actual, desired + 100) def test_tomaxint(self): - random.seed(self.seed) 
rs = random.RandomState(self.seed) actual = rs.tomaxint(size=(3, 2)) if np.iinfo(np.long).max == 2147483647: @@ -523,20 +532,16 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): typer = np.dtype('l').type actual = random.random_integers(typer(np.iinfo('l').max), typer(np.iinfo('l').max)) - assert_(len(w) == 1) assert_equal(actual, desired) def test_random_integers_deprecated(self): @@ -554,44 +559,44 @@ def test_random_integers_deprecated(self): np.iinfo('l').max, np.iinfo('l').max) def test_random_sample(self): - random.seed(self.seed) - actual = random.random_sample((3, 2)) + rng = random.RandomState(self.seed) + actual = rng.random_sample((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) - random.seed(self.seed) - actual = random.random_sample() + rng = random.RandomState(self.seed) + actual = rng.random_sample() assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_choice_uniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) 
desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): - random.seed(self.seed) - actual = random.choice(['a', 'b', 'c', 'd'], 4) + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) @@ -629,7 +634,7 @@ def test_choice_return_shape(self): assert_(random.choice(arr, replace=True) is a) # Check 0-d array - s = tuple() + s = () assert_(not np.isscalar(random.choice(2, s, replace=True))) assert_(not np.isscalar(random.choice(2, s, replace=False))) assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) @@ -668,15 +673,15 @@ def test_choice_nan_probabilities(self): def test_choice_p_non_contiguous(self): p = np.ones(10) / 5 p[1::2] = 3.0 - random.seed(self.seed) - non_contig = random.choice(5, 3, p=p[::2]) - random.seed(self.seed) - contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + rng = random.RandomState(self.seed) + non_contig = rng.choice(5, 3, p=p[::2]) + rng = random.RandomState(self.seed) + contig = rng.choice(5, 3, p=np.ascontiguousarray(p[::2])) assert_array_equal(non_contig, contig) def test_bytes(self): - random.seed(self.seed) - actual = random.bytes(10) + rng = random.RandomState(self.seed) + actual = rng.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) @@ -700,9 +705,9 @@ def 
test_shuffle(self): lambda x: np.asarray([(i, i) for i in x], [("a", object, (1,)), ("b", np.int32, (1,))])]: - random.seed(self.seed) + rng = random.RandomState(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - random.shuffle(alist) + rng.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) assert_array_equal(actual, desired) @@ -726,35 +731,35 @@ def test_shuffle_invalid_objects(self): assert_raises(TypeError, random.shuffle, x) def test_permutation(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] - actual = random.permutation(alist) + actual = rng.permutation(alist) desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] assert_array_equal(actual, desired) - random.seed(self.seed) + rng = random.RandomState(self.seed) arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T - actual = random.permutation(arr_2d) + actual = rng.permutation(arr_2d) assert_array_equal(actual, np.atleast_2d(desired).T) - random.seed(self.seed) + rng = random.RandomState(self.seed) bad_x_str = "abcd" assert_raises(IndexError, random.permutation, bad_x_str) - random.seed(self.seed) + rng = random.RandomState(self.seed) bad_x_float = 1.2 assert_raises(IndexError, random.permutation, bad_x_float) integer_val = 10 desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] - random.seed(self.seed) - actual = random.permutation(integer_val) + rng = random.RandomState(self.seed) + actual = rng.permutation(integer_val) assert_array_equal(actual, desired) def test_beta(self): - random.seed(self.seed) - actual = random.beta(.1, .9, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) desired = np.array( [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], @@ -762,30 +767,30 @@ def test_beta(self): assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): - random.seed(self.seed) - actual = random.binomial(100.123, .456, 
size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.binomial(100.123, .456, size=(3, 2)) desired = np.array([[37, 43], [42, 48], [46, 45]]) assert_array_equal(actual, desired) - random.seed(self.seed) - actual = random.binomial(100.123, .456) + rng = random.RandomState(self.seed) + actual = rng.binomial(100.123, .456) desired = 37 assert_array_equal(actual, desired) def test_chisquare(self): - random.seed(self.seed) - actual = random.chisquare(50, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha, size=(3, 2)) + actual = rng.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.54539444573611562, 0.45460555426388438], [0.62345816822039413, 0.37654183177960598]], [[0.55206000085785778, 0.44793999914214233], @@ -796,9 +801,9 @@ def test_dirichlet(self): bad_alpha = np.array([5.4e-01, -1.0e-16]) assert_raises(ValueError, random.dirichlet, bad_alpha) - random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha) + actual = rng.dirichlet(alpha) assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_dirichlet_size(self): @@ -821,16 +826,16 @@ def test_dirichlet_bad_alpha(self): def test_dirichlet_alpha_non_contiguous(self): a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) alpha = a[::2] - random.seed(self.seed) - non_contig = random.dirichlet(alpha, size=(3, 2)) - random.seed(self.seed) - contig = random.dirichlet(np.ascontiguousarray(alpha), + rng = random.RandomState(self.seed) + non_contig = 
rng.dirichlet(alpha, size=(3, 2)) + rng = random.RandomState(self.seed) + contig = rng.dirichlet(np.ascontiguousarray(alpha), size=(3, 2)) assert_array_almost_equal(non_contig, contig) def test_exponential(self): - random.seed(self.seed) - actual = random.exponential(1.1234, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) @@ -841,16 +846,16 @@ def test_exponential_0(self): assert_raises(ValueError, random.exponential, scale=-0.) def test_f(self): - random.seed(self.seed) - actual = random.f(12, 77, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): - random.seed(self.seed) - actual = random.gamma(5, 3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) @@ -861,8 +866,8 @@ def test_gamma_0(self): assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) 
def test_geometric(self): - random.seed(self.seed) - actual = random.geometric(.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) desired = np.array([[8, 7], [17, 17], [5, 12]]) @@ -873,14 +878,14 @@ def test_geometric_exceptions(self): assert_raises(ValueError, random.geometric, [1.1] * 10) assert_raises(ValueError, random.geometric, -0.1) assert_raises(ValueError, random.geometric, [-0.1] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.geometric, np.nan) assert_raises(ValueError, random.geometric, [np.nan] * 10) def test_gumbel(self): - random.seed(self.seed) - actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) @@ -891,34 +896,34 @@ def test_gumbel_0(self): assert_raises(ValueError, random.gumbel, scale=-0.) 
def test_hypergeometric(self): - random.seed(self.seed) - actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10.1, 5.5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 - actual = random.hypergeometric(5, 0, 3, size=4) + actual = rng.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) - actual = random.hypergeometric(15, 0, 12, size=4) + actual = rng.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 - actual = random.hypergeometric(0, 5, 3, size=4) + actual = rng.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) - actual = random.hypergeometric(0, 15, 12, size=4) + actual = rng.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): - random.seed(self.seed) - actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) @@ -929,16 +934,16 @@ def test_laplace_0(self): assert_raises(ValueError, random.laplace, scale=-0.) 
def test_logistic(self): - random.seed(self.seed) - actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): - random.seed(self.seed) - actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) @@ -949,8 +954,8 @@ def test_lognormal_0(self): assert_raises(ValueError, random.lognormal, sigma=-0.) def test_logseries(self): - random.seed(self.seed) - actual = random.logseries(p=.923456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) @@ -972,8 +977,8 @@ def test_logseries_exceptions(self, value): random.logseries(np.array([value] * 10)[::2]) def test_multinomial(self): - random.seed(self.seed) - actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], @@ -983,11 +988,11 @@ def test_multinomial(self): assert_array_equal(actual, desired) def test_multivariate_normal(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) - actual = random.multivariate_normal(mean, cov, size) + actual = rng.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], @@ -998,7 +1003,7 @@ def test_multivariate_normal(self): assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning - actual = random.multivariate_normal(mean, cov) + actual = rng.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1006,72 +1011,71 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(random.multivariate_normal, mean, cov, + assert_no_warnings(rng.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: - random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + rng.multivariate_normal(mean, cov) mu = np.zeros(2) cov = np.eye(2) - 
assert_raises(ValueError, random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='other') - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, np.zeros((2, 1, 1)), cov) - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, mu, np.empty((3, 2))) - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, mu, np.eye(3)) def test_negative_binomial(self): - random.seed(self.seed) - actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) assert_array_equal(actual, desired) def test_negative_binomial_exceptions(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.negative_binomial, 100, np.nan) assert_raises(ValueError, random.negative_binomial, 100, [np.nan] * 10) def test_noncentral_chisquare(self): - random.seed(self.seed) - actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) - actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) - random.seed(self.seed) - 
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): - random.seed(self.seed) - actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], @@ -1084,8 +1088,8 @@ def test_noncentral_f_nan(self): assert np.isnan(actual) def test_normal(self): - random.seed(self.seed) - actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) @@ -1096,8 +1100,8 @@ def test_normal_0(self): assert_raises(ValueError, random.normal, scale=-0.) 
def test_pareto(self): - random.seed(self.seed) - actual = random.pareto(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], @@ -1111,8 +1115,8 @@ def test_pareto(self): np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): - random.seed(self.seed) - actual = random.poisson(lam=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) @@ -1125,22 +1129,22 @@ def test_poisson_exceptions(self): assert_raises(ValueError, random.poisson, [lamneg] * 10) assert_raises(ValueError, random.poisson, lambig) assert_raises(ValueError, random.poisson, [lambig] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.poisson, np.nan) assert_raises(ValueError, random.poisson, [np.nan] * 10) def test_power(self): - random.seed(self.seed) - actual = random.power(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): - random.seed(self.seed) - actual = random.rayleigh(scale=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) @@ -1151,24 +1155,24 @@ def test_rayleigh_0(self): assert_raises(ValueError, random.rayleigh, scale=-0.) 
def test_standard_cauchy(self): - random.seed(self.seed) - actual = random.standard_cauchy(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): - random.seed(self.seed) - actual = random.standard_exponential(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): - random.seed(self.seed) - actual = random.standard_gamma(shape=3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) @@ -1179,30 +1183,30 @@ def test_standard_gamma_0(self): assert_raises(ValueError, random.standard_gamma, shape=-0.) 
def test_standard_normal(self): - random.seed(self.seed) - actual = random.standard_normal(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randn_singleton(self): - random.seed(self.seed) - actual = random.randn() + rng = random.RandomState(self.seed) + actual = rng.randn() desired = np.array(1.34016345771863121) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): - random.seed(self.seed) - actual = random.standard_t(df=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): - random.seed(self.seed) - actual = random.triangular(left=5.12, mode=10.23, right=20.34, + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], @@ -1210,8 +1214,8 @@ def test_triangular(self): assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): - random.seed(self.seed) - actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) @@ -1256,8 +1260,8 @@ def __int__(self): assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): - random.seed(self.seed) - actual = 
random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[2.28567572673902042, 2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) @@ -1271,8 +1275,8 @@ def test_vonmises_small(self): def test_vonmises_large(self): # guard against changes in RandomState when Generator is fixed - random.seed(self.seed) - actual = random.vonmises(mu=0., kappa=1e7, size=3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=0., kappa=1e7, size=3) desired = np.array([4.634253748521111e-04, 3.558873596114509e-04, -2.337119622577433e-04]) @@ -1284,16 +1288,16 @@ def test_vonmises_nan(self): assert_(np.isnan(r)) def test_wald(self): - random.seed(self.seed) - actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): - random.seed(self.seed) - actual = random.weibull(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) @@ -1305,8 +1309,8 @@ def test_weibull_0(self): assert_raises(ValueError, random.weibull, a=-0.) 
def test_zipf(self): - random.seed(self.seed) - actual = random.zipf(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) desired = np.array([[66, 29], [1, 1], [3, 13]]) @@ -1316,138 +1320,127 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 - - def set_seed(self): - random.seed(self.seed) + seed = 123456789 def test_uniform(self): low = [0] high = [1] - uniform = random.uniform desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.set_seed() - actual = uniform(low * 3, high) + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) - self.set_seed() - actual = uniform(low, high * 3) + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] - normal = random.normal desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335]) - self.set_seed() - actual = normal(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) - self.set_seed() - actual = normal(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] - beta = random.beta desired = np.array([0.19843558305989056, 0.075230336409423643, 0.24976865978980844]) - self.set_seed() - actual = beta(a * 3, 
b) + rng = random.RandomState(self.seed) + actual = rng.beta(a * 3, b) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) - self.set_seed() - actual = beta(a, b * 3) + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) + assert_raises(ValueError, rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) def test_exponential(self): scale = [1] bad_scale = [-1] - exponential = random.exponential desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = exponential(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) + assert_raises(ValueError, rng.exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] - std_gamma = random.standard_gamma desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = std_gamma(shape * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] - gamma = random.gamma desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359]) - self.set_seed() - actual = gamma(shape * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) assert_array_almost_equal(actual, 
desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) - self.set_seed() - actual = gamma(shape, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] - f = random.f desired = np.array([0.80038951638264799, 0.86768719635363512, 2.7251095168386801]) - self.set_seed() - actual = f(dfnum * 3, dfden) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) - self.set_seed() - actual = f(dfnum, dfden * 3) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] @@ -1456,267 +1449,253 @@ def test_noncentral_f(self): bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] - nonc_f = random.noncentral_f desired = np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545]) - self.set_seed() - actual = nonc_f(dfnum * 3, dfden, nonc) + rng = 
random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + assert np.all(np.isnan(rng.noncentral_f(dfnum, dfden, [np.nan] * 3))) - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) - self.set_seed() - actual = nonc_f(dfnum, dfden * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) - self.set_seed() - actual = nonc_f(dfnum, dfden, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): - self.set_seed() + rng = 
random.RandomState(self.seed) desired = np.array([6.869638627492048, 0.785880199263955]) - actual = random.noncentral_f(0.9, 0.9, 2, size=2) + actual = rng.noncentral_f(0.9, 0.9, 2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] - chisquare = random.chisquare desired = np.array([0.57022801133088286, 0.51947702108840776, 0.1320969254923558]) - self.set_seed() - actual = chisquare(df * 3) + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) + assert_raises(ValueError, rng.chisquare, bad_df * 3) def test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] - nonc_chi = random.noncentral_chisquare desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564]) - self.set_seed() - actual = nonc_chi(df * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) - self.set_seed() - actual = nonc_chi(df, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] - t = random.standard_t desired = np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273]) - self.set_seed() - actual = t(df * 3) + rng = 
random.RandomState(self.seed) + actual = rng.standard_t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, rng.standard_t, bad_df * 3) assert_raises(ValueError, random.standard_t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] - vonmises = random.vonmises desired = np.array([2.9883443664201312, -2.7064099483995943, -1.8672476700665914]) - self.set_seed() - actual = vonmises(mu * 3, kappa) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) - self.set_seed() - actual = vonmises(mu, kappa * 3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] - pareto = random.pareto desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547]) - self.set_seed() - actual = pareto(a * 3) + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, rng.pareto, bad_a * 3) assert_raises(ValueError, random.pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] - weibull = random.weibull desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = weibull(a * 3) + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, rng.weibull, bad_a * 3) assert_raises(ValueError, random.weibull, bad_a * 3) 
def test_power(self): a = [1] bad_a = [-1] - power = random.power desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.set_seed() - actual = power(a * 3) + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, rng.power, bad_a * 3) assert_raises(ValueError, random.power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] - laplace = random.laplace desired = np.array([0.067921356028507157, 0.070715642226971326, 0.019290950698972624]) - self.set_seed() - actual = laplace(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) - self.set_seed() - actual = laplace(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] - gumbel = random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) - self.set_seed() - actual = gumbel(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) - self.set_seed() - actual = gumbel(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) + assert_raises(ValueError, rng.gumbel, loc, 
bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] - logistic = random.logistic desired = np.array([0.13152135837586171, 0.13675915696285773, 0.038216792802833396]) - self.set_seed() - actual = logistic(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) - self.set_seed() - actual = logistic(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) - assert_equal(random.logistic(1.0, 0.0), 1.0) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) + assert_equal(rng.logistic(1.0, 0.0), 1.0) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] - lognormal = random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) - self.set_seed() - actual = lognormal(mean * 3, sigma) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) - self.set_seed() - actual = lognormal(mean, sigma * 3) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] - rayleigh = random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) 
- self.set_seed() - actual = rayleigh(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] - wald = random.wald desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) - self.set_seed() - actual = wald(mean * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) assert_raises(ValueError, random.wald, bad_mean * 3, scale) assert_raises(ValueError, random.wald, mean * 3, bad_scale) - self.set_seed() - actual = wald(mean, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) def test_triangular(self): left = [1] @@ -1725,38 +1704,37 @@ def test_triangular(self): bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 - triangular = random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) - self.set_seed() - actual = triangular(left * 3, mode, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 
3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two, right) - self.set_seed() - actual = triangular(left, mode * 3, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, right) - self.set_seed() - actual = triangular(left, mode, right * 3) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, right * 3) - assert_raises(ValueError, triangular, 10., 0., 20.) - assert_raises(ValueError, triangular, 10., 25., 20.) - assert_raises(ValueError, triangular, 10., 10., 10.) 
+ assert_raises(ValueError, rng.triangular, 10., 0., 20.) + assert_raises(ValueError, rng.triangular, 10., 25., 20.) + assert_raises(ValueError, rng.triangular, 10., 10., 10.) def test_binomial(self): n = [1] @@ -1764,22 +1742,21 @@ def test_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - binom = random.binomial desired = np.array([1, 1, 1]) - self.set_seed() - actual = binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) - self.set_seed() - actual = binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] @@ -1787,22 +1764,21 @@ def test_negative_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - neg_binom = random.negative_binomial desired = np.array([1, 0, 1]) - self.set_seed() - actual = neg_binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 
3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) - self.set_seed() - actual = neg_binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) def test_poisson(self): max_lam = random.RandomState()._poisson_lam_max @@ -1810,41 +1786,38 @@ def test_poisson(self): lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] - poisson = random.poisson desired = np.array([1, 1, 0]) - self.set_seed() - actual = poisson(lam * 3) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] - zipf = random.zipf desired = np.array([2, 2, 1]) - self.set_seed() - actual = zipf(a * 3) + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) + assert_raises(ValueError, rng.zipf, bad_a * 3) with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] - geom = random.geometric desired = np.array([2, 2, 2]) - self.set_seed() - actual = geom(p * 3) + rng = random.RandomState(self.seed) + actual 
= rng.geometric(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] @@ -1854,57 +1827,54 @@ def test_hypergeometric(self): bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] - hypergeom = random.hypergeometric desired = np.array([1, 1, 1]) - self.set_seed() - actual = hypergeom(ngood * 3, nbad, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) - self.set_seed() - actual = hypergeom(ngood, nbad * 3, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, 
bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) - self.set_seed() - actual = hypergeom(ngood, nbad, nsample * 3) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) - assert_raises(ValueError, hypergeom, -1, 10, 20) - assert_raises(ValueError, hypergeom, 10, -1, 20) - assert_raises(ValueError, hypergeom, 10, 10, 0) - assert_raises(ValueError, hypergeom, 10, 10, 25) + assert_raises(ValueError, rng.hypergeometric, -1, 10, 20) + assert_raises(ValueError, rng.hypergeometric, 10, -1, 20) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 0) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 25) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] - logseries = random.logseries desired = np.array([1, 1, 1]) - self.set_seed() - actual = logseries(p * 3) + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def 
setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1949,13 +1919,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, @@ -1970,11 +1938,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, @@ -1990,30 +1959,31 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = 
func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) # Ensure returned array dtype is correct for platform @@ -2026,9 +1996,9 @@ def test_integer_dtype(int_func): def test_integer_repeat(int_func): - random.seed(123456789) + rng = random.RandomState(123456789) fname, args, sha256 = int_func - f = getattr(random, fname) + f = getattr(rng, fname) val = f(*args, size=1000000) if sys.byteorder != 'little': val = val.byteswap() @@ -2064,6 +2034,7 @@ def test_randomstate_ctor_old_style_pickle(): assert_equal(state_a['gauss'], state_b['gauss']) +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_hot_swap(restore_singleton_bitgen): # GH 21808 def_bg = np.random.default_rng(0) @@ -2075,6 +2046,7 @@ def test_hot_swap(restore_singleton_bitgen): assert bg is second_bg +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_seed_alt_bit_gen(restore_singleton_bitgen): # GH 21808 bg = PCG64(0) @@ -2089,6 +2061,7 @@ def test_seed_alt_bit_gen(restore_singleton_bitgen): assert state["state"]["inc"] != new_state["state"]["inc"] +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_state_error_alt_bit_gen(restore_singleton_bitgen): # GH 21808 state = np.random.get_state() @@ -2098,6 +2071,7 @@ def test_state_error_alt_bit_gen(restore_singleton_bitgen): np.random.set_state(state) +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_swap_worked(restore_singleton_bitgen): # GH 21808 np.random.seed(98765) @@ -2116,6 +2090,7 @@ def test_swap_worked(restore_singleton_bitgen): assert new_state["state"]["inc"] == new_state["state"]["inc"] 
+@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_swapped_singleton_against_direct(restore_singleton_bitgen): np.random.set_bit_generator(PCG64(98765)) singleton_vals = np.random.randint(0, 2 ** 30, 10) diff --git a/blimgui/dist64/numpy/random/tests/test_randomstate_regression.py b/blimgui/dist64/numpy/random/tests/test_randomstate_regression.py index 6a71956..befcf7e 100644 --- a/blimgui/dist64/numpy/random/tests/test_randomstate_regression.py +++ b/blimgui/dist64/numpy/random/tests/test_randomstate_regression.py @@ -2,12 +2,9 @@ import pytest -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, - ) import numpy as np - from numpy import random +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: @@ -57,9 +54,9 @@ def test_shuffle_mixed_dimension(self): [(1, 1), (2, 2), (3, 3), None], [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: - random.seed(12345) + rng = random.RandomState(12345) shuffled = list(t) - random.shuffle(shuffled) + rng.shuffle(shuffled) expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) assert_array_equal(np.array(shuffled, dtype=object), expected) @@ -71,7 +68,7 @@ def test_call_within_randomstate(self): random.seed(i) m.seed(4321) # If m.state is not honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) def test_multivariate_normal_size_types(self): # Test for multivariate_normal issue with 'size' argument. 
@@ -99,7 +96,7 @@ def test_choice_sum_of_probs_tolerance(self): probs = np.array(counts, dtype=dt) / sum(counts) c = random.choice(a, p=probs) assert_(c in a) - assert_raises(ValueError, random.choice, a, p=probs*0.9) + assert_raises(ValueError, random.choice, a, p=probs * 0.9) def test_shuffle_of_array_of_different_length_strings(self): # Test that permuting an array of different length strings @@ -134,9 +131,9 @@ def test_permutation_subclass(self): class N(np.ndarray): pass - random.seed(1) + rng = random.RandomState(1) orig = np.arange(3).view(N) - perm = random.permutation(orig) + perm = rng.permutation(orig) assert_array_equal(perm, np.array([0, 2, 1])) assert_array_equal(orig, np.arange(3).view(N)) @@ -146,9 +143,9 @@ class M: def __array__(self, dtype=None, copy=None): return self.a - random.seed(1) + rng = random.RandomState(1) m = M() - perm = random.permutation(m) + perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) @@ -166,9 +163,9 @@ def test_named_argument_initialization(self): def test_choice_retun_dtype(self): # GH 9867, now long since the NumPy default changed. 
- c = np.random.choice(10, p=[.1]*10, size=2) + c = np.random.choice(10, p=[.1] * 10, size=2) assert c.dtype == np.dtype(np.long) - c = np.random.choice(10, p=[.1]*10, replace=False, size=2) + c = np.random.choice(10, p=[.1] * 10, replace=False, size=2) assert c.dtype == np.dtype(np.long) c = np.random.choice(10, size=2) assert c.dtype == np.dtype(np.long) @@ -179,27 +176,27 @@ def test_choice_retun_dtype(self): reason='Cannot test with 32-bit C long') def test_randint_117(self): # GH 14189 - random.seed(0) + rng = random.RandomState(0) expected = np.array([2357136044, 2546248239, 3071714933, 3626093760, 2588848963, 3684848379, 2340255427, 3638918503, 1819583497, 2678185683], dtype='int64') - actual = random.randint(2**32, size=10) + actual = rng.randint(2**32, size=10) assert_array_equal(actual, expected) def test_p_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. - np.random.seed(12345) - assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]), + rng = random.RandomState(12345) + assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]), [0, 0, 0, 1, 1]) def test_n_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. 
- np.random.seed(8675309) + rng = random.RandomState(8675309) expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) - assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + assert_array_equal(rng.binomial([[0], [10]], 0.25, size=(2, 10)), expected) diff --git a/blimgui/dist64/numpy/random/tests/test_regression.py b/blimgui/dist64/numpy/random/tests/test_regression.py index 3810750..15921be 100644 --- a/blimgui/dist64/numpy/random/tests/test_regression.py +++ b/blimgui/dist64/numpy/random/tests/test_regression.py @@ -1,9 +1,11 @@ +import inspect import sys -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, - ) -from numpy import random + +import pytest + import numpy as np +from numpy import random +from numpy.testing import IS_PYPY, assert_, assert_array_equal, assert_raises class TestRegression: @@ -53,9 +55,9 @@ def test_shuffle_mixed_dimension(self): [(1, 1), (2, 2), (3, 3), None], [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: - np.random.seed(12345) + rng = np.random.RandomState(12345) shuffled = list(t) - random.shuffle(shuffled) + rng.shuffle(shuffled) expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) assert_array_equal(np.array(shuffled, dtype=object), expected) @@ -67,7 +69,7 @@ def test_call_within_randomstate(self): np.random.seed(i) m.seed(4321) # If m.state is not honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) def test_multivariate_normal_size_types(self): # Test for multivariate_normal issue with 'size' argument. 
@@ -95,7 +97,7 @@ def test_choice_sum_of_probs_tolerance(self): probs = np.array(counts, dtype=dt) / sum(counts) c = np.random.choice(a, p=probs) assert_(c in a) - assert_raises(ValueError, np.random.choice, a, p=probs*0.9) + assert_raises(ValueError, np.random.choice, a, p=probs * 0.9) def test_shuffle_of_array_of_different_length_strings(self): # Test that permuting an array of different length strings @@ -130,9 +132,9 @@ def test_permutation_subclass(self): class N(np.ndarray): pass - np.random.seed(1) + rng = np.random.RandomState(1) orig = np.arange(3).view(N) - perm = np.random.permutation(orig) + perm = rng.permutation(orig) assert_array_equal(perm, np.array([0, 2, 1])) assert_array_equal(orig, np.arange(3).view(N)) @@ -142,8 +144,32 @@ class M: def __array__(self, dtype=None, copy=None): return self.a - np.random.seed(1) + rng = np.random.RandomState(1) m = M() - perm = np.random.permutation(m) + perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize( + "cls", + [ + random.Generator, + random.MT19937, + random.PCG64, + random.PCG64DXSM, + random.Philox, + random.RandomState, + random.SFC64, + random.BitGenerator, + random.SeedSequence, + random.bit_generator.SeedlessSeedSequence, + ], + ) + def test_inspect_signature(self, cls: type) -> None: + assert hasattr(cls, "__text_signature__") + try: + inspect.signature(cls) + except ValueError: + pytest.fail(f"invalid signature: {cls.__module__}.{cls.__qualname__}") diff --git a/blimgui/dist64/numpy/random/tests/test_seed_sequence.py b/blimgui/dist64/numpy/random/tests/test_seed_sequence.py index 304d06b..50e89a9 100644 --- a/blimgui/dist64/numpy/random/tests/test_seed_sequence.py +++ b/blimgui/dist64/numpy/random/tests/test_seed_sequence.py @@ -1,7 +1,6 @@ import numpy as 
np -from numpy.testing import assert_array_equal, assert_array_compare - from numpy.random import SeedSequence +from numpy.testing import assert_array_compare, assert_array_equal def test_reference_data(): diff --git a/blimgui/dist64/numpy/random/tests/test_smoke.py b/blimgui/dist64/numpy/random/tests/test_smoke.py index 3c86d82..e9e561f 100644 --- a/blimgui/dist64/numpy/random/tests/test_smoke.py +++ b/blimgui/dist64/numpy/random/tests/test_smoke.py @@ -1,16 +1,15 @@ import pickle +from dataclasses import dataclass from functools import partial -import numpy as np import pytest -from numpy.testing import assert_equal, assert_, assert_array_equal -from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64) -@pytest.fixture(scope='module', - params=(np.bool, np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64)) -def dtype(request): - return request.param +import numpy as np +from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox +from numpy.testing import assert_, assert_array_equal, assert_equal + +DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64) def params_0(f): @@ -66,13 +65,12 @@ def comp_state(state1, state2): identical &= comp_state(state1[key], state2[key]) elif type(state1) != type(state2): identical &= type(state1) == type(state2) + elif (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( + state2, (list, tuple, np.ndarray))): + for s1, s2 in zip(state1, state2): + identical &= comp_state(s1, s2) else: - if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( - state2, (list, tuple, np.ndarray))): - for s1, s2 in zip(state1, state2): - identical &= comp_state(s1, s2) - else: - identical &= state1 == state2 + identical &= state1 == state2 return identical @@ -91,403 +89,459 @@ def warmup(rg, n=None): rg.random(n, dtype=np.float32) +@dataclass +class RNGData: + bit_generator: 
type[np.random.BitGenerator] + advance: int + seed: list[int] + rg: Generator + seed_vector_bits: int + + class RNG: @classmethod - def setup_class(cls): + def _create_rng(cls): # Overridden in test classes. Place holder to silence IDE noise - cls.bit_generator = PCG64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() - - @classmethod - def _extra_setup(cls): - cls.vec_1d = np.arange(2.0, 102.0) - cls.vec_2d = np.arange(2.0, 102.0)[None, :] - cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) - cls.seed_error = TypeError - - def _reset_state(self): - self.rg.bit_generator.state = self.initial_state + bit_generator = PCG64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_init(self): - rg = Generator(self.bit_generator()) - state = rg.bit_generator.state - rg.standard_normal(1) - rg.standard_normal(1) - rg.bit_generator.state = state - new_state = rg.bit_generator.state + data = self._create_rng() + data.rg = Generator(data.bit_generator()) + state = data.rg.bit_generator.state + data.rg.standard_normal(1) + data.rg.standard_normal(1) + data.rg.bit_generator.state = state + new_state = data.rg.bit_generator.state assert_(comp_state(state, new_state)) def test_advance(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'advance'): - self.rg.bit_generator.advance(self.advance) - assert_(not comp_state(state, self.rg.bit_generator.state)) + data = self._create_rng() + state = data.rg.bit_generator.state + if hasattr(data.rg.bit_generator, 'advance'): + data.rg.bit_generator.advance(data.advance) + assert_(not comp_state(state, data.rg.bit_generator.state)) else: - bitgen_name = self.rg.bit_generator.__class__.__name__ + bitgen_name = 
data.rg.bit_generator.__class__.__name__ pytest.skip(f'Advance is not supported by {bitgen_name}') def test_jump(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'jumped'): - bit_gen2 = self.rg.bit_generator.jumped() + rg = self._create_rng().rg + state = rg.bit_generator.state + if hasattr(rg.bit_generator, 'jumped'): + bit_gen2 = rg.bit_generator.jumped() jumped_state = bit_gen2.state assert_(not comp_state(state, jumped_state)) - self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) - self.rg.bit_generator.state = state - bit_gen3 = self.rg.bit_generator.jumped() + rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) + rg.bit_generator.state = state + bit_gen3 = rg.bit_generator.jumped() rejumped_state = bit_gen3.state assert_(comp_state(jumped_state, rejumped_state)) else: - bitgen_name = self.rg.bit_generator.__class__.__name__ + bitgen_name = rg.bit_generator.__class__.__name__ if bitgen_name not in ('SFC64',): raise AttributeError(f'no "jumped" in {bitgen_name}') pytest.skip(f'Jump is not supported by {bitgen_name}') def test_uniform(self): - r = self.rg.uniform(-1.0, 0.0, size=10) + rg = self._create_rng().rg + r = rg.uniform(-1.0, 0.0, size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_uniform_array(self): - r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10) + rg = self._create_rng().rg + r = rg.uniform(np.array([-1.0] * 10), 0.0, size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) - r = self.rg.uniform(np.array([-1.0] * 10), + r = rg.uniform(np.array([-1.0] * 10), np.array([0.0] * 10), size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) - r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10) + r = rg.uniform(-1.0, np.array([0.0] * 10), size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_random(self): - assert_(len(self.rg.random(10)) == 10) - params_0(self.rg.random) + rg = self._create_rng().rg + 
assert_(len(rg.random(10)) == 10) + params_0(rg.random) def test_standard_normal_zig(self): - assert_(len(self.rg.standard_normal(10)) == 10) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) def test_standard_normal(self): - assert_(len(self.rg.standard_normal(10)) == 10) - params_0(self.rg.standard_normal) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) + params_0(rg.standard_normal) def test_standard_gamma(self): - assert_(len(self.rg.standard_gamma(10, 10)) == 10) - assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) - params_1(self.rg.standard_gamma) + rg = self._create_rng().rg + assert_(len(rg.standard_gamma(10, 10)) == 10) + assert_(len(rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(rg.standard_gamma) def test_standard_exponential(self): - assert_(len(self.rg.standard_exponential(10)) == 10) - params_0(self.rg.standard_exponential) + rg = self._create_rng().rg + assert_(len(rg.standard_exponential(10)) == 10) + params_0(rg.standard_exponential) def test_standard_exponential_float(self): - randoms = self.rg.standard_exponential(10, dtype='float32') + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32')) + params_0(partial(rg.standard_exponential, dtype='float32')) def test_standard_exponential_float_log(self): - randoms = self.rg.standard_exponential(10, dtype='float32', + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32', method='inv') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32', + params_0(partial(rg.standard_exponential, dtype='float32', method='inv')) def test_standard_cauchy(self): - assert_(len(self.rg.standard_cauchy(10)) == 10) - params_0(self.rg.standard_cauchy) + rg = self._create_rng().rg + 
assert_(len(rg.standard_cauchy(10)) == 10) + params_0(rg.standard_cauchy) def test_standard_t(self): - assert_(len(self.rg.standard_t(10, 10)) == 10) - params_1(self.rg.standard_t) + rg = self._create_rng().rg + assert_(len(rg.standard_t(10, 10)) == 10) + params_1(rg.standard_t) def test_binomial(self): - assert_(self.rg.binomial(10, .5) >= 0) - assert_(self.rg.binomial(1000, .5) >= 0) + rg = self._create_rng().rg + assert_(rg.binomial(10, .5) >= 0) + assert_(rg.binomial(1000, .5) >= 0) def test_reset_state(self): - state = self.rg.bit_generator.state - int_1 = self.rg.integers(2**31) - self.rg.bit_generator.state = state - int_2 = self.rg.integers(2**31) + rg = self._create_rng().rg + state = rg.bit_generator.state + int_1 = rg.integers(2**31) + rg.bit_generator.state = state + int_2 = rg.integers(2**31) assert_(int_1 == int_2) def test_entropy_init(self): - rg = Generator(self.bit_generator()) - rg2 = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + rg2 = Generator(bit_generator()) assert_(not comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_seed(self): - rg = Generator(self.bit_generator(*self.seed)) - rg2 = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg2 = Generator(data.bit_generator(*data.seed)) rg.random() rg2.random() assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_reset_state_gauss(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.standard_normal() state = rg.bit_generator.state n1 = rg.standard_normal(size=10) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.standard_normal(size=10) assert_array_equal(n1, n2) def test_reset_state_uint32(self): - rg = Generator(self.bit_generator(*self.seed)) + data = 
self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.integers(0, 2 ** 24, 120, dtype=np.uint32) state = rg.bit_generator.state n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) assert_array_equal(n1, n2) def test_reset_state_float(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.random(dtype='float32') state = rg.bit_generator.state n1 = rg.random(size=10, dtype='float32') - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.random(size=10, dtype='float32') assert_((n1 == n2).all()) def test_shuffle(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_permutation(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_beta(self): - vals = self.rg.beta(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.beta(2.0, 2.0, 10) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), 2.0) + vals = rg.beta(np.array([2.0] * 10), 2.0) assert_(len(vals) == 10) - vals = self.rg.beta(2.0, np.array([2.0] * 10)) + vals = rg.beta(2.0, np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) assert_(vals.shape == (10, 10)) def test_bytes(self): - vals = self.rg.bytes(10) + rg = 
self._create_rng().rg + vals = rg.bytes(10) assert_(len(vals) == 10) def test_chisquare(self): - vals = self.rg.chisquare(2.0, 10) + rg = self._create_rng().rg + vals = rg.chisquare(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.chisquare) + params_1(rg.chisquare) def test_exponential(self): - vals = self.rg.exponential(2.0, 10) + rg = self._create_rng().rg + vals = rg.exponential(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential) + params_1(rg.exponential) def test_f(self): - vals = self.rg.f(3, 1000, 10) + rg = self._create_rng().rg + vals = rg.f(3, 1000, 10) assert_(len(vals) == 10) def test_gamma(self): - vals = self.rg.gamma(3, 2, 10) + rg = self._create_rng().rg + vals = rg.gamma(3, 2, 10) assert_(len(vals) == 10) def test_geometric(self): - vals = self.rg.geometric(0.5, 10) + rg = self._create_rng().rg + vals = rg.geometric(0.5, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential, bounded=True) + params_1(rg.exponential, bounded=True) def test_gumbel(self): - vals = self.rg.gumbel(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.gumbel(2.0, 2.0, 10) assert_(len(vals) == 10) def test_laplace(self): - vals = self.rg.laplace(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.laplace(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logitic(self): - vals = self.rg.logistic(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.logistic(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logseries(self): - vals = self.rg.logseries(0.5, 10) + rg = self._create_rng().rg + vals = rg.logseries(0.5, 10) assert_(len(vals) == 10) def test_negative_binomial(self): - vals = self.rg.negative_binomial(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.negative_binomial(10, 0.2, 10) assert_(len(vals) == 10) def test_noncentral_chisquare(self): - vals = self.rg.noncentral_chisquare(10, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_chisquare(10, 2, 10) assert_(len(vals) == 10) def test_noncentral_f(self): - vals = 
self.rg.noncentral_f(3, 1000, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_f(3, 1000, 2, 10) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2) + vals = rg.noncentral_f(np.array([3] * 10), 1000, 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2) + vals = rg.noncentral_f(3, np.array([1000] * 10), 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10)) + vals = rg.noncentral_f(3, 1000, np.array([2] * 10)) assert_(len(vals) == 10) def test_normal(self): - vals = self.rg.normal(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.normal(10, 0.2, 10) assert_(len(vals) == 10) def test_pareto(self): - vals = self.rg.pareto(3.0, 10) + rg = self._create_rng().rg + vals = rg.pareto(3.0, 10) assert_(len(vals) == 10) def test_poisson(self): - vals = self.rg.poisson(10, 10) + rg = self._create_rng().rg + vals = rg.poisson(10, 10) assert_(len(vals) == 10) - vals = self.rg.poisson(np.array([10] * 10)) + vals = rg.poisson(np.array([10] * 10)) assert_(len(vals) == 10) - params_1(self.rg.poisson) + params_1(rg.poisson) def test_power(self): - vals = self.rg.power(0.2, 10) + rg = self._create_rng().rg + vals = rg.power(0.2, 10) assert_(len(vals) == 10) def test_integers(self): - vals = self.rg.integers(10, 20, 10) + rg = self._create_rng().rg + vals = rg.integers(10, 20, 10) assert_(len(vals) == 10) def test_rayleigh(self): - vals = self.rg.rayleigh(0.2, 10) + rg = self._create_rng().rg + vals = rg.rayleigh(0.2, 10) assert_(len(vals) == 10) - params_1(self.rg.rayleigh, bounded=True) + params_1(rg.rayleigh, bounded=True) def test_vonmises(self): - vals = self.rg.vonmises(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.vonmises(10, 0.2, 10) assert_(len(vals) == 10) def test_wald(self): - vals = self.rg.wald(1.0, 1.0, 10) + rg = self._create_rng().rg + vals = rg.wald(1.0, 1.0, 10) assert_(len(vals) == 10) def test_weibull(self): - vals = self.rg.weibull(1.0, 10) + rg 
= self._create_rng().rg + vals = rg.weibull(1.0, 10) assert_(len(vals) == 10) def test_zipf(self): - vals = self.rg.zipf(10, 10) + rg = self._create_rng().rg + vec_1d = np.arange(2.0, 102.0) + vec_2d = np.arange(2.0, 102.0)[None, :] + mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + vals = rg.zipf(10, 10) assert_(len(vals) == 10) - vals = self.rg.zipf(self.vec_1d) + vals = rg.zipf(vec_1d) assert_(len(vals) == 100) - vals = self.rg.zipf(self.vec_2d) + vals = rg.zipf(vec_2d) assert_(vals.shape == (1, 100)) - vals = self.rg.zipf(self.mat) + vals = rg.zipf(mat) assert_(vals.shape == (100, 100)) def test_hypergeometric(self): - vals = self.rg.hypergeometric(25, 25, 20) + rg = self._create_rng().rg + vals = rg.hypergeometric(25, 25, 20) assert_(np.isscalar(vals)) - vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20) + vals = rg.hypergeometric(np.array([25] * 10), 25, 20) assert_(vals.shape == (10,)) def test_triangular(self): - vals = self.rg.triangular(-5, 0, 5) + rg = self._create_rng().rg + vals = rg.triangular(-5, 0, 5) assert_(np.isscalar(vals)) - vals = self.rg.triangular(-5, np.array([0] * 10), 5) + vals = rg.triangular(-5, np.array([0] * 10), 5) assert_(vals.shape == (10,)) def test_multivariate_normal(self): + rg = self._create_rng().rg mean = [0, 0] cov = [[1, 0], [0, 100]] # diagonal covariance - x = self.rg.multivariate_normal(mean, cov, 5000) + x = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_zig = self.rg.multivariate_normal(mean, cov, 5000) + x_zig = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_inv = self.rg.multivariate_normal(mean, cov, 5000) + x_inv = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) assert_((x_zig != x_inv).any()) def test_multinomial(self): - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3]) + rg = self._create_rng().rg + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3]) assert_(vals.shape == (2,)) - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 
/ 3], size=10) + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) assert_(vals.shape == (10, 2)) def test_dirichlet(self): - s = self.rg.dirichlet((10, 5, 3), 20) + rg = self._create_rng().rg + s = rg.dirichlet((10, 5, 3), 20) assert_(s.shape == (20, 3)) def test_pickle(self): - pick = pickle.dumps(self.rg) + rg = self._create_rng().rg + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(self.rg) == type(unpick)) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) - pick = pickle.dumps(self.rg) + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(self.rg) == type(unpick)) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) def test_seed_array(self): - if self.seed_vector_bits is None: - bitgen_name = self.bit_generator.__name__ + data = self._create_rng() + if data.seed_vector_bits is None: + bitgen_name = data.bit_generator.__name__ pytest.skip(f'Vector seeding is not supported by {bitgen_name}') - if self.seed_vector_bits == 32: + if data.seed_vector_bits == 32: dtype = np.uint32 else: dtype = np.uint64 seed = np.array([1], dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(1) + bg = data.bit_generator(1) state2 = bg.state assert_(comp_state(state1, state2)) seed = np.arange(4, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = np.arange(1500, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = 
2 ** np.mod(np.arange(1500, dtype=dtype), - self.seed_vector_bits - 1) + 1 - bg = self.bit_generator(seed) + data.seed_vector_bits - 1) + 1 + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) def test_uniform_float(self): - rg = Generator(self.bit_generator(12345)) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator(12345)) warmup(rg) state = rg.bit_generator.state r1 = rg.random(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.random(11, dtype=np.float32) @@ -496,11 +550,12 @@ def test_uniform_float(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_gamma_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) @@ -509,11 +564,12 @@ def test_gamma_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -522,11 +578,12 @@ def test_normal_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_zig_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = 
self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -535,7 +592,7 @@ def test_normal_zig_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_output_fill(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -557,7 +614,7 @@ def test_output_fill(self): assert_equal(direct, existing) def test_output_filling_uniform(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -575,7 +632,7 @@ def test_output_filling_uniform(self): assert_equal(direct, existing) def test_output_filling_exponential(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -593,7 +650,7 @@ def test_output_filling_exponential(self): assert_equal(direct, existing) def test_output_filling_gamma(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.zeros(size) @@ -611,7 +668,7 @@ def test_output_filling_gamma(self): assert_equal(direct, existing) def test_output_filling_gamma_broadcast(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) mu = np.arange(97.0) + 1.0 @@ -630,7 +687,7 @@ def test_output_filling_gamma_broadcast(self): assert_equal(direct, existing) def test_output_fill_error(self): - rg = self.rg + rg = self._create_rng().rg size = (31, 7, 97) existing = np.empty(size) with pytest.raises(TypeError): @@ -652,7 +709,14 @@ def test_output_fill_error(self): with pytest.raises(ValueError): rg.standard_gamma(1.0, out=existing[::3]) + 
@pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_broadcast(self, dtype): + rg = self._create_rng().rg + initial_state = rg.bit_generator.state + + def reset_state(rng): + rng.bit_generator.state = initial_state + if dtype == np.bool: upper = 2 lower = 0 @@ -660,45 +724,50 @@ def test_integers_broadcast(self, dtype): info = np.iinfo(dtype) upper = int(info.max) + 1 lower = info.min - self._reset_state() - a = self.rg.integers(lower, [upper] * 10, dtype=dtype) - self._reset_state() - b = self.rg.integers([lower] * 10, upper, dtype=dtype) + reset_state(rg) + rg.bit_generator.state = initial_state + a = rg.integers(lower, [upper] * 10, dtype=dtype) + reset_state(rg) + b = rg.integers([lower] * 10, upper, dtype=dtype) assert_equal(a, b) - self._reset_state() - c = self.rg.integers(lower, upper, size=10, dtype=dtype) + reset_state(rg) + c = rg.integers(lower, upper, size=10, dtype=dtype) assert_equal(a, c) - self._reset_state() - d = self.rg.integers(np.array( + reset_state(rg) + d = rg.integers(np.array( [lower] * 10), np.array([upper], dtype=object), size=10, dtype=dtype) assert_equal(a, d) - self._reset_state() - e = self.rg.integers( + reset_state(rg) + e = rg.integers( np.array([lower] * 10), np.array([upper] * 10), size=10, dtype=dtype) assert_equal(a, e) - self._reset_state() - a = self.rg.integers(0, upper, size=10, dtype=dtype) - self._reset_state() - b = self.rg.integers([upper] * 10, dtype=dtype) + reset_state(rg) + a = rg.integers(0, upper, size=10, dtype=dtype) + reset_state(rg) + b = rg.integers([upper] * 10, dtype=dtype) assert_equal(a, b) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_numpy(self, dtype): + rg = self._create_rng().rg high = np.array([1]) low = np.array([0]) - out = self.rg.integers(low, high, dtype=dtype) + out = rg.integers(low, high, dtype=dtype) assert out.shape == (1,) - out = self.rg.integers(low[0], high, dtype=dtype) + out = rg.integers(low[0], high, dtype=dtype) assert out.shape 
== (1,) - out = self.rg.integers(low, high[0], dtype=dtype) + out = rg.integers(low, high[0], dtype=dtype) assert out.shape == (1,) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_broadcast_errors(self, dtype): + rg = self._create_rng().rg if dtype == np.bool: upper = 2 lower = 0 @@ -707,102 +776,97 @@ def test_integers_broadcast_errors(self, dtype): upper = int(info.max) + 1 lower = info.min with pytest.raises(ValueError): - self.rg.integers(lower, [upper + 1] * 10, dtype=dtype) + rg.integers(lower, [upper + 1] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) + rg.integers(lower - 1, [upper] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) + rg.integers([lower - 1], [upper] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers([0], [0], dtype=dtype) + rg.integers([0], [0], dtype=dtype) class TestMT19937(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = MT19937 - cls.advance = None - cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 32 - cls._extra_setup() - cls.seed_error = ValueError + def _create_rng(cls): + bit_generator = MT19937 + advance = None + seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 32 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_numpy_state(self): + rg = self._create_rng().rg nprg = np.random.RandomState() nprg.standard_normal(99) state = nprg.get_state() - self.rg.bit_generator.state = state - state2 = self.rg.bit_generator.state + rg.bit_generator.state = state + state2 = rg.bit_generator.state assert_((state[1] == state2['state']['key']).all()) assert_(state[2] == state2['state']['pos']) class TestPhilox(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = 
Philox - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = Philox + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestSFC64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = SFC64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 192 - cls._extra_setup() + def _create_rng(cls): + bit_generator = SFC64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 192 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64DXSM(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64DXSM - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64DXSM + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, 
advance, seed, rg, seed_vector_bits) class TestDefaultRNG(RNG): @classmethod - def setup_class(cls): + def _create_rng(cls): # This will duplicate some tests that directly instantiate a fresh # Generator(), but that's okay. - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = np.random.default_rng(*cls.seed) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = np.random.default_rng(*seed) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_default_is_pcg64(self): # In order to change the default BitGenerator, we'll go through # a deprecation cycle to move to a different function. - assert_(isinstance(self.rg.bit_generator, PCG64)) + rg = self._create_rng().rg + assert_(isinstance(rg.bit_generator, PCG64)) def test_seed(self): np.random.default_rng() diff --git a/blimgui/dist64/numpy/rec/__init__.py b/blimgui/dist64/numpy/rec/__init__.py index c917248..f96c6ed 100644 --- a/blimgui/dist64/numpy/rec/__init__.py +++ b/blimgui/dist64/numpy/rec/__init__.py @@ -1,2 +1,2 @@ -from numpy._core.records import __all__, __doc__ from numpy._core.records import * +from numpy._core.records import __all__, __doc__ diff --git a/blimgui/dist64/numpy/rec/__init__.pyi b/blimgui/dist64/numpy/rec/__init__.pyi index 5cb112a..a807ffa 100644 --- a/blimgui/dist64/numpy/rec/__init__.pyi +++ b/blimgui/dist64/numpy/rec/__init__.pyi @@ -1,14 +1,15 @@ from numpy._core.records import ( - record, - recarray, + array, find_duplicate, format_parser, fromarrays, + fromfile, fromrecords, fromstring, - fromfile, - array, + recarray, + record, ) + __all__ = [ "record", "recarray", diff --git a/blimgui/dist64/numpy/strings/__init__.py b/blimgui/dist64/numpy/strings/__init__.py index 3bbb8c9..cae8a9b 100644 --- a/blimgui/dist64/numpy/strings/__init__.py +++ 
b/blimgui/dist64/numpy/strings/__init__.py @@ -1,2 +1,2 @@ -from numpy._core.strings import __all__, __doc__ from numpy._core.strings import * +from numpy._core.strings import __all__, __doc__ diff --git a/blimgui/dist64/numpy/strings/__init__.pyi b/blimgui/dist64/numpy/strings/__init__.pyi index f5e85ca..4d005fd 100644 --- a/blimgui/dist64/numpy/strings/__init__.pyi +++ b/blimgui/dist64/numpy/strings/__init__.pyi @@ -1,49 +1,50 @@ from numpy._core.strings import ( + add, + capitalize, + center, + count, + decode, + encode, + endswith, equal, - not_equal, - greater_equal, - less_equal, + expandtabs, + find, greater, - less, - add, - multiply, - mod, - isalpha, + greater_equal, + index, isalnum, - isdigit, - isspace, - isnumeric, + isalpha, isdecimal, + isdigit, islower, - isupper, + isnumeric, + isspace, istitle, - str_len, - find, + isupper, + less, + less_equal, + ljust, + lower, + lstrip, + mod, + multiply, + not_equal, + partition, + replace, rfind, - index, rindex, - count, - startswith, - endswith, - decode, - encode, - expandtabs, - center, - ljust, rjust, - lstrip, + rpartition, rstrip, + slice, + startswith, + str_len, strip, - zfill, - upper, - lower, swapcase, - capitalize, title, - replace, - partition, - rpartition, translate, + upper, + zfill, ) __all__ = [ @@ -92,4 +93,5 @@ __all__ = [ "decode", "encode", "translate", + "slice", ] diff --git a/blimgui/dist64/numpy/testing/__init__.py b/blimgui/dist64/numpy/testing/__init__.py index 4f64f42..813d67a 100644 --- a/blimgui/dist64/numpy/testing/__init__.py +++ b/blimgui/dist64/numpy/testing/__init__.py @@ -7,16 +7,16 @@ """ from unittest import TestCase -from . import _private -from ._private.utils import * -from ._private.utils import (_assert_valid_refcount, _gen_alignment_data) +from . import _private, overrides from ._private import extbuild -from . 
import overrides +from ._private.utils import * +from ._private.utils import _assert_valid_refcount, _gen_alignment_data __all__ = ( _private.utils.__all__ + ['TestCase', 'overrides'] ) from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/blimgui/dist64/numpy/testing/__init__.pyi b/blimgui/dist64/numpy/testing/__init__.pyi index 338e12c..0e3f7e4 100644 --- a/blimgui/dist64/numpy/testing/__init__.pyi +++ b/blimgui/dist64/numpy/testing/__init__.pyi @@ -1,9 +1,12 @@ from unittest import TestCase -from . import overrides +from . import _private as _private, overrides +from ._private import extbuild as extbuild from ._private.utils import ( + BLAS_SUPPORTS_FPE, HAS_LAPACK64, HAS_REFCOUNT, + IS_64BIT, IS_EDITABLE, IS_INSTALLED, IS_MUSL, @@ -51,8 +54,10 @@ from ._private.utils import ( ) __all__ = [ + "BLAS_SUPPORTS_FPE", "HAS_LAPACK64", "HAS_REFCOUNT", + "IS_64BIT", "IS_EDITABLE", "IS_INSTALLED", "IS_MUSL", diff --git a/blimgui/dist64/numpy/testing/_private/extbuild.py b/blimgui/dist64/numpy/testing/_private/extbuild.py index 9eff66d..62fee58 100644 --- a/blimgui/dist64/numpy/testing/_private/extbuild.py +++ b/blimgui/dist64/numpy/testing/_private/extbuild.py @@ -16,7 +16,7 @@ def build_and_import_extension( modname, functions, *, prologue="", build_dir=None, - include_dirs=[], more_init=""): + include_dirs=None, more_init=""): """ Build and imports a c-extension module `modname` from a list of function fragments `functions`. 
@@ -53,6 +53,8 @@ def build_and_import_extension( >>> assert not mod.test_bytes('abc') >>> assert mod.test_bytes(b'abc') """ + if include_dirs is None: + include_dirs = [] body = prologue + _make_methods(functions, modname) init = """ PyObject *mod = PyModule_Create(&moduledef); @@ -68,12 +70,8 @@ def build_and_import_extension( init += more_init init += "\nreturn mod;" source_string = _make_source(modname, init, body) - try: - mod_so = compile_extension_module( - modname, build_dir, include_dirs, source_string) - except Exception as e: - # shorten the exception chain - raise RuntimeError(f"could not compile in {build_dir}:") from e + mod_so = compile_extension_module( + modname, build_dir, include_dirs, source_string) import importlib.util spec = importlib.util.spec_from_file_location(modname, mod_so) foo = importlib.util.module_from_spec(spec) @@ -83,7 +81,7 @@ def build_and_import_extension( def compile_extension_module( name, builddir, include_dirs, - source_string, libraries=[], library_dirs=[]): + source_string, libraries=None, library_dirs=None): """ Build an extension module and return the filename of the resulting native code file. 
@@ -106,11 +104,14 @@ def compile_extension_module( dirname = builddir / name dirname.mkdir(exist_ok=True) cfile = _convert_str_to_file(source_string, dirname) - include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')] + include_dirs = include_dirs or [] + libraries = libraries or [] + library_dirs = library_dirs or [] return _c_compile( cfile, outputfilename=dirname / modname, - include_dirs=include_dirs, libraries=[], library_dirs=[], + include_dirs=include_dirs, libraries=libraries, + library_dirs=library_dirs, ) @@ -133,19 +134,19 @@ def _make_methods(functions, modname): methods_table = [] codes = [] for funcname, flags, code in functions: - cfuncname = "%s_%s" % (modname, funcname) + cfuncname = f"{modname}_{funcname}" if 'METH_KEYWORDS' in flags: signature = '(PyObject *self, PyObject *args, PyObject *kwargs)' else: signature = '(PyObject *self, PyObject *args)' methods_table.append( "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags)) - func_code = """ + func_code = f""" static PyObject* {cfuncname}{signature} {{ {code} }} - """.format(cfuncname=cfuncname, signature=signature, code=code) + """ codes.append(func_code) body = "\n".join(codes) + """ @@ -160,7 +161,7 @@ def _make_methods(functions, modname): -1, /* m_size */ methods, /* m_methods */ }; - """ % dict(methods='\n'.join(methods_table), modname=modname) + """ % {'methods': '\n'.join(methods_table), 'modname': modname} return body @@ -176,41 +177,28 @@ def _make_source(name, init, body): PyInit_%(name)s(void) { %(init)s } - """ % dict( - name=name, init=init, body=body, - ) + """ % { + 'name': name, 'init': init, 'body': body, + } return code -def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[], - library_dirs=[]): +def _c_compile(cfile, outputfilename, include_dirs, libraries, + library_dirs): + link_extra = [] if sys.platform == 'win32': compile_extra = ["/we4013"] - link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs')] + 
link_extra.append('/DEBUG') # generate .pdb file elif sys.platform.startswith('linux'): compile_extra = [ "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"] - link_extra = [] else: - compile_extra = link_extra = [] - pass - if sys.platform == 'win32': - link_extra = link_extra + ['/DEBUG'] # generate .pdb file - if sys.platform == 'darwin': - # support Fink & Darwinports - for s in ('/sw/', '/opt/local/'): - if (s + 'include' not in include_dirs - and os.path.exists(s + 'include')): - include_dirs.append(s + 'include') - if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'): - library_dirs.append(s + 'lib') - - outputfilename = outputfilename.with_suffix(get_so_suffix()) - build( + compile_extra = [] + + return build( cfile, outputfilename, compile_extra, link_extra, include_dirs, libraries, library_dirs) - return outputfilename def build(cfile, outputfilename, compile_extra, link_extra, @@ -219,20 +207,25 @@ def build(cfile, outputfilename, compile_extra, link_extra, build_dir = cfile.parent / "build" os.makedirs(build_dir, exist_ok=True) - so_name = outputfilename.parts[-1] with open(cfile.parent / "meson.build", "wt") as fid: - includes = ['-I' + d for d in include_dirs] link_dirs = ['-L' + d for d in library_dirs] fid.write(textwrap.dedent(f"""\ project('foo', 'c') - shared_module('{so_name}', '{cfile.parts[-1]}', - c_args: {includes} + {compile_extra}, - link_args: {link_dirs} + {link_extra}, - link_with: {libraries}, - name_prefix: '', - name_suffix: 'dummy', + py = import('python').find_installation(pure: false) + py.extension_module( + '{outputfilename.parts[-1]}', + '{cfile.parts[-1]}', + c_args: {compile_extra}, + link_args: {link_dirs}, + include_directories: {include_dirs}, ) """)) + native_file_name = cfile.parent / ".mesonpy-native-file.ini" + with open(native_file_name, "wt") as fid: + fid.write(textwrap.dedent(f"""\ + [binaries] + python = '{sys.executable}' + """)) if sys.platform == "win32": subprocess.check_call(["meson", 
"setup", "--buildtype=release", @@ -240,11 +233,16 @@ def build(cfile, outputfilename, compile_extra, link_extra, cwd=build_dir, ) else: - subprocess.check_call(["meson", "setup", "--vsenv", ".."], + subprocess.check_call(["meson", "setup", "--vsenv", + "..", f'--native-file={os.fspath(native_file_name)}'], cwd=build_dir ) + + so_name = outputfilename.parts[-1] + get_so_suffix() subprocess.check_call(["meson", "compile"], cwd=build_dir) - os.rename(str(build_dir / so_name) + ".dummy", cfile.parent / so_name) + os.rename(str(build_dir / so_name), cfile.parent / so_name) + return cfile.parent / so_name + def get_so_suffix(): ret = sysconfig.get_config_var('EXT_SUFFIX') diff --git a/blimgui/dist64/numpy/testing/_private/extbuild.pyi b/blimgui/dist64/numpy/testing/_private/extbuild.pyi index 968045e..5d7243b 100644 --- a/blimgui/dist64/numpy/testing/_private/extbuild.pyi +++ b/blimgui/dist64/numpy/testing/_private/extbuild.pyi @@ -10,7 +10,7 @@ def build_and_import_extension( *, prologue: str = "", build_dir: pathlib.Path | None = None, - include_dirs: Sequence[str] = [], + include_dirs: Sequence[str] | None = None, more_init: str = "", ) -> types.ModuleType: ... @@ -20,6 +20,6 @@ def compile_extension_module( builddir: pathlib.Path, include_dirs: Sequence[str], source_string: str, - libraries: Sequence[str] = [], - library_dirs: Sequence[str] = [], + libraries: Sequence[str] | None = None, + library_dirs: Sequence[str] | None = None, ) -> pathlib.Path: ... diff --git a/blimgui/dist64/numpy/testing/_private/utils.py b/blimgui/dist64/numpy/testing/_private/utils.py index e2b76d2..cc8083f 100644 --- a/blimgui/dist64/numpy/testing/_private/utils.py +++ b/blimgui/dist64/numpy/testing/_private/utils.py @@ -2,36 +2,31 @@ Utility function to facilitate testing. 
""" +import concurrent.futures +import contextlib +import gc +import importlib.metadata +import operator import os -import sys import pathlib import platform +import pprint import re -import gc -import operator +import shutil +import sys +import sysconfig +import threading import warnings from functools import partial, wraps -import shutil -import contextlib +from io import StringIO from tempfile import mkdtemp, mkstemp from unittest.case import SkipTest from warnings import WarningMessage -import pprint -import sysconfig -import concurrent.futures -import threading -import importlib.metadata import numpy as np -from numpy._core import ( - intp, float32, empty, arange, array_repr, ndarray, isnat, array) -from numpy import isfinite, isnan, isinf import numpy.linalg._umath_linalg -from numpy._utils import _rename_parameter -from numpy._core.tests._natype import pd_NA - -from io import StringIO - +from numpy import isfinite, isnan +from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', @@ -47,6 +42,7 @@ 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', ] @@ -71,7 +67,8 @@ class KnownFailureException(Exception): IS_EDITABLE = np_dist.origin.dir_info.editable else: # Backport importlib.metadata.Distribution.origin - import json, types # noqa: E401 + import json # noqa: E401 + import types origin = json.loads( np_dist.read_text('direct_url.json') or '{}', object_hook=lambda data: types.SimpleNamespace(**data), @@ -93,6 +90,8 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe(None) + 
HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 IS_MUSL = False @@ -137,7 +136,7 @@ def GetPerformanceAttributes(object, counter, instance=None, # you should copy this function, but keep the counter open, and call # CollectQueryData() each time you need to know. # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp - #(dead link) + # (dead link) # My older explanation for this was that the "AddCounter" process # forced the CPU to 100%, but the above makes more sense :) import win32pdh @@ -165,11 +164,12 @@ def memusage(processName="python", instance=0): win32pdh.PDH_FMT_LONG, None) elif sys.platform[:5] == 'linux': - def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'): + def memusage(_proc_pid_stat=None): """ Return virtual memory size in bytes of the running python. """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' try: with open(_proc_pid_stat) as f: l = f.readline().split(' ') @@ -186,7 +186,7 @@ def memusage(): if sys.platform[:5] == 'linux': - def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): + def jiffies(_proc_pid_stat=None, _load_time=None): """ Return number of jiffies elapsed. @@ -194,6 +194,8 @@ def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): process has been scheduled in user mode. See man 5 proc. """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + _load_time = _load_time or [] import time if not _load_time: _load_time.append(time.time()) @@ -294,9 +296,10 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): Notes ----- - By default, when one of `actual` and `desired` is a scalar and the other is - an array, the function checks that each element of the array is equal to - the scalar. This behaviour can be disabled by setting ``strict==True``. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. 
+ Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -354,7 +357,7 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', @@ -366,8 +369,8 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', verbose) return - from numpy._core import ndarray, isscalar, signbit - from numpy import iscomplexobj, real, imag + from numpy import imag, iscomplexobj, real + from numpy._core import isscalar, ndarray, signbit if isinstance(actual, ndarray) or isinstance(desired, ndarray): return assert_array_equal(actual, desired, err_msg, verbose, strict=strict) @@ -564,6 +567,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): Arrays are not almost equal to 9 decimals Mismatched elements: 1 / 2 (50%) + Mismatch at index: + [1]: 2.3333333333333 (ACTUAL), 2.33333334 (DESIRED) Max absolute difference among violations: 6.66669964e-09 Max relative difference among violations: 2.85715698e-09 ACTUAL: array([1. 
, 2.333333333]) @@ -571,8 +576,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): """ __tracebackhide__ = True # Hide traceback for py.test + from numpy import imag, iscomplexobj, real from numpy._core import ndarray - from numpy import iscomplexobj, real, imag # Handle complex numbers: separate into real/imag to handle # nan/inf/negative zero correctly @@ -617,9 +622,8 @@ def _build_err_msg(): if isnan(desired) or isnan(actual): if not (isnan(desired) and isnan(actual)): raise AssertionError(_build_err_msg()) - else: - if not desired == actual: - raise AssertionError(_build_err_msg()) + elif not desired == actual: + raise AssertionError(_build_err_msg()) return except (NotImplementedError, TypeError): pass @@ -717,9 +721,8 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', if isnan(desired) or isnan(actual): if not (isnan(desired) and isnan(actual)): raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) + elif not desired == actual: + raise AssertionError(msg) return except (TypeError, NotImplementedError): pass @@ -731,8 +734,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6, equal_nan=True, equal_inf=True, *, strict=False, names=('ACTUAL', 'DESIRED')): __tracebackhide__ = True # Hide traceback for py.test - from numpy._core import (array2string, isnan, inf, errstate, - all, max, object_) + from numpy._core import all, array2string, errstate, inf, isnan, max, object_ x = np.asanyarray(x) y = np.asanyarray(y) @@ -749,6 +751,24 @@ def istime(x): def isvstring(x): return x.dtype.char == "T" + def robust_any_difference(x, y): + # We include work-arounds here to handle three types of slightly + # pathological ndarray subclasses: + # (1) all() on fully masked arrays returns np.ma.masked, so we use != True + # (np.ma.masked != True evaluates as np.ma.masked, which is falsy). 
+ # (2) __eq__ on some ndarray subclasses returns Python booleans + # instead of element-wise comparisons, so we cast to np.bool() in + # that case (or in case __eq__ returns some other value with no + # all() method). + # (3) subclasses with bare-bones __array_function__ implementations may + # not implement np.all(), so favor using the .all() method + # We are not committed to supporting cases (2) and (3), but it's nice to + # support them if possible. + result = x == y + if not hasattr(result, "all") or not callable(result.all): + result = np.bool(result) + return result.all() != True + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): """Handling nan/inf. @@ -760,18 +780,7 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): x_id = func(x) y_id = func(y) - # We include work-arounds here to handle three types of slightly - # pathological ndarray subclasses: - # (1) all() on `masked` array scalars can return masked arrays, so we - # use != True - # (2) __eq__ on some ndarray subclasses returns Python booleans - # instead of element-wise comparisons, so we cast to np.bool() and - # use isinstance(..., bool) checks - # (3) subclasses with bare-bones __array_function__ implementations may - # not implement np.all(), so favor using the .all() method - # We are not committed to supporting such subclasses, but it's nice to - # support them if possible. - if np.bool(x_id == y_id).all() != True: + if robust_any_difference(x_id, y_id): msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -781,6 +790,9 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): raise AssertionError(msg) # If there is a scalar, then here we know the array has the same # flag as it everywhere, so we should return the scalar flag. + # np.ma.masked is also handled and converted to np.False_ (even if the other + # array has nans/infs etc.; that's OK given the handling later of fully-masked + # results). 
if isinstance(x_id, bool) or x_id.ndim == 0: return np.bool(x_id) elif isinstance(y_id, bool) or y_id.ndim == 0: @@ -788,6 +800,29 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): else: return y_id + def assert_same_inf_values(x, y, infs_mask): + """ + Verify all inf values match in the two arrays + """ + __tracebackhide__ = True # Hide traceback for py.test + + if not infs_mask.any(): + return + if x.ndim > 0 and y.ndim > 0: + x = x[infs_mask] + y = y[infs_mask] + else: + assert infs_mask.all() + + if robust_any_difference(x, y): + msg = build_err_msg( + [x, y], + err_msg + '\ninf values mismatch:', + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + try: if strict: cond = x.shape == y.shape and x.dtype == y.dtype @@ -812,12 +847,15 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') if equal_inf: - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == +inf, - hasval='+inf') - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == -inf, - hasval='-inf') + # If equal_nan=True, skip comparing nans below for equality if they are + # also infs (e.g. inf+nanj) since that would always fail. 
+ isinf_func = lambda xy: np.logical_and(np.isinf(xy), np.invert(flagged)) + infs_mask = func_assert_same_pos( + x, y, + func=isinf_func, + hasval='inf') + assert_same_inf_values(x, y, infs_mask) + flagged |= infs_mask elif istime(x) and istime(y): # If one is datetime64 and the other timedelta64 there is no point @@ -866,9 +904,33 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): n_mismatch = reduced.size - reduced.sum(dtype=intp) n_elements = flagged.size if flagged.ndim != 0 else reduced.size percent_mismatch = 100 * n_mismatch / n_elements - remarks = [ - 'Mismatched elements: {} / {} ({:.3g}%)'.format( - n_mismatch, n_elements, percent_mismatch)] + remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' + f'({percent_mismatch:.3g}%)'] + if invalids.ndim != 0: + if flagged.ndim > 0: + positions = np.argwhere(np.asarray(~flagged))[invalids] + else: + positions = np.argwhere(np.asarray(invalids)) + s = "\n".join( + [ + f" {p.tolist()}: {ox if ox.ndim == 0 else ox[tuple(p)]} " + f"({names[0]}), {oy if oy.ndim == 0 else oy[tuple(p)]} " + f"({names[1]})" + for p in positions[:5] + ] + ) + if len(positions) == 1: + remarks.append( + f"Mismatch at index:\n{s}" + ) + elif len(positions) <= 5: + remarks.append( + f"Mismatch at indices:\n{s}" + ) + else: + remarks.append( + f"First 5 mismatches are at indices:\n{s}" + ) with errstate(all='ignore'): # ignore errors for non-numeric types @@ -929,7 +991,6 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): raise ValueError(msg) -@_rename_parameter(['x', 'y'], ['actual', 'desired'], dep_version='2.0.0') def assert_array_equal(actual, desired, err_msg='', verbose=True, *, strict=False): """ @@ -986,9 +1047,10 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Notes ----- - When one of `actual` and `desired` is a scalar and the other is array_like, - the function checks that each element of the array_like object is equal to - the scalar. 
This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -1007,6 +1069,8 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Arrays are not equal Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 3.141592653589793 (ACTUAL), 3.1415926535897927 (DESIRED) Max absolute difference among violations: 4.4408921e-16 Max relative difference among violations: 1.41357986e-16 ACTUAL: array([1. , 3.141593, nan]) @@ -1059,7 +1123,6 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, strict=strict) -@_rename_parameter(['x', 'y'], ['actual', 'desired'], dep_version='2.0.0') def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', verbose=True): """ @@ -1121,6 +1184,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', Arrays are not almost equal to 5 decimals Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 2.33333 (ACTUAL), 2.33339 (DESIRED) Max absolute difference among violations: 6.e-05 Max relative difference among violations: 2.57136612e-05 ACTUAL: array([1. 
, 2.33333, nan]) @@ -1141,23 +1206,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', __tracebackhide__ = True # Hide traceback for py.test from numpy._core import number, result_type from numpy._core.numerictypes import issubdtype - from numpy._core.fromnumeric import any as npany def compare(x, y): - try: - if npany(isinf(x)) or npany(isinf(y)): - xinfid = isinf(x) - yinfid = isinf(y) - if not (xinfid == yinfid).all(): - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = result_type(y, 1.) @@ -1242,6 +1292,8 @@ def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): Arrays are not strictly ordered `x < y` Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [0]: 1.0 (x), 1.0 (y) Max absolute difference among violations: 0. Max relative difference among violations: 0. 
x: array([ 1., 1., nan]) @@ -1387,8 +1439,9 @@ def rundocs(filename=None, raise_on_error=True): >>> np.lib.test(doctests=True) # doctest: +SKIP """ - from numpy.distutils.misc_util import exec_mod_from_location import doctest + + from numpy.distutils.misc_util import exec_mod_from_location if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] @@ -1400,7 +1453,7 @@ def rundocs(filename=None, raise_on_error=True): msg = [] if raise_on_error: - out = lambda s: msg.append(s) + out = msg.append else: out = None @@ -1533,7 +1586,6 @@ def decorate_methods(cls, decorator, testmatch=None): continue if testmatch.search(funcname) and not funcname.startswith('_'): setattr(cls, funcname, decorator(function)) - return def measure(code_str, times=1, label=None): @@ -1591,6 +1643,7 @@ def _assert_valid_refcount(op): return True import gc + import numpy as np b = np.arange(100 * 100).reshape(100, 100) @@ -1605,7 +1658,6 @@ def _assert_valid_refcount(op): assert_(sys.getrefcount(i) >= rc) finally: gc.enable() - del d # for pyflakes def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, @@ -1620,9 +1672,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. - The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note - that ``allclose`` has different default values). It compares the difference - between `actual` and `desired` to ``atol + rtol * abs(desired)``. + The test is equivalent to ``allclose(actual, desired, rtol, atol)``, + except that it is stricter: it doesn't broadcast its operands, and has + tighter default tolerance values. It compares the difference between + `actual` and `desired` to ``atol + rtol * abs(desired)``. 
Parameters ---------- @@ -1658,10 +1711,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Notes ----- - When one of `actual` and `desired` is a scalar and the other is - array_like, the function performs the comparison as if the scalar were - broadcasted to the shape of the array. - This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function performs the comparison as if the scalar were broadcasted to the shape + of the array. Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -1883,8 +1936,7 @@ def nulp_diff(x, y, dtype=None): y[np.isnan(y)] = np.nan if not x.shape == y.shape: - raise ValueError("Arrays do not have the same shape: %s - %s" % - (x.shape, y.shape)) + raise ValueError(f"Arrays do not have the same shape: {x.shape} - {y.shape}") def _diff(rx, ry, vdt): diff = np.asarray(rx - ry, dtype=vdt) @@ -1903,9 +1955,8 @@ def _integer_repr(x, vdt, comp): rx = x.view(vdt) if not (rx.size == 1): rx[rx < 0] = comp - rx[rx < 0] - else: - if rx < 0: - rx = comp - rx + elif rx < 0: + rx = comp - rx return rx @@ -1927,7 +1978,7 @@ def integer_repr(x): @contextlib.contextmanager def _assert_warns_context(warning_class, name=None): __tracebackhide__ = True # Hide traceback for py.test - with suppress_warnings() as sup: + with suppress_warnings(_warn=False) as sup: l = sup.record(warning_class) yield if not len(l) > 0: @@ -1951,6 +2002,11 @@ def assert_warns(warning_class, *args, **kwargs): The ability to be used as a context manager is new in NumPy v1.11.0. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.catch_warnings` or + ``pytest.warns`` instead. 
+ Parameters ---------- warning_class : class @@ -1978,6 +2034,11 @@ def assert_warns(warning_class, *args, **kwargs): >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) >>> assert ret == 16 """ + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) if not args and not kwargs: return _assert_warns_context(warning_class) elif len(args) < 1: @@ -2086,7 +2147,7 @@ def _gen_alignment_data(dtype=float32, type='binary', max_size=24): inp1 = lambda: arange(s, dtype=dtype)[o:] inp2 = lambda: arange(s, dtype=dtype)[o:] out = empty((s,), dtype=dtype)[o:] - yield out, inp1(), inp2(), bfmt % \ + yield out, inp1(), inp2(), bfmt % \ (o, o, o, s, dtype, 'out of place') d = inp1() yield d, d, inp2(), bfmt % \ @@ -2166,7 +2227,7 @@ class clear_and_catch_warnings(warnings.catch_warnings): This makes it possible to trigger any warning afresh inside the context manager without disturbing the state of warnings outside. - For compatibility with Python 3.0, please consider all arguments to be + For compatibility with Python, please consider all arguments to be keyword-only. Parameters @@ -2230,6 +2291,11 @@ class suppress_warnings: tests might need to see the warning. Additionally it allows easier specificity for testing warnings and can be nested. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.filterwarnings` or + ``pytest.filterwarnings`` instead. + Parameters ---------- forwarding_rule : str, optional @@ -2290,7 +2356,13 @@ def some_function(): # do something which causes a warning in np.ma.core pass """ - def __init__(self, forwarding_rule="always"): + def __init__(self, forwarding_rule="always", _warn=True): + if _warn: + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. 
" + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) self._entered = False # Suppressions are either instance or defined inside one with block: @@ -2657,8 +2729,9 @@ def _parse_size(size_str): 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4, 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4} - size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format( - '|'.join(suffixes.keys())), re.I) + pipe_suffixes = "|".join(suffixes.keys()) + + size_re = re.compile(fr'^\s*(\d+|\d+\.\d+)\s*({pipe_suffixes})\s*$', re.I) m = size_re.match(size_str.lower()) if not m or m.group(2) not in suffixes: @@ -2745,16 +2818,13 @@ def run_threaded(func, max_workers=8, pass_count=False, futures = [] for arg in all_args: futures.append(tpe.submit(*arg)) + except RuntimeError as e: + import pytest + pytest.skip(f"Spawning {max_workers} threads failed with " + f"error {e!r} (likely due to resource limits on the " + "system running the tests)") finally: if len(futures) < max_workers and pass_barrier: barrier.abort() for f in futures: f.result() - - -def get_stringdtype_dtype(na_object, coerce=True): - # explicit is check for pd_NA because != with pd_NA returns pd_NA - if na_object is pd_NA or na_object != "unset": - return np.dtypes.StringDType(na_object=na_object, coerce=coerce) - else: - return np.dtypes.StringDType(coerce=coerce) diff --git a/blimgui/dist64/numpy/testing/_private/utils.pyi b/blimgui/dist64/numpy/testing/_private/utils.pyi index 68c6dfa..16daa4d 100644 --- a/blimgui/dist64/numpy/testing/_private/utils.pyi +++ b/blimgui/dist64/numpy/testing/_private/utils.pyi @@ -3,6 +3,7 @@ import sys import types import unittest import warnings +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Sequence from contextlib import _GeneratorContextManager from pathlib import Path @@ 
-13,18 +14,19 @@ from typing import ( ClassVar, Final, Generic, + Literal as L, NoReturn, + ParamSpec, + Self, SupportsIndex, TypeAlias, + TypeVarTuple, overload, type_check_only, ) -from typing import Literal as L +from typing_extensions import TypeVar, deprecated from unittest.case import SkipTest -from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath -from typing_extensions import ParamSpec, Self, TypeVar, TypeVarTuple, Unpack - import numpy as np from numpy._typing import ( ArrayLike, @@ -42,9 +44,13 @@ __all__ = [ # noqa: RUF022 "IS_PYPY", "IS_PYSTON", "IS_WASM", + "IS_INSTALLED", + "IS_64BIT", "HAS_LAPACK64", "HAS_REFCOUNT", + "BLAS_SUPPORTS_FPE", "NOGIL_BUILD", + "NUMPY_ROOT", "assert_", "assert_array_almost_equal_nulp", "assert_raises_regex", @@ -91,7 +97,6 @@ _Tss = ParamSpec("_Tss") _ET = TypeVar("_ET", bound=BaseException, default=BaseException) _FT = TypeVar("_FT", bound=Callable[..., Any]) _W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) -_T_or_bool = TypeVar("_T_or_bool", default=bool) _StrLike: TypeAlias = str | bytes _RegexLike: TypeAlias = _StrLike | Pattern[Any] @@ -128,15 +133,16 @@ IS_MUSL: Final[bool] = ... IS_PYPY: Final[bool] = ... IS_PYSTON: Final[bool] = ... IS_WASM: Final[bool] = ... +IS_64BIT: Final[bool] = ... HAS_REFCOUNT: Final[bool] = ... HAS_LAPACK64: Final[bool] = ... +BLAS_SUPPORTS_FPE: Final[bool] = ... NOGIL_BUILD: Final[bool] = ... class KnownFailureException(Exception): ... class IgnoreException(Exception): ... 
-# NOTE: `warnings.catch_warnings` is incorrectly defined as invariant in typeshed -class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments] +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): class_modules: ClassVar[tuple[types.ModuleType, ...]] = () modules: Final[set[types.ModuleType]] @overload # record: True @@ -146,6 +152,7 @@ class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): @overload # record; bool def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... +@deprecated("Please use warnings.filterwarnings or pytest.mark.filterwarnings instead") class suppress_warnings: log: Final[_WarnLog] def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... @@ -160,14 +167,14 @@ class suppress_warnings: # Contrary to runtime we can't do `os.name` checks while type checking, # only `sys.platform` checks if sys.platform == "win32" or sys.platform == "cygwin": - def memusage(processName: str = ..., instance: int = ...) -> int: ... + def memusage(processName: str = "python", instance: int = 0) -> int: ... elif sys.platform == "linux": - def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> int | None: ... + def memusage(_proc_pid_stat: StrOrBytesPath | None = None) -> int | None: ... else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies(_proc_pid_stat: StrOrBytesPath = ..., _load_time: list[float] = []) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] | None = None) -> int: ... else: def jiffies(_load_time: list[float] = []) -> int: ... 
@@ -175,10 +182,10 @@ else: def build_err_msg( arrays: Iterable[object], err_msg: object, - header: str = ..., - verbose: bool = ..., - names: Sequence[str] = ..., - precision: SupportsIndex | None = ..., + header: str = "Items are not equal:", + verbose: bool = True, + names: Sequence[str] = ("ACTUAL", "DESIRED"), # = ('ACTUAL', 'DESIRED') + precision: SupportsIndex | None = 8, ) -> str: ... # @@ -357,8 +364,10 @@ def assert_array_max_ulp( # @overload +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... # @@ -450,7 +459,7 @@ def temppath( ) -> _GeneratorContextManager[AnyStr]: ... # -def check_support_sve(__cache: list[_T_or_bool] = []) -> _T_or_bool: ... # noqa: PYI063 +def check_support_sve(__cache: list[bool] = ..., /) -> bool: ... # stubdefaulter: ignore[missing-default] # def decorate_methods( @@ -471,22 +480,22 @@ def run_threaded( ) -> None: ... @overload def run_threaded( - func: Callable[[Unpack[_Ts]], None], + func: Callable[[*_Ts], None], max_workers: int, pass_count: bool, pass_barrier: bool, outer_iterations: int, - prepare_args: tuple[Unpack[_Ts]], + prepare_args: tuple[*_Ts], ) -> None: ... @overload def run_threaded( - func: Callable[[Unpack[_Ts]], None], + func: Callable[[*_Ts], None], max_workers: int = 8, pass_count: bool = False, pass_barrier: bool = False, outer_iterations: int = 1, *, - prepare_args: tuple[Unpack[_Ts]], + prepare_args: tuple[*_Ts], ) -> None: ... 
# diff --git a/blimgui/dist64/numpy/testing/overrides.py b/blimgui/dist64/numpy/testing/overrides.py index 0d879d9..14d9b71 100644 --- a/blimgui/dist64/numpy/testing/overrides.py +++ b/blimgui/dist64/numpy/testing/overrides.py @@ -3,9 +3,10 @@ """ -from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions -from numpy import ufunc as _ufunc import numpy._core.umath as _umath +from numpy import ufunc as _ufunc +from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions + def get_overridable_numpy_ufuncs(): """List all numpy ufuncs overridable via `__array_ufunc__` diff --git a/blimgui/dist64/numpy/testing/overrides.pyi b/blimgui/dist64/numpy/testing/overrides.pyi index c2f819e..60caf8d 100644 --- a/blimgui/dist64/numpy/testing/overrides.pyi +++ b/blimgui/dist64/numpy/testing/overrides.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Hashable from typing import Any - from typing_extensions import TypeIs import numpy as np diff --git a/blimgui/dist64/numpy/testing/print_coercion_tables.py b/blimgui/dist64/numpy/testing/print_coercion_tables.py index 4d99991..6e49fa2 100644 --- a/blimgui/dist64/numpy/testing/print_coercion_tables.py +++ b/blimgui/dist64/numpy/testing/print_coercion_tables.py @@ -2,9 +2,11 @@ """Prints type-coercion tables for the built-in NumPy types """ +from collections import namedtuple + import numpy as np from numpy._core.numerictypes import obj2sctype -from collections import namedtuple + # Generic object that can be added, but doesn't do anything else class GenericObject: @@ -40,7 +42,8 @@ def print_cancast_table(ntypes): print(cast, end=' ') print() -def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, + use_promote_types=False): print('+', end=' ') for char in ntypes: print(char, end=' ') @@ -96,7 +99,7 @@ def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): 4: 
".", # unsafe casting } flags_table = { - 0 : "▗", 7: "█", + 0: "▗", 7: "█", 1: "▚", 2: "▐", 4: "▄", 3: "▜", 5: "▙", 6: "▟", @@ -132,6 +135,7 @@ def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): # The np.dtype(x.type) is a bit strange, because dtype classes do # not expose much yet. types = np.typecodes["All"] + def sorter(x): # This is a bit weird hack, to get a table as close as possible to # the one printing all typecodes (but expecting user-dtypes). @@ -171,8 +175,10 @@ def print_table(field="can_cast"): if flags: print() - print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, " - f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors") + print(f"{flags_table[0]}: no flags, " + f"{flags_table[1]}: PyAPI, " + f"{flags_table[2]}: supports unaligned, " + f"{flags_table[4]}: no-float-errors") print() print_table("flags") diff --git a/blimgui/dist64/numpy/testing/print_coercion_tables.pyi b/blimgui/dist64/numpy/testing/print_coercion_tables.pyi index 0a44f43..8e71872 100644 --- a/blimgui/dist64/numpy/testing/print_coercion_tables.pyi +++ b/blimgui/dist64/numpy/testing/print_coercion_tables.pyi @@ -1,7 +1,6 @@ from collections.abc import Iterable -from typing import ClassVar, Generic - -from typing_extensions import Self, TypeVar +from typing import ClassVar, Generic, Self +from typing_extensions import TypeVar import numpy as np diff --git a/blimgui/dist64/numpy/testing/tests/test_utils.py b/blimgui/dist64/numpy/testing/tests/test_utils.py index 34ebf17..0d95d31 100644 --- a/blimgui/dist64/numpy/testing/tests/test_utils.py +++ b/blimgui/dist64/numpy/testing/tests/test_utils.py @@ -1,25 +1,44 @@ -import warnings -import sys -import os import itertools -import pytest -import weakref +import os import re +import sys +import warnings +import weakref + +import pytest import numpy as np import numpy._core._multiarray_umath as ncu from numpy.testing import ( - assert_equal, assert_array_equal, assert_almost_equal, - 
assert_array_almost_equal, assert_array_less, build_err_msg, - assert_raises, assert_warns, assert_no_warnings, assert_allclose, - assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, - clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_, - tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT + HAS_REFCOUNT, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_string_equal, + assert_warns, + build_err_msg, + clear_and_catch_warnings, + suppress_warnings, + tempdir, + temppath, ) class _GenericTest: + def _assert_func(self, *args, **kwargs): + pass + def _test_equal(self, a, b): self._assert_func(a, b) @@ -66,8 +85,8 @@ def test_array_likes(self): class TestArrayEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_array_equal + def _assert_func(self, *args, **kwargs): + assert_array_equal(*args, **kwargs) def test_generic_rank1(self): """Test rank 1 array for all dtypes.""" @@ -181,6 +200,40 @@ def test_masked_nan_inf(self): self._test_equal(a, b) self._test_equal(b, a) + # Also provides test cases for gh-11121 + def test_masked_scalar(self): + # Test masked scalar vs. plain/masked scalar + for a_val, b_val, b_masked in itertools.product( + [3., np.nan, np.inf], + [3., 4., np.nan, np.inf, -np.inf], + [False, True], + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=True) if b_masked else np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. 
plain array + for a_val, b_val in itertools.product( + [3., np.nan, -np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. masked array + for a_val, b_val, b_mask in itertools.product( + [3., np.nan, np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + itertools.product([False, True], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=b_mask) + self._test_equal(a, b) + self._test_equal(b, a) + def test_subclass_that_overrides_eq(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having @@ -249,6 +302,8 @@ def test_array_vs_array_not_equal(self): b = np.array([34986, 545676, 439655, 0]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 563766 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 563766\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -256,6 +311,9 @@ def test_array_vs_array_not_equal(self): a = np.array([34986, 545676, 439655.2, 563766]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 439655.2 (ACTUAL), 439655 (DESIRED)\n' + ' [3]: 563766.0 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '563766.\n' 'Max relative difference among violations: ' @@ -334,8 +392,8 @@ def test_build_err_msg_custom_precision(self): class TestEqual(TestArrayEqual): - def setup_method(self): - self._assert_func = assert_equal + def _assert_func(self, *args, **kwargs): + assert_equal(*args, **kwargs) def test_nan_items(self): self._assert_func(np.nan, np.nan) @@ -429,8 +487,8 @@ def test_object(self): class TestArrayAlmostEqual(_GenericTest): - def setup_method(self): - 
self._assert_func = assert_array_almost_equal + def _assert_func(self, *args, **kwargs): + assert_array_almost_equal(*args, **kwargs) def test_closeness(self): # Note that in the course of time we ended up with @@ -450,6 +508,8 @@ def test_closeness(self): self._assert_func([1.499999], [0.0], decimal=0) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1.5 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 1.5\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -458,12 +518,16 @@ def test_closeness(self): a = [1.4999999, 0.00003] b = [1.49999991, 0] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 3e-05 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(a, b, decimal=7) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 3e-05 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -477,6 +541,8 @@ def test_simple(self): self._assert_func(x, y, decimal=4) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1234.2222 (ACTUAL), 1234.2223 (DESIRED)\n' 'Max absolute difference among violations: ' '1.e-04\n' 'Max relative difference among violations: ' @@ -488,6 +554,9 @@ def test_array_vs_scalar(self): a = [5498.42354, 849.54345, 0.00] b = 5498.42354 expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 849.54345 (ACTUAL), 5498.42354 (DESIRED)\n' + ' [2]: 0.0 (ACTUAL), 5498.42354 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 1.') @@ 
-495,6 +564,9 @@ def test_array_vs_scalar(self): self._assert_func(a, b, decimal=9) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 5498.42354 (ACTUAL), 849.54345 (DESIRED)\n' + ' [2]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 5.4722099') @@ -503,6 +575,8 @@ def test_array_vs_scalar(self): a = [5498.42354, 0.00] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -511,6 +585,8 @@ def test_array_vs_scalar(self): b = 0 expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [0]: 5498.42354 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -539,6 +615,18 @@ def test_inf(self): assert_raises(AssertionError, lambda: self._assert_func(a, b)) + def test_complex_inf(self): + a = np.array([np.inf + 1.j, 2. + 1.j, 3. + 1.j]) + b = a.copy() + self._assert_func(a, b) + b[1] = 3. 
+ 1.j + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: (2+1j) (ACTUAL), (3+1j) (DESIRED)\n' + 'Max absolute difference among violations: 1.\n') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + def test_subclass(self): a = np.array([[1., 2.], [3., 4.]]) b = np.ma.masked_array([[1., 2.], [0., 4.]], @@ -587,6 +675,8 @@ def all(self, *args, **kwargs): all(z) b = np.array([1., 202]).view(MyArray) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 2.0 (ACTUAL), 202.0 (DESIRED)\n' 'Max absolute difference among violations: 200.\n' 'Max relative difference among violations: 0.99009') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -613,8 +703,8 @@ def all(self, *args, **kwargs): class TestAlmostEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_almost_equal + def _assert_func(self, *args, **kwargs): + assert_almost_equal(*args, **kwargs) def test_closeness(self): # Note that in the course of time we ended up with @@ -677,6 +767,10 @@ def test_error_message(self): # Test with a different amount of decimal digits expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1.00000000001 (ACTUAL), 1.00000000002 (DESIRED)\n' + ' [1]: 2.00000000002 (ACTUAL), 2.00000000003 (DESIRED)\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -692,6 +786,8 @@ def test_error_message(self): # differs. Note that we only check for the formatting of the arrays # themselves. 
expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -704,6 +800,8 @@ def test_error_message(self): x = np.array([np.inf, 0]) y = np.array([np.inf, 1]) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.\n' ' ACTUAL: array([inf, 0.])\n' @@ -715,6 +813,9 @@ def test_error_message(self): x = np.array([1, 2]) y = np.array([0, 0]) expected_msg = ('Mismatched elements: 2 / 2 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1 (ACTUAL), 0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 2\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -726,6 +827,12 @@ def test_error_message_2(self): x = 2 y = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [2]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [3]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [4]: 2 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -734,6 +841,12 @@ def test_error_message_2(self): y = 2 x = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [1]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [2]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [3]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [4]: 1.0 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 0.5') with 
pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -760,8 +873,8 @@ def all(self, *args, **kwargs): class TestApproxEqual: - def setup_method(self): - self._assert_func = assert_approx_equal + def _assert_func(self, *args, **kwargs): + assert_approx_equal(*args, **kwargs) def test_simple_0d_arrays(self): x = np.array(1234.22) @@ -803,8 +916,8 @@ def test_nan_items(self): class TestArrayAssertLess: - def setup_method(self): - self._assert_func = assert_array_less + def _assert_func(self, *args, **kwargs): + assert_array_less(*args, **kwargs) def test_simple_arrays(self): x = np.array([1.1, 2.2]) @@ -822,6 +935,9 @@ def test_simple_arrays(self): b = np.array([2, 4, 6, 8]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 6 (x), 6 (y)\n' + ' [3]: 20 (x), 8 (y)\n' 'Max absolute difference among violations: 12\n' 'Max relative difference among violations: 1.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -833,6 +949,11 @@ def test_rank2(self): self._assert_func(x, y) expected_msg = ('Mismatched elements: 4 / 4 (100%)\n' + 'Mismatch at indices:\n' + ' [0, 0]: 1.2 (x), 1.1 (y)\n' + ' [0, 1]: 2.3 (x), 2.2 (y)\n' + ' [1, 0]: 3.4 (x), 3.3 (y)\n' + ' [1, 1]: 4.5 (x), 4.4 (y)\n' 'Max absolute difference among violations: 0.1\n' 'Max relative difference among violations: 0.09090909') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -844,13 +965,15 @@ def test_rank2(self): def test_rank3(self): x = np.ones(shape=(2, 2, 2)) - y = np.ones(shape=(2, 2, 2))+1 + y = np.ones(shape=(2, 2, 2)) + 1 self._assert_func(x, y) assert_raises(AssertionError, lambda: self._assert_func(y, x)) y[0, 0, 0] = 0 expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n' + 'Mismatch at index:\n' + ' [0, 0, 0]: 1.0 (x), 0.0 (y)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -894,12 
+1017,20 @@ def test_simple_items_and_array(self): y = 999090.54 expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n' + 'Mismatch at index:\n' + ' [1, 1]: 999090.54 (x), 999090.54 (y)\n' 'Max absolute difference among violations: 0.\n' 'Max relative difference among violations: 0.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 12 / 12 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0, 0]: 999090.54 (x), 3.4536 (y)\n' + ' [0, 1]: 999090.54 (x), 2390.5436 (y)\n' + ' [0, 2]: 999090.54 (x), 435.54657 (y)\n' + ' [0, 3]: 999090.54 (x), 324525.4535 (y)\n' + ' [1, 0]: 999090.54 (x), 5449.54 (y)\n' 'Max absolute difference among violations: ' '999087.0864\n' 'Max relative difference among violations: ' @@ -912,12 +1043,17 @@ def test_zeroes(self): y = np.array(87654.) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [0]: 546456.0 (x), 87654.0 (y)\n' 'Max absolute difference among violations: 458802.\n' 'Max relative difference among violations: 5.23423917') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 87654.0 (x), 0.0 (y)\n' + ' [2]: 87654.0 (x), 15.455 (y)\n' 'Max absolute difference among violations: 87654.\n' 'Max relative difference among violations: ' '5670.5626011') @@ -927,12 +1063,18 @@ def test_zeroes(self): y = 0 expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 546456.0 (x), 0 (y)\n' + ' [1]: 0.0 (x), 0 (y)\n' + ' [2]: 15.455 (x), 0 (y)\n' 'Max absolute difference among violations: 546456.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: 0 (x), 0.0 (y)\n' 'Max absolute difference 
among violations: 0.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1001,7 +1143,10 @@ def test_strict(self): with pytest.raises(AssertionError): self._assert_func(x, y.astype(np.float32), strict=True) - +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") class TestWarns: def test_warn(self): @@ -1118,12 +1263,16 @@ def test_simple(self): b = np.array([x, y, x, x]) c = np.array([x, y, x, z]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.001 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): assert_allclose(b, c) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.0 (ACTUAL), 0.001 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1139,6 +1288,8 @@ def test_report_fail_percentage(self): b = np.array([1, 1, 1, 2]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 1 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1\n' 'Max relative difference among violations: 0.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1150,11 +1301,21 @@ def test_equal_nan(self): # Should not raise: assert_allclose(a, b, equal_nan=True) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_allclose(a, b, equal_nan=True) + b = np.array([complex(np.nan, -np.inf)]) + assert_allclose(a, b, equal_nan=True) + def test_not_equal_nan(self): a = np.array([np.nan]) b = np.array([np.nan]) 
assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + def test_equal_nan_default(self): # Make sure equal_nan default behavior remains unchanged. (All # of these functions use assert_array_compare under the hood.) @@ -1203,6 +1364,33 @@ def test_strict(self): with pytest.raises(AssertionError): assert_allclose(x, x.astype(np.float32), strict=True) + def test_infs(self): + a = np.array([np.inf]) + b = np.array([np.inf]) + assert_allclose(a, b) + + b = np.array([3.]) + expected_msg = 'inf location mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + b = np.array([-np.inf]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + b = np.array([complex(np.inf, 1.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + a = np.array([complex(np.inf, 1.)]) + b = np.array([complex(np.inf, 1.)]) + assert_allclose(a, b) + + b = np.array([complex(np.inf, 2.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) class TestArrayAlmostEqualNulp: @@ -1216,12 +1404,12 @@ def test_float64_pass(self): # Addition eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. + y = x + x * eps * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) # Subtraction epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. + y = x - x * epsneg * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) def test_float64_fail(self): @@ -1231,12 +1419,12 @@ def test_float64_fail(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. 
assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) @@ -1258,11 +1446,11 @@ def test_float32_pass(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. + y = x + x * eps * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. + y = x - x * epsneg * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) def test_float32_fail(self): @@ -1272,12 +1460,12 @@ def test_float32_fail(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) @@ -1299,11 +1487,11 @@ def test_float16_pass(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. + y = x + x * eps * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. + y = x - x * epsneg * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) def test_float16_fail(self): @@ -1313,12 +1501,12 @@ def test_float16_fail(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) @@ -1338,100 +1526,100 @@ def test_complex128_pass(self): x = np.linspace(-20, 20, 50, dtype=np.float64) x = 10**x x = np.r_[-x, x] - xi = x + x*1j + xi = x + x * 1j eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. 
- assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) # The test condition needs to be at least a factor of sqrt(2) smaller # because the real and imaginary parts both change - y = x + x*eps*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x - x*epsneg*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) def test_complex128_fail(self): nulp = 5 x = np.linspace(-20, 20, 50, dtype=np.float64) x = 10**x x = np.r_[-x, x] - xi = x + x*1j + xi = x + x * 1j eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) + xi, x + y * 1j, nulp) assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) + xi, y + x * 1j, nulp) # The test condition needs to be at least a factor of sqrt(2) smaller # because the real and imaginary parts both change - y = x + x*eps*nulp + y = x + x * eps * nulp assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) + xi, y + y * 1j, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. 
assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) + xi, x + y * 1j, nulp) assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x - x*epsneg*nulp + xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) + xi, y + y * 1j, nulp) def test_complex64_pass(self): nulp = 5 x = np.linspace(-20, 20, 50, dtype=np.float32) x = 10**x x = np.r_[-x, x] - xi = x + x*1j + xi = x + x * 1j eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x + x*eps*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x - x*epsneg*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) def test_complex64_fail(self): nulp = 5 x = np.linspace(-20, 20, 50, dtype=np.float32) x = 10**x x = np.r_[-x, x] - xi = x + x*1j + xi = x + x * 1j eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. 
assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) + xi, x + y * 1j, nulp) assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x + x*eps*nulp + xi, y + x * 1j, nulp) + y = x + x * eps * nulp assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) + xi, y + y * 1j, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) + xi, x + y * 1j, nulp) assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x - x*epsneg*nulp + xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) + xi, y + y * 1j, nulp) class TestULP: @@ -1445,14 +1633,14 @@ def test_single(self): x = np.ones(10).astype(np.float32) x += 0.01 * np.random.randn(10).astype(np.float32) eps = np.finfo(np.float32).eps - assert_array_max_ulp(x, x+eps, maxulp=20) + assert_array_max_ulp(x, x + eps, maxulp=20) def test_double(self): # Generate 1 + small deviation, check that adding eps gives a few UNL x = np.ones(10).astype(np.float64) x += 0.01 * np.random.randn(10).astype(np.float64) eps = np.finfo(np.float64).eps - assert_array_max_ulp(x, x+eps, maxulp=200) + assert_array_max_ulp(x, x + eps, maxulp=200) def test_inf(self): for dt in [np.float32, np.float64]: @@ -1526,7 +1714,7 @@ def assert_warn_len_equal(mod, n_in_context): num_warns = len(mod_warns) if 'version' in mod_warns: - # Python 3 adds a 'version' entry to the registry, + # Python adds a 'version' entry to the registry, # do not count it. 
num_warns -= 1 @@ -1573,6 +1761,7 @@ def _get_fresh_mod(): return my_mod +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_clear_and_catch_warnings(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1605,6 +1794,10 @@ def test_clear_and_catch_warnings(): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_suppress_warnings_module(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1651,6 +1844,10 @@ def warn(arr): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_suppress_warnings_type(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1679,6 +1876,12 @@ def test_suppress_warnings_type(): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_decorate_no_record(): sup = suppress_warnings() sup.filter(UserWarning) @@ -1694,6 +1897,12 @@ def warn(category): assert_equal(len(w), 1) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_record(): sup = suppress_warnings() log1 = sup.record() @@ -1731,9 +1940,16 @@ def test_suppress_warnings_record(): warnings.warn('Some warning') warnings.warn('Some other warning') 
assert_equal(len(sup2.log), 1) - assert_equal(len(sup.log), 1) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 2) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_forwarding(): def warn_other_module(): # Apply along axis is implemented in python; stacklevel=2 means @@ -1749,7 +1965,8 @@ def warn(arr): for i in range(2): warnings.warn("Some warning") - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1758,7 +1975,8 @@ def warn(arr): warnings.warn("Some warning") warnings.warn("Some warning") - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1768,7 +1986,8 @@ def warn(arr): warnings.warn("Some warning") warn_other_module() - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1778,7 +1997,8 @@ def warn(arr): warnings.warn("Some other warning") warn_other_module() - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) def test_tempdir(): @@ -1819,6 +2039,7 @@ class my_cacw(clear_and_catch_warnings): class_modules = (sys.modules[__name__],) +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_clear_and_catch_warnings_inherit(): # Test can subclass and add default modules my_mod = _get_fresh_mod() @@ -1829,6 +2050,7 @@ def test_clear_and_catch_warnings_inherit(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage 
collector is global state") class TestAssertNoGcCycles: """ Test assert_no_gc_cycles """ @@ -1879,7 +2101,7 @@ def __del__(self): self.cycle = None if ReferenceCycleInDel.make_cycle: - # but create a new one so that the garbage collector has more + # but create a new one so that the garbage collector (GC) has more # work to do. ReferenceCycleInDel() @@ -1891,7 +2113,7 @@ def __del__(self): assert_no_gc_cycles(lambda: None) except AssertionError: # the above test is only necessary if the GC actually tried to free - # our object anyway, which python 2.7 does not. + # our object anyway. if w() is not None: pytest.skip("GC does not call __del__ on cyclic objects") raise @@ -1899,31 +2121,3 @@ def __del__(self): finally: # make sure that we stop creating reference cycles ReferenceCycleInDel.make_cycle = False - - -@pytest.mark.parametrize('assert_func', [assert_array_equal, - assert_array_almost_equal]) -def test_xy_rename(assert_func): - # Test that keywords `x` and `y` have been renamed to `actual` and - # `desired`, respectively. These tests and use of `_rename_parameter` - # decorator can be removed before the release of NumPy 2.2.0. - assert_func(1, 1) - assert_func(actual=1, desired=1) - - assert_message = "Arrays are not..." - with pytest.raises(AssertionError, match=assert_message): - assert_func(1, 2) - with pytest.raises(AssertionError, match=assert_message): - assert_func(actual=1, desired=2) - - dep_message = 'Use of keyword argument...' 
- with pytest.warns(DeprecationWarning, match=dep_message): - assert_func(x=1, desired=1) - with pytest.warns(DeprecationWarning, match=dep_message): - assert_func(1, y=1) - - type_message = '...got multiple values for argument' - with (pytest.warns(DeprecationWarning, match=dep_message), - pytest.raises(TypeError, match=type_message)): - assert_func(1, x=1) - assert_func(1, 2, y=2) diff --git a/blimgui/dist64/numpy/tests/test__all__.py b/blimgui/dist64/numpy/tests/test__all__.py index d10ad68..7f94eec 100644 --- a/blimgui/dist64/numpy/tests/test__all__.py +++ b/blimgui/dist64/numpy/tests/test__all__.py @@ -1,5 +1,6 @@ import collections + import numpy as np diff --git a/blimgui/dist64/numpy/tests/test_configtool.py b/blimgui/dist64/numpy/tests/test_configtool.py index 48396ae..5145533 100644 --- a/blimgui/dist64/numpy/tests/test_configtool.py +++ b/blimgui/dist64/numpy/tests/test_configtool.py @@ -1,43 +1,51 @@ +import importlib.metadata import os +import pathlib import subprocess -import sysconfig import pytest + import numpy as np +import numpy._core.include +import numpy._core.lib.pkgconfig +from numpy.testing import IS_EDITABLE, IS_INSTALLED, IS_WASM, NUMPY_ROOT + +INCLUDE_DIR = NUMPY_ROOT / '_core' / 'include' +PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig' -from numpy.testing import IS_WASM +@pytest.mark.skipif(not IS_INSTALLED, + reason="`numpy-config` not expected to be installed") +@pytest.mark.skipif(IS_WASM, + reason="wasm interpreter cannot start subprocess") +class TestNumpyConfig: + def check_numpyconfig(self, arg): + p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) + p.check_returncode() + return p.stdout.strip() -is_editable = not bool(np.__path__) -numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__ -# We only expect to have a `numpy-config` available if NumPy was installed via -# a build frontend (and not `spin` for example) -if not (numpy_in_sitepackages or is_editable): - 
pytest.skip("`numpy-config` not expected to be installed", - allow_module_level=True) + def test_configtool_version(self): + stdout = self.check_numpyconfig('--version') + assert stdout == np.__version__ + def test_configtool_cflags(self): + stdout = self.check_numpyconfig('--cflags') + assert f'-I{os.fspath(INCLUDE_DIR)}' in stdout -def check_numpyconfig(arg): - p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) - p.check_returncode() - return p.stdout.strip() + def test_configtool_pkgconfigdir(self): + stdout = self.check_numpyconfig('--pkgconfigdir') + assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve() -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") -def test_configtool_version(): - stdout = check_numpyconfig('--version') - assert stdout == np.__version__ -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") -def test_configtool_cflags(): - stdout = check_numpyconfig('--cflags') - assert stdout.endswith(os.path.join('numpy', '_core', 'include')) +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy must be installed to check its entrypoints") +def test_pkg_config_entrypoint(): + (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy') + assert entrypoint.value == numpy._core.lib.pkgconfig.__name__ -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") -def test_configtool_pkgconfigdir(): - stdout = check_numpyconfig('--pkgconfigdir') - assert stdout.endswith(os.path.join('numpy', '_core', 'lib', 'pkgconfig')) - if not is_editable: - # Also check that the .pc file actually exists (unless we're using an - # editable install, then it'll be hiding in the build dir) - assert os.path.exists(os.path.join(stdout, 'numpy.pc')) +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy.pc is only available when numpy is installed") +@pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc") +def 
test_pkg_config_config_exists(): + assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file() diff --git a/blimgui/dist64/numpy/tests/test_ctypeslib.py b/blimgui/dist64/numpy/tests/test_ctypeslib.py index a549d0c..3d70fb7 100644 --- a/blimgui/dist64/numpy/tests/test_ctypeslib.py +++ b/blimgui/dist64/numpy/tests/test_ctypeslib.py @@ -6,8 +6,8 @@ import pytest import numpy as np -from numpy.ctypeslib import ndpointer, load_library, as_array -from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal +from numpy.ctypeslib import as_array, load_library, ndpointer +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises try: import ctypes @@ -61,7 +61,7 @@ def test_basic2(self): # (including extension) does not work. try: so_ext = sysconfig.get_config_var('EXT_SUFFIX') - load_library('_multiarray_umath%s' % so_ext, + load_library(f'_multiarray_umath{so_ext}', np._core._multiarray_umath.__file__) except ImportError as e: msg = ("ctypes is not available on this python: skipping the test" @@ -124,6 +124,7 @@ def test_flags(self): assert_(p.from_param(x)) assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) + @pytest.mark.thread_unsafe(reason="checks that global ndpointer cache is updating") def test_cache(self): assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64)) @@ -150,12 +151,12 @@ def test_arguments(self): @pytest.mark.parametrize( 'dt', [ float, - np.dtype(dict( - formats=['= (3, 12): SKIP_LIST = [] +else: + SKIP_LIST = ["numpy.distutils.msvc9compiler"] -# suppressing warnings from deprecated modules -@pytest.mark.filterwarnings("ignore:.*np.compat.*:DeprecationWarning") def test_all_modules_are_expected(): """ Test that we don't add anything that looks like a new public module by @@ -387,7 +372,7 @@ def find_unexpected_members(mod_name): if unexpected_members: raise AssertionError("Found unexpected object(s) that look like " - "modules: {}".format(unexpected_members)) + f"modules: 
{unexpected_members}") def test_api_importable(): @@ -413,7 +398,7 @@ def check_importable(module_name): if module_names: raise AssertionError("Modules in the public API that cannot be " - "imported: {}".format(module_names)) + f"imported: {module_names}") for module_name in PUBLIC_ALIASED_MODULES: try: @@ -423,19 +408,22 @@ def check_importable(module_name): if module_names: raise AssertionError("Modules in the public API that were not " - "found: {}".format(module_names)) + f"found: {module_names}") with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', category=DeprecationWarning) warnings.filterwarnings('always', category=ImportWarning) for module_name in PRIVATE_BUT_PRESENT_MODULES: if not check_importable(module_name): - module_names.append(module_name) + # Nasty hack to avoid new FreeBSD failures. This + # is only needed for NumPy 2.4.x, so go with it + if not module_name == 'numpy.distutils.msvccompiler': + module_names.append(module_name) if module_names: raise AssertionError("Modules that are not really public but looked " "public and can not be imported: " - "{}".format(module_names)) + f"{module_names}") @pytest.mark.xfail( @@ -457,14 +445,7 @@ def test_array_api_entry_point(): numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__ eps = importlib.metadata.entry_points() - try: - xp_eps = eps.select(group="array_api") - except AttributeError: - # The select interface for entry_points was introduced in py3.10, - # deprecating its dict interface. We fallback to dict keys for finding - # Array API entry points so that running this test in <=3.9 will - # still work - see https://github.com/numpy/numpy/pull/19800. 
- xp_eps = eps.get("array_api", []) + xp_eps = eps.select(group="array_api") if len(xp_eps) == 0: if numpy_in_sitepackages: msg = "No entry points for 'array_api' found" @@ -544,8 +525,13 @@ def test_core_shims_coherence(): # np.core is a shim and all submodules of np.core are shims # but we should be able to import everything in those shims - # that are available in the "real" modules in np._core - if inspect.ismodule(member): + # that are available in the "real" modules in np._core, with + # the exception of the namespace packages (__spec__.origin is None), + # like numpy._core.include, or numpy._core.lib.pkgconfig. + if ( + inspect.ismodule(member) + and member.__spec__ and member.__spec__.origin is not None + ): submodule = member submodule_name = member_name for submodule_member_name in dir(submodule): @@ -574,20 +560,22 @@ def test_functions_single_location(): Test performs BFS search traversing NumPy's public API. It flags any function-like object that is accessible from more that one place. """ - from typing import Any, Callable, Dict, List, Set, Tuple + from collections.abc import Callable + from typing import Any + from numpy._core._multiarray_umath import ( - _ArrayFunctionDispatcher as dispatched_function + _ArrayFunctionDispatcher as dispatched_function, ) - visited_modules: Set[types.ModuleType] = {np} - visited_functions: Set[Callable[..., Any]] = set() + visited_modules: set[types.ModuleType] = {np} + visited_functions: set[Callable[..., Any]] = set() # Functions often have `__name__` overridden, therefore we need # to keep track of locations where functions have been found. - functions_original_paths: Dict[Callable[..., Any], str] = dict() + functions_original_paths: dict[Callable[..., Any], str] = {} # Here we aggregate functions with more than one location. # It must be empty for the test to pass. 
- duplicated_functions: List[Tuple] = [] + duplicated_functions: list[tuple] = [] modules_queue = [np] @@ -700,9 +688,9 @@ def test___module___attribute(): "numpy._core" not in member.__name__ and # outside _core # not in a skip module list member_name not in [ - "char", "core", "ctypeslib", "f2py", "ma", "lapack_lite", - "mrecords", "testing", "tests", "polynomial", "typing", - "mtrand", "bit_generator", + "char", "core", "f2py", "ma", "lapack_lite", "mrecords", + "testing", "tests", "polynomial", "typing", "mtrand", + "bit_generator", ] and member not in visited_modules # not visited yet ): @@ -729,6 +717,13 @@ def test___module___attribute(): ): continue + # ctypeslib exports ctypes c_long/c_longlong + if ( + member.__name__ in ("c_long", "c_longlong") and + module.__name__ == "numpy.ctypeslib" + ): + continue + # skip cdef classes if member.__name__ in ( "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM", @@ -737,11 +732,11 @@ def test___module___attribute(): continue incorrect_entries.append( - dict( - Func=member.__name__, - actual=member.__module__, - expected=module.__name__, - ) + { + "Func": member.__name__, + "actual": member.__module__, + "expected": module.__name__, + } ) visited_functions.add(member) @@ -749,7 +744,7 @@ def test___module___attribute(): assert len(incorrect_entries) == 0, incorrect_entries -def _check___qualname__(obj) -> bool: +def _check_correct_qualname_and_module(obj) -> bool: qualname = obj.__qualname__ name = obj.__name__ module_name = obj.__module__ @@ -759,15 +754,19 @@ def _check___qualname__(obj) -> bool: actual_obj = functools.reduce(getattr, qualname.split("."), module) return ( actual_obj is obj or + # `obj` may be a bound method/property of `actual_obj`: ( - # for bound methods check qualname match - module_name.startswith("numpy.random") and + hasattr(actual_obj, "__get__") and hasattr(obj, "__self__") and + actual_obj.__module__ == obj.__module__ and actual_obj.__qualname__ == qualname ) ) -def 
test___qualname___attribute(): +def test___qualname___and___module___attribute(): + # NumPy messes with module and name/qualname attributes, but any object + # should be discoverable based on its module and qualname, so test that. + # We do this for anything with a name (ensuring qualname is also set). modules_queue = [np] visited_modules = {np} visited_functions = set() @@ -782,10 +781,7 @@ def test___qualname___attribute(): inspect.ismodule(member) and # it's a module "numpy" in member.__name__ and # inside NumPy not member_name.startswith("_") and # not private - member_name not in [ - "f2py", "ma", "tests", "testing", "typing", - "bit_generator", "ctypeslib", "lapack_lite", - ] and # skip modules + member_name not in {"tests", "typing"} and # type names don't match "numpy._core" not in member.__name__ and # outside _core member not in visited_modules # not visited yet ): @@ -796,13 +792,14 @@ def test___qualname___attribute(): hasattr(member, "__name__") and not member.__name__.startswith("_") and not member_name.startswith("_") and - not _check___qualname__(member) and + not _check_correct_qualname_and_module(member) and member not in visited_functions ): incorrect_entries.append( - dict( - actual=member.__qualname__, expected=member.__name__, - ) + { + "found_at": f"{module.__name__}:{member_name}", + "advertises": f"{member.__module__}:{member.__qualname__}", + } ) visited_functions.add(member) diff --git a/blimgui/dist64/numpy/tests/test_reloading.py b/blimgui/dist64/numpy/tests/test_reloading.py index ba2a11f..04d8aad 100644 --- a/blimgui/dist64/numpy/tests/test_reloading.py +++ b/blimgui/dist64/numpy/tests/test_reloading.py @@ -1,21 +1,16 @@ -import sys +import pickle import subprocess +import sys import textwrap from importlib import reload -import pickle import pytest import numpy.exceptions as ex -from numpy.testing import ( - assert_raises, - assert_warns, - assert_, - assert_equal, - IS_WASM, -) +from numpy.testing import IS_WASM, assert_, 
assert_equal, assert_raises +@pytest.mark.thread_unsafe(reason="reloads global module") def test_numpy_reloading(): # gh-7844. Also check that relevant globals retain their identity. import numpy as np @@ -25,14 +20,14 @@ def test_numpy_reloading(): VisibleDeprecationWarning = ex.VisibleDeprecationWarning ModuleDeprecationWarning = ex.ModuleDeprecationWarning - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) assert_raises(RuntimeError, reload, numpy._globals) - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) @@ -48,27 +43,34 @@ def test_novalue(): @pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_full_reimport(): - """At the time of writing this, it is *not* truly supported, but - apparently enough users rely on it, for it to be an annoying change - when it started failing previously. - """ + # Reimporting numpy like this is not safe due to use of global C state, + # and has unexpected side effects. Test that an ImportError is raised. + # When all extension modules are isolated, this should test that clearing + # sys.modules and reimporting numpy works without error. + # Test within a new process, to ensure that we do not mess with the # global state during the test run (could lead to cryptic test failures). # This is generally unsafe, especially, since we also reload the C-modules. 
code = textwrap.dedent(r""" import sys - from pytest import warns import numpy as np - for k in list(sys.modules.keys()): - if "numpy" in k: - del sys.modules[k] + for k in [k for k in sys.modules if k.startswith('numpy')]: + del sys.modules[k] - with warns(UserWarning): + try: import numpy as np + except ImportError as err: + if str(err) != "cannot load module more than once per process": + raise SystemExit(f"Unexpected ImportError: {err}") + else: + raise SystemExit("DID NOT RAISE ImportError") """) - p = subprocess.run([sys.executable, '-c', code], capture_output=True) - if p.returncode: - raise AssertionError( - f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}" - ) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/blimgui/dist64/numpy/tests/test_scripts.py b/blimgui/dist64/numpy/tests/test_scripts.py index bb64dcb..9110f7d 100644 --- a/blimgui/dist64/numpy/tests/test_scripts.py +++ b/blimgui/dist64/numpy/tests/test_scripts.py @@ -2,22 +2,23 @@ Test that we can run executable scripts that have been installed with numpy. 
""" -import sys import os -import pytest -from os.path import join as pathjoin, isfile, dirname import subprocess +import sys +from os.path import dirname, isfile, join as pathjoin + +import pytest import numpy as np -from numpy.testing import assert_equal, IS_WASM +from numpy.testing import IS_WASM, assert_equal -is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) +is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) def find_f2py_commands(): if sys.platform == 'win32': exe_dir = dirname(sys.executable) - if exe_dir.endswith('Scripts'): # virtualenv + if exe_dir.endswith('Scripts'): # virtualenv return [os.path.join(exe_dir, 'f2py')] else: return [os.path.join(exe_dir, "Scripts", 'f2py')] diff --git a/blimgui/dist64/numpy/tests/test_warnings.py b/blimgui/dist64/numpy/tests/test_warnings.py index d4164cc..57cd82f 100644 --- a/blimgui/dist64/numpy/tests/test_warnings.py +++ b/blimgui/dist64/numpy/tests/test_warnings.py @@ -2,13 +2,15 @@ Tests which scan for certain occurrences in the code, they may not find all of these occurrences but should catch almost all. 
""" -import pytest - -from pathlib import Path import ast import tokenize +from pathlib import Path + +import pytest + import numpy + class ParseCall(ast.NodeVisitor): def __init__(self): self.ls = [] @@ -32,10 +34,11 @@ def visit_Call(self, node): ast.NodeVisitor.generic_visit(self, node) if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if node.args[0].value == "ignore": - raise AssertionError( - "warnings should have an appropriate stacklevel; found in " - "{} on line {}".format(self.__filename, node.lineno)) + if getattr(node.args[0], "value", None) == "ignore": + if not self.__filename.name.startswith("test_"): + raise AssertionError( + "ignore filters should only be used in tests; " + f"found in {self.__filename} on line {node.lineno}") if p.ls[-1] == 'warn' and ( len(p.ls) == 1 or p.ls[-2] == 'warnings'): @@ -51,8 +54,8 @@ def visit_Call(self, node): if "stacklevel" in args: return raise AssertionError( - "warnings should have an appropriate stacklevel; found in " - "{} on line {}".format(self.__filename, node.lineno)) + "warnings should have an appropriate stacklevel; " + f"found in {self.__filename} on line {node.lineno}") @pytest.mark.slow diff --git a/blimgui/dist64/numpy/typing/__init__.py b/blimgui/dist64/numpy/typing/__init__.py index aa75354..616e567 100644 --- a/blimgui/dist64/numpy/typing/__init__.py +++ b/blimgui/dist64/numpy/typing/__init__.py @@ -104,13 +104,45 @@ >>> import numpy.typing as npt >>> T = TypeVar("T", bound=npt.NBitBase) - >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]": + >>> def func(a: np.floating[T], b: np.floating[T]) -> np.floating[T]: ... ... Consequently, the likes of `~numpy.float16`, `~numpy.float32` and `~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to runtime, they're not necessarily considered as sub-classes. +.. deprecated:: 2.3 + The :class:`~numpy.typing.NBitBase` helper is deprecated and will be + removed in a future release. 
Prefer expressing precision relationships via + ``typing.overload`` or ``TypeVar`` definitions bounded by concrete scalar + classes. For example: + + .. code-block:: python + + from typing import TypeVar + import numpy as np + + S = TypeVar("S", bound=np.floating) + + def func(a: S, b: S) -> S: + ... + + or in the case of different input types mapping to different output types: + + .. code-block:: python + + from typing import overload + import numpy as np + + @overload + def phase(x: np.complex64) -> np.float32: ... + @overload + def phase(x: np.complex128) -> np.float64: ... + @overload + def phase(x: np.clongdouble) -> np.longdouble: ... + def phase(x: np.complexfloating) -> np.floating: + ... + Timedelta64 ~~~~~~~~~~~ @@ -125,7 +157,7 @@ corresponding `~numpy.generic` instance. Until the introduction of shape typing (see :pep:`646`) it is unfortunately not possible to make the necessary distinction between 0D and >0D arrays. While thus not strictly -correct, all operations are that can potentially perform a 0D-array -> scalar +correct, all operations that can potentially perform a 0D-array -> scalar cast are currently annotated as exclusively returning an `~numpy.ndarray`. 
If it is known in advance that an operation *will* perform a @@ -155,15 +187,40 @@ # NOTE: The API section will be appended with additional entries # further down in this file -from numpy._typing import ( - ArrayLike, - DTypeLike, - NBitBase, - NDArray, -) +# pyright: reportDeprecated=false + +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] + +__DIR = __all__ + [k for k in globals() if k.startswith("__") and k.endswith("__")] +__DIR_SET = frozenset(__DIR) + + +def __dir__() -> list[str]: + return __DIR + +def __getattr__(name: str) -> object: + if name == "NBitBase": + import warnings + + # Deprecated in NumPy 2.3, 2025-05-01 + warnings.warn( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", + DeprecationWarning, + stacklevel=2, + ) + return NBitBase + + if name in __DIR_SET: + return globals()[name] + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + if __doc__ is not None: from numpy._typing._add_docstring import _docstrings __doc__ += _docstrings @@ -171,5 +228,6 @@ del _docstrings from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/blimgui/dist64/numpy/typing/__init__.pyi b/blimgui/dist64/numpy/typing/__init__.pyi new file mode 100644 index 0000000..7a4c7b4 --- /dev/null +++ b/blimgui/dist64/numpy/typing/__init__.pyi @@ -0,0 +1,3 @@ +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] diff --git a/blimgui/dist64/numpy/typing/mypy_plugin.py b/blimgui/dist64/numpy/typing/mypy_plugin.py index 1d0decf..bdf2f3f 100644 --- a/blimgui/dist64/numpy/typing/mypy_plugin.py +++ b/blimgui/dist64/numpy/typing/mypy_plugin.py @@ -17,6 +17,13 @@ .. versionadded:: 1.22 +.. 
deprecated:: 2.3 + The :mod:`numpy.typing.mypy_plugin` entry-point is deprecated in favor of + platform-agnostic static type inference. Remove + ``numpy.typing.mypy_plugin`` from the ``plugins`` section of your mypy + configuration; if that surfaces new errors, please open an issue with a + minimal reproducer. + Examples -------- To enable the plugin, one must add it to their mypy `configuration file`_: @@ -31,27 +38,11 @@ """ -from __future__ import annotations - -from typing import Final, TYPE_CHECKING, Callable +from collections.abc import Callable, Iterable +from typing import TYPE_CHECKING, Final, TypeAlias, cast import numpy as np -if TYPE_CHECKING: - from collections.abc import Iterable - -try: - import mypy.types - from mypy.types import Type - from mypy.plugin import Plugin, AnalyzeTypeContext - from mypy.nodes import MypyFile, ImportFrom, Statement - from mypy.build import PRI_MED - - _HookFunc = Callable[[AnalyzeTypeContext], Type] - MYPY_EX: None | ModuleNotFoundError = None -except ModuleNotFoundError as ex: - MYPY_EX = ex - __all__: list[str] = [] @@ -70,43 +61,32 @@ def _get_precision_dict() -> dict[str, str]: ("_NBitDouble", np.double), ("_NBitLongDouble", np.longdouble), ] - ret = {} - module = "numpy._typing" + ret: dict[str, str] = {} for name, typ in names: - n: int = 8 * typ().dtype.itemsize - ret[f'{module}._nbit.{name}'] = f"{module}._nbit_base._{n}Bit" + n = 8 * np.dtype(typ).itemsize + ret[f"{_MODULE}._nbit.{name}"] = f"{_MODULE}._nbit_base._{n}Bit" return ret def _get_extended_precision_list() -> list[str]: extended_names = [ - "uint128", - "uint256", - "int128", - "int256", - "float80", "float96", "float128", - "float256", - "complex160", "complex192", "complex256", - "complex512", ] return [i for i in extended_names if hasattr(np, i)] def _get_c_intp_name() -> str: # Adapted from `np.core._internal._getintp_ctype` - char = np.dtype('n').char - if char == 'i': - return "c_int" - elif char == 'l': - return "c_long" - elif char == 'q': - 
return "c_longlong" - else: - return "c_long" + return { + "i": "c_int", + "l": "c_long", + "q": "c_longlong", + }.get(np.dtype("n").char, "c_long") + +_MODULE: Final = "numpy._typing" #: A dictionary mapping type-aliases in `numpy._typing._nbit` to #: concrete `numpy.typing.NBitBase` subclasses. @@ -119,15 +99,31 @@ def _get_c_intp_name() -> str: _C_INTP: Final = _get_c_intp_name() -def _hook(ctx: AnalyzeTypeContext) -> Type: - """Replace a type-alias with a concrete ``NBitBase`` subclass.""" - typ, _, api = ctx - name = typ.name.split(".")[-1] - name_new = _PRECISION_DICT[f"numpy._typing._nbit.{name}"] - return api.named_type(name_new) +try: + if TYPE_CHECKING: + from mypy.typeanal import TypeAnalyser + import mypy.types + from mypy.build import PRI_MED + from mypy.nodes import ImportFrom, MypyFile, Statement + from mypy.plugin import AnalyzeTypeContext, Plugin + +except ModuleNotFoundError as e: + + def plugin(version: str) -> type: + raise e + +else: + + _HookFunc: TypeAlias = Callable[[AnalyzeTypeContext], mypy.types.Type] + + def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type: + """Replace a type-alias with a concrete ``NBitBase`` subclass.""" + typ, _, api = ctx + name = typ.name.split(".")[-1] + name_new = _PRECISION_DICT[f"{_MODULE}._nbit.{name}"] + return cast("TypeAnalyser", api).named_type(name_new) -if TYPE_CHECKING or MYPY_EX is None: def _index(iterable: Iterable[Statement], id: str) -> int: """Identify the first ``ImportFrom`` instance the specified `id`.""" for i, value in enumerate(iterable): @@ -139,7 +135,7 @@ def _index(iterable: Iterable[Statement], id: str) -> int: def _override_imports( file: MypyFile, module: str, - imports: list[tuple[str, None | str]], + imports: list[tuple[str, str | None]], ) -> None: """Override the first `module`-based import with new `imports`.""" # Construct a new `from module import y` statement @@ -147,14 +143,14 @@ def _override_imports( import_obj.is_top_level = True # Replace the first `module`-based import 
statement with `import_obj` - for lst in [file.defs, file.imports]: # type: list[Statement] + for lst in [file.defs, cast("list[Statement]", file.imports)]: i = _index(lst, module) lst[i] = import_obj class _NumpyPlugin(Plugin): """A mypy plugin for handling versus numpy-specific typing tasks.""" - def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc: + def get_type_analyze_hook(self, fullname: str) -> _HookFunc | None: """Set the precision of platform-specific `numpy.number` subclasses. @@ -170,30 +166,35 @@ def get_additional_deps( """Handle all import-based overrides. * Import platform-specific extended-precision `numpy.number` - subclasses (*e.g.* `numpy.float96`, `numpy.float128` and - `numpy.complex256`). + subclasses (*e.g.* `numpy.float96` and `numpy.float128`). * Import the appropriate `ctypes` equivalent to `numpy.intp`. """ - ret = [(PRI_MED, file.fullname, -1)] - - if file.fullname == "numpy": + fullname = file.fullname + if fullname == "numpy": _override_imports( - file, "numpy._typing._extended_precision", + file, + f"{_MODULE}._extended_precision", imports=[(v, v) for v in _EXTENDED_PRECISION_LIST], ) - elif file.fullname == "numpy.ctypeslib": + elif fullname == "numpy.ctypeslib": _override_imports( - file, "ctypes", + file, + "ctypes", imports=[(_C_INTP, "_c_intp")], ) - return ret + return [(PRI_MED, fullname, -1)] - def plugin(version: str) -> type[_NumpyPlugin]: - """An entry-point for mypy.""" - return _NumpyPlugin + def plugin(version: str) -> type: + import warnings -else: - def plugin(version: str) -> type[_NumpyPlugin]: - """An entry-point for mypy.""" - raise MYPY_EX + plugin = "numpy.typing.mypy_plugin" + # Deprecated 2025-01-10, NumPy 2.3 + warn_msg = ( + f"`{plugin}` is deprecated, and will be removed in a future " + f"release. Please remove `plugins = {plugin}` in your mypy config." 
+ f"(deprecated in NumPy 2.3)" + ) + warnings.warn(warn_msg, DeprecationWarning, stacklevel=3) + + return _NumpyPlugin diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/arithmetic.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/arithmetic.pyi index 6ab77ea..736f8a6 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/arithmetic.pyi @@ -28,96 +28,99 @@ AR_LIKE_M: list[np.datetime64] # Array subtraction # NOTE: mypys `NoReturn` errors are, unfortunately, not that great -_1 = AR_b - AR_LIKE_b # E: Need type annotation -_2 = AR_LIKE_b - AR_b # E: Need type annotation -AR_i - bytes() # E: No overload variant +_1 = AR_b - AR_LIKE_b # type: ignore[var-annotated] +_2 = AR_LIKE_b - AR_b # type: ignore[var-annotated] +AR_i - b"" # type: ignore[operator] -AR_f - AR_LIKE_m # E: Unsupported operand types -AR_f - AR_LIKE_M # E: Unsupported operand types -AR_c - AR_LIKE_m # E: Unsupported operand types -AR_c - AR_LIKE_M # E: Unsupported operand types +AR_f - AR_LIKE_m # type: ignore[operator] +AR_f - AR_LIKE_M # type: ignore[operator] +AR_c - AR_LIKE_m # type: ignore[operator] +AR_c - AR_LIKE_M # type: ignore[operator] -AR_m - AR_LIKE_f # E: Unsupported operand types -AR_M - AR_LIKE_f # E: Unsupported operand types -AR_m - AR_LIKE_c # E: Unsupported operand types -AR_M - AR_LIKE_c # E: Unsupported operand types +AR_m - AR_LIKE_f # type: ignore[operator] +AR_M - AR_LIKE_f # type: ignore[operator] +AR_m - AR_LIKE_c # type: ignore[operator] +AR_M - AR_LIKE_c # type: ignore[operator] -AR_m - AR_LIKE_M # E: Unsupported operand types -AR_LIKE_m - AR_M # E: Unsupported operand types +AR_m - AR_LIKE_M # type: ignore[operator] +AR_LIKE_m - AR_M # type: ignore[operator] # array floor division -AR_M // AR_LIKE_b # E: Unsupported operand types -AR_M // AR_LIKE_u # E: Unsupported operand types -AR_M // AR_LIKE_i # E: Unsupported operand types -AR_M // AR_LIKE_f # E: Unsupported operand types -AR_M // AR_LIKE_c # E: 
Unsupported operand types -AR_M // AR_LIKE_m # E: Unsupported operand types -AR_M // AR_LIKE_M # E: Unsupported operand types - -AR_b // AR_LIKE_M # E: Unsupported operand types -AR_u // AR_LIKE_M # E: Unsupported operand types -AR_i // AR_LIKE_M # E: Unsupported operand types -AR_f // AR_LIKE_M # E: Unsupported operand types -AR_c // AR_LIKE_M # E: Unsupported operand types -AR_m // AR_LIKE_M # E: Unsupported operand types -AR_M // AR_LIKE_M # E: Unsupported operand types - -_3 = AR_m // AR_LIKE_b # E: Need type annotation -AR_m // AR_LIKE_c # E: Unsupported operand types - -AR_b // AR_LIKE_m # E: Unsupported operand types -AR_u // AR_LIKE_m # E: Unsupported operand types -AR_i // AR_LIKE_m # E: Unsupported operand types -AR_f // AR_LIKE_m # E: Unsupported operand types -AR_c // AR_LIKE_m # E: Unsupported operand types +AR_M // AR_LIKE_b # type: ignore[operator] +AR_M // AR_LIKE_u # type: ignore[operator] +AR_M // AR_LIKE_i # type: ignore[operator] +AR_M // AR_LIKE_f # type: ignore[operator] +AR_M // AR_LIKE_c # type: ignore[operator] +AR_M // AR_LIKE_m # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +AR_b // AR_LIKE_M # type: ignore[operator] +AR_u // AR_LIKE_M # type: ignore[operator] +AR_i // AR_LIKE_M # type: ignore[operator] +AR_f // AR_LIKE_M # type: ignore[operator] +AR_c // AR_LIKE_M # type: ignore[operator] +AR_m // AR_LIKE_M # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +_3 = AR_m // AR_LIKE_b # type: ignore[var-annotated] +AR_m // AR_LIKE_c # type: ignore[operator] + +AR_b // AR_LIKE_m # type: ignore[operator] +AR_u // AR_LIKE_m # type: ignore[operator] +AR_i // AR_LIKE_m # type: ignore[operator] +AR_f // AR_LIKE_m # type: ignore[operator] +AR_c // AR_LIKE_m # type: ignore[operator] + +# regression tests for https://github.com/numpy/numpy/issues/28957 +AR_c // 2 # type: ignore[operator] +AR_c // AR_i # type: ignore[operator] +AR_c // AR_c # type: ignore[operator] # Array multiplication -AR_b *= AR_LIKE_u # 
E: incompatible type -AR_b *= AR_LIKE_i # E: incompatible type -AR_b *= AR_LIKE_f # E: incompatible type -AR_b *= AR_LIKE_c # E: incompatible type -AR_b *= AR_LIKE_m # E: incompatible type +AR_b *= AR_LIKE_u # type: ignore[arg-type] +AR_b *= AR_LIKE_i # type: ignore[arg-type] +AR_b *= AR_LIKE_f # type: ignore[arg-type] +AR_b *= AR_LIKE_c # type: ignore[arg-type] +AR_b *= AR_LIKE_m # type: ignore[arg-type] -AR_u *= AR_LIKE_i # E: incompatible type -AR_u *= AR_LIKE_f # E: incompatible type -AR_u *= AR_LIKE_c # E: incompatible type -AR_u *= AR_LIKE_m # E: incompatible type +AR_u *= AR_LIKE_f # type: ignore[arg-type] +AR_u *= AR_LIKE_c # type: ignore[arg-type] +AR_u *= AR_LIKE_m # type: ignore[arg-type] -AR_i *= AR_LIKE_f # E: incompatible type -AR_i *= AR_LIKE_c # E: incompatible type -AR_i *= AR_LIKE_m # E: incompatible type +AR_i *= AR_LIKE_f # type: ignore[arg-type] +AR_i *= AR_LIKE_c # type: ignore[arg-type] +AR_i *= AR_LIKE_m # type: ignore[arg-type] -AR_f *= AR_LIKE_c # E: incompatible type -AR_f *= AR_LIKE_m # E: incompatible type +AR_f *= AR_LIKE_c # type: ignore[arg-type] +AR_f *= AR_LIKE_m # type: ignore[arg-type] # Array power -AR_b **= AR_LIKE_b # E: Invalid self argument -AR_b **= AR_LIKE_u # E: Invalid self argument -AR_b **= AR_LIKE_i # E: Invalid self argument -AR_b **= AR_LIKE_f # E: Invalid self argument -AR_b **= AR_LIKE_c # E: Invalid self argument +AR_b **= AR_LIKE_b # type: ignore[misc] +AR_b **= AR_LIKE_u # type: ignore[misc] +AR_b **= AR_LIKE_i # type: ignore[misc] +AR_b **= AR_LIKE_f # type: ignore[misc] +AR_b **= AR_LIKE_c # type: ignore[misc] -AR_u **= AR_LIKE_i # E: incompatible type -AR_u **= AR_LIKE_f # E: incompatible type -AR_u **= AR_LIKE_c # E: incompatible type +AR_u **= AR_LIKE_f # type: ignore[arg-type] +AR_u **= AR_LIKE_c # type: ignore[arg-type] -AR_i **= AR_LIKE_f # E: incompatible type -AR_i **= AR_LIKE_c # E: incompatible type +AR_i **= AR_LIKE_f # type: ignore[arg-type] +AR_i **= AR_LIKE_c # type: ignore[arg-type] -AR_f **= 
AR_LIKE_c # E: incompatible type +AR_f **= AR_LIKE_c # type: ignore[arg-type] # Scalars -b_ - b_ # E: No overload variant +b_ - b_ # type: ignore[operator] -dt + dt # E: Unsupported operand types -td - dt # E: Unsupported operand types -td % 1 # E: Unsupported operand types -td / dt # E: No overload -td % dt # E: Unsupported operand types +dt + dt # type: ignore[operator] +td - dt # type: ignore[operator] +td % 1 # type: ignore[operator] +td / dt # type: ignore[operator] +td % dt # type: ignore[operator] --b_ # E: Unsupported operand type -+b_ # E: Unsupported operand type +-b_ # type: ignore[operator] ++b_ # type: ignore[operator] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/array_constructors.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/array_constructors.pyi index d4542c3..18e60ae 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/array_constructors.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/array_constructors.pyi @@ -4,31 +4,31 @@ import numpy.typing as npt a: npt.NDArray[np.float64] generator = (i for i in range(10)) -np.require(a, requirements=1) # E: No overload variant -np.require(a, requirements="TEST") # E: incompatible type +np.require(a, requirements=1) # type: ignore[call-overload] +np.require(a, requirements="TEST") # type: ignore[arg-type] -np.zeros("test") # E: incompatible type -np.zeros() # E: require at least one argument +np.zeros("test") # type: ignore[arg-type] +np.zeros() # type: ignore[call-overload] -np.ones("test") # E: incompatible type -np.ones() # E: require at least one argument +np.ones("test") # type: ignore[arg-type] +np.ones() # type: ignore[call-overload] -np.array(0, float, True) # E: No overload variant +np.array(0, float, True) # type: ignore[call-overload] -np.linspace(None, 'bob') # E: No overload variant -np.linspace(0, 2, num=10.0) # E: No overload variant -np.linspace(0, 2, endpoint='True') # E: No overload variant -np.linspace(0, 2, retstep=b'False') # E: No overload variant 
-np.linspace(0, 2, dtype=0) # E: No overload variant -np.linspace(0, 2, axis=None) # E: No overload variant +np.linspace(None, "bob") # type: ignore[call-overload] +np.linspace(0, 2, num=10.0) # type: ignore[call-overload] +np.linspace(0, 2, endpoint="True") # type: ignore[call-overload] +np.linspace(0, 2, retstep=b"False") # type: ignore[call-overload] +np.linspace(0, 2, dtype=0) # type: ignore[call-overload] +np.linspace(0, 2, axis=None) # type: ignore[call-overload] -np.logspace(None, 'bob') # E: No overload variant -np.logspace(0, 2, base=None) # E: No overload variant +np.logspace(None, "bob") # type: ignore[call-overload] +np.logspace(0, 2, base=None) # type: ignore[call-overload] -np.geomspace(None, 'bob') # E: No overload variant +np.geomspace(None, "bob") # type: ignore[call-overload] -np.stack(generator) # E: No overload variant -np.hstack({1, 2}) # E: No overload variant -np.vstack(1) # E: No overload variant +np.stack(generator) # type: ignore[call-overload] +np.hstack({1, 2}) # type: ignore[call-overload] +np.vstack(1) # type: ignore[call-overload] -np.array([1], like=1) # E: No overload variant +np.array([1], like=1) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/array_like.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/array_like.pyi index dffd87f..c30e192 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/array_like.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/array_like.pyi @@ -1,16 +1,15 @@ import numpy as np from numpy._typing import ArrayLike +class A: ... 
-class A: - pass - - -x1: ArrayLike = (i for i in range(10)) # E: Incompatible types in assignment -x2: ArrayLike = A() # E: Incompatible types in assignment -x3: ArrayLike = {1: "foo", 2: "bar"} # E: Incompatible types in assignment +x1: ArrayLike = (i for i in range(10)) # type: ignore[assignment] +x2: ArrayLike = A() # type: ignore[assignment] +x3: ArrayLike = {1: "foo", 2: "bar"} # type: ignore[assignment] scalar = np.int64(1) -scalar.__array__(dtype=np.float64) # E: No overload variant +scalar.__array__(dtype=np.float64) # type: ignore[call-overload] array = np.array([1]) -array.__array__(dtype=np.float64) # E: No overload variant +array.__array__(dtype=np.float64) # type: ignore[call-overload] + +array.setfield(np.eye(1), np.int32, (0, 1)) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/array_pad.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/array_pad.pyi index 8bf040a..dfaae81 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/array_pad.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/array_pad.pyi @@ -3,4 +3,4 @@ import numpy.typing as npt AR_i8: npt.NDArray[np.int64] -np.pad(AR_i8, 2, mode="bob") # E: No overload variant +np.pad(AR_i8, 2, mode="bob") # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/arrayprint.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/arrayprint.pyi index a341e2d..bda9725 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/arrayprint.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/arrayprint.pyi @@ -8,9 +8,8 @@ AR: npt.NDArray[np.float64] func1: Callable[[Any], str] func2: Callable[[np.integer], str] -np.array2string(AR, style=None) # E: No overload variant -np.array2string(AR, legacy="1.14") # E: No overload variant -np.array2string(AR, sign="*") # E: No overload variant -np.array2string(AR, floatmode="default") # E: No overload variant -np.array2string(AR, formatter={"A": func1}) # E: No overload variant -np.array2string(AR, 
formatter={"float": func2}) # E: No overload variant +np.array2string(AR, legacy="1.14") # type: ignore[arg-type] +np.array2string(AR, sign="*") # type: ignore[arg-type] +np.array2string(AR, floatmode="default") # type: ignore[arg-type] +np.array2string(AR, formatter={"A": func1}) # type: ignore[arg-type] +np.array2string(AR, formatter={"float": func2}) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/arrayterator.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/arrayterator.pyi index 28626d6..1f5ab13 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/arrayterator.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/arrayterator.pyi @@ -4,11 +4,11 @@ import numpy.typing as npt AR_i8: npt.NDArray[np.int64] ar_iter = np.lib.Arrayterator(AR_i8) -np.lib.Arrayterator(np.int64()) # E: incompatible type -ar_iter.shape = (10, 5) # E: is read-only -ar_iter[None] # E: Invalid index type -ar_iter[None, 1] # E: Invalid index type -ar_iter[np.intp()] # E: Invalid index type -ar_iter[np.intp(), ...] # E: Invalid index type -ar_iter[AR_i8] # E: Invalid index type -ar_iter[AR_i8, :] # E: Invalid index type +np.lib.Arrayterator(np.int64()) # type: ignore[arg-type] +ar_iter.shape = (10, 5) # type: ignore[misc] +ar_iter[None] # type: ignore[index] +ar_iter[None, 1] # type: ignore[index] +ar_iter[np.intp()] # type: ignore[index] +ar_iter[np.intp(), ...] 
# type: ignore[index] +ar_iter[AR_i8] # type: ignore[index] +ar_iter[AR_i8, :] # type: ignore[index] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/bitwise_ops.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/bitwise_ops.pyi index 7cd2881..59169cd 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -4,18 +4,14 @@ i8 = np.int64() i4 = np.int32() u8 = np.uint64() b_ = np.bool() -i = int() +i = 0 f8 = np.float64() -b_ >> f8 # E: No overload variant -i8 << f8 # E: No overload variant -i | f8 # E: Unsupported operand types -i8 ^ f8 # E: No overload variant -u8 & f8 # E: No overload variant -~f8 # E: Unsupported operand type +b_ >> f8 # type: ignore[operator] +i8 << f8 # type: ignore[operator] +i | f8 # type: ignore[operator] +i8 ^ f8 # type: ignore[operator] +u8 & f8 # type: ignore[operator] +~f8 # type: ignore[operator] # TODO: Certain mixes like i4 << u8 go to float and thus should fail - -# mypys' error message for `NoReturn` is unfortunately pretty bad -# TODO: Re-enable this once we add support for numerical precision for `number`s -# a = u8 | 0 # E: Need type annotation diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/char.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/char.pyi index aae99f8..19d0a3e 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/char.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/char.pyi @@ -4,66 +4,60 @@ import numpy.typing as npt AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -np.char.equal(AR_U, AR_S) # E: incompatible type - -np.char.not_equal(AR_U, AR_S) # E: incompatible type - -np.char.greater_equal(AR_U, AR_S) # E: incompatible type - -np.char.less_equal(AR_U, AR_S) # E: incompatible type - -np.char.greater(AR_U, AR_S) # E: incompatible type - -np.char.less(AR_U, AR_S) # E: incompatible type - -np.char.encode(AR_S) # E: incompatible type -np.char.decode(AR_U) # E: incompatible type - 
-np.char.join(AR_U, b"_") # E: incompatible type -np.char.join(AR_S, "_") # E: incompatible type - -np.char.ljust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.char.ljust(AR_S, 5, fillchar="a") # E: incompatible type -np.char.rjust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.char.rjust(AR_S, 5, fillchar="a") # E: incompatible type - -np.char.lstrip(AR_U, chars=b"a") # E: incompatible type -np.char.lstrip(AR_S, chars="a") # E: incompatible type -np.char.strip(AR_U, chars=b"a") # E: incompatible type -np.char.strip(AR_S, chars="a") # E: incompatible type -np.char.rstrip(AR_U, chars=b"a") # E: incompatible type -np.char.rstrip(AR_S, chars="a") # E: incompatible type - -np.char.partition(AR_U, b"a") # E: incompatible type -np.char.partition(AR_S, "a") # E: incompatible type -np.char.rpartition(AR_U, b"a") # E: incompatible type -np.char.rpartition(AR_S, "a") # E: incompatible type - -np.char.replace(AR_U, b"_", b"-") # E: incompatible type -np.char.replace(AR_S, "_", "-") # E: incompatible type - -np.char.split(AR_U, b"_") # E: incompatible type -np.char.split(AR_S, "_") # E: incompatible type -np.char.rsplit(AR_U, b"_") # E: incompatible type -np.char.rsplit(AR_S, "_") # E: incompatible type - -np.char.count(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.count(AR_S, "a", end=9) # E: incompatible type - -np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.endswith(AR_S, "a", end=9) # E: incompatible type -np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.startswith(AR_S, "a", end=9) # E: incompatible type - -np.char.find(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.find(AR_S, "a", end=9) # E: incompatible type -np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.rfind(AR_S, "a", end=9) # E: incompatible type - -np.char.index(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.index(AR_S, "a", end=9) # E: incompatible type 
-np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.rindex(AR_S, "a", end=9) # E: incompatible type - -np.char.isdecimal(AR_S) # E: incompatible type -np.char.isnumeric(AR_S) # E: incompatible type +np.char.equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.char.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.greater(AR_U, AR_S) # type: ignore[arg-type] +np.char.less(AR_U, AR_S) # type: ignore[arg-type] + +np.char.encode(AR_S) # type: ignore[arg-type] +np.char.decode(AR_U) # type: ignore[arg-type] + +np.char.join(AR_U, b"_") # type: ignore[arg-type] +np.char.join(AR_S, "_") # type: ignore[arg-type] + +np.char.ljust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] +np.char.rjust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] + +np.char.lstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.lstrip(AR_S, chars="a") # type: ignore[arg-type] +np.char.strip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.strip(AR_S, chars="a") # type: ignore[arg-type] +np.char.rstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.rstrip(AR_S, chars="a") # type: ignore[arg-type] + +np.char.partition(AR_U, b"a") # type: ignore[arg-type] +np.char.partition(AR_S, "a") # type: ignore[arg-type] +np.char.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.char.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.char.replace(AR_U, b"_", b"-") # type: ignore[arg-type] +np.char.replace(AR_S, "_", "-") # type: ignore[arg-type] + +np.char.split(AR_U, b"_") # type: ignore[arg-type] +np.char.split(AR_S, "_") # type: ignore[arg-type] +np.char.rsplit(AR_U, b"_") # type: ignore[arg-type] +np.char.rsplit(AR_S, "_") # type: ignore[arg-type] + +np.char.count(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.count(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] 
+np.char.endswith(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.startswith(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.find(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.find(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rfind(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.isdecimal(AR_S) # type: ignore[arg-type] +np.char.isnumeric(AR_S) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/chararray.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/chararray.pyi index bbf5813..04d0954 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/chararray.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/chararray.pyi @@ -1,61 +1,61 @@ +from typing import Any + import numpy as np -AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]] -AR_S: np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]] - -AR_S.encode() # E: Invalid self argument -AR_U.decode() # E: Invalid self argument - -AR_U.join(b"_") # E: incompatible type -AR_S.join("_") # E: incompatible type - -AR_U.ljust(5, fillchar=b"a") # E: incompatible type -AR_S.ljust(5, fillchar="a") # E: incompatible type -AR_U.rjust(5, fillchar=b"a") # E: incompatible type -AR_S.rjust(5, fillchar="a") # E: incompatible type - -AR_U.lstrip(chars=b"a") # E: incompatible type -AR_S.lstrip(chars="a") # E: incompatible type -AR_U.strip(chars=b"a") # E: incompatible type -AR_S.strip(chars="a") # E: incompatible type -AR_U.rstrip(chars=b"a") # E: incompatible type -AR_S.rstrip(chars="a") # E: incompatible type - -AR_U.partition(b"a") # E: incompatible type 
-AR_S.partition("a") # E: incompatible type -AR_U.rpartition(b"a") # E: incompatible type -AR_S.rpartition("a") # E: incompatible type - -AR_U.replace(b"_", b"-") # E: incompatible type -AR_S.replace("_", "-") # E: incompatible type - -AR_U.split(b"_") # E: incompatible type -AR_S.split("_") # E: incompatible type -AR_S.split(1) # E: incompatible type -AR_U.rsplit(b"_") # E: incompatible type -AR_S.rsplit("_") # E: incompatible type - -AR_U.count(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.count("a", end=9) # E: incompatible type - -AR_U.endswith(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.endswith("a", end=9) # E: incompatible type -AR_U.startswith(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.startswith("a", end=9) # E: incompatible type - -AR_U.find(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.find("a", end=9) # E: incompatible type -AR_U.rfind(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.rfind("a", end=9) # E: incompatible type - -AR_U.index(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.index("a", end=9) # E: incompatible type -AR_U.rindex(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.rindex("a", end=9) # E: incompatible type - -AR_U == AR_S # E: Unsupported operand types -AR_U != AR_S # E: Unsupported operand types -AR_U >= AR_S # E: Unsupported operand types -AR_U <= AR_S # E: Unsupported operand types -AR_U > AR_S # E: Unsupported operand types -AR_U < AR_S # E: Unsupported operand types +AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] + +AR_S.encode() # type: ignore[misc] +AR_U.decode() # type: ignore[misc] + +AR_U.join(b"_") # type: ignore[arg-type] +AR_S.join("_") # type: ignore[arg-type] + +AR_U.ljust(5, fillchar=b"a") # type: ignore[arg-type] +AR_U.rjust(5, fillchar=b"a") # type: ignore[arg-type] + +AR_U.lstrip(chars=b"a") # type: ignore[arg-type] +AR_S.lstrip(chars="a") # type: ignore[arg-type] +AR_U.strip(chars=b"a") # type: 
ignore[arg-type] +AR_S.strip(chars="a") # type: ignore[arg-type] +AR_U.rstrip(chars=b"a") # type: ignore[arg-type] +AR_S.rstrip(chars="a") # type: ignore[arg-type] + +AR_U.partition(b"a") # type: ignore[arg-type] +AR_S.partition("a") # type: ignore[arg-type] +AR_U.rpartition(b"a") # type: ignore[arg-type] +AR_S.rpartition("a") # type: ignore[arg-type] + +AR_U.replace(b"_", b"-") # type: ignore[arg-type] +AR_S.replace("_", "-") # type: ignore[arg-type] + +AR_U.split(b"_") # type: ignore[arg-type] +AR_S.split("_") # type: ignore[arg-type] +AR_S.split(1) # type: ignore[arg-type] +AR_U.rsplit(b"_") # type: ignore[arg-type] +AR_S.rsplit("_") # type: ignore[arg-type] + +AR_U.count(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.count("a", end=9) # type: ignore[arg-type] + +AR_U.endswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.endswith("a", end=9) # type: ignore[arg-type] +AR_U.startswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.startswith("a", end=9) # type: ignore[arg-type] + +AR_U.find(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.find("a", end=9) # type: ignore[arg-type] +AR_U.rfind(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rfind("a", end=9) # type: ignore[arg-type] + +AR_U.index(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.index("a", end=9) # type: ignore[arg-type] +AR_U.rindex(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rindex("a", end=9) # type: ignore[arg-type] + +AR_U == AR_S # type: ignore[operator] +AR_U != AR_S # type: ignore[operator] +AR_U >= AR_S # type: ignore[operator] +AR_U <= AR_S # type: ignore[operator] +AR_U > AR_S # type: ignore[operator] +AR_U < AR_S # type: ignore[operator] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/comparisons.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/comparisons.pyi index 4a489b9..7babea7 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/comparisons.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/comparisons.pyi @@ -7,21 
+7,21 @@ AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] -AR_f > AR_m # E: Unsupported operand types -AR_c > AR_m # E: Unsupported operand types +AR_f > AR_m # type: ignore[operator] +AR_c > AR_m # type: ignore[operator] -AR_m > AR_f # E: Unsupported operand types -AR_m > AR_c # E: Unsupported operand types +AR_m > AR_f # type: ignore[operator] +AR_m > AR_c # type: ignore[operator] -AR_i > AR_M # E: Unsupported operand types -AR_f > AR_M # E: Unsupported operand types -AR_m > AR_M # E: Unsupported operand types +AR_i > AR_M # type: ignore[operator] +AR_f > AR_M # type: ignore[operator] +AR_m > AR_M # type: ignore[operator] -AR_M > AR_i # E: Unsupported operand types -AR_M > AR_f # E: Unsupported operand types -AR_M > AR_m # E: Unsupported operand types +AR_M > AR_i # type: ignore[operator] +AR_M > AR_f # type: ignore[operator] +AR_M > AR_m # type: ignore[operator] -AR_i > str() # E: No overload variant -AR_i > bytes() # E: No overload variant -str() > AR_M # E: Unsupported operand types -bytes() > AR_M # E: Unsupported operand types +AR_i > "" # type: ignore[operator] +AR_i > b"" # type: ignore[operator] +"" > AR_M # type: ignore[operator] +b"" > AR_M # type: ignore[operator] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/constants.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/constants.pyi index dc52e9c..c9e0887 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/constants.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/constants.pyi @@ -1,3 +1,3 @@ import numpy as np -np.little_endian = np.little_endian # E: Cannot assign to final +np.little_endian = np.little_endian # type: ignore[misc] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/datasource.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/datasource.pyi index 00f3c7c..28932e1 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/datasource.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/datasource.pyi @@ -1,15 
+1,16 @@ from pathlib import Path + import numpy as np path: Path d1: np.lib.npyio.DataSource -d1.abspath(path) # E: incompatible type -d1.abspath(b"...") # E: incompatible type +d1.abspath(path) # type: ignore[arg-type] +d1.abspath(b"...") # type: ignore[arg-type] -d1.exists(path) # E: incompatible type -d1.exists(b"...") # E: incompatible type +d1.exists(path) # type: ignore[arg-type] +d1.exists(b"...") # type: ignore[arg-type] -d1.open(path, "r") # E: incompatible type -d1.open(b"...", encoding="utf8") # E: incompatible type -d1.open(None, newline="/n") # E: incompatible type +d1.open(path, "r") # type: ignore[arg-type] +d1.open(b"...", encoding="utf8") # type: ignore[arg-type] +d1.open(None, newline="/n") # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/dtype.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/dtype.pyi index 5fbe545..ab9e343 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/dtype.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/dtype.pyi @@ -1,18 +1,15 @@ import numpy as np - class Test1: not_dtype = np.dtype(float) - class Test2: dtype = float +np.dtype(Test1()) # type: ignore[call-overload] +np.dtype(Test2()) # type: ignore[arg-type] -np.dtype(Test1()) # E: No overload variant of "dtype" matches -np.dtype(Test2()) # E: incompatible type - -np.dtype( # E: No overload variant of "dtype" matches +np.dtype( # type: ignore[call-overload] { "field1": (float, 1), "field2": (int, 3), diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/einsumfunc.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/einsumfunc.pyi index 9c3bd72..9d6e08e 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/einsumfunc.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/einsumfunc.pyi @@ -6,7 +6,7 @@ AR_f: npt.NDArray[np.float64] AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] -np.einsum("i,i->i", AR_i, AR_m) # E: incompatible type -np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # E: incompatible type 
-np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # E: Value of type variable "_ArrayType" of "einsum" cannot be -np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # E: No overload variant +np.einsum("i,i->i", AR_i, AR_m) # type: ignore[arg-type] +np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # type: ignore[arg-type] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # type: ignore[type-var] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/flatiter.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/flatiter.pyi index dab715f..201d024 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/flatiter.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/flatiter.pyi @@ -1,25 +1,38 @@ from typing import Any import numpy as np -import numpy._typing as npt +import numpy.typing as npt +class _Index: + def __index__(self) -> int: ... -class Index: - def __index__(self) -> int: - ... +class _MyArray: + def __array__(self) -> np.ndarray[tuple[int], np.dtypes.Float64DType]: ... 
+_index: _Index +_my_array: _MyArray +_something: Any +_dtype: np.dtype[np.int8] -a: np.flatiter[npt.NDArray[np.float64]] -supports_array: npt._SupportsArray[np.dtype[np.float64]] +_a_nd: np.flatiter[npt.NDArray[np.float64]] -a.base = Any # E: Property "base" defined in "flatiter" is read-only -a.coords = Any # E: Property "coords" defined in "flatiter" is read-only -a.index = Any # E: Property "index" defined in "flatiter" is read-only -a.copy(order='C') # E: Unexpected keyword argument +### + +_a_nd.base = _something # type: ignore[misc] +_a_nd.coords = _something # type: ignore[misc] +_a_nd.index = _something # type: ignore[misc] + +_a_nd.copy("C") # type: ignore[call-arg] +_a_nd.copy(order="C") # type: ignore[call-arg] # NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` # does not accept objects with the `__array__` or `__index__` protocols; # boolean indexing is just plain broken (gh-17175) -a[np.bool()] # E: No overload variant of "__getitem__" -a[Index()] # E: No overload variant of "__getitem__" -a[supports_array] # E: No overload variant of "__getitem__" +_a_nd[np.True_] # type: ignore[call-overload] +_a_nd[_index] # type: ignore[call-overload] +_a_nd[_my_array] # type: ignore[call-overload] + +# `dtype` and `copy` are no-ops in `flatiter.__array__` +_a_nd.__array__(_dtype) # type: ignore[arg-type] +_a_nd.__array__(dtype=_dtype) # type: ignore[call-arg] +_a_nd.__array__(copy=True) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/fromnumeric.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/fromnumeric.pyi index 33b6041..e226023 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -7,159 +7,142 @@ A = np.array(True, ndmin=2, dtype=bool) A.setflags(write=False) AR_U: npt.NDArray[np.str_] AR_M: npt.NDArray[np.datetime64] +AR_f4: npt.NDArray[np.float32] a = np.bool(True) -np.take(a, None) # E: No overload variant 
-np.take(a, axis=1.0) # E: No overload variant -np.take(A, out=1) # E: No overload variant -np.take(A, mode="bob") # E: No overload variant +np.take(a, None) # type: ignore[call-overload] +np.take(a, axis=1.0) # type: ignore[call-overload] +np.take(A, out=1) # type: ignore[call-overload] +np.take(A, mode="bob") # type: ignore[call-overload] -np.reshape(a, None) # E: No overload variant -np.reshape(A, 1, order="bob") # E: No overload variant +np.reshape(a, None) # type: ignore[call-overload] +np.reshape(A, 1, order="bob") # type: ignore[call-overload] -np.choose(a, None) # E: No overload variant -np.choose(a, out=1.0) # E: No overload variant -np.choose(A, mode="bob") # E: No overload variant - -np.repeat(a, None) # E: No overload variant -np.repeat(A, 1, axis=1.0) # E: No overload variant - -np.swapaxes(A, None, 1) # E: No overload variant -np.swapaxes(A, 1, [0]) # E: No overload variant - -np.transpose(A, axes=1.0) # E: No overload variant - -np.partition(a, None) # E: No overload variant -np.partition( # E: No overload variant - a, 0, axis="bob" -) -np.partition( # E: No overload variant - A, 0, kind="bob" -) -np.partition( - A, 0, order=range(5) # E: Argument "order" to "partition" has incompatible type -) - -np.argpartition( - a, None # E: incompatible type -) -np.argpartition( - a, 0, axis="bob" # E: incompatible type -) -np.argpartition( - A, 0, kind="bob" # E: incompatible type -) -np.argpartition( - A, 0, order=range(5) # E: Argument "order" to "argpartition" has incompatible type -) - -np.sort(A, axis="bob") # E: No overload variant -np.sort(A, kind="bob") # E: No overload variant -np.sort(A, order=range(5)) # E: Argument "order" to "sort" has incompatible type - -np.argsort(A, axis="bob") # E: Argument "axis" to "argsort" has incompatible type -np.argsort(A, kind="bob") # E: Argument "kind" to "argsort" has incompatible type -np.argsort(A, order=range(5)) # E: Argument "order" to "argsort" has incompatible type - -np.argmax(A, axis="bob") # E: No overload 
variant of "argmax" matches argument type -np.argmax(A, kind="bob") # E: No overload variant of "argmax" matches argument type - -np.argmin(A, axis="bob") # E: No overload variant of "argmin" matches argument type -np.argmin(A, kind="bob") # E: No overload variant of "argmin" matches argument type - -np.searchsorted( # E: No overload variant of "searchsorted" matches argument type - A[0], 0, side="bob" -) -np.searchsorted( # E: No overload variant of "searchsorted" matches argument type - A[0], 0, sorter=1.0 -) - -np.resize(A, 1.0) # E: No overload variant - -np.squeeze(A, 1.0) # E: No overload variant of "squeeze" matches argument type - -np.diagonal(A, offset=None) # E: No overload variant -np.diagonal(A, axis1="bob") # E: No overload variant -np.diagonal(A, axis2=[]) # E: No overload variant - -np.trace(A, offset=None) # E: No overload variant -np.trace(A, axis1="bob") # E: No overload variant -np.trace(A, axis2=[]) # E: No overload variant - -np.ravel(a, order="bob") # E: No overload variant - -np.nonzero(0) # E: No overload variant - -np.compress( # E: No overload variant - [True], A, axis=1.0 -) - -np.clip(a, 1, 2, out=1) # E: No overload variant of "clip" matches argument type - -np.sum(a, axis=1.0) # E: No overload variant -np.sum(a, keepdims=1.0) # E: No overload variant -np.sum(a, initial=[1]) # E: No overload variant - -np.all(a, axis=1.0) # E: No overload variant -np.all(a, keepdims=1.0) # E: No overload variant -np.all(a, out=1.0) # E: No overload variant - -np.any(a, axis=1.0) # E: No overload variant -np.any(a, keepdims=1.0) # E: No overload variant -np.any(a, out=1.0) # E: No overload variant - -np.cumsum(a, axis=1.0) # E: No overload variant -np.cumsum(a, dtype=1.0) # E: No overload variant -np.cumsum(a, out=1.0) # E: No overload variant - -np.ptp(a, axis=1.0) # E: No overload variant -np.ptp(a, keepdims=1.0) # E: No overload variant -np.ptp(a, out=1.0) # E: No overload variant - -np.amax(a, axis=1.0) # E: No overload variant -np.amax(a, 
keepdims=1.0) # E: No overload variant -np.amax(a, out=1.0) # E: No overload variant -np.amax(a, initial=[1.0]) # E: No overload variant -np.amax(a, where=[1.0]) # E: incompatible type - -np.amin(a, axis=1.0) # E: No overload variant -np.amin(a, keepdims=1.0) # E: No overload variant -np.amin(a, out=1.0) # E: No overload variant -np.amin(a, initial=[1.0]) # E: No overload variant -np.amin(a, where=[1.0]) # E: incompatible type - -np.prod(a, axis=1.0) # E: No overload variant -np.prod(a, out=False) # E: No overload variant -np.prod(a, keepdims=1.0) # E: No overload variant -np.prod(a, initial=int) # E: No overload variant -np.prod(a, where=1.0) # E: No overload variant -np.prod(AR_U) # E: incompatible type - -np.cumprod(a, axis=1.0) # E: No overload variant -np.cumprod(a, out=False) # E: No overload variant -np.cumprod(AR_U) # E: incompatible type - -np.size(a, axis=1.0) # E: Argument "axis" to "size" has incompatible type - -np.around(a, decimals=1.0) # E: No overload variant -np.around(a, out=type) # E: No overload variant -np.around(AR_U) # E: incompatible type - -np.mean(a, axis=1.0) # E: No overload variant -np.mean(a, out=False) # E: No overload variant -np.mean(a, keepdims=1.0) # E: No overload variant -np.mean(AR_U) # E: incompatible type -np.mean(AR_M) # E: incompatible type - -np.std(a, axis=1.0) # E: No overload variant -np.std(a, out=False) # E: No overload variant -np.std(a, ddof='test') # E: No overload variant -np.std(a, keepdims=1.0) # E: No overload variant -np.std(AR_U) # E: incompatible type +np.choose(a, None) # type: ignore[call-overload] +np.choose(a, out=1.0) # type: ignore[call-overload] +np.choose(A, mode="bob") # type: ignore[call-overload] -np.var(a, axis=1.0) # E: No overload variant -np.var(a, out=False) # E: No overload variant -np.var(a, ddof='test') # E: No overload variant -np.var(a, keepdims=1.0) # E: No overload variant -np.var(AR_U) # E: incompatible type +np.repeat(a, None) # type: ignore[call-overload] +np.repeat(A, 1, axis=1.0) 
# type: ignore[call-overload] + +np.swapaxes(A, None, 1) # type: ignore[call-overload] +np.swapaxes(A, 1, [0]) # type: ignore[call-overload] + +np.transpose(A, axes=1.0) # type: ignore[call-overload] + +np.partition(a, None) # type: ignore[call-overload] +np.partition(a, 0, axis="bob") # type: ignore[call-overload] +np.partition(A, 0, kind="bob") # type: ignore[call-overload] +np.partition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.argpartition(a, None) # type: ignore[arg-type] +np.argpartition(a, 0, axis="bob") # type: ignore[arg-type] +np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.sort(A, axis="bob") # type: ignore[call-overload] +np.sort(A, kind="bob") # type: ignore[call-overload] +np.sort(A, order=range(5)) # type: ignore[arg-type] + +np.argsort(A, axis="bob") # type: ignore[arg-type] +np.argsort(A, kind="bob") # type: ignore[arg-type] +np.argsort(A, order=range(5)) # type: ignore[arg-type] + +np.argmax(A, axis="bob") # type: ignore[call-overload] +np.argmax(A, kind="bob") # type: ignore[call-overload] +np.argmax(A, out=AR_f4) # type: ignore[type-var] + +np.argmin(A, axis="bob") # type: ignore[call-overload] +np.argmin(A, kind="bob") # type: ignore[call-overload] +np.argmin(A, out=AR_f4) # type: ignore[type-var] + +np.searchsorted(A[0], 0, side="bob") # type: ignore[call-overload] +np.searchsorted(A[0], 0, sorter=1.0) # type: ignore[call-overload] + +np.resize(A, 1.0) # type: ignore[call-overload] + +np.squeeze(A, 1.0) # type: ignore[call-overload] + +np.diagonal(A, offset=None) # type: ignore[call-overload] +np.diagonal(A, axis1="bob") # type: ignore[call-overload] +np.diagonal(A, axis2=[]) # type: ignore[call-overload] + +np.trace(A, offset=None) # type: ignore[call-overload] +np.trace(A, axis1="bob") # type: ignore[call-overload] +np.trace(A, axis2=[]) # type: ignore[call-overload] + +np.ravel(a, order="bob") # type: ignore[call-overload] + +np.nonzero(0) # type: 
ignore[arg-type] + +np.compress([True], A, axis=1.0) # type: ignore[call-overload] + +np.clip(a, 1, 2, out=1) # type: ignore[call-overload] + +np.sum(a, axis=1.0) # type: ignore[call-overload] +np.sum(a, keepdims=1.0) # type: ignore[call-overload] +np.sum(a, initial=[1]) # type: ignore[call-overload] + +np.all(a, axis=1.0) # type: ignore[call-overload] +np.all(a, keepdims=1.0) # type: ignore[call-overload] +np.all(a, out=1.0) # type: ignore[call-overload] + +np.any(a, axis=1.0) # type: ignore[call-overload] +np.any(a, keepdims=1.0) # type: ignore[call-overload] +np.any(a, out=1.0) # type: ignore[call-overload] + +np.cumsum(a, axis=1.0) # type: ignore[call-overload] +np.cumsum(a, dtype=1.0) # type: ignore[call-overload] +np.cumsum(a, out=1.0) # type: ignore[call-overload] + +np.ptp(a, axis=1.0) # type: ignore[call-overload] +np.ptp(a, keepdims=1.0) # type: ignore[call-overload] +np.ptp(a, out=1.0) # type: ignore[call-overload] + +np.amax(a, axis=1.0) # type: ignore[call-overload] +np.amax(a, keepdims=1.0) # type: ignore[call-overload] +np.amax(a, out=1.0) # type: ignore[call-overload] +np.amax(a, initial=[1.0]) # type: ignore[call-overload] +np.amax(a, where=[1.0]) # type: ignore[arg-type] + +np.amin(a, axis=1.0) # type: ignore[call-overload] +np.amin(a, keepdims=1.0) # type: ignore[call-overload] +np.amin(a, out=1.0) # type: ignore[call-overload] +np.amin(a, initial=[1.0]) # type: ignore[call-overload] +np.amin(a, where=[1.0]) # type: ignore[arg-type] + +np.prod(a, axis=1.0) # type: ignore[call-overload] +np.prod(a, out=False) # type: ignore[call-overload] +np.prod(a, keepdims=1.0) # type: ignore[call-overload] +np.prod(a, initial=int) # type: ignore[call-overload] +np.prod(a, where=1.0) # type: ignore[call-overload] +np.prod(AR_U) # type: ignore[arg-type] + +np.cumprod(a, axis=1.0) # type: ignore[call-overload] +np.cumprod(a, out=False) # type: ignore[call-overload] +np.cumprod(AR_U) # type: ignore[arg-type] + +np.size(a, axis=1.0) # type: ignore[arg-type] + 
+np.around(a, decimals=1.0) # type: ignore[call-overload] +np.around(a, out=type) # type: ignore[call-overload] +np.around(AR_U) # type: ignore[arg-type] + +np.mean(a, axis=1.0) # type: ignore[call-overload] +np.mean(a, out=False) # type: ignore[call-overload] +np.mean(a, keepdims=1.0) # type: ignore[call-overload] +np.mean(AR_U) # type: ignore[arg-type] +np.mean(AR_M) # type: ignore[arg-type] + +np.std(a, axis=1.0) # type: ignore[call-overload] +np.std(a, out=False) # type: ignore[call-overload] +np.std(a, ddof="test") # type: ignore[call-overload] +np.std(a, keepdims=1.0) # type: ignore[call-overload] +np.std(AR_U) # type: ignore[arg-type] + +np.var(a, axis=1.0) # type: ignore[call-overload] +np.var(a, out=False) # type: ignore[call-overload] +np.var(a, ddof="test") # type: ignore[call-overload] +np.var(a, keepdims=1.0) # type: ignore[call-overload] +np.var(AR_U) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/histograms.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/histograms.pyi index 8d25fe8..9f2d799 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/histograms.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/histograms.pyi @@ -4,9 +4,9 @@ import numpy.typing as npt AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] -np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # E: incompatible type +np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] -np.histogram(AR_i8, range=(0, 1, 2)) # E: incompatible type +np.histogram(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] -np.histogramdd(AR_i8, range=(0, 1)) # E: incompatible type -np.histogramdd(AR_i8, range=[(0, 1, 2)]) # E: incompatible type +np.histogramdd(AR_i8, range=(0, 1)) # type: ignore[arg-type] +np.histogramdd(AR_i8, range=[(0, 1, 2)]) # type: ignore[list-item] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/index_tricks.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/index_tricks.pyi index 59a010d..db5e0b9 100644 --- 
a/blimgui/dist64/numpy/typing/tests/data/fail/index_tricks.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/index_tricks.pyi @@ -3,12 +3,12 @@ import numpy as np AR_LIKE_i: list[int] AR_LIKE_f: list[float] -np.ndindex([1, 2, 3]) # E: No overload variant -np.unravel_index(AR_LIKE_f, (1, 2, 3)) # E: incompatible type -np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: No overload variant -np.mgrid[1] # E: Invalid index type -np.mgrid[...] # E: Invalid index type -np.ogrid[1] # E: Invalid index type -np.ogrid[...] # E: Invalid index type -np.fill_diagonal(AR_LIKE_f, 2) # E: incompatible type -np.diag_indices(1.0) # E: incompatible type +np.ndindex([1, 2, 3]) # type: ignore[call-overload] +np.unravel_index(AR_LIKE_f, (1, 2, 3)) # type: ignore[arg-type] +np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # type: ignore[call-overload] +np.mgrid[1] # type: ignore[index] +np.mgrid[...] # type: ignore[index] +np.ogrid[1] # type: ignore[index] +np.ogrid[...] # type: ignore[index] +np.fill_diagonal(AR_LIKE_f, 2) # type: ignore[arg-type] +np.diag_indices(1.0) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/lib_function_base.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/lib_function_base.pyi index 1cd7f3d..2b7b497 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/lib_function_base.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -13,50 +13,48 @@ AR_b_list: list[npt.NDArray[np.bool]] def fn_none_i(a: None, /) -> npt.NDArray[Any]: ... def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ... 
-np.average(AR_m) # E: incompatible type -np.select(1, [AR_f8]) # E: incompatible type -np.angle(AR_m) # E: incompatible type -np.unwrap(AR_m) # E: incompatible type -np.unwrap(AR_c16) # E: incompatible type -np.trim_zeros(1) # E: incompatible type -np.place(1, [True], 1.5) # E: incompatible type -np.vectorize(1) # E: incompatible type -np.place(AR_f8, slice(None), 5) # E: incompatible type - -np.piecewise(AR_f8, True, [fn_ar_i], 42) # E: No overload variants -# TODO: enable these once mypy actually supports ParamSpec (released in 2021) -# NOTE: pyright correctly reports errors for these (`reportCallIssue`) -# np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # E: No overload variants -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # E: No overload variant -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # E: No overload variant -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # E: No overload variant -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # E: No overload variant - -np.interp(AR_f8, AR_c16, AR_f8) # E: incompatible type -np.interp(AR_c16, AR_f8, AR_f8) # E: incompatible type -np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # E: No overload variant -np.interp(AR_f8, AR_f8, AR_O) # E: incompatible type - -np.cov(AR_m) # E: incompatible type -np.cov(AR_O) # E: incompatible type -np.corrcoef(AR_m) # E: incompatible type -np.corrcoef(AR_O) # E: incompatible type -np.corrcoef(AR_f8, bias=True) # E: No overload variant -np.corrcoef(AR_f8, ddof=2) # E: No overload variant -np.blackman(1j) # E: incompatible type -np.bartlett(1j) # E: incompatible type -np.hanning(1j) # E: incompatible type -np.hamming(1j) # E: incompatible type -np.hamming(AR_c16) # E: incompatible type -np.kaiser(1j, 1) # E: incompatible type -np.sinc(AR_O) # E: incompatible type -np.median(AR_M) # E: incompatible type - -np.percentile(AR_f8, 50j) # E: No overload variant -np.percentile(AR_f8, 50, interpolation="bob") # E: No overload variant -np.quantile(AR_f8, 0.5j) # E: No overload 
variant -np.quantile(AR_f8, 0.5, interpolation="bob") # E: No overload variant -np.meshgrid(AR_f8, AR_f8, indexing="bob") # E: incompatible type -np.delete(AR_f8, AR_f8) # E: incompatible type -np.insert(AR_f8, AR_f8, 1.5) # E: incompatible type -np.digitize(AR_f8, 1j) # E: No overload variant +np.average(AR_m) # type: ignore[type-var] +np.select(1, [AR_f8]) # type: ignore[call-overload] +np.angle(AR_m) # type: ignore[type-var] +np.unwrap(AR_m) # type: ignore[type-var] +np.unwrap(AR_c16) # type: ignore[type-var] +np.trim_zeros(1) # type: ignore[arg-type] +np.place(1, [True], 1.5) # type: ignore[arg-type] +np.vectorize(1) # type: ignore[arg-type] +np.place(AR_f8, slice(None), 5) # type: ignore[arg-type] + +np.piecewise(AR_f8, True, [fn_ar_i], "wrong") # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[list-item] + +np.interp(AR_f8, AR_c16, AR_f8) # type: ignore[arg-type] +np.interp(AR_c16, AR_f8, AR_f8) # type: ignore[arg-type] +np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # type: ignore[call-overload] +np.interp(AR_f8, AR_f8, AR_O) # type: ignore[arg-type] + +np.cov(AR_m) # type: ignore[type-var] +np.cov(AR_O) # type: ignore[type-var] +np.corrcoef(AR_m) # type: ignore[type-var] +np.corrcoef(AR_O) # type: ignore[type-var] +np.corrcoef(AR_f8, bias=True) # type: ignore[call-overload] +np.corrcoef(AR_f8, ddof=2) # type: ignore[call-overload] +np.blackman(1j) # type: ignore[arg-type] +np.bartlett(1j) # type: ignore[arg-type] +np.hanning(1j) # type: ignore[arg-type] +np.hamming(1j) # type: ignore[arg-type] +np.hamming(AR_c16) # type: ignore[arg-type] +np.kaiser(1j, 1) # type: ignore[arg-type] +np.sinc(AR_O) # type: 
ignore[type-var] +np.median(AR_M) # type: ignore[type-var] + +np.percentile(AR_f8, 50j) # type: ignore[call-overload] +np.percentile(AR_f8, 50, interpolation="bob") # type: ignore[call-overload] +np.quantile(AR_f8, 0.5j) # type: ignore[call-overload] +np.quantile(AR_f8, 0.5, interpolation="bob") # type: ignore[call-overload] +np.meshgrid(AR_f8, AR_f8, indexing="bob") # type: ignore[call-overload] +np.delete(AR_f8, AR_f8) # type: ignore[arg-type] +np.insert(AR_f8, AR_f8, 1.5) # type: ignore[arg-type] +np.digitize(AR_f8, 1j) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/lib_polynomial.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/lib_polynomial.pyi index b3eb2f9..0a79662 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/lib_polynomial.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/lib_polynomial.pyi @@ -8,22 +8,22 @@ AR_U: npt.NDArray[np.str_] poly_obj: np.poly1d -np.polymul(AR_f8, AR_U) # E: incompatible type -np.polydiv(AR_f8, AR_U) # E: incompatible type +np.polymul(AR_f8, AR_U) # type: ignore[arg-type] +np.polydiv(AR_f8, AR_U) # type: ignore[arg-type] -5**poly_obj # E: No overload variant +5**poly_obj # type: ignore[operator] -np.polyint(AR_U) # E: incompatible type -np.polyint(AR_f8, m=1j) # E: No overload variant +np.polyint(AR_U) # type: ignore[arg-type] +np.polyint(AR_f8, m=1j) # type: ignore[call-overload] -np.polyder(AR_U) # E: incompatible type -np.polyder(AR_f8, m=1j) # E: No overload variant +np.polyder(AR_U) # type: ignore[arg-type] +np.polyder(AR_f8, m=1j) # type: ignore[call-overload] -np.polyfit(AR_O, AR_f8, 1) # E: incompatible type -np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # E: No overload variant -np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # E: incompatible type -np.polyfit(AR_f8, AR_f8, 1, cov="bob") # E: No overload variant +np.polyfit(AR_O, AR_f8, 1) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # type: ignore[call-overload] +np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # type: 
ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, cov="bob") # type: ignore[call-overload] -np.polyval(AR_f8, AR_U) # E: incompatible type -np.polyadd(AR_f8, AR_U) # E: incompatible type -np.polysub(AR_f8, AR_U) # E: incompatible type +np.polyval(AR_f8, AR_U) # type: ignore[arg-type] +np.polyadd(AR_f8, AR_U) # type: ignore[arg-type] +np.polysub(AR_f8, AR_U) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/lib_utils.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/lib_utils.pyi index b6d76a8..4766506 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/lib_utils.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/lib_utils.pyi @@ -1,3 +1,3 @@ import numpy.lib.array_utils as array_utils -array_utils.byte_bounds(1) # E: incompatible type +array_utils.byte_bounds(1) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/lib_version.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/lib_version.pyi index 8c91db7..e011049 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/lib_version.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/lib_version.pyi @@ -2,5 +2,5 @@ from numpy.lib import NumpyVersion version: NumpyVersion -NumpyVersion(b"1.8.0") # E: incompatible type -version >= b"1.8.0" # E: Unsupported operand types +NumpyVersion(b"1.8.0") # type: ignore[arg-type] +version >= b"1.8.0" # type: ignore[operator] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/linalg.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/linalg.pyi index e117c28..890d714 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/linalg.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/linalg.pyi @@ -5,44 +5,48 @@ AR_f8: npt.NDArray[np.float64] AR_O: npt.NDArray[np.object_] AR_M: npt.NDArray[np.datetime64] -np.linalg.tensorsolve(AR_O, AR_O) # E: incompatible type +np.linalg.tensorsolve(AR_O, AR_O) # type: ignore[arg-type] -np.linalg.solve(AR_O, AR_O) # E: incompatible type +np.linalg.solve(AR_O, AR_O) # type: 
ignore[arg-type] -np.linalg.tensorinv(AR_O) # E: incompatible type +np.linalg.tensorinv(AR_O) # type: ignore[arg-type] -np.linalg.inv(AR_O) # E: incompatible type +np.linalg.inv(AR_O) # type: ignore[arg-type] -np.linalg.matrix_power(AR_M, 5) # E: incompatible type +np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] -np.linalg.cholesky(AR_O) # E: incompatible type +np.linalg.cholesky(AR_O) # type: ignore[arg-type] -np.linalg.qr(AR_O) # E: incompatible type -np.linalg.qr(AR_f8, mode="bob") # E: No overload variant +np.linalg.qr(AR_O) # type: ignore[arg-type] +np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] -np.linalg.eigvals(AR_O) # E: incompatible type +np.linalg.eigvals(AR_O) # type: ignore[arg-type] -np.linalg.eigvalsh(AR_O) # E: incompatible type -np.linalg.eigvalsh(AR_O, UPLO="bob") # E: No overload variant +np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] +np.linalg.eigvalsh(AR_O, UPLO="bob") # type: ignore[call-overload] -np.linalg.eig(AR_O) # E: incompatible type +np.linalg.eig(AR_O) # type: ignore[arg-type] -np.linalg.eigh(AR_O) # E: incompatible type -np.linalg.eigh(AR_O, UPLO="bob") # E: No overload variant +np.linalg.eigh(AR_O) # type: ignore[arg-type] +np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] -np.linalg.svd(AR_O) # E: incompatible type +np.linalg.svd(AR_O) # type: ignore[arg-type] -np.linalg.cond(AR_O) # E: incompatible type -np.linalg.cond(AR_f8, p="bob") # E: incompatible type +np.linalg.svdvals(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_M) # type: ignore[arg-type] +np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] -np.linalg.matrix_rank(AR_O) # E: incompatible type +np.linalg.cond(AR_O) # type: ignore[arg-type] +np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] -np.linalg.pinv(AR_O) # E: incompatible type +np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] -np.linalg.slogdet(AR_O) # E: incompatible type +np.linalg.pinv(AR_O) # type: ignore[arg-type] -np.linalg.det(AR_O) # E: 
incompatible type +np.linalg.slogdet(AR_O) # type: ignore[arg-type] -np.linalg.norm(AR_f8, ord="bob") # E: No overload variant +np.linalg.det(AR_O) # type: ignore[arg-type] -np.linalg.multi_dot([AR_M]) # E: incompatible type +np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] + +np.linalg.multi_dot([AR_M]) # type: ignore[list-item] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/ma.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/ma.pyi new file mode 100644 index 0000000..084ae97 --- /dev/null +++ b/blimgui/dist64/numpy/typing/tests/data/fail/ma.pyi @@ -0,0 +1,155 @@ +from typing import TypeAlias, TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _AnyShape + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] + +MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] +MAR_b: MaskedArray[np.bool] +MAR_c: MaskedArray[np.complex128] +MAR_td64: MaskedArray[np.timedelta64] + +AR_b: npt.NDArray[np.bool] + +MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] +MAR_1d_f8.dtype = np.bool # type: ignore[assignment] + +def invalid_recordmask_setter() -> None: + # We make an inner function for this one to avoid the + # `NoReturn` causing an early exit for type checkers. 
+ MAR_1d_f8.recordmask = [True] # type: ignore[assignment] + +np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.min(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.max(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.max(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.ptp(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.ptp(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmin(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmin(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, axis=(1,)) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, 
keepdims=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmax(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmax(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, axis=(0,)) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.all(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.any(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.sort(axis=(0, 1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] +MAR_1d_f8.sort(kind="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(order=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(endwith="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(fill_value=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(stable="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(stable=True) # type: ignore[arg-type] + +MAR_1d_f8.take(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.take(out=1) # type: ignore[call-overload] +MAR_1d_f8.take(mode="bob") # type: ignore[call-overload] + +np.ma.take(None) # type: ignore[call-overload] +np.ma.take(axis=1.0) # type: ignore[call-overload] +np.ma.take(out=1) # type: ignore[call-overload] +np.ma.take(mode="bob") # type: ignore[call-overload] + 
+MAR_1d_f8.partition(["cabbage"]) # type: ignore[arg-type] +MAR_1d_f8.partition(axis=(0, 1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] + +MAR_1d_f8.argpartition(["cabbage"]) # type: ignore[arg-type] +MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] + +np.ma.ndim(lambda: "lambda") # type: ignore[arg-type] + +np.ma.size(AR_b, axis="0") # type: ignore[arg-type] + +MAR_1d_f8 >= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 > (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 <= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 < (lambda x: "mango") # type: ignore[operator] + +MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] + +np.ma.count(MAR_1d_f8, axis=0.) 
# type: ignore[call-overload] + +MAR_1d_f8.put(4, 999, mode="flip") # type: ignore[arg-type] + +np.ma.put(MAR_1d_f8, 4, 999, mode="flip") # type: ignore[arg-type] + +np.ma.put([1, 1, 3], 0, 999) # type: ignore[arg-type] + +np.ma.compressed(lambda: "compress me") # type: ignore[call-overload] + +np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5) # type: ignore[arg-type] + +np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol=".4") # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol=".5") # type: ignore[arg-type] + +MAR_1d_f8.__setmask__("mask") # type: ignore[arg-type] + +MAR_b *= 2 # type: ignore[arg-type] +MAR_c //= 2 # type: ignore[misc] +MAR_td64 **= 2 # type: ignore[misc] + +MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] + +MAR_1d_f8.argsort(axis=(1, 0)) # type: ignore[arg-type] + +np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask="yes") # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), order="Corinthian") # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/memmap.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/memmap.pyi index 71d8c38..7cd985e 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/memmap.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/memmap.pyi @@ -1,5 +1,5 @@ import numpy as np with open("file.txt", "r") as f: - np.memmap(f) # E: No overload variant -np.memmap("test.txt", shape=[10, 5]) # E: No overload variant + np.memmap(f) # type: ignore[call-overload] +np.memmap("test.txt", shape=[10, 5]) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/modules.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/modules.pyi index 7023e03..2678cd5 100644 --- 
a/blimgui/dist64/numpy/typing/tests/data/fail/modules.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/modules.pyi @@ -1,17 +1,17 @@ import numpy as np -np.testing.bob # E: Module has no attribute -np.bob # E: Module has no attribute +np.testing.bob # type: ignore[attr-defined] +np.bob # type: ignore[attr-defined] # Stdlib modules in the namespace by accident -np.warnings # E: Module has no attribute -np.sys # E: Module has no attribute -np.os # E: Module "numpy" does not explicitly export -np.math # E: Module has no attribute +np.warnings # type: ignore[attr-defined] +np.sys # type: ignore[attr-defined] +np.os # type: ignore[attr-defined] +np.math # type: ignore[attr-defined] # Public sub-modules that are not imported to their parent module by default; # e.g. one must first execute `import numpy.lib.recfunctions` -np.lib.recfunctions # E: Module has no attribute +np.lib.recfunctions # type: ignore[attr-defined] -np.__deprecated_attrs__ # E: Module has no attribute -np.__expired_functions__ # E: Module has no attribute +np.__deprecated_attrs__ # type: ignore[attr-defined] +np.__expired_functions__ # type: ignore[attr-defined] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/multiarray.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/multiarray.pyi index 471ba10..66cd6f7 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/multiarray.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/multiarray.pyi @@ -15,39 +15,38 @@ AR_LIKE_f: list[float] def func(a: int) -> None: ... 
-np.where(AR_b, 1) # E: No overload variant +np.where(AR_b, 1) # type: ignore[call-overload] -np.can_cast(AR_f8, 1) # E: incompatible type +np.can_cast(AR_f8, 1) # type: ignore[arg-type] -np.vdot(AR_M, AR_M) # E: incompatible type +np.vdot(AR_M, AR_M) # type: ignore[arg-type] -np.copyto(AR_LIKE_f, AR_f8) # E: incompatible type +np.copyto(AR_LIKE_f, AR_f8) # type: ignore[arg-type] -np.putmask(AR_LIKE_f, [True, True, False], 1.5) # E: incompatible type +np.putmask(AR_LIKE_f, [True, True, False], 1.5) # type: ignore[arg-type] -np.packbits(AR_f8) # E: incompatible type -np.packbits(AR_u1, bitorder=">") # E: incompatible type +np.packbits(AR_f8) # type: ignore[arg-type] +np.packbits(AR_u1, bitorder=">") # type: ignore[call-overload] -np.unpackbits(AR_i8) # E: incompatible type -np.unpackbits(AR_u1, bitorder=">") # E: incompatible type +np.unpackbits(AR_i8) # type: ignore[arg-type] +np.unpackbits(AR_u1, bitorder=">") # type: ignore[call-overload] -np.shares_memory(1, 1, max_work=i8) # E: incompatible type -np.may_share_memory(1, 1, max_work=i8) # E: incompatible type +np.shares_memory(1, 1, max_work=i8) # type: ignore[arg-type] +np.may_share_memory(1, 1, max_work=i8) # type: ignore[arg-type] -np.arange(M) # E: No overload variant -np.arange(stop=10) # E: No overload variant +np.arange(stop=10) # type: ignore[call-overload] -np.datetime_data(int) # E: incompatible type +np.datetime_data(int) # type: ignore[arg-type] -np.busday_offset("2012", 10) # E: No overload variant +np.busday_offset("2012", 10) # type: ignore[call-overload] -np.datetime_as_string("2012") # E: No overload variant +np.datetime_as_string("2012") # type: ignore[call-overload] -np.char.compare_chararrays("a", b"a", "==", False) # E: No overload variant +np.char.compare_chararrays("a", b"a", "==", False) # type: ignore[call-overload] -np.nested_iters([AR_i8, AR_i8]) # E: Missing positional argument -np.nested_iters([AR_i8, AR_i8], 0) # E: incompatible type -np.nested_iters([AR_i8, AR_i8], [0]) # E: 
incompatible type -np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # E: incompatible type -np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # E: incompatible type -np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # E: incompatible type +np.nested_iters([AR_i8, AR_i8]) # type: ignore[call-arg] +np.nested_iters([AR_i8, AR_i8], 0) # type: ignore[arg-type] +np.nested_iters([AR_i8, AR_i8], [0]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/ndarray.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/ndarray.pyi index c20c89e..680d288 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/ndarray.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/ndarray.pyi @@ -8,4 +8,4 @@ import numpy as np # # for more context. float_array = np.array([1.0]) -float_array.dtype = np.bool # E: Property "dtype" defined in "ndarray" is read-only +float_array.dtype = np.bool # type: ignore[assignment, misc] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/ndarray_misc.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/ndarray_misc.pyi index 9e2c8e2..845d36c 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/ndarray_misc.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -5,6 +5,7 @@ More extensive tests are performed for the methods' function-based counterpart in `../from_numeric.py`. 
""" +from typing import Never import numpy as np import numpy.typing as npt @@ -16,21 +17,33 @@ AR_b: npt.NDArray[np.bool] ctypes_obj = AR_f8.ctypes -f8.argpartition(0) # E: has no attribute -f8.diagonal() # E: has no attribute -f8.dot(1) # E: has no attribute -f8.nonzero() # E: has no attribute -f8.partition(0) # E: has no attribute -f8.put(0, 2) # E: has no attribute -f8.setfield(2, np.float64) # E: has no attribute -f8.sort() # E: has no attribute -f8.trace() # E: has no attribute +f8.argpartition(0) # type: ignore[attr-defined] +f8.partition(0) # type: ignore[attr-defined] +f8.dot(1) # type: ignore[attr-defined] -AR_M.__complex__() # E: Invalid self argument -AR_b.__index__() # E: Invalid self argument +# NOTE: The following functions retur `Never`, causing mypy to stop analysis at that +# point, which we circumvent by wrapping them in a function. -AR_f8[1.5] # E: No overload variant -AR_f8["field_a"] # E: No overload variant -AR_f8[["field_a", "field_b"]] # E: Invalid index type +def f8_diagonal(x: np.float64) -> Never: + return x.diagonal() # type: ignore[misc] -AR_f8.__array_finalize__(object()) # E: incompatible type +def f8_nonzero(x: np.float64) -> Never: + return x.nonzero() # type: ignore[misc] + +def f8_setfield(x: np.float64) -> Never: + return x.setfield(2, np.float64) # type: ignore[misc] + +def f8_sort(x: np.float64) -> Never: + return x.sort() # type: ignore[misc] + +def f8_trace(x: np.float64) -> Never: + return x.trace() # type: ignore[misc] + +AR_M.__complex__() # type: ignore[misc] +AR_b.__index__() # type: ignore[misc] + +AR_f8[1.5] # type: ignore[call-overload] +AR_f8["field_a"] # type: ignore[call-overload] +AR_f8[["field_a", "field_b"]] # type: ignore[index] + +AR_f8.__array_finalize__(object()) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/nditer.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/nditer.pyi index eb8a8e2..fdd0285 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/nditer.pyi +++ 
b/blimgui/dist64/numpy/typing/tests/data/fail/nditer.pyi @@ -1,8 +1,8 @@ import numpy as np -class Test(np.nditer): ... # E: Cannot inherit from final class +class Test(np.nditer): ... # type: ignore[misc] -np.nditer([0, 1], flags=["test"]) # E: incompatible type -np.nditer([0, 1], op_flags=[["test"]]) # E: incompatible type -np.nditer([0, 1], itershape=(1.0,)) # E: incompatible type -np.nditer([0, 1], buffersize=1.0) # E: incompatible type +np.nditer([0, 1], flags=["test"]) # type: ignore[list-item] +np.nditer([0, 1], op_flags=[["test"]]) # type: ignore[list-item] +np.nditer([0, 1], itershape=(1.0,)) # type: ignore[arg-type] +np.nditer([0, 1], buffersize=1.0) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/nested_sequence.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/nested_sequence.pyi index b6fe437..6c49d5c 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/nested_sequence.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -1,4 +1,5 @@ from collections.abc import Sequence + from numpy._typing import _NestedSequence a: Sequence[float] @@ -7,11 +8,10 @@ c: tuple[str, ...] d: int e: str -def func(a: _NestedSequence[int]) -> None: - ... +def func(a: _NestedSequence[int]) -> None: ... 
-reveal_type(func(a)) # E: incompatible type -reveal_type(func(b)) # E: incompatible type -reveal_type(func(c)) # E: incompatible type -reveal_type(func(d)) # E: incompatible type -reveal_type(func(e)) # E: incompatible type +reveal_type(func(a)) # type: ignore[arg-type, misc] +reveal_type(func(b)) # type: ignore[arg-type, misc] +reveal_type(func(c)) # type: ignore[arg-type, misc] +reveal_type(func(d)) # type: ignore[arg-type, misc] +reveal_type(func(e)) # type: ignore[arg-type, misc] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/npyio.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/npyio.pyi index e9da277..591457b 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/npyio.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/npyio.pyi @@ -1,8 +1,8 @@ import pathlib from typing import IO -import numpy.typing as npt import numpy as np +import numpy.typing as npt str_path: str bytes_path: bytes @@ -10,16 +10,15 @@ pathlib_path: pathlib.Path str_file: IO[str] AR_i8: npt.NDArray[np.int64] -np.load(str_file) # E: incompatible type +np.load(str_file) # type: ignore[arg-type] -np.save(bytes_path, AR_i8) # E: No overload variant -# https://github.com/python/mypy/issues/16111 -# np.save(str_path, AR_i8, fix_imports=True) # W: deprecated +np.save(bytes_path, AR_i8) # type: ignore[arg-type] +np.save(str_path, AR_i8, fix_imports=True) # type: ignore[call-arg] -np.savez(bytes_path, AR_i8) # E: incompatible type +np.savez(bytes_path, AR_i8) # type: ignore[arg-type] -np.savez_compressed(bytes_path, AR_i8) # E: incompatible type +np.savez_compressed(bytes_path, AR_i8) # type: ignore[arg-type] -np.loadtxt(bytes_path) # E: incompatible type +np.loadtxt(bytes_path) # type: ignore[arg-type] -np.fromregex(bytes_path, ".", np.int64) # E: No overload variant +np.fromregex(bytes_path, ".", np.int64) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/numerictypes.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/numerictypes.pyi index 
dabd5b4..cbb9102 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/numerictypes.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/numerictypes.pyi @@ -1,5 +1,5 @@ import numpy as np -np.isdtype(1, np.int64) # E: incompatible type +np.isdtype(1, np.int64) # type: ignore[arg-type] -np.issubdtype(1, np.int64) # E: incompatible type +np.issubdtype(1, np.int64) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/random.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/random.pyi index 3370167..be63d1d 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/random.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/random.pyi @@ -8,55 +8,55 @@ SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) SEED_STR: str = "String seeding not allowed" # default rng -np.random.default_rng(SEED_FLOAT) # E: incompatible type -np.random.default_rng(SEED_ARR_FLOAT) # E: incompatible type -np.random.default_rng(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.default_rng(SEED_STR) # E: incompatible type +np.random.default_rng(SEED_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_STR) # type: ignore[arg-type] # Seed Sequence -np.random.SeedSequence(SEED_FLOAT) # E: incompatible type -np.random.SeedSequence(SEED_ARR_FLOAT) # E: incompatible type -np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.SeedSequence(SEED_SEED_SEQ) # E: incompatible type -np.random.SeedSequence(SEED_STR) # E: incompatible type +np.random.SeedSequence(SEED_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_SEED_SEQ) # type: ignore[arg-type] +np.random.SeedSequence(SEED_STR) # type: ignore[arg-type] seed_seq: np.random.bit_generator.SeedSequence = 
np.random.SeedSequence() -seed_seq.spawn(11.5) # E: incompatible type -seed_seq.generate_state(3.14) # E: incompatible type -seed_seq.generate_state(3, np.uint8) # E: incompatible type -seed_seq.generate_state(3, "uint8") # E: incompatible type -seed_seq.generate_state(3, "u1") # E: incompatible type -seed_seq.generate_state(3, np.uint16) # E: incompatible type -seed_seq.generate_state(3, "uint16") # E: incompatible type -seed_seq.generate_state(3, "u2") # E: incompatible type -seed_seq.generate_state(3, np.int32) # E: incompatible type -seed_seq.generate_state(3, "int32") # E: incompatible type -seed_seq.generate_state(3, "i4") # E: incompatible type +seed_seq.spawn(11.5) # type: ignore[arg-type] +seed_seq.generate_state(3.14) # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint8) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint8") # type: ignore[arg-type] +seed_seq.generate_state(3, "u1") # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint16) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint16") # type: ignore[arg-type] +seed_seq.generate_state(3, "u2") # type: ignore[arg-type] +seed_seq.generate_state(3, np.int32) # type: ignore[arg-type] +seed_seq.generate_state(3, "int32") # type: ignore[arg-type] +seed_seq.generate_state(3, "i4") # type: ignore[arg-type] # Bit Generators -np.random.MT19937(SEED_FLOAT) # E: incompatible type -np.random.MT19937(SEED_ARR_FLOAT) # E: incompatible type -np.random.MT19937(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.MT19937(SEED_STR) # E: incompatible type - -np.random.PCG64(SEED_FLOAT) # E: incompatible type -np.random.PCG64(SEED_ARR_FLOAT) # E: incompatible type -np.random.PCG64(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.PCG64(SEED_STR) # E: incompatible type - -np.random.Philox(SEED_FLOAT) # E: incompatible type -np.random.Philox(SEED_ARR_FLOAT) # E: incompatible type -np.random.Philox(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.Philox(SEED_STR) # E: incompatible 
type - -np.random.SFC64(SEED_FLOAT) # E: incompatible type -np.random.SFC64(SEED_ARR_FLOAT) # E: incompatible type -np.random.SFC64(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.SFC64(SEED_STR) # E: incompatible type +np.random.MT19937(SEED_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_STR) # type: ignore[arg-type] + +np.random.PCG64(SEED_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_STR) # type: ignore[arg-type] + +np.random.Philox(SEED_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_STR) # type: ignore[arg-type] + +np.random.SFC64(SEED_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_STR) # type: ignore[arg-type] # Generator -np.random.Generator(None) # E: incompatible type -np.random.Generator(12333283902830213) # E: incompatible type -np.random.Generator("OxFEEDF00D") # E: incompatible type -np.random.Generator([123, 234]) # E: incompatible type -np.random.Generator(np.array([123, 234], dtype="u4")) # E: incompatible type +np.random.Generator(None) # type: ignore[arg-type] +np.random.Generator(12333283902830213) # type: ignore[arg-type] +np.random.Generator("OxFEEDF00D") # type: ignore[arg-type] +np.random.Generator([123, 234]) # type: ignore[arg-type] +np.random.Generator(np.array([123, 234], dtype="u4")) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/rec.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/rec.pyi index 0c319f2..a6022b2 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/rec.pyi +++ 
b/blimgui/dist64/numpy/typing/tests/data/fail/rec.pyi @@ -3,15 +3,15 @@ import numpy.typing as npt AR_i8: npt.NDArray[np.int64] -np.rec.fromarrays(1) # E: No overload variant -np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant +np.rec.fromarrays(1) # type: ignore[call-overload] +np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] -np.rec.fromrecords(AR_i8) # E: incompatible type -np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant +np.rec.fromrecords(AR_i8) # type: ignore[arg-type] +np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] -np.rec.fromstring("string", dtype=[("f8", "f8")]) # E: No overload variant -np.rec.fromstring(b"bytes") # E: No overload variant -np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant +np.rec.fromstring("string", dtype=[("f8", "f8")]) # type: ignore[call-overload] +np.rec.fromstring(b"bytes") # type: ignore[call-overload] +np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] with open("test", "r") as f: - np.rec.fromfile(f, dtype=[("f8", "f8")]) # E: No overload variant + np.rec.fromfile(f, dtype=[("f8", "f8")]) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/scalars.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/scalars.pyi index 3fa395e..7d37034 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/scalars.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/scalars.pyi @@ -1,4 +1,3 @@ -import sys import numpy as np f2: np.float16 @@ -7,7 +6,7 @@ c8: np.complex64 # Construction -np.float32(3j) # E: incompatible type +np.float32(3j) # type: ignore[arg-type] # Technically the following examples are valid NumPy code. 
But they # are not considered a best practice, and people who wish to use the @@ -25,65 +24,63 @@ np.float32(3j) # E: incompatible type # https://github.com/numpy/numpy-stubs/issues/41 # # for more context. -np.float32([1.0, 0.0, 0.0]) # E: incompatible type -np.complex64([]) # E: incompatible type +np.float32([1.0, 0.0, 0.0]) # type: ignore[arg-type] +np.complex64([]) # type: ignore[call-overload] # TODO: protocols (can't check for non-existent protocols w/ __getattr__) -np.datetime64(0) # E: No overload variant +np.datetime64(0) # type: ignore[call-overload] class A: - def __float__(self): - return 1.0 - - -np.int8(A()) # E: incompatible type -np.int16(A()) # E: incompatible type -np.int32(A()) # E: incompatible type -np.int64(A()) # E: incompatible type -np.uint8(A()) # E: incompatible type -np.uint16(A()) # E: incompatible type -np.uint32(A()) # E: incompatible type -np.uint64(A()) # E: incompatible type - -np.void("test") # E: No overload variant -np.void("test", dtype=None) # E: No overload variant - -np.generic(1) # E: Cannot instantiate abstract class -np.number(1) # E: Cannot instantiate abstract class -np.integer(1) # E: Cannot instantiate abstract class -np.inexact(1) # E: Cannot instantiate abstract class -np.character("test") # E: Cannot instantiate abstract class -np.flexible(b"test") # E: Cannot instantiate abstract class - -np.float64(value=0.0) # E: Unexpected keyword argument -np.int64(value=0) # E: Unexpected keyword argument -np.uint64(value=0) # E: Unexpected keyword argument -np.complex128(value=0.0j) # E: No overload variant -np.str_(value='bob') # E: No overload variant -np.bytes_(value=b'test') # E: No overload variant -np.void(value=b'test') # E: No overload variant -np.bool(value=True) # E: Unexpected keyword argument -np.datetime64(value="2019") # E: No overload variant -np.timedelta64(value=0) # E: Unexpected keyword argument - -np.bytes_(b"hello", encoding='utf-8') # E: No overload variant -np.str_("hello", encoding='utf-8') # E: No 
overload variant - -f8.item(1) # E: incompatible type -f8.item((0, 1)) # E: incompatible type -f8.squeeze(axis=1) # E: incompatible type -f8.squeeze(axis=(0, 1)) # E: incompatible type -f8.transpose(1) # E: incompatible type + def __float__(self) -> float: ... + +np.int8(A()) # type: ignore[arg-type] +np.int16(A()) # type: ignore[arg-type] +np.int32(A()) # type: ignore[arg-type] +np.int64(A()) # type: ignore[arg-type] +np.uint8(A()) # type: ignore[arg-type] +np.uint16(A()) # type: ignore[arg-type] +np.uint32(A()) # type: ignore[arg-type] +np.uint64(A()) # type: ignore[arg-type] + +np.void("test") # type: ignore[call-overload] +np.void("test", dtype=None) # type: ignore[call-overload] + +np.generic(1) # type: ignore[abstract] +np.number(1) # type: ignore[abstract] +np.integer(1) # type: ignore[abstract] +np.inexact(1) # type: ignore[abstract] +np.character("test") # type: ignore[abstract] +np.flexible(b"test") # type: ignore[abstract] + +np.float64(value=0.0) # type: ignore[call-arg] +np.int64(value=0) # type: ignore[call-arg] +np.uint64(value=0) # type: ignore[call-arg] +np.complex128(value=0.0j) # type: ignore[call-overload] +np.str_(value="bob") # type: ignore[call-overload] +np.bytes_(value=b"test") # type: ignore[call-overload] +np.void(value=b"test") # type: ignore[call-overload] +np.bool(value=True) # type: ignore[call-overload] +np.datetime64(value="2019") # type: ignore[call-overload] +np.timedelta64(value=0) # type: ignore[call-overload] + +np.bytes_(b"hello", encoding="utf-8") # type: ignore[call-overload] +np.str_("hello", encoding="utf-8") # type: ignore[call-overload] + +f8.item(1) # type: ignore[call-overload] +f8.item((0, 1)) # type: ignore[arg-type] +f8.squeeze(axis=1) # type: ignore[arg-type] +f8.squeeze(axis=(0, 1)) # type: ignore[arg-type] +f8.transpose(1) # type: ignore[arg-type] def func(a: np.float32) -> None: ... 
-func(f2) # E: incompatible type -func(f8) # E: incompatible type +func(f2) # type: ignore[arg-type] +func(f8) # type: ignore[arg-type] -c8.__getnewargs__() # E: Invalid self argument -f2.__getnewargs__() # E: Invalid self argument -f2.hex() # E: Invalid self argument -np.float16.fromhex("0x0.0p+0") # E: Invalid self argument -f2.__trunc__() # E: Invalid self argument -f2.__getformat__("float") # E: Invalid self argument +c8.__getnewargs__() # type: ignore[attr-defined] +f2.__getnewargs__() # type: ignore[attr-defined] +f2.hex() # type: ignore[attr-defined] +np.float16.fromhex("0x0.0p+0") # type: ignore[attr-defined] +f2.__trunc__() # type: ignore[attr-defined] +f2.__getformat__("float") # type: ignore[attr-defined] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/shape.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/shape.pyi index 63faa45..a83b2e9 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/shape.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/shape.pyi @@ -1,6 +1,7 @@ from typing import Any + import numpy as np -# test bounds of _ShapeType_co +# test bounds of _ShapeT_co -np.ndarray[tuple[str, str], Any] # E: Value of type variable +np.ndarray[tuple[str, str], Any] # type: ignore[type-var] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/shape_base.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/shape_base.pyi index a7c965a..1c38f59 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/shape_base.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/shape_base.pyi @@ -5,4 +5,4 @@ class DTypeLike: dtype_like: DTypeLike -np.expand_dims(dtype_like, (5, 10)) # E: No overload variant +np.expand_dims(dtype_like, (5, 10)) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/stride_tricks.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/stride_tricks.pyi index b213bf2..f721b43 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/stride_tricks.pyi +++ 
b/blimgui/dist64/numpy/typing/tests/data/fail/stride_tricks.pyi @@ -3,7 +3,7 @@ import numpy.typing as npt AR_f8: npt.NDArray[np.float64] -np.lib.stride_tricks.as_strided(AR_f8, shape=8) # E: No overload variant -np.lib.stride_tricks.as_strided(AR_f8, strides=8) # E: No overload variant +np.lib.stride_tricks.as_strided(AR_f8, shape=8) # type: ignore[call-overload] +np.lib.stride_tricks.as_strided(AR_f8, strides=8) # type: ignore[call-overload] -np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # E: No overload variant +np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/strings.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/strings.pyi index ce6e6ab..0fb01da 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/strings.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/strings.pyi @@ -4,56 +4,49 @@ import numpy.typing as npt AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -np.strings.equal(AR_U, AR_S) # E: incompatible type - -np.strings.not_equal(AR_U, AR_S) # E: incompatible type - -np.strings.greater_equal(AR_U, AR_S) # E: incompatible type - -np.strings.less_equal(AR_U, AR_S) # E: incompatible type - -np.strings.greater(AR_U, AR_S) # E: incompatible type - -np.strings.less(AR_U, AR_S) # E: incompatible type - -np.strings.encode(AR_S) # E: incompatible type -np.strings.decode(AR_U) # E: incompatible type - -np.strings.join(AR_U, b"_") # E: incompatible type -np.strings.join(AR_S, "_") # E: incompatible type - -np.strings.lstrip(AR_U, b"a") # E: incompatible type -np.strings.lstrip(AR_S, "a") # E: incompatible type -np.strings.strip(AR_U, b"a") # E: incompatible type -np.strings.strip(AR_S, "a") # E: incompatible type -np.strings.rstrip(AR_U, b"a") # E: incompatible type -np.strings.rstrip(AR_S, "a") # E: incompatible type - -np.strings.partition(AR_U, b"a") # E: incompatible type -np.strings.partition(AR_S, "a") # E: incompatible type 
-np.strings.rpartition(AR_U, b"a") # E: incompatible type -np.strings.rpartition(AR_S, "a") # E: incompatible type - -np.strings.count(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type -np.strings.count(AR_S, "a", 0, 9) # E: incompatible type - -np.strings.endswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type -np.strings.endswith(AR_S, "a", 0, 9) # E: incompatible type -np.strings.startswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type -np.strings.startswith(AR_S, "a", 0, 9) # E: incompatible type - -np.strings.find(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type -np.strings.find(AR_S, "a", 0, 9) # E: incompatible type -np.strings.rfind(AR_U, b"a", [1, 2, 3], [1, 2 , 3]) # E: incompatible type -np.strings.rfind(AR_S, "a", 0, 9) # E: incompatible type - -np.strings.index(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.strings.index(AR_S, "a", end=9) # E: incompatible type -np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.strings.rindex(AR_S, "a", end=9) # E: incompatible type - -np.strings.isdecimal(AR_S) # E: incompatible type -np.strings.isnumeric(AR_S) # E: incompatible type - -np.strings.replace(AR_U, b"_", b"-", 10) # E: incompatible type -np.strings.replace(AR_S, "_", "-", 1) # E: incompatible type +np.strings.equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.greater(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.encode(AR_S) # type: ignore[arg-type] +np.strings.decode(AR_U) # type: ignore[arg-type] + +np.strings.lstrip(AR_U, b"a") # type: ignore[arg-type] +np.strings.lstrip(AR_S, "a") # type: ignore[arg-type] +np.strings.strip(AR_U, b"a") # type: ignore[arg-type] +np.strings.strip(AR_S, "a") # type: ignore[arg-type] +np.strings.rstrip(AR_U, 
b"a") # type: ignore[arg-type] +np.strings.rstrip(AR_S, "a") # type: ignore[arg-type] + +np.strings.partition(AR_U, b"a") # type: ignore[arg-type] +np.strings.partition(AR_S, "a") # type: ignore[arg-type] +np.strings.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.strings.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.strings.count(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.count(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.endswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.endswith(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.startswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.startswith(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.find(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.find(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.rfind(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.rfind(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.strings.isdecimal(AR_S) # type: ignore[arg-type] +np.strings.isnumeric(AR_S) # type: ignore[arg-type] + +np.strings.replace(AR_U, b"_", b"-", 10) # type: ignore[arg-type] +np.strings.replace(AR_S, "_", "-", 1) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/testing.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/testing.pyi index 160c556..c253558 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/testing.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/testing.pyi @@ -5,24 +5,24 @@ AR_U: npt.NDArray[np.str_] def func(x: object) -> bool: ... 
-np.testing.assert_(True, msg=1) # E: incompatible type -np.testing.build_err_msg(1, "test") # E: incompatible type -np.testing.assert_almost_equal(AR_U, AR_U) # E: incompatible type -np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # E: incompatible type -np.testing.assert_array_almost_equal(AR_U, AR_U) # E: incompatible type -np.testing.assert_array_less(AR_U, AR_U) # E: incompatible type -np.testing.assert_string_equal(b"a", b"a") # E: incompatible type +np.testing.assert_(True, msg=1) # type: ignore[arg-type] +np.testing.build_err_msg(1, "test") # type: ignore[arg-type] +np.testing.assert_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.testing.assert_array_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_less(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_string_equal(b"a", b"a") # type: ignore[arg-type] -np.testing.assert_raises(expected_exception=TypeError, callable=func) # E: No overload variant -np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # E: No overload variant +np.testing.assert_raises(expected_exception=TypeError, callable=func) # type: ignore[call-overload] +np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # type: ignore[call-overload] -np.testing.assert_allclose(AR_U, AR_U) # E: incompatible type -np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # E: incompatible type -np.testing.assert_array_max_ulp(AR_U, AR_U) # E: incompatible type +np.testing.assert_allclose(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_max_ulp(AR_U, AR_U) # type: ignore[arg-type] -np.testing.assert_warns(RuntimeWarning, func) # E: No overload variant -np.testing.assert_no_warnings(func=func) # E: No overload variant -np.testing.assert_no_warnings(func) # E: Too 
many arguments -np.testing.assert_no_warnings(func, y=None) # E: No overload variant +np.testing.assert_warns(RuntimeWarning, func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func=func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func, y=None) # type: ignore[call-overload] -np.testing.assert_no_gc_cycles(func=func) # E: No overload variant +np.testing.assert_no_gc_cycles(func=func) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/twodim_base.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/twodim_base.pyi index b3ded3b..46b07ac 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,37 +1,39 @@ -from typing import Any, TypeVar +from typing import type_check_only import numpy as np import numpy.typing as npt +_0d_bool: np.bool +_nd_bool: npt.NDArray[np.bool] +_nd_td64: npt.NDArray[np.timedelta64] +_to_2d_bool: list[list[bool]] -def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]: - pass +@type_check_only +def func1(ar: np.ndarray, a: int) -> npt.NDArray[np.str_]: ... +@type_check_only +def func2(ar: np.ndarray, a: float) -> float: ... 
+### -def func2(ar: npt.NDArray[Any], a: float) -> float: - pass +np.eye(10, M=20.0) # type: ignore[call-overload] +np.eye(10, k=2.5, dtype=int) # type: ignore[call-overload] +np.diag(_nd_bool, k=0.5) # type: ignore[call-overload] +np.diagflat(_nd_bool, k=0.5) # type: ignore[call-overload] -AR_b: npt.NDArray[np.bool] -AR_m: npt.NDArray[np.timedelta64] +np.tri(10, M=20.0) # type: ignore[call-overload] +np.tri(10, k=2.5, dtype=int) # type: ignore[call-overload] -AR_LIKE_b: list[bool] +np.tril(_nd_bool, k=0.5) # type: ignore[call-overload] +np.triu(_nd_bool, k=0.5) # type: ignore[call-overload] -np.eye(10, M=20.0) # E: No overload variant -np.eye(10, k=2.5, dtype=int) # E: No overload variant +np.vander(_nd_td64) # type: ignore[type-var] -np.diag(AR_b, k=0.5) # E: No overload variant -np.diagflat(AR_b, k=0.5) # E: No overload variant +np.histogram2d(_nd_td64) # type: ignore[call-overload] -np.tri(10, M=20.0) # E: No overload variant -np.tri(10, k=2.5, dtype=int) # E: No overload variant +np.mask_indices(10, func1) # type: ignore[arg-type] +np.mask_indices(10, func2, 10.5) # type: ignore[arg-type] -np.tril(AR_b, k=0.5) # E: No overload variant -np.triu(AR_b, k=0.5) # E: No overload variant +np.tril_indices(3.14) # type: ignore[arg-type] -np.vander(AR_m) # E: incompatible type - -np.histogram2d(AR_m) # E: No overload variant - -np.mask_indices(10, func1) # E: incompatible type -np.mask_indices(10, func2, 10.5) # E: incompatible type +np.tril_indices_from(_to_2d_bool) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/type_check.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/type_check.pyi index 9e58d6f..775f405 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/type_check.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/type_check.pyi @@ -1,13 +1,12 @@ import numpy as np -import numpy.typing as npt DTYPE_i8: np.dtype[np.int64] -np.mintypecode(DTYPE_i8) # E: incompatible type -np.iscomplexobj(DTYPE_i8) # E: incompatible type 
-np.isrealobj(DTYPE_i8) # E: incompatible type +np.mintypecode(DTYPE_i8) # type: ignore[arg-type] +np.iscomplexobj(DTYPE_i8) # type: ignore[arg-type] +np.isrealobj(DTYPE_i8) # type: ignore[arg-type] -np.typename(DTYPE_i8) # E: No overload variant -np.typename("invalid") # E: No overload variant +np.typename(DTYPE_i8) # type: ignore[call-overload] +np.typename("invalid") # type: ignore[call-overload] -np.common_type(np.timedelta64()) # E: incompatible type +np.common_type(np.timedelta64()) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/ufunc_config.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/ufunc_config.pyi index 3dc9340..cf76f66 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/ufunc_config.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/ufunc_config.pyi @@ -14,8 +14,8 @@ class Write2: class Write3: def write(self, *, a: str) -> None: ... -np.seterrcall(func1) # E: Argument 1 to "seterrcall" has incompatible type -np.seterrcall(func2) # E: Argument 1 to "seterrcall" has incompatible type -np.seterrcall(Write1()) # E: Argument 1 to "seterrcall" has incompatible type -np.seterrcall(Write2()) # E: Argument 1 to "seterrcall" has incompatible type -np.seterrcall(Write3()) # E: Argument 1 to "seterrcall" has incompatible type +np.seterrcall(func1) # type: ignore[arg-type] +np.seterrcall(func2) # type: ignore[arg-type] +np.seterrcall(Write1()) # type: ignore[arg-type] +np.seterrcall(Write2()) # type: ignore[arg-type] +np.seterrcall(Write3()) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/ufunclike.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/ufunclike.pyi index 1becec3..7153d99 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/ufunclike.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/ufunclike.pyi @@ -6,16 +6,16 @@ AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] -np.fix(AR_c) # E: incompatible type -np.fix(AR_m) # E: 
incompatible type -np.fix(AR_M) # E: incompatible type +np.fix(AR_c) # type: ignore[arg-type] +np.fix(AR_m) # type: ignore[arg-type] +np.fix(AR_M) # type: ignore[arg-type] -np.isposinf(AR_c) # E: incompatible type -np.isposinf(AR_m) # E: incompatible type -np.isposinf(AR_M) # E: incompatible type -np.isposinf(AR_O) # E: incompatible type +np.isposinf(AR_c) # type: ignore[arg-type] +np.isposinf(AR_m) # type: ignore[arg-type] +np.isposinf(AR_M) # type: ignore[arg-type] +np.isposinf(AR_O) # type: ignore[arg-type] -np.isneginf(AR_c) # E: incompatible type -np.isneginf(AR_m) # E: incompatible type -np.isneginf(AR_M) # E: incompatible type -np.isneginf(AR_O) # E: incompatible type +np.isneginf(AR_c) # type: ignore[arg-type] +np.isneginf(AR_m) # type: ignore[arg-type] +np.isneginf(AR_M) # type: ignore[arg-type] +np.isneginf(AR_O) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/ufuncs.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/ufuncs.pyi index b6762ef..03c8c30 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/ufuncs.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/ufuncs.pyi @@ -3,15 +3,15 @@ import numpy.typing as npt AR_f8: npt.NDArray[np.float64] -np.sin.nin + "foo" # E: Unsupported operand types -np.sin(1, foo="bar") # E: No overload variant +np.sin.nin + "foo" # type: ignore[operator] +np.sin(1, foo="bar") # type: ignore[call-overload] -np.abs(None) # E: No overload variant +np.abs(None) # type: ignore[call-overload] -np.add(1, 1, 1) # E: No overload variant -np.add(1, 1, axis=0) # E: No overload variant +np.add(1, 1, 1) # type: ignore[call-overload] +np.add(1, 1, axis=0) # type: ignore[call-overload] -np.matmul(AR_f8, AR_f8, where=True) # E: No overload variant +np.matmul(AR_f8, AR_f8, where=True) # type: ignore[call-overload] -np.frexp(AR_f8, out=None) # E: No overload variant -np.frexp(AR_f8, out=AR_f8) # E: No overload variant +np.frexp(AR_f8, out=None) # type: ignore[call-overload] +np.frexp(AR_f8, out=AR_f8) # 
type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/fail/warnings_and_errors.pyi b/blimgui/dist64/numpy/typing/tests/data/fail/warnings_and_errors.pyi index 7831033..78d781e 100644 --- a/blimgui/dist64/numpy/typing/tests/data/fail/warnings_and_errors.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/fail/warnings_and_errors.pyi @@ -1,5 +1,5 @@ import numpy.exceptions as ex -ex.AxisError(1.0) # E: No overload variant -ex.AxisError(1, ndim=2.0) # E: No overload variant -ex.AxisError(2, msg_prefix=404) # E: No overload variant +ex.AxisError(1.0) # type: ignore[call-overload] +ex.AxisError(1, ndim=2.0) # type: ignore[call-overload] +ex.AxisError(2, msg_prefix=404) # type: ignore[call-overload] diff --git a/blimgui/dist64/numpy/typing/tests/data/misc/extended_precision.pyi b/blimgui/dist64/numpy/typing/tests/data/misc/extended_precision.pyi index 47fe546..b148145 100644 --- a/blimgui/dist64/numpy/typing/tests/data/misc/extended_precision.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/misc/extended_precision.pyi @@ -1,25 +1,9 @@ -import sys +from typing import assert_type import numpy as np -from numpy._typing import _80Bit, _96Bit, _128Bit, _256Bit +from numpy._typing import _96Bit, _128Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -assert_type(np.uint128(), np.unsignedinteger[_128Bit]) -assert_type(np.uint256(), np.unsignedinteger[_256Bit]) - -assert_type(np.int128(), np.signedinteger[_128Bit]) -assert_type(np.int256(), np.signedinteger[_256Bit]) - -assert_type(np.float80(), np.floating[_80Bit]) assert_type(np.float96(), np.floating[_96Bit]) assert_type(np.float128(), np.floating[_128Bit]) -assert_type(np.float256(), np.floating[_256Bit]) - -assert_type(np.complex160(), np.complexfloating[_80Bit, _80Bit]) assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit]) -assert_type(np.complex512(), 
np.complexfloating[_256Bit, _256Bit]) diff --git a/blimgui/dist64/numpy/typing/tests/data/mypy.ini b/blimgui/dist64/numpy/typing/tests/data/mypy.ini index bbfe887..1e65d82 100644 --- a/blimgui/dist64/numpy/typing/tests/data/mypy.ini +++ b/blimgui/dist64/numpy/typing/tests/data/mypy.ini @@ -1,10 +1,8 @@ [mypy] -plugins = numpy.typing.mypy_plugin +strict = True +enable_error_code = deprecated, ignore-without-code, truthy-bool +disallow_any_unimported = True +allow_redefinition_new = True +local_partial_types = True show_absolute_path = True -implicit_reexport = False pretty = True -disallow_any_unimported = True -disallow_any_generics = True -; https://github.com/python/mypy/issues/15313 -disable_bytearray_promotion = true -disable_memoryview_promotion = true diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/arithmetic.py b/blimgui/dist64/numpy/typing/tests/data/pass/arithmetic.py index 1620aac..5858e42 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/arithmetic.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/arithmetic.py @@ -1,9 +1,11 @@ from __future__ import annotations -from typing import Any +from typing import Any, cast + +import pytest + import numpy as np import numpy.typing as npt -import pytest c16 = np.complex128(1) f8 = np.float64(1) @@ -23,11 +25,11 @@ b = bool(1) c = complex(1) f = float(1) -i = int(1) +i = 1 class Object: - def __array__(self, dtype: np.typing.DTypeLike = None, + def __array__(self, dtype: np.typing.DTypeLike | None = None, copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self @@ -61,6 +63,7 @@ def __rpow__(self, value: Any) -> Object: AR_b: npt.NDArray[np.bool] = np.array([True]) AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) AR_f: npt.NDArray[np.float64] = np.array([1.0]) AR_c: npt.NDArray[np.complex128] = np.array([1j]) 
AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) @@ -252,6 +255,13 @@ def __rpow__(self, value: Any) -> Object: AR_LIKE_m // AR_m +AR_m /= f +AR_m //= f +AR_m /= AR_f +AR_m /= AR_LIKE_f +AR_m //= AR_f +AR_m //= AR_LIKE_f + AR_O // AR_LIKE_b AR_O // AR_LIKE_u AR_O // AR_LIKE_i @@ -275,6 +285,10 @@ def __rpow__(self, value: Any) -> Object: AR_i *= AR_LIKE_u AR_i *= AR_LIKE_i +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + AR_f *= AR_LIKE_b AR_f *= AR_LIKE_u AR_f *= AR_LIKE_i @@ -307,6 +321,10 @@ def __rpow__(self, value: Any) -> Object: AR_i **= AR_LIKE_u AR_i **= AR_LIKE_i +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + AR_f **= AR_LIKE_b AR_f **= AR_LIKE_u AR_f **= AR_LIKE_i diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/array_constructors.py b/blimgui/dist64/numpy/typing/tests/data/pass/array_constructors.py index 444b29d..743c088 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/array_constructors.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/array_constructors.py @@ -3,6 +3,7 @@ import numpy as np import numpy.typing as npt + class Index: def __index__(self) -> int: return 0 diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/array_like.py b/blimgui/dist64/numpy/typing/tests/data/pass/array_like.py index 0c270c0..3927a2e 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/array_like.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/array_like.py @@ -1,11 +1,11 @@ from __future__ import annotations -from typing import Any, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np if TYPE_CHECKING: - from numpy._typing import NDArray, ArrayLike, _SupportsArray + from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 @@ -22,9 +22,7 @@ class A: - def __array__( - self, dtype: None | np.dtype[Any] = None - ) -> NDArray[np.float64]: + def __array__(self, dtype: np.dtype | None = None) -> 
NDArray[np.float64]: return np.array([1.0, 2.0, 3.0]) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/arrayterator.py b/blimgui/dist64/numpy/typing/tests/data/pass/arrayterator.py index 86640b2..fec19c7 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/arrayterator.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/arrayterator.py @@ -2,6 +2,7 @@ from __future__ import annotations from typing import Any + import numpy as np AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/bitwise_ops.py b/blimgui/dist64/numpy/typing/tests/data/pass/bitwise_ops.py index caab948..726d76b 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/bitwise_ops.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/bitwise_ops.py @@ -9,7 +9,7 @@ b_ = np.bool(1) b = bool(1) -i = int(1) +i = 1 AR = np.array([0, 1, 2], dtype=np.int32) AR.setflags(write=False) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/comparisons.py b/blimgui/dist64/numpy/typing/tests/data/pass/comparisons.py index 5d231c6..320a8a2 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/comparisons.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/comparisons.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import cast, Any +from typing import Any, cast + import numpy as np c16 = np.complex128() @@ -18,10 +19,10 @@ b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 SEQ = (0, 1, 2, 3, 4) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/flatiter.py b/blimgui/dist64/numpy/typing/tests/data/pass/flatiter.py index 4b34e27..2155697 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/flatiter.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/flatiter.py @@ -9,8 +9,18 @@ iter(a) next(a) a[0] -a[[0, 1, 2]] a[...] a[:] a.__array__() -a.__array__(np.dtype(np.float64)) + +b = np.array([1]).flat +a[b] + +a[0] = "1" +a[:] = "2" +a[...] 
= "3" +a[[]] = "4" +a[[0]] = "5" +a[[[0]]] = "6" +a[[[[[0]]]]] = "7" +a[b] = "8" diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/index_tricks.py b/blimgui/dist64/numpy/typing/tests/data/pass/index_tricks.py index 2303843..986f5cc 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/index_tricks.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/index_tricks.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np AR_LIKE_b = [[True, True], [True, True]] diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/lib_user_array.py b/blimgui/dist64/numpy/typing/tests/data/pass/lib_user_array.py index 2dda067..528babb 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/lib_user_array.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/lib_user_array.py @@ -3,7 +3,7 @@ from __future__ import annotations import numpy as np -from numpy.lib.user_array import container +from numpy.lib.user_array import container # type: ignore[deprecated] N = 10_000 W = H = int(N**0.5) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/literal.py b/blimgui/dist64/numpy/typing/tests/data/pass/literal.py index e561446..e4106d8 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/literal.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/literal.py @@ -1,9 +1,10 @@ from __future__ import annotations -from typing import Any, TYPE_CHECKING from functools import partial +from typing import TYPE_CHECKING, Any import pytest + import numpy as np if TYPE_CHECKING: @@ -17,7 +18,6 @@ CF = frozenset({None, "C", "F"}) order_list: list[tuple[frozenset[str | None], Callable[..., Any]]] = [ - (KACF, partial(np.ndarray, 1)), (KACF, AR.tobytes), (KACF, partial(AR.astype, int)), (KACF, AR.copy), @@ -25,7 +25,8 @@ (KACF, AR.flatten), (KACF, AR.ravel), (KACF, partial(np.array, 1)), - # NOTE: __call__ is needed due to mypy 1.11 bugs (#17620, #17631) + # NOTE: __call__ is needed due to mypy bugs (#17620, #17631) + (KACF, 
partial(np.ndarray.__call__, 1)), (CF, partial(np.zeros.__call__, 1)), (CF, partial(np.ones.__call__, 1)), (CF, partial(np.empty.__call__, 1)), diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/ma.py b/blimgui/dist64/numpy/typing/tests/data/pass/ma.py index 31a31d3..62cb4a9 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/ma.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/ma.py @@ -1,8 +1,199 @@ -from typing import Any +import datetime as dt +from typing import Any, TypeAlias, TypeVar, cast import numpy as np -import numpy.ma +import numpy.typing as npt +from numpy._typing import _Shape +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] -m : np.ma.MaskedArray[Any, np.dtype[np.float64]] = np.ma.masked_array([1.5, 2, 3], mask=[True, False, True]) +# mypy: disable-error-code=no-untyped-call +MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) +MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) +MAR_i: MaskedArray[np.int64] = np.ma.MaskedArray([1]) +MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) +MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) +MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) +MAR_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) +MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) +MAR_V: MaskedArray[np.void] = np.ma.MaskedArray( + [(1, 1)], + mask=[(False, False)], + dtype=[('a', int), ('b', int)] +) + +AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] + +MAR_f.mask = AR_b 
+MAR_f.mask = np.False_ + +MAR_i.fill_value = 0 + +MAR_b.flat[MAR_i > 0] = False +MAR_i.flat[:] = 1 +MAR_f.flat[[0]] = AR_LIKE_f +MAR_c.flat[[0, 0]] = [3, 4 + 3j] +MAR_td64.flat[0] = dt.timedelta(1) +MAR_dt64.flat[0] = dt.datetime(2020, 1, 1) + +MAR_b[MAR_i > 0] = False +MAR_i[:] = 1 +MAR_f[[0]] = AR_LIKE_f +MAR_c[[0, 0]] = [3, 4 + 3j] +MAR_td64[0] = dt.timedelta(1) +MAR_dt64[0] = dt.datetime(2020, 1, 1) +MAR_V['a'] = [2] + +# Inplace addition + +MAR_b += AR_LIKE_b + +MAR_u += AR_LIKE_b +MAR_u += AR_LIKE_u + +MAR_i += AR_LIKE_b +MAR_i += 2 +MAR_i += AR_LIKE_i + +MAR_f += AR_LIKE_b +MAR_f += 2 +MAR_f += AR_LIKE_u +MAR_f += AR_LIKE_i +MAR_f += AR_LIKE_f + +MAR_c += AR_LIKE_b +MAR_c += AR_LIKE_u +MAR_c += AR_LIKE_i +MAR_c += AR_LIKE_f +MAR_c += AR_LIKE_c + +MAR_td64 += AR_LIKE_b +MAR_td64 += AR_LIKE_u +MAR_td64 += AR_LIKE_i +MAR_td64 += AR_LIKE_m +MAR_dt64 += AR_LIKE_b +MAR_dt64 += AR_LIKE_u +MAR_dt64 += AR_LIKE_i +MAR_dt64 += AR_LIKE_m + +MAR_S += b'snakes' +MAR_U += 'snakes' +MAR_T += 'snakes' + +# Inplace subtraction + +MAR_u -= AR_LIKE_b +MAR_u -= AR_LIKE_u + +MAR_i -= AR_LIKE_b +MAR_i -= AR_LIKE_i + +MAR_f -= AR_LIKE_b +MAR_f -= AR_LIKE_u +MAR_f -= AR_LIKE_i +MAR_f -= AR_LIKE_f + +MAR_c -= AR_LIKE_b +MAR_c -= AR_LIKE_u +MAR_c -= AR_LIKE_i +MAR_c -= AR_LIKE_f +MAR_c -= AR_LIKE_c + +MAR_td64 -= AR_LIKE_b +MAR_td64 -= AR_LIKE_u +MAR_td64 -= AR_LIKE_i +MAR_td64 -= AR_LIKE_m +MAR_dt64 -= AR_LIKE_b +MAR_dt64 -= AR_LIKE_u +MAR_dt64 -= AR_LIKE_i +MAR_dt64 -= AR_LIKE_m + +# Inplace floor division + +MAR_f //= AR_LIKE_b +MAR_f //= 2 +MAR_f //= AR_LIKE_u +MAR_f //= AR_LIKE_i +MAR_f //= AR_LIKE_f + +MAR_td64 //= AR_LIKE_i + +# Inplace true division + +MAR_f /= AR_LIKE_b +MAR_f /= 2 +MAR_f /= AR_LIKE_u +MAR_f /= AR_LIKE_i +MAR_f /= AR_LIKE_f + +MAR_c /= AR_LIKE_b +MAR_c /= AR_LIKE_u +MAR_c /= AR_LIKE_i +MAR_c /= AR_LIKE_f +MAR_c /= AR_LIKE_c + +MAR_td64 /= AR_LIKE_i + +# Inplace multiplication + +MAR_b *= AR_LIKE_b + +MAR_u *= AR_LIKE_b +MAR_u *= AR_LIKE_u + +MAR_i *= 
AR_LIKE_b +MAR_i *= 2 +MAR_i *= AR_LIKE_i + +MAR_f *= AR_LIKE_b +MAR_f *= 2 +MAR_f *= AR_LIKE_u +MAR_f *= AR_LIKE_i +MAR_f *= AR_LIKE_f + +MAR_c *= AR_LIKE_b +MAR_c *= AR_LIKE_u +MAR_c *= AR_LIKE_i +MAR_c *= AR_LIKE_f +MAR_c *= AR_LIKE_c + +MAR_td64 *= AR_LIKE_b +MAR_td64 *= AR_LIKE_u +MAR_td64 *= AR_LIKE_i +MAR_td64 *= AR_LIKE_f + +MAR_S *= 2 +MAR_U *= 2 +MAR_T *= 2 + +# Inplace power + +MAR_u **= AR_LIKE_b +MAR_u **= AR_LIKE_u + +MAR_i **= AR_LIKE_b +MAR_i **= AR_LIKE_i + +MAR_f **= AR_LIKE_b +MAR_f **= AR_LIKE_u +MAR_f **= AR_LIKE_i +MAR_f **= AR_LIKE_f + +MAR_c **= AR_LIKE_b +MAR_c **= AR_LIKE_u +MAR_c **= AR_LIKE_i +MAR_c **= AR_LIKE_f +MAR_c **= AR_LIKE_c diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/mod.py b/blimgui/dist64/numpy/typing/tests/data/pass/mod.py index 407f956..1d151dd 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/mod.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/mod.py @@ -13,7 +13,7 @@ b = bool(1) f = float(1) -i = int(1) +i = 1 AR = np.array([1], dtype=np.bool) AR.setflags(write=False) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/multiarray.py b/blimgui/dist64/numpy/typing/tests/data/pass/multiarray.py index 6744361..15cae7f 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/multiarray.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/multiarray.py @@ -70,7 +70,8 @@ np.unpackbits(AR_u1) np.shares_memory(1, 2) -np.shares_memory(AR_f8, AR_f8, max_work=1) +np.shares_memory(AR_f8, AR_f8, max_work=-1) np.may_share_memory(1, 2) -np.may_share_memory(AR_f8, AR_f8, max_work=1) +np.may_share_memory(AR_f8, AR_f8, max_work=0) +np.may_share_memory(AR_f8, AR_f8, max_work=-1) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/ndarray_conversion.py b/blimgui/dist64/numpy/typing/tests/data/pass/ndarray_conversion.py index 3be5b47..0dcb3ec 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/ndarray_conversion.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -74,14 +74,8 @@ 
# setflags nd.setflags() - -nd.setflags(True) nd.setflags(write=True) - -nd.setflags(True, True) nd.setflags(write=True, align=True) - -nd.setflags(True, True, False) nd.setflags(write=True, align=True, uic=False) # fill is pretty simple diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/ndarray_misc.py b/blimgui/dist64/numpy/typing/tests/data/pass/ndarray_misc.py index ef9fc5a..d2da037 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/ndarray_misc.py @@ -9,18 +9,23 @@ from __future__ import annotations import operator -from typing import cast, Any +from collections.abc import Hashable +from typing import Any, cast import numpy as np import numpy.typing as npt + class SubClass(npt.NDArray[np.float64]): ... +class IntSubClass(npt.NDArray[np.intp]): ... + i4 = np.int32(1) A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) B0 = np.empty((), dtype=np.int32).view(SubClass) B1 = np.empty((1,), dtype=np.int32).view(SubClass) B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) +B_int0: IntSubClass = np.empty((), dtype=np.intp).view(IntSubClass) C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) D = np.ones(3).view(SubClass) @@ -41,15 +46,20 @@ class SubClass(npt.NDArray[np.float64]): ... i4.argmax() A.argmax() A.argmax(axis=0) -A.argmax(out=B0) +A.argmax(out=B_int0) i4.argmin() A.argmin() A.argmin(axis=0) -A.argmin(out=B0) +A.argmin(out=B_int0) i4.argsort() +i4.argsort(stable=True) A.argsort() +A.argsort(stable=True) + +A.sort() +A.sort(stable=True) i4.choose([()]) _choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) @@ -117,7 +127,7 @@ class SubClass(npt.NDArray[np.float64]): ... i4.std() A.std() A.std(axis=0) -A.std(keepdims=True) +A.std(keepdims=True, mean=0.) A.std(out=B0.astype(np.float64)) i4.sum() @@ -135,7 +145,7 @@ class SubClass(npt.NDArray[np.float64]): ... 
i4.var() A.var() A.var(axis=0) -A.var(keepdims=True) +A.var(keepdims=True, mean=0.) A.var(out=B0) A.argpartition([0]) @@ -184,13 +194,6 @@ class SubClass(npt.NDArray[np.float64]): ... A_void["yop"] = A_float[:, 0] A_void["yap"] = A_float[:, 1] -# deprecated - -with np.testing.assert_warns(DeprecationWarning): - ctypes_obj.get_data() # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): - ctypes_obj.get_shape() # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): - ctypes_obj.get_strides() # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): - ctypes_obj.get_as_parameter() # pyright: ignore[reportDeprecated] +# regression test for https://github.com/numpy/numpy/issues/30445 +def f(x: np.generic) -> Hashable: + return x diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/numeric.py b/blimgui/dist64/numpy/typing/tests/data/pass/numeric.py index d1f62d5..e5f0b1f 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/numeric.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/numeric.py @@ -2,25 +2,20 @@ Tests for :mod:`numpy._core.numeric`. Does not include tests which fall under ``array_constructors``. - """ -from __future__ import annotations -from typing import cast +from typing import Any import numpy as np -import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): - ... + +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... 
+ i8 = np.int64(1) -A = cast( - np.ndarray[tuple[int, int, int], np.dtype[np.intp]], - np.arange(27).reshape(3, 3, 3), -) -B: list[list[list[int]]] = A.tolist() +A = np.arange(27).reshape(3, 3, 3) +B = A.tolist() C = np.empty((27, 27)).view(SubClass) np.count_nonzero(i8) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/random.py b/blimgui/dist64/numpy/typing/tests/data/pass/random.py index 688985a..ccc6fe3 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/random.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/random.py @@ -1,6 +1,7 @@ from __future__ import annotations from typing import Any + import numpy as np SEED_NONE = None diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/recfunctions.py b/blimgui/dist64/numpy/typing/tests/data/pass/recfunctions.py index 167db4a..8717722 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/recfunctions.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/recfunctions.py @@ -1,7 +1,6 @@ """These tests are based on the doctests from `numpy/lib/recfunctions.py`.""" -from typing import Any -from typing_extensions import assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt @@ -13,7 +12,7 @@ def test_recursive_fill_fields() -> None: [(1, 10.0), (2, 20.0)], dtype=[("A", np.int64), ("B", np.float64)], ) - b = np.zeros((int(3),), dtype=a.dtype) + b = np.zeros((3,), dtype=a.dtype) out = rfn.recursive_fill_fields(a, b) assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) @@ -38,7 +37,7 @@ def test_get_names_flat() -> None: def test_flatten_descr() -> None: ndtype = np.dtype([("a", " None: @@ -52,8 +51,8 @@ def test_get_fieldstructure() -> None: def test_merge_arrays() -> None: assert_type( rfn.merge_arrays(( - np.ones((int(2),), np.int_), - np.ones((int(3),), np.float64), + np.ones((2,), np.int_), + np.ones((3,), np.float64), )), np.recarray[tuple[int], np.dtype[np.void]], ) @@ -61,7 +60,7 @@ def test_merge_arrays() -> None: def test_drop_fields() -> None: 
ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.drop_fields(a, "a"), @@ -79,7 +78,7 @@ def test_drop_fields() -> None: def test_rename_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), @@ -93,7 +92,7 @@ def test_repack_fields() -> None: assert_type(rfn.repack_fields(dt), np.dtype[np.void]) assert_type(rfn.repack_fields(dt.type(0)), np.void) assert_type( - rfn.repack_fields(np.ones((int(3),), dtype=dt)), + rfn.repack_fields(np.ones((3,), dtype=dt)), np.ndarray[tuple[int], np.dtype[np.void]], ) @@ -134,29 +133,32 @@ def test_require_fields() -> None: def test_stack_arrays() -> None: - x = np.zeros((int(2),), np.int32) + x = np.zeros((2,), np.int32) assert_type( rfn.stack_arrays(x), np.ndarray[tuple[int], np.dtype[np.int32]], ) - z = np.ones((int(2),), [("A", "|S3"), ("B", float)]) - zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + z = np.ones((2,), [("A", "|S3"), ("B", float)]) + zz = np.ones((2,), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) assert_type( rfn.stack_arrays((z, zz)), - np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]], + np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]], ) def test_find_duplicates() -> None: ndtype = np.dtype([("a", int)]) - a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]]) + a = np.ma.ones(7).view(ndtype) + assert_type( + rfn.find_duplicates(a), + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + ) assert_type( rfn.find_duplicates(a, ignoremask=True, return_index=True), tuple[ - np.ma.MaskedArray[Any, np.dtype[np.void]], - np.ndarray[Any, np.dtype[np.int_]], + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], 
+ np.ndarray[tuple[int], np.dtype[np.int_]], ], ) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/scalars.py b/blimgui/dist64/numpy/typing/tests/data/pass/scalars.py index 9997ada..5ed41a2 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/scalars.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/scalars.py @@ -1,9 +1,10 @@ import datetime as dt import pytest + import numpy as np -b = np.bool() +b = np.bool() b_ = np.bool_() u8 = np.uint64() i8 = np.int64() diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/shape.py b/blimgui/dist64/numpy/typing/tests/data/pass/shape.py index 76ed591..9c6d9c4 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/shape.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/shape.py @@ -1,4 +1,4 @@ -from typing import Any, NamedTuple, cast +from typing import Any, NamedTuple import numpy as np @@ -8,14 +8,12 @@ class XYGrid(NamedTuple): x_axis: int y_axis: int -# TODO: remove this cast after: https://github.com/numpy/numpy/pull/27171 -arr: np.ndarray[XYGrid, Any] = cast( - np.ndarray[XYGrid, Any], - np.empty(XYGrid(2, 2)), -) - -# Test variance of _ShapeType_co +# Test variance of _ShapeT_co def accepts_2d(a: np.ndarray[tuple[int, int], Any]) -> None: return None -accepts_2d(arr) + +accepts_2d(np.empty(XYGrid(2, 2))) +accepts_2d(np.zeros(XYGrid(2, 2), dtype=int)) +accepts_2d(np.ones(XYGrid(2, 2), dtype=int)) +accepts_2d(np.full(XYGrid(2, 2), fill_value=5, dtype=int)) diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/simple.py b/blimgui/dist64/numpy/typing/tests/data/pass/simple.py index db3e578..1549d18 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/simple.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/simple.py @@ -1,9 +1,9 @@ """Simple expression that should pass with mypy.""" import operator +from collections.abc import Iterable import numpy as np import numpy.typing as npt -from collections.abc import Iterable # Basic checks array = np.array([1, 2]) @@ -165,4 +165,6 @@ def 
iterable_func(x: Iterable[object]) -> Iterable[object]: ~array # Other methods -np.array([1, 2]).transpose() +array.transpose() + +array @ array diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/simple_py3.py b/blimgui/dist64/numpy/typing/tests/data/pass/simple_py3.py deleted file mode 100644 index e7a3a8f..0000000 --- a/blimgui/dist64/numpy/typing/tests/data/pass/simple_py3.py +++ /dev/null @@ -1,6 +0,0 @@ -import numpy as np - -array = np.array([1, 2]) - -# The @ operator is not in python 2 -array @ array diff --git a/blimgui/dist64/numpy/typing/tests/data/pass/ufunclike.py b/blimgui/dist64/numpy/typing/tests/data/pass/ufunclike.py index 35481ab..b54b950 100644 --- a/blimgui/dist64/numpy/typing/tests/data/pass/ufunclike.py +++ b/blimgui/dist64/numpy/typing/tests/data/pass/ufunclike.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np @@ -10,6 +12,9 @@ def __ceil__(self) -> Object: def __floor__(self) -> Object: return self + def __trunc__(self) -> Object: + return self + def __ge__(self, value: object) -> bool: return True @@ -27,12 +32,12 @@ def __array__(self, dtype: np.typing.DTypeLike | None = None, AR_LIKE_O = [Object(), Object(), Object()] AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5") -np.fix(AR_LIKE_b) -np.fix(AR_LIKE_u) -np.fix(AR_LIKE_i) -np.fix(AR_LIKE_f) -np.fix(AR_LIKE_O) -np.fix(AR_LIKE_f, out=AR_U) +np.fix(AR_LIKE_b) # type: ignore[deprecated] +np.fix(AR_LIKE_u) # type: ignore[deprecated] +np.fix(AR_LIKE_i) # type: ignore[deprecated] +np.fix(AR_LIKE_f) # type: ignore[deprecated] +np.fix(AR_LIKE_O) # type: ignore[deprecated] +np.fix(AR_LIKE_f, out=AR_U) # type: ignore[deprecated] np.isposinf(AR_LIKE_b) np.isposinf(AR_LIKE_u) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/arithmetic.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/arithmetic.pyi index 871ab08..c9599a0 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/arithmetic.pyi +++ 
b/blimgui/dist64/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -1,11 +1,9 @@ import datetime as dt -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit,_64Bit, _128Bit - -from typing_extensions import assert_type +from numpy._typing import _64Bit, _128Bit b: bool c: complex @@ -50,7 +48,11 @@ AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] -AR_number: npt.NDArray[np.number[Any]] +AR_S: npt.NDArray[np.bytes_] +AR_U: npt.NDArray[np.str_] +AR_T: np.ndarray[tuple[Any, ...], np.dtypes.StringDType] +AR_floating: npt.NDArray[np.floating] +AR_number: npt.NDArray[np.number] AR_Any: npt.NDArray[Any] AR_LIKE_b: list[bool] @@ -62,56 +64,55 @@ AR_LIKE_m: list[np.timedelta64] AR_LIKE_M: list[np.datetime64] AR_LIKE_O: list[np.object_] - # Array subtraction -assert_type(AR_number - AR_number, npt.NDArray[np.number[Any]]) +assert_type(AR_number - AR_number, npt.NDArray[np.number]) assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.uint32]) -assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating]) assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_b - AR_LIKE_O, Any) assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.uint32]) -assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_b, 
npt.NDArray[np.complexfloating]) assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_b, Any) assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.uint32]) -assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating]) assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_u - AR_LIKE_O, Any) assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.uint32]) -assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating]) assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_u, Any) assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.int64]) -assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_i, 
npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating]) assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_i - AR_LIKE_O, Any) assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.int64]) -assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating]) assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_i, Any) @@ -120,14 +121,14 @@ assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.float64]) assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.float64]) assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.float64]) assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.float64]) -assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating]) assert_type(AR_f - AR_LIKE_O, Any) assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.float64]) -assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating]) assert_type(AR_LIKE_O - AR_f, Any) assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complex128]) @@ -275,39 +276,39 @@ assert_type(AR_LIKE_O / AR_O, Any) assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.uint32]) 
-assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating]) assert_type(AR_b // AR_LIKE_O, Any) assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.uint32]) -assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating]) assert_type(AR_LIKE_O // AR_b, Any) assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.uint32]) -assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating]) assert_type(AR_u // AR_LIKE_O, Any) assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.uint32]) -assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating]) assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_u, Any) assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.int64]) -assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_i // AR_LIKE_u, 
npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating]) assert_type(AR_i // AR_LIKE_O, Any) assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.int64]) -assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating]) assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_i, Any) @@ -485,7 +486,7 @@ assert_type(c8 / b_, np.complex64) # Complex -assert_type(c16 + f16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c16 + f16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(c16 + f8, np.complex128) assert_type(c16 + i8, np.complex128) @@ -498,12 +499,12 @@ assert_type(c16 + c, np.complex128) assert_type(c16 + f, np.complex128) assert_type(c16 + AR_f, npt.NDArray[np.complex128]) -assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(f16 + c16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) assert_type(i8 + c16, np.complex128) assert_type(c8 + c16, np.complex128 | np.complex64) -assert_type(f4 + c16, np.complex128 | np.complex64) +assert_type(f4 + c16, np.complexfloating) assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) @@ -511,10 +512,10 @@ assert_type(c + c16, np.complex128) assert_type(f + c16, np.complex128) assert_type(AR_f + c16, npt.NDArray[np.complex128]) -assert_type(c8 + f16, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_128Bit, _128Bit]) +assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) assert_type(c8 + c16, 
np.complex64 | np.complex128) assert_type(c8 + f8, np.complex64 | np.complex128) -assert_type(c8 + i8, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_64Bit, _64Bit]) +assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) assert_type(c8 + c8, np.complex64) assert_type(c8 + f4, np.complex64) assert_type(c8 + i4, np.complex64) @@ -539,7 +540,7 @@ assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) # Float -assert_type(f8 + f16, np.float64| np.floating[_128Bit]) +assert_type(f8 + f16, np.floating) assert_type(f8 + f8, np.float64) assert_type(f8 + i8, np.float64) assert_type(f8 + f4, np.float64) @@ -550,44 +551,44 @@ assert_type(f8 + c, np.float64 | np.complex128) assert_type(f8 + f, np.float64) assert_type(f8 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f8, np.floating[_128Bit] | np.float64) +assert_type(f16 + f8, np.floating) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) -assert_type(f4 + f8, np.float32 | np.float64) -assert_type(i4 + f8,np.float64) +assert_type(f4 + f8, np.floating) +assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) assert_type(c + f8, np.complex128 | np.float64) assert_type(f + f8, np.float64) assert_type(AR_f + f8, npt.NDArray[np.float64]) -assert_type(f4 + f16, np.float32 | np.floating[_128Bit]) -assert_type(f4 + f8, np.float32 | np.float64) -assert_type(f4 + i8, np.float32 | np.floating[_64Bit]) +assert_type(f4 + f16, np.floating) +assert_type(f4 + f8, np.floating) +assert_type(f4 + i8, np.floating) assert_type(f4 + f4, np.float32) -assert_type(f4 + i4, np.float32) +assert_type(f4 + i4, np.floating) assert_type(f4 + b_, np.float32) assert_type(f4 + b, np.float32) -assert_type(f4 + c, np.complex64 | np.complex128) -assert_type(f4 + f, np.float32 | np.float64) +assert_type(f4 + c, np.complexfloating) +assert_type(f4 + f, np.float32) assert_type(f4 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f4, np.floating[_128Bit] | np.float32) 
+assert_type(f16 + f4, np.floating) assert_type(f8 + f4, np.float64) -assert_type(i8 + f4, np.floating[_32Bit] | np.floating[_64Bit]) +assert_type(i8 + f4, np.floating) assert_type(f4 + f4, np.float32) -assert_type(i4 + f4, np.float32) +assert_type(i4 + f4, np.floating) assert_type(b_ + f4, np.float32) assert_type(b + f4, np.float32) -assert_type(c + f4, np.complex64 | np.complex128) -assert_type(f + f4, np.float64 | np.float32) +assert_type(c + f4, np.complexfloating) +assert_type(f + f4, np.float32) assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int assert_type(i8 + i8, np.int64) assert_type(i8 + u8, Any) -assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 + i4, np.signedinteger) assert_type(i8 + u4, Any) assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) @@ -597,7 +598,7 @@ assert_type(i8 + AR_f, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(u8 + i4, Any) -assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) assert_type(u8 + c, np.complex128) @@ -606,7 +607,7 @@ assert_type(u8 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i8, np.int64) assert_type(u8 + i8, Any) -assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i8, np.signedinteger) assert_type(u4 + i8, Any) assert_type(b_ + i8, np.int64) assert_type(b + i8, np.int64) @@ -616,14 +617,14 @@ assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(i4 + u8, Any) -assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u4 + u8, np.unsignedinteger) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) assert_type(c + u8, np.complex128) assert_type(f + u8, np.float64) assert_type(AR_f + u8, npt.NDArray[np.float64]) -assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) 
+assert_type(i4 + i8, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(i4 + b_, np.int32) assert_type(i4 + b, np.int32) @@ -631,13 +632,13 @@ assert_type(i4 + AR_f, npt.NDArray[np.float64]) assert_type(u4 + i8, Any) assert_type(u4 + i4, Any) -assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u4 + u8, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) assert_type(u4 + AR_f, npt.NDArray[np.float64]) -assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 + i4, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) @@ -645,7 +646,7 @@ assert_type(AR_f + i4, npt.NDArray[np.float64]) assert_type(i8 + u4, Any) assert_type(i4 + u4, Any) -assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) assert_type(b + u4, np.uint32) @@ -654,3 +655,65 @@ assert_type(AR_f + u4, npt.NDArray[np.float64]) # Any assert_type(AR_Any + 2, npt.NDArray[Any]) + +# regression tests for https://github.com/numpy/numpy/issues/28805 + +assert_type(AR_floating + f, npt.NDArray[np.floating]) +assert_type(AR_floating - f, npt.NDArray[np.floating]) +assert_type(AR_floating * f, npt.NDArray[np.floating]) +assert_type(AR_floating ** f, npt.NDArray[np.floating]) +assert_type(AR_floating / f, npt.NDArray[np.floating]) +assert_type(AR_floating // f, npt.NDArray[np.floating]) +assert_type(AR_floating % f, npt.NDArray[np.floating]) +assert_type(divmod(AR_floating, f), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +assert_type(f + AR_floating, npt.NDArray[np.floating]) +assert_type(f - AR_floating, npt.NDArray[np.floating]) +assert_type(f * AR_floating, npt.NDArray[np.floating]) +assert_type(f ** AR_floating, npt.NDArray[np.floating]) +assert_type(f / AR_floating, 
npt.NDArray[np.floating]) +assert_type(f // AR_floating, npt.NDArray[np.floating]) +assert_type(f % AR_floating, npt.NDArray[np.floating]) +assert_type(divmod(f, AR_floating), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +# character-like + +assert_type(AR_S + b"", npt.NDArray[np.bytes_]) +assert_type(AR_S + [b""], npt.NDArray[np.bytes_]) +assert_type([b""] + AR_S, npt.NDArray[np.bytes_]) +assert_type(AR_S + AR_S, npt.NDArray[np.bytes_]) + +assert_type(AR_U + "", npt.NDArray[np.str_]) +assert_type(AR_U + [""], npt.NDArray[np.str_]) +assert_type("" + AR_U, npt.NDArray[np.str_]) +assert_type([""] + AR_U, npt.NDArray[np.str_]) +assert_type(AR_U + AR_U, npt.NDArray[np.str_]) + +assert_type(AR_T + "", np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + [""], np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type("" + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type([""] + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_U, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_U + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) + +assert_type(AR_S * i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_S` as `list[int]` +assert_type(AR_i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(AR_U * i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +# mypy incorrectly 
infers `AR_LIKE_i * AR_U` as `list[int]` +assert_type(AR_i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(AR_T * i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +# mypy incorrectly infers `AR_LIKE_i * AR_T` as `list[int]` +assert_type(AR_i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/array_api_info.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/array_api_info.pyi index a42cd2b..bf48658 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/array_api_info.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/array_api_info.pyi @@ -1,9 +1,7 @@ -from typing import Literal +from typing import Literal, Never, assert_type import numpy as np -from typing_extensions import Never, assert_type - info = np.__array_namespace_info__() assert_type(info.__module__, Literal["numpy"]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/array_constructors.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/array_constructors.pyi index 7572a0e..7d9f61c 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,16 +1,17 @@ import sys -from typing import Any, Literal as L, TypeVar -from pathlib import Path from collections import deque +from pathlib import Path +from typing import Any, Generic, TypeVar, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) -_SCT = TypeVar("_SCT", bound=np.generic, covariant=True) +class SubClass(npt.NDArray[_ScalarT_co]): ... -class SubClass(npt.NDArray[_SCT]): ... 
+class IntoSubClass(Generic[_ScalarT_co]): + def __array__(self) -> SubClass[_ScalarT_co]: ... i8: np.int64 @@ -18,6 +19,9 @@ A: npt.NDArray[np.float64] B: SubClass[np.float64] C: list[int] D: SubClass[np.float64 | np.int64] +E: IntoSubClass[np.float64 | np.int64] + +mixed_shape: tuple[int, np.int64] def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... @@ -25,63 +29,68 @@ assert_type(np.empty_like(A), npt.NDArray[np.float64]) assert_type(np.empty_like(B), SubClass[np.float64]) assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A), npt.NDArray[np.float64]) assert_type(np.array(B), npt.NDArray[np.float64]) assert_type(np.array([1, 1.0]), npt.NDArray[Any]) assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.array(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A, like=A), npt.NDArray[np.float64]) assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +assert_type(np.array(E, subok=True), SubClass[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.zeros([1, 5, 6], dtype="c16"), npt.NDArray[Any]) +assert_type(np.zeros(mixed_shape), 
npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty([1, 5, 6], dtype="c16"), npt.NDArray[Any]) +assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) assert_type(np.concatenate(A), npt.NDArray[np.float64]) -assert_type(np.concatenate([A, A]), npt.NDArray[Any]) +assert_type(np.concatenate([A, A]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) assert_type(np.asarray(A), npt.NDArray[np.float64]) assert_type(np.asarray(B), npt.NDArray[np.float64]) assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asanyarray(A), npt.NDArray[np.float64]) assert_type(np.asanyarray(B), SubClass[np.float64]) assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) 
assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asfortranarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) @@ -105,18 +114,26 @@ assert_type(np.frombuffer(A), npt.NDArray[np.float64]) assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) -assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.signedinteger[Any]]]) -assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.signedinteger[Any]]]) -assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.signedinteger[Any]]]) -assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.floating[Any]]]) -assert_type(np.arange(start=0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.floating[Any]]]) -assert_type(np.arange(np.timedelta64(0)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(0, np.timedelta64(10)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), np.ndarray[tuple[int], np.dtype[np.datetime64]]) +_x_bool: bool +_x_int: int +_x_float: float +_x_timedelta: np.timedelta64 +_x_datetime: np.datetime64 + +assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(0, 10, step=2), 
np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) +assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) +assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) -assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[Any]]) -assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) +# https://github.com/numpy/numpy/issues/30628 +assert_type(np.arange("2025-12-20", "2025-12-23", dtype="datetime64[D]"), np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) assert_type(np.require(A), npt.NDArray[np.float64]) assert_type(np.require(B), SubClass[np.float64]) @@ -130,22 +147,22 @@ assert_type(np.require(B, requirements="W"), SubClass[np.float64]) assert_type(np.require(B, requirements="A"), SubClass[np.float64]) assert_type(np.require(C), npt.NDArray[Any]) -assert_type(np.linspace(0, 10), npt.NDArray[np.floating[Any]]) -assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.floating[Any]], np.floating[Any]]) 
-assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating[Any, Any]], np.complexfloating[Any, Any]]) +assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.float64], np.float64]) +assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating], np.complexfloating]) assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) -assert_type(np.logspace(0, 10), npt.NDArray[np.floating[Any]]) -assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.logspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.geomspace(0, 10), npt.NDArray[np.floating[Any]]) -assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.geomspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) @@ -180,26 +197,44 @@ assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64] assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]]) assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.ones(_shape_like), npt.NDArray[np.float64]) -assert_type(np.ones(_shape_like, dtype=np.dtypes.Int64DType()), np.ndarray[Any, np.dtypes.Int64DType]) +assert_type( + np.ones(_shape_like, dtype=np.dtypes.Int64DType()), + np.ndarray[tuple[Any, ...], np.dtypes.Int64DType], +) assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any]) +assert_type(np.ones(mixed_shape), 
npt.NDArray[np.float64]) assert_type(np.full(_size, i8), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.full(_shape_2d, i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) assert_type(np.full(_shape_like, i8), npt.NDArray[np.int64]) assert_type(np.full(_shape_like, 42), npt.NDArray[Any]) assert_type(np.full(_size, i8, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) -assert_type(np.full(_size, i8, dtype=float), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.full(_size, i8, dtype=float), np.ndarray[tuple[int], np.dtype]) assert_type(np.full(_shape_like, 42, dtype=float), npt.NDArray[Any]) -assert_type(np.full(_shape_0d, i8, dtype=object), np.ndarray[tuple[()], np.dtype[Any]]) +assert_type(np.full(_shape_0d, i8, dtype=object), np.ndarray[tuple[()], np.dtype]) assert_type(np.indices([1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...]) assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) -assert_type(np.identity(10), npt.NDArray[np.float64]) -assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) +assert_type(np.identity(3), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.identity(3, dtype=np.int8), np.ndarray[tuple[int, int], np.dtype[np.int8]]) +assert_type(np.identity(3, dtype=bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="bool"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="b1"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="?"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype=int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="int"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="n"), np.ndarray[tuple[int, int], np.dtype[np.int_ | 
Any]]) +assert_type(np.identity(3, dtype=float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="float"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="f8"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="d"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype=complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="complex"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="c16"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="D"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) @@ -232,7 +267,7 @@ assert_type(np.stack([C, C]), npt.NDArray[Any]) assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) assert_type(np.stack([A, A], out=B), SubClass[np.float64]) -assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) +assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.block(C), npt.NDArray[Any]) if sys.version_info >= (3, 12): diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/arraypad.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/arraypad.pyi index f812ec1..0818e3d 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/arraypad.pyi @@ -1,13 +1,11 @@ from collections.abc import Mapping -from typing import Any, SupportsIndex +from typing import Any, SupportsIndex, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - def mode_func( - ar: npt.NDArray[np.number[Any]], + ar: npt.NDArray[np.number], width: tuple[int, int], iaxis: 
SupportsIndex, kwargs: Mapping[str, Any], @@ -22,3 +20,8 @@ assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any]) assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) + +assert_type(np.pad(AR_i8, {-1: (2, 3)}), npt.NDArray[np.int64]) +assert_type(np.pad(AR_i8, {-2: 4}), npt.NDArray[np.int64]) +pad_width: dict[int, int | tuple[int, int]] = {-1: (2, 3), -2: 4} +assert_type(np.pad(AR_i8, pad_width), npt.NDArray[np.int64]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/arrayprint.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/arrayprint.pyi index 8ed869b..56ac529 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -1,20 +1,18 @@ import contextlib from collections.abc import Callable -from typing import Any +from typing import assert_type import numpy as np import numpy.typing as npt from numpy._core.arrayprint import _FormatOptions -from typing_extensions import assert_type - AR: npt.NDArray[np.int64] -func_float: Callable[[np.floating[Any]], str] -func_int: Callable[[np.integer[Any]], str] +func_float: Callable[[np.floating], str] +func_int: Callable[[np.integer], str] assert_type(np.get_printoptions(), _FormatOptions) assert_type( - np.array2string(AR, formatter={'float_kind': func_float, 'int_kind': func_int}), + np.array2string(AR, formatter={"float_kind": func_float, "int_kind": func_int}), str, ) assert_type(np.format_float_scientific(1.0), str) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/arraysetops.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/arraysetops.pyi index 85a7969..5da2801 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -1,10 +1,12 @@ -from typing import Any +from typing import Any, assert_type import numpy as np 
import numpy.typing as npt -from numpy.lib._arraysetops_impl import UniqueAllResult, UniqueCountsResult, UniqueInverseResult - -from typing_extensions import assert_type +from numpy.lib._arraysetops_impl import ( + UniqueAllResult, + UniqueCountsResult, + UniqueInverseResult, +) AR_b: npt.NDArray[np.bool] AR_i8: npt.NDArray[np.int64] diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/arrayterator.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/arrayterator.pyi index 3807ee1..8424490 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/arrayterator.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -1,29 +1,27 @@ -from typing import Any from collections.abc import Generator +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_i8: npt.NDArray[np.int64] ar_iter = np.lib.Arrayterator(AR_i8) assert_type(ar_iter.var, npt.NDArray[np.int64]) -assert_type(ar_iter.buf_size, None | int) +assert_type(ar_iter.buf_size, int | None) assert_type(ar_iter.start, list[int]) assert_type(ar_iter.stop, list[int]) assert_type(ar_iter.step, list[int]) -assert_type(ar_iter.shape, tuple[int, ...]) -assert_type(ar_iter.flat, Generator[np.int64, None, None]) +assert_type(ar_iter.shape, tuple[Any, ...]) +assert_type(ar_iter.flat, Generator[np.int64]) assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) for i in ar_iter: assert_type(i, npt.NDArray[np.int64]) -assert_type(ar_iter[0], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) -assert_type(ar_iter[...], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) -assert_type(ar_iter[:], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) -assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) -assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) +assert_type(ar_iter[0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) 
+assert_type(ar_iter[...], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[:], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/bitwise_ops.pyi index fb6cf23..0069749 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,10 +1,7 @@ -from typing import Any, Literal as L, TypeAlias +from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _64Bit, _32Bit - -from typing_extensions import assert_type FalseType: TypeAlias = L[False] TrueType: TypeAlias = L[True] @@ -27,18 +24,17 @@ i: int AR: npt.NDArray[np.int32] - assert_type(i8 << i8, np.int64) assert_type(i8 >> i8, np.int64) assert_type(i8 | i8, np.int64) assert_type(i8 ^ i8, np.int64) assert_type(i8 & i8, np.int64) -assert_type(i8 << AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(i8 >> AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(i8 | AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(i8 ^ AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(i8 & AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(i8 << AR, npt.NDArray[np.signedinteger]) +assert_type(i8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(i8 | AR, npt.NDArray[np.signedinteger]) +assert_type(i8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(i8 & AR, npt.NDArray[np.signedinteger]) assert_type(i4 << i4, np.int32) assert_type(i4 >> i4, np.int32) @@ -46,11 +42,11 @@ assert_type(i4 | i4, np.int32) assert_type(i4 ^ i4, np.int32) assert_type(i4 & i4, np.int32) -assert_type(i8 << i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) 
-assert_type(i8 >> i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 | i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 ^ i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 & i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 << i4, np.signedinteger) +assert_type(i8 >> i4, np.signedinteger) +assert_type(i8 | i4, np.signedinteger) +assert_type(i8 ^ i4, np.signedinteger) +assert_type(i8 & i4, np.signedinteger) assert_type(i8 << b_, np.int64) assert_type(i8 >> b_, np.int64) @@ -70,11 +66,11 @@ assert_type(u8 | u8, np.uint64) assert_type(u8 ^ u8, np.uint64) assert_type(u8 & u8, np.uint64) -assert_type(u8 << AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(u8 >> AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(u8 | AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(u8 ^ AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(u8 & AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(u8 << AR, npt.NDArray[np.signedinteger]) +assert_type(u8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(u8 | AR, npt.NDArray[np.signedinteger]) +assert_type(u8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(u8 & AR, npt.NDArray[np.signedinteger]) assert_type(u4 << u4, np.uint32) assert_type(u4 >> u4, np.uint32) @@ -82,17 +78,17 @@ assert_type(u4 | u4, np.uint32) assert_type(u4 ^ u4, np.uint32) assert_type(u4 & u4, np.uint32) -assert_type(u4 << i4, np.signedinteger[Any]) -assert_type(u4 >> i4, np.signedinteger[Any]) -assert_type(u4 | i4, np.signedinteger[Any]) -assert_type(u4 ^ i4, np.signedinteger[Any]) -assert_type(u4 & i4, np.signedinteger[Any]) +assert_type(u4 << i4, np.signedinteger) +assert_type(u4 >> i4, np.signedinteger) +assert_type(u4 | i4, np.signedinteger) +assert_type(u4 ^ i4, np.signedinteger) +assert_type(u4 & i4, np.signedinteger) -assert_type(u4 << i, np.signedinteger[Any]) -assert_type(u4 >> i, np.signedinteger[Any]) -assert_type(u4 | i, np.signedinteger[Any]) 
-assert_type(u4 ^ i, np.signedinteger[Any]) -assert_type(u4 & i, np.signedinteger[Any]) +assert_type(u4 << i, np.uint32) +assert_type(u4 >> i, np.uint32) +assert_type(u4 | i, np.uint32) +assert_type(u4 ^ i, np.uint32) +assert_type(u4 & i, np.uint32) assert_type(u8 << b_, np.uint64) assert_type(u8 >> b_, np.uint64) @@ -112,11 +108,11 @@ assert_type(b_ | b_, np.bool) assert_type(b_ ^ b_, np.bool) assert_type(b_ & b_, np.bool) -assert_type(b_ << AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(b_ >> AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(b_ | AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(b_ ^ AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(b_ & AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(b_ << AR, npt.NDArray[np.signedinteger]) +assert_type(b_ >> AR, npt.NDArray[np.signedinteger]) +assert_type(b_ | AR, npt.NDArray[np.signedinteger]) +assert_type(b_ ^ AR, npt.NDArray[np.signedinteger]) +assert_type(b_ & AR, npt.NDArray[np.signedinteger]) assert_type(b_ << b, np.int8) assert_type(b_ >> b, np.int8) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/char.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/char.pyi index 8d10de6..11c14bb 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/char.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/char.pyi @@ -1,16 +1,15 @@ +from typing import TypeAlias, assert_type + import numpy as np -import numpy.typing as npt import numpy._typing as np_t +import numpy.typing as npt -from typing_extensions import assert_type -from typing import TypeAlias +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -AR_T: np.ndarray[np_t._Shape, np.dtypes.StringDType] - -AR_T_alias: TypeAlias = np.ndarray[np_t._Shape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +AR_T: AR_T_alias assert_type(np.char.equal(AR_U, AR_U), 
npt.NDArray[np.bool]) assert_type(np.char.equal(AR_S, AR_S), npt.NDArray[np.bool]) @@ -66,6 +65,7 @@ assert_type(np.char.join(AR_T, "_"), AR_TU_alias) assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar="a"), npt.NDArray[np.bytes_]) assert_type(np.char.ljust(AR_T, 5), AR_T_alias) assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) @@ -204,16 +204,22 @@ assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.char.translate(AR_T, ""), AR_T_alias) -assert_type(np.char.array(AR_U), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.char.array(AR_S, order="K"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.array("bob", copy=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.array(1, unicode=False), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.array(1, unicode=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) - -assert_type(np.char.asarray(AR_U), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.asarray("bob"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.asarray(1, unicode=False), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.asarray(1, unicode=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) +assert_type(np.char.array(AR_U), np.char.chararray[np_t._AnyShape, 
np.dtype[np.str_]]) +assert_type(np.char.array(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) + +assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/chararray.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/chararray.pyi index 6bf3045..a50318c 100644 --- 
a/blimgui/dist64/numpy/typing/tests/data/reveal/chararray.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/chararray.pyi @@ -1,12 +1,13 @@ -from typing import Any +from typing import Any, TypeAlias, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type +_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +_StrCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] -AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]] -AR_S: np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]] +AR_U: _StrCharArray +AR_S: _BytesCharArray assert_type(AR_U == AR_U, npt.NDArray[np.bool]) assert_type(AR_S == AR_S, npt.NDArray[np.bool]) @@ -26,46 +27,47 @@ assert_type(AR_S > AR_S, npt.NDArray[np.bool]) assert_type(AR_U < AR_U, npt.NDArray[np.bool]) assert_type(AR_S < AR_S, npt.NDArray[np.bool]) -assert_type(AR_U * 5, np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S * [5], np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U * 5, _StrCharArray) +assert_type(AR_S * [5], _BytesCharArray) -assert_type(AR_U % "test", np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S % b"test", np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U % "test", _StrCharArray) +assert_type(AR_S % b"test", _BytesCharArray) -assert_type(AR_U.capitalize(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.capitalize(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.capitalize(), _StrCharArray) +assert_type(AR_S.capitalize(), _BytesCharArray) -assert_type(AR_U.center(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.center([2, 3, 4], b"a"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.center(5), _StrCharArray) +assert_type(AR_S.center([2, 3, 4], b"a"), _BytesCharArray) -assert_type(AR_U.encode(), 
np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_S.decode(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) +assert_type(AR_U.encode(), _BytesCharArray) +assert_type(AR_S.decode(), _StrCharArray) -assert_type(AR_U.expandtabs(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.expandtabs(tabsize=4), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.expandtabs(), _StrCharArray) +assert_type(AR_S.expandtabs(tabsize=4), _BytesCharArray) -assert_type(AR_U.join("_"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.join([b"_", b""]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.join("_"), _StrCharArray) +assert_type(AR_S.join([b"_", b""]), _BytesCharArray) -assert_type(AR_U.ljust(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_U.rjust(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.ljust(5), _StrCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar="a"), _BytesCharArray) +assert_type(AR_U.rjust(5), _StrCharArray) +assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) -assert_type(AR_U.lstrip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.lstrip(chars=b"_"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_U.rstrip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.rstrip(chars=b"_"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_U.strip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.strip(chars=b"_"), 
np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.lstrip(), _StrCharArray) +assert_type(AR_S.lstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.rstrip(), _StrCharArray) +assert_type(AR_S.rstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.strip(), _StrCharArray) +assert_type(AR_S.strip(chars=b"_"), _BytesCharArray) -assert_type(AR_U.partition("\n"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.partition([b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_U.rpartition("\n"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.rpartition([b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.partition("\n"), _StrCharArray) +assert_type(AR_S.partition([b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rpartition("\n"), _StrCharArray) +assert_type(AR_S.rpartition([b"a", b"b", b"c"]), _BytesCharArray) -assert_type(AR_U.replace("_", "-"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.replace("_", "-"), _StrCharArray) +assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), _BytesCharArray) assert_type(AR_U.split("_"), npt.NDArray[np.object_]) assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) @@ -75,17 +77,17 @@ assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) assert_type(AR_U.splitlines(), npt.NDArray[np.object_]) assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_]) -assert_type(AR_U.swapcase(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.swapcase(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.swapcase(), _StrCharArray) +assert_type(AR_S.swapcase(), _BytesCharArray) -assert_type(AR_U.title(), np.char.chararray[tuple[int, ...], 
np.dtype[np.str_]]) -assert_type(AR_S.title(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.title(), _StrCharArray) +assert_type(AR_S.title(), _BytesCharArray) -assert_type(AR_U.upper(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.upper(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.upper(), _StrCharArray) +assert_type(AR_S.upper(), _BytesCharArray) -assert_type(AR_U.zfill(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.zfill([2, 3, 4]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.zfill(5), _StrCharArray) +assert_type(AR_S.zfill([2, 3, 4]), _BytesCharArray) assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/comparisons.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/comparisons.pyi index 96fc3b2..2c90dbf 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/comparisons.pyi @@ -1,12 +1,10 @@ -import fractions import decimal -from typing import Any +import fractions +from typing import assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - c16 = np.complex128() f8 = np.float64() i8 = np.int64() @@ -22,10 +20,10 @@ td = np.timedelta64(0, "D") b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 AR = np.array([0], dtype=np.int64) AR.setflags(write=False) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/constants.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/constants.pyi index 6440e15..54f7723 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/constants.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/constants.pyi @@ -1,5 +1,4 @@ -from typing import Literal -from typing_extensions 
import assert_type +from typing import Literal, assert_type import numpy as np diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ctypeslib.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ctypeslib.pyi index 0e28160..b197062 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -1,13 +1,10 @@ -import sys import ctypes as ct -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt from numpy import ctypeslib -from typing_extensions import assert_type - AR_bool: npt.NDArray[np.bool] AR_ubyte: npt.NDArray[np.ubyte] AR_ushort: npt.NDArray[np.ushort] @@ -30,10 +27,10 @@ assert_type(np.ctypeslib.c_intp(), ctypeslib.c_intp) assert_type(np.ctypeslib.ndpointer(), type[ctypeslib._ndptr[None]]) assert_type(np.ctypeslib.ndpointer(dtype=np.float64), type[ctypeslib._ndptr[np.dtype[np.float64]]]) -assert_type(np.ctypeslib.ndpointer(dtype=float), type[ctypeslib._ndptr[np.dtype[Any]]]) +assert_type(np.ctypeslib.ndpointer(dtype=float), type[ctypeslib._ndptr[np.dtype]]) assert_type(np.ctypeslib.ndpointer(shape=(10, 3)), type[ctypeslib._ndptr[None]]) assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._concrete_ndptr[np.dtype[np.int64]]]) -assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype[Any]]]) +assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype]]) assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) @@ -76,18 +73,9 @@ assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) -if sys.platform == "win32": - # Mainly on windows int is the same size as long but gets picked first: - 
assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_int]) - assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_uint]) - assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_uint]) - assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_int]) - assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_int) - assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_uint) -else: - assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) - assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) - assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) - assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) - assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) - assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) +assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) +assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) +assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) +assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) +assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) +assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/datasource.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/datasource.pyi index 31e5f55..9938ed4 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/datasource.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/datasource.pyi @@ -1,10 +1,8 @@ from pathlib import Path -from typing import IO, Any +from typing import IO, Any, assert_type import numpy as np -from typing_extensions import assert_type - path1: Path path2: str diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/dtype.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/dtype.pyi index cdebff3..db9532f 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/dtype.pyi +++ 
b/blimgui/dist64/numpy/typing/tests/data/reveal/dtype.pyi @@ -2,13 +2,11 @@ import ctypes as ct import datetime as dt from decimal import Decimal from fractions import Fraction -from typing import Any, Literal, TypeAlias +from typing import Any, Literal, LiteralString, TypeAlias, assert_type import numpy as np from numpy.dtypes import StringDType -from typing_extensions import LiteralString, assert_type - # a combination of likely `object` dtype-like candidates (no `_co`) _PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta @@ -16,26 +14,21 @@ dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] dtype_i8: np.dtype[np.int64] -py_int_co: type[int | bool] -py_float_co: type[float | int | bool] -py_complex_co: type[complex | float | int | bool] py_object: type[_PyObjectLike] py_character: type[str | bytes] -py_flexible: type[str | bytes | memoryview] ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] ct_number: type[ct.c_uint8 | ct.c_float] ct_generic: type[ct.c_bool | ct.c_char] cs_integer: Literal["u1", "V", "S"] cs_generic: Literal["H", "U", "h", "|M8[Y]", "?"] -dt_inexact: np.dtype[np.inexact[Any]] +dt_inexact: np.dtype[np.inexact] dt_string: StringDType - assert_type(np.dtype(np.float64), np.dtype[np.float64]) assert_type(np.dtype(np.float64, metadata={"test": "test"}), np.dtype[np.float64]) assert_type(np.dtype(np.int64), np.dtype[np.int64]) @@ -51,19 +44,16 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) -assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool]) assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool]) assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) 
assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) -assert_type(np.dtype(py_character), np.dtype[np.character]) assert_type(np.dtype(memoryview), np.dtype[np.void]) -assert_type(np.dtype(py_flexible), np.dtype[np.flexible]) +assert_type(np.dtype(py_character), np.dtype[np.character]) +# object types assert_type(np.dtype(list), np.dtype[np.object_]) assert_type(np.dtype(dt.datetime), np.dtype[np.object_]) assert_type(np.dtype(dt.timedelta), np.dtype[np.object_]) @@ -77,13 +67,10 @@ assert_type(np.dtype("u1"), np.dtype[np.uint8]) assert_type(np.dtype("l"), np.dtype[np.long]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) -assert_type(np.dtype(cs_integer), np.dtype[np.integer[Any]]) -assert_type(np.dtype(cs_number), np.dtype[np.number[Any]]) -assert_type(np.dtype(cs_flex), np.dtype[np.flexible]) -assert_type(np.dtype(cs_generic), np.dtype[np.generic]) +assert_type(np.dtype(cs_integer), np.dtype[np.integer]) # ctypes -assert_type(np.dtype(ct.c_double), np.dtype[np.double]) +assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) assert_type(np.dtype(ct.c_bool), np.dtype[np.bool]) @@ -93,15 +80,16 @@ assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) # Special case for None assert_type(np.dtype(None), np.dtype[np.float64]) -# Dypes of dtypes +# dtypes of dtypes assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) -assert_type(np.dtype(dt_inexact), np.dtype[np.inexact[Any]]) +assert_type(np.dtype(dt_inexact), np.dtype[np.inexact]) # Parameterized dtypes -assert_type(np.dtype("S8"), np.dtype[Any]) +assert_type(np.dtype("S8"), np.dtype) # Void assert_type(np.dtype(("U", 10)), np.dtype[np.void]) +assert_type(np.dtype({"formats": (int, "u8"), "names": ("n", 
"B")}), np.dtype[np.void]) # StringDType assert_type(np.dtype(dt_string), StringDType) @@ -109,14 +97,13 @@ assert_type(np.dtype("T"), StringDType) assert_type(np.dtype("=T"), StringDType) assert_type(np.dtype("|T"), StringDType) - # Methods and attributes -assert_type(dtype_U.base, np.dtype[Any]) -assert_type(dtype_U.subdtype, None | tuple[np.dtype[Any], tuple[int, ...]]) +assert_type(dtype_U.base, np.dtype) +assert_type(dtype_U.subdtype, tuple[np.dtype, tuple[Any, ...]] | None) assert_type(dtype_U.newbyteorder(), np.dtype[np.str_]) assert_type(dtype_U.type, type[np.str_]) assert_type(dtype_U.name, LiteralString) -assert_type(dtype_U.names, None | tuple[str, ...]) +assert_type(dtype_U.names, tuple[str, ...] | None) assert_type(dtype_U * 0, np.dtype[np.str_]) assert_type(dtype_U * 1, np.dtype[np.str_]) @@ -130,11 +117,16 @@ assert_type(0 * dtype_U, np.dtype[np.str_]) assert_type(1 * dtype_U, np.dtype[np.str_]) assert_type(2 * dtype_U, np.dtype[np.str_]) -assert_type(0 * dtype_i8, np.dtype[Any]) -assert_type(1 * dtype_i8, np.dtype[Any]) -assert_type(2 * dtype_i8, np.dtype[Any]) +assert_type(0 * dtype_i8, np.dtype) +assert_type(1 * dtype_i8, np.dtype) +assert_type(2 * dtype_i8, np.dtype) -assert_type(dtype_V["f0"], np.dtype[Any]) -assert_type(dtype_V[0], np.dtype[Any]) +assert_type(dtype_V["f0"], np.dtype) +assert_type(dtype_V[0], np.dtype) assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) assert_type(dtype_V[["f0"]], np.dtype[np.void]) + +class _D: + __numpy_dtype__: np.dtype[np.int8] + +assert_type(np.dtype(_D()), np.dtype[np.int8]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/einsumfunc.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/einsumfunc.pyi index 90345c2..44ae745 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/einsumfunc.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/einsumfunc.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from 
typing_extensions import assert_type - AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] AR_LIKE_i: list[int] diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/emath.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/emath.pyi index 74b4a6d..b018000 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/emath.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/emath.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_f8: npt.NDArray[np.float64] AR_c16: npt.NDArray[np.complex128] f8: np.float64 @@ -12,45 +10,45 @@ c16: np.complex128 assert_type(np.emath.sqrt(f8), Any) assert_type(np.emath.sqrt(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.sqrt(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.sqrt(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.sqrt(c16), np.complexfloating) +assert_type(np.emath.sqrt(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.log(f8), Any) assert_type(np.emath.log(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.log(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.log(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.log(c16), np.complexfloating) +assert_type(np.emath.log(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.log10(f8), Any) assert_type(np.emath.log10(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.log10(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.log10(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.log10(c16), np.complexfloating) +assert_type(np.emath.log10(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.log2(f8), Any) assert_type(np.emath.log2(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.log2(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.log2(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.log2(c16), 
np.complexfloating) +assert_type(np.emath.log2(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.logn(f8, 2), Any) assert_type(np.emath.logn(AR_f8, 4), npt.NDArray[Any]) -assert_type(np.emath.logn(f8, 1j), np.complexfloating[Any, Any]) -assert_type(np.emath.logn(AR_c16, 1.5), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.logn(f8, 1j), np.complexfloating) +assert_type(np.emath.logn(AR_c16, 1.5), npt.NDArray[np.complexfloating]) assert_type(np.emath.power(f8, 2), Any) assert_type(np.emath.power(AR_f8, 4), npt.NDArray[Any]) -assert_type(np.emath.power(f8, 2j), np.complexfloating[Any, Any]) -assert_type(np.emath.power(AR_c16, 1.5), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.power(f8, 2j), np.complexfloating) +assert_type(np.emath.power(AR_c16, 1.5), npt.NDArray[np.complexfloating]) assert_type(np.emath.arccos(f8), Any) assert_type(np.emath.arccos(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.arccos(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.arccos(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.arccos(c16), np.complexfloating) +assert_type(np.emath.arccos(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.arcsin(f8), Any) assert_type(np.emath.arcsin(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.arcsin(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.arcsin(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.arcsin(c16), np.complexfloating) +assert_type(np.emath.arcsin(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.arctanh(f8), Any) assert_type(np.emath.arctanh(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.arctanh(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.arctanh(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.arctanh(c16), np.complexfloating) +assert_type(np.emath.arctanh(AR_c16), npt.NDArray[np.complexfloating]) diff --git 
a/blimgui/dist64/numpy/typing/tests/data/reveal/fft.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/fft.pyi index 13ba8da..b648225 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/fft.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/fft.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_f8: npt.NDArray[np.float64] AR_c16: npt.NDArray[np.complex128] AR_LIKE_f8: list[float] @@ -15,11 +13,11 @@ assert_type(np.fft.fftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) assert_type(np.fft.ifftshift(AR_f8), npt.NDArray[np.float64]) assert_type(np.fft.ifftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) -assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating]) +assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating]) +assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.fft.fft(AR_f8), npt.NDArray[np.complex128]) assert_type(np.fft.ifft(AR_f8, axis=1), npt.NDArray[np.complex128]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/flatiter.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/flatiter.pyi index 9b1bfa4..04e13b8 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,49 +1,86 @@ -from typing import Literal, TypeAlias +from typing import Any, TypeAlias, assert_type import numpy as np -import numpy.typing as npt - -from typing_extensions import assert_type - -a: 
np.flatiter[npt.NDArray[np.str_]] -a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]] - -Size: TypeAlias = Literal[42] -a_1d_fixed: np.flatiter[np.ndarray[tuple[Size], np.dtype[np.object_]]] - -assert_type(a.base, npt.NDArray[np.str_]) -assert_type(a.copy(), npt.NDArray[np.str_]) -assert_type(a.coords, tuple[int, ...]) -assert_type(a.index, int) -assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]]) -assert_type(next(a), np.str_) -assert_type(a[0], np.str_) -assert_type(a[[0, 1, 2]], npt.NDArray[np.str_]) -assert_type(a[...], npt.NDArray[np.str_]) -assert_type(a[:], npt.NDArray[np.str_]) -assert_type(a[(...,)], npt.NDArray[np.str_]) -assert_type(a[(0,)], np.str_) - -assert_type(a.__array__(), npt.NDArray[np.str_]) -assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) -assert_type( - a_1d.__array__(), - np.ndarray[tuple[int], np.dtype[np.bytes_]], -) -assert_type( - a_1d.__array__(np.dtype(np.float64)), - np.ndarray[tuple[int], np.dtype[np.float64]], -) -assert_type( - a_1d_fixed.__array__(), - np.ndarray[tuple[Size], np.dtype[np.object_]], -) -assert_type( - a_1d_fixed.__array__(np.dtype(np.float64)), - np.ndarray[tuple[Size], np.dtype[np.float64]], -) - -a[0] = "a" -a[:5] = "a" -a[...] 
= "a" -a[(...,)] = "a" + +_ArrayND: TypeAlias = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtypes.BytesDType] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtypes.Int8DType] + +_a_nd: np.flatiter[_ArrayND] +_a_1d: np.flatiter[_Array1D] +_a_2d: np.flatiter[_Array2D] + +### + +# .base +assert_type(_a_nd.base, _ArrayND) +assert_type(_a_1d.base, _Array1D) +assert_type(_a_2d.base, _Array2D) + +# .coords +assert_type(_a_nd.coords, tuple[Any, ...]) +assert_type(_a_1d.coords, tuple[int]) +assert_type(_a_2d.coords, tuple[int, int]) + +# .index +assert_type(_a_nd.index, int) +assert_type(_a_1d.index, int) +assert_type(_a_2d.index, int) + +# .__len__() +assert_type(len(_a_nd), int) +assert_type(len(_a_1d), int) +assert_type(len(_a_2d), int) + +# .__iter__() +assert_type(iter(_a_nd), np.flatiter[_ArrayND]) +assert_type(iter(_a_1d), np.flatiter[_Array1D]) +assert_type(iter(_a_2d), np.flatiter[_Array2D]) + +# .__next__() +assert_type(next(_a_nd), np.str_) +assert_type(next(_a_1d), np.bytes_) +assert_type(next(_a_2d), np.int8) + +# .__getitem__(()) +assert_type(_a_nd[()], _ArrayND) +assert_type(_a_1d[()], _Array1D) +assert_type(_a_2d[()], _Array2D) +# .__getitem__(int) +assert_type(_a_nd[0], np.str_) +assert_type(_a_1d[0], np.bytes_) +assert_type(_a_2d[0], np.int8) +# .__getitem__(slice) +assert_type(_a_nd[::], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[::], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[::], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(EllipsisType) +assert_type(_a_nd[...], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[...], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[...], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[!]) +assert_type(_a_nd[[]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[]], 
np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[int]) +assert_type(_a_nd[[0]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[0]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[0]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[list[int]]) +assert_type(_a_nd[[[0]]], np.ndarray[tuple[int, int], np.dtypes.StrDType]) +assert_type(_a_1d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.BytesDType]) +assert_type(_a_2d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.Int8DType]) +# .__getitem__(list[list[list[list[int]]]]) +assert_type(_a_nd[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.StrDType]) +assert_type(_a_1d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.BytesDType]) +assert_type(_a_2d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.Int8DType]) + +# __array__() +assert_type(_a_nd.__array__(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.__array__(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.__array__(), np.ndarray[tuple[int], np.dtypes.Int8DType]) + +# .copy() +assert_type(_a_nd.copy(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.copy(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.copy(), np.ndarray[tuple[int], np.dtypes.Int8DType]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/fromnumeric.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/fromnumeric.pyi index 4aea830..477c3f4 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,14 +1,11 @@ """Tests for :mod:`_core.fromnumeric`.""" -from typing import Any, Literal as L +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - -class NDArraySubclass(npt.NDArray[np.complex128]): - ... +class NDArraySubclass(npt.NDArray[np.complex128]): ... 
AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] @@ -18,15 +15,19 @@ AR_i8: npt.NDArray[np.int64] AR_O: npt.NDArray[np.object_] AR_subclass: NDArraySubclass AR_m: npt.NDArray[np.timedelta64] -AR_0d: np.ndarray[tuple[()], np.dtype[Any]] -AR_1d: np.ndarray[tuple[int], np.dtype[Any]] -AR_nd: np.ndarray[tuple[int, ...], np.dtype[Any]] +AR_0d: np.ndarray[tuple[()]] +AR_1d: np.ndarray[tuple[int]] +AR_nd: np.ndarray b: np.bool f4: np.float32 i8: np.int64 f: float +# integer‑dtype subclass for argmin/argmax +class NDArrayIntSubclass(npt.NDArray[np.intp]): ... +AR_sub_i: NDArrayIntSubclass + assert_type(np.take(b, 0), np.bool) assert_type(np.take(f4, 0), np.float32) assert_type(np.take(f, 0), Any) @@ -39,7 +40,7 @@ assert_type(np.take(AR_f4, [0], out=AR_subclass), NDArraySubclass) assert_type(np.reshape(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.reshape(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.reshape(f, 1), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.reshape(f, 1), np.ndarray[tuple[int], np.dtype]) assert_type(np.reshape(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.reshape(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) @@ -48,11 +49,13 @@ assert_type(np.choose([1], [True, True]), npt.NDArray[Any]) assert_type(np.choose([1], AR_b), npt.NDArray[np.bool]) assert_type(np.choose([1], AR_b, out=AR_f4), npt.NDArray[np.float32]) -assert_type(np.repeat(b, 1), npt.NDArray[np.bool]) -assert_type(np.repeat(f4, 1), npt.NDArray[np.float32]) -assert_type(np.repeat(f, 1), npt.NDArray[Any]) -assert_type(np.repeat(AR_b, 1), npt.NDArray[np.bool]) -assert_type(np.repeat(AR_f4, 1), npt.NDArray[np.float32]) +assert_type(np.repeat(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(b, 1, axis=0), npt.NDArray[np.bool]) +assert_type(np.repeat(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(f, 1), np.ndarray[tuple[int], np.dtype[Any]]) 
+assert_type(np.repeat(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(AR_f4, 1, axis=0), npt.NDArray[np.float32]) # TODO: array_bdd tests for np.put() @@ -89,13 +92,13 @@ assert_type(np.argmax(AR_b), np.intp) assert_type(np.argmax(AR_f4), np.intp) assert_type(np.argmax(AR_b, axis=0), Any) assert_type(np.argmax(AR_f4, axis=0), Any) -assert_type(np.argmax(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.argmax(AR_f4, out=AR_sub_i), NDArrayIntSubclass) assert_type(np.argmin(AR_b), np.intp) assert_type(np.argmin(AR_f4), np.intp) assert_type(np.argmin(AR_b, axis=0), Any) assert_type(np.argmin(AR_f4, axis=0), Any) -assert_type(np.argmin(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.argmin(AR_f4, out=AR_sub_i), NDArrayIntSubclass) assert_type(np.searchsorted(AR_b[0], 0), np.intp) assert_type(np.searchsorted(AR_f4[0], 0), np.intp) @@ -104,7 +107,7 @@ assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp]) assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) -assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[int, int], np.dtype[Any]]) +assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[int, int], np.dtype]) assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) @@ -120,25 +123,26 @@ assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.trace(AR_b), Any) assert_type(np.trace(AR_f4), Any) assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.ravel(f), 
np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | np.bool]]) +assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) assert_type(np.shape([1]), tuple[int]) assert_type(np.shape([[2]]), tuple[int, int]) -assert_type(np.shape([[[3]]]), tuple[int, ...]) -assert_type(np.shape(AR_b), tuple[int, ...]) -assert_type(np.shape(AR_nd), tuple[int, ...]) +assert_type(np.shape([[[3]]]), tuple[Any, ...]) +assert_type(np.shape(AR_b), tuple[Any, ...]) +assert_type(np.shape(AR_nd), tuple[Any, ...]) # these fail on mypy, but it works as expected with pyright/pylance # assert_type(np.shape(AR_0d), tuple[()]) # assert_type(np.shape(AR_1d), tuple[int]) @@ -249,8 +253,8 @@ assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.prod(AR_b), np.int_) assert_type(np.prod(AR_u8), np.uint64) assert_type(np.prod(AR_i8), np.int64) -assert_type(np.prod(AR_f4), np.floating[Any]) -assert_type(np.prod(AR_c16), np.complexfloating[Any, Any]) +assert_type(np.prod(AR_f4), np.floating) +assert_type(np.prod(AR_c16), np.complexfloating) assert_type(np.prod(AR_O), Any) assert_type(np.prod(AR_f4, axis=0), Any) assert_type(np.prod(AR_f4, 
keepdims=True), Any) @@ -261,10 +265,10 @@ assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.cumprod(AR_b), npt.NDArray[np.int_]) assert_type(np.cumprod(AR_u8), npt.NDArray[np.uint64]) assert_type(np.cumprod(AR_i8), npt.NDArray[np.int64]) -assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating[Any]]) -assert_type(np.cumprod(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating]) +assert_type(np.cumprod(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.cumprod(AR_O), npt.NDArray[np.object_]) -assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating[Any]]) +assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating]) assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any]) assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) @@ -272,10 +276,10 @@ assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_]) assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64]) assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64]) -assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating[Any]]) -assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating]) +assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_]) -assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating[Any]]) +assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating]) assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any]) assert_type(np.cumulative_prod(AR_f4, out=AR_subclass), NDArraySubclass) @@ -302,11 +306,11 @@ assert_type(np.around(AR_f4), 
npt.NDArray[np.float32]) assert_type(np.around([1.5]), npt.NDArray[Any]) assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.mean(AR_b), np.floating[Any]) -assert_type(np.mean(AR_i8), np.floating[Any]) -assert_type(np.mean(AR_f4), np.floating[Any]) +assert_type(np.mean(AR_b), np.floating) +assert_type(np.mean(AR_i8), np.floating) +assert_type(np.mean(AR_f4), np.floating) assert_type(np.mean(AR_m), np.timedelta64) -assert_type(np.mean(AR_c16), np.complexfloating[Any, Any]) +assert_type(np.mean(AR_c16), np.complexfloating) assert_type(np.mean(AR_O), Any) assert_type(np.mean(AR_f4, axis=0), Any) assert_type(np.mean(AR_f4, keepdims=True), Any) @@ -320,10 +324,10 @@ assert_type(np.mean(AR_f4, None, np.float64, keepdims=False), np.float64) assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) assert_type(np.mean(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) -assert_type(np.std(AR_b), np.floating[Any]) -assert_type(np.std(AR_i8), np.floating[Any]) -assert_type(np.std(AR_f4), np.floating[Any]) -assert_type(np.std(AR_c16), np.floating[Any]) +assert_type(np.std(AR_b), np.floating) +assert_type(np.std(AR_i8), np.floating) +assert_type(np.std(AR_f4), np.floating) +assert_type(np.std(AR_c16), np.floating) assert_type(np.std(AR_O), Any) assert_type(np.std(AR_f4, axis=0), Any) assert_type(np.std(AR_f4, keepdims=True), Any) @@ -331,10 +335,10 @@ assert_type(np.std(AR_f4, dtype=float), Any) assert_type(np.std(AR_f4, dtype=np.float64), np.float64) assert_type(np.std(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.var(AR_b), np.floating[Any]) -assert_type(np.var(AR_i8), np.floating[Any]) -assert_type(np.var(AR_f4), np.floating[Any]) -assert_type(np.var(AR_c16), np.floating[Any]) +assert_type(np.var(AR_b), np.floating) +assert_type(np.var(AR_i8), np.floating) +assert_type(np.var(AR_f4), np.floating) +assert_type(np.var(AR_c16), np.floating) assert_type(np.var(AR_O), Any) 
assert_type(np.var(AR_f4, axis=0), Any) assert_type(np.var(AR_f4, keepdims=True), Any) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/getlimits.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/getlimits.pyi index 47e6e66..7ca7925 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/getlimits.pyi @@ -1,13 +1,11 @@ -from typing import Any +from typing import assert_type import numpy as np -from numpy._typing import _64Bit - -from typing_extensions import assert_type, LiteralString f: float f8: np.float64 c8: np.complex64 +c16: np.complex128 i: int i8: np.int64 @@ -17,9 +15,10 @@ finfo_f8: np.finfo[np.float64] iinfo_i8: np.iinfo[np.int64] assert_type(np.finfo(f), np.finfo[np.float64]) -assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]]) +assert_type(np.finfo(f8), np.finfo[np.float64]) assert_type(np.finfo(c8), np.finfo[np.float32]) -assert_type(np.finfo('f2'), np.finfo[np.floating[Any]]) +assert_type(np.finfo(c16), np.finfo[np.float64]) +assert_type(np.finfo("f2"), np.finfo[np.float16]) assert_type(finfo_f8.dtype, np.dtype[np.float64]) assert_type(finfo_f8.bits, int) @@ -43,11 +42,12 @@ assert_type(finfo_f8.smallest_subnormal, np.float64) assert_type(np.iinfo(i), np.iinfo[np.int_]) assert_type(np.iinfo(i8), np.iinfo[np.int64]) assert_type(np.iinfo(u4), np.iinfo[np.uint32]) -assert_type(np.iinfo('i2'), np.iinfo[Any]) +assert_type(np.iinfo("i2"), np.iinfo[np.int16]) +assert_type(np.iinfo("u2"), np.iinfo[np.uint16]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) -assert_type(iinfo_i8.kind, LiteralString) +assert_type(iinfo_i8.kind, str) assert_type(iinfo_i8.bits, int) -assert_type(iinfo_i8.key, LiteralString) +assert_type(iinfo_i8.key, str) assert_type(iinfo_i8.min, int) assert_type(iinfo_i8.max, int) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/histograms.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/histograms.pyi index 6218f57..d1acfe1 100644 --- 
a/blimgui/dist64/numpy/typing/tests/data/reveal/histograms.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/histograms.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/index_tricks.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/index_tricks.pyi index da26d0e..4cb12e6 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -1,11 +1,9 @@ from types import EllipsisType -from typing import Any, Literal +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_LIKE_b: list[bool] AR_LIKE_i: list[int] AR_LIKE_f: list[float] @@ -20,10 +18,10 @@ assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[Any]) -assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[int, ...], np.int64]) -assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[int, ...], np.float64]) -assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[int, ...], np.str_]) -assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[int, ...], Any]) +assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[Any, ...], np.int64]) +assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[Any, ...], np.float64]) +assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[Any, ...], np.str_]) +assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[Any, ...], Any]) assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) @@ -33,7 +31,7 @@ assert_type(iter(np.ndenumerate(AR_LIKE_O)), 
np.ndenumerate[Any]) assert_type(np.ndindex(1, 2, 3), np.ndindex) assert_type(np.ndindex((1, 2, 3)), np.ndindex) assert_type(iter(np.ndindex(1, 2, 3)), np.ndindex) -assert_type(next(np.ndindex(1, 2, 3)), tuple[int, ...]) +assert_type(next(np.ndindex(1, 2, 3)), tuple[Any, ...]) assert_type(np.unravel_index([22, 41, 37], (7, 6)), tuple[npt.NDArray[np.intp], ...]) assert_type(np.unravel_index([31, 41, 13], (7, 6), order="F"), tuple[npt.NDArray[np.intp], ...]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/lib_function_base.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/lib_function_base.pyi index 398e16b..815ddea 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -1,46 +1,55 @@ -from fractions import Fraction -from typing import Any from collections.abc import Callable +from fractions import Fraction +from typing import Any, LiteralString, assert_type, type_check_only import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - -vectorized_func: np.vectorize - f8: np.float64 +AR_LIKE_b: list[bool] +AR_LIKE_i8: list[int] AR_LIKE_f8: list[float] AR_LIKE_c16: list[complex] AR_LIKE_O: list[Fraction] +AR_u1: npt.NDArray[np.uint8] AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] AR_b: npt.NDArray[np.bool] AR_U: npt.NDArray[np.str_] -CHAR_AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]] +CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] + +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, 
int], np.dtype[np.float64]] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] AR_b_list: list[npt.NDArray[np.bool]] -def func( - a: npt.NDArray[Any], - posarg: bool = ..., - /, - arg: int = ..., - *, - kwarg: str = ..., -) -> npt.NDArray[Any]: ... +@type_check_only +def func(a: np.ndarray, posarg: bool = ..., /, arg: int = ..., *, kwarg: str = ...) -> np.ndarray: ... +@type_check_only +def func_f8(a: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: ... + +### +# vectorize +vectorized_func: np.vectorize assert_type(vectorized_func.pyfunc, Callable[..., Any]) assert_type(vectorized_func.cache, bool) -assert_type(vectorized_func.signature, None | str) -assert_type(vectorized_func.otypes, None | str) +assert_type(vectorized_func.signature, LiteralString | None) +assert_type(vectorized_func.otypes, LiteralString | None) assert_type(vectorized_func.excluded, set[int | str]) -assert_type(vectorized_func.__doc__, None | str) +assert_type(vectorized_func.__doc__, str | None) assert_type(vectorized_func([1]), Any) assert_type(np.vectorize(int), np.vectorize) assert_type( @@ -48,137 +57,270 @@ assert_type( np.vectorize, ) +# rot90 +assert_type(np.rot90(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) -assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), npt.NDArray[Any]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), np.ndarray) -assert_type(np.flip(f8), np.float64) -assert_type(np.flip(1.0), Any) +# flip +assert_type(np.flip(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) -assert_type(np.flip(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.flip(AR_LIKE_f8, axis=0), np.ndarray) +# iterable assert_type(np.iterable(1), bool) assert_type(np.iterable([1]), bool) -assert_type(np.average(AR_f8), np.floating[Any]) -assert_type(np.average(AR_f8, weights=AR_c16), np.complexfloating[Any, Any]) +# average 
+assert_type(np.average(AR_f8_2d), np.float64) +assert_type(np.average(AR_f8_2d, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.average(AR_f8), np.float64) +assert_type(np.average(AR_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, weights=AR_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_LIKE_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.average(AR_O), Any) -assert_type(np.average(AR_f8, returned=True), tuple[np.floating[Any], np.floating[Any]]) -assert_type(np.average(AR_f8, weights=AR_c16, returned=True), tuple[np.complexfloating[Any, Any], np.complexfloating[Any, Any]]) +assert_type(np.average(AR_O, axis=1), np.ndarray) +assert_type(np.average(AR_O, keepdims=True), np.ndarray) assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) -assert_type(np.average(AR_f8, axis=0), Any) -assert_type(np.average(AR_f8, axis=0, returned=True), tuple[Any, Any]) +assert_type(np.average(AR_O, axis=1, returned=True), tuple[np.ndarray, np.ndarray]) +assert_type(np.average(AR_O, keepdims=True, returned=True), tuple[np.ndarray, 
np.ndarray]) +# asarray_chkfinite +assert_type(np.asarray_chkfinite(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) -assert_type(np.asarray_chkfinite(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_LIKE_f8), np.ndarray) assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=float), np.ndarray) +# piecewise +assert_type(np.piecewise(AR_f8_1d, AR_b, [func]), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b, [func_f8]), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=''), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=''), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), npt.NDArray[Any]) - -assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=""), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=""), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), np.ndarray) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) + +# extract +assert_type(np.extract(AR_i8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.extract(AR_i8, AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.extract(AR_i8, AR_LIKE_f8), np.ndarray[tuple[int], 
np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# select +assert_type(np.select([AR_b], [AR_f8_1d]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.select([AR_b], [AR_f8]), npt.NDArray[np.float64]) + +# places +assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) -assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any]) +# copy +assert_type(np.copy(AR_LIKE_f8), np.ndarray) assert_type(np.copy(AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) - -assert_type(np.gradient(AR_f8, axis=None), Any) -assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any) - -assert_type(np.diff("bob", n=0), str) -assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) -assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +# pyright correctly infers `NDArray[str_]` here +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright: ignore[reportAssertTypeFailure] +# gradient +assert_type(np.gradient(AR_f8_1d, 1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type( + np.gradient(AR_f8_2d, [1, 2], [2, 3.5, 4]), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.gradient(AR_f8_3d), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + ], +) +assert_type(np.gradient(AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]] | Any) 
+assert_type(np.gradient(AR_LIKE_f8, edge_order=2), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.gradient(AR_LIKE_c16, axis=0), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# diff +assert_type(np.diff("git", n=0), str) +assert_type(np.diff(AR_f8), npt.NDArray[np.float64]) +assert_type(np.diff(AR_f8_1d, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_f8_2d, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.diff(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.diff(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# interp assert_type(np.interp(1, [1], AR_f8), np.float64) assert_type(np.interp(1, [1], [1]), np.float64) assert_type(np.interp(1, [1], AR_c16), np.complex128) -assert_type(np.interp(1, [1], [1j]), np.complex128) # pyright correctly infers `complex128 | float64` -assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64]) -assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64]) -assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128]) -assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128]) # pyright correctly infers `NDArray[complex128 | float64]` - -assert_type(np.angle(f8), np.floating[Any]) -assert_type(np.angle(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating[Any]]) -assert_type(np.angle(AR_O), npt.NDArray[np.object_]) - -assert_type(np.unwrap(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.interp(1, [1], [1j]), np.complex128) +assert_type(np.interp([1], [1], AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], [1]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) 
+assert_type(np.interp([1], [1], [1j]), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# angle +assert_type(np.angle(1), np.float64) +assert_type(np.angle(1, deg=True), np.float64) +assert_type(np.angle(1j), np.float64) +assert_type(np.angle(f8), np.float64) +assert_type(np.angle(AR_b), npt.NDArray[np.float64]) +assert_type(np.angle(AR_u1), npt.NDArray[np.float64]) +assert_type(np.angle(AR_i8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f2), npt.NDArray[np.float16]) +assert_type(np.angle(AR_f4), npt.NDArray[np.float32]) +assert_type(np.angle(AR_c8), npt.NDArray[np.float32]) +assert_type(np.angle(AR_f8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_c16), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f10), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_c20), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# unwrap +assert_type(np.unwrap(AR_f2), npt.NDArray[np.float16]) +assert_type(np.unwrap(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unwrap(AR_f10), npt.NDArray[np.longdouble]) assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) - -assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) - +assert_type(np.unwrap(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_3d), np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_i8), 
np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# sort_complex +assert_type(np.sort_complex(AR_u1), npt.NDArray[np.complex64]) +assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.sort_complex(AR_f10), npt.NDArray[np.clongdouble]) +assert_type(np.sort_complex(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.sort_complex(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# trim_zeros assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) -assert_type(np.extract(AR_i8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.extract(AR_i8, AR_LIKE_f8), npt.NDArray[Any]) - -assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) - -assert_type(np.cov(AR_f8, bias=True), npt.NDArray[np.floating[Any]]) -assert_type(np.cov(AR_f8, AR_c16, ddof=1), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.cov(AR_f8, fweights=AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.corrcoef(AR_f8, rowvar=True), npt.NDArray[np.floating[Any]]) -assert_type(np.corrcoef(AR_f8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.corrcoef(AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.corrcoef(AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.blackman(5), npt.NDArray[np.floating[Any]]) -assert_type(np.bartlett(6), npt.NDArray[np.floating[Any]]) -assert_type(np.hanning(4.5), npt.NDArray[np.floating[Any]]) -assert_type(np.hamming(0), npt.NDArray[np.floating[Any]]) -assert_type(np.i0(AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.kaiser(4, 5.9), npt.NDArray[np.floating[Any]]) - -assert_type(np.sinc(1.0), np.floating[Any]) -assert_type(np.sinc(1j), np.complexfloating[Any, Any]) -assert_type(np.sinc(AR_f8), npt.NDArray[np.floating[Any]]) 
-assert_type(np.sinc(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) - -assert_type(np.median(AR_f8, keepdims=False), np.floating[Any]) -assert_type(np.median(AR_c16, overwrite_input=True), np.complexfloating[Any, Any]) +# cov +assert_type(np.cov(AR_f8_1d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.cov(AR_LIKE_f8), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, dtype=np.float16), np.ndarray[tuple[()], np.dtype[np.float16]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.cov(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.cov(AR_LIKE_f8, dtype=float), np.ndarray[tuple[()]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# corrcoef +assert_type(np.corrcoef(AR_f8_1d), np.float64) +assert_type(np.corrcoef(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.corrcoef(AR_LIKE_f8), np.float64) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=np.float16), np.float16) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) 
+assert_type(np.corrcoef(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=float), Any) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# window functions +assert_type(np.blackman(5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.bartlett(6), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hanning(4.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hamming(0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.kaiser(4, 5.9), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# i0 (bessel function) +assert_type(np.i0(AR_i8), npt.NDArray[np.float64]) + +# sinc (cardinal sine function) +assert_type(np.sinc(1.0), np.float64) +assert_type(np.sinc(1j), np.complex128 | Any) +assert_type(np.sinc(AR_f8), npt.NDArray[np.float64]) +assert_type(np.sinc(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.sinc(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# median +assert_type(np.median(AR_f8, keepdims=False), np.float64) +assert_type(np.median(AR_c16, overwrite_input=True), np.complex128) assert_type(np.median(AR_m), np.timedelta64) assert_type(np.median(AR_O), Any) -assert_type(np.median(AR_f8, keepdims=True), Any) -assert_type(np.median(AR_c16, axis=0), Any) +assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.median(AR_c16, keepdims=True), npt.NDArray[np.complex128]) +assert_type(np.median(AR_c16, axis=0), npt.NDArray[np.complex128]) +assert_type(np.median(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_LIKE_c16, keepdims=True), npt.NDArray[np.complex128]) assert_type(np.median(AR_LIKE_f8, out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.percentile(AR_f8, 50), np.floating[Any]) 
-assert_type(np.percentile(AR_c16, 50), np.complexfloating[Any, Any]) +# percentile +assert_type(np.percentile(AR_f8, 50), np.float64) +assert_type(np.percentile(AR_f8, 50, axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, 50), np.complex128) assert_type(np.percentile(AR_m, 50), np.timedelta64) assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) assert_type(np.percentile(AR_O, 50), Any) -assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.floating[Any]]) -assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complex128]) assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) -assert_type(np.percentile(AR_f8, [50], keepdims=True), Any) -assert_type(np.percentile(AR_f8, [50], axis=[1]), Any) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.quantile(AR_f8, 0.5), np.floating[Any]) -assert_type(np.quantile(AR_c16, 0.5), np.complexfloating[Any, Any]) -assert_type(np.quantile(AR_m, 0.5), np.timedelta64) -assert_type(np.quantile(AR_M, 0.5, overwrite_input=True), np.datetime64) -assert_type(np.quantile(AR_O, 0.5), Any) -assert_type(np.quantile(AR_f8, [0.5]), npt.NDArray[np.floating[Any]]) 
-assert_type(np.quantile(AR_c16, [0.5]), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.quantile(AR_m, [0.5]), npt.NDArray[np.timedelta64]) -assert_type(np.quantile(AR_M, [0.5], method="nearest"), npt.NDArray[np.datetime64]) -assert_type(np.quantile(AR_O, [0.5]), npt.NDArray[np.object_]) -assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) -assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) -assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) - +# quantile +assert_type(np.quantile(AR_f8, 0.50), np.float64) +assert_type(np.quantile(AR_f8, 0.50, axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, 0.50), np.complex128) +assert_type(np.quantile(AR_m, 0.50), np.timedelta64) +assert_type(np.quantile(AR_M, 0.50, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.50), Any) +assert_type(np.quantile(AR_f8, [0.50]), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, [0.50]), npt.NDArray[np.complex128]) +assert_type(np.quantile(AR_m, [0.50]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.50]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], out=AR_c16), npt.NDArray[np.complex128]) + +# trapezoid assert_type(np.trapezoid(AR_LIKE_f8), np.float64) assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) @@ -191,20 +333,77 @@ 
assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) -assert_type(np.trapezoid(AR_O), float | npt.NDArray[np.object_]) -assert_type(np.trapezoid(AR_O, AR_LIKE_f8), float | npt.NDArray[np.object_]) +assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), npt.NDArray[np.object_] | Any) -assert_type(np.meshgrid(AR_f8, AR_i8, copy=False), tuple[npt.NDArray[Any], ...]) -assert_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij"), tuple[npt.NDArray[Any], ...]) +# meshgrid +assert_type(np.meshgrid(), tuple[()]) +assert_type( + np.meshgrid(AR_f8), + tuple[ + np.ndarray[tuple[int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_c16, indexing="ij"), + tuple[ + np.ndarray[tuple[int], np.dtype[np.complex128]], + ], +) +assert_type( + np.meshgrid(AR_i8, AR_f8, copy=False), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_i8, AR_c16), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int, int], np.dtype[np.complex128]], + ], +) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[np.float64], ...]) 
+assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_LIKE_f8), tuple[np.ndarray, ...]) +assert_type(np.meshgrid(*AR_LIKE_f8), tuple[np.ndarray, ...]) -assert_type(np.delete(AR_f8, np.s_[:5]), npt.NDArray[np.float64]) -assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), npt.NDArray[Any]) +# delete +assert_type(np.delete(AR_f8, np.s_[:5]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), np.ndarray) -assert_type(np.insert(AR_f8, np.s_[:5], 5), npt.NDArray[np.float64]) -assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), npt.NDArray[Any]) +# insert +assert_type(np.insert(AR_f8, np.s_[:5], 5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), np.ndarray) -assert_type(np.append(AR_f8, 5), npt.NDArray[Any]) -assert_type(np.append(AR_LIKE_f8, 1j, axis=0), npt.NDArray[Any]) +# append +assert_type(np.append(f8, f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_LIKE_f8, AR_LIKE_c16, axis=0), np.ndarray) +assert_type(np.append(AR_f8, AR_LIKE_f8, axis=0), np.ndarray) +# digitize assert_type(np.digitize(4.5, [1]), np.intp) assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/lib_polynomial.pyi index cf5eab4..4f70917 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/lib_polynomial.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -1,11 +1,9 @@ -from typing import Any, NoReturn from collections.abc import Iterator +from typing import Any, NoReturn, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_b: npt.NDArray[np.bool] AR_u4: npt.NDArray[np.uint32] AR_i8: npt.NDArray[np.int64] @@ -51,18 +49,18 @@ 
assert_type(iter(poly_obj), Iterator[Any]) assert_type(poly_obj.deriv(), np.poly1d) assert_type(poly_obj.integ(), np.poly1d) -assert_type(np.poly(poly_obj), npt.NDArray[np.floating[Any]]) -assert_type(np.poly(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.poly(AR_c16), npt.NDArray[np.floating[Any]]) +assert_type(np.poly(poly_obj), npt.NDArray[np.floating]) +assert_type(np.poly(AR_f8), npt.NDArray[np.floating]) +assert_type(np.poly(AR_c16), npt.NDArray[np.floating]) assert_type(np.polyint(poly_obj), np.poly1d) -assert_type(np.polyint(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polyint(AR_O, m=2), npt.NDArray[np.object_]) assert_type(np.polyder(poly_obj), np.poly1d) -assert_type(np.polyder(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.polyder(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyder(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polyder(AR_O, m=2), npt.NDArray[np.object_]) assert_type(np.polyfit(AR_f8, AR_f8, 2), npt.NDArray[np.float64]) @@ -103,44 +101,47 @@ assert_type( ) assert_type(np.polyval(AR_b, AR_b), npt.NDArray[np.int64]) -assert_type(np.polyval(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.polyval(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.polyval(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyval(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyval(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) 
assert_type(np.polyval(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polyadd(poly_obj, AR_i8), np.poly1d) assert_type(np.polyadd(AR_f8, poly_obj), np.poly1d) assert_type(np.polyadd(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.polyadd(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.polyadd(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.polyadd(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.polyadd(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyadd(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyadd(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyadd(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyadd(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) -assert_type(np.polysub(AR_b, AR_b), NoReturn) -assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.polysub(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +def test_invalid_polysub() -> None: + assert_type(np.polysub(AR_b, AR_b), NoReturn) + +assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polysub(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polysub(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polymul(poly_obj, AR_i8), np.poly1d) assert_type(np.polymul(AR_f8, poly_obj), np.poly1d) assert_type(np.polymul(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.polymul(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) 
-assert_type(np.polymul(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.polymul(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.polymul(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polymul(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polymul(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polymul(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polymul(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polymul(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polydiv(poly_obj, AR_i8), tuple[np.poly1d, np.poly1d]) assert_type(np.polydiv(AR_f8, poly_obj), tuple[np.poly1d, np.poly1d]) -assert_type(np.polydiv(AR_b, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) -assert_type(np.polydiv(AR_u4, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) -assert_type(np.polydiv(AR_i8, AR_i8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) -assert_type(np.polydiv(AR_f8, AR_i8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) -assert_type(np.polydiv(AR_i8, AR_c16), tuple[npt.NDArray[np.complexfloating[Any, Any]], npt.NDArray[np.complexfloating[Any, Any]]]) +assert_type(np.polydiv(AR_b, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_u4, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_f8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.complexfloating]]) assert_type(np.polydiv(AR_O, AR_O), tuple[npt.NDArray[Any], npt.NDArray[Any]]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/lib_utils.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/lib_utils.pyi index 
db3f141..754d9c4 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/lib_utils.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -1,10 +1,9 @@ from io import StringIO +from typing import assert_type import numpy as np -import numpy.typing as npt import numpy.lib.array_utils as array_utils - -from typing_extensions import assert_type +import numpy.typing as npt AR: npt.NDArray[np.float64] AR_DICT: dict[str, npt.NDArray[np.float64]] diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/lib_version.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/lib_version.pyi index 5adf197..c30f26f 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/lib_version.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/lib_version.pyi @@ -1,6 +1,6 @@ -from numpy.lib import NumpyVersion +from typing import assert_type -from typing_extensions import assert_type +from numpy.lib import NumpyVersion version = NumpyVersion("1.8.0") diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/linalg.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/linalg.pyi index 1df9084..39c5c0c 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/linalg.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/linalg.pyi @@ -1,15 +1,20 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt from numpy.linalg._linalg import ( - QRResult, EigResult, EighResult, SVDResult, SlogdetResult + EighResult, + EigResult, + QRResult, + SlogdetResult, + SVDResult, ) -from typing_extensions import assert_type - +float_list_2d: list[list[float]] AR_i8: npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] AR_m: npt.NDArray[np.timedelta64] @@ -17,20 +22,20 @@ AR_S: npt.NDArray[np.str_] AR_b: npt.NDArray[np.bool] assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) 
-assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) @@ -38,12 +43,12 @@ assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) 
+assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) @@ -53,12 +58,12 @@ assert_type(np.linalg.qr(AR_f8), QRResult) assert_type(np.linalg.qr(AR_c16), QRResult) assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) -assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating[Any]] | npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating] | npt.NDArray[np.complexfloating]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating]) assert_type(np.linalg.eig(AR_i8), EigResult) assert_type(np.linalg.eig(AR_f8), EigResult) @@ -72,8 +77,20 @@ assert_type(np.linalg.svd(AR_i8), SVDResult) assert_type(np.linalg.svd(AR_f8), SVDResult) assert_type(np.linalg.svd(AR_c16), SVDResult) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_f8, 
compute_uv=False), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.svd(AR_i8, True, False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) + +assert_type(np.linalg.svdvals(AR_b), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1, 2], [3, 4]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1.0, 2.0], [3.0, 4.0]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1j, 2j], [3j, 4j]]), npt.NDArray[np.float64]) assert_type(np.linalg.cond(AR_i8), Any) assert_type(np.linalg.cond(AR_f8), Any) @@ -84,8 +101,8 @@ assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) @@ -96,24 +113,31 @@ assert_type(np.linalg.det(AR_f8), Any) assert_type(np.linalg.det(AR_c16), Any) assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) -assert_type(np.linalg.lstsq(AR_i8, AR_f8), 
tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]], np.int32, npt.NDArray[np.floating[Any]]]) -assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating[Any, Any]], npt.NDArray[np.floating[Any]], np.int32, npt.NDArray[np.floating[Any]]]) +assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) +assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) -assert_type(np.linalg.norm(AR_i8), np.floating[Any]) -assert_type(np.linalg.norm(AR_f8), np.floating[Any]) -assert_type(np.linalg.norm(AR_c16), np.floating[Any]) -assert_type(np.linalg.norm(AR_S), np.floating[Any]) +assert_type(np.linalg.norm(AR_i8), np.floating) +assert_type(np.linalg.norm(AR_f8), np.floating) +assert_type(np.linalg.norm(AR_c16), np.floating) +assert_type(np.linalg.norm(AR_S), np.floating) assert_type(np.linalg.norm(AR_f8, axis=0), Any) -assert_type(np.linalg.matrix_norm(AR_i8), np.floating[Any]) -assert_type(np.linalg.matrix_norm(AR_f8), np.floating[Any]) -assert_type(np.linalg.matrix_norm(AR_c16), np.floating[Any]) -assert_type(np.linalg.matrix_norm(AR_S), np.floating[Any]) +assert_type(np.linalg.matrix_norm(AR_i8), np.floating) +assert_type(np.linalg.matrix_norm(AR_f8), np.floating) +assert_type(np.linalg.matrix_norm(AR_c16), np.floating) +assert_type(np.linalg.matrix_norm(AR_S), np.floating) + +assert_type(np.linalg.vector_norm(AR_i8), np.floating) +assert_type(np.linalg.vector_norm(AR_f8), np.floating) +assert_type(np.linalg.vector_norm(AR_c16), np.floating) +assert_type(np.linalg.vector_norm(AR_S), np.floating) -assert_type(np.linalg.vector_norm(AR_i8), np.floating[Any]) -assert_type(np.linalg.vector_norm(AR_f8), np.floating[Any]) -assert_type(np.linalg.vector_norm(AR_c16), np.floating[Any]) -assert_type(np.linalg.vector_norm(AR_S), np.floating[Any]) +assert_type(np.linalg.tensordot(AR_b, 
AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.linalg.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) @@ -121,10 +145,10 @@ assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) -assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ma.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ma.pyi new file mode 100644 index 0000000..8eef32d --- /dev/null +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ma.pyi @@ -0,0 +1,1098 @@ +from typing import Any, Generic, Literal, NoReturn, TypeAlias, TypeVar, assert_type + +import numpy as np +from numpy import 
dtype, generic +from numpy._typing import NDArray, _AnyShape + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) + +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] +_NoMaskType: TypeAlias = np.bool[Literal[False]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... + +class IntoMaskedArraySubClass(Generic[_ScalarT_co]): + def __array__(self) -> MaskedArraySubclass[_ScalarT_co]: ... + +MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] + +AR_b: NDArray[np.bool] +AR_f4: NDArray[np.float32] +AR_i8: NDArray[np.int64] +AR_u4: NDArray[np.uint32] +AR_dt64: NDArray[np.datetime64] +AR_td64: NDArray[np.timedelta64] +AR_o: NDArray[np.timedelta64] + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_td64: list[np.timedelta64] +AR_LIKE_dt64: list[np.datetime64] +AR_LIKE_o: list[np.object_] +AR_number: NDArray[np.number] + +MAR_c8: MaskedArray[np.complex64] +MAR_c16: MaskedArray[np.complex128] +MAR_b: MaskedArray[np.bool] +MAR_f4: MaskedArray[np.float32] +MAR_f8: MaskedArray[np.float64] +MAR_i8: MaskedArray[np.int64] +MAR_u4: MaskedArray[np.uint32] +MAR_dt64: MaskedArray[np.datetime64] +MAR_td64: MaskedArray[np.timedelta64] +MAR_o: MaskedArray[np.object_] +MAR_s: MaskedArray[np.str_] +MAR_byte: MaskedArray[np.bytes_] +MAR_V: MaskedArray[np.void] +MAR_floating: MaskedArray[np.floating] +MAR_number: MaskedArray[np.number] + +MAR_subclass: MaskedArraySubclassC +MAR_into_subclass: IntoMaskedArraySubClass[np.float32] + +MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] +MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] +MAR_2d_V: np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]] + +b: np.bool +f4: np.float32 +f: float +i: int + +assert_type(MAR_1d.shape, tuple[int]) + +assert_type(MAR_f4.dtype, 
np.dtype[np.float32]) + +assert_type(int(MAR_i8), int) +assert_type(float(MAR_f4), float) + +assert_type(np.ma.min(MAR_b), np.bool) +assert_type(np.ma.min(MAR_f4), np.float32) +assert_type(np.ma.min(MAR_b, axis=0), Any) +assert_type(np.ma.min(MAR_f4, axis=0), Any) +assert_type(np.ma.min(MAR_b, keepdims=True), Any) +assert_type(np.ma.min(MAR_f4, keepdims=True), Any) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.min(), np.bool) +assert_type(MAR_f4.min(), np.float32) +assert_type(MAR_b.min(axis=0), Any) +assert_type(MAR_f4.min(axis=0), Any) +assert_type(MAR_b.min(keepdims=True), Any) +assert_type(MAR_f4.min(keepdims=True), Any) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.max(MAR_b), np.bool) +assert_type(np.ma.max(MAR_f4), np.float32) +assert_type(np.ma.max(MAR_b, axis=0), Any) +assert_type(np.ma.max(MAR_f4, axis=0), Any) +assert_type(np.ma.max(MAR_b, keepdims=True), Any) +assert_type(np.ma.max(MAR_f4, keepdims=True), Any) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.max(), np.bool) +assert_type(MAR_f4.max(), np.float32) +assert_type(MAR_b.max(axis=0), Any) +assert_type(MAR_f4.max(axis=0), Any) +assert_type(MAR_b.max(keepdims=True), Any) +assert_type(MAR_f4.max(keepdims=True), Any) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.ptp(MAR_b), np.bool) 
+assert_type(np.ma.ptp(MAR_f4), np.float32) +assert_type(np.ma.ptp(MAR_b, axis=0), Any) +assert_type(np.ma.ptp(MAR_f4, axis=0), Any) +assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) +assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.ptp(), np.bool) +assert_type(MAR_f4.ptp(), np.float32) +assert_type(MAR_b.ptp(axis=0), Any) +assert_type(MAR_f4.ptp(axis=0), Any) +assert_type(MAR_b.ptp(keepdims=True), Any) +assert_type(MAR_f4.ptp(keepdims=True), Any) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.argmin(), np.intp) +assert_type(MAR_f4.argmin(), np.intp) +assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) +assert_type(MAR_b.argmin(axis=0), Any) +assert_type(MAR_f4.argmin(axis=0), Any) +assert_type(MAR_b.argmin(keepdims=True), Any) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.argmin(MAR_b), np.intp) +assert_type(np.ma.argmin(MAR_f4), np.intp) +assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) +assert_type(np.ma.argmin(MAR_b, axis=0), Any) +assert_type(np.ma.argmin(MAR_f4, axis=0), Any) +assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.argmax(), np.intp) +assert_type(MAR_f4.argmax(), np.intp) +assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) +assert_type(MAR_b.argmax(axis=0), Any) 
+assert_type(MAR_f4.argmax(axis=0), Any) +assert_type(MAR_b.argmax(keepdims=True), Any) +assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.argmax(MAR_b), np.intp) +assert_type(np.ma.argmax(MAR_f4), np.intp) +assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) +assert_type(np.ma.argmax(MAR_b, axis=0), Any) +assert_type(np.ma.argmax(MAR_f4, axis=0), Any) +assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.all(), np.bool) +assert_type(MAR_f4.all(), np.bool) +assert_type(MAR_f4.all(keepdims=False), np.bool) +assert_type(MAR_b.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.any(), np.bool) +assert_type(MAR_f4.any(), np.bool) +assert_type(MAR_f4.any(keepdims=False), np.bool) +assert_type(MAR_b.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f4.sort(), None) +assert_type(MAR_f4.sort(axis=0, kind="quicksort", order="K", endwith=False, 
fill_value=42., stable=False), None) + +assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) +assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) + +assert_type(MAR_f8.take(0), np.float64) +assert_type(MAR_1d.take(0), Any) +assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.take(f, 0), Any) +assert_type(np.ma.take(f4, 0), np.float32) +assert_type(np.ma.take(MAR_f8, 0), np.float64) +assert_type(np.ma.take(AR_f4, 0), np.float32) +assert_type(np.ma.take(MAR_1d, 0), Any) +assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) +assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take([1], [0]), MaskedArray[Any]) +assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) + +assert_type(MAR_f4.partition(1), None) +assert_type(MAR_V.partition(1, axis=0, kind="introselect", order="K"), None) + +assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), MaskedArray[np.intp]) + +assert_type(np.ma.ndim(f4), int) +assert_type(np.ma.ndim(MAR_b), int) +assert_type(np.ma.ndim(AR_f4), int) + +assert_type(np.ma.size(b), int) +assert_type(np.ma.size(MAR_f4, axis=0), int) +assert_type(np.ma.size(AR_f4), int) + +assert_type(np.ma.is_masked(MAR_f4), bool) + +assert_type(MAR_f4.ids(), tuple[int, int]) + +assert_type(MAR_f4.iscontiguous(), bool) + +assert_type(MAR_f4 >= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 >= AR_td64, 
MaskedArray[np.bool]) +assert_type(MAR_dt64 >= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o >= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d >= 0, MaskedArray[np.bool]) +assert_type(MAR_s >= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte >= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 > 3, MaskedArray[np.bool]) +assert_type(MAR_i8 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 > AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o > AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d > 0, MaskedArray[np.bool]) +assert_type(MAR_s > MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte > MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 < 3, MaskedArray[np.bool]) +assert_type(MAR_i8 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 < AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o < AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d < 0, MaskedArray[np.bool]) +assert_type(MAR_s < MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte < MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, 
MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_byte.count(), int) +assert_type(MAR_f4.count(axis=None), int) +assert_type(MAR_f4.count(axis=0), NDArray[np.int_]) +assert_type(MAR_b.count(axis=(0, 1)), NDArray[np.int_]) +assert_type(MAR_o.count(keepdims=True), NDArray[np.int_]) +assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_]) +assert_type(MAR_o.count(None, True), NDArray[np.int_]) + +assert_type(np.ma.count(MAR_byte), int) +assert_type(np.ma.count(MAR_byte, axis=None), int) +assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_]) +assert_type(np.ma.count(MAR_b, axis=(0, 1)), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) + +assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(MAR_f4.compress([True, False]), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False], axis=0), MaskedArray[np.float32]) +assert_type(MAR_f4.compress([True, False], axis=0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.compress([True, False], 0, MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) + +assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) +assert_type(MAR_f4.put(4, 999), None) +assert_type(MAR_f4.put(4, 999, mode="clip"), None) + +assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool]) + +assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) +assert_type(np.ma.put(MAR_f4, 4, 999), None) +assert_type(np.ma.put(MAR_f4, 4, 999, mode="clip"), None) + 
+assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None) +assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None) + +assert_type(MAR_f4.filled(float("nan")), NDArray[np.float32]) +assert_type(MAR_i8.filled(), NDArray[np.int64]) +assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) + +assert_type(np.ma.filled(MAR_f4, float("nan")), NDArray[np.float32]) +assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) +# PyRight detects this one correctly, but mypy doesn't. +# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 +assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] + +assert_type(MAR_b.repeat(3), np.ma.MaskedArray[tuple[int], np.dtype[np.bool]]) +assert_type(MAR_2d_f4.repeat(MAR_i8), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=None), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedArray[np.float32]) + +assert_type(np.ma.allequal(AR_f4, MAR_f4), bool) +assert_type(np.ma.allequal(AR_f4, MAR_f4, fill_value=False), bool) + +assert_type(np.ma.allclose(AR_f4, MAR_f4), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) + +assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_1d.ravel(order="A"), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) + +assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | _NoMaskType) +# PyRight detects this one correctly, but mypy doesn't: +# `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` +assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] +assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | _NoMaskType) +assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | _NoMaskType) 
+assert_type(np.ma.getmask(np.int64(1)), _NoMaskType) + +assert_type(np.ma.is_mask(MAR_1d), bool) +assert_type(np.ma.is_mask(AR_b), bool) + +def func(x: object) -> None: + if np.ma.is_mask(x): + assert_type(x, NDArray[np.bool]) + else: + assert_type(x, object) + +assert_type(MAR_2d_f4.mT, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_c16.real, MaskedArray[np.float64]) +assert_type(MAR_c16.imag, MaskedArray[np.float64]) + +assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) + +assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.swapaxes(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[0, 0], Any) +assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[..., -1], MaskedArray[np.float32]) +assert_type(MAR_2d_V["field_0"], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[["field_0", "field_1"]], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) + +assert_type(np.ma.nomask, np.bool[Literal[False]]) +assert_type(np.ma.MaskType, type[np.bool]) + +assert_type(MAR_1d.__setmask__([True, False]), None) +assert_type(MAR_1d.__setmask__(np.False_), None) + +assert_type(MAR_2d_f4.harden_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.harden_mask(), MaskedArray[np.int64]) +assert_type(MAR_2d_f4.soften_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.soften_mask(), MaskedArray[np.int64]) +assert_type(MAR_f4.unshare_mask(), MaskedArray[np.float32]) +assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) + +assert_type(MAR_i8.hardmask, bool) +assert_type(MAR_i8.sharedmask, bool) + +assert_type(MAR_i8.recordmask, np.ma.MaskType | 
NDArray[np.ma.MaskType]) +assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], np.dtype[np.ma.MaskType]]) + +assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, "float16"), np.ma.MaskedArray[tuple[int, int], np.dtype]) + +assert_type(MAR_i8.fill_value, np.int64) + +assert_type(MAR_b.transpose(), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_b.T, MaskedArray[np.bool]) +assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) +assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) + +assert_type(MAR_f8.trace(), Any) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclassC) + +assert_type(MAR_f8.round(), MaskedArray[np.float64]) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) +assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) +assert_type(MAR_c8.reshape(2, 3, 4, 5), np.ma.MaskedArray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(MAR_td64.reshape(()), np.ma.MaskedArray[tuple[()], np.dtype[np.timedelta64]]) 
+assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) +assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) + +assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.view(), MaskedArray[np.float64]) +assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.dtype(np.float32)), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32]) +assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype="float32"), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype="float32", type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) +assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) + +assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) + +assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind="heap", order=("x", "y")), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) + +assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat.ma, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[:], MaskedArray[np.float32]) 
+assert_type(MAR_2d_f4.flat[0, 0], Any) +assert_type(MAR_2d_f4.flat[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[..., -1], MaskedArray[np.float32]) + +def invalid_resize() -> None: + assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] + +assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), [True, True, False], np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), copy=True), MaskedArray[Any]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` +assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` +assert_type(np.ma.MaskedArray(MAR_into_subclass), MaskedArray[np.float32]) + +# Masked Array addition + +assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b + AR_LIKE_o, Any) + +assert_type(AR_LIKE_u + MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i + MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_b, Any) + +assert_type(MAR_u4 + AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 + AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 + AR_LIKE_f, 
MaskedArray[np.floating]) +assert_type(MAR_u4 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u + MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i + MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_u4, Any) + +assert_type(MAR_i8 + AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 + AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_i8, Any) + +assert_type(MAR_f8 + AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u + MAR_f8, 
MaskedArray[np.float64]) +assert_type(AR_LIKE_i + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c + MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o + MAR_f8, Any) + +assert_type(MAR_c16 + AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o + MAR_c16, Any) + +assert_type(MAR_td64 + AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_td64, Any) + +assert_type(MAR_dt64 + AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_o + MAR_dt64, Any) + +assert_type(MAR_o + AR_LIKE_b, Any) 
+assert_type(MAR_o + AR_LIKE_u, Any) +assert_type(MAR_o + AR_LIKE_i, Any) +assert_type(MAR_o + AR_LIKE_f, Any) +assert_type(MAR_o + AR_LIKE_c, Any) +assert_type(MAR_o + AR_LIKE_td64, Any) +assert_type(MAR_o + AR_LIKE_dt64, Any) +assert_type(MAR_o + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_o, Any) +assert_type(AR_LIKE_u + MAR_o, Any) +assert_type(AR_LIKE_i + MAR_o, Any) +assert_type(AR_LIKE_f + MAR_o, Any) +assert_type(AR_LIKE_c + MAR_o, Any) +assert_type(AR_LIKE_td64 + MAR_o, Any) +assert_type(AR_LIKE_dt64 + MAR_o, Any) +assert_type(AR_LIKE_o + MAR_o, Any) + +# Masked Array subtraction +# Keep in sync with numpy/typing/tests/data/reveal/arithmetic.pyi + +assert_type(MAR_number - AR_number, MaskedArray[np.number]) + +assert_type(MAR_b - AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b - AR_LIKE_o, Any) + +assert_type(AR_LIKE_u - MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i - MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_b, Any) + +assert_type(MAR_u4 - AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 - AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u - MAR_u4, 
MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_u4, Any) + +assert_type(MAR_i8 - AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 - AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_i8, Any) + +assert_type(MAR_f8 - AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c - MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o - MAR_f8, Any) + +assert_type(MAR_c16 - AR_LIKE_b, 
MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o - MAR_c16, Any) + +assert_type(MAR_td64 - AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_td64, Any) + +assert_type(MAR_dt64 - AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_dt64, MaskedArray[np.timedelta64]) +assert_type(MAR_dt64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_dt64 - MAR_dt64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o - MAR_dt64, Any) + +assert_type(MAR_o - AR_LIKE_b, Any) +assert_type(MAR_o - AR_LIKE_u, Any) +assert_type(MAR_o - AR_LIKE_i, Any) +assert_type(MAR_o - AR_LIKE_f, Any) +assert_type(MAR_o - AR_LIKE_c, Any) 
+assert_type(MAR_o - AR_LIKE_td64, Any) +assert_type(MAR_o - AR_LIKE_dt64, Any) +assert_type(MAR_o - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_o, Any) +assert_type(AR_LIKE_u - MAR_o, Any) +assert_type(AR_LIKE_i - MAR_o, Any) +assert_type(AR_LIKE_f - MAR_o, Any) +assert_type(AR_LIKE_c - MAR_o, Any) +assert_type(AR_LIKE_td64 - MAR_o, Any) +assert_type(AR_LIKE_dt64 - MAR_o, Any) +assert_type(AR_LIKE_o - MAR_o, Any) + +# Masked Array multiplication + +assert_type(MAR_b * AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_u * MAR_b, MaskedArray[np.uint32]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_b, MaskedArray[np.signedinteger]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_b, MaskedArray[np.floating]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_b, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_b, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_b, Any) # type: ignore[assert-type] + +assert_type(MAR_u4 * AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 * AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 * AR_LIKE_o, Any) + +assert_type(MAR_i8 * AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 * AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * 
AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 * AR_LIKE_o, Any) + +assert_type(MAR_f8 * AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_f8, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_f8, Any) # type: ignore[assert-type] + +assert_type(MAR_c16 * AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * 
MAR_c16, Any) # type: ignore[assert-type] + +assert_type(MAR_td64 * AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_td64, MaskedArray[np.datetime64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_td64, Any) # type: ignore[assert-type] + +assert_type(AR_LIKE_o * MAR_dt64, Any) # type: ignore[assert-type] + +assert_type(MAR_o * AR_LIKE_b, Any) +assert_type(MAR_o * AR_LIKE_u, Any) +assert_type(MAR_o * AR_LIKE_i, Any) +assert_type(MAR_o * AR_LIKE_f, Any) +assert_type(MAR_o * AR_LIKE_c, Any) +assert_type(MAR_o * AR_LIKE_td64, Any) +assert_type(MAR_o * AR_LIKE_dt64, Any) +assert_type(MAR_o * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] + +assert_type(MAR_f8.sum(), Any) +assert_type(MAR_f8.sum(axis=0), Any) +assert_type(MAR_f8.sum(keepdims=True), Any) +assert_type(MAR_f8.sum(out=MAR_subclass), 
MaskedArraySubclassC) + +assert_type(MAR_f8.std(), Any) +assert_type(MAR_f8.std(axis=0), Any) +assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.var(), Any) +assert_type(MAR_f8.var(axis=0), Any) +assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.mean(), Any) +assert_type(MAR_f8.mean(axis=0), Any) +assert_type(MAR_f8.mean(keepdims=True), Any) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.prod(), Any) +assert_type(MAR_f8.prod(axis=0), Any) +assert_type(MAR_f8.prod(keepdims=True), Any) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclassC) + +# MaskedArray "true" division + +assert_type(MAR_f8 / b, MaskedArray[np.float64]) +assert_type(MAR_f8 / i, MaskedArray[np.float64]) +assert_type(MAR_f8 / f, MaskedArray[np.float64]) + +assert_type(b / MAR_f8, MaskedArray[np.float64]) +assert_type(i / MAR_f8, MaskedArray[np.float64]) +assert_type(f / MAR_f8, MaskedArray[np.float64]) + +assert_type(MAR_b / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_b, Any) + +assert_type(MAR_u4 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_u4, MaskedArray[np.float64]) 
+assert_type(AR_LIKE_u / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_u4, Any) + +assert_type(MAR_i8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_i8, Any) + +assert_type(MAR_f8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_f8, Any) + +assert_type(MAR_td64 / AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_td64, MaskedArray[np.float64]) +assert_type(MAR_td64 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 / MAR_td64, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_td64, Any) + +assert_type(MAR_o / AR_LIKE_b, Any) +assert_type(MAR_o / AR_LIKE_u, Any) +assert_type(MAR_o / 
AR_LIKE_i, Any) +assert_type(MAR_o / AR_LIKE_f, Any) +assert_type(MAR_o / AR_LIKE_td64, Any) +assert_type(MAR_o / AR_LIKE_dt64, Any) +assert_type(MAR_o / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_o, Any) +assert_type(AR_LIKE_u / MAR_o, Any) +assert_type(AR_LIKE_i / MAR_o, Any) +assert_type(AR_LIKE_f / MAR_o, Any) +assert_type(AR_LIKE_td64 / MAR_o, Any) +assert_type(AR_LIKE_dt64 / MAR_o, Any) +assert_type(AR_LIKE_o / MAR_o, Any) + +# MaskedArray floor division + +assert_type(MAR_b // AR_LIKE_b, MaskedArray[np.int8]) +assert_type(MAR_b // AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_b, MaskedArray[np.int8]) +assert_type(AR_LIKE_u // MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i // MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_o // MAR_b, Any) + +assert_type(MAR_u4 // AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 // AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u // MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_u4, Any) + +assert_type(MAR_i8 // AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 // AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_i8, 
MaskedArray[np.int64]) +assert_type(AR_LIKE_u // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_i8, Any) + +assert_type(MAR_f8 // AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 // MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_f8, Any) + +assert_type(MAR_td64 // AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_td64, MaskedArray[np.int64]) +assert_type(MAR_td64 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 // MAR_td64, MaskedArray[np.int64]) +assert_type(AR_LIKE_o // MAR_td64, Any) + +assert_type(MAR_o // AR_LIKE_b, Any) +assert_type(MAR_o // AR_LIKE_u, Any) +assert_type(MAR_o // AR_LIKE_i, Any) +assert_type(MAR_o // AR_LIKE_f, Any) +assert_type(MAR_o // AR_LIKE_td64, Any) +assert_type(MAR_o // AR_LIKE_dt64, Any) +assert_type(MAR_o // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_o, Any) +assert_type(AR_LIKE_u // MAR_o, Any) +assert_type(AR_LIKE_i // MAR_o, Any) +assert_type(AR_LIKE_f // MAR_o, Any) +assert_type(AR_LIKE_td64 // MAR_o, Any) +assert_type(AR_LIKE_dt64 // MAR_o, Any) +assert_type(AR_LIKE_o // MAR_o, Any) + +# Masked Array power + +assert_type(MAR_b ** AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b ** 
AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_u ** MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i ** MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_b, Any) + +assert_type(MAR_u4 ** AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 ** AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4, Any) + +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 ** AR_LIKE_o, Any) +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) + +assert_type(AR_LIKE_u ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8, Any) + +assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 
** AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c ** MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_f8, Any) + +assert_type(MAR_c16 ** AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o ** MAR_c16, Any) + +assert_type(MAR_o ** AR_LIKE_b, Any) +assert_type(MAR_o ** AR_LIKE_u, Any) +assert_type(MAR_o ** AR_LIKE_i, Any) +assert_type(MAR_o ** AR_LIKE_f, Any) +assert_type(MAR_o ** AR_LIKE_c, Any) +assert_type(MAR_o ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_o, Any) +assert_type(AR_LIKE_u ** MAR_o, Any) +assert_type(AR_LIKE_i ** MAR_o, Any) +assert_type(AR_LIKE_f ** MAR_o, Any) +assert_type(AR_LIKE_c ** MAR_o, Any) +assert_type(AR_LIKE_o ** MAR_o, Any) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/matrix.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/matrix.pyi index dc488c0..e82bbbc 100644 --- 
a/blimgui/dist64/numpy/typing/tests/data/reveal/matrix.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/matrix.pyi @@ -1,20 +1,19 @@ -from typing import Any, TypeAlias +from typing import Any, TypeAlias, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - _Shape2D: TypeAlias = tuple[int, int] mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] +ar_ip: npt.NDArray[np.intp] -assert_type(mat * 5, np.matrix[_Shape2D, Any]) -assert_type(5 * mat, np.matrix[_Shape2D, Any]) +assert_type(mat * 5, np.matrix) +assert_type(5 * mat, np.matrix) mat *= 5 -assert_type(mat**5, np.matrix[_Shape2D, Any]) +assert_type(mat**5, np.matrix) mat **= 5 assert_type(mat.sum(), Any) @@ -30,11 +29,11 @@ assert_type(mat.argmax(), np.intp) assert_type(mat.argmin(), np.intp) assert_type(mat.ptp(), np.int64) -assert_type(mat.sum(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.mean(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.std(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.var(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.prod(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.sum(axis=0), np.matrix) +assert_type(mat.mean(axis=0), np.matrix) +assert_type(mat.std(axis=0), np.matrix) +assert_type(mat.var(axis=0), np.matrix) +assert_type(mat.prod(axis=0), np.matrix) assert_type(mat.any(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) @@ -52,23 +51,23 @@ assert_type(mat.any(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.all(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.max(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.min(out=ar_f8), npt.NDArray[np.float64]) -assert_type(mat.argmax(out=ar_f8), npt.NDArray[np.float64]) -assert_type(mat.argmin(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.argmax(out=ar_ip), npt.NDArray[np.intp]) 
+assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp]) assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(mat.I, np.matrix[_Shape2D, Any]) +assert_type(mat.I, np.matrix) assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.A1, npt.NDArray[np.int64]) assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]]) assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(mat.getI(), np.matrix[_Shape2D, Any]) +assert_type(mat.getI(), np.matrix) assert_type(mat.getA(), np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.getA1(), npt.NDArray[np.int64]) assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(np.bmat(ar_f8), np.matrix[_Shape2D, Any]) -assert_type(np.bmat([[0, 1, 2]]), np.matrix[_Shape2D, Any]) -assert_type(np.bmat("mat"), np.matrix[_Shape2D, Any]) +assert_type(np.bmat(ar_f8), np.matrix) +assert_type(np.bmat([[0, 1, 2]]), np.matrix) +assert_type(np.bmat("mat"), np.matrix) -assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[_Shape2D, Any]) +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/memmap.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/memmap.pyi index 0cc0b11..57e31d4 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/memmap.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/memmap.pyi @@ -1,9 +1,7 @@ -from typing import Any +from typing import Any, assert_type import numpy as np -from typing_extensions import assert_type - memmap_obj: np.memmap[Any, np.dtype[np.str_]] assert_type(np.memmap.__array_priority__, float) @@ -16,6 +14,6 @@ assert_type(memmap_obj.flush(), None) assert_type(np.memmap("file.txt", offset=5), np.memmap[Any, np.dtype[np.uint8]]) assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), np.memmap[Any, np.dtype[np.float64]]) with open("file.txt", "rb") as f: - 
assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype[Any]]) + assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype]) assert_type(memmap_obj.__array_finalize__(object()), None) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/mod.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/mod.pyi index 5d0ac55..3f0da16 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/mod.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/mod.pyi @@ -1,11 +1,8 @@ import datetime as dt -from typing import Literal as L - -from typing_extensions import assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _64Bit f8: np.float64 i8: np.int64 @@ -110,51 +107,51 @@ assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) assert_type(i8 % b, np.int64) assert_type(i8 % i8, np.int64) -assert_type(i8 % f, np.float64 | np.floating[_64Bit]) -assert_type(i8 % f8, np.float64 | np.floating[_64Bit]) -assert_type(i4 % i8, np.int64 | np.int32) -assert_type(i4 % f8, np.float64 | np.float32) +assert_type(i8 % f, np.float64) +assert_type(i8 % f8, np.float64) +assert_type(i4 % i8, np.signedinteger) +assert_type(i4 % f8, np.float64) assert_type(i4 % i4, np.int32) -assert_type(i4 % f4, np.float32) +assert_type(i4 % f4, np.floating) assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) -assert_type(divmod(i8, i4), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) # workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(i8.__divmod__(f), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) -assert_type(i8.__divmod__(f8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) -assert_type(divmod(i8, f4), 
tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(i4, f4), tuple[np.floating, np.floating]) assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.int64) -assert_type(f % i8, np.float64 | np.floating[_64Bit]) +assert_type(f % i8, np.float64) assert_type(i8 % i8, np.int64) assert_type(f8 % i8, np.float64) -assert_type(i8 % i4, np.int64 | np.int32) +assert_type(i8 % i4, np.signedinteger) assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) -assert_type(f4 % i4, np.float32) +assert_type(f4 % i4, np.floating) assert_type(AR_b % i8, npt.NDArray[np.int64]) assert_type(divmod(b, i8), tuple[np.int64, np.int64]) -assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(f, i8), tuple[np.float64, np.float64]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) -assert_type(divmod(i4, i8), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) # workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) -assert_type(f4.__divmod__(i4), tuple[np.float32, np.float32]) +assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) +assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float 
assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) -assert_type(i8 % f4, np.floating[_64Bit] | np.float32) +assert_type(i8 % f4, np.floating) assert_type(f4 % f4, np.float32) assert_type(f8 % AR_b, npt.NDArray[np.float64]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/modules.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/modules.pyi index fffd635..1e582d9 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/modules.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/modules.pyi @@ -1,10 +1,9 @@ import types +from typing import assert_type import numpy as np from numpy import f2py -from typing_extensions import assert_type - assert_type(np, types.ModuleType) assert_type(np.char, types.ModuleType) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/multiarray.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/multiarray.pyi index 3ea7f3f..cb9aa78 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,14 +1,12 @@ import datetime as dt -from typing import Any, Literal, TypeVar +from typing import Any, Literal, TypeVar, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import Unpack, assert_type +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) -_SCT = TypeVar("_SCT", bound=np.generic, covariant=True) - -class SubClass(npt.NDArray[_SCT]): ... +class SubClass(npt.NDArray[_ScalarT_co]): ... 
subclass: SubClass[np.float64] @@ -50,7 +48,7 @@ assert_type(b_f8.iters, tuple[np.flatiter[Any], ...]) assert_type(b_f8.nd, int) assert_type(b_f8.ndim, int) assert_type(b_f8.numiter, int) -assert_type(b_f8.shape, tuple[int, ...]) +assert_type(b_f8.shape, tuple[Any, ...]) assert_type(b_f8.size, int) assert_type(next(b_i8_f8_f8), tuple[Any, ...]) @@ -60,7 +58,7 @@ assert_type(b_i8_f8_f8.iters, tuple[np.flatiter[Any], ...]) assert_type(b_i8_f8_f8.nd, int) assert_type(b_i8_f8_f8.ndim, int) assert_type(b_i8_f8_f8.numiter, int) -assert_type(b_i8_f8_f8.shape, tuple[int, ...]) +assert_type(b_i8_f8_f8.shape, tuple[Any, ...]) assert_type(b_i8_f8_f8.size, int) assert_type(np.inner(AR_f8, AR_i8), Any) @@ -68,27 +66,27 @@ assert_type(np.inner(AR_f8, AR_i8), Any) assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) -assert_type(np.lexsort([0, 1, 2]), Any) +assert_type(np.lexsort([0, 1, 2]), npt.NDArray[np.intp]) assert_type(np.can_cast(np.dtype("i8"), int), bool) assert_type(np.can_cast(AR_f8, "f8"), bool) assert_type(np.can_cast(AR_f8, np.complex128, casting="unsafe"), bool) -assert_type(np.min_scalar_type([1]), np.dtype[Any]) -assert_type(np.min_scalar_type(AR_f8), np.dtype[Any]) +assert_type(np.min_scalar_type([1]), np.dtype) +assert_type(np.min_scalar_type(AR_f8), np.dtype) -assert_type(np.result_type(int, [1]), np.dtype[Any]) -assert_type(np.result_type(AR_f8, AR_u1), np.dtype[Any]) -assert_type(np.result_type(AR_f8, np.complex128), np.dtype[Any]) +assert_type(np.result_type(int, [1]), np.dtype) +assert_type(np.result_type(AR_f8, AR_u1), np.dtype) +assert_type(np.result_type(AR_f8, np.complex128), np.dtype) assert_type(np.dot(AR_LIKE_f, AR_i8), Any) assert_type(np.dot(AR_u1, 1), Any) assert_type(np.dot(1.5j, 1), Any) assert_type(np.dot(AR_u1, 1, out=AR_f8), npt.NDArray[np.float64]) -assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating[Any]) -assert_type(np.vdot(AR_u1, 1), 
np.signedinteger[Any]) -assert_type(np.vdot(1.5j, 1), np.complexfloating[Any, Any]) +assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating) +assert_type(np.vdot(AR_u1, 1), np.signedinteger) +assert_type(np.vdot(1.5j, 1), np.complexfloating) assert_type(np.bincount(AR_i8), npt.NDArray[np.intp]) @@ -96,19 +94,22 @@ assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) -assert_type(np.packbits(AR_i8), npt.NDArray[np.uint8]) -assert_type(np.packbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_i8), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_i8, axis=1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1, axis=1), npt.NDArray[np.uint8]) -assert_type(np.unpackbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.unpackbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.unpackbits(AR_u1, axis=1), npt.NDArray[np.uint8]) assert_type(np.shares_memory(1, 2), bool) -assert_type(np.shares_memory(AR_f8, AR_f8, max_work=1), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=-1), bool) assert_type(np.may_share_memory(1, 2), bool) -assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=1), bool) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=0), bool) -assert_type(np.promote_types(np.int32, np.int64), np.dtype[Any]) -assert_type(np.promote_types("f4", float), np.dtype[Any]) +assert_type(np.promote_types(np.int32, np.int64), np.dtype) +assert_type(np.promote_types("f4", float), np.dtype) assert_type(np.frompyfunc(func11, n1, n1).nin, Literal[1]) assert_type(np.frompyfunc(func11, n1, n1).nout, Literal[1]) @@ -150,14 +151,14 @@ assert_type(np.frompyfunc(func12, n1, n2).identity, None) assert_type(np.frompyfunc(func12, n1, n2).signature, None) assert_type( np.frompyfunc(func12, n2, n2)(f8, f8), - tuple[complex, complex, Unpack[tuple[complex, ...]]], + tuple[complex, 
complex, *tuple[complex, ...]], ) assert_type( np.frompyfunc(func12, n2, n2)(AR_f8, f8), tuple[ complex | npt.NDArray[np.object_], complex | npt.NDArray[np.object_], - Unpack[tuple[complex | npt.NDArray[np.object_], ...]], + *tuple[complex | npt.NDArray[np.object_], ...], ], ) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/nbit_base_example.pyi index eaaa62b..4ac59e9 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,16 +1,13 @@ -from typing import TypeVar +from typing import TypeVar, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _64Bit, _32Bit +from numpy._typing import _32Bit, _64Bit -from typing_extensions import assert_type +T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -T1 = TypeVar("T1", bound=npt.NBitBase) -T2 = TypeVar("T2", bound=npt.NBitBase) - -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - return a + b +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... 
i8: np.int64 i4: np.int32 diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_assignability.pyi index 5c1e365..3694b68 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_assignability.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -1,10 +1,8 @@ -from typing import Protocol, TypeAlias, TypeVar -from typing_extensions import assert_type -import numpy as np +from typing import Any, Protocol, TypeAlias, TypeVar, assert_type +import numpy as np from numpy._typing import _64Bit - _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) @@ -35,6 +33,7 @@ _LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] _Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] _Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] _CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +_Void_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.void]] b1_1d: _Bool_1d u1_1d: _UInt8_1d @@ -46,6 +45,7 @@ g_1d: _LongDouble_1d c8_1d: _Complex64_1d c16_1d: _Complex128_1d G_1d: _CLongDouble_1d +V_1d: _Void_1d assert_type(do_abs(b1_1d), _Bool_1d) assert_type(do_abs(u1_1d), _UInt8_1d) @@ -77,3 +77,6 @@ assert_type(do_pos(i2_1d), _Int16_1d) assert_type(do_pos(q_1d), _LongLong_1d) assert_type(do_pos(f4_1d), _Float32_1d) assert_type(do_pos(c16_1d), _Complex128_1d) + +# this shape is effectively equivalent to `tuple[int, *tuple[Any, ...]]`, i.e. 
ndim >= 1 +assert_type(V_1d["field"], np.ndarray[tuple[int] | tuple[Any, ...]]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index 9d4893e..0e235ef 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - b1_0d: np.ndarray[tuple[()], np.dtype[np.bool]] u2_1d: np.ndarray[tuple[int], np.dtype[np.uint16]] i4_2d: np.ndarray[tuple[int, int], np.dtype[np.int32]] @@ -39,9 +37,6 @@ any_sctype: np.ndarray[Any, Any] assert_type(any_dtype.tolist(), Any) assert_type(any_sctype.tolist(), Any) - -# itemset does not return a value -# tostring is pretty simple # tobytes is pretty simple # tofile does not return a value # dump does not return a value @@ -62,8 +57,8 @@ assert_type(i4_2d.astype(np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uin assert_type(np.astype(i4_2d, np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) assert_type(f8_3d.astype(np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) assert_type(np.astype(f8_3d, np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) -assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic[Any]]]) -assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[Any]]) +assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic]]) +assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype]) # byteswap assert_type(i0_nd.byteswap(), npt.NDArray[np.int_]) @@ -76,7 +71,7 @@ assert_type(i0_nd.copy("C"), npt.NDArray[np.int_]) assert_type(i0_nd.view(), npt.NDArray[np.int_]) assert_type(i0_nd.view(np.float64), 
npt.NDArray[np.float64]) assert_type(i0_nd.view(float), npt.NDArray[Any]) -assert_type(i0_nd.view(np.float64, np.matrix), np.matrix[tuple[int, int], Any]) +assert_type(i0_nd.view(np.float64, np.matrix), np.matrix) # getfield assert_type(i0_nd.getfield("float"), npt.NDArray[Any]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 690c92b..28754ae 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,16 +6,16 @@ function-based counterpart in `../from_numeric.py`. """ -import operator import ctypes as ct +import operator +from collections.abc import Iterator from types import ModuleType -from typing import Any, Literal +from typing import Any, Literal, assert_type +from typing_extensions import CapsuleType import numpy as np import numpy.typing as npt -from typing_extensions import CapsuleType, assert_type - class SubClass(npt.NDArray[np.object_]): ... 
f8: np.float64 @@ -29,6 +29,10 @@ AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] AR_V: npt.NDArray[np.void] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + ctypes_obj = AR_f8.ctypes assert_type(AR_f8.__dlpack__(), CapsuleType) @@ -58,15 +62,15 @@ assert_type(AR_f8.any(out=B), SubClass) assert_type(f8.argmax(), np.intp) assert_type(AR_f8.argmax(), np.intp) assert_type(AR_f8.argmax(axis=0), Any) -assert_type(AR_f8.argmax(out=B), SubClass) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) assert_type(f8.argmin(), np.intp) assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) -assert_type(AR_f8.argmin(out=B), SubClass) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) -assert_type(f8.argsort(), npt.NDArray[Any]) -assert_type(AR_f8.argsort(), npt.NDArray[Any]) +assert_type(f8.argsort(), npt.NDArray[np.intp]) +assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) assert_type(AR_f8.choose([0]), npt.NDArray[Any]) @@ -126,9 +130,12 @@ assert_type(f8.round(), np.float64) assert_type(AR_f8.round(), npt.NDArray[np.float64]) assert_type(AR_f8.round(out=B), SubClass) -assert_type(f8.repeat(1), npt.NDArray[np.float64]) -assert_type(AR_f8.repeat(1), npt.NDArray[np.float64]) -assert_type(B.repeat(1), npt.NDArray[np.object_]) +assert_type(f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8.repeat(1, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) +assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) assert_type(f8.std(), Any) assert_type(AR_f8.std(), Any) @@ -162,7 +169,7 @@ 
assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) assert_type(AR_f8.dot(1, out=B), SubClass) -assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) +assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(AR_f8.searchsorted(1), np.intp) assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) @@ -232,3 +239,8 @@ assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) assert_type(f8.__array_namespace__(), ModuleType) assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index d2c8381..95aa4d9 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,39 +1,47 @@ +from typing import TypeAlias, assert_type + import numpy as np import numpy.typing as npt -from typing_extensions import assert_type +_ArrayND: TypeAlias = npt.NDArray[np.int64] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[np.int8]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] -nd: npt.NDArray[np.int64] +_nd: _ArrayND +_2d: _Array2D +_3d: _Array3D # reshape -assert_type(nd.reshape(None), npt.NDArray[np.int64]) -assert_type(nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(None), 
npt.NDArray[np.int64]) +assert_type(_nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # resize does not return a value # transpose -assert_type(nd.transpose(), npt.NDArray[np.int64]) -assert_type(nd.transpose(1, 0), npt.NDArray[np.int64]) -assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) +assert_type(_nd.transpose(), npt.NDArray[np.int64]) +assert_type(_nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(_nd.transpose((1, 0)), npt.NDArray[np.int64]) # swapaxes -assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) +assert_type(_nd.swapaxes(0, 1), _ArrayND) +assert_type(_2d.swapaxes(0, 1), _Array2D) +assert_type(_3d.swapaxes(0, 1), _Array3D) # flatten -assert_type(nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # ravel -assert_type(nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # squeeze -assert_type(nd.squeeze(), npt.NDArray[np.int64]) -assert_type(nd.squeeze(0), npt.NDArray[np.int64]) -assert_type(nd.squeeze((0, 
2)), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(_nd.squeeze((0, 2)), npt.NDArray[np.int64]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/nditer.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/nditer.pyi index d9c1751..bade7ac 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/nditer.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/nditer.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - nditer_obj: np.nditer assert_type(np.nditer([0, 1], flags=["c_index"]), np.nditer) @@ -12,7 +10,7 @@ assert_type(np.nditer([0, 1], op_flags=[["readonly", "readonly"]]), np.nditer) assert_type(np.nditer([0, 1], op_dtypes=np.int_), np.nditer) assert_type(np.nditer([0, 1], order="C", casting="no"), np.nditer) -assert_type(nditer_obj.dtypes, tuple[np.dtype[Any], ...]) +assert_type(nditer_obj.dtypes, tuple[np.dtype, ...]) assert_type(nditer_obj.finished, bool) assert_type(nditer_obj.has_delayed_bufalloc, bool) assert_type(nditer_obj.has_index, bool) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/nested_sequence.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/nested_sequence.pyi index a076873..8ac7ef8 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/nested_sequence.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/nested_sequence.pyi @@ -1,10 +1,8 @@ from collections.abc import Sequence -from typing import Any +from typing import Any, assert_type from numpy._typing import _NestedSequence -from typing_extensions import assert_type - a: Sequence[int] b: Sequence[Sequence[int]] c: Sequence[Sequence[Sequence[int]]] @@ -14,8 +12,7 @@ f: tuple[int, ...] g: list[int] h: Sequence[Any] -def func(a: _NestedSequence[int]) -> None: - ... +def func(a: _NestedSequence[int]) -> None: ... 
assert_type(func(a), None) assert_type(func(b), None) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/npyio.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/npyio.pyi index 060969d..88fe215 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/npyio.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/npyio.pyi @@ -1,15 +1,13 @@ +import pathlib import re import zipfile -import pathlib -from typing import IO, Any from collections.abc import Mapping +from typing import IO, Any, assert_type -import numpy.typing as npt import numpy as np +import numpy.typing as npt from numpy.lib._npyio_impl import BagObj -from typing_extensions import assert_type - str_path: str pathlib_path: pathlib.Path str_file: IO[str] @@ -30,11 +28,11 @@ class BytesReader: bytes_writer: BytesWriter bytes_reader: BytesReader -assert_type(npz_file.zip, zipfile.ZipFile) -assert_type(npz_file.fid, None | IO[str]) +assert_type(npz_file.zip, zipfile.ZipFile | None) +assert_type(npz_file.fid, IO[str] | None) assert_type(npz_file.files, list[str]) assert_type(npz_file.allow_pickle, bool) -assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any]) +assert_type(npz_file.pickle_kwargs, Mapping[str, Any] | None) assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile]) assert_type(npz_file["test"], npt.NDArray[Any]) assert_type(len(npz_file), int) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/numeric.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/numeric.pyi index f55f7dc..fb6bbc6 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/numeric.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/numeric.pyi @@ -5,15 +5,12 @@ Does not include tests which fall under ``array_constructors``. """ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - -class SubClass(npt.NDArray[np.int64]): - ... +class SubClass(npt.NDArray[np.int64]): ... 
i8: np.int64 @@ -25,89 +22,125 @@ AR_c16: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_O: npt.NDArray[np.object_] -B: list[int] -C: SubClass +_sub_nd_i8: SubClass + +_to_1d_bool: list[bool] +_to_1d_int: list[int] +_to_1d_float: list[float] +_to_1d_complex: list[complex] + +### -assert_type(np.count_nonzero(i8), int) -assert_type(np.count_nonzero(AR_i8), int) -assert_type(np.count_nonzero(B), int) +assert_type(np.count_nonzero(i8), np.intp) +assert_type(np.count_nonzero(AR_i8), np.intp) +assert_type(np.count_nonzero(_to_1d_int), np.intp) assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) assert_type(np.count_nonzero(AR_i8, axis=0), Any) assert_type(np.isfortran(i8), bool) assert_type(np.isfortran(AR_i8), bool) -assert_type(np.argwhere(i8), npt.NDArray[np.intp]) -assert_type(np.argwhere(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.flatnonzero(i8), npt.NDArray[np.intp]) -assert_type(np.flatnonzero(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.correlate(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.correlate(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.correlate(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.correlate(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.correlate(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.correlate(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.correlate(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.correlate(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.correlate(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.convolve(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.convolve(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.convolve(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.convolve(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) 
-assert_type(np.convolve(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.convolve(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.convolve(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.convolve(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.outer(i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.outer(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.outer(AR_i8, AR_i8, out=C), SubClass) -assert_type(np.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.outer(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.outer(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.argwhere(i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) + +assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) + +# correlate +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(AR_u8, AR_u8), np.ndarray[tuple[int], 
np.dtype[np.uint64]]) +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.correlate(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.correlate(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# convolve (same as correlate) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) 
+assert_type(np.convolve(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.convolve(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.convolve(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# outer (very similar to above, but 2D output) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(AR_u8, AR_u8), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) +assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.outer(AR_i8, AR_i8, out=_sub_nd_i8), SubClass) +assert_type(np.outer(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(_to_1d_int, _to_1d_int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.outer(_to_1d_float, _to_1d_float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) 
+assert_type(np.outer(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) + +# tensordot +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.tensordot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.tensordot(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.tensordot(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.tensordot(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.tensordot(_to_1d_bool, _to_1d_bool), npt.NDArray[np.bool]) +assert_type(np.tensordot(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.tensordot(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) + +# cross +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.cross(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.cross(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) 
+assert_type(np.cross(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.cross(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.cross(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) assert_type(np.isscalar(i8), bool) assert_type(np.isscalar(AR_i8), bool) -assert_type(np.isscalar(B), bool) +assert_type(np.isscalar(_to_1d_int), bool) assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) -assert_type(np.roll(B, 1), npt.NDArray[Any]) +assert_type(np.roll(_to_1d_int, 1), npt.NDArray[Any]) assert_type(np.rollaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) -assert_type(np.cross(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.cross(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.cross(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.cross(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.cross(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) - assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) @@ -120,18 +153,18 @@ assert_type(np.binary_repr(1), str) assert_type(np.base_repr(1), str) assert_type(np.allclose(i8, AR_i8), bool) -assert_type(np.allclose(B, AR_i8), bool) +assert_type(np.allclose(_to_1d_int, AR_i8), bool) assert_type(np.allclose(AR_i8, AR_i8), bool) assert_type(np.isclose(i8, i8), np.bool) assert_type(np.isclose(i8, AR_i8), 
npt.NDArray[np.bool]) -assert_type(np.isclose(B, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool]) assert_type(np.array_equal(i8, AR_i8), bool) -assert_type(np.array_equal(B, AR_i8), bool) +assert_type(np.array_equal(_to_1d_int, AR_i8), bool) assert_type(np.array_equal(AR_i8, AR_i8), bool) assert_type(np.array_equiv(i8, AR_i8), bool) -assert_type(np.array_equiv(B, AR_i8), bool) +assert_type(np.array_equiv(_to_1d_int, AR_i8), bool) assert_type(np.array_equiv(AR_i8, AR_i8), bool) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/numerictypes.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/numerictypes.pyi index c06412e..b5749c1 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -1,53 +1,16 @@ -from typing import Literal -from typing_extensions import assert_type +from typing import Literal, assert_type import numpy as np - -assert_type( - np.ScalarType, - tuple[ - type[int], - type[float], - type[complex], - type[bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[np.csingle], - type[np.cdouble], - type[np.clongdouble], - type[np.half], - type[np.single], - type[np.double], - type[np.longdouble], - type[np.byte], - type[np.short], - type[np.intc], - type[np.long], - type[np.longlong], - type[np.timedelta64], - type[np.datetime64], - type[np.object_], - type[np.bytes_], - type[np.str_], - type[np.ubyte], - type[np.ushort], - type[np.uintc], - type[np.ulong], - type[np.ulonglong], - type[np.void], - ], -) assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) -assert_type(np.ScalarType[8], type[np.csingle]) -assert_type(np.ScalarType[10], type[np.clongdouble]) +assert_type(np.ScalarType[8], type[np.complex64]) +assert_type(np.ScalarType[9], type[np.complex128]) 
+assert_type(np.ScalarType[-1], type[np.void]) assert_type(np.bool_(object()), np.bool) assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], Literal["FDG"]) assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) -assert_type(np.sctypeDict['uint8'], type[np.generic]) +assert_type(np.sctypeDict["uint8"], type[np.generic]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 1809b36..53c5ba8 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,31 +1,28 @@ -from fractions import Fraction from collections.abc import Sequence from decimal import Decimal -from typing import Any, Literal as L, TypeAlias, TypeVar +from typing import Any, Literal as L, TypeAlias, TypeVar, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -from typing_extensions import assert_type, LiteralString - -_Ar_x: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] -_Ar_f: TypeAlias = npt.NDArray[np.floating[Any]] -_Ar_c: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] +_Ar_x: TypeAlias = npt.NDArray[np.inexact | np.object_] +_Ar_f: TypeAlias = npt.NDArray[np.floating] +_Ar_c: TypeAlias = npt.NDArray[np.complexfloating] _Ar_O: TypeAlias = npt.NDArray[np.object_] -_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact[Any] | np.object_]] -_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] -_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] _Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] 
-_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact[Any] | np.object_]] -_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating[Any]]] -_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating[Any, Any]]] +_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] +_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] _Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] -_SCT = TypeVar("_SCT", bound=np.generic) -_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] _BasisName: TypeAlias = L["X"] @@ -41,9 +38,9 @@ AR_i: npt.NDArray[np.int_] AR_f: npt.NDArray[np.float64] AR_f_co: npt.NDArray[np.float64] | npt.NDArray[np.int_] AR_c: npt.NDArray[np.complex128] -AR_c_co: npt.NDArray[np.complex128] |npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_c_co: npt.NDArray[np.complex128] | npt.NDArray[np.float64] | npt.NDArray[np.int_] AR_O: npt.NDArray[np.object_] -AR_O_co: npt.NDArray[np.object_ | np.number[Any]] +AR_O_co: npt.NDArray[np.object_ | np.number] SQ_i: Sequence[int] SQ_f: Sequence[float] @@ -68,11 +65,11 @@ PS_all: ( # static- and classmethods assert_type(type(PS_poly).basis_name, None) -assert_type(type(PS_cheb).basis_name, L['T']) -assert_type(type(PS_herm).basis_name, L['H']) -assert_type(type(PS_herme).basis_name, L['He']) -assert_type(type(PS_lag).basis_name, L['L']) -assert_type(type(PS_leg).basis_name, L['P']) +assert_type(type(PS_cheb).basis_name, L["T"]) +assert_type(type(PS_herm).basis_name, L["H"]) +assert_type(type(PS_herme).basis_name, L["He"]) +assert_type(type(PS_lag).basis_name, L["L"]) +assert_type(type(PS_leg).basis_name, L["P"]) assert_type(type(PS_all).__hash__, None) assert_type(type(PS_all).__array_ufunc__, None) @@ -92,10 +89,10 @@ 
assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) assert_type(type(PS_poly).identity(), npp.Polynomial) -assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) +assert_type(type(PS_cheb).identity(symbol="z"), npp.Chebyshev) assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) -assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) +assert_type(type(PS_leg).basis(32, symbol="u"), npp.Legendre) assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) @@ -105,7 +102,7 @@ assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) assert_type(PS_all.coef, _Ar_x_n) assert_type(PS_all.domain, _Ar_x_2) assert_type(PS_all.window, _Ar_x_2) -assert_type(PS_all.symbol, LiteralString) +assert_type(PS_all.symbol, str) # instance methods @@ -115,7 +112,7 @@ assert_type(PS_all.has_samewindow(PS_all), bool) assert_type(PS_all.has_sametype(PS_all), bool) assert_type(PS_poly.has_sametype(PS_poly), bool) assert_type(PS_poly.has_sametype(PS_leg), bool) -assert_type(PS_poly.has_sametype(NotADirectoryError), L[False]) +assert_type(PS_poly.has_sametype(NotADirectoryError), bool) assert_type(PS_poly.copy(), npp.Polynomial) assert_type(PS_cheb.copy(), npp.Chebyshev) @@ -124,7 +121,7 @@ assert_type(PS_herme.copy(), npp.HermiteE) assert_type(PS_lag.copy(), npp.Laguerre) assert_type(PS_leg.copy(), npp.Legendre) -assert_type(PS_leg.cutdeg(), npp.Legendre) +assert_type(PS_leg.cutdeg(3), npp.Legendre) assert_type(PS_leg.trim(), npp.Legendre) assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) @@ -161,7 +158,7 @@ assert_type(PS_poly.fit(AR_c_co, SQ_c, SQ_i), npp.Polynomial) assert_type(PS_lag.fit(SQ_c, SQ_c, SQ_i, full=False), npp.Laguerre) assert_type( PS_herme.fit(SQ_c, AR_c_co, SC_i_co, full=True), - tuple[npp.HermiteE, Sequence[np.inexact[Any] | np.int32]], + tuple[npp.HermiteE, Sequence[np.inexact | np.int32]], ) # 
custom operations @@ -174,17 +171,16 @@ assert_type(repr(PS_all), str) assert_type(format(PS_all), str) assert_type(len(PS_all), int) -assert_type(next(iter(PS_all)), np.inexact[Any] | object) - -assert_type(PS_all(SC_f_co), np.float64 | np.complex128) -assert_type(PS_all(SC_c_co), np.complex128) -assert_type(PS_all(Decimal()), np.float64 | np.complex128) -assert_type(PS_all(Fraction()), np.float64 | np.complex128) -assert_type(PS_poly(SQ_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) -assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(next(iter(PS_all)), np.float64 | Any) + +assert_type(PS_all(SC_f_co), np.float64 | Any) +assert_type(PS_all(SC_c_co), np.complex128 | Any) +assert_type(PS_all(Decimal()), np.float64 | Any) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128 | Any]) assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) -assert_type(PS_poly(AR_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) -assert_type(PS_poly(AR_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128 | Any]) assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) assert_type(PS_all(PS_poly), npp.Polynomial) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 55cb243..0ab947b 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,17 +1,15 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Any, Literal as L, TypeAlias +from typing import Literal as L, TypeAlias, assert_type import numpy as np -import numpy.typing as npt 
import numpy.polynomial.polyutils as pu +import numpy.typing as npt from numpy.polynomial._polytypes import _Tuple2 -from typing_extensions import assert_type - -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] _ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] _ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] @@ -157,31 +155,31 @@ assert_type(pu.mapparms(seq_num_complex, seq_num_complex), _Tuple2[complex]) assert_type(pu.mapparms(seq_num_complex, seq_num_object), _Tuple2[object]) assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object]) -assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating[Any]]) -assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating[Any]]) +assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating]) assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[float]) assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[complex]) assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[complex]) assert_type(pu.mapparms(seq_sct_complex, seq_sct_object), _Tuple2[object]) assert_type(pu.mapparms(seq_sct_object, seq_sct_object), _Tuple2[object]) -assert_type(pu.mapparms(arr_int, arr_int), _Tuple2[np.floating[Any]]) -assert_type(pu.mapparms(arr_int, arr_float), _Tuple2[np.floating[Any]]) -assert_type(pu.mapparms(arr_float, arr_float), _Tuple2[np.floating[Any]]) -assert_type(pu.mapparms(arr_float, arr_complex), _Tuple2[np.complexfloating[Any, Any]]) -assert_type(pu.mapparms(arr_complex, arr_complex), _Tuple2[np.complexfloating[Any, Any]]) +assert_type(pu.mapparms(arr_int, arr_int), 
_Tuple2[np.floating]) +assert_type(pu.mapparms(arr_int, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_complex), _Tuple2[np.complexfloating]) assert_type(pu.mapparms(arr_complex, arr_object), _Tuple2[object]) assert_type(pu.mapparms(arr_object, arr_object), _Tuple2[object]) # mapdomain -assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_int), np.floating[Any]) -assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_float), np.floating[Any]) -assert_type(pu.mapdomain(num_int, seq_num_float, seq_num_float), np.floating[Any]) -assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating[Any]) -assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating[Any, Any]) -assert_type(pu.mapdomain(num_float, seq_num_complex, seq_num_complex), np.complexfloating[Any, Any]) -assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating[Any, Any]) +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_int), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_float, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating) assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_object), object) assert_type(pu.mapdomain(num_complex, seq_num_object, seq_num_object), object) assert_type(pu.mapdomain(num_object, seq_num_object, seq_num_object), object) diff --git 
a/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_series.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_series.pyi index 51ecd4a..cc0cd9b 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,15 +1,13 @@ from collections.abc import Sequence -from typing import Any, TypeAlias +from typing import Any, TypeAlias, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -from typing_extensions import assert_type - -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] _ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] _ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] _ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] @@ -51,70 +49,70 @@ assert_type(npp.polynomial.polypow(AR_c16, 2), _ArrComplex1D) assert_type(npp.polynomial.polypow(AR_O, 2), _ArrObject1D) # assert_type(npp.polynomial.polyder(PS_poly), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyder(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyder(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyder(AR_c16), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyder(AR_O, m=2), npt.NDArray[np.object_]) # assert_type(npp.polynomial.polyint(PS_poly), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyint(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) 
+assert_type(npp.polynomial.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyint(AR_O, m=2), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyval(AR_b, AR_b), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval(AR_u4, AR_b), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval(AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval(AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyval(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyval2d(AR_b, AR_b, AR_b), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval2d(AR_u4, AR_u4, AR_b), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval2d(AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval2d(AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_c16), 
npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyval2d(AR_O, AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyval3d(AR_b, AR_b, AR_b, AR_b), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval3d(AR_u4, AR_u4, AR_u4, AR_b), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval3d(AR_b, AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_u4, AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.float64 | Any]) 
+assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complex128 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_ | Any]) -assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyvander(AR_O, 3), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyvander2d(AR_f8, AR_f8, [4, 2]), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyvander2d(AR_c16, AR_c16, [4, 2]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander2d(AR_f8, AR_f8, [4, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander2d(AR_c16, AR_c16, [4, 2]), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyvander2d(AR_O, AR_O, [4, 2]), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyvander3d(AR_f8, AR_f8, AR_f8, [4, 3, 2]), npt.NDArray[np.floating[Any]]) -assert_type(npp.polynomial.polyvander3d(AR_c16, AR_c16, AR_c16, [4, 3, 2]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander3d(AR_f8, AR_f8, AR_f8, [4, 3, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander3d(AR_c16, AR_c16, AR_c16, [4, 3, 2]), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyvander3d(AR_O, AR_O, AR_O, [4, 3, 2]), npt.NDArray[np.object_]) assert_type( npp.polynomial.polyfit(AR_f8, AR_f8, 2), - npt.NDArray[np.floating[Any]], + npt.NDArray[np.floating], ) assert_type( npp.polynomial.polyfit(AR_f8, AR_i8, 1, full=True), - tuple[npt.NDArray[np.floating[Any]], Sequence[np.inexact[Any] | np.int32]], + tuple[npt.NDArray[np.floating], 
Sequence[np.inexact | np.int32]], ) assert_type( npp.polynomial.polyfit(AR_c16, AR_f8, 2), - npt.NDArray[np.complexfloating[Any, Any]], + npt.NDArray[np.complexfloating], ) assert_type( npp.polynomial.polyfit(AR_f8, AR_c16, 1, full=True)[0], - npt.NDArray[np.complexfloating[Any, Any]], + npt.NDArray[np.complexfloating], ) assert_type(npp.chebyshev.chebgauss(2), tuple[_ArrFloat1D64, _ArrFloat1D64]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/random.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/random.pyi index e011743..8a57644 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/random.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/random.pyi @@ -1,17 +1,15 @@ import threading -from typing import Any from collections.abc import Sequence +from typing import Any, assert_type import numpy as np import numpy.typing as npt from numpy.random._generator import Generator from numpy.random._mt19937 import MT19937 from numpy.random._pcg64 import PCG64 -from numpy.random._sfc64 import SFC64 from numpy.random._philox import Philox -from numpy.random.bit_generator import SeedSequence, SeedlessSeedSequence - -from typing_extensions import assert_type +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import SeedlessSeedSequence, SeedSequence def_rng = np.random.default_rng() seed_seq = np.random.SeedSequence() @@ -74,12 +72,11 @@ assert_type(sfc64_raw_arr, npt.NDArray[np.uint64]) assert_type(sfc64.lock, threading.Lock) assert_type(seed_seq.pool, npt.NDArray[np.uint32]) -assert_type(seed_seq.entropy, None | int | Sequence[int]) +assert_type(seed_seq.entropy, int | Sequence[int] | None) assert_type(seed_seq.spawn(1), list[np.random.SeedSequence]) assert_type(seed_seq.generate_state(8, "uint32"), npt.NDArray[np.uint32 | np.uint64]) assert_type(seed_seq.generate_state(8, "uint64"), npt.NDArray[np.uint32 | np.uint64]) - def_gen: np.random.Generator = np.random.default_rng() D_arr_0p1: npt.NDArray[np.float64] = np.array([0.1]) @@ 
-637,7 +634,6 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), np assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) - assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) @@ -891,7 +887,6 @@ assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), n assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) - assert_type(def_gen.bit_generator, np.random.BitGenerator) assert_type(def_gen.bytes(2), bytes) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/rec.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/rec.pyi index 5a9bf54..d28f273 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/rec.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/rec.pyi @@ -1,13 +1,13 @@ import io -from typing import Any +from typing import Any, TypeAlias, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type +_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]] AR_i8: npt.NDArray[np.int64] -REC_AR_V: np.recarray[tuple[int, ...], np.dtype[np.record]] +REC_AR_V: _RecArray AR_LIST: list[npt.NDArray[np.int64]] record: np.record @@ -43,7 +43,7 @@ assert_type( order="K", byteorder="|", ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -52,13 +52,13 @@ assert_type( dtype=[("f8", np.float64), ("i8", np.int64)], strides=(5, 5), ), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) -assert_type(np.rec.fromarrays(AR_LIST), np.recarray[Any, 
np.dtype[Any]]) +assert_type(np.rec.fromarrays(AR_LIST), np.recarray) assert_type( np.rec.fromarrays(AR_LIST, dtype=np.int64), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) assert_type( np.rec.fromarrays( @@ -66,12 +66,12 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( np.rec.fromrecords((1, 1.5)), - np.recarray[Any, np.dtype[np.record]] + _RecArray ) assert_type( @@ -79,7 +79,7 @@ assert_type( [(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)], ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -88,7 +88,7 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -96,7 +96,7 @@ assert_type( b"(1, 1.5)", dtype=[("i8", np.int64), ("f8", np.float64)], ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -105,13 +105,16 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) -assert_type(np.rec.fromfile( - "test_file.txt", - dtype=[("i8", np.int64), ("f8", np.float64)], -), np.recarray[Any, np.dtype[Any]]) +assert_type( + np.rec.fromfile( + "test_file.txt", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + np.recarray, +) assert_type( np.rec.fromfile( @@ -119,14 +122,14 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) -assert_type(np.rec.array(AR_i8), np.recarray[Any, np.dtype[np.int64]]) +assert_type(np.rec.array(AR_i8), np.recarray[tuple[Any, ...], np.dtype[np.int64]]) assert_type( np.rec.array([(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)]), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) assert_type( @@ -135,7 +138,7 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -144,7 +147,7 @@ 
assert_type( dtype=np.float64, shape=(10, 3), ), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) assert_type( @@ -154,15 +157,15 @@ assert_type( names=["i8", "f8"], shape=(10, 3), ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( np.rec.array(file_obj, dtype=np.float64), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) assert_type( np.rec.array(file_obj, formats=[np.int64, np.float64], names=["i8", "f8"]), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/scalars.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/scalars.pyi index 365c1e7..06378f1 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/scalars.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/scalars.pyi @@ -1,8 +1,6 @@ -from typing import Any, Literal, TypeAlias -from typing_extensions import Unpack, assert_type +from typing import Any, Literal, TypeAlias, assert_type import numpy as np -import numpy.typing as npt _1: TypeAlias = Literal[1] @@ -44,7 +42,7 @@ assert_type(c8.dtype, np.dtype[np.complex64]) assert_type(c8.real, np.float32) assert_type(c16.imag, np.float64) -assert_type(np.str_('foo'), np.str_) +assert_type(np.str_("foo"), np.str_) assert_type(V[0], Any) assert_type(V["field1"], Any) @@ -122,7 +120,7 @@ assert_type( S.reshape(1, 1, 1, 1, 1), np.ndarray[ # len(shape) >= 5 - tuple[_1, _1, _1, _1, _1, Unpack[tuple[_1, ...]]], + tuple[_1, _1, _1, _1, _1, *tuple[_1, ...]], np.dtype[np.bytes_], ], ) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/shape.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/shape.pyi index d236479..fce3f15 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/shape.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/shape.pyi @@ -1,8 +1,6 @@ -from typing import Any, NamedTuple +from typing import Any, NamedTuple, assert_type import numpy as np -from typing_extensions import assert_type - # Subtype of tuple[int, int] class 
XYGrid(NamedTuple): diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/shape_base.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/shape_base.pyi index baaa78b..fc1131c 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/shape_base.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - i8: np.int64 f8: np.float64 @@ -44,8 +42,8 @@ assert_type(np.dsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) assert_type(np.kron(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/stride_tricks.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/stride_tricks.pyi index fda149e..2d18b10 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/stride_tricks.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/stride_tricks.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_f8: npt.NDArray[np.float64] AR_LIKE_f: list[float] interface_dict: dict[str, Any] @@ -22,8 +20,8 @@ assert_type(np.broadcast_to(AR_f8, 5), npt.NDArray[np.float64]) assert_type(np.broadcast_to(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) assert_type(np.broadcast_to(AR_f8, [4, 6], subok=True), npt.NDArray[np.float64]) -assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[int, ...]) 
-assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[int, ...]) +assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[Any, ...]) +assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[Any, ...]) assert_type(np.broadcast_arrays(AR_f8, AR_f8), tuple[npt.NDArray[Any], ...]) assert_type(np.broadcast_arrays(AR_f8, AR_LIKE_f), tuple[npt.NDArray[Any], ...]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/strings.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/strings.pyi index d9e812a..1cafda2 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/strings.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/strings.pyi @@ -1,16 +1,15 @@ +from typing import TypeAlias, assert_type + import numpy as np -import numpy.typing as npt import numpy._typing as np_t +import numpy.typing as npt -from typing_extensions import assert_type -from typing import TypeAlias +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -AR_T: np.ndarray[np_t._Shape, np.dtypes.StringDType] - -AR_T_alias: TypeAlias = np.ndarray[np_t._Shape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +AR_T: AR_T_alias assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool]) @@ -191,3 +190,7 @@ assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/testing.pyi 
b/blimgui/dist64/numpy/typing/tests/data/reveal/testing.pyi index 14fd57d..cf8d503 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/testing.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/testing.pyi @@ -1,30 +1,28 @@ +import contextlib import re import sys -import warnings import types import unittest -import contextlib +import warnings from collections.abc import Callable -from typing import Any, TypeVar from pathlib import Path +from typing import Any, TypeVar, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] bool_obj: bool -suppress_obj: np.testing.suppress_warnings +suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] FT = TypeVar("FT", bound=Callable[..., Any]) def func() -> int: ... def func2( - x: npt.NDArray[np.number[Any]], - y: npt.NDArray[np.number[Any]], + x: npt.NDArray[np.number], + y: npt.NDArray[np.number], ) -> npt.NDArray[np.bool]: ... 
assert_type(np.testing.KnownFailureException(), np.testing.KnownFailureException) @@ -60,12 +58,12 @@ with np.testing.clear_and_catch_warnings(True) as c1: with np.testing.clear_and_catch_warnings() as c2: assert_type(c2, None) -assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) -assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(suppress_obj.filter(RuntimeWarning), None) assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) with suppress_obj as c3: - assert_type(c3, np.testing.suppress_warnings) + assert_type(c3, np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.verbose, int) assert_type(np.testing.IS_PYPY, bool) @@ -78,7 +76,7 @@ assert_type(np.testing.assert_(2, msg=lambda: "test"), None) if sys.platform == "win32" or sys.platform == "cygwin": assert_type(np.testing.memusage(), int) elif sys.platform == "linux": - assert_type(np.testing.memusage(), None | int) + assert_type(np.testing.memusage(), int | None) assert_type(np.testing.jiffies(), int) @@ -92,7 +90,7 @@ assert_type(np.testing.assert_equal({1}, {1}), None) assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) -assert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None) +assert_type(np.testing.print_assert_equal("Test XYZ of func xyz", [0, 1], [0, 1]), None) assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) @@ -174,8 +172,8 @@ 
assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), Non assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any]) assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) -assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) -assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] def func4(a: int, b: str) -> bool: ... diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/twodim_base.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/twodim_base.pyi index ac9b706..5f5627d 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,152 +1,225 @@ -from typing import Any, TypeVar +from typing import Any, TypeAlias, TypeVar, assert_type, type_check_only import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - -_SCT = TypeVar("_SCT", bound=np.generic) - - -def func1(ar: npt.NDArray[_SCT], a: int) -> npt.NDArray[_SCT]: - pass - - -def func2(ar: npt.NDArray[np.number[Any]], a: str) -> npt.NDArray[np.float64]: - pass - - -AR_b: npt.NDArray[np.bool] -AR_u: npt.NDArray[np.uint64] -AR_i: npt.NDArray[np.int64] -AR_f: npt.NDArray[np.float64] -AR_c: npt.NDArray[np.complex128] -AR_O: npt.NDArray[np.object_] - -AR_LIKE_b: list[bool] -AR_LIKE_c: list[complex] - -assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) -assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) - -assert_type(np.flipud(AR_b), npt.NDArray[np.bool]) -assert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any]) - -assert_type(np.eye(10), npt.NDArray[np.float64]) 
-assert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any]) - -assert_type(np.diag(AR_b), npt.NDArray[np.bool]) -assert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.diagflat(AR_b), npt.NDArray[np.bool]) -assert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.tri(10), npt.NDArray[np.float64]) -assert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any]) - -assert_type(np.tril(AR_b), npt.NDArray[np.bool]) -assert_type(np.tril(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.triu(AR_b), npt.NDArray[np.bool]) -assert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.vander(AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.vander(AR_u), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating[Any]]) -assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.vander(AR_O), npt.NDArray[np.object_]) - +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_ND: TypeAlias = tuple[Any, ...] 
+ +_Indices2D: TypeAlias = tuple[ + np.ndarray[_1D, np.dtype[np.intp]], + np.ndarray[_1D, np.dtype[np.intp]], +] + +### + +_nd_bool: np.ndarray[_ND, np.dtype[np.bool]] +_1d_bool: np.ndarray[_1D, np.dtype[np.bool]] +_2d_bool: np.ndarray[_2D, np.dtype[np.bool]] +_nd_u64: np.ndarray[_ND, np.dtype[np.uint64]] +_nd_i64: np.ndarray[_ND, np.dtype[np.int64]] +_nd_f64: np.ndarray[_ND, np.dtype[np.float64]] +_nd_c128: np.ndarray[_ND, np.dtype[np.complex128]] +_nd_obj: np.ndarray[_ND, np.dtype[np.object_]] + +_to_nd_bool: list[bool] | list[list[bool]] +_to_1d_bool: list[bool] +_to_2d_bool: list[list[bool]] + +_to_1d_f64: list[float] +_to_1d_c128: list[complex] + +@type_check_only +def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... +@type_check_only +def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... + +@type_check_only +class _Cube: + shape = 3, 4 + ndim = 2 + +### + +# fliplr +assert_type(np.fliplr(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.fliplr(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.fliplr(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.fliplr(_to_nd_bool), np.ndarray) +assert_type(np.fliplr(_to_1d_bool), np.ndarray) +assert_type(np.fliplr(_to_2d_bool), np.ndarray) + +# flipud +assert_type(np.flipud(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.flipud(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.flipud(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.flipud(_to_nd_bool), np.ndarray) +assert_type(np.flipud(_to_1d_bool), np.ndarray) +assert_type(np.flipud(_to_2d_bool), np.ndarray) + +# eye +assert_type(np.eye(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.eye(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.eye(10, k=2, dtype=int), np.ndarray[_2D]) + +# diag +assert_type(np.diag(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.diag(_1d_bool), np.ndarray[_2D, 
np.dtype[np.bool]]) +assert_type(np.diag(_2d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.diag(_to_nd_bool, k=0), np.ndarray) +assert_type(np.diag(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diag(_to_2d_bool, k=0), np.ndarray[_1D]) + +# diagflat +assert_type(np.diagflat(_nd_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_to_nd_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_2d_bool, k=0), np.ndarray[_2D]) + +# tri +assert_type(np.tri(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.tri(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.tri(10, k=2, dtype=int), np.ndarray[_2D]) + +# tril +assert_type(np.tril(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.tril(_to_nd_bool, k=0), np.ndarray) +assert_type(np.tril(_to_1d_bool, k=0), np.ndarray) +assert_type(np.tril(_to_2d_bool, k=0), np.ndarray) + +# triu +assert_type(np.triu(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.triu(_to_nd_bool, k=0), np.ndarray) +assert_type(np.triu(_to_1d_bool, k=0), np.ndarray) +assert_type(np.triu(_to_2d_bool, k=0), np.ndarray) + +# vander +assert_type(np.vander(_nd_bool), np.ndarray[_2D, np.dtype[np.int_]]) +assert_type(np.vander(_nd_u64), np.ndarray[_2D, np.dtype[np.uint64]]) +assert_type(np.vander(_nd_i64, N=2), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.vander(_nd_f64, increasing=True), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.vander(_nd_c128), np.ndarray[_2D, np.dtype[np.complex128]]) +assert_type(np.vander(_nd_obj), np.ndarray[_2D, np.dtype[np.object_]]) + +# histogram2d assert_type( - np.histogram2d(AR_LIKE_c, AR_LIKE_c), + np.histogram2d(_to_1d_f64, _to_1d_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128 | 
np.float64], - npt.NDArray[np.complex128 | np.float64], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_i, AR_b), + np.histogram2d(_to_1d_c128, _to_1d_c128), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) assert_type( - np.histogram2d(AR_f, AR_i), + np.histogram2d(_nd_i64, _nd_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_i, AR_f), + np.histogram2d(_nd_f64, _nd_i64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), + np.histogram2d(_nd_i64, _nd_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, bins=8), + np.histogram2d(_nd_f64, _nd_c128, weights=_to_1d_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_f, bins=(8, 5)), + np.histogram2d(_nd_f64, _nd_c128, bins=8), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_2D, np.dtype[np.float64]], + 
np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_i, bins=AR_u), + np.histogram2d(_nd_c128, _nd_f64, bins=(8, 5)), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.uint64], - npt.NDArray[np.uint64], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + np.histogram2d(_nd_c128, _nd_i64, bins=_nd_u64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.uint64], - npt.NDArray[np.uint64], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], ], ) assert_type( - np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_u64, _nd_u64)), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.bool | np.complex128], - npt.NDArray[np.bool | np.complex128], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_bool, 8)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_to_1d_f64, 8)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) -assert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) -assert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) - -assert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.tril_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices(10), 
tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) +# mask_indices +assert_type(np.mask_indices(10, func1), _Indices2D) +assert_type(np.mask_indices(8, func2, "0"), _Indices2D) + +# tril_indices +assert_type(np.tril_indices(3), _Indices2D) +assert_type(np.tril_indices(3, 1), _Indices2D) +assert_type(np.tril_indices(3, 1, 2), _Indices2D) +# tril_indices +assert_type(np.triu_indices(3), _Indices2D) +assert_type(np.triu_indices(3, 1), _Indices2D) +assert_type(np.triu_indices(3, 1, 2), _Indices2D) + +# tril_indices_from +assert_type(np.tril_indices_from(_2d_bool), _Indices2D) +assert_type(np.tril_indices_from(_Cube()), _Indices2D) +# triu_indices_from +assert_type(np.triu_indices_from(_2d_bool), _Indices2D) +assert_type(np.triu_indices_from(_Cube()), _Indices2D) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/type_check.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/type_check.pyi index 3212ca1..ee40bd1 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/type_check.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/type_check.pyi @@ -1,10 +1,7 @@ -from typing import Any, Literal +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _16Bit, _32Bit, _64Bit, _128Bit - -from typing_extensions import assert_type f8: np.float64 f: float @@ -14,7 +11,7 @@ AR_i8: npt.NDArray[np.int64] AR_i4: npt.NDArray[np.int32] AR_f2: npt.NDArray[np.float16] AR_f8: npt.NDArray[np.float64] -AR_f16: npt.NDArray[np.floating[_128Bit]] +AR_f16: npt.NDArray[np.longdouble] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] @@ -53,11 +50,8 @@ assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64]) assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any]) assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64]) -assert_type( - np.real_if_close(AR_c16), - 
npt.NDArray[np.floating[_64Bit]] | npt.NDArray[np.complexfloating[_64Bit, _64Bit]], -) -assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32] | npt.NDArray[np.complex64]) +assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) +assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64]) assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) assert_type(np.typename("h"), Literal["short"]) @@ -65,15 +59,9 @@ assert_type(np.typename("B"), Literal["unsigned char"]) assert_type(np.typename("V"), Literal["void"]) assert_type(np.typename("S1"), Literal["character"]) -assert_type(np.common_type(AR_i4), type[np.floating[_64Bit]]) +assert_type(np.common_type(AR_i4), type[np.float64]) assert_type(np.common_type(AR_f2), type[np.float16]) -assert_type(np.common_type(AR_f2, AR_i4), type[np.floating[_16Bit | _64Bit]]) -assert_type(np.common_type(AR_f16, AR_i4), type[np.floating[_64Bit | _128Bit]]) -assert_type( - np.common_type(AR_c8, AR_f2), - type[np.complexfloating[_16Bit | _32Bit, _16Bit | _32Bit]], -) -assert_type( - np.common_type(AR_f2, AR_c8, AR_i4), - type[np.complexfloating[_16Bit | _32Bit | _64Bit, _16Bit | _32Bit | _64Bit]], -) +assert_type(np.common_type(AR_f2, AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f16, AR_i4), type[np.longdouble]) +assert_type(np.common_type(AR_c8, AR_f2), type[np.complex64]) +assert_type(np.common_type(AR_f2, AR_c8, AR_i4), type[np.complexfloating]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ufunc_config.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ufunc_config.pyi index 2363181..f6f19f3 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -1,13 +1,11 @@ """Typing tests for `_core._ufunc_config`.""" from _typeshed import SupportsWrite -from typing import Any from collections.abc import Callable +from typing import Any, assert_type import numpy as np -from 
typing_extensions import assert_type - def func(a: str, b: int) -> None: ... class Write: @@ -23,9 +21,9 @@ assert_type(np.geterr(), np._core._ufunc_config._ErrDict) assert_type(np.setbufsize(4096), int) assert_type(np.getbufsize(), int) -assert_type(np.seterrcall(func), Callable[[str, int], Any] | None | SupportsWrite[str]) -assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | None | SupportsWrite[str]) -assert_type(np.geterrcall(), Callable[[str, int], Any] | None | SupportsWrite[str]) +assert_type(np.seterrcall(func), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.geterrcall(), Callable[[str, int], Any] | SupportsWrite[str] | None) assert_type(np.errstate(call=func, all="call"), np.errstate) assert_type(np.errstate(call=Write(), divide="log", over="log"), np.errstate) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ufunclike.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ufunclike.pyi index 8fbfa22..35e1138 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -1,10 +1,8 @@ -from typing import Any +from typing import assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] AR_LIKE_i: list[int] @@ -13,12 +11,12 @@ AR_LIKE_O: list[np.object_] AR_U: npt.NDArray[np.str_] -assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating[Any]]) -assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating[Any]]) -assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating[Any]]) -assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating[Any]]) -assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) -assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) # type: ignore[deprecated] 
+assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) # type: ignore[deprecated] assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool]) assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool]) diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/ufuncs.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/ufuncs.pyi index 77a96d0..f2a15d5 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -1,10 +1,8 @@ -from typing import Literal, Any, NoReturn +from typing import Any, Literal, NoReturn, assert_type import numpy as np import numpy.typing as npt -from typing_extensions import assert_type - i8: np.int64 f8: np.float64 AR_f8: npt.NDArray[np.float64] @@ -100,26 +98,45 @@ assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) -assert_type(np.absolute.outer(), NoReturn) -assert_type(np.frexp.outer(), NoReturn) -assert_type(np.divmod.outer(), NoReturn) -assert_type(np.matmul.outer(), NoReturn) - -assert_type(np.absolute.reduceat(), NoReturn) -assert_type(np.frexp.reduceat(), NoReturn) -assert_type(np.divmod.reduceat(), NoReturn) -assert_type(np.matmul.reduceat(), NoReturn) - -assert_type(np.absolute.reduce(), NoReturn) -assert_type(np.frexp.reduce(), NoReturn) -assert_type(np.divmod.reduce(), NoReturn) -assert_type(np.matmul.reduce(), NoReturn) - -assert_type(np.absolute.accumulate(), NoReturn) -assert_type(np.frexp.accumulate(), NoReturn) -assert_type(np.divmod.accumulate(), NoReturn) -assert_type(np.matmul.accumulate(), NoReturn) - 
-assert_type(np.frexp.at(), NoReturn) -assert_type(np.divmod.at(), NoReturn) -assert_type(np.matmul.at(), NoReturn) +def test_absolute_outer_invalid() -> None: + assert_type(np.absolute.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_outer_invalid() -> None: + assert_type(np.frexp.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_outer_invalid() -> None: + assert_type(np.divmod.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_outer_invalid() -> None: + assert_type(np.matmul.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] + +def test_absolute_reduceat_invalid() -> None: + assert_type(np.absolute.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_frexp_reduceat_invalid() -> None: + assert_type(np.frexp.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_divmod_reduceat_invalid() -> None: + assert_type(np.divmod.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_matmul_reduceat_invalid() -> None: + assert_type(np.matmul.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] + +def test_absolute_reduce_invalid() -> None: + assert_type(np.absolute.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_reduce_invalid() -> None: + assert_type(np.frexp.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_reduce_invalid() -> None: + assert_type(np.divmod.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_reduce_invalid() -> None: + assert_type(np.matmul.reduce(AR_f8), NoReturn) # type: ignore[arg-type] + +def test_absolute_accumulate_invalid() -> None: + assert_type(np.absolute.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_accumulate_invalid() -> None: + assert_type(np.frexp.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_accumulate_invalid() -> None: + assert_type(np.divmod.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def 
test_matmul_accumulate_invalid() -> None: + assert_type(np.matmul.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] + +def test_frexp_at_invalid() -> None: + assert_type(np.frexp.at(AR_f8, i8), NoReturn) # type: ignore[arg-type] +def test_divmod_at_invalid() -> None: + assert_type(np.divmod.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_at_invalid() -> None: + assert_type(np.matmul.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] diff --git a/blimgui/dist64/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/blimgui/dist64/numpy/typing/tests/data/reveal/warnings_and_errors.pyi index 1f1be13..4c0d23b 100644 --- a/blimgui/dist64/numpy/typing/tests/data/reveal/warnings_and_errors.pyi +++ b/blimgui/dist64/numpy/typing/tests/data/reveal/warnings_and_errors.pyi @@ -1,6 +1,6 @@ -import numpy.exceptions as ex +from typing import assert_type -from typing_extensions import assert_type +import numpy.exceptions as ex assert_type(ex.ModuleDeprecationWarning(), ex.ModuleDeprecationWarning) assert_type(ex.VisibleDeprecationWarning(), ex.VisibleDeprecationWarning) diff --git a/blimgui/dist64/numpy/typing/tests/test_isfile.py b/blimgui/dist64/numpy/typing/tests/test_isfile.py index 41978ae..5175147 100644 --- a/blimgui/dist64/numpy/typing/tests/test_isfile.py +++ b/blimgui/dist64/numpy/typing/tests/test_isfile.py @@ -2,6 +2,8 @@ import sys from pathlib import Path +import pytest + import numpy as np from numpy.testing import assert_ @@ -9,7 +11,7 @@ FILES = [ ROOT / "py.typed", ROOT / "__init__.pyi", - ROOT / "ctypeslib.pyi", + ROOT / "ctypeslib" / "__init__.pyi", ROOT / "_core" / "__init__.pyi", ROOT / "f2py" / "__init__.pyi", ROOT / "fft" / "__init__.pyi", @@ -25,6 +27,10 @@ FILES += [ROOT / "distutils" / "__init__.pyi"] +@pytest.mark.thread_unsafe( + reason="os.path has a thread-safety bug (python/cpython#140054). 
" + "Expected to only be a problem in 3.14.0" +) class TestIsFile: def test_isfile(self): """Test if all ``.pyi`` files are properly installed.""" diff --git a/blimgui/dist64/numpy/typing/tests/test_runtime.py b/blimgui/dist64/numpy/typing/tests/test_runtime.py index e9c69ac..2d9b9f2 100644 --- a/blimgui/dist64/numpy/typing/tests/test_runtime.py +++ b/blimgui/dist64/numpy/typing/tests/test_runtime.py @@ -1,34 +1,46 @@ """Test the runtime usage of `numpy.typing`.""" -from __future__ import annotations - from typing import ( - get_type_hints, - Union, + Any, NamedTuple, + Union, # pyright: ignore[reportDeprecated] get_args, get_origin, - Any, + get_type_hints, ) import pytest + import numpy as np -import numpy.typing as npt import numpy._typing as _npt +import numpy.typing as npt class TypeTup(NamedTuple): typ: type args: tuple[type, ...] - origin: None | type + origin: type | None + + +def _flatten_type_alias(t: Any) -> Any: + # "flattens" a TypeAliasType to its underlying type alias + return getattr(t, "__value__", t) NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) TYPES = { - "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), - "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), - "NBitBase": TypeTup(npt.NBitBase, (), None), + "ArrayLike": TypeTup( + _flatten_type_alias(npt.ArrayLike), + _flatten_type_alias(npt.ArrayLike).__args__, + Union, + ), + "DTypeLike": TypeTup( + _flatten_type_alias(npt.DTypeLike), + _flatten_type_alias(npt.DTypeLike).__args__, + Union, + ), + "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] "NDArray": NDArrayTup, } @@ -54,10 +66,7 @@ def test_get_type_hints(name: type, tup: TypeTup) -> None: """Test `typing.get_type_hints`.""" typ = tup.typ - # Explicitly set `__annotations__` in order to circumvent the - # stringification performed by `from __future__ import annotations` - def func(a): pass - func.__annotations__ = {"a": typ, 
"return": None} + def func(a: typ) -> None: pass out = get_type_hints(func) ref = {"a": typ, "return": type(None)} @@ -69,13 +78,10 @@ def test_get_type_hints_str(name: type, tup: TypeTup) -> None: """Test `typing.get_type_hints` with string-representation of types.""" typ_str, typ = f"npt.{name}", tup.typ - # Explicitly set `__annotations__` in order to circumvent the - # stringification performed by `from __future__ import annotations` - def func(a): pass - func.__annotations__ = {"a": typ_str, "return": None} + def func(a: typ_str) -> None: pass out = get_type_hints(func) - ref = {"a": typ, "return": type(None)} + ref = {"a": getattr(npt, str(name)), "return": type(None)} assert out == ref @@ -87,7 +93,6 @@ def test_keys() -> None: PROTOCOLS: dict[str, tuple[type[Any], object]] = { - "_SupportsDType": (_npt._SupportsDType, np.int64(1)), "_SupportsArray": (_npt._SupportsArray, np.arange(10)), "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), "_NestedSequence": (_npt._NestedSequence, [1]), @@ -101,9 +106,5 @@ def test_isinstance(self, cls: type[Any], obj: object) -> None: assert not isinstance(None, cls) def test_issubclass(self, cls: type[Any], obj: object) -> None: - if cls is _npt._SupportsDType: - pytest.xfail( - "Protocols with non-method members don't support issubclass()" - ) assert issubclass(type(obj), cls) assert not issubclass(type(None), cls) diff --git a/blimgui/dist64/numpy/typing/tests/test_typing.py b/blimgui/dist64/numpy/typing/tests/test_typing.py index 3c1b371..716c975 100644 --- a/blimgui/dist64/numpy/typing/tests/test_typing.py +++ b/blimgui/dist64/numpy/typing/tests/test_typing.py @@ -1,15 +1,12 @@ -from __future__ import annotations - import importlib.util import os import re import shutil +import textwrap from collections import defaultdict from typing import TYPE_CHECKING import pytest -from numpy.typing.mypy_plugin import _EXTENDED_PRECISION_LIST - # Only trigger a full `mypy` run if this environment variable is set # Note 
that these tests tend to take over a minute even on a macOS M1 CPU, @@ -34,6 +31,7 @@ if TYPE_CHECKING: from collections.abc import Iterator + # We need this as annotation, but it's located in a private namespace. # As a compromise, do *not* import it during runtime from _pytest.mark.structures import ParameterSet @@ -84,7 +82,7 @@ def run_mypy() -> None: """ if ( os.path.isdir(CACHE_DIR) - and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) + and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) # noqa: PLW1508 ): shutil.rmtree(CACHE_DIR) @@ -99,9 +97,9 @@ def run_mypy() -> None: directory, ]) if stderr: - pytest.fail(f"Unexpected mypy standard error\n\n{stderr}") + pytest.fail(f"Unexpected mypy standard error\n\n{stderr}", False) elif exit_code not in {0, 1}: - pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}") + pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}", False) str_concat = "" filename: str | None = None @@ -118,98 +116,47 @@ def run_mypy() -> None: filename = None -def get_test_cases(directory: str) -> Iterator[ParameterSet]: - for root, _, files in os.walk(directory): - for fname in files: - short_fname, ext = os.path.splitext(fname) - if ext in (".pyi", ".py"): +def get_test_cases(*directories: str) -> "Iterator[ParameterSet]": + for directory in directories: + for root, _, files in os.walk(directory): + for fname in files: + short_fname, ext = os.path.splitext(fname) + if ext not in (".pyi", ".py"): + continue + fullpath = os.path.join(root, fname) yield pytest.param(fullpath, id=short_fname) -@pytest.mark.slow -@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -@pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) -def test_success(path) -> None: - # Alias `OUTPUT_MYPY` so that it appears in the local namespace - output_mypy = OUTPUT_MYPY - if path in output_mypy: - msg = "Unexpected mypy output\n\n" - msg += "\n".join(_strip_filename(v)[1] for v in output_mypy[path]) - raise 
AssertionError(msg) - +_FAIL_INDENT = " " * 4 +_FAIL_SEP = "\n" + "_" * 79 + "\n\n" -@pytest.mark.slow -@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR)) -def test_fail(path: str) -> None: - __tracebackhide__ = True +_FAIL_MSG_REVEAL = """{}:{} - reveal mismatch: - with open(path) as fin: - lines = fin.readlines() +{}""" - errors = defaultdict(lambda: "") +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(PASS_DIR, FAIL_DIR)) +def test_pass(path) -> None: + # Alias `OUTPUT_MYPY` so that it appears in the local namespace output_mypy = OUTPUT_MYPY - assert path in output_mypy - - for error_line in output_mypy[path]: - lineno, error_line = _strip_filename(error_line) - errors[lineno] += f'{error_line}\n' - - for i, line in enumerate(lines): - lineno = i + 1 - if ( - line.startswith('#') - or (" E:" not in line and lineno not in errors) - ): - continue - - target_line = lines[lineno - 1] - if "# E:" in target_line: - expression, _, marker = target_line.partition(" # E: ") - error = errors[lineno].strip() - expected_error = marker.strip() - _test_fail(path, expression, error, expected_error, lineno) - else: - pytest.fail( - f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}" - ) + if path not in output_mypy: + return -_FAIL_MSG1 = """Extra error at line {} - -Expression: {} -Extra error: {!r} -""" - -_FAIL_MSG2 = """Error mismatch at line {} - -Expression: {} -Expected error: {} -Observed error: {!r} -""" - - -def _test_fail( - path: str, - expression: str, - error: str, - expected_error: None | str, - lineno: int, -) -> None: - if expected_error is None: - raise AssertionError(_FAIL_MSG1.format(lineno, expression, error)) - elif expected_error not in error: - raise AssertionError(_FAIL_MSG2.format( - lineno, expression, expected_error, error - )) - + relpath = os.path.relpath(path) -_REVEAL_MSG = """Reveal 
mismatch at line {} + # collect any reported errors, and clean up the output + messages = [] + for message in output_mypy[path]: + lineno, content = _strip_filename(message) + content = content.removeprefix("error:").lstrip() + messages.append(f"{relpath}:{lineno} - {content}") -{} -""" + if messages: + pytest.fail("\n".join(messages), pytrace=False) @pytest.mark.slow @@ -225,9 +172,19 @@ def test_reveal(path: str) -> None: if path not in output_mypy: return + relpath = os.path.relpath(path) + + # collect any reported errors, and clean up the output + failures = [] for error_line in output_mypy[path]: - lineno, error_line = _strip_filename(error_line) - raise AssertionError(_REVEAL_MSG.format(lineno, error_line)) + lineno, error_msg = _strip_filename(error_line) + error_msg = textwrap.indent(error_msg, _FAIL_INDENT) + reason = _FAIL_MSG_REVEAL.format(relpath, lineno, error_msg) + failures.append(reason) + + if failures: + reasons = _FAIL_SEP.join(failures) + pytest.fail(reasons, pytrace=False) @pytest.mark.slow @@ -246,41 +203,3 @@ def test_code_runs(path: str) -> None: test_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(test_module) - - -LINENO_MAPPING = { - 11: "uint128", - 12: "uint256", - 14: "int128", - 15: "int256", - 17: "float80", - 18: "float96", - 19: "float128", - 20: "float256", - 22: "complex160", - 23: "complex192", - 24: "complex256", - 25: "complex512", -} - - -@pytest.mark.slow -@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -def test_extended_precision() -> None: - path = os.path.join(MISC_DIR, "extended_precision.pyi") - output_mypy = OUTPUT_MYPY - assert path in output_mypy - - with open(path) as f: - expression_list = f.readlines() - - for _msg in output_mypy[path]: - lineno, msg = _strip_filename(_msg) - expression = expression_list[lineno - 1].rstrip("\n") - - if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST: - raise AssertionError(_REVEAL_MSG.format(lineno, msg)) - elif "error" not in msg: - 
_test_fail( - path, expression, msg, 'Expression is of type "Any"', lineno - ) diff --git a/blimgui/dist64/numpy/version.py b/blimgui/dist64/numpy/version.py index 56de969..921df4f 100644 --- a/blimgui/dist64/numpy/version.py +++ b/blimgui/dist64/numpy/version.py @@ -2,10 +2,10 @@ """ Module to expose more detailed version info for the installed `numpy` """ -version = "2.2.5" +version = "2.4.2" __version__ = version full_version = version -git_revision = "7be8c1f9133516fe20fd076f9bdfe23d9f537874" +git_revision = "c81c49f77451340651a751e76bca607d85e4fd55" release = 'dev' not in version and '+' not in version short_version = version.split("+")[0] diff --git a/blimgui/dist64/numpy/version.pyi b/blimgui/dist64/numpy/version.pyi index 0c10eec..0eb641b 100644 --- a/blimgui/dist64/numpy/version.pyi +++ b/blimgui/dist64/numpy/version.pyi @@ -1,20 +1,9 @@ -from typing import Final +from typing import Final, LiteralString -from typing_extensions import LiteralString +version: Final[LiteralString] = ... +__version__: Final[LiteralString] = ... +full_version: Final[LiteralString] = ... -__all__ = ( - '__version__', - 'full_version', - 'git_revision', - 'release', - 'short_version', - 'version', -) - -version: Final[LiteralString] -__version__: Final[LiteralString] -full_version: Final[LiteralString] - -git_revision: Final[LiteralString] -release: Final[bool] -short_version: Final[LiteralString] +git_revision: Final[LiteralString] = ... +release: Final[bool] = ... +short_version: Final[LiteralString] = ... 
diff --git a/blimgui/dist64/pydantic-2.11.4.dist-info/RECORD b/blimgui/dist64/pydantic-2.11.4.dist-info/RECORD deleted file mode 100644 index 390721f..0000000 --- a/blimgui/dist64/pydantic-2.11.4.dist-info/RECORD +++ /dev/null @@ -1,215 +0,0 @@ -pydantic-2.11.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pydantic-2.11.4.dist-info/METADATA,sha256=1brc0bV_8_1JdUpZT1moXvRI-PEuG_QgqVRh3uvOriY,66605 -pydantic-2.11.4.dist-info/RECORD,, -pydantic-2.11.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 -pydantic-2.11.4.dist-info/licenses/LICENSE,sha256=qeGG88oWte74QxjnpwFyE1GgDLe4rjpDlLZ7SeNSnvM,1129 -pydantic/__init__.py,sha256=D3_-0aRPoAF5EH4T4JPVOYLNEc-DeaCcDt6UzIjP_D0,15395 -pydantic/__pycache__/__init__.cpython-313.pyc,, -pydantic/__pycache__/_migration.cpython-313.pyc,, -pydantic/__pycache__/alias_generators.cpython-313.pyc,, -pydantic/__pycache__/aliases.cpython-313.pyc,, -pydantic/__pycache__/annotated_handlers.cpython-313.pyc,, -pydantic/__pycache__/class_validators.cpython-313.pyc,, -pydantic/__pycache__/color.cpython-313.pyc,, -pydantic/__pycache__/config.cpython-313.pyc,, -pydantic/__pycache__/dataclasses.cpython-313.pyc,, -pydantic/__pycache__/datetime_parse.cpython-313.pyc,, -pydantic/__pycache__/decorator.cpython-313.pyc,, -pydantic/__pycache__/env_settings.cpython-313.pyc,, -pydantic/__pycache__/error_wrappers.cpython-313.pyc,, -pydantic/__pycache__/errors.cpython-313.pyc,, -pydantic/__pycache__/fields.cpython-313.pyc,, -pydantic/__pycache__/functional_serializers.cpython-313.pyc,, -pydantic/__pycache__/functional_validators.cpython-313.pyc,, -pydantic/__pycache__/generics.cpython-313.pyc,, -pydantic/__pycache__/json.cpython-313.pyc,, -pydantic/__pycache__/json_schema.cpython-313.pyc,, -pydantic/__pycache__/main.cpython-313.pyc,, -pydantic/__pycache__/mypy.cpython-313.pyc,, -pydantic/__pycache__/networks.cpython-313.pyc,, -pydantic/__pycache__/parse.cpython-313.pyc,, 
-pydantic/__pycache__/root_model.cpython-313.pyc,, -pydantic/__pycache__/schema.cpython-313.pyc,, -pydantic/__pycache__/tools.cpython-313.pyc,, -pydantic/__pycache__/type_adapter.cpython-313.pyc,, -pydantic/__pycache__/types.cpython-313.pyc,, -pydantic/__pycache__/typing.cpython-313.pyc,, -pydantic/__pycache__/utils.cpython-313.pyc,, -pydantic/__pycache__/validate_call_decorator.cpython-313.pyc,, -pydantic/__pycache__/validators.cpython-313.pyc,, -pydantic/__pycache__/version.cpython-313.pyc,, -pydantic/__pycache__/warnings.cpython-313.pyc,, -pydantic/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pydantic/_internal/__pycache__/__init__.cpython-313.pyc,, -pydantic/_internal/__pycache__/_config.cpython-313.pyc,, -pydantic/_internal/__pycache__/_core_metadata.cpython-313.pyc,, -pydantic/_internal/__pycache__/_core_utils.cpython-313.pyc,, -pydantic/_internal/__pycache__/_dataclasses.cpython-313.pyc,, -pydantic/_internal/__pycache__/_decorators.cpython-313.pyc,, -pydantic/_internal/__pycache__/_decorators_v1.cpython-313.pyc,, -pydantic/_internal/__pycache__/_discriminated_union.cpython-313.pyc,, -pydantic/_internal/__pycache__/_docs_extraction.cpython-313.pyc,, -pydantic/_internal/__pycache__/_fields.cpython-313.pyc,, -pydantic/_internal/__pycache__/_forward_ref.cpython-313.pyc,, -pydantic/_internal/__pycache__/_generate_schema.cpython-313.pyc,, -pydantic/_internal/__pycache__/_generics.cpython-313.pyc,, -pydantic/_internal/__pycache__/_git.cpython-313.pyc,, -pydantic/_internal/__pycache__/_import_utils.cpython-313.pyc,, -pydantic/_internal/__pycache__/_internal_dataclass.cpython-313.pyc,, -pydantic/_internal/__pycache__/_known_annotated_metadata.cpython-313.pyc,, -pydantic/_internal/__pycache__/_mock_val_ser.cpython-313.pyc,, -pydantic/_internal/__pycache__/_model_construction.cpython-313.pyc,, -pydantic/_internal/__pycache__/_namespace_utils.cpython-313.pyc,, -pydantic/_internal/__pycache__/_repr.cpython-313.pyc,, 
-pydantic/_internal/__pycache__/_schema_gather.cpython-313.pyc,, -pydantic/_internal/__pycache__/_schema_generation_shared.cpython-313.pyc,, -pydantic/_internal/__pycache__/_serializers.cpython-313.pyc,, -pydantic/_internal/__pycache__/_signature.cpython-313.pyc,, -pydantic/_internal/__pycache__/_typing_extra.cpython-313.pyc,, -pydantic/_internal/__pycache__/_utils.cpython-313.pyc,, -pydantic/_internal/__pycache__/_validate_call.cpython-313.pyc,, -pydantic/_internal/__pycache__/_validators.cpython-313.pyc,, -pydantic/_internal/_config.py,sha256=WV07hp8xf0Q0yP9IwMvuGLQmu34AZl5sBs2JaOgCk9I,14253 -pydantic/_internal/_core_metadata.py,sha256=Y_g2t3i7uluK-wXCZvzJfRFMPUM23aBYLfae4FzBPy0,5162 -pydantic/_internal/_core_utils.py,sha256=_-ZuXhpi_0JDpZzz8jvGr82kgS3PEritWR22fjWpw48,6746 -pydantic/_internal/_dataclasses.py,sha256=8T9cHxxbHgUGLKs_Mj-SibBkHyo2ApSETy5FfLGf45w,8909 -pydantic/_internal/_decorators.py,sha256=NS7SKQvtDgnsAd37mjqtwPh19td57FJ69LsceO5SywI,32638 -pydantic/_internal/_decorators_v1.py,sha256=tfdfdpQKY4R2XCOwqHbZeoQMur6VNigRrfhudXBHx38,6185 -pydantic/_internal/_discriminated_union.py,sha256=aMl0SRSyQyHfW4-klnMTHNvwSRoqE3H3PRV_05vRsTg,25478 -pydantic/_internal/_docs_extraction.py,sha256=p-STFvLHUzxrj6bblpaAAYWmq4INxVCAdIupDgQYSIw,3831 -pydantic/_internal/_fields.py,sha256=MSEp6dO6mOF1UZvhfbtdmbDTo2vjvJMrkCfdtrP2Y-I,20831 -pydantic/_internal/_forward_ref.py,sha256=5n3Y7-3AKLn8_FS3Yc7KutLiPUhyXmAtkEZOaFnonwM,611 -pydantic/_internal/_generate_schema.py,sha256=abAX1wK5BUPUoUvAAi-0ajPrGNSvnPt1yX0Y-crlwZs,133100 -pydantic/_internal/_generics.py,sha256=D1_0xgqnL6TJQe_fFyaSk2Ug_F-kT_jRBfLjHFLCIqQ,23849 -pydantic/_internal/_git.py,sha256=IwPh3DPfa2Xq3rBuB9Nx8luR2A1i69QdeTfWWXIuCVg,809 -pydantic/_internal/_import_utils.py,sha256=TRhxD5OuY6CUosioBdBcJUs0om7IIONiZdYAV7zQ8jM,402 -pydantic/_internal/_internal_dataclass.py,sha256=_bedc1XbuuygRGiLZqkUkwwFpQaoR1hKLlR501nyySY,144 
-pydantic/_internal/_known_annotated_metadata.py,sha256=lYAPiUhfSgfpY6qH9xJPJTEMoowv27QmcyOgQzys90U,16213 -pydantic/_internal/_mock_val_ser.py,sha256=wmRRFSBvqfcLbI41PsFliB4u2AZ3mJpZeiERbD3xKTo,8885 -pydantic/_internal/_model_construction.py,sha256=2Qa5Y4EgBojkhsVHu0OjpphUIlWYuVXMg1KC2opc00s,35228 -pydantic/_internal/_namespace_utils.py,sha256=CMG7nEAXVb-Idqyd3CgdulRrM-zEXOPe3kYEDBqnSKw,12878 -pydantic/_internal/_repr.py,sha256=t7GNyaUU8xvqwlDHxVE2IyDeaNZrK7p01ojQPP0UI_o,5081 -pydantic/_internal/_schema_gather.py,sha256=VLEv51TYEeeND2czsyrmJq1MVnJqTOmnLan7VG44c8A,9114 -pydantic/_internal/_schema_generation_shared.py,sha256=F_rbQbrkoomgxsskdHpP0jUJ7TCfe0BADAEkq6CJ4nM,4842 -pydantic/_internal/_serializers.py,sha256=qQ3Rak4J6bqbnjGCRjiAY4M8poLo0s5qH46sXZSQQuA,1474 -pydantic/_internal/_signature.py,sha256=8EljPJe4pSnapuirG5DkBAgD1hggHxEAyzFPH-9H0zE,6779 -pydantic/_internal/_typing_extra.py,sha256=PO3u2JmX3JKlTFy0Ew95iyjAgYHgJsqqskev4zooB2I,28216 -pydantic/_internal/_utils.py,sha256=iRmCSO0uoFhAL_ChHaYSCKrswpSrRHYoO_YQSFfCJxU,15344 -pydantic/_internal/_validate_call.py,sha256=PfdVnSzhXOrENtaDoDw3PFWPVYD5W_gNYPe8p3Ug6Lg,5321 -pydantic/_internal/_validators.py,sha256=TJcR9bxcPXjzntN6Qgib8cyPRkFZQxHW32SoKGEcp0k,20610 -pydantic/_migration.py,sha256=_6VCCVWNYB7fDpbP2MqW4bXXqo17C5_J907u9zNJQbM,11907 -pydantic/alias_generators.py,sha256=KM1n3u4JfLSBl1UuYg3hoYHzXJD-yvgrnq8u1ccwh_A,2124 -pydantic/aliases.py,sha256=vhCHyoSWnX-EJ-wWb5qj4xyRssgGWnTQfzQp4GSZ9ug,4937 -pydantic/annotated_handlers.py,sha256=WfyFSqwoEIFXBh7T73PycKloI1DiX45GWi0-JOsCR4Y,4407 -pydantic/class_validators.py,sha256=i_V3j-PYdGLSLmj_IJZekTRjunO8SIVz8LMlquPyP7E,148 -pydantic/color.py,sha256=AzqGfVQHF92_ZctDcue0DM4yTp2P6tekkwRINTWrLIo,21481 -pydantic/config.py,sha256=roz_FbfFPoVpJVpB1G7dJ8A3swghQjdN-ozrBxbLShM,42048 -pydantic/dataclasses.py,sha256=8Joks6SGCoRjf2q38P6_ZxLg0Mqab1jcYyiz9GT5Hlo,16215 -pydantic/datetime_parse.py,sha256=QC-WgMxMr_wQ_mNXUS7AVf-2hLEhvvsPY1PQyhSGOdk,150 
-pydantic/decorator.py,sha256=YX-jUApu5AKaVWKPoaV-n-4l7UbS69GEt9Ra3hszmKI,145 -pydantic/deprecated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pydantic/deprecated/__pycache__/__init__.cpython-313.pyc,, -pydantic/deprecated/__pycache__/class_validators.cpython-313.pyc,, -pydantic/deprecated/__pycache__/config.cpython-313.pyc,, -pydantic/deprecated/__pycache__/copy_internals.cpython-313.pyc,, -pydantic/deprecated/__pycache__/decorator.cpython-313.pyc,, -pydantic/deprecated/__pycache__/json.cpython-313.pyc,, -pydantic/deprecated/__pycache__/parse.cpython-313.pyc,, -pydantic/deprecated/__pycache__/tools.cpython-313.pyc,, -pydantic/deprecated/class_validators.py,sha256=rwfP165xity36foy1NNCg4Jf9Sul44sJLW-A5sseahI,10245 -pydantic/deprecated/config.py,sha256=k_lsVk57paxLJOcBueH07cu1OgEgWdVBxm6lfaC3CCU,2663 -pydantic/deprecated/copy_internals.py,sha256=Ku0LHLEU0WcoIInNHls7PjuBvpLFTQ4Uus77jQ3Yi08,7616 -pydantic/deprecated/decorator.py,sha256=TBm6bJ7wJsNih_8Wq5IzDcwP32m9_vfxs96desLuk00,10845 -pydantic/deprecated/json.py,sha256=HlWCG35RRrxyzuTS6LTQiZBwRhmDZWmeqQH8rLW6wA8,4657 -pydantic/deprecated/parse.py,sha256=Gzd6b_g8zJXcuE7QRq5adhx_EMJahXfcpXCF0RgrqqI,2511 -pydantic/deprecated/tools.py,sha256=Nrm9oFRZWp8-jlfvPgJILEsywp4YzZD52XIGPDLxHcI,3330 -pydantic/env_settings.py,sha256=6IHeeWEqlUPRUv3V-AXiF_W91fg2Jw_M3O0l34J_eyA,148 -pydantic/error_wrappers.py,sha256=RK6mqATc9yMD-KBD9IJS9HpKCprWHd8wo84Bnm-3fR8,150 -pydantic/errors.py,sha256=7ctBNCtt57kZFx71Ls2H86IufQARv4wPKf8DhdsVn5w,6002 -pydantic/experimental/__init__.py,sha256=j08eROfz-xW4k_X9W4m2AW26IVdyF3Eg1OzlIGA11vk,328 -pydantic/experimental/__pycache__/__init__.cpython-313.pyc,, -pydantic/experimental/__pycache__/arguments_schema.cpython-313.pyc,, -pydantic/experimental/__pycache__/pipeline.cpython-313.pyc,, -pydantic/experimental/arguments_schema.py,sha256=EFnjX_ulp-tPyUjQX5pmQtug1OFL_Acc8bcMbLd-fVY,1866 -pydantic/experimental/pipeline.py,sha256=znbMBvir3xvPA20Xj8Moco1oJMPf1VYVrIQ8KQNtDlM,23910 
-pydantic/fields.py,sha256=cAbnzt8bYy_KQK1aGq3FA5faecD1OB91sYgPU-UD_9I,63787 -pydantic/functional_serializers.py,sha256=3m81unH3lYovdMi00oZywlHhn1KDz9X2CO3iTtBya6A,17102 -pydantic/functional_validators.py,sha256=-yY6uj_9_GAI4aqqfZlzyGdzs06huzy6zNWD7TJp3_0,29560 -pydantic/generics.py,sha256=0ZqZ9O9annIj_3mGBRqps4htey3b5lV1-d2tUxPMMnA,144 -pydantic/json.py,sha256=ZH8RkI7h4Bz-zp8OdTAxbJUoVvcoU-jhMdRZ0B-k0xc,140 -pydantic/json_schema.py,sha256=KhsS_MWPox0PYqklnhJcb_3uiCVrEOgyhG53cUZv6QA,115430 -pydantic/main.py,sha256=NYm_65EEVV4FYUSyx8I6DGgJ5Ub1N4JpLegsrAc0uEk,80732 -pydantic/mypy.py,sha256=ta-lBmVd8P4S7px2qmWm-qyqSkBdqfBeOIzMilU0ifY,59265 -pydantic/networks.py,sha256=_YpSnBR2kMfoWX76sdq34cfCH-MWr5or0ve0tow7OWo,41446 -pydantic/parse.py,sha256=wkd82dgtvWtD895U_I6E1htqMlGhBSYEV39cuBSeo3A,141 -pydantic/plugin/__init__.py,sha256=5cXMmu5xL4LVZhWPE1XD8ozHZ-qEC2-s4seLe8tbN_Y,6965 -pydantic/plugin/__pycache__/__init__.cpython-313.pyc,, -pydantic/plugin/__pycache__/_loader.cpython-313.pyc,, -pydantic/plugin/__pycache__/_schema_validator.cpython-313.pyc,, -pydantic/plugin/_loader.py,sha256=nI3SEKr0mlCB556kvbyBXjYQw9b_s8UTKE9Q6iESX6s,2167 -pydantic/plugin/_schema_validator.py,sha256=QbmqsG33MBmftNQ2nNiuN22LhbrexUA7ipDVv3J02BU,5267 -pydantic/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pydantic/root_model.py,sha256=SCXhpRCgZgfqE9AGVJTC7kMAojKffL7PV4i0qcwOMm0,6279 -pydantic/schema.py,sha256=Vqqjvq_LnapVknebUd3Bp_J1p2gXZZnZRgL48bVEG7o,142 -pydantic/tools.py,sha256=iHQpd8SJ5DCTtPV5atAV06T89bjSaMFeZZ2LX9lasZY,141 -pydantic/type_adapter.py,sha256=Y3NE0YhFwxwoqrYU9caWymLWp1Avq4sRUdb5s01RoJk,31171 -pydantic/types.py,sha256=mWTvQH_Wt_CccQcEHYjcUWpyoj1U04WOnrMsMYod_64,104781 -pydantic/typing.py,sha256=P7feA35MwTcLsR1uL7db0S-oydBxobmXa55YDoBgajQ,138 -pydantic/utils.py,sha256=15nR2QpqTBFlQV4TNtTItMyTJx_fbyV-gPmIEY1Gooc,141 -pydantic/v1/__init__.py,sha256=SxQPklgBs4XHJwE6BZ9qoewYoGiNyYUnmHzEFCZbfnI,2946 -pydantic/v1/__pycache__/__init__.cpython-313.pyc,, 
-pydantic/v1/__pycache__/_hypothesis_plugin.cpython-313.pyc,, -pydantic/v1/__pycache__/annotated_types.cpython-313.pyc,, -pydantic/v1/__pycache__/class_validators.cpython-313.pyc,, -pydantic/v1/__pycache__/color.cpython-313.pyc,, -pydantic/v1/__pycache__/config.cpython-313.pyc,, -pydantic/v1/__pycache__/dataclasses.cpython-313.pyc,, -pydantic/v1/__pycache__/datetime_parse.cpython-313.pyc,, -pydantic/v1/__pycache__/decorator.cpython-313.pyc,, -pydantic/v1/__pycache__/env_settings.cpython-313.pyc,, -pydantic/v1/__pycache__/error_wrappers.cpython-313.pyc,, -pydantic/v1/__pycache__/errors.cpython-313.pyc,, -pydantic/v1/__pycache__/fields.cpython-313.pyc,, -pydantic/v1/__pycache__/generics.cpython-313.pyc,, -pydantic/v1/__pycache__/json.cpython-313.pyc,, -pydantic/v1/__pycache__/main.cpython-313.pyc,, -pydantic/v1/__pycache__/mypy.cpython-313.pyc,, -pydantic/v1/__pycache__/networks.cpython-313.pyc,, -pydantic/v1/__pycache__/parse.cpython-313.pyc,, -pydantic/v1/__pycache__/schema.cpython-313.pyc,, -pydantic/v1/__pycache__/tools.cpython-313.pyc,, -pydantic/v1/__pycache__/types.cpython-313.pyc,, -pydantic/v1/__pycache__/typing.cpython-313.pyc,, -pydantic/v1/__pycache__/utils.cpython-313.pyc,, -pydantic/v1/__pycache__/validators.cpython-313.pyc,, -pydantic/v1/__pycache__/version.cpython-313.pyc,, -pydantic/v1/_hypothesis_plugin.py,sha256=5ES5xWuw1FQAsymLezy8QgnVz0ZpVfU3jkmT74H27VQ,14847 -pydantic/v1/annotated_types.py,sha256=uk2NAAxqiNELKjiHhyhxKaIOh8F1lYW_LzrW3X7oZBc,3157 -pydantic/v1/class_validators.py,sha256=ULOaIUgYUDBsHL7EEVEarcM-UubKUggoN8hSbDonsFE,14672 -pydantic/v1/color.py,sha256=iZABLYp6OVoo2AFkP9Ipri_wSc6-Kklu8YuhSartd5g,16844 -pydantic/v1/config.py,sha256=a6P0Wer9x4cbwKW7Xv8poSUqM4WP-RLWwX6YMpYq9AA,6532 -pydantic/v1/dataclasses.py,sha256=784cqvInbwIPWr9usfpX3ch7z4t3J2tTK6N067_wk1o,18172 -pydantic/v1/datetime_parse.py,sha256=4Qy1kQpq3rNVZJeIHeSPDpuS2Bvhp1KPtzJG1xu-H00,7724 -pydantic/v1/decorator.py,sha256=zaaxxxoWPCm818D1bs0yhapRjXm32V8G0ZHWCdM1uXA,10339 
-pydantic/v1/env_settings.py,sha256=A9VXwtRl02AY-jH0C0ouy5VNw3fi6F_pkzuHDjgAAOM,14105 -pydantic/v1/error_wrappers.py,sha256=6625Mfw9qkC2NwitB_JFAWe8B-Xv6zBU7rL9k28tfyo,5196 -pydantic/v1/errors.py,sha256=mIwPED5vGM5Q5v4C4Z1JPldTRH-omvEylH6ksMhOmPw,17726 -pydantic/v1/fields.py,sha256=VqWJCriUNiEyptXroDVJ501JpVA0en2VANcksqXL2b8,50649 -pydantic/v1/generics.py,sha256=VzC9YUV-EbPpQ3aAfk1cNFej79_IzznkQ7WrmTTZS9E,17871 -pydantic/v1/json.py,sha256=WQ5Hy_hIpfdR3YS8k6N2E6KMJzsdbBi_ldWOPJaV81M,3390 -pydantic/v1/main.py,sha256=zuNpdN5Q0V0wG2UUTKt0HUy3XJ4OAvPSZDdiXY-FIzs,44824 -pydantic/v1/mypy.py,sha256=AiZYkv127-WsgL9vwvLqj0dS8dz-HUMbH9Yvvlq4bfE,38949 -pydantic/v1/networks.py,sha256=HYNtKAfOmOnKJpsDg1g6SIkj9WPhU_-i8l5e2JKBpG4,22124 -pydantic/v1/parse.py,sha256=BJtdqiZRtav9VRFCmOxoY-KImQmjPy-A_NoojiFUZxY,1821 -pydantic/v1/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pydantic/v1/schema.py,sha256=aqBuA--cq8gAVkim5BJPFASHzOZ8dFtmFX_fNGr6ip4,47801 -pydantic/v1/tools.py,sha256=1lDdXHk0jL5uP3u5RCYAvUAlGClgAO-45lkq9j7fyBA,2881 -pydantic/v1/types.py,sha256=Fltx5GoP_qaUmAktlGz7nFeJa13yNy3FY1-RcMzEVt8,35455 -pydantic/v1/typing.py,sha256=HNtuKvgH4EHIeb2ytkd7VSyG6mxP9RKqEqEql-1ab14,19720 -pydantic/v1/utils.py,sha256=M5FRyfNUb1A2mk9laGgCVdfHHb3AtQgrjO5qfyBf4xA,25989 -pydantic/v1/validators.py,sha256=lyUkn1MWhHxlCX5ZfEgFj_CAHojoiPcaQeMdEM9XviU,22187 -pydantic/v1/version.py,sha256=HXnXW-1bMW5qKhlr5RgOEPohrZDCDSuyy8-gi8GCgZo,1039 -pydantic/validate_call_decorator.py,sha256=8jqLlgXTjWEj4dXDg0wI3EGQKkb0JnCsL_JSUjbU5Sg,4389 -pydantic/validators.py,sha256=pwbIJXVb1CV2mAE4w_EGfNj7DwzsKaWw_tTL6cviTus,146 -pydantic/version.py,sha256=UoqE5sDRllO87cjBYcFejk_Jf-_1SrWlo3ejYSftixs,2710 -pydantic/warnings.py,sha256=gqDTQ2FX7wGLZJV3XboQSiRXKHknss3pfIOXL0BDXTk,3772 diff --git a/blimgui/dist64/pydantic-2.11.4.dist-info/INSTALLER b/blimgui/dist64/pydantic-2.12.5.dist-info/INSTALLER similarity index 100% rename from blimgui/dist64/pydantic-2.11.4.dist-info/INSTALLER rename to 
blimgui/dist64/pydantic-2.12.5.dist-info/INSTALLER diff --git a/blimgui/dist64/pydantic-2.11.4.dist-info/METADATA b/blimgui/dist64/pydantic-2.12.5.dist-info/METADATA similarity index 73% rename from blimgui/dist64/pydantic-2.11.4.dist-info/METADATA rename to blimgui/dist64/pydantic-2.12.5.dist-info/METADATA index c08e56d..fe0df08 100644 --- a/blimgui/dist64/pydantic-2.11.4.dist-info/METADATA +++ b/blimgui/dist64/pydantic-2.12.5.dist-info/METADATA @@ -1,13 +1,13 @@ Metadata-Version: 2.4 Name: pydantic -Version: 2.11.4 +Version: 2.12.5 Summary: Data validation using Python type hints Project-URL: Homepage, https://github.com/pydantic/pydantic Project-URL: Documentation, https://docs.pydantic.dev Project-URL: Funding, https://github.com/sponsors/samuelcolvin Project-URL: Source, https://github.com/pydantic/pydantic Project-URL: Changelog, https://docs.pydantic.dev/latest/changelog/ -Author-email: Samuel Colvin , Eric Jolibois , Hasan Ramezani , Adrian Garcia Badaracco <1755071+adriangb@users.noreply.github.com>, Terrence Dorsey , David Montague , Serge Matveenko , Marcelo Trylesinski , Sydney Runkle , David Hewitt , Alex Hall , Victorien Plot +Author-email: Samuel Colvin , Eric Jolibois , Hasan Ramezani , Adrian Garcia Badaracco <1755071+adriangb@users.noreply.github.com>, Terrence Dorsey , David Montague , Serge Matveenko , Marcelo Trylesinski , Sydney Runkle , David Hewitt , Alex Hall , Victorien Plot , Douwe Maan License-Expression: MIT License-File: LICENSE Classifier: Development Status :: 5 - Production/Stable @@ -15,7 +15,6 @@ Classifier: Framework :: Hypothesis Classifier: Framework :: Pydantic Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology -Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 @@ -25,22 +24,24 @@ Classifier: Programming Language :: Python :: 3.10 Classifier: 
Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Internet Classifier: Topic :: Software Development :: Libraries :: Python Modules Requires-Python: >=3.9 Requires-Dist: annotated-types>=0.6.0 -Requires-Dist: pydantic-core==2.33.2 -Requires-Dist: typing-extensions>=4.12.2 -Requires-Dist: typing-inspection>=0.4.0 +Requires-Dist: pydantic-core==2.41.5 +Requires-Dist: typing-extensions>=4.14.1 +Requires-Dist: typing-inspection>=0.4.2 Provides-Extra: email Requires-Dist: email-validator>=2.0.0; extra == 'email' Provides-Extra: timezone Requires-Dist: tzdata; (python_version >= '3.9' and platform_system == 'Windows') and extra == 'timezone' Description-Content-Type: text/markdown -# Pydantic +# Pydantic Validation + [![CI](https://img.shields.io/github/actions/workflow/status/pydantic/pydantic/ci.yml?branch=main&logo=github&label=CI)](https://github.com/pydantic/pydantic/actions?query=event%3Apush+branch%3Amain+workflow%3ACI) [![Coverage](https://coverage-badge.samuelcolvin.workers.dev/pydantic/pydantic.svg)](https://coverage-badge.samuelcolvin.workers.dev/redirect/pydantic/pydantic) [![pypi](https://img.shields.io/pypi/v/pydantic.svg)](https://pypi.python.org/pypi/pydantic) @@ -51,7 +52,6 @@ Description-Content-Type: text/markdown [![Pydantic v2](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/pydantic/pydantic/main/docs/badge/v2.json)](https://docs.pydantic.dev/latest/contributing/#badges) [![llms.txt](https://img.shields.io/badge/llms.txt-green)](https://docs.pydantic.dev/latest/llms.txt) - Data validation using Python type hints. Fast and extensible, Pydantic plays nicely with your linters/IDE/brain. 
@@ -113,6 +113,301 @@ See our [security policy](https://github.com/pydantic/pydantic/security/policy). ## Changelog + + + + +## v2.12.5 (2025-11-26) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.12.5) + +This is the fifth 2.12 patch release, addressing an issue with the `MISSING` sentinel and providing several documentation improvements. + +The next 2.13 minor release will be published in a couple weeks, and will include a new *polymorphic serialization* feature addressing +the remaining unexpected changes to the *serialize as any* behavior. + +* Fix pickle error when using `model_construct()` on a model with `MISSING` as a default value by [@ornariece](https://github.com/ornariece) in [#12522](https://github.com/pydantic/pydantic/pull/12522). +* Several updates to the documentation by [@Viicos](https://github.com/Viicos). + +## v2.12.4 (2025-11-05) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.12.4) + +This is the fourth 2.12 patch release, fixing more regressions, and reverting a change in the `build()` method +of the [`AnyUrl` and Dsn types](https://docs.pydantic.dev/latest/api/networks/). + +This patch release also fixes an issue with the serialization of IP address types, when `serialize_as_any` is used. The next patch release +will try to address the remaining issues with *serialize as any* behavior by introducing a new *polymorphic serialization* feature, that +should be used in most cases in place of *serialize as any*. + +* Fix issue with forward references in parent `TypedDict` classes by [@Viicos](https://github.com/Viicos) in [#12427](https://github.com/pydantic/pydantic/pull/12427). + + This issue is only relevant on Python 3.14 and greater. 
+* Exclude fields with `exclude_if` from JSON Schema required fields by [@Viicos](https://github.com/Viicos) in [#12430](https://github.com/pydantic/pydantic/pull/12430) +* Revert URL percent-encoding of credentials in the `build()` method + of the [`AnyUrl` and Dsn types](https://docs.pydantic.dev/latest/api/networks/) by [@davidhewitt](https://github.com/davidhewitt) in + [pydantic-core#1833](https://github.com/pydantic/pydantic-core/pull/1833). + + This was initially considered as a bugfix, but caused regressions and as such was fully reverted. The next release will include + an opt-in option to percent-encode components of the URL. +* Add type inference for IP address types by [@davidhewitt](https://github.com/davidhewitt) in [pydantic-core#1868](https://github.com/pydantic/pydantic-core/pull/1868). + + The 2.12 changes to the `serialize_as_any` behavior made it so that IP address types could not properly serialize to JSON. +* Avoid getting default values from defaultdict by [@davidhewitt](https://github.com/davidhewitt) in [pydantic-core#1853](https://github.com/pydantic/pydantic-core/pull/1853). + + This fixes a subtle regression in the validation behavior of the [`collections.defaultdict`](https://docs.python.org/3/library/collections.html#collections.defaultdict) + type. +* Fix issue with field serializers on nested typed dictionaries by [@davidhewitt](https://github.com/davidhewitt) in [pydantic-core#1879](https://github.com/pydantic/pydantic-core/pull/1879). +* Add more `pydantic-core` builds for the three-threaded version of Python 3.14 by [@davidhewitt](https://github.com/davidhewitt) in [pydantic-core#1864](https://github.com/pydantic/pydantic-core/pull/1864). 
+ +## v2.12.3 (2025-10-17) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.12.3) + +### What's Changed + +This is the third 2.12 patch release, fixing issues related to the `FieldInfo` class, and reverting a change to the supported +[*after* model validator](https://docs.pydantic.dev/latest/concepts/validators/#model-validators) function signatures. + +* Raise a warning when an invalid after model validator function signature is raised by [@Viicos](https://github.com/Viicos) in [#12414](https://github.com/pydantic/pydantic/pull/12414). + Starting in 2.12.0, using class methods for *after* model validators raised an error, but the error wasn't raised concistently. We decided + to emit a deprecation warning instead. +* Add [`FieldInfo.asdict()`](https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.FieldInfo.asdict) method, improve documentation around `FieldInfo` by [@Viicos](https://github.com/Viicos) in [#12411](https://github.com/pydantic/pydantic/pull/12411). + This also add back support for mutations on `FieldInfo` classes, that are reused as `Annotated` metadata. **However**, note that this is still + *not* a supported pattern. Instead, please refer to the [added example](https://docs.pydantic.dev/latest/examples/dynamic_models/) in the documentation. + +The [blog post](https://pydantic.dev/articles/pydantic-v2-12-release#changes) section on changes was also updated to document the changes related to `serialize_as_any`. + +## v2.12.2 (2025-10-14) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.12.2) + +### What's Changed + +#### Fixes + +* Release a new `pydantic-core` version, as a corrupted CPython 3.10 `manylinux2014_aarch64` wheel got uploaded ([pydantic-core#1843](https://github.com/pydantic/pydantic-core/pull/1843)). 
+* Fix issue with recursive generic models with a parent model class by [@Viicos](https://github.com/Viicos) in [#12398](https://github.com/pydantic/pydantic/pull/12398) + +## v2.12.1 (2025-10-13) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.12.1) + +### What's Changed + +This is the first 2.12 patch release, addressing most (but not all yet) regressions from the initial 2.12.0 release. + +#### Fixes + +* Do not evaluate annotations when inspecting validators and serializers by [@Viicos](https://github.com/Viicos) in [#12355](https://github.com/pydantic/pydantic/pull/12355) +* Make sure `None` is converted as `NoneType` in Python 3.14 by [@Viicos](https://github.com/Viicos) in [#12370](https://github.com/pydantic/pydantic/pull/12370) +* Backport V1 runtime warning when using Python 3.14 by [@Viicos](https://github.com/Viicos) in [#12367](https://github.com/pydantic/pydantic/pull/12367) +* Fix error message for invalid validator signatures by [@Viicos](https://github.com/Viicos) in [#12366](https://github.com/pydantic/pydantic/pull/12366) +* Populate field name in `ValidationInfo` for validation of default value by [@Viicos](https://github.com/Viicos) in [pydantic-core#1826](https://github.com/pydantic/pydantic-core/pull/1826) +* Encode credentials in `MultiHostUrl` builder by [@willswire](https://github.com/willswire) in [pydantic-core#1829](https://github.com/pydantic/pydantic-core/pull/1829) +* Respect field serializers when using `serialize_as_any` serialization flag by [@davidhewitt](https://github.com/davidhewitt) in [pydantic-core#1829](https://github.com/pydantic/pydantic-core/pull/1829) +* Fix various `RootModel` serialization issues by [@davidhewitt](https://github.com/davidhewitt) in [pydantic-core#1836](https://github.com/pydantic/pydantic-core/pull/1836) + +### New Contributors + +* [@willswire](https://github.com/willswire) made their first contribution in 
[pydantic-core#1829](https://github.com/pydantic/pydantic-core/pull/1829) + +## v2.12.0 (2025-10-07) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.12.0) + +### What's Changed + +This is the final 2.12 release. It features the work of 20 external contributors and provides useful new features, along with initial Python 3.14 support. +Several minor changes (considered non-breaking changes according to our [versioning policy](https://docs.pydantic.dev/2.12/version-policy/#pydantic-v2)) +are also included in this release. Make sure to look into them before upgrading. + +**Note that Pydantic V1 is not compatible with Python 3.14 and greater**. + +Changes (see the alpha and beta releases for additional changes since 2.11): + +#### Packaging + +* Update V1 copy to v1.10.24 by [@Viicos](https://github.com/Viicos) in [#12338](https://github.com/pydantic/pydantic/pull/12338) + +#### New Features + +* Add `extra` parameter to the validate functions by [@anvilpete](https://github.com/anvilpete) in [#12233](https://github.com/pydantic/pydantic/pull/12233) +* Add `exclude_computed_fields` serialization option by [@Viicos](https://github.com/Viicos) in [#12334](https://github.com/pydantic/pydantic/pull/12334) +* Add `preverse_empty_path` URL options by [@Viicos](https://github.com/Viicos) in [#12336](https://github.com/pydantic/pydantic/pull/12336) +* Add `union_format` parameter to JSON Schema generation by [@Viicos](https://github.com/Viicos) in [#12147](https://github.com/pydantic/pydantic/pull/12147) +* Add `__qualname__` parameter for `create_model` by [@Atry](https://github.com/Atry) in [#12001](https://github.com/pydantic/pydantic/pull/12001) + +#### Fixes + +* Do not try to infer name from lambda definitions in pipelines API by [@Viicos](https://github.com/Viicos) in [#12289](https://github.com/pydantic/pydantic/pull/12289) +* Use proper namespace for functions in `TypeAdapter` by [@Viicos](https://github.com/Viicos) in 
[#12324](https://github.com/pydantic/pydantic/pull/12324) +* Use `Any` for context type annotation in `TypeAdapter` by [@inducer](https://github.com/inducer) in [#12279](https://github.com/pydantic/pydantic/pull/12279) +* Expose `FieldInfo` in `pydantic.fields.__all__` by [@Viicos](https://github.com/Viicos) in [#12339](https://github.com/pydantic/pydantic/pull/12339) +* Respect `validation_alias` in `@validate_call` by [@Viicos](https://github.com/Viicos) in [#12340](https://github.com/pydantic/pydantic/pull/12340) +* Use `Any` as context annotation in plugin API by [@Viicos](https://github.com/Viicos) in [#12341](https://github.com/pydantic/pydantic/pull/12341) +* Use proper `stacklevel` in warnings when possible by [@Viicos](https://github.com/Viicos) in [#12342](https://github.com/pydantic/pydantic/pull/12342) + +### New Contributors + +* [@anvilpete](https://github.com/anvilpete) made their first contribution in [#12233](https://github.com/pydantic/pydantic/pull/12233) +* [@JonathanWindell](https://github.com/JonathanWindell) made their first contribution in [#12327](https://github.com/pydantic/pydantic/pull/12327) +* [@inducer](https://github.com/inducer) made their first contribution in [#12279](https://github.com/pydantic/pydantic/pull/12279) +* [@Atry](https://github.com/Atry) made their first contribution in [#12001](https://github.com/pydantic/pydantic/pull/12001) + +## v2.12.0b1 (2025-10-03) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.12.0b1) + +This is the first beta release of the upcoming 2.12 release. 
+ +### What's Changed + +#### Packaging + +* Bump `pydantic-core` to v2.40.1 by [@Viicos](https://github.com/Viicos) in [#12314](https://github.com/pydantic/pydantic/pull/12314) + +#### New Features + +* Add support for `exclude_if` at the field level by [@andresliszt](https://github.com/andresliszt) in [#12141](https://github.com/pydantic/pydantic/pull/12141) +* Add `ValidateAs` annotation helper by [@Viicos](https://github.com/Viicos) in [#11942](https://github.com/pydantic/pydantic/pull/11942) +* Add configuration options for validation and JSON serialization of temporal types by [@ollz272](https://github.com/ollz272) in [#12068](https://github.com/pydantic/pydantic/pull/12068) +* Add support for PEP 728 by [@Viicos](https://github.com/Viicos) in [#12179](https://github.com/pydantic/pydantic/pull/12179) +* Add field name in serialization error by [@NicolasPllr1](https://github.com/NicolasPllr1) in [pydantic-core#1799](https://github.com/pydantic/pydantic-core/pull/1799) +* Add option to preserve empty URL paths by [@davidhewitt](https://github.com/davidhewitt) in [pydantic-core#1789](https://github.com/pydantic/pydantic-core/pull/1789) + +#### Changes + +* Raise error if an incompatible `pydantic-core` version is installed by [@Viicos](https://github.com/Viicos) in [#12196](https://github.com/pydantic/pydantic/pull/12196) +* Remove runtime warning for experimental features by [@Viicos](https://github.com/Viicos) in [#12265](https://github.com/pydantic/pydantic/pull/12265) +* Warn if registering virtual subclasses on Pydantic models by [@Viicos](https://github.com/Viicos) in [#11669](https://github.com/pydantic/pydantic/pull/11669) + +#### Fixes + +* Fix `__getattr__()` behavior on Pydantic models when a property raised an `AttributeError` and extra values are present by [@raspuchin](https://github.com/raspuchin) in [#12106](https://github.com/pydantic/pydantic/pull/12106) +* Add test to prevent regression with Pydantic models used as annotated metadata by 
[@Viicos](https://github.com/Viicos) in [#12133](https://github.com/pydantic/pydantic/pull/12133) +* Allow to use property setters on Pydantic dataclasses with `validate_assignment` set by [@Viicos](https://github.com/Viicos) in [#12173](https://github.com/pydantic/pydantic/pull/12173) +* Fix mypy v2 plugin for upcoming mypy release by [@cdce8p](https://github.com/cdce8p) in [#12209](https://github.com/pydantic/pydantic/pull/12209) +* Respect custom title in functions JSON Schema by [@Viicos](https://github.com/Viicos) in [#11892](https://github.com/pydantic/pydantic/pull/11892) +* Fix `ImportString` JSON serialization for objects with a `name` attribute by [@chr1sj0nes](https://github.com/chr1sj0nes) in [#12219](https://github.com/pydantic/pydantic/pull/12219) +* Do not error on fields overridden by methods in the mypy plugin by [@Viicos](https://github.com/Viicos) in [#12290](https://github.com/pydantic/pydantic/pull/12290) + +### New Contributors + +* [@raspuchin](https://github.com/raspuchin) made their first contribution in [#12106](https://github.com/pydantic/pydantic/pull/12106) +* [@chr1sj0nes](https://github.com/chr1sj0nes) made their first contribution in [#12219](https://github.com/pydantic/pydantic/pull/12219) + +## v2.12.0a1 (2025-07-26) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.12.0a1) + +This is the first alpha release of the upcoming 2.12 release, which adds initial support for Python 3.14. 
+ +### What's Changed + +#### New Features + +* Add `__pydantic_on_complete__()` hook that is called once model is fully ready to be used by [@DouweM](https://github.com/DouweM) in [#11762](https://github.com/pydantic/pydantic/pull/11762) +* Add initial support for Python 3.14 by [@Viicos](https://github.com/Viicos) in [#11991](https://github.com/pydantic/pydantic/pull/11991) +* Add regex patterns to JSON schema for `Decimal` type by [@Dima-Bulavenko](https://github.com/Dima-Bulavenko) in [#11987](https://github.com/pydantic/pydantic/pull/11987) +* Add support for `doc` attribute on dataclass fields by [@Viicos](https://github.com/Viicos) in [#12077](https://github.com/pydantic/pydantic/pull/12077) +* Add experimental `MISSING` sentinel by [@Viicos](https://github.com/Viicos) in [#11883](https://github.com/pydantic/pydantic/pull/11883) + +#### Changes + +* Allow config and bases to be specified together in `create_model()` by [@Viicos](https://github.com/Viicos) in [#11714](https://github.com/pydantic/pydantic/pull/11714) +* Move some field logic out of the `GenerateSchema` class by [@Viicos](https://github.com/Viicos) in [#11733](https://github.com/pydantic/pydantic/pull/11733) +* Always make use of `inspect.getsourcelines()` for docstring extraction on Python 3.13 and greater by [@Viicos](https://github.com/Viicos) in [#11829](https://github.com/pydantic/pydantic/pull/11829) +* Only support the latest Mypy version by [@Viicos](https://github.com/Viicos) in [#11832](https://github.com/pydantic/pydantic/pull/11832) +* Do not implicitly convert after model validators to class methods by [@Viicos](https://github.com/Viicos) in [#11957](https://github.com/pydantic/pydantic/pull/11957) +* Refactor `FieldInfo` creation implementation by [@Viicos](https://github.com/Viicos) in [#11898](https://github.com/pydantic/pydantic/pull/11898) +* Make `Secret` covariant by [@bluenote10](https://github.com/bluenote10) in [#12008](https://github.com/pydantic/pydantic/pull/12008) +* 
Emit warning when field-specific metadata is used in invalid contexts by [@Viicos](https://github.com/Viicos) in [#12028](https://github.com/pydantic/pydantic/pull/12028) + +#### Fixes + +* Properly fetch plain serializer function when serializing default value in JSON Schema by [@Viicos](https://github.com/Viicos) in [#11721](https://github.com/pydantic/pydantic/pull/11721) +* Remove generics cache workaround by [@Viicos](https://github.com/Viicos) in [#11755](https://github.com/pydantic/pydantic/pull/11755) +* Remove coercion of decimal constraints by [@Viicos](https://github.com/Viicos) in [#11772](https://github.com/pydantic/pydantic/pull/11772) +* Fix crash when expanding root type in the mypy plugin by [@Viicos](https://github.com/Viicos) in [#11735](https://github.com/pydantic/pydantic/pull/11735) +* Only mark model as complete once all fields are complete by [@DouweM](https://github.com/DouweM) in [#11759](https://github.com/pydantic/pydantic/pull/11759) +* Do not provide `field_name` in validator core schemas by [@DouweM](https://github.com/DouweM) in [#11761](https://github.com/pydantic/pydantic/pull/11761) +* Fix issue with recursive generic models by [@Viicos](https://github.com/Viicos) in [#11775](https://github.com/pydantic/pydantic/pull/11775) +* Fix qualified name comparison of private attributes during namespace inspection by [@karta9821](https://github.com/karta9821) in [#11803](https://github.com/pydantic/pydantic/pull/11803) +* Make sure Pydantic dataclasses with slots and `validate_assignment` can be unpickled by [@Viicos](https://github.com/Viicos) in [#11769](https://github.com/pydantic/pydantic/pull/11769) +* Traverse `function-before` schemas during schema gathering by [@Viicos](https://github.com/Viicos) in [#11801](https://github.com/pydantic/pydantic/pull/11801) +* Fix check for stdlib dataclasses by [@Viicos](https://github.com/Viicos) in [#11822](https://github.com/pydantic/pydantic/pull/11822) +* Check if `FieldInfo` is complete after 
applying type variable map by [@Viicos](https://github.com/Viicos) in [#11855](https://github.com/pydantic/pydantic/pull/11855) +* Do not delete mock validator/serializer in `model_rebuild()` by [@Viicos](https://github.com/Viicos) in [#11890](https://github.com/pydantic/pydantic/pull/11890) +* Rebuild dataclass fields before schema generation by [@Viicos](https://github.com/Viicos) in [#11949](https://github.com/pydantic/pydantic/pull/11949) +* Always store the original field assignment on `FieldInfo` by [@Viicos](https://github.com/Viicos) in [#11946](https://github.com/pydantic/pydantic/pull/11946) +* Do not use deprecated methods as default field values by [@Viicos](https://github.com/Viicos) in [#11914](https://github.com/pydantic/pydantic/pull/11914) +* Allow callable discriminator to be applied on PEP 695 type aliases by [@Viicos](https://github.com/Viicos) in [#11941](https://github.com/pydantic/pydantic/pull/11941) +* Suppress core schema generation warning when using `SkipValidation` by [@ygsh0816](https://github.com/ygsh0816) in [#12002](https://github.com/pydantic/pydantic/pull/12002) +* Do not emit typechecking error for invalid `Field()` default with `validate_default` set to `True` by [@Viicos](https://github.com/Viicos) in [#11988](https://github.com/pydantic/pydantic/pull/11988) +* Refactor logic to support Pydantic's `Field()` function in dataclasses by [@Viicos](https://github.com/Viicos) in [#12051](https://github.com/pydantic/pydantic/pull/12051) + +#### Packaging + +* Update project metadata to use PEP 639 by [@Viicos](https://github.com/Viicos) in [#11694](https://github.com/pydantic/pydantic/pull/11694) +* Bump `mkdocs-llmstxt` to v0.2.0 by [@Viicos](https://github.com/Viicos) in [#11725](https://github.com/pydantic/pydantic/pull/11725) +* Bump `pydantic-core` to v2.35.1 by [@Viicos](https://github.com/Viicos) in [#11963](https://github.com/pydantic/pydantic/pull/11963) +* Bump dawidd6/action-download-artifact from 10 to 11 by 
[@dependabot](https://github.com/dependabot)[bot] in [#12033](https://github.com/pydantic/pydantic/pull/12033) +* Bump astral-sh/setup-uv from 5 to 6 by [@dependabot](https://github.com/dependabot)[bot] in [#11826](https://github.com/pydantic/pydantic/pull/11826) +* Update mypy to 1.17.0 by [@Viicos](https://github.com/Viicos) in [#12076](https://github.com/pydantic/pydantic/pull/12076) + +### New Contributors + +* [@parth-paradkar](https://github.com/parth-paradkar) made their first contribution in [#11695](https://github.com/pydantic/pydantic/pull/11695) +* [@dqkqd](https://github.com/dqkqd) made their first contribution in [#11739](https://github.com/pydantic/pydantic/pull/11739) +* [@fhightower](https://github.com/fhightower) made their first contribution in [#11722](https://github.com/pydantic/pydantic/pull/11722) +* [@gbaian10](https://github.com/gbaian10) made their first contribution in [#11766](https://github.com/pydantic/pydantic/pull/11766) +* [@DouweM](https://github.com/DouweM) made their first contribution in [#11759](https://github.com/pydantic/pydantic/pull/11759) +* [@bowenliang123](https://github.com/bowenliang123) made their first contribution in [#11719](https://github.com/pydantic/pydantic/pull/11719) +* [@rawwar](https://github.com/rawwar) made their first contribution in [#11799](https://github.com/pydantic/pydantic/pull/11799) +* [@karta9821](https://github.com/karta9821) made their first contribution in [#11803](https://github.com/pydantic/pydantic/pull/11803) +* [@jinnovation](https://github.com/jinnovation) made their first contribution in [#11834](https://github.com/pydantic/pydantic/pull/11834) +* [@zmievsa](https://github.com/zmievsa) made their first contribution in [#11861](https://github.com/pydantic/pydantic/pull/11861) +* [@Otto-AA](https://github.com/Otto-AA) made their first contribution in [#11860](https://github.com/pydantic/pydantic/pull/11860) +* [@ygsh0816](https://github.com/ygsh0816) made their first contribution in 
[#12002](https://github.com/pydantic/pydantic/pull/12002) +* [@lukland](https://github.com/lukland) made their first contribution in [#12015](https://github.com/pydantic/pydantic/pull/12015) +* [@Dima-Bulavenko](https://github.com/Dima-Bulavenko) made their first contribution in [#11987](https://github.com/pydantic/pydantic/pull/11987) +* [@GSemikozov](https://github.com/GSemikozov) made their first contribution in [#12050](https://github.com/pydantic/pydantic/pull/12050) +* [@hannah-heywa](https://github.com/hannah-heywa) made their first contribution in [#12082](https://github.com/pydantic/pydantic/pull/12082) + +## v2.11.7 (2025-06-14) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.11.7) + +### What's Changed + +#### Fixes + +* Copy `FieldInfo` instance if necessary during `FieldInfo` build by [@Viicos](https://github.com/Viicos) in [#11898](https://github.com/pydantic/pydantic/pull/11898) + +## v2.11.6 (2025-06-13) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.11.6) + +### What's Changed + +#### Fixes + +* Rebuild dataclass fields before schema generation by [@Viicos](https://github.com/Viicos) in [#11949](https://github.com/pydantic/pydantic/pull/11949) +* Always store the original field assignment on `FieldInfo` by [@Viicos](https://github.com/Viicos) in [#11946](https://github.com/pydantic/pydantic/pull/11946) + +## v2.11.5 (2025-05-22) + +[GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.11.5) + +### What's Changed + +#### Fixes + +* Check if `FieldInfo` is complete after applying type variable map by [@Viicos](https://github.com/Viicos) in [#11855](https://github.com/pydantic/pydantic/pull/11855) +* Do not delete mock validator/serializer in `model_rebuild()` by [@Viicos](https://github.com/Viicos) in [#11890](https://github.com/pydantic/pydantic/pull/11890) +* Do not duplicate metadata on model rebuild by [@Viicos](https://github.com/Viicos) in 
[#11902](https://github.com/pydantic/pydantic/pull/11902) + ## v2.11.4 (2025-04-29) [GitHub release](https://github.com/pydantic/pydantic/releases/tag/v2.11.4) @@ -322,6 +617,7 @@ See the [blog post](https://pydantic.dev/articles/pydantic-v2-11-release) for mo * `dataclass` `InitVar` shouldn't be required on serialization by [@sydney-runkle](https://github.com/sydney-runkle) in https://github.com/pydantic/pydantic-core/pull/1602 ## New Contributors + * [@FyZzyss](https://github.com/FyZzyss) made their first contribution in https://github.com/pydantic/pydantic/pull/10789 * [@tamird](https://github.com/tamird) made their first contribution in https://github.com/pydantic/pydantic/pull/10948 * [@felixxm](https://github.com/felixxm) made their first contribution in https://github.com/pydantic/pydantic/pull/11077 @@ -359,7 +655,6 @@ This is another early alpha release, meant to collect early feedback from users * Bump `pydantic-core` to v2.29.0 by [@mikeedjones](https://github.com/mikeedjones) in [#11402](https://github.com/pydantic/pydantic/pull/11402) * Use locally-built rust with symbols & pgo by [@davidhewitt](https://github.com/davidhewitt) in [#11403](https://github.com/pydantic/pydantic/pull/11403) - #### Performance * Create a single dictionary when creating a `CoreConfig` instance by [@sydney-runkle](https://github.com/sydney-runkle) in [#11384](https://github.com/pydantic/pydantic/pull/11384) diff --git a/blimgui/dist64/pydantic-2.12.5.dist-info/RECORD b/blimgui/dist64/pydantic-2.12.5.dist-info/RECORD new file mode 100644 index 0000000..e56ab5d --- /dev/null +++ b/blimgui/dist64/pydantic-2.12.5.dist-info/RECORD @@ -0,0 +1,218 @@ +pydantic-2.12.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pydantic-2.12.5.dist-info/METADATA,sha256=o7oj6JUZH-1puDI8vLzcgphMoLajzcYsSKI0GIapwI0,90587 +pydantic-2.12.5.dist-info/RECORD,, +pydantic-2.12.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pydantic-2.12.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +pydantic-2.12.5.dist-info/licenses/LICENSE,sha256=qeGG88oWte74QxjnpwFyE1GgDLe4rjpDlLZ7SeNSnvM,1129 +pydantic/__init__.py,sha256=5iEnJ4wHv1OEzdKQPzaKaZKfO4pSQAC65ODrYI6_S8Y,15812 +pydantic/__pycache__/__init__.cpython-314.pyc,, +pydantic/__pycache__/_migration.cpython-314.pyc,, +pydantic/__pycache__/alias_generators.cpython-314.pyc,, +pydantic/__pycache__/aliases.cpython-314.pyc,, +pydantic/__pycache__/annotated_handlers.cpython-314.pyc,, +pydantic/__pycache__/class_validators.cpython-314.pyc,, +pydantic/__pycache__/color.cpython-314.pyc,, +pydantic/__pycache__/config.cpython-314.pyc,, +pydantic/__pycache__/dataclasses.cpython-314.pyc,, +pydantic/__pycache__/datetime_parse.cpython-314.pyc,, +pydantic/__pycache__/decorator.cpython-314.pyc,, +pydantic/__pycache__/env_settings.cpython-314.pyc,, +pydantic/__pycache__/error_wrappers.cpython-314.pyc,, +pydantic/__pycache__/errors.cpython-314.pyc,, +pydantic/__pycache__/fields.cpython-314.pyc,, +pydantic/__pycache__/functional_serializers.cpython-314.pyc,, +pydantic/__pycache__/functional_validators.cpython-314.pyc,, +pydantic/__pycache__/generics.cpython-314.pyc,, +pydantic/__pycache__/json.cpython-314.pyc,, +pydantic/__pycache__/json_schema.cpython-314.pyc,, +pydantic/__pycache__/main.cpython-314.pyc,, +pydantic/__pycache__/mypy.cpython-314.pyc,, +pydantic/__pycache__/networks.cpython-314.pyc,, +pydantic/__pycache__/parse.cpython-314.pyc,, +pydantic/__pycache__/root_model.cpython-314.pyc,, +pydantic/__pycache__/schema.cpython-314.pyc,, +pydantic/__pycache__/tools.cpython-314.pyc,, +pydantic/__pycache__/type_adapter.cpython-314.pyc,, +pydantic/__pycache__/types.cpython-314.pyc,, +pydantic/__pycache__/typing.cpython-314.pyc,, +pydantic/__pycache__/utils.cpython-314.pyc,, +pydantic/__pycache__/validate_call_decorator.cpython-314.pyc,, +pydantic/__pycache__/validators.cpython-314.pyc,, +pydantic/__pycache__/version.cpython-314.pyc,, 
+pydantic/__pycache__/warnings.cpython-314.pyc,, +pydantic/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pydantic/_internal/__pycache__/__init__.cpython-314.pyc,, +pydantic/_internal/__pycache__/_config.cpython-314.pyc,, +pydantic/_internal/__pycache__/_core_metadata.cpython-314.pyc,, +pydantic/_internal/__pycache__/_core_utils.cpython-314.pyc,, +pydantic/_internal/__pycache__/_dataclasses.cpython-314.pyc,, +pydantic/_internal/__pycache__/_decorators.cpython-314.pyc,, +pydantic/_internal/__pycache__/_decorators_v1.cpython-314.pyc,, +pydantic/_internal/__pycache__/_discriminated_union.cpython-314.pyc,, +pydantic/_internal/__pycache__/_docs_extraction.cpython-314.pyc,, +pydantic/_internal/__pycache__/_fields.cpython-314.pyc,, +pydantic/_internal/__pycache__/_forward_ref.cpython-314.pyc,, +pydantic/_internal/__pycache__/_generate_schema.cpython-314.pyc,, +pydantic/_internal/__pycache__/_generics.cpython-314.pyc,, +pydantic/_internal/__pycache__/_git.cpython-314.pyc,, +pydantic/_internal/__pycache__/_import_utils.cpython-314.pyc,, +pydantic/_internal/__pycache__/_internal_dataclass.cpython-314.pyc,, +pydantic/_internal/__pycache__/_known_annotated_metadata.cpython-314.pyc,, +pydantic/_internal/__pycache__/_mock_val_ser.cpython-314.pyc,, +pydantic/_internal/__pycache__/_model_construction.cpython-314.pyc,, +pydantic/_internal/__pycache__/_namespace_utils.cpython-314.pyc,, +pydantic/_internal/__pycache__/_repr.cpython-314.pyc,, +pydantic/_internal/__pycache__/_schema_gather.cpython-314.pyc,, +pydantic/_internal/__pycache__/_schema_generation_shared.cpython-314.pyc,, +pydantic/_internal/__pycache__/_serializers.cpython-314.pyc,, +pydantic/_internal/__pycache__/_signature.cpython-314.pyc,, +pydantic/_internal/__pycache__/_typing_extra.cpython-314.pyc,, +pydantic/_internal/__pycache__/_utils.cpython-314.pyc,, +pydantic/_internal/__pycache__/_validate_call.cpython-314.pyc,, +pydantic/_internal/__pycache__/_validators.cpython-314.pyc,, 
+pydantic/_internal/_config.py,sha256=TWZwg3c0bZHiT3boR5-YYqkouHcwjRdenmyGHofV7E0,14674 +pydantic/_internal/_core_metadata.py,sha256=Y_g2t3i7uluK-wXCZvzJfRFMPUM23aBYLfae4FzBPy0,5162 +pydantic/_internal/_core_utils.py,sha256=1jru4VbJ0x63R6dtVcuOI-dKQTC_d_lSnJWEBQzGNEQ,6487 +pydantic/_internal/_dataclasses.py,sha256=Tk1mEafhad1kV7K5tPX5BwxWSXY7C-MKwf0OLFgIlEA,13158 +pydantic/_internal/_decorators.py,sha256=PnyAoKSg3BNbCVSZnwqw9naEg1UDtYvDT9LluigPiO8,33529 +pydantic/_internal/_decorators_v1.py,sha256=tfdfdpQKY4R2XCOwqHbZeoQMur6VNigRrfhudXBHx38,6185 +pydantic/_internal/_discriminated_union.py,sha256=aMl0SRSyQyHfW4-klnMTHNvwSRoqE3H3PRV_05vRsTg,25478 +pydantic/_internal/_docs_extraction.py,sha256=fyznSAHh5AzohnXZStV0HvH-nRbavNHPyg-knx-S_EE,4127 +pydantic/_internal/_fields.py,sha256=YSfEKq21FgjLJ6YqYXKh0eEEs5nxMPvQ6hp9pA8Nzfw,28093 +pydantic/_internal/_forward_ref.py,sha256=5n3Y7-3AKLn8_FS3Yc7KutLiPUhyXmAtkEZOaFnonwM,611 +pydantic/_internal/_generate_schema.py,sha256=TT49vzYzqH90rWrv5ptNoZgjzOsR0KPlSkqPVFrnrBw,132665 +pydantic/_internal/_generics.py,sha256=ELqjT6LMzQzWAK0EB5_9qke_iAazz0OQ4gunp_uKuYY,23822 +pydantic/_internal/_git.py,sha256=IwPh3DPfa2Xq3rBuB9Nx8luR2A1i69QdeTfWWXIuCVg,809 +pydantic/_internal/_import_utils.py,sha256=TRhxD5OuY6CUosioBdBcJUs0om7IIONiZdYAV7zQ8jM,402 +pydantic/_internal/_internal_dataclass.py,sha256=_bedc1XbuuygRGiLZqkUkwwFpQaoR1hKLlR501nyySY,144 +pydantic/_internal/_known_annotated_metadata.py,sha256=Jc7KTNFZoB3f-0ibP_NgJINOeVvYE3q3OTBQDjVMk3U,16765 +pydantic/_internal/_mock_val_ser.py,sha256=wmRRFSBvqfcLbI41PsFliB4u2AZ3mJpZeiERbD3xKTo,8885 +pydantic/_internal/_model_construction.py,sha256=wk-bNGDAJvduaGvn0U0_8zEl0GERu0shJvN8_ZfkYaw,37783 +pydantic/_internal/_namespace_utils.py,sha256=hl3-TRAr82U2jTyPP3t-QqsvKLirxtkLfNfrN-fp0x8,12878 +pydantic/_internal/_repr.py,sha256=jQfnJuyDxQpSRNhG29II9PX8e4Nv2qWZrEw2lqih3UE,5172 +pydantic/_internal/_schema_gather.py,sha256=VLEv51TYEeeND2czsyrmJq1MVnJqTOmnLan7VG44c8A,9114 
+pydantic/_internal/_schema_generation_shared.py,sha256=F_rbQbrkoomgxsskdHpP0jUJ7TCfe0BADAEkq6CJ4nM,4842 +pydantic/_internal/_serializers.py,sha256=YIWvSmAR5fnbGSWCOQduWt1yB4ZQY42eAruc-enrb6c,1491 +pydantic/_internal/_signature.py,sha256=8EljPJe4pSnapuirG5DkBAgD1hggHxEAyzFPH-9H0zE,6779 +pydantic/_internal/_typing_extra.py,sha256=_GRYopNi4a9USi5UQ285ObrlsYmvqKEWTNbBoJFSK2c,30309 +pydantic/_internal/_utils.py,sha256=c6Naqf3bds4jBctepiW5jV0xISQQQk5EBUhMNmVQ3Nk,15912 +pydantic/_internal/_validate_call.py,sha256=PfdVnSzhXOrENtaDoDw3PFWPVYD5W_gNYPe8p3Ug6Lg,5321 +pydantic/_internal/_validators.py,sha256=dv0a2Nkc4zcYqv31Gh_QId2lcf-W0kQpV0oSNzgEdfg,20588 +pydantic/_migration.py,sha256=VF73LRCUz3Irb5xVt13jb3NAcXVnEF6T1-J0OLfeZ5A,12160 +pydantic/alias_generators.py,sha256=KM1n3u4JfLSBl1UuYg3hoYHzXJD-yvgrnq8u1ccwh_A,2124 +pydantic/aliases.py,sha256=vhCHyoSWnX-EJ-wWb5qj4xyRssgGWnTQfzQp4GSZ9ug,4937 +pydantic/annotated_handlers.py,sha256=WfyFSqwoEIFXBh7T73PycKloI1DiX45GWi0-JOsCR4Y,4407 +pydantic/class_validators.py,sha256=i_V3j-PYdGLSLmj_IJZekTRjunO8SIVz8LMlquPyP7E,148 +pydantic/color.py,sha256=AzqGfVQHF92_ZctDcue0DM4yTp2P6tekkwRINTWrLIo,21481 +pydantic/config.py,sha256=5MjjzlAR0_xq7C1yAEPf7qWp5qraQwStRvma9nzbqVI,44267 +pydantic/dataclasses.py,sha256=VlknbEulg08xdmPg_60hBsCVIw-W603OJWY2n5gyXA0,18936 +pydantic/datetime_parse.py,sha256=QC-WgMxMr_wQ_mNXUS7AVf-2hLEhvvsPY1PQyhSGOdk,150 +pydantic/decorator.py,sha256=YX-jUApu5AKaVWKPoaV-n-4l7UbS69GEt9Ra3hszmKI,145 +pydantic/deprecated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pydantic/deprecated/__pycache__/__init__.cpython-314.pyc,, +pydantic/deprecated/__pycache__/class_validators.cpython-314.pyc,, +pydantic/deprecated/__pycache__/config.cpython-314.pyc,, +pydantic/deprecated/__pycache__/copy_internals.cpython-314.pyc,, +pydantic/deprecated/__pycache__/decorator.cpython-314.pyc,, +pydantic/deprecated/__pycache__/json.cpython-314.pyc,, +pydantic/deprecated/__pycache__/parse.cpython-314.pyc,, 
+pydantic/deprecated/__pycache__/tools.cpython-314.pyc,, +pydantic/deprecated/class_validators.py,sha256=EAcaVQM5zp2wBml0ybN62CfQfyJvDLx5Qd9Pk4_tb4U,10273 +pydantic/deprecated/config.py,sha256=k_lsVk57paxLJOcBueH07cu1OgEgWdVBxm6lfaC3CCU,2663 +pydantic/deprecated/copy_internals.py,sha256=Ghd-vkMd5EYCCgyCGtPKO58np9cEKBQC6qkBeIEFI2g,7618 +pydantic/deprecated/decorator.py,sha256=TBm6bJ7wJsNih_8Wq5IzDcwP32m9_vfxs96desLuk00,10845 +pydantic/deprecated/json.py,sha256=HlWCG35RRrxyzuTS6LTQiZBwRhmDZWmeqQH8rLW6wA8,4657 +pydantic/deprecated/parse.py,sha256=Gzd6b_g8zJXcuE7QRq5adhx_EMJahXfcpXCF0RgrqqI,2511 +pydantic/deprecated/tools.py,sha256=Nrm9oFRZWp8-jlfvPgJILEsywp4YzZD52XIGPDLxHcI,3330 +pydantic/env_settings.py,sha256=6IHeeWEqlUPRUv3V-AXiF_W91fg2Jw_M3O0l34J_eyA,148 +pydantic/error_wrappers.py,sha256=RK6mqATc9yMD-KBD9IJS9HpKCprWHd8wo84Bnm-3fR8,150 +pydantic/errors.py,sha256=7ctBNCtt57kZFx71Ls2H86IufQARv4wPKf8DhdsVn5w,6002 +pydantic/experimental/__init__.py,sha256=QT7rKYdDsCiTJ9GEjmsQdWHScwpKrrNkGq6vqONP6RQ,104 +pydantic/experimental/__pycache__/__init__.cpython-314.pyc,, +pydantic/experimental/__pycache__/arguments_schema.cpython-314.pyc,, +pydantic/experimental/__pycache__/missing_sentinel.cpython-314.pyc,, +pydantic/experimental/__pycache__/pipeline.cpython-314.pyc,, +pydantic/experimental/arguments_schema.py,sha256=EFnjX_ulp-tPyUjQX5pmQtug1OFL_Acc8bcMbLd-fVY,1866 +pydantic/experimental/missing_sentinel.py,sha256=hQejgtF00wUuQMni9429evg-eXyIwpKvjsD8ofqfj-w,127 +pydantic/experimental/pipeline.py,sha256=Kv_dvcexKumazfRL0y69AayeA6H37SrmsZ3SUl_n0qY,23582 +pydantic/fields.py,sha256=WuDGOvB22KWuuW3fXnS4Wvg4qX_tdp8X7BrAlza4sw8,79194 +pydantic/functional_serializers.py,sha256=rEzH391zqy3o_bWk2QEuvySmcQNZmwXmJQLC3ZGF7QA,17151 +pydantic/functional_validators.py,sha256=c_-7weWpGNcOYfRfVUFu11jrxMVMdfY_c-4istwk95Y,31839 +pydantic/generics.py,sha256=0ZqZ9O9annIj_3mGBRqps4htey3b5lV1-d2tUxPMMnA,144 +pydantic/json.py,sha256=ZH8RkI7h4Bz-zp8OdTAxbJUoVvcoU-jhMdRZ0B-k0xc,140 
+pydantic/json_schema.py,sha256=-h8c7vsNGAJCIxR-n52-69Q54w38EM-j0AGC_4VGt30,123653 +pydantic/main.py,sha256=WZTxwW81igl75Y00zHJJmoU3qCNSy-1KCEmEsBPftiQ,84205 +pydantic/mypy.py,sha256=p6KU1GwPHazF7E5vJq1uLd4tHd6DE6bre4-m5Ln23ms,58986 +pydantic/networks.py,sha256=Smf_RyImQ-F5FZLCgFwHPfROYxW_e-Hz68R_8LW0sZ0,42099 +pydantic/parse.py,sha256=wkd82dgtvWtD895U_I6E1htqMlGhBSYEV39cuBSeo3A,141 +pydantic/plugin/__init__.py,sha256=a7Tw366U6K3kltCCNZY76nc9ss-7uGGQ40TXad9OypQ,7333 +pydantic/plugin/__pycache__/__init__.cpython-314.pyc,, +pydantic/plugin/__pycache__/_loader.cpython-314.pyc,, +pydantic/plugin/__pycache__/_schema_validator.cpython-314.pyc,, +pydantic/plugin/_loader.py,sha256=9QLXneLEmvyhXka_9j4Lrkbme4qPv6qYphlsjF2MGsA,2210 +pydantic/plugin/_schema_validator.py,sha256=QbmqsG33MBmftNQ2nNiuN22LhbrexUA7ipDVv3J02BU,5267 +pydantic/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pydantic/root_model.py,sha256=BvmLtW4i11dJk-dLOM3rl-jnJdQGeeQTFBcmEOq6pMg,6311 +pydantic/schema.py,sha256=Vqqjvq_LnapVknebUd3Bp_J1p2gXZZnZRgL48bVEG7o,142 +pydantic/tools.py,sha256=iHQpd8SJ5DCTtPV5atAV06T89bjSaMFeZZ2LX9lasZY,141 +pydantic/type_adapter.py,sha256=VT--yg4a27shSBzWHBPKz493f3iQ9obdkEkhjZKlE7Q,35653 +pydantic/types.py,sha256=nqdS-J2ZXqTh2qeyJOzBTBtHWyZ5YRFe8gaMV59d9HE,105431 +pydantic/typing.py,sha256=P7feA35MwTcLsR1uL7db0S-oydBxobmXa55YDoBgajQ,138 +pydantic/utils.py,sha256=15nR2QpqTBFlQV4TNtTItMyTJx_fbyV-gPmIEY1Gooc,141 +pydantic/v1/__init__.py,sha256=FLQ8ISp6MVZRfjnS7fQ4m1FxQxFCF2QVikE4DK-4PhE,3164 +pydantic/v1/__pycache__/__init__.cpython-314.pyc,, +pydantic/v1/__pycache__/_hypothesis_plugin.cpython-314.pyc,, +pydantic/v1/__pycache__/annotated_types.cpython-314.pyc,, +pydantic/v1/__pycache__/class_validators.cpython-314.pyc,, +pydantic/v1/__pycache__/color.cpython-314.pyc,, +pydantic/v1/__pycache__/config.cpython-314.pyc,, +pydantic/v1/__pycache__/dataclasses.cpython-314.pyc,, +pydantic/v1/__pycache__/datetime_parse.cpython-314.pyc,, 
+pydantic/v1/__pycache__/decorator.cpython-314.pyc,, +pydantic/v1/__pycache__/env_settings.cpython-314.pyc,, +pydantic/v1/__pycache__/error_wrappers.cpython-314.pyc,, +pydantic/v1/__pycache__/errors.cpython-314.pyc,, +pydantic/v1/__pycache__/fields.cpython-314.pyc,, +pydantic/v1/__pycache__/generics.cpython-314.pyc,, +pydantic/v1/__pycache__/json.cpython-314.pyc,, +pydantic/v1/__pycache__/main.cpython-314.pyc,, +pydantic/v1/__pycache__/mypy.cpython-314.pyc,, +pydantic/v1/__pycache__/networks.cpython-314.pyc,, +pydantic/v1/__pycache__/parse.cpython-314.pyc,, +pydantic/v1/__pycache__/schema.cpython-314.pyc,, +pydantic/v1/__pycache__/tools.cpython-314.pyc,, +pydantic/v1/__pycache__/types.cpython-314.pyc,, +pydantic/v1/__pycache__/typing.cpython-314.pyc,, +pydantic/v1/__pycache__/utils.cpython-314.pyc,, +pydantic/v1/__pycache__/validators.cpython-314.pyc,, +pydantic/v1/__pycache__/version.cpython-314.pyc,, +pydantic/v1/_hypothesis_plugin.py,sha256=5ES5xWuw1FQAsymLezy8QgnVz0ZpVfU3jkmT74H27VQ,14847 +pydantic/v1/annotated_types.py,sha256=uk2NAAxqiNELKjiHhyhxKaIOh8F1lYW_LzrW3X7oZBc,3157 +pydantic/v1/class_validators.py,sha256=ULOaIUgYUDBsHL7EEVEarcM-UubKUggoN8hSbDonsFE,14672 +pydantic/v1/color.py,sha256=iZABLYp6OVoo2AFkP9Ipri_wSc6-Kklu8YuhSartd5g,16844 +pydantic/v1/config.py,sha256=a6P0Wer9x4cbwKW7Xv8poSUqM4WP-RLWwX6YMpYq9AA,6532 +pydantic/v1/dataclasses.py,sha256=784cqvInbwIPWr9usfpX3ch7z4t3J2tTK6N067_wk1o,18172 +pydantic/v1/datetime_parse.py,sha256=4Qy1kQpq3rNVZJeIHeSPDpuS2Bvhp1KPtzJG1xu-H00,7724 +pydantic/v1/decorator.py,sha256=zaaxxxoWPCm818D1bs0yhapRjXm32V8G0ZHWCdM1uXA,10339 +pydantic/v1/env_settings.py,sha256=A9VXwtRl02AY-jH0C0ouy5VNw3fi6F_pkzuHDjgAAOM,14105 +pydantic/v1/error_wrappers.py,sha256=6625Mfw9qkC2NwitB_JFAWe8B-Xv6zBU7rL9k28tfyo,5196 +pydantic/v1/errors.py,sha256=mIwPED5vGM5Q5v4C4Z1JPldTRH-omvEylH6ksMhOmPw,17726 +pydantic/v1/fields.py,sha256=VqWJCriUNiEyptXroDVJ501JpVA0en2VANcksqXL2b8,50649 
+pydantic/v1/generics.py,sha256=VzC9YUV-EbPpQ3aAfk1cNFej79_IzznkQ7WrmTTZS9E,17871 +pydantic/v1/json.py,sha256=WQ5Hy_hIpfdR3YS8k6N2E6KMJzsdbBi_ldWOPJaV81M,3390 +pydantic/v1/main.py,sha256=zuNpdN5Q0V0wG2UUTKt0HUy3XJ4OAvPSZDdiXY-FIzs,44824 +pydantic/v1/mypy.py,sha256=Cl8XRfCmIcVE3j5AEU52C8iDh8lcX__D3hz2jIWxMAs,38860 +pydantic/v1/networks.py,sha256=HYNtKAfOmOnKJpsDg1g6SIkj9WPhU_-i8l5e2JKBpG4,22124 +pydantic/v1/parse.py,sha256=BJtdqiZRtav9VRFCmOxoY-KImQmjPy-A_NoojiFUZxY,1821 +pydantic/v1/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pydantic/v1/schema.py,sha256=aqBuA--cq8gAVkim5BJPFASHzOZ8dFtmFX_fNGr6ip4,47801 +pydantic/v1/tools.py,sha256=1lDdXHk0jL5uP3u5RCYAvUAlGClgAO-45lkq9j7fyBA,2881 +pydantic/v1/types.py,sha256=Bzl-RcnitPBHnqwwj9iv7JjHuN1GpnWH24dKkF3l9e8,35455 +pydantic/v1/typing.py,sha256=7GdBg1YTHULU81thB_9cjRNDfZfn4khoX7nGtw_keCE,19677 +pydantic/v1/utils.py,sha256=M5FRyfNUb1A2mk9laGgCVdfHHb3AtQgrjO5qfyBf4xA,25989 +pydantic/v1/validators.py,sha256=lyUkn1MWhHxlCX5ZfEgFj_CAHojoiPcaQeMdEM9XviU,22187 +pydantic/v1/version.py,sha256=HXnXW-1bMW5qKhlr5RgOEPohrZDCDSuyy8-gi8GCgZo,1039 +pydantic/validate_call_decorator.py,sha256=8jqLlgXTjWEj4dXDg0wI3EGQKkb0JnCsL_JSUjbU5Sg,4389 +pydantic/validators.py,sha256=pwbIJXVb1CV2mAE4w_EGfNj7DwzsKaWw_tTL6cviTus,146 +pydantic/version.py,sha256=XNmGSyOP87Mqa_A9HFzfDcNippfnqfRK3ZUiGyBb4-A,3985 +pydantic/warnings.py,sha256=Wu1VGzrvFZw4T6yCIKHjH7LSY66HjbtyCFbn5uWoMJ4,4802 diff --git a/blimgui/dist64/numpy/compat/tests/__init__.py b/blimgui/dist64/pydantic-2.12.5.dist-info/REQUESTED similarity index 100% rename from blimgui/dist64/numpy/compat/tests/__init__.py rename to blimgui/dist64/pydantic-2.12.5.dist-info/REQUESTED diff --git a/blimgui/dist64/pydantic-2.11.4.dist-info/WHEEL b/blimgui/dist64/pydantic-2.12.5.dist-info/WHEEL similarity index 100% rename from blimgui/dist64/pydantic-2.11.4.dist-info/WHEEL rename to blimgui/dist64/pydantic-2.12.5.dist-info/WHEEL diff --git 
a/blimgui/dist64/pydantic-2.11.4.dist-info/licenses/LICENSE b/blimgui/dist64/pydantic-2.12.5.dist-info/licenses/LICENSE similarity index 100% rename from blimgui/dist64/pydantic-2.11.4.dist-info/licenses/LICENSE rename to blimgui/dist64/pydantic-2.12.5.dist-info/licenses/LICENSE diff --git a/blimgui/dist64/pydantic/__init__.py b/blimgui/dist64/pydantic/__init__.py index 716ca40..0121284 100644 --- a/blimgui/dist64/pydantic/__init__.py +++ b/blimgui/dist64/pydantic/__init__.py @@ -1,11 +1,14 @@ -import typing from importlib import import_module +from typing import TYPE_CHECKING from warnings import warn from ._migration import getattr_migration -from .version import VERSION +from .version import VERSION, _ensure_pydantic_core_version -if typing.TYPE_CHECKING: +_ensure_pydantic_core_version() +del _ensure_pydantic_core_version + +if TYPE_CHECKING: # import of virtually everything is supported via `__getattr__` below, # but we need them here for type checking and IDE support import pydantic_core @@ -37,6 +40,7 @@ ModelWrapValidatorHandler, PlainValidator, SkipValidation, + ValidateAs, WrapValidator, field_validator, model_validator, @@ -53,6 +57,7 @@ PydanticDeprecatedSince29, PydanticDeprecatedSince210, PydanticDeprecatedSince211, + PydanticDeprecatedSince212, PydanticDeprecationWarning, PydanticExperimentalWarning, ) @@ -76,6 +81,7 @@ 'PlainValidator', 'WrapValidator', 'SkipValidation', + 'ValidateAs', 'InstanceOf', 'ModelWrapValidatorHandler', # JSON Schema @@ -223,6 +229,7 @@ 'PydanticDeprecatedSince29', 'PydanticDeprecatedSince210', 'PydanticDeprecatedSince211', + 'PydanticDeprecatedSince212', 'PydanticDeprecationWarning', 'PydanticExperimentalWarning', # annotated handlers @@ -250,6 +257,7 @@ 'WrapValidator': (__spec__.parent, '.functional_validators'), 'SkipValidation': (__spec__.parent, '.functional_validators'), 'InstanceOf': (__spec__.parent, '.functional_validators'), + 'ValidateAs': (__spec__.parent, '.functional_validators'), 'ModelWrapValidatorHandler': 
(__spec__.parent, '.functional_validators'), # JSON Schema 'WithJsonSchema': (__spec__.parent, '.json_schema'), @@ -384,6 +392,7 @@ 'PydanticDeprecatedSince29': (__spec__.parent, '.warnings'), 'PydanticDeprecatedSince210': (__spec__.parent, '.warnings'), 'PydanticDeprecatedSince211': (__spec__.parent, '.warnings'), + 'PydanticDeprecatedSince212': (__spec__.parent, '.warnings'), 'PydanticDeprecationWarning': (__spec__.parent, '.warnings'), 'PydanticExperimentalWarning': (__spec__.parent, '.warnings'), # annotated handlers @@ -415,9 +424,11 @@ def __getattr__(attr_name: str) -> object: if attr_name in _deprecated_dynamic_imports: + from pydantic.warnings import PydanticDeprecatedSince20 + warn( f'Importing {attr_name} from `pydantic` is deprecated. This feature is either no longer supported, or is not public.', - DeprecationWarning, + PydanticDeprecatedSince20, stacklevel=2, ) @@ -441,5 +452,5 @@ def __getattr__(attr_name: str) -> object: return result -def __dir__() -> 'list[str]': +def __dir__() -> list[str]: return list(__all__) diff --git a/blimgui/dist64/pydantic/_internal/_config.py b/blimgui/dist64/pydantic/_internal/_config.py index fe71264..43c8568 100644 --- a/blimgui/dist64/pydantic/_internal/_config.py +++ b/blimgui/dist64/pydantic/_internal/_config.py @@ -19,11 +19,6 @@ from ..errors import PydanticUserError from ..warnings import PydanticDeprecatedSince20, PydanticDeprecatedSince210 -if not TYPE_CHECKING: - # See PyCharm issues https://youtrack.jetbrains.com/issue/PY-21915 - # and https://youtrack.jetbrains.com/issue/PY-51428 - DeprecationWarning = PydanticDeprecatedSince20 - if TYPE_CHECKING: from .._internal._schema_generation_shared import GenerateSchema from ..fields import ComputedFieldInfo, FieldInfo @@ -69,6 +64,8 @@ class ConfigWrapper: # whether instances of models and dataclasses (including subclass instances) should re-validate, default 'never' revalidate_instances: Literal['always', 'never', 'subclass-instances'] ser_json_timedelta: 
Literal['iso8601', 'float'] + ser_json_temporal: Literal['iso8601', 'seconds', 'milliseconds'] + val_temporal_unit: Literal['seconds', 'milliseconds', 'infer'] ser_json_bytes: Literal['utf8', 'base64', 'hex'] val_json_bytes: Literal['utf8', 'base64', 'hex'] ser_json_inf_nan: Literal['null', 'constants', 'strings'] @@ -90,6 +87,7 @@ class ConfigWrapper: validate_by_alias: bool validate_by_name: bool serialize_by_alias: bool + url_preserve_empty_path: bool def __init__(self, config: ConfigDict | dict[str, Any] | type[Any] | None, *, check: bool = True): if check: @@ -98,7 +96,13 @@ def __init__(self, config: ConfigDict | dict[str, Any] | type[Any] | None, *, ch self.config_dict = cast(ConfigDict, config) @classmethod - def for_model(cls, bases: tuple[type[Any], ...], namespace: dict[str, Any], kwargs: dict[str, Any]) -> Self: + def for_model( + cls, + bases: tuple[type[Any], ...], + namespace: dict[str, Any], + raw_annotations: dict[str, Any], + kwargs: dict[str, Any], + ) -> Self: """Build a new `ConfigWrapper` instance for a `BaseModel`. The config wrapper built based on (in descending order of priority): @@ -109,6 +113,7 @@ def for_model(cls, bases: tuple[type[Any], ...], namespace: dict[str, Any], kwar Args: bases: A tuple of base classes. namespace: The namespace of the class being created. + raw_annotations: The (non-evaluated) annotations of the model. kwargs: The kwargs passed to the class being created. Returns: @@ -123,7 +128,6 @@ def for_model(cls, bases: tuple[type[Any], ...], namespace: dict[str, Any], kwar config_class_from_namespace = namespace.get('Config') config_dict_from_namespace = namespace.get('model_config') - raw_annotations = namespace.get('__annotations__', {}) if raw_annotations.get('model_config') and config_dict_from_namespace is None: raise PydanticUserError( '`model_config` cannot be used as a model field name. 
Use `model_config` for model configuration.', @@ -205,6 +209,8 @@ def core_config(self, title: str | None) -> core_schema.CoreConfig: ('str_to_upper', config.get('str_to_upper')), ('strict', config.get('strict')), ('ser_json_timedelta', config.get('ser_json_timedelta')), + ('ser_json_temporal', config.get('ser_json_temporal')), + ('val_temporal_unit', config.get('val_temporal_unit')), ('ser_json_bytes', config.get('ser_json_bytes')), ('val_json_bytes', config.get('val_json_bytes')), ('ser_json_inf_nan', config.get('ser_json_inf_nan')), @@ -222,6 +228,7 @@ def core_config(self, title: str | None) -> core_schema.CoreConfig: ('validate_by_alias', config.get('validate_by_alias')), ('validate_by_name', config.get('validate_by_name')), ('serialize_by_alias', config.get('serialize_by_alias')), + ('url_preserve_empty_path', config.get('url_preserve_empty_path')), ) if v is not None } @@ -283,6 +290,8 @@ def push(self, config_wrapper: ConfigWrapper | ConfigDict | None): strict=False, revalidate_instances='never', ser_json_timedelta='iso8601', + ser_json_temporal='iso8601', + val_temporal_unit='infer', ser_json_bytes='utf8', val_json_bytes='utf8', ser_json_inf_nan='null', @@ -304,6 +313,7 @@ def push(self, config_wrapper: ConfigWrapper | ConfigDict | None): validate_by_alias=True, validate_by_name=False, serialize_by_alias=False, + url_preserve_empty_path=False, ) @@ -320,7 +330,7 @@ def prepare_config(config: ConfigDict | dict[str, Any] | type[Any] | None) -> Co return ConfigDict() if not isinstance(config, dict): - warnings.warn(DEPRECATION_MESSAGE, DeprecationWarning) + warnings.warn(DEPRECATION_MESSAGE, PydanticDeprecatedSince20, stacklevel=4) config = {k: getattr(config, k) for k in dir(config) if not k.startswith('__')} config_dict = cast(ConfigDict, config) diff --git a/blimgui/dist64/pydantic/_internal/_core_utils.py b/blimgui/dist64/pydantic/_internal/_core_utils.py index cf8cf7c..df149b4 100644 --- a/blimgui/dist64/pydantic/_internal/_core_utils.py +++ 
b/blimgui/dist64/pydantic/_internal/_core_utils.py @@ -1,12 +1,11 @@ from __future__ import annotations import inspect -import os from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any, Union -from pydantic_core import CoreSchema, core_schema -from pydantic_core import validate_core_schema as _validate_core_schema +from pydantic_core import core_schema +from pydantic_core.core_schema import CoreSchema from typing_extensions import TypeGuard, get_args, get_origin from typing_inspection import typing_objects @@ -109,12 +108,6 @@ def get_ref(s: core_schema.CoreSchema) -> None | str: return s.get('ref', None) -def validate_core_schema(schema: CoreSchema) -> CoreSchema: - if os.getenv('PYDANTIC_VALIDATE_CORE_SCHEMAS'): - return _validate_core_schema(schema) - return schema - - def _clean_schema_for_pretty_print(obj: Any, strip_metadata: bool = True) -> Any: # pragma: no cover """A utility function to remove irrelevant information from a core schema.""" if isinstance(obj, Mapping): diff --git a/blimgui/dist64/pydantic/_internal/_dataclasses.py b/blimgui/dist64/pydantic/_internal/_dataclasses.py index 954ab32..869286b 100644 --- a/blimgui/dist64/pydantic/_internal/_dataclasses.py +++ b/blimgui/dist64/pydantic/_internal/_dataclasses.py @@ -2,11 +2,14 @@ from __future__ import annotations as _annotations +import copy import dataclasses -import typing +import sys import warnings -from functools import partial, wraps -from typing import Any, ClassVar +from collections.abc import Generator +from contextlib import contextmanager +from functools import partial +from typing import TYPE_CHECKING, Any, ClassVar, Protocol, cast from pydantic_core import ( ArgsKwargs, @@ -14,9 +17,10 @@ SchemaValidator, core_schema, ) -from typing_extensions import TypeGuard +from typing_extensions import TypeAlias, TypeIs from ..errors import PydanticUndefinedAnnotation +from ..fields import FieldInfo from ..plugin._schema_validator import PluggableSchemaValidator, 
create_schema_validator from ..warnings import PydanticDeprecatedSince20 from . import _config, _decorators @@ -28,13 +32,12 @@ from ._signature import generate_pydantic_signature from ._utils import LazyClassAttribute -if typing.TYPE_CHECKING: +if TYPE_CHECKING: from _typeshed import DataclassInstance as StandardDataclass from ..config import ConfigDict - from ..fields import FieldInfo - class PydanticDataclass(StandardDataclass, typing.Protocol): + class PydanticDataclass(StandardDataclass, Protocol): """A protocol containing attributes only available once a class has been decorated as a Pydantic dataclass. Attributes: @@ -55,23 +58,21 @@ class PydanticDataclass(StandardDataclass, typing.Protocol): __pydantic_serializer__: ClassVar[SchemaSerializer] __pydantic_validator__: ClassVar[SchemaValidator | PluggableSchemaValidator] -else: - # See PyCharm issues https://youtrack.jetbrains.com/issue/PY-21915 - # and https://youtrack.jetbrains.com/issue/PY-51428 - DeprecationWarning = PydanticDeprecatedSince20 + @classmethod + def __pydantic_fields_complete__(cls) -> bool: ... def set_dataclass_fields( cls: type[StandardDataclass], + config_wrapper: _config.ConfigWrapper, ns_resolver: NsResolver | None = None, - config_wrapper: _config.ConfigWrapper | None = None, ) -> None: """Collect and set `cls.__pydantic_fields__`. Args: cls: The class. + config_wrapper: The config wrapper instance. ns_resolver: Namespace resolver to use when getting dataclass annotations. - config_wrapper: The config wrapper instance, defaults to `None`. 
""" typevars_map = get_standard_typevars_map(cls) fields = collect_dataclass_fields( @@ -124,7 +125,7 @@ def __init__(__dataclass_self__: PydanticDataclass, *args: Any, **kwargs: Any) - cls.__init__ = __init__ # type: ignore cls.__pydantic_config__ = config_wrapper.config_dict # type: ignore - set_dataclass_fields(cls, ns_resolver, config_wrapper=config_wrapper) + set_dataclass_fields(cls, config_wrapper=config_wrapper, ns_resolver=ns_resolver) if not _force_build and config_wrapper.defer_build: set_dataclass_mocks(cls) @@ -132,7 +133,8 @@ def __init__(__dataclass_self__: PydanticDataclass, *args: Any, **kwargs: Any) - if hasattr(cls, '__post_init_post_parse__'): warnings.warn( - 'Support for `__post_init_post_parse__` has been dropped, the method will not be called', DeprecationWarning + 'Support for `__post_init_post_parse__` has been dropped, the method will not be called', + PydanticDeprecatedSince20, ) typevars_map = get_standard_typevars_map(cls) @@ -177,59 +179,137 @@ def __init__(__dataclass_self__: PydanticDataclass, *args: Any, **kwargs: Any) - # We are about to set all the remaining required properties expected for this cast; # __pydantic_decorators__ and __pydantic_fields__ should already be set - cls = typing.cast('type[PydanticDataclass]', cls) - # debug(schema) + cls = cast('type[PydanticDataclass]', cls) cls.__pydantic_core_schema__ = schema - cls.__pydantic_validator__ = validator = create_schema_validator( + cls.__pydantic_validator__ = create_schema_validator( schema, cls, cls.__module__, cls.__qualname__, 'dataclass', core_config, config_wrapper.plugin_settings ) cls.__pydantic_serializer__ = SchemaSerializer(schema, core_config) + cls.__pydantic_complete__ = True + return True - if config_wrapper.validate_assignment: - @wraps(cls.__setattr__) - def validated_setattr(instance: Any, field: str, value: str, /) -> None: - validator.validate_assignment(instance, field, value) +def is_stdlib_dataclass(cls: type[Any], /) -> 
TypeIs[type[StandardDataclass]]: + """Returns `True` if the class is a stdlib dataclass and *not* a Pydantic dataclass. - cls.__setattr__ = validated_setattr.__get__(None, cls) # type: ignore + Unlike the stdlib `dataclasses.is_dataclass()` function, this does *not* include subclasses + of a dataclass that are themselves not dataclasses. - cls.__pydantic_complete__ = True - return True + Args: + cls: The class. + + Returns: + `True` if the class is a stdlib dataclass, `False` otherwise. + """ + return '__dataclass_fields__' in cls.__dict__ and not hasattr(cls, '__pydantic_validator__') + + +def as_dataclass_field(pydantic_field: FieldInfo) -> dataclasses.Field[Any]: + field_args: dict[str, Any] = {'default': pydantic_field} + + # Needed because if `doc` is set, the dataclass slots will be a dict (field name -> doc) instead of a tuple: + if sys.version_info >= (3, 14) and pydantic_field.description is not None: + field_args['doc'] = pydantic_field.description + + # Needed as the stdlib dataclass module processes kw_only in a specific way during class construction: + if sys.version_info >= (3, 10) and pydantic_field.kw_only: + field_args['kw_only'] = True + + # Needed as the stdlib dataclass modules generates `__repr__()` during class construction: + if pydantic_field.repr is not True: + field_args['repr'] = pydantic_field.repr + return dataclasses.field(**field_args) -def is_builtin_dataclass(_cls: type[Any]) -> TypeGuard[type[StandardDataclass]]: - """Returns True if a class is a stdlib dataclass and *not* a pydantic dataclass. - We check that - - `_cls` is a dataclass - - `_cls` does not inherit from a processed pydantic dataclass (and thus have a `__pydantic_validator__`) - - `_cls` does not have any annotations that are not dataclass fields - e.g. 
- ```python - import dataclasses +DcFields: TypeAlias = dict[str, dataclasses.Field[Any]] - import pydantic.dataclasses - @dataclasses.dataclass +@contextmanager +def patch_base_fields(cls: type[Any]) -> Generator[None]: + """Temporarily patch the stdlib dataclasses bases of `cls` if the Pydantic `Field()` function is used. + + When creating a Pydantic dataclass, it is possible to inherit from stdlib dataclasses, where + the Pydantic `Field()` function is used. To create this Pydantic dataclass, we first apply + the stdlib `@dataclass` decorator on it. During the construction of the stdlib dataclass, + the `kw_only` and `repr` field arguments need to be understood by the stdlib *during* the + dataclass construction. To do so, we temporarily patch the fields dictionary of the affected + bases. + + For instance, with the following example: + + ```python {test="skip" lint="skip"} + import dataclasses as stdlib_dc + + import pydantic + import pydantic.dataclasses as pydantic_dc + + @stdlib_dc.dataclass class A: - x: int + a: int = pydantic.Field(repr=False) - @pydantic.dataclasses.dataclass + # Notice that the `repr` attribute of the dataclass field is `True`: + A.__dataclass_fields__['a'] + #> dataclass.Field(default=FieldInfo(repr=False), repr=True, ...) + + @pydantic_dc.dataclass class B(A): - y: int + b: int = pydantic.Field(repr=False) ``` - In this case, when we first check `B`, we make an extra check and look at the annotations ('y'), - which won't be a superset of all the dataclass fields (only the stdlib fields i.e. 'x') - Args: - cls: The class. + When passing `B` to the stdlib `@dataclass` decorator, it will look for fields in the parent classes + and reuse them directly. When this context manager is active, `A` will be temporarily patched to be + equivalent to: - Returns: - `True` if the class is a stdlib dataclass, `False` otherwise. 
+ ```python {test="skip" lint="skip"} + @stdlib_dc.dataclass + class A: + a: int = stdlib_dc.field(default=Field(repr=False), repr=False) + ``` + + !!! note + This is only applied to the bases of `cls`, and not `cls` itself. The reason is that the Pydantic + dataclass decorator "owns" `cls` (in the previous example, `B`). As such, we instead modify the fields + directly (in the previous example, we simply do `setattr(B, 'b', as_dataclass_field(pydantic_field))`). + + !!! note + This approach is far from ideal, and can probably be the source of unwanted side effects/race conditions. + The previous implemented approach was mutating the `__annotations__` dict of `cls`, which is no longer a + safe operation in Python 3.14+, and resulted in unexpected behavior with field ordering anyway. """ - return ( - dataclasses.is_dataclass(_cls) - and not hasattr(_cls, '__pydantic_validator__') - and set(_cls.__dataclass_fields__).issuperset(set(getattr(_cls, '__annotations__', {}))) - ) + # A list of two-tuples, the first element being a reference to the + # dataclass fields dictionary, the second element being a mapping between + # the field names that were modified, and their original `Field`: + original_fields_list: list[tuple[DcFields, DcFields]] = [] + + for base in cls.__mro__[1:]: + dc_fields: dict[str, dataclasses.Field[Any]] = base.__dict__.get('__dataclass_fields__', {}) + dc_fields_with_pydantic_field_defaults = { + field_name: field + for field_name, field in dc_fields.items() + if isinstance(field.default, FieldInfo) + # Only do the patching if one of the affected attributes is set: + and (field.default.description is not None or field.default.kw_only or field.default.repr is not True) + } + if dc_fields_with_pydantic_field_defaults: + original_fields_list.append((dc_fields, dc_fields_with_pydantic_field_defaults)) + for field_name, field in dc_fields_with_pydantic_field_defaults.items(): + default = cast(FieldInfo, field.default) + # `dataclasses.Field` isn't 
documented as working with `copy.copy()`. + # It is a class with `__slots__`, so should work (and we hope for the best): + new_dc_field = copy.copy(field) + # For base fields, no need to set `doc` from `FieldInfo.description`, this is only relevant + # for the class under construction and handled in `as_dataclass_field()`. + if sys.version_info >= (3, 10) and default.kw_only: + new_dc_field.kw_only = True + if default.repr is not True: + new_dc_field.repr = default.repr + dc_fields[field_name] = new_dc_field + + try: + yield + finally: + for fields, original_fields in original_fields_list: + for field_name, original_field in original_fields.items(): + fields[field_name] = original_field diff --git a/blimgui/dist64/pydantic/_internal/_decorators.py b/blimgui/dist64/pydantic/_internal/_decorators.py index 92880a4..40e5128 100644 --- a/blimgui/dist64/pydantic/_internal/_decorators.py +++ b/blimgui/dist64/pydantic/_internal/_decorators.py @@ -2,6 +2,7 @@ from __future__ import annotations as _annotations +import sys import types from collections import deque from collections.abc import Iterable @@ -11,7 +12,8 @@ from itertools import islice from typing import TYPE_CHECKING, Any, Callable, ClassVar, Generic, Literal, TypeVar, Union -from pydantic_core import PydanticUndefined, PydanticUndefinedType, core_schema +from pydantic_core import core_schema +from pydantic_core._pydantic_core import PydanticUndefined, PydanticUndefinedType from typing_extensions import TypeAlias, is_typeddict from ..errors import PydanticUserError @@ -24,6 +26,7 @@ if TYPE_CHECKING: from ..fields import ComputedFieldInfo from ..functional_validators import FieldValidatorModes + from ._config import ConfigWrapper @dataclass(**slots_true) @@ -196,7 +199,7 @@ def _call_wrapped_attr(self, func: Callable[[Any], None], *, name: str) -> Pydan def __get__(self, obj: object | None, obj_type: type[object] | None = None) -> PydanticDescriptorProxy[ReturnType]: try: - return self.wrapped.__get__(obj, 
obj_type) + return self.wrapped.__get__(obj, obj_type) # pyright: ignore[reportReturnType] except AttributeError: # not a descriptor, e.g. a partial object return self.wrapped # type: ignore[return-value] @@ -514,8 +517,15 @@ def build(model_dc: type[Any]) -> DecoratorInfos: # noqa: C901 (ignore complexi setattr(model_dc, name, value) return res + def update_from_config(self, config_wrapper: ConfigWrapper) -> None: + """Update the decorator infos from the configuration of the class they are attached to.""" + for name, computed_field_dec in self.computed_fields.items(): + computed_field_dec.info._update_from_config(config_wrapper, name) -def inspect_validator(validator: Callable[..., Any], mode: FieldValidatorModes) -> bool: + +def inspect_validator( + validator: Callable[..., Any], *, mode: FieldValidatorModes, type: Literal['field', 'model'] +) -> bool: """Look at a field or model validator function and determine whether it takes an info argument. An error is raised if the function has an invalid signature. @@ -523,12 +533,13 @@ def inspect_validator(validator: Callable[..., Any], mode: FieldValidatorModes) Args: validator: The validator function to inspect. mode: The proposed validator mode. + type: The type of validator, either 'field' or 'model'. Returns: Whether the validator takes an info argument. """ try: - sig = signature(validator) + sig = _signature_no_eval(validator) except (ValueError, TypeError): # `inspect.signature` might not be able to infer a signature, e.g. with C objects. 
# In this case, we assume no info argument is present: @@ -547,7 +558,7 @@ def inspect_validator(validator: Callable[..., Any], mode: FieldValidatorModes) return False raise PydanticUserError( - f'Unrecognized field_validator function signature for {validator} with `mode={mode}`:{sig}', + f'Unrecognized {type} validator function signature for {validator} with `mode={mode}`: {sig}', code='validator-signature', ) @@ -566,7 +577,7 @@ def inspect_field_serializer(serializer: Callable[..., Any], mode: Literal['plai Tuple of (is_field_serializer, info_arg). """ try: - sig = signature(serializer) + sig = _signature_no_eval(serializer) except (ValueError, TypeError): # `inspect.signature` might not be able to infer a signature, e.g. with C objects. # In this case, we assume no info argument is present and this is not a method: @@ -604,7 +615,7 @@ def inspect_annotated_serializer(serializer: Callable[..., Any], mode: Literal[' info_arg """ try: - sig = signature(serializer) + sig = _signature_no_eval(serializer) except (ValueError, TypeError): # `inspect.signature` might not be able to infer a signature, e.g. with C objects. # In this case, we assume no info argument is present: @@ -636,7 +647,7 @@ def inspect_model_serializer(serializer: Callable[..., Any], mode: Literal['plai '`@model_serializer` must be applied to instance methods', code='model-serializer-instance-method' ) - sig = signature(serializer) + sig = _signature_no_eval(serializer) info_arg = _serializer_info_arg(mode, count_positional_required_params(sig)) if info_arg is None: raise PydanticUserError( @@ -684,7 +695,7 @@ def is_instance_method_from_sig(function: AnyDecoratorCallable) -> bool: Returns: `True` if the function is an instance method, `False` otherwise. 
""" - sig = signature(unwrap_wrapped_function(function)) + sig = _signature_no_eval(unwrap_wrapped_function(function)) first = next(iter(sig.parameters.values()), None) if first and first.name == 'self': return True @@ -708,7 +719,7 @@ def ensure_classmethod_based_on_signature(function: AnyDecoratorCallable) -> Any def _is_classmethod_from_sig(function: AnyDecoratorCallable) -> bool: - sig = signature(unwrap_wrapped_function(function)) + sig = _signature_no_eval(unwrap_wrapped_function(function)) first = next(iter(sig.parameters.values()), None) if first and first.name == 'cls': return True @@ -836,3 +847,13 @@ def ensure_property(f: Any) -> Any: return f else: return property(f) + + +def _signature_no_eval(f: Callable[..., Any]) -> Signature: + """Get the signature of a callable without evaluating any annotations.""" + if sys.version_info >= (3, 14): + from annotationlib import Format + + return signature(f, annotation_format=Format.FORWARDREF) + else: + return signature(f) diff --git a/blimgui/dist64/pydantic/_internal/_docs_extraction.py b/blimgui/dist64/pydantic/_internal/_docs_extraction.py index 7b5f310..6df77bf 100644 --- a/blimgui/dist64/pydantic/_internal/_docs_extraction.py +++ b/blimgui/dist64/pydantic/_internal/_docs_extraction.py @@ -4,6 +4,7 @@ import ast import inspect +import sys import textwrap from typing import Any @@ -89,13 +90,17 @@ def extract_docstrings_from_cls(cls: type[Any], use_inspect: bool = False) -> di Returns: A mapping containing attribute names and their corresponding docstring. """ - if use_inspect: - # Might not work as expected if two classes have the same name in the same source file. + if use_inspect or sys.version_info >= (3, 13): + # On Python < 3.13, `inspect.getsourcelines()` might not work as expected + # if two classes have the same name in the same source file. + # On Python 3.13+, it will use the new `__firstlineno__` class attribute, + # making it way more robust. 
try: source, _ = inspect.getsourcelines(cls) except OSError: # pragma: no cover return {} else: + # TODO remove this implementation when we drop support for Python 3.12: source = _extract_source_from_frame(cls) if not source: diff --git a/blimgui/dist64/pydantic/_internal/_fields.py b/blimgui/dist64/pydantic/_internal/_fields.py index e9a7995..aad2ac9 100644 --- a/blimgui/dist64/pydantic/_internal/_fields.py +++ b/blimgui/dist64/pydantic/_internal/_fields.py @@ -5,34 +5,33 @@ import dataclasses import warnings from collections.abc import Mapping -from copy import copy from functools import cache from inspect import Parameter, ismethoddescriptor, signature from re import Pattern from typing import TYPE_CHECKING, Any, Callable, TypeVar from pydantic_core import PydanticUndefined -from typing_extensions import TypeIs, get_origin -from typing_inspection import typing_objects +from typing_extensions import TypeIs from typing_inspection.introspection import AnnotationSource from pydantic import PydanticDeprecatedSince211 from pydantic.errors import PydanticUserError +from ..aliases import AliasGenerator from . 
import _generics, _typing_extra from ._config import ConfigWrapper from ._docs_extraction import extract_docstrings_from_cls from ._import_utils import import_cached_base_model, import_cached_field_info from ._namespace_utils import NsResolver from ._repr import Representation -from ._utils import can_be_positional +from ._utils import can_be_positional, get_first_not_none if TYPE_CHECKING: from annotated_types import BaseMetadata from ..fields import FieldInfo from ..main import BaseModel - from ._dataclasses import StandardDataclass + from ._dataclasses import PydanticDataclass, StandardDataclass from ._decorators import DecoratorInfos @@ -68,6 +67,49 @@ def __init__(self, metadata: Any): return _PydanticGeneralMetadata # type: ignore +def _check_protected_namespaces( + protected_namespaces: tuple[str | Pattern[str], ...], + ann_name: str, + bases: tuple[type[Any], ...], + cls_name: str, +) -> None: + BaseModel = import_cached_base_model() + + for protected_namespace in protected_namespaces: + ns_violation = False + if isinstance(protected_namespace, Pattern): + ns_violation = protected_namespace.match(ann_name) is not None + elif isinstance(protected_namespace, str): + ns_violation = ann_name.startswith(protected_namespace) + + if ns_violation: + for b in bases: + if hasattr(b, ann_name): + if not (issubclass(b, BaseModel) and ann_name in getattr(b, '__pydantic_fields__', {})): + raise ValueError( + f'Field {ann_name!r} conflicts with member {getattr(b, ann_name)}' + f' of protected namespace {protected_namespace!r}.' 
+ ) + else: + valid_namespaces: list[str] = [] + for pn in protected_namespaces: + if isinstance(pn, Pattern): + if not pn.match(ann_name): + valid_namespaces.append(f're.compile({pn.pattern!r})') + else: + if not ann_name.startswith(pn): + valid_namespaces.append(f"'{pn}'") + + valid_namespaces_str = f'({", ".join(valid_namespaces)}{",)" if len(valid_namespaces) == 1 else ")"}' + + warnings.warn( + f'Field {ann_name!r} in {cls_name!r} conflicts with protected namespace {protected_namespace!r}.\n\n' + f"You may be able to solve this by setting the 'protected_namespaces' configuration to {valid_namespaces_str}.", + UserWarning, + stacklevel=5, + ) + + def _update_fields_from_docstrings(cls: type[Any], fields: dict[str, FieldInfo], use_inspect: bool = False) -> None: fields_docs = extract_docstrings_from_cls(cls, use_inspect=use_inspect) for ann_name, field_info in fields.items(): @@ -75,6 +117,102 @@ def _update_fields_from_docstrings(cls: type[Any], fields: dict[str, FieldInfo], field_info.description = fields_docs[ann_name] +def _apply_field_title_generator_to_field_info( + title_generator: Callable[[str, FieldInfo], str], + field_name: str, + field_info: FieldInfo, +): + if field_info.title is None: + title = title_generator(field_name, field_info) + if not isinstance(title, str): + raise TypeError(f'field_title_generator {title_generator} must return str, not {title.__class__}') + + field_info.title = title + + +def _apply_alias_generator_to_field_info( + alias_generator: Callable[[str], str] | AliasGenerator, field_name: str, field_info: FieldInfo +): + """Apply an alias generator to aliases on a `FieldInfo` instance if appropriate. + + Args: + alias_generator: A callable that takes a string and returns a string, or an `AliasGenerator` instance. + field_name: The name of the field from which to generate the alias. + field_info: The `FieldInfo` instance to which the alias generator is (maybe) applied. + """ + # Apply an alias_generator if + # 1. 
An alias is not specified + # 2. An alias is specified, but the priority is <= 1 + if ( + field_info.alias_priority is None + or field_info.alias_priority <= 1 + or field_info.alias is None + or field_info.validation_alias is None + or field_info.serialization_alias is None + ): + alias, validation_alias, serialization_alias = None, None, None + + if isinstance(alias_generator, AliasGenerator): + alias, validation_alias, serialization_alias = alias_generator.generate_aliases(field_name) + elif callable(alias_generator): + alias = alias_generator(field_name) + if not isinstance(alias, str): + raise TypeError(f'alias_generator {alias_generator} must return str, not {alias.__class__}') + + # if priority is not set, we set to 1 + # which supports the case where the alias_generator from a child class is used + # to generate an alias for a field in a parent class + if field_info.alias_priority is None or field_info.alias_priority <= 1: + field_info.alias_priority = 1 + + # if the priority is 1, then we set the aliases to the generated alias + if field_info.alias_priority == 1: + field_info.serialization_alias = get_first_not_none(serialization_alias, alias) + field_info.validation_alias = get_first_not_none(validation_alias, alias) + field_info.alias = alias + + # if any of the aliases are not set, then we set them to the corresponding generated alias + if field_info.alias is None: + field_info.alias = alias + if field_info.serialization_alias is None: + field_info.serialization_alias = get_first_not_none(serialization_alias, alias) + if field_info.validation_alias is None: + field_info.validation_alias = get_first_not_none(validation_alias, alias) + + +def update_field_from_config(config_wrapper: ConfigWrapper, field_name: str, field_info: FieldInfo) -> None: + """Update the `FieldInfo` instance from the configuration set on the model it belongs to. + + This will apply the title and alias generators from the configuration. 
+ + Args: + config_wrapper: The configuration from the model. + field_name: The field name the `FieldInfo` instance is attached to. + field_info: The `FieldInfo` instance to update. + """ + field_title_generator = field_info.field_title_generator or config_wrapper.field_title_generator + if field_title_generator is not None: + _apply_field_title_generator_to_field_info(field_title_generator, field_name, field_info) + if config_wrapper.alias_generator is not None: + _apply_alias_generator_to_field_info(config_wrapper.alias_generator, field_name, field_info) + + +_deprecated_method_names = {'dict', 'json', 'copy', '_iter', '_copy_and_set_values', '_calculate_keys'} + +_deprecated_classmethod_names = { + 'parse_obj', + 'parse_raw', + 'parse_file', + 'from_orm', + 'construct', + 'schema', + 'schema_json', + 'validate', + 'update_forward_refs', + '_get_value', +} + + def collect_model_fields( # noqa: C901 cls: type[BaseModel], config_wrapper: ConfigWrapper, @@ -106,8 +244,8 @@ def collect_model_fields( # noqa: C901 - If there is a field other than `root` in `RootModel`. - If a field shadows an attribute in the parent model. 
""" - BaseModel = import_cached_base_model() FieldInfo_ = import_cached_field_info() + BaseModel_ = import_cached_base_model() bases = cls.__bases__ parent_fields_lookup: dict[str, FieldInfo] = {} @@ -119,7 +257,8 @@ def collect_model_fields( # noqa: C901 # https://docs.python.org/3/howto/annotations.html#accessing-the-annotations-dict-of-an-object-in-python-3-9-and-older # annotations is only used for finding fields in parent classes - annotations = cls.__dict__.get('__annotations__', {}) + annotations = _typing_extra.safe_get_annotations(cls) + fields: dict[str, FieldInfo] = {} class_vars: set[str] = set() @@ -130,42 +269,32 @@ def collect_model_fields( # noqa: C901 # protected namespaces (where `model_config` might be allowed as a field name) continue - for protected_namespace in config_wrapper.protected_namespaces: - ns_violation: bool = False - if isinstance(protected_namespace, Pattern): - ns_violation = protected_namespace.match(ann_name) is not None - elif isinstance(protected_namespace, str): - ns_violation = ann_name.startswith(protected_namespace) - - if ns_violation: - for b in bases: - if hasattr(b, ann_name): - if not (issubclass(b, BaseModel) and ann_name in getattr(b, '__pydantic_fields__', {})): - raise NameError( - f'Field "{ann_name}" conflicts with member {getattr(b, ann_name)}' - f' of protected namespace "{protected_namespace}".' - ) - else: - valid_namespaces = () - for pn in config_wrapper.protected_namespaces: - if isinstance(pn, Pattern): - if not pn.match(ann_name): - valid_namespaces += (f're.compile({pn.pattern})',) - else: - if not ann_name.startswith(pn): - valid_namespaces += (pn,) - - warnings.warn( - f'Field "{ann_name}" in {cls.__name__} has conflict with protected namespace "{protected_namespace}".' 
- '\n\nYou may be able to resolve this warning by setting' - f" `model_config['protected_namespaces'] = {valid_namespaces}`.", - UserWarning, - ) + _check_protected_namespaces( + protected_namespaces=config_wrapper.protected_namespaces, + ann_name=ann_name, + bases=bases, + cls_name=cls.__name__, + ) + if _typing_extra.is_classvar_annotation(ann_type): class_vars.add(ann_name) continue assigned_value = getattr(cls, ann_name, PydanticUndefined) + if assigned_value is not PydanticUndefined and ( + # One of the deprecated instance methods was used as a field name (e.g. `dict()`): + any(getattr(BaseModel_, depr_name, None) is assigned_value for depr_name in _deprecated_method_names) + # One of the deprecated class methods was used as a field name (e.g. `schema()`): + or ( + hasattr(assigned_value, '__func__') + and any( + getattr(getattr(BaseModel_, depr_name, None), '__func__', None) is assigned_value.__func__ # pyright: ignore[reportAttributeAccessIssue] + for depr_name in _deprecated_classmethod_names + ) + ) + ): + # Then `assigned_value` would be the method, even though no default was specified: + assigned_value = PydanticUndefined if not is_valid_field_name(ann_name): continue @@ -199,6 +328,7 @@ def collect_model_fields( # noqa: C901 f'Field name "{ann_name}" in "{cls.__qualname__}" shadows an attribute in parent ' f'"{base.__qualname__}"', UserWarning, + stacklevel=4, ) if assigned_value is PydanticUndefined: # no assignment, just a plain annotation @@ -217,30 +347,27 @@ def collect_model_fields( # noqa: C901 else: # The field was present on one of the (possibly multiple) base classes # copy the field to make sure typevar substitutions don't cause issues with the base classes - field_info = copy(parent_fields_lookup[ann_name]) + field_info = parent_fields_lookup[ann_name]._copy() else: # An assigned value is present (either the default value, or a `Field()` function) - _warn_on_nested_alias_in_annotation(ann_type, ann_name) if isinstance(assigned_value, 
FieldInfo_) and ismethoddescriptor(assigned_value.default): # `assigned_value` was fetched using `getattr`, which triggers a call to `__get__` # for descriptors, so we do the same if the `= field(default=...)` form is used. # Note that we only do this for method descriptors for now, we might want to # extend this to any descriptor in the future (by simply checking for # `hasattr(assigned_value.default, '__get__')`). - assigned_value.default = assigned_value.default.__get__(None, cls) - - # The `from_annotated_attribute()` call below mutates the assigned `Field()`, so make a copy: - original_assignment = ( - copy(assigned_value) if not evaluated and isinstance(assigned_value, FieldInfo_) else assigned_value - ) + default = assigned_value.default.__get__(None, cls) + assigned_value.default = default + assigned_value._attributes_set['default'] = default field_info = FieldInfo_.from_annotated_attribute(ann_type, assigned_value, _source=AnnotationSource.CLASS) + # Store the original annotation and assignment value that should be used to rebuild the field info later. + # Note that the assignment is always stored as the annotation might contain a type var that is later + # parameterized with an unknown forward reference (and we'll need it to rebuild the field info): + field_info._original_assignment = assigned_value if not evaluated: field_info._complete = False - # Store the original annotation and assignment value that should be used to rebuild - # the field info later: field_info._original_annotation = ann_type - field_info._original_assignment = original_assignment elif 'final' in field_info._qualifiers and not field_info.is_required(): warnings.warn( f'Annotation {ann_name!r} is marked as final and has a default value. 
Pydantic treats {ann_name!r} as a ' @@ -271,6 +398,10 @@ def collect_model_fields( # noqa: C901 ) fields[ann_name] = field_info + if field_info._complete: + # If not complete, this will be called in `rebuild_model_fields()`: + update_field_from_config(config_wrapper, ann_name, field_info) + if typevars_map: for field in fields.values(): if field._complete: @@ -281,25 +412,10 @@ def collect_model_fields( # noqa: C901 return fields, class_vars -def _warn_on_nested_alias_in_annotation(ann_type: type[Any], ann_name: str) -> None: - FieldInfo = import_cached_field_info() - - args = getattr(ann_type, '__args__', None) - if args: - for anno_arg in args: - if typing_objects.is_annotated(get_origin(anno_arg)): - for anno_type_arg in _typing_extra.get_args(anno_arg): - if isinstance(anno_type_arg, FieldInfo) and anno_type_arg.alias is not None: - warnings.warn( - f'`alias` specification on field "{ann_name}" must be set on outermost annotation to take effect.', - UserWarning, - ) - return - - def rebuild_model_fields( cls: type[BaseModel], *, + config_wrapper: ConfigWrapper, ns_resolver: NsResolver, typevars_map: Mapping[TypeVar, Any], ) -> dict[str, FieldInfo]: @@ -307,6 +423,9 @@ def rebuild_model_fields( This function should be called whenever a model with incomplete fields is encountered. + Raises: + NameError: If one of the annotations failed to evaluate. + Note: This function *doesn't* mutate the model fields in place, as it can be called during schema generation, where you don't want to mutate other model's fields. 
@@ -332,6 +451,7 @@ def rebuild_model_fields( new_field = FieldInfo_.from_annotated_attribute(ann, assign, _source=AnnotationSource.CLASS) # The description might come from the docstring if `use_attribute_docstrings` was `True`: new_field.description = new_field.description if new_field.description is not None else existing_desc + update_field_from_config(config_wrapper, f_name, new_field) rebuilt_fields[f_name] = new_field return rebuilt_fields @@ -340,18 +460,18 @@ def rebuild_model_fields( def collect_dataclass_fields( cls: type[StandardDataclass], *, + config_wrapper: ConfigWrapper, ns_resolver: NsResolver | None = None, typevars_map: dict[Any, Any] | None = None, - config_wrapper: ConfigWrapper | None = None, ) -> dict[str, FieldInfo]: """Collect the fields of a dataclass. Args: cls: dataclass. + config_wrapper: The config wrapper instance. ns_resolver: Namespace resolver to use when getting dataclass annotations. Defaults to an empty instance. typevars_map: A dictionary mapping type variables to their concrete types. - config_wrapper: The config wrapper instance. Returns: The dataclass fields. @@ -371,13 +491,15 @@ def collect_dataclass_fields( with ns_resolver.push(base): for ann_name, dataclass_field in dataclass_fields.items(): - if ann_name not in base.__dict__.get('__annotations__', {}): + base_anns = _typing_extra.safe_get_annotations(base) + + if ann_name not in base_anns: # `__dataclass_fields__`contains every field, even the ones from base classes. # Only collect the ones defined on `base`. 
continue globalns, localns = ns_resolver.types_namespace - ann_type, _ = _typing_extra.try_eval_type(dataclass_field.type, globalns, localns) + ann_type, evaluated = _typing_extra.try_eval_type(dataclass_field.type, globalns, localns) if _typing_extra.is_classvar_annotation(ann_type): continue @@ -404,12 +526,19 @@ def collect_dataclass_fields( field_info = FieldInfo_.from_annotated_attribute( ann_type, dataclass_field.default, _source=AnnotationSource.DATACLASS ) + field_info._original_assignment = dataclass_field.default else: field_info = FieldInfo_.from_annotated_attribute( ann_type, dataclass_field, _source=AnnotationSource.DATACLASS ) + field_info._original_assignment = dataclass_field + + if not evaluated: + field_info._complete = False + field_info._original_annotation = ann_type fields[ann_name] = field_info + update_field_from_config(config_wrapper, ann_name, field_info) if field_info.default is not PydanticUndefined and isinstance( getattr(cls, ann_name, field_info), FieldInfo_ @@ -424,7 +553,7 @@ def collect_dataclass_fields( # Can't we juste use `_generics.replace_types`? field.apply_typevars_map(typevars_map) - if config_wrapper is not None and config_wrapper.use_attribute_docstrings: + if config_wrapper.use_attribute_docstrings: _update_fields_from_docstrings( cls, fields, @@ -436,6 +565,52 @@ def collect_dataclass_fields( return fields +def rebuild_dataclass_fields( + cls: type[PydanticDataclass], + *, + config_wrapper: ConfigWrapper, + ns_resolver: NsResolver, + typevars_map: Mapping[TypeVar, Any], +) -> dict[str, FieldInfo]: + """Rebuild the (already present) dataclass fields by trying to reevaluate annotations. + + This function should be called whenever a dataclass with incomplete fields is encountered. + + Raises: + NameError: If one of the annotations failed to evaluate. 
+ + Note: + This function *doesn't* mutate the dataclass fields in place, as it can be called during + schema generation, where you don't want to mutate other dataclass's fields. + """ + FieldInfo_ = import_cached_field_info() + + rebuilt_fields: dict[str, FieldInfo] = {} + with ns_resolver.push(cls): + for f_name, field_info in cls.__pydantic_fields__.items(): + if field_info._complete: + rebuilt_fields[f_name] = field_info + else: + existing_desc = field_info.description + ann = _typing_extra.eval_type( + field_info._original_annotation, + *ns_resolver.types_namespace, + ) + ann = _generics.replace_types(ann, typevars_map) + new_field = FieldInfo_.from_annotated_attribute( + ann, + field_info._original_assignment, + _source=AnnotationSource.DATACLASS, + ) + + # The description might come from the docstring if `use_attribute_docstrings` was `True`: + new_field.description = new_field.description if new_field.description is not None else existing_desc + update_field_from_config(config_wrapper, f_name, new_field) + rebuilt_fields[f_name] = new_field + + return rebuilt_fields + + def is_valid_field_name(name: str) -> bool: return not name.startswith('_') diff --git a/blimgui/dist64/pydantic/_internal/_generate_schema.py b/blimgui/dist64/pydantic/_internal/_generate_schema.py index faefc49..839764c 100644 --- a/blimgui/dist64/pydantic/_internal/_generate_schema.py +++ b/blimgui/dist64/pydantic/_internal/_generate_schema.py @@ -37,11 +37,11 @@ overload, ) from uuid import UUID -from warnings import warn from zoneinfo import ZoneInfo import typing_extensions from pydantic_core import ( + MISSING, CoreSchema, MultiHostUrl, PydanticCustomError, @@ -51,18 +51,23 @@ core_schema, to_jsonable_python, ) -from typing_extensions import TypeAlias, TypeAliasType, TypedDict, get_args, get_origin, is_typeddict +from typing_extensions import TypeAlias, TypeAliasType, get_args, get_origin, is_typeddict from typing_inspection import typing_objects from typing_inspection.introspection 
import AnnotationSource, get_literal_values, is_union_origin -from ..aliases import AliasChoices, AliasGenerator, AliasPath +from ..aliases import AliasChoices, AliasPath from ..annotated_handlers import GetCoreSchemaHandler, GetJsonSchemaHandler from ..config import ConfigDict, JsonDict, JsonEncoder, JsonSchemaExtraCallable from ..errors import PydanticSchemaGenerationError, PydanticUndefinedAnnotation, PydanticUserError from ..functional_validators import AfterValidator, BeforeValidator, FieldValidatorModes, PlainValidator, WrapValidator from ..json_schema import JsonSchemaValue from ..version import version_short -from ..warnings import PydanticDeprecatedSince20 +from ..warnings import ( + ArbitraryTypeWarning, + PydanticDeprecatedSince20, + TypedDictExtraConfigWarning, + UnsupportedFieldAttributeWarning, +) from . import _decorators, _discriminated_union, _known_annotated_metadata, _repr, _typing_extra from ._config import ConfigWrapper, ConfigWrapperStack from ._core_metadata import CoreMetadata, update_core_metadata @@ -70,7 +75,6 @@ get_ref, get_type_ref, is_list_like_schema_with_items_schema, - validate_core_schema, ) from ._decorators import ( Decorator, @@ -87,7 +91,13 @@ inspect_validator, ) from ._docs_extraction import extract_docstrings_from_cls -from ._fields import collect_dataclass_fields, rebuild_model_fields, takes_validated_data_argument +from ._fields import ( + collect_dataclass_fields, + rebuild_dataclass_fields, + rebuild_model_fields, + takes_validated_data_argument, + update_field_from_config, +) from ._forward_ref import PydanticRecursiveRef from ._generics import get_standard_typevars_map, replace_types from ._import_utils import import_cached_base_model, import_cached_field_info @@ -159,6 +169,24 @@ ] VALIDATE_CALL_SUPPORTED_TYPES = get_args(ValidateCallSupportedTypes) +UNSUPPORTED_STANDALONE_FIELDINFO_ATTRIBUTES: list[tuple[str, Any]] = [ + ('alias', None), + ('validation_alias', None), + ('serialization_alias', None), + # will be set 
if any alias is set, so disable it to avoid double warnings: + # 'alias_priority', + ('default', PydanticUndefined), + ('default_factory', None), + ('exclude', None), + ('deprecated', None), + ('repr', True), + ('validate_default', None), + ('frozen', None), + ('init', None), + ('init_var', None), + ('kw_only', None), +] +"""`FieldInfo` attributes (and their default value) that can't be used outside of a model (e.g. in a type adapter or a PEP 695 type alias).""" _mode_to_validator: dict[ FieldValidatorModes, type[BeforeValidator | AfterValidator | PlainValidator | WrapValidator] @@ -218,7 +246,6 @@ def filter_field_decorator_info_by_field( def apply_each_item_validators( schema: core_schema.CoreSchema, each_item_validators: list[Decorator[ValidatorDecoratorInfo]], - field_name: str | None, ) -> core_schema.CoreSchema: # This V1 compatibility shim should eventually be removed @@ -230,21 +257,20 @@ def apply_each_item_validators( # note that this won't work for any Annotated types that get wrapped by a function validator # but that's okay because that didn't exist in V1 if schema['type'] == 'nullable': - schema['schema'] = apply_each_item_validators(schema['schema'], each_item_validators, field_name) + schema['schema'] = apply_each_item_validators(schema['schema'], each_item_validators) return schema elif schema['type'] == 'tuple': if (variadic_item_index := schema.get('variadic_item_index')) is not None: schema['items_schema'][variadic_item_index] = apply_validators( schema['items_schema'][variadic_item_index], each_item_validators, - field_name, ) elif is_list_like_schema_with_items_schema(schema): inner_schema = schema.get('items_schema', core_schema.any_schema()) - schema['items_schema'] = apply_validators(inner_schema, each_item_validators, field_name) + schema['items_schema'] = apply_validators(inner_schema, each_item_validators) elif schema['type'] == 'dict': inner_schema = schema.get('values_schema', core_schema.any_schema()) - schema['values_schema'] = 
apply_validators(inner_schema, each_item_validators, field_name) + schema['values_schema'] = apply_validators(inner_schema, each_item_validators) else: raise TypeError( f'`@validator(..., each_item=True)` cannot be applied to fields with a schema of {schema["type"]}' @@ -302,15 +328,6 @@ def _add_custom_serialization_from_json_encoders( return schema -def _get_first_non_null(a: Any, b: Any) -> Any: - """Return the first argument if it is not None, otherwise return the second argument. - - Use case: serialization_alias (argument a) and alias (argument b) are both defined, and serialization_alias is ''. - This function will return serialization_alias, which is the first argument, even though it is an empty string. - """ - return a if a is not None else b - - class InvalidSchemaError(Exception): """The core schema is invalid.""" @@ -568,7 +585,15 @@ def _mapping_schema(self, tp: Any, keys_type: Any, values_type: Any) -> CoreSche mapped_origin = MAPPING_ORIGIN_MAP[tp] keys_schema = self.generate_schema(keys_type) - values_schema = self.generate_schema(values_type) + with warnings.catch_warnings(): + # We kind of abused `Field()` default factories to be able to specify + # the `defaultdict`'s `default_factory`. As a consequence, we get warnings + # as normally `FieldInfo.default_factory` is unsupported in the context where + # `Field()` is used and our only solution is to ignore them (note that this might + # wrongfully ignore valid warnings, e.g. if the `value_type` is a PEP 695 type alias + # with unsupported metadata). 
+ warnings.simplefilter('ignore', category=UnsupportedFieldAttributeWarning) + values_schema = self.generate_schema(values_type) dict_schema = core_schema.dict_schema(keys_schema, values_schema, strict=False) if mapped_origin is dict: @@ -620,12 +645,12 @@ def _fraction_schema(self) -> CoreSchema: def _arbitrary_type_schema(self, tp: Any) -> CoreSchema: if not isinstance(tp, type): - warn( + warnings.warn( f'{tp!r} is not a Python type (it may be an instance of an object),' ' Pydantic will allow any object with no validation since we cannot even' ' enforce that the input is an instance of the given type.' ' To get rid of this error wrap the type with `pydantic.SkipValidation`.', - UserWarning, + ArbitraryTypeWarning, ) return core_schema.any_schema() return core_schema.is_instance_schema(tp) @@ -661,9 +686,7 @@ def _apply_discriminator_to_union( return schema def clean_schema(self, schema: CoreSchema) -> CoreSchema: - schema = self.defs.finalize_schema(schema) - schema = validate_core_schema(schema) - return schema + return self.defs.finalize_schema(schema) def _add_js_function(self, metadata_schema: CoreSchema, js_function: Callable[..., Any]) -> None: metadata = metadata_schema.get('metadata', {}) @@ -741,7 +764,7 @@ def _model_schema(self, cls: type[BaseModel]) -> core_schema.CoreSchema: if cls.__pydantic_fields_complete__ or cls is BaseModel_: fields = getattr(cls, '__pydantic_fields__', {}) else: - if not hasattr(cls, '__pydantic_fields__'): + if '__pydantic_fields__' not in cls.__dict__: # This happens when we have a loop in the schema generation: # class Base[T](BaseModel): # t: T @@ -759,6 +782,7 @@ def _model_schema(self, cls: type[BaseModel]) -> core_schema.CoreSchema: try: fields = rebuild_model_fields( cls, + config_wrapper=self._config_wrapper, ns_resolver=self._ns_resolver, typevars_map=self._typevars_map or {}, ) @@ -814,8 +838,8 @@ def _model_schema(self, cls: type[BaseModel]) -> core_schema.CoreSchema: generic_origin: type[BaseModel] | None = 
getattr(cls, '__pydantic_generic_metadata__', {}).get('origin') if cls.__pydantic_root_model__: - root_field = self._common_field_schema('root', fields['root'], decorators) - inner_schema = root_field['schema'] + # FIXME: should the common field metadata be used here? + inner_schema, _ = self._common_field_schema('root', fields['root'], decorators) inner_schema = apply_model_validators(inner_schema, model_validators, 'inner') model_schema = core_schema.model_schema( cls, @@ -838,7 +862,7 @@ def _model_schema(self, cls: type[BaseModel]) -> core_schema.CoreSchema: extras_keys_schema=extras_keys_schema, model_name=cls.__name__, ) - inner_schema = apply_validators(fields_schema, decorators.root_validators.values(), None) + inner_schema = apply_validators(fields_schema, decorators.root_validators.values()) inner_schema = apply_model_validators(inner_schema, model_validators, 'inner') model_schema = core_schema.model_schema( @@ -910,12 +934,12 @@ def _generate_schema_from_get_schema_method(self, obj: Any, source: Any) -> core from pydantic.v1 import BaseModel as BaseModelV1 if issubclass(obj, BaseModelV1): - warn( + warnings.warn( f'Mixing V1 models and V2 models (or constructs, like `TypeAdapter`) is not supported. 
Please upgrade `{obj.__name__}` to V2.', UserWarning, ) else: - warn( + warnings.warn( '`__get_validators__` is deprecated and will be removed, use `__get_pydantic_core_schema__` instead.', PydanticDeprecatedSince20, ) @@ -1050,6 +1074,8 @@ def match_type(self, obj: Any) -> core_schema.CoreSchema: # noqa: C901 return core_schema.multi_host_url_schema() elif obj is None or obj is _typing_extra.NoneType: return core_schema.none_schema() + if obj is MISSING: + return core_schema.missing_sentinel_schema() elif obj in IP_TYPES: return self._ip_schema(obj) elif obj in TUPLE_TYPES: @@ -1181,14 +1207,15 @@ def _generate_td_field_schema( required: bool = True, ) -> core_schema.TypedDictField: """Prepare a TypedDictField to represent a model or typeddict field.""" - common_field = self._common_field_schema(name, field_info, decorators) + schema, metadata = self._common_field_schema(name, field_info, decorators) return core_schema.typed_dict_field( - common_field['schema'], + schema, required=False if not field_info.is_required() else required, - serialization_exclude=common_field['serialization_exclude'], - validation_alias=common_field['validation_alias'], - serialization_alias=common_field['serialization_alias'], - metadata=common_field['metadata'], + serialization_exclude=field_info.exclude, + validation_alias=_convert_to_aliases(field_info.validation_alias), + serialization_alias=field_info.serialization_alias, + serialization_exclude_if=field_info.exclude_if, + metadata=metadata, ) def _generate_md_field_schema( @@ -1198,14 +1225,15 @@ def _generate_md_field_schema( decorators: DecoratorInfos, ) -> core_schema.ModelField: """Prepare a ModelField to represent a model field.""" - common_field = self._common_field_schema(name, field_info, decorators) + schema, metadata = self._common_field_schema(name, field_info, decorators) return core_schema.model_field( - common_field['schema'], - serialization_exclude=common_field['serialization_exclude'], - 
validation_alias=common_field['validation_alias'], - serialization_alias=common_field['serialization_alias'], - frozen=common_field['frozen'], - metadata=common_field['metadata'], + schema, + serialization_exclude=field_info.exclude, + validation_alias=_convert_to_aliases(field_info.validation_alias), + serialization_alias=field_info.serialization_alias, + serialization_exclude_if=field_info.exclude_if, + frozen=field_info.frozen, + metadata=metadata, ) def _generate_dc_field_schema( @@ -1215,138 +1243,24 @@ def _generate_dc_field_schema( decorators: DecoratorInfos, ) -> core_schema.DataclassField: """Prepare a DataclassField to represent the parameter/field, of a dataclass.""" - common_field = self._common_field_schema(name, field_info, decorators) + schema, metadata = self._common_field_schema(name, field_info, decorators) return core_schema.dataclass_field( name, - common_field['schema'], + schema, init=field_info.init, init_only=field_info.init_var or None, kw_only=None if field_info.kw_only else False, - serialization_exclude=common_field['serialization_exclude'], - validation_alias=common_field['validation_alias'], - serialization_alias=common_field['serialization_alias'], - frozen=common_field['frozen'], - metadata=common_field['metadata'], + serialization_exclude=field_info.exclude, + validation_alias=_convert_to_aliases(field_info.validation_alias), + serialization_alias=field_info.serialization_alias, + serialization_exclude_if=field_info.exclude_if, + frozen=field_info.frozen, + metadata=metadata, ) - @staticmethod - def _apply_alias_generator_to_field_info( - alias_generator: Callable[[str], str] | AliasGenerator, field_info: FieldInfo, field_name: str - ) -> None: - """Apply an alias_generator to aliases on a FieldInfo instance if appropriate. - - Args: - alias_generator: A callable that takes a string and returns a string, or an AliasGenerator instance. - field_info: The FieldInfo instance to which the alias_generator is (maybe) applied. 
- field_name: The name of the field from which to generate the alias. - """ - # Apply an alias_generator if - # 1. An alias is not specified - # 2. An alias is specified, but the priority is <= 1 - if ( - field_info.alias_priority is None - or field_info.alias_priority <= 1 - or field_info.alias is None - or field_info.validation_alias is None - or field_info.serialization_alias is None - ): - alias, validation_alias, serialization_alias = None, None, None - - if isinstance(alias_generator, AliasGenerator): - alias, validation_alias, serialization_alias = alias_generator.generate_aliases(field_name) - elif isinstance(alias_generator, Callable): - alias = alias_generator(field_name) - if not isinstance(alias, str): - raise TypeError(f'alias_generator {alias_generator} must return str, not {alias.__class__}') - - # if priority is not set, we set to 1 - # which supports the case where the alias_generator from a child class is used - # to generate an alias for a field in a parent class - if field_info.alias_priority is None or field_info.alias_priority <= 1: - field_info.alias_priority = 1 - - # if the priority is 1, then we set the aliases to the generated alias - if field_info.alias_priority == 1: - field_info.serialization_alias = _get_first_non_null(serialization_alias, alias) - field_info.validation_alias = _get_first_non_null(validation_alias, alias) - field_info.alias = alias - - # if any of the aliases are not set, then we set them to the corresponding generated alias - if field_info.alias is None: - field_info.alias = alias - if field_info.serialization_alias is None: - field_info.serialization_alias = _get_first_non_null(serialization_alias, alias) - if field_info.validation_alias is None: - field_info.validation_alias = _get_first_non_null(validation_alias, alias) - - @staticmethod - def _apply_alias_generator_to_computed_field_info( - alias_generator: Callable[[str], str] | AliasGenerator, - computed_field_info: ComputedFieldInfo, - computed_field_name: 
str, - ): - """Apply an alias_generator to alias on a ComputedFieldInfo instance if appropriate. - - Args: - alias_generator: A callable that takes a string and returns a string, or an AliasGenerator instance. - computed_field_info: The ComputedFieldInfo instance to which the alias_generator is (maybe) applied. - computed_field_name: The name of the computed field from which to generate the alias. - """ - # Apply an alias_generator if - # 1. An alias is not specified - # 2. An alias is specified, but the priority is <= 1 - - if ( - computed_field_info.alias_priority is None - or computed_field_info.alias_priority <= 1 - or computed_field_info.alias is None - ): - alias, validation_alias, serialization_alias = None, None, None - - if isinstance(alias_generator, AliasGenerator): - alias, validation_alias, serialization_alias = alias_generator.generate_aliases(computed_field_name) - elif isinstance(alias_generator, Callable): - alias = alias_generator(computed_field_name) - if not isinstance(alias, str): - raise TypeError(f'alias_generator {alias_generator} must return str, not {alias.__class__}') - - # if priority is not set, we set to 1 - # which supports the case where the alias_generator from a child class is used - # to generate an alias for a field in a parent class - if computed_field_info.alias_priority is None or computed_field_info.alias_priority <= 1: - computed_field_info.alias_priority = 1 - - # if the priority is 1, then we set the aliases to the generated alias - # note that we use the serialization_alias with priority over alias, as computed_field - # aliases are used for serialization only (not validation) - if computed_field_info.alias_priority == 1: - computed_field_info.alias = _get_first_non_null(serialization_alias, alias) - - @staticmethod - def _apply_field_title_generator_to_field_info( - config_wrapper: ConfigWrapper, field_info: FieldInfo | ComputedFieldInfo, field_name: str - ) -> None: - """Apply a field_title_generator on a FieldInfo or 
ComputedFieldInfo instance if appropriate - Args: - config_wrapper: The config of the model - field_info: The FieldInfo or ComputedField instance to which the title_generator is (maybe) applied. - field_name: The name of the field from which to generate the title. - """ - field_title_generator = field_info.field_title_generator or config_wrapper.field_title_generator - - if field_title_generator is None: - return - - if field_info.title is None: - title = field_title_generator(field_name, field_info) # type: ignore - if not isinstance(title, str): - raise TypeError(f'field_title_generator {field_title_generator} must return str, not {title.__class__}') - - field_info.title = title - def _common_field_schema( # C901 self, name: str, field_info: FieldInfo, decorators: DecoratorInfos - ) -> _CommonField: + ) -> tuple[CoreSchema, dict[str, Any]]: source_type, annotations = field_info.annotation, field_info.metadata def set_discriminator(schema: CoreSchema) -> CoreSchema: @@ -1354,9 +1268,10 @@ def set_discriminator(schema: CoreSchema) -> CoreSchema: return schema # Convert `@field_validator` decorators to `Before/After/Plain/WrapValidator` instances: - validators_from_decorators = [] - for decorator in filter_field_decorator_info_by_field(decorators.field_validators.values(), name): - validators_from_decorators.append(_mode_to_validator[decorator.info.mode]._from_decorator(decorator)) + validators_from_decorators = [ + _mode_to_validator[decorator.info.mode]._from_decorator(decorator) + for decorator in filter_field_decorator_info_by_field(decorators.field_validators.values(), name) + ] with self.field_name_stack.push(name): if field_info.discriminator is not None: @@ -1378,9 +1293,9 @@ def set_discriminator(schema: CoreSchema) -> CoreSchema: field_info.validate_default = True each_item_validators = [v for v in this_field_validators if v.info.each_item is True] this_field_validators = [v for v in this_field_validators if v not in each_item_validators] - schema = 
apply_each_item_validators(schema, each_item_validators, name) + schema = apply_each_item_validators(schema, each_item_validators) - schema = apply_validators(schema, this_field_validators, name) + schema = apply_validators(schema, this_field_validators) # the default validator needs to go outside of any other validators # so that it is the topmost validator for the field validator @@ -1391,7 +1306,6 @@ def set_discriminator(schema: CoreSchema) -> CoreSchema: schema = self._apply_field_serializers( schema, filter_field_decorator_info_by_field(decorators.field_serializers.values(), name) ) - self._apply_field_title_generator_to_field_info(self._config_wrapper, field_info, name) pydantic_js_updates, pydantic_js_extra = _extract_json_schema_info_from_field_info(field_info) core_metadata: dict[str, Any] = {} @@ -1399,23 +1313,7 @@ def set_discriminator(schema: CoreSchema) -> CoreSchema: core_metadata, pydantic_js_updates=pydantic_js_updates, pydantic_js_extra=pydantic_js_extra ) - alias_generator = self._config_wrapper.alias_generator - if alias_generator is not None: - self._apply_alias_generator_to_field_info(alias_generator, field_info, name) - - if isinstance(field_info.validation_alias, (AliasChoices, AliasPath)): - validation_alias = field_info.validation_alias.convert_to_aliases() - else: - validation_alias = field_info.validation_alias - - return _common_field( - schema, - serialization_exclude=True if field_info.exclude else None, - validation_alias=validation_alias, - serialization_alias=field_info.serialization_alias, - frozen=field_info.frozen, - metadata=core_metadata, - ) + return schema, core_metadata def _union_schema(self, union_type: Any) -> core_schema.CoreSchema: """Generate schema for a Union.""" @@ -1523,6 +1421,7 @@ def _typed_dict_schema(self, typed_dict_cls: Any, origin: Any) -> core_schema.Co fields: dict[str, core_schema.TypedDictField] = {} decorators = DecoratorInfos.build(typed_dict_cls) + 
decorators.update_from_config(self._config_wrapper) if self._config_wrapper.use_attribute_docstrings: field_docstrings = extract_docstrings_from_cls(typed_dict_cls, use_inspect=True) @@ -1552,7 +1451,8 @@ def _typed_dict_schema(self, typed_dict_cls: Any, origin: Any) -> core_schema.Co and field_name in field_docstrings ): field_info.description = field_docstrings[field_name] - self._apply_field_title_generator_to_field_info(self._config_wrapper, field_info, field_name) + update_field_from_config(self._config_wrapper, field_name, field_info) + fields[field_name] = self._generate_td_field_schema( field_name, field_info, decorators, required=required ) @@ -1567,6 +1467,35 @@ def _typed_dict_schema(self, typed_dict_cls: Any, origin: Any) -> core_schema.Co UserWarning, ) + extra_behavior: core_schema.ExtraBehavior = 'ignore' + extras_schema: CoreSchema | None = None # For 'allow', equivalent to `Any` - no validation performed. + + # `__closed__` is `None` when not specified (equivalent to `False`): + is_closed = bool(getattr(typed_dict_cls, '__closed__', False)) + extra_items = getattr(typed_dict_cls, '__extra_items__', typing_extensions.NoExtraItems) + if is_closed: + extra_behavior = 'forbid' + extras_schema = None + elif not typing_objects.is_noextraitems(extra_items): + extra_behavior = 'allow' + extras_schema = self.generate_schema(replace_types(extra_items, typevars_map)) + + if (config_extra := self._config_wrapper.extra) in ('allow', 'forbid'): + if is_closed and config_extra == 'allow': + warnings.warn( + f"TypedDict class {typed_dict_cls.__qualname__!r} is closed, but 'extra' configuration " + "is set to `'allow'`. The 'extra' configuration value will be ignored.", + category=TypedDictExtraConfigWarning, + ) + elif not typing_objects.is_noextraitems(extra_items) and config_extra == 'forbid': + warnings.warn( + f"TypedDict class {typed_dict_cls.__qualname__!r} allows extra items, but 'extra' configuration " + "is set to `'forbid'`. 
The 'extra' configuration value will be ignored.", + category=TypedDictExtraConfigWarning, + ) + else: + extra_behavior = config_extra + td_schema = core_schema.typed_dict_schema( fields, cls=typed_dict_cls, @@ -1574,6 +1503,8 @@ def _typed_dict_schema(self, typed_dict_cls: Any, origin: Any) -> core_schema.Co self._computed_field_schema(d, decorators.field_serializers) for d in decorators.computed_fields.values() ], + extra_behavior=extra_behavior, + extras_schema=extras_schema, ref=typed_dict_ref, config=core_config, ) @@ -1603,7 +1534,7 @@ def _namedtuple_schema(self, namedtuple_cls: Any, origin: Any) -> core_schema.Co raise PydanticUndefinedAnnotation.from_name_error(e) from e if not annotations: # annotations is empty, happens if namedtuple_cls defined via collections.namedtuple(...) - annotations: dict[str, Any] = {k: Any for k in namedtuple_cls._fields} + annotations: dict[str, Any] = dict.fromkeys(namedtuple_cls._fields, Any) if typevars_map: annotations = { @@ -1645,24 +1576,30 @@ def _generate_parameter_schema( field = FieldInfo.from_annotation(annotation, _source=source) else: field = FieldInfo.from_annotated_attribute(annotation, default, _source=source) + assert field.annotation is not None, 'field.annotation should not be None when generating a schema' + update_field_from_config(self._config_wrapper, name, field) + with self.field_name_stack.push(name): - schema = self._apply_annotations(field.annotation, [field]) + schema = self._apply_annotations( + field.annotation, + [field], + # Because we pass `field` as metadata above (required for attributes relevant for + # JSON Schema generation), we need to ignore the potential warnings about `FieldInfo` + # attributes that will not be used: + check_unsupported_field_info_attributes=False, + ) if not field.is_required(): schema = wrap_default(field, schema) - parameter_schema = core_schema.arguments_parameter(name, schema) - if mode is not None: - parameter_schema['mode'] = mode - if field.alias is not None:
- parameter_schema['alias'] = field.alias - else: - alias_generator = self._config_wrapper.alias_generator - if isinstance(alias_generator, AliasGenerator) and alias_generator.alias is not None: - parameter_schema['alias'] = alias_generator.alias(name) - elif callable(alias_generator): - parameter_schema['alias'] = alias_generator(name) + parameter_schema = core_schema.arguments_parameter( + name, + schema, + mode=mode, + alias=_convert_to_aliases(field.validation_alias), + ) + return parameter_schema def _generate_parameter_v3_schema( @@ -1691,9 +1628,17 @@ def _generate_parameter_v3_schema( field = FieldInfo.from_annotation(annotation, _source=source) else: field = FieldInfo.from_annotated_attribute(annotation, default, _source=source) + update_field_from_config(self._config_wrapper, name, field) with self.field_name_stack.push(name): - schema = self._apply_annotations(field.annotation, [field]) + schema = self._apply_annotations( + field.annotation, + [field], + # Because we pass `field` as metadata above (required for attributes relevant for + # JSON Schema generation), we need to ignore the potential warnings about `FieldInfo` + # attributes that will not be used: + check_unsupported_field_info_attributes=False, + ) if not field.is_required(): schema = wrap_default(field, schema) @@ -1702,15 +1647,8 @@ def _generate_parameter_v3_schema( name=name, schema=schema, mode=mode, + alias=_convert_to_aliases(field.validation_alias), ) - if field.alias is not None: - parameter_schema['alias'] = field.alias - else: - alias_generator = self._config_wrapper.alias_generator - if isinstance(alias_generator, AliasGenerator) and alias_generator.alias is not None: - parameter_schema['alias'] = alias_generator.alias(name) - elif callable(alias_generator): - parameter_schema['alias'] = alias_generator(name) return parameter_schema @@ -1912,14 +1850,27 @@ def _dataclass_schema( with self._ns_resolver.push(dataclass), self._config_wrapper_stack.push(config): if
is_pydantic_dataclass(dataclass): - # Copy the field info instances to avoid mutating the `FieldInfo` instances - # of the generic dataclass generic origin (e.g. `apply_typevars_map` below). - # Note that we don't apply `deepcopy` on `__pydantic_fields__` because we - # don't want to copy the `FieldInfo` attributes: - fields = {f_name: copy(field_info) for f_name, field_info in dataclass.__pydantic_fields__.items()} - if typevars_map: - for field in fields.values(): - field.apply_typevars_map(typevars_map, *self._types_namespace) + if dataclass.__pydantic_fields_complete__(): + # Copy the field info instances to avoid mutating the `FieldInfo` instances + # of the generic dataclass generic origin (e.g. `apply_typevars_map` below). + # Note that we don't apply `deepcopy` on `__pydantic_fields__` because we + # don't want to copy the `FieldInfo` attributes: + fields = { + f_name: copy(field_info) for f_name, field_info in dataclass.__pydantic_fields__.items() + } + if typevars_map: + for field in fields.values(): + field.apply_typevars_map(typevars_map, *self._types_namespace) + else: + try: + fields = rebuild_dataclass_fields( + dataclass, + config_wrapper=self._config_wrapper, + ns_resolver=self._ns_resolver, + typevars_map=typevars_map or {}, + ) + except NameError as e: + raise PydanticUndefinedAnnotation.from_name_error(e) from e else: fields = collect_dataclass_fields( dataclass, @@ -1937,7 +1888,10 @@ def _dataclass_schema( code='dataclass-init-false-extra-allow', ) - decorators = dataclass.__dict__.get('__pydantic_decorators__') or DecoratorInfos.build(dataclass) + decorators = dataclass.__dict__.get('__pydantic_decorators__') + if decorators is None: + decorators = DecoratorInfos.build(dataclass) + decorators.update_from_config(self._config_wrapper) # Move kw_only=False args to the start of the list, as this is how vanilla dataclasses work. 
# Note that when kw_only is missing or None, it is treated as equivalent to kw_only=True args = sorted( @@ -1957,7 +1911,7 @@ def _dataclass_schema( collect_init_only=has_post_init, ) - inner_schema = apply_validators(args_schema, decorators.root_validators.values(), None) + inner_schema = apply_validators(args_schema, decorators.root_validators.values()) model_validators = decorators.model_validators.values() inner_schema = apply_model_validators(inner_schema, model_validators, 'inner') @@ -2151,23 +2105,20 @@ def _arguments_v3_schema( def _unsubstituted_typevar_schema(self, typevar: typing.TypeVar) -> core_schema.CoreSchema: try: - has_default = typevar.has_default() + has_default = typevar.has_default() # pyright: ignore[reportAttributeAccessIssue] except AttributeError: # Happens if using `typing.TypeVar` (and not `typing_extensions`) on Python < 3.13 pass else: if has_default: - return self.generate_schema(typevar.__default__) + return self.generate_schema(typevar.__default__) # pyright: ignore[reportAttributeAccessIssue] if constraints := typevar.__constraints__: return self._union_schema(typing.Union[constraints]) if bound := typevar.__bound__: schema = self.generate_schema(bound) - schema['serialization'] = core_schema.wrap_serializer_function_ser_schema( - lambda x, h: h(x), - schema=core_schema.any_schema(), - ) + schema['serialization'] = core_schema.simple_ser_schema('any') return schema return core_schema.any_schema() @@ -2205,13 +2156,6 @@ def _computed_field_schema( filter_field_decorator_info_by_field(field_serializers.values(), d.cls_var_name), ) - alias_generator = self._config_wrapper.alias_generator - if alias_generator is not None: - self._apply_alias_generator_to_computed_field_info( - alias_generator=alias_generator, computed_field_info=d.info, computed_field_name=d.cls_var_name - ) - self._apply_field_title_generator_to_field_info(self._config_wrapper, d.info, d.cls_var_name) - pydantic_js_updates, pydantic_js_extra = 
_extract_json_schema_info_from_field_info(d.info) core_metadata: dict[str, Any] = {} update_core_metadata( @@ -2243,6 +2187,7 @@ def _apply_annotations( source_type: Any, annotations: list[Any], transform_inner_schema: Callable[[CoreSchema], CoreSchema] = lambda x: x, + check_unsupported_field_info_attributes: bool = True, ) -> CoreSchema: """Apply arguments from `Annotated` or from `FieldInfo` to a schema. @@ -2273,7 +2218,10 @@ def inner_handler(obj: Any) -> CoreSchema: if annotation is None: continue get_inner_schema = self._get_wrapped_inner_schema( - get_inner_schema, annotation, pydantic_js_annotation_functions + get_inner_schema, + annotation, + pydantic_js_annotation_functions, + check_unsupported_field_info_attributes=check_unsupported_field_info_attributes, ) schema = get_inner_schema(source_type) @@ -2282,10 +2230,42 @@ def inner_handler(obj: Any) -> CoreSchema: update_core_metadata(core_metadata, pydantic_js_annotation_functions=pydantic_js_annotation_functions) return _add_custom_serialization_from_json_encoders(self._config_wrapper.json_encoders, source_type, schema) - def _apply_single_annotation(self, schema: core_schema.CoreSchema, metadata: Any) -> core_schema.CoreSchema: + def _apply_single_annotation( + self, + schema: core_schema.CoreSchema, + metadata: Any, + check_unsupported_field_info_attributes: bool = True, + ) -> core_schema.CoreSchema: FieldInfo = import_cached_field_info() if isinstance(metadata, FieldInfo): + if ( + check_unsupported_field_info_attributes + # HACK: we don't want to emit the warning for `FieldInfo` subclasses, because FastAPI does weird manipulations + # with its subclasses and their annotations: + and type(metadata) is FieldInfo + ): + for attr, value in (unsupported_attributes := self._get_unsupported_field_info_attributes(metadata)): + warnings.warn( + f'The {attr!r} attribute with value {value!r} was provided to the `Field()` function, ' + f'which has no effect in the context it was used. 
{attr!r} is field-specific metadata, ' + 'and can only be attached to a model field using `Annotated` metadata or by assignment. ' + 'This may have happened because an `Annotated` type alias using the `type` statement was ' + 'used, or if the `Field()` function was attached to a single member of a union type.', + category=UnsupportedFieldAttributeWarning, + ) + + if ( + metadata.default_factory_takes_validated_data + and self.model_type_stack.get() is None + and 'default_factory' not in unsupported_attributes + ): + warnings.warn( + "A 'default_factory' taking validated data as an argument was provided to the `Field()` function, " + 'but no validated data is available in the context it was used.', + category=UnsupportedFieldAttributeWarning, + ) + + for field_metadata in metadata.metadata: + schema = self._apply_single_annotation(schema, field_metadata) @@ -2340,11 +2320,34 @@ def _apply_single_annotation_json_schema( ) return schema + def _get_unsupported_field_info_attributes(self, field_info: FieldInfo) -> list[tuple[str, Any]]: + """Get the list of unsupported `FieldInfo` attributes when not directly used in `Annotated` for field annotations.""" + unused_metadata: list[tuple[str, Any]] = [] + for unused_metadata_name, unset_value in UNSUPPORTED_STANDALONE_FIELDINFO_ATTRIBUTES: + if ( + (unused_metadata_value := getattr(field_info, unused_metadata_name)) is not unset_value + # `default` and `default_factory` can still be used with a type adapter, so only include them + # if used with a model-like class: + and ( + unused_metadata_name not in ('default', 'default_factory') + or self.model_type_stack.get() is not None + ) + # Setting `alias` will set `validation/serialization_alias` as well, so we want to avoid duplicate warnings: + and ( + unused_metadata_name not in ('validation_alias', 'serialization_alias') + or 'alias' not in field_info._attributes_set + ) + ): + unused_metadata.append((unused_metadata_name, unused_metadata_value)) + + return unused_metadata + + def 
_get_wrapped_inner_schema( self, get_inner_schema: GetCoreSchemaHandler, annotation: Any, pydantic_js_annotation_functions: list[GetJsonSchemaFunction], + check_unsupported_field_info_attributes: bool = False, ) -> CallbackGetCoreSchemaHandler: annotation_get_schema: GetCoreSchemaFunction | None = getattr(annotation, '__get_pydantic_core_schema__', None) @@ -2353,7 +2356,11 @@ def new_handler(source: Any) -> core_schema.CoreSchema: schema = annotation_get_schema(source, get_inner_schema) else: schema = get_inner_schema(source) - schema = self._apply_single_annotation(schema, annotation) + schema = self._apply_single_annotation( + schema, + annotation, + check_unsupported_field_info_attributes=check_unsupported_field_info_attributes, + ) schema = self._apply_single_annotation_json_schema(schema, annotation) metadata_js_function = _extract_get_pydantic_json_schema(annotation) @@ -2469,24 +2476,16 @@ def _apply_model_serializers( _VALIDATOR_F_MATCH: Mapping[ tuple[FieldValidatorModes, Literal['no-info', 'with-info']], - Callable[[Callable[..., Any], core_schema.CoreSchema, str | None], core_schema.CoreSchema], + Callable[[Callable[..., Any], core_schema.CoreSchema], core_schema.CoreSchema], ] = { - ('before', 'no-info'): lambda f, schema, _: core_schema.no_info_before_validator_function(f, schema), - ('after', 'no-info'): lambda f, schema, _: core_schema.no_info_after_validator_function(f, schema), - ('plain', 'no-info'): lambda f, _1, _2: core_schema.no_info_plain_validator_function(f), - ('wrap', 'no-info'): lambda f, schema, _: core_schema.no_info_wrap_validator_function(f, schema), - ('before', 'with-info'): lambda f, schema, field_name: core_schema.with_info_before_validator_function( - f, schema, field_name=field_name - ), - ('after', 'with-info'): lambda f, schema, field_name: core_schema.with_info_after_validator_function( - f, schema, field_name=field_name - ), - ('plain', 'with-info'): lambda f, _, field_name: core_schema.with_info_plain_validator_function( 
- f, field_name=field_name - ), - ('wrap', 'with-info'): lambda f, schema, field_name: core_schema.with_info_wrap_validator_function( - f, schema, field_name=field_name - ), + ('before', 'no-info'): lambda f, schema: core_schema.no_info_before_validator_function(f, schema), + ('after', 'no-info'): lambda f, schema: core_schema.no_info_after_validator_function(f, schema), + ('plain', 'no-info'): lambda f, _: core_schema.no_info_plain_validator_function(f), + ('wrap', 'no-info'): lambda f, schema: core_schema.no_info_wrap_validator_function(f, schema), + ('before', 'with-info'): lambda f, schema: core_schema.with_info_before_validator_function(f, schema), + ('after', 'with-info'): lambda f, schema: core_schema.with_info_after_validator_function(f, schema), + ('plain', 'with-info'): lambda f, _: core_schema.with_info_plain_validator_function(f), + ('wrap', 'with-info'): lambda f, schema: core_schema.with_info_wrap_validator_function(f, schema), } @@ -2497,7 +2496,6 @@ def apply_validators( validators: Iterable[Decorator[RootValidatorDecoratorInfo]] | Iterable[Decorator[ValidatorDecoratorInfo]] | Iterable[Decorator[FieldValidatorDecoratorInfo]], - field_name: str | None, ) -> core_schema.CoreSchema: """Apply validators to a schema. @@ -2510,10 +2508,12 @@ def apply_validators( The updated schema. """ for validator in validators: - info_arg = inspect_validator(validator.func, validator.info.mode) + # Actually, type could be 'field' or 'model', but this is only used for deprecated + # decorators, so let's not worry about it. 
+ info_arg = inspect_validator(validator.func, mode=validator.info.mode, type='field') val_type = 'with-info' if info_arg else 'no-info' - schema = _VALIDATOR_F_MATCH[(validator.info.mode, val_type)](validator.func, schema, field_name) + schema = _VALIDATOR_F_MATCH[(validator.info.mode, val_type)](validator.func, schema) return schema @@ -2533,6 +2533,15 @@ def _validators_require_validate_default(validators: Iterable[Decorator[Validato return False +def _convert_to_aliases( + alias: str | AliasChoices | AliasPath | None, +) -> str | list[str | int] | list[list[str | int]] | None: + if isinstance(alias, (AliasChoices, AliasPath)): + return alias.convert_to_aliases() + else: + return alias + + def apply_model_validators( schema: core_schema.CoreSchema, validators: Iterable[Decorator[ModelValidatorDecoratorInfo]], @@ -2558,7 +2567,7 @@ def apply_model_validators( continue if mode == 'outer' and validator.info.mode == 'before': continue - info_arg = inspect_validator(validator.func, validator.info.mode) + info_arg = inspect_validator(validator.func, mode=validator.info.mode, type='model') if validator.info.mode == 'wrap': if info_arg: schema = core_schema.with_info_wrap_validator_function(function=validator.func, schema=schema) @@ -2637,34 +2646,6 @@ def _extract_get_pydantic_json_schema(tp: Any) -> GetJsonSchemaFunction | None: return js_modify_function -class _CommonField(TypedDict): - schema: core_schema.CoreSchema - validation_alias: str | list[str | int] | list[list[str | int]] | None - serialization_alias: str | None - serialization_exclude: bool | None - frozen: bool | None - metadata: dict[str, Any] - - -def _common_field( - schema: core_schema.CoreSchema, - *, - validation_alias: str | list[str | int] | list[list[str | int]] | None = None, - serialization_alias: str | None = None, - serialization_exclude: bool | None = None, - frozen: bool | None = None, - metadata: Any = None, -) -> _CommonField: - return { - 'schema': schema, - 'validation_alias': 
validation_alias, - 'serialization_alias': serialization_alias, - 'serialization_exclude': serialization_exclude, - 'frozen': frozen, - 'metadata': metadata, - } - - def resolve_original_schema(schema: CoreSchema, definitions: _Definitions) -> CoreSchema | None: if schema['type'] == 'definition-ref': return definitions.get_schema_from_ref(schema['schema_ref']) diff --git a/blimgui/dist64/pydantic/_internal/_generics.py b/blimgui/dist64/pydantic/_internal/_generics.py index 8013676..f9f88a6 100644 --- a/blimgui/dist64/pydantic/_internal/_generics.py +++ b/blimgui/dist64/pydantic/_internal/_generics.py @@ -1,5 +1,6 @@ from __future__ import annotations +import operator import sys import types import typing @@ -7,9 +8,10 @@ from collections.abc import Iterator, Mapping from contextlib import contextmanager from contextvars import ContextVar +from functools import reduce from itertools import zip_longest from types import prepare_class -from typing import TYPE_CHECKING, Annotated, Any, TypeVar +from typing import TYPE_CHECKING, Annotated, Any, TypedDict, TypeVar, cast from weakref import WeakValueDictionary import typing_extensions @@ -21,9 +23,6 @@ from ._forward_ref import PydanticRecursiveRef from ._utils import all_identical, is_model_class -if sys.version_info >= (3, 10): - from typing import _UnionGenericAlias # type: ignore[attr-defined] - if TYPE_CHECKING: from ..main import BaseModel @@ -97,7 +96,7 @@ def __delitem__(self, key: KT) -> None: _GENERIC_TYPES_CACHE: ContextVar[GenericTypesCache | None] = ContextVar('_GENERIC_TYPES_CACHE', default=None) -class PydanticGenericMetadata(typing_extensions.TypedDict): +class PydanticGenericMetadata(TypedDict): origin: type[BaseModel] | None # analogous to typing._GenericAlias.__origin__ args: tuple[Any, ...] # analogous to typing._GenericAlias.__args__ parameters: tuple[TypeVar, ...] 
# analogous to typing.Generic.__parameters__ @@ -256,12 +255,12 @@ def replace_types(type_: Any, type_map: Mapping[TypeVar, Any] | None) -> Any: Example: ```python - from typing import List, Union + from typing import Union from pydantic._internal._generics import replace_types - replace_types(tuple[str, Union[List[str], float]], {str: int}) - #> tuple[int, Union[List[int], float]] + replace_types(tuple[str, Union[list[str], float]], {str: int}) + #> tuple[int, Union[list[int], float]] ``` """ if not type_map: @@ -311,7 +310,7 @@ def replace_types(type_: Any, type_map: Mapping[TypeVar, Any] | None) -> Any: # PEP-604 syntax (Ex.: list | str) is represented with a types.UnionType object that does not have __getitem__. # We also cannot use isinstance() since we have to compare types. if sys.version_info >= (3, 10) and origin_type is types.UnionType: - return _UnionGenericAlias(origin_type, resolved_type_args) + return reduce(operator.or_, resolved_type_args) # NotRequired[T] and Required[T] don't support tuple type resolved_type_args, hence the condition below return origin_type[resolved_type_args[0] if len(resolved_type_args) == 1 else resolved_type_args] @@ -373,21 +372,21 @@ class Model[T, U, V = int](BaseModel): ... raise TypeError(f'Too many arguments for {cls}; actual {len(args)}, expected {expected_len}') if argument is _missing: - param = typing.cast(TypeVar, parameter) + param = cast(TypeVar, parameter) try: - has_default = param.has_default() + has_default = param.has_default() # pyright: ignore[reportAttributeAccessIssue] except AttributeError: # Happens if using `typing.TypeVar` (and not `typing_extensions`) on Python < 3.13. has_default = False if has_default: # The default might refer to other type parameters. 
For an example, see: - # https://typing.readthedocs.io/en/latest/spec/generics.html#type-parameters-as-parameters-to-generics - typevars_map[param] = replace_types(param.__default__, typevars_map) + # https://typing.python.org/en/latest/spec/generics.html#type-parameters-as-parameters-to-generics + typevars_map[param] = replace_types(param.__default__, typevars_map) # pyright: ignore[reportAttributeAccessIssue] else: - expected_len -= sum(hasattr(p, 'has_default') and p.has_default() for p in parameters) + expected_len -= sum(hasattr(p, 'has_default') and p.has_default() for p in parameters) # pyright: ignore[reportAttributeAccessIssue] raise TypeError(f'Too few arguments for {cls}; actual {len(args)}, expected at least {expected_len}') else: - param = typing.cast(TypeVar, parameter) + param = cast(TypeVar, parameter) typevars_map[param] = argument return typevars_map @@ -513,10 +512,7 @@ def _union_orderings_key(typevar_values: Any) -> Any: (See https://github.com/python/cpython/issues/86483 for reference.) """ if isinstance(typevar_values, tuple): - args_data = [] - for value in typevar_values: - args_data.append(_union_orderings_key(value)) - return tuple(args_data) + return tuple(_union_orderings_key(value) for value in typevar_values) elif typing_objects.is_union(typing_extensions.get_origin(typevar_values)): return get_args(typevar_values) else: diff --git a/blimgui/dist64/pydantic/_internal/_known_annotated_metadata.py b/blimgui/dist64/pydantic/_internal/_known_annotated_metadata.py index c127e27..7d61f4a 100644 --- a/blimgui/dist64/pydantic/_internal/_known_annotated_metadata.py +++ b/blimgui/dist64/pydantic/_internal/_known_annotated_metadata.py @@ -182,7 +182,8 @@ def apply_known_metadata(annotation: Any, schema: CoreSchema) -> CoreSchema | No An updated schema with annotation if it is an annotation we know about, `None` otherwise. Raises: - PydanticCustomError: If `Predicate` fails. 
+ RuntimeError: If a constraint can't be applied to a specific schema type. + ValueError: If an unknown constraint is encountered. """ import annotated_types as at @@ -296,26 +297,33 @@ def _apply_constraint_with_incompatibility_info( ) continue elif isinstance(annotation, (at.Predicate, at.Not)): - predicate_name = f'{annotation.func.__qualname__}' if hasattr(annotation.func, '__qualname__') else '' + predicate_name = f'{annotation.func.__qualname__!r} ' if hasattr(annotation.func, '__qualname__') else '' - def val_func(v: Any) -> Any: - predicate_satisfied = annotation.func(v) # noqa: B023 + # Note: B023 is ignored because even though we iterate over `other_metadata`, it is guaranteed + # to be of length 1. `apply_known_metadata()` is called from `GenerateSchema`, where annotations + # were already expanded via `expand_grouped_metadata()`. Confusing, but this falls into the annotations + # refactor. + if isinstance(annotation, at.Predicate): - # annotation.func may also raise an exception, let it pass through - if isinstance(annotation, at.Predicate): # noqa: B023 + def val_func(v: Any) -> Any: + predicate_satisfied = annotation.func(v) # noqa: B023 if not predicate_satisfied: raise PydanticCustomError( 'predicate_failed', - f'Predicate {predicate_name} failed', # type: ignore # noqa: B023 + f'Predicate {predicate_name}failed', # pyright: ignore[reportArgumentType] # noqa: B023 ) - else: + return v + + else: + + def val_func(v: Any) -> Any: + predicate_satisfied = annotation.func(v) # noqa: B023 if predicate_satisfied: raise PydanticCustomError( 'not_operation_failed', - f'Not of {predicate_name} failed', # type: ignore # noqa: B023 + f'Not of {predicate_name}failed', # pyright: ignore[reportArgumentType] # noqa: B023 ) - - return v + return v schema = cs.no_info_after_validator_function(val_func, schema) else: diff --git a/blimgui/dist64/pydantic/_internal/_mock_val_ser.py b/blimgui/dist64/pydantic/_internal/_mock_val_ser.py index 9125ab3..72f2766 100644 --- 
a/blimgui/dist64/pydantic/_internal/_mock_val_ser.py +++ b/blimgui/dist64/pydantic/_internal/_mock_val_ser.py @@ -3,7 +3,9 @@ from collections.abc import Iterator, Mapping from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, TypeVar, Union -from pydantic_core import CoreSchema, SchemaSerializer, SchemaValidator + +from pydantic_core.core_schema import CoreSchema +from pydantic_core._pydantic_core import SchemaSerializer, SchemaValidator from ..errors import PydanticErrorCodes, PydanticUserError from ..plugin._schema_validator import PluggableSchemaValidator diff --git a/blimgui/dist64/pydantic/_internal/_model_construction.py b/blimgui/dist64/pydantic/_internal/_model_construction.py index fd5d68b..4fe223c 100644 --- a/blimgui/dist64/pydantic/_internal/_model_construction.py +++ b/blimgui/dist64/pydantic/_internal/_model_construction.py @@ -2,7 +2,6 @@ from __future__ import annotations as _annotations -import builtins import operator import sys import typing @@ -11,7 +10,7 @@ from abc import ABCMeta from functools import cache, partial, wraps from types import FunctionType -from typing import Any, Callable, Generic, Literal, NoReturn, cast +from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, NoReturn, TypeVar, cast from pydantic_core import PydanticUndefined, SchemaSerializer from typing_extensions import TypeAliasType, dataclass_transform, deprecated, get_args, get_origin @@ -22,7 +21,7 @@ from ..warnings import GenericBeforeBaseModelWarning, PydanticDeprecatedSince20 from ._config import ConfigWrapper from ._decorators import DecoratorInfos, PydanticDescriptorProxy, get_attribute_from_bases, unwrap_wrapped_function -from ._fields import collect_model_fields, is_valid_field_name, is_valid_privateattr_name +from ._fields import collect_model_fields, is_valid_field_name, is_valid_privateattr_name, rebuild_model_fields from ._generate_schema import GenerateSchema, InvalidSchemaError from ._generics import PydanticGenericMetadata, 
get_model_typevars_map from ._import_utils import import_cached_base_model, import_cached_field_info @@ -37,15 +36,12 @@ ) from ._utils import LazyClassAttribute, SafeGetItemProxy -if typing.TYPE_CHECKING: +if TYPE_CHECKING: from ..fields import Field as PydanticModelField from ..fields import FieldInfo, ModelPrivateAttr from ..fields import PrivateAttr as PydanticModelPrivateAttr from ..main import BaseModel else: - # See PyCharm issues https://youtrack.jetbrains.com/issue/PY-21915 - # and https://youtrack.jetbrains.com/issue/PY-51428 - DeprecationWarning = PydanticDeprecatedSince20 PydanticModelField = object() PydanticModelPrivateAttr = object() @@ -60,7 +56,10 @@ class _ModelNamespaceDict(dict): def __setitem__(self, k: str, v: object) -> None: existing: Any = self.get(k, None) if existing and v is not existing and isinstance(existing, PydanticDescriptorProxy): - warnings.warn(f'`{k}` overrides an existing Pydantic `{existing.decorator_info.decorator_repr}` decorator') + warnings.warn( + f'`{k}` overrides an existing Pydantic `{existing.decorator_info.decorator_repr}` decorator', + stacklevel=2, + ) return super().__setitem__(k, v) @@ -75,6 +74,10 @@ def NoInitField( """ +# For ModelMetaclass.register(): +_T = TypeVar('_T') + + @dataclass_transform(kw_only_default=True, field_specifiers=(PydanticModelField, PydanticModelPrivateAttr, NoInitField)) class ModelMetaclass(ABCMeta): def __new__( @@ -105,12 +108,29 @@ def __new__( # that `BaseModel` itself won't have any bases, but any subclass of it will, to determine whether the `__new__` # call we're in the middle of is for the `BaseModel` class. 
if bases: + raw_annotations: dict[str, Any] + if sys.version_info >= (3, 14): + if ( + '__annotations__' in namespace + ): # `from __future__ import annotations` was used in the model's module + raw_annotations = namespace['__annotations__'] + else: + # See https://docs.python.org/3.14/library/annotationlib.html#using-annotations-in-a-metaclass: + from annotationlib import Format, call_annotate_function, get_annotate_from_class_namespace + + if annotate := get_annotate_from_class_namespace(namespace): + raw_annotations = call_annotate_function(annotate, format=Format.FORWARDREF) + else: + raw_annotations = {} + else: + raw_annotations = namespace.get('__annotations__', {}) + base_field_names, class_vars, base_private_attributes = mcs._collect_bases_data(bases) - config_wrapper = ConfigWrapper.for_model(bases, namespace, kwargs) + config_wrapper = ConfigWrapper.for_model(bases, namespace, raw_annotations, kwargs) namespace['model_config'] = config_wrapper.config_dict private_attributes = inspect_namespace( - namespace, config_wrapper.ignored_types, class_vars, base_field_names + namespace, raw_annotations, config_wrapper.ignored_types, class_vars, base_field_names ) if private_attributes or base_private_attributes: original_model_post_init = get_model_post_init(namespace, bases) @@ -153,6 +173,7 @@ def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None: cls.__pydantic_setattr_handlers__ = {} cls.__pydantic_decorators__ = DecoratorInfos.build(cls) + cls.__pydantic_decorators__.update_from_config(config_wrapper) # Use the getattr below to grab the __parameters__ from the `typing.Generic` parent class if __pydantic_generic_metadata__: @@ -227,9 +248,6 @@ def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None: } if config_wrapper.defer_build: - # TODO we can also stop there if `__pydantic_fields_complete__` is False. - # However, `set_model_fields()` is currently lenient and we don't have access to the `NameError`. 
- # (which is useful as we can provide the name in the error message: `set_model_mock(cls, e.name)`) set_model_mocks(cls) else: # Any operation that requires accessing the field infos instances should be put inside @@ -237,8 +255,8 @@ def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None: complete_model_class( cls, config_wrapper, + ns_resolver, raise_errors=False, - ns_resolver=ns_resolver, create_model_module=_create_model_module, ) @@ -260,7 +278,7 @@ def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None: namespace.get('__annotations__', {}).clear() return super().__new__(mcs, cls_name, bases, namespace, **kwargs) - if not typing.TYPE_CHECKING: # pragma: no branch + if not TYPE_CHECKING: # pragma: no branch # We put `__getattr__` in a non-TYPE_CHECKING block because otherwise, mypy allows arbitrary attribute access def __getattr__(self, item: str) -> Any: @@ -274,19 +292,20 @@ def __getattr__(self, item: str) -> Any: def __prepare__(cls, *args: Any, **kwargs: Any) -> dict[str, object]: return _ModelNamespaceDict() - def __instancecheck__(self, instance: Any) -> bool: - """Avoid calling ABC _abc_instancecheck unless we're pretty sure. - - See #3829 and python/cpython#92810 - """ - return hasattr(instance, '__pydantic_decorators__') and super().__instancecheck__(instance) - - def __subclasscheck__(self, subclass: type[Any]) -> bool: - """Avoid calling ABC _abc_subclasscheck unless we're pretty sure. + # Due to performance and memory issues, in the ABCMeta.__subclasscheck__ implementation, we don't support + # registered virtual subclasses. See https://github.com/python/cpython/issues/92810#issuecomment-2762454345. + # This may change once the CPython gets fixed (possibly in 3.15), in which case we should conditionally + # define `register()`. 
+ def register(self, subclass: type[_T]) -> type[_T]: + warnings.warn( + f"For performance reasons, virtual subclasses registered using '{self.__qualname__}.register()' " + "are not supported in 'isinstance()' and 'issubclass()' checks.", + stacklevel=2, + ) + return super().register(subclass) - See #3829 and python/cpython#92810 - """ - return hasattr(subclass, '__pydantic_decorators__') and super().__subclasscheck__(subclass) + __instancecheck__ = type.__instancecheck__ # pyright: ignore[reportAssignmentType] + __subclasscheck__ = type.__subclasscheck__ # pyright: ignore[reportAssignmentType] @staticmethod def _collect_bases_data(bases: tuple[type[Any], ...]) -> tuple[set[str], set[str], dict[str, ModelPrivateAttr]]: @@ -304,10 +323,12 @@ def _collect_bases_data(bases: tuple[type[Any], ...]) -> tuple[set[str], set[str return field_names, class_vars, private_attributes @property - @deprecated('The `__fields__` attribute is deprecated, use `model_fields` instead.', category=None) + @deprecated( + 'The `__fields__` attribute is deprecated, use the `model_fields` class property instead.', category=None + ) def __fields__(self) -> dict[str, FieldInfo]: warnings.warn( - 'The `__fields__` attribute is deprecated, use `model_fields` instead.', + 'The `__fields__` attribute is deprecated, use the `model_fields` class property instead.', PydanticDeprecatedSince20, stacklevel=2, ) @@ -319,7 +340,7 @@ def __pydantic_fields_complete__(self) -> bool: This is a private attribute, not meant to be used outside Pydantic. 
""" - if not hasattr(self, '__pydantic_fields__'): + if '__pydantic_fields__' not in self.__dict__: return False field_infos = cast('dict[str, FieldInfo]', self.__pydantic_fields__) # pyright: ignore[reportAttributeAccessIssue] @@ -365,6 +386,7 @@ def get_model_post_init(namespace: dict[str, Any], bases: tuple[type[Any], ...]) def inspect_namespace( # noqa C901 namespace: dict[str, Any], + raw_annotations: dict[str, Any], ignored_types: tuple[type[Any], ...], base_class_vars: set[str], base_class_fields: set[str], @@ -375,6 +397,7 @@ def inspect_namespace( # noqa C901 Args: namespace: The attribute dictionary of the class to be created. + raw_annotations: The (non-evaluated) annotations of the model. ignored_types: A tuple of ignore types. base_class_vars: A set of base class class variables. base_class_fields: A set of base class fields. @@ -396,7 +419,6 @@ def inspect_namespace( # noqa C901 all_ignored_types = ignored_types + default_ignored_types() private_attributes: dict[str, ModelPrivateAttr] = {} - raw_annotations = namespace.get('__annotations__', {}) if '__root__' in raw_annotations or '__root__' in namespace: raise TypeError("To define root models, use `pydantic.RootModel` rather than a field called '__root__'") @@ -409,7 +431,7 @@ def inspect_namespace( # noqa C901 isinstance(value, type) and value.__module__ == namespace['__module__'] and '__qualname__' in namespace - and value.__qualname__.startswith(namespace['__qualname__']) + and value.__qualname__.startswith(f'{namespace["__qualname__"]}.') ): # `value` is a nested type defined in this namespace; don't error continue @@ -562,9 +584,10 @@ def set_model_fields( def complete_model_class( cls: type[BaseModel], config_wrapper: ConfigWrapper, + ns_resolver: NsResolver, *, raise_errors: bool = True, - ns_resolver: NsResolver | None = None, + call_on_complete_hook: bool = True, create_model_module: str | None = None, ) -> bool: """Finish building a model class. 
@@ -575,8 +598,9 @@ def complete_model_class( Args: cls: BaseModel or dataclass. config_wrapper: The config wrapper instance. - raise_errors: Whether to raise errors. ns_resolver: The namespace resolver instance to use during schema building. + raise_errors: Whether to raise errors. + call_on_complete_hook: Whether to call the `__pydantic_on_complete__` hook. create_model_module: The module of the class to be created, if created by `create_model`. Returns: @@ -587,6 +611,33 @@ def complete_model_class( and `raise_errors=True`. """ typevars_map = get_model_typevars_map(cls) + + if not cls.__pydantic_fields_complete__: + # Note: when coming from `ModelMetaclass.__new__()`, this results in fields being built twice. + # We do so a second time here so that we can get the `NameError` for the specific undefined annotation. + # Alternatively, we could let `GenerateSchema()` raise the error, but there are cases where incomplete + # fields are inherited in `collect_model_fields()` and can actually have their annotation resolved in the + # generate schema process. 
As we want to avoid having `__pydantic_fields_complete__` set to `False` + # when `__pydantic_complete__` is `True`, we rebuild here: + try: + cls.__pydantic_fields__ = rebuild_model_fields( + cls, + config_wrapper=config_wrapper, + ns_resolver=ns_resolver, + typevars_map=typevars_map, + ) + except NameError as e: + exc = PydanticUndefinedAnnotation.from_name_error(e) + set_model_mocks(cls, f'`{exc.name}`') + if raise_errors: + raise exc from e + + if not raise_errors and not cls.__pydantic_fields_complete__: + # No need to continue with schema gen, it is guaranteed to fail + return False + + assert cls.__pydantic_fields_complete__ + gen_schema = GenerateSchema( config_wrapper, ns_resolver, @@ -627,7 +678,6 @@ def complete_model_class( config_wrapper.plugin_settings, ) cls.__pydantic_serializer__ = SchemaSerializer(schema, core_config) - cls.__pydantic_complete__ = True # set __signature__ attr only for model class, but not for its instances # (because instances can define `__call__`, and `inspect.signature` shouldn't @@ -642,6 +692,12 @@ def complete_model_class( extra=config_wrapper.extra, ), ) + + cls.__pydantic_complete__ = True + + if call_on_complete_hook: + cls.__pydantic_on_complete__() + return True @@ -688,7 +744,7 @@ def __get__(self, obj: BaseModel | None, obj_type: type[BaseModel] | None = None return self.wrapped_property.__get__(None, obj_type) raise AttributeError(self.field_name) - warnings.warn(self.msg, builtins.DeprecationWarning, stacklevel=2) + warnings.warn(self.msg, DeprecationWarning, stacklevel=2) if self.wrapped_property is not None: return self.wrapped_property.__get__(obj, obj_type) diff --git a/blimgui/dist64/pydantic/_internal/_namespace_utils.py b/blimgui/dist64/pydantic/_internal/_namespace_utils.py index 781dfa2..af0cddb 100644 --- a/blimgui/dist64/pydantic/_internal/_namespace_utils.py +++ b/blimgui/dist64/pydantic/_internal/_namespace_utils.py @@ -134,7 +134,7 @@ def ns_for_function(obj: Callable[..., Any], parent_namespace: 
MappingNamespace locals_list.append({t.__name__: t for t in type_params}) - # What about short-cirtuiting to `obj.__globals__`? + # What about short-circuiting to `obj.__globals__`? globalns = get_module_ns_of(obj) return NamespacesTuple(globalns, LazyLocalNamespace(*locals_list)) diff --git a/blimgui/dist64/pydantic/_internal/_repr.py b/blimgui/dist64/pydantic/_internal/_repr.py index bf3cae5..7e80a9c 100644 --- a/blimgui/dist64/pydantic/_internal/_repr.py +++ b/blimgui/dist64/pydantic/_internal/_repr.py @@ -3,20 +3,20 @@ from __future__ import annotations as _annotations import types -import typing -from typing import Any +from collections.abc import Callable, Collection, Generator, Iterable +from typing import TYPE_CHECKING, Any, ForwardRef, cast import typing_extensions +from typing_extensions import TypeAlias from typing_inspection import typing_objects from typing_inspection.introspection import is_union_origin from . import _typing_extra -if typing.TYPE_CHECKING: - ReprArgs: typing_extensions.TypeAlias = 'typing.Iterable[tuple[str | None, Any]]' - RichReprResult: typing_extensions.TypeAlias = ( - 'typing.Iterable[Any | tuple[Any] | tuple[str, Any] | tuple[str, Any, Any]]' - ) +if TYPE_CHECKING: + # TODO remove type error comments when we drop support for Python 3.9 + ReprArgs: TypeAlias = Iterable[tuple[str | None, Any]] # pyright: ignore[reportGeneralTypeIssues] + RichReprResult: TypeAlias = Iterable[Any | tuple[Any] | tuple[str, Any] | tuple[str, Any, Any]] # pyright: ignore[reportGeneralTypeIssues] class PlainRepr(str): @@ -34,8 +34,7 @@ class Representation: # `__rich_repr__` is used by [rich](https://rich.readthedocs.io/en/stable/pretty.html). 
# (this is not a docstring to avoid adding a docstring to classes which inherit from Representation) - # we don't want to use a type annotation here as it can break get_type_hints - __slots__ = () # type: typing.Collection[str] + __slots__ = () def __repr_args__(self) -> ReprArgs: """Returns the attributes to show in __str__, __repr__, and __pretty__ this is generally overridden. @@ -44,7 +43,7 @@ def __repr_args__(self) -> ReprArgs: * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]` * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]` """ - attrs_names = self.__slots__ + attrs_names = cast(Collection[str], self.__slots__) if not attrs_names and hasattr(self, '__dict__'): attrs_names = self.__dict__.keys() attrs = ((s, getattr(self, s)) for s in attrs_names) @@ -62,7 +61,7 @@ def __repr_recursion__(self, object: Any) -> str: def __repr_str__(self, join_str: str) -> str: return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__()) - def __pretty__(self, fmt: typing.Callable[[Any], Any], **kwargs: Any) -> typing.Generator[Any, None, None]: + def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any]: """Used by devtools (https://python-devtools.helpmanual.io/) to pretty print objects.""" yield self.__repr_name__() + '(' yield 1 @@ -101,7 +100,7 @@ def display_as_type(obj: Any) -> str: return '...' 
elif isinstance(obj, Representation): return repr(obj) - elif isinstance(obj, typing.ForwardRef) or typing_objects.is_typealiastype(obj): + elif isinstance(obj, ForwardRef) or typing_objects.is_typealiastype(obj): return str(obj) if not isinstance(obj, (_typing_extra.typing_base, _typing_extra.WithArgsTypes, type)): diff --git a/blimgui/dist64/pydantic/_internal/_serializers.py b/blimgui/dist64/pydantic/_internal/_serializers.py index d059321..a4058e0 100644 --- a/blimgui/dist64/pydantic/_internal/_serializers.py +++ b/blimgui/dist64/pydantic/_internal/_serializers.py @@ -42,7 +42,7 @@ def serialize_sequence_via_list( for index, item in enumerate(v): try: v = handler(item, index) - except PydanticOmit: + except PydanticOmit: # noqa: PERF203 pass else: items.append(v) diff --git a/blimgui/dist64/pydantic/_internal/_typing_extra.py b/blimgui/dist64/pydantic/_internal/_typing_extra.py index 4be1a09..986ee42 100644 --- a/blimgui/dist64/pydantic/_internal/_typing_extra.py +++ b/blimgui/dist64/pydantic/_internal/_typing_extra.py @@ -26,6 +26,9 @@ from types import EllipsisType as EllipsisType from types import NoneType as NoneType +if sys.version_info >= (3, 14): + import annotationlib + if TYPE_CHECKING: from pydantic import BaseModel @@ -289,6 +292,19 @@ def _type_convert(arg: Any) -> Any: return arg +def safe_get_annotations(cls: type[Any]) -> dict[str, Any]: + """Get the annotations for the provided class, accounting for potential deferred forward references. + + Starting with Python 3.14, accessing the `__annotations__` attribute might raise a `NameError` if + a referenced symbol isn't defined yet. In this case, we return the annotation in the *forward ref* + format. 
+ """ + if sys.version_info >= (3, 14): + return annotationlib.get_annotations(cls, format=annotationlib.Format.FORWARDREF) + else: + return cls.__dict__.get('__annotations__', {}) + + def get_model_type_hints( obj: type[BaseModel], *, @@ -309,9 +325,14 @@ def get_model_type_hints( ns_resolver = ns_resolver or NsResolver() for base in reversed(obj.__mro__): - ann: dict[str, Any] | None = base.__dict__.get('__annotations__') - if not ann or isinstance(ann, types.GetSetDescriptorType): + # For Python 3.14, we could also use `Format.VALUE` and pass the globals/locals + # from the ns_resolver, but we want to be able to know which specific field failed + # to evaluate: + ann = safe_get_annotations(base) + + if not ann: continue + with ns_resolver.push(base): globalns, localns = ns_resolver.types_namespace for name, value in ann.items(): @@ -341,13 +362,18 @@ def get_cls_type_hints( obj: The class to inspect. ns_resolver: A namespace resolver instance to use. Defaults to an empty instance. """ - hints: dict[str, Any] | dict[str, tuple[Any, bool]] = {} + hints: dict[str, Any] = {} ns_resolver = ns_resolver or NsResolver() for base in reversed(obj.__mro__): - ann: dict[str, Any] | None = base.__dict__.get('__annotations__') - if not ann or isinstance(ann, types.GetSetDescriptorType): + # For Python 3.14, we could also use `Format.VALUE` and pass the globals/locals + # from the ns_resolver, but we want to be able to know which specific field failed + # to evaluate: + ann = safe_get_annotations(base) + + if not ann: continue + with ns_resolver.push(base): globalns, localns = ns_resolver.types_namespace for name, value in ann.items(): @@ -492,7 +518,26 @@ def _eval_type( localns: MappingNamespace | None = None, type_params: tuple[Any, ...] | None = None, ) -> Any: - if sys.version_info >= (3, 13): + if sys.version_info >= (3, 14): + # Starting in 3.14, `_eval_type()` does *not* apply `_type_convert()` + # anymore. 
This means the `None` -> `type(None)` conversion does not apply: + evaluated = typing._eval_type( # type: ignore + value, + globalns, + localns, + type_params=type_params, + # This is relevant when evaluating types from `TypedDict` classes, where string annotations + # are automatically converted to `ForwardRef` instances with a module set. In this case, + # Our `globalns` is irrelevant and we need to indicate `typing._eval_type()` that it should + # infer it from the `ForwardRef.__forward_module__` attribute instead (`typing.get_type_hints()` + # does the same). Note that this would probably be unnecessary if we properly iterated over the + # `__orig_bases__` for TypedDicts in `get_cls_type_hints()`: + prefer_fwd_module=True, + ) + if evaluated is None: + evaluated = type(None) + return evaluated + elif sys.version_info >= (3, 13): return typing._eval_type( # type: ignore value, globalns, localns, type_params=type_params ) @@ -553,6 +598,7 @@ def get_function_type_hints( return type_hints +# TODO use typing.ForwardRef directly when we stop supporting 3.9: if sys.version_info < (3, 9, 8) or (3, 10) <= sys.version_info < (3, 10, 1): def _make_forward_ref( @@ -572,10 +618,10 @@ def _make_forward_ref( Implemented as EAFP with memory. 
""" - return typing.ForwardRef(arg, is_argument) + return typing.ForwardRef(arg, is_argument) # pyright: ignore[reportCallIssue] else: - _make_forward_ref = typing.ForwardRef + _make_forward_ref = typing.ForwardRef # pyright: ignore[reportAssignmentType] if sys.version_info >= (3, 10): diff --git a/blimgui/dist64/pydantic/_internal/_utils.py b/blimgui/dist64/pydantic/_internal/_utils.py index f334649..7eae1b7 100644 --- a/blimgui/dist64/pydantic/_internal/_utils.py +++ b/blimgui/dist64/pydantic/_internal/_utils.py @@ -8,18 +8,19 @@ import dataclasses import keyword import sys -import typing import warnings import weakref from collections import OrderedDict, defaultdict, deque -from collections.abc import Mapping +from collections.abc import Callable, Iterable, Mapping +from collections.abc import Set as AbstractSet from copy import deepcopy from functools import cached_property from inspect import Parameter from itertools import zip_longest from types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType -from typing import Any, Callable, Generic, TypeVar, overload +from typing import TYPE_CHECKING, Any, Generic, TypeVar, overload +from pydantic_core import MISSING from typing_extensions import TypeAlias, TypeGuard, deprecated from pydantic import PydanticDeprecatedSince211 @@ -27,9 +28,10 @@ from . 
import _repr, _typing_extra from ._import_utils import import_cached_base_model -if typing.TYPE_CHECKING: - MappingIntStrAny: TypeAlias = 'typing.Mapping[int, Any] | typing.Mapping[str, Any]' - AbstractSetIntStr: TypeAlias = 'typing.AbstractSet[int] | typing.AbstractSet[str]' +if TYPE_CHECKING: + # TODO remove type error comments when we drop support for Python 3.9 + MappingIntStrAny: TypeAlias = Mapping[int, Any] | Mapping[str, Any] # pyright: ignore[reportGeneralTypeIssues] + AbstractSetIntStr: TypeAlias = AbstractSet[int] | AbstractSet[str] # pyright: ignore[reportGeneralTypeIssues] from ..main import BaseModel @@ -149,7 +151,7 @@ def update_not_none(mapping: dict[Any, Any], **update: Any) -> None: def unique_list( input_list: list[T] | tuple[T, ...], *, - name_factory: typing.Callable[[T], str] = str, + name_factory: Callable[[T], str] = str, ) -> list[T]: """Make a list unique while maintaining order. We update the list if another one with the same name is set @@ -214,7 +216,7 @@ def _normalize_indexes(self, items: MappingIntStrAny, v_length: int) -> dict[int normalized_items: dict[int | str, Any] = {} all_items = None for i, v in items.items(): - if not (isinstance(v, typing.Mapping) or isinstance(v, typing.AbstractSet) or self.is_true(v)): + if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or self.is_true(v)): raise TypeError(f'Unexpected type of exclude value for index "{i}" {v.__class__}') if i == '__all__': all_items = self._coerce_value(v) @@ -279,9 +281,9 @@ def merge(cls, base: Any, override: Any, intersect: bool = False) -> Any: @staticmethod def _coerce_items(items: AbstractSetIntStr | MappingIntStrAny) -> MappingIntStrAny: - if isinstance(items, typing.Mapping): + if isinstance(items, Mapping): pass - elif isinstance(items, typing.AbstractSet): + elif isinstance(items, AbstractSet): items = dict.fromkeys(items, ...) 
# type: ignore else: class_name = getattr(items, '__class__', '???') @@ -302,7 +304,7 @@ def __repr_args__(self) -> _repr.ReprArgs: return [(None, self._items)] -if typing.TYPE_CHECKING: +if TYPE_CHECKING: def LazyClassAttribute(name: str, get_value: Callable[[], T]) -> T: ... @@ -336,6 +338,8 @@ def smart_deepcopy(obj: Obj) -> Obj: Use obj.copy() for built-in empty collections Use copy.deepcopy() for non-empty collections and unknown objects. """ + if obj is MISSING: + return obj # pyright: ignore[reportReturnType] obj_type = obj.__class__ if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES: return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway @@ -353,7 +357,7 @@ def smart_deepcopy(obj: Obj) -> Obj: _SENTINEL = object() -def all_identical(left: typing.Iterable[Any], right: typing.Iterable[Any]) -> bool: +def all_identical(left: Iterable[Any], right: Iterable[Any]) -> bool: """Check that the items of `left` are the same objects as those in `right`. >>> a, b = object(), object() @@ -368,6 +372,11 @@ def all_identical(left: typing.Iterable[Any], right: typing.Iterable[Any]) -> bo return True +def get_first_not_none(a: Any, b: Any) -> Any: + """Return the first argument if it is not `None`, otherwise return the second argument.""" + return a if a is not None else b + + @dataclasses.dataclass(frozen=True) class SafeGetItemProxy: """Wrapper redirecting `__getitem__` to `get` with a sentinel value as default @@ -388,7 +397,7 @@ def __getitem__(self, key: str, /) -> Any: # https://github.com/python/mypy/issues/13713 # https://github.com/python/typeshed/pull/8785 # Since this is typing-only, hide it in a typing.TYPE_CHECKING block - if typing.TYPE_CHECKING: + if TYPE_CHECKING: def __contains__(self, key: str, /) -> bool: return self.wrapped.__contains__(key) @@ -421,7 +430,13 @@ def __get__(self, instance: None, objtype: type[_ModelT]) -> _RT: ... def __get__(self, instance: _ModelT, objtype: type[_ModelT]) -> _RT: ... 
def __get__(self, instance: _ModelT | None, objtype: type[_ModelT]) -> _RT: if instance is not None: - attr_name = self.fget.__name__ if sys.version_info >= (3, 10) else self.fget.__func__.__name__ + # fmt: off + attr_name = ( + self.fget.__name__ + if sys.version_info >= (3, 10) + else self.fget.__func__.__name__ # pyright: ignore[reportFunctionMemberAccess] + ) + # fmt: on warnings.warn( f'Accessing the {attr_name!r} attribute on the instance is deprecated. ' 'Instead, you should access this attribute from the model class.', diff --git a/blimgui/dist64/pydantic/_internal/_validators.py b/blimgui/dist64/pydantic/_internal/_validators.py index 803363c..2c7fab6 100644 --- a/blimgui/dist64/pydantic/_internal/_validators.py +++ b/blimgui/dist64/pydantic/_internal/_validators.py @@ -9,15 +9,16 @@ import math import re import typing +from collections.abc import Sequence from decimal import Decimal from fractions import Fraction from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network -from typing import Any, Callable, Union, cast, get_origin +from typing import Any, Callable, TypeVar, Union, cast from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import typing_extensions -from pydantic_core import PydanticCustomError, core_schema -from pydantic_core._pydantic_core import PydanticKnownError +from pydantic_core import PydanticCustomError, PydanticKnownError, core_schema +from typing_extensions import get_args, get_origin from typing_inspection import typing_objects from pydantic._internal._import_utils import import_cached_field_info @@ -25,10 +26,10 @@ def sequence_validator( - input_value: typing.Sequence[Any], + input_value: Sequence[Any], /, validator: core_schema.ValidatorFunctionWrapHandler, -) -> typing.Sequence[Any]: +) -> Sequence[Any]: """Validator for `Sequence` types, isinstance(v, Sequence) has already been called.""" value_type = type(input_value) @@ -126,8 +127,8 @@ def _import_string_logic(dotted_path: str) 
-> Any: return module -def pattern_either_validator(input_value: Any, /) -> typing.Pattern[Any]: - if isinstance(input_value, typing.Pattern): +def pattern_either_validator(input_value: Any, /) -> re.Pattern[Any]: + if isinstance(input_value, re.Pattern): return input_value elif isinstance(input_value, (str, bytes)): # todo strict mode @@ -136,8 +137,8 @@ def pattern_either_validator(input_value: Any, /) -> typing.Pattern[Any]: raise PydanticCustomError('pattern_type', 'Input should be a valid pattern') -def pattern_str_validator(input_value: Any, /) -> typing.Pattern[str]: - if isinstance(input_value, typing.Pattern): +def pattern_str_validator(input_value: Any, /) -> re.Pattern[str]: + if isinstance(input_value, re.Pattern): if isinstance(input_value.pattern, str): return input_value else: @@ -150,8 +151,8 @@ def pattern_str_validator(input_value: Any, /) -> typing.Pattern[str]: raise PydanticCustomError('pattern_type', 'Input should be a valid pattern') -def pattern_bytes_validator(input_value: Any, /) -> typing.Pattern[bytes]: - if isinstance(input_value, typing.Pattern): +def pattern_bytes_validator(input_value: Any, /) -> re.Pattern[bytes]: + if isinstance(input_value, re.Pattern): if isinstance(input_value.pattern, bytes): return input_value else: @@ -164,10 +165,10 @@ def pattern_bytes_validator(input_value: Any, /) -> typing.Pattern[bytes]: raise PydanticCustomError('pattern_type', 'Input should be a valid pattern') -PatternType = typing.TypeVar('PatternType', str, bytes) +PatternType = TypeVar('PatternType', str, bytes) -def compile_pattern(pattern: PatternType) -> typing.Pattern[PatternType]: +def compile_pattern(pattern: PatternType) -> re.Pattern[PatternType]: try: return re.compile(pattern) except re.error: @@ -472,7 +473,7 @@ def type_var_default_factory() -> None: # Assume Annotated[..., Field(...)] if typing_objects.is_annotated(values_type_origin): - field_info = next((v for v in typing_extensions.get_args(values_source_type) if isinstance(v, 
FieldInfo)), None) + field_info = next((v for v in get_args(values_source_type) if isinstance(v, FieldInfo)), None) else: field_info = None if field_info and field_info.default_factory: diff --git a/blimgui/dist64/pydantic/_migration.py b/blimgui/dist64/pydantic/_migration.py index 980dfd2..b4ecd28 100644 --- a/blimgui/dist64/pydantic/_migration.py +++ b/blimgui/dist64/pydantic/_migration.py @@ -1,6 +1,8 @@ import sys from typing import Any, Callable +from pydantic.warnings import PydanticDeprecatedSince20 + from .version import version_short MOVED_IN_V2 = { @@ -280,7 +282,11 @@ def wrapper(name: str) -> object: import_path = f'{module}:{name}' if import_path in MOVED_IN_V2.keys(): new_location = MOVED_IN_V2[import_path] - warnings.warn(f'`{import_path}` has been moved to `{new_location}`.') + warnings.warn( + f'`{import_path}` has been moved to `{new_location}`.', + category=PydanticDeprecatedSince20, + stacklevel=2, + ) return import_string(MOVED_IN_V2[import_path]) if import_path in DEPRECATED_MOVED_IN_V2: # skip the warning here because a deprecation warning will be raised elsewhere @@ -289,7 +295,9 @@ def wrapper(name: str) -> object: new_location = REDIRECT_TO_V1[import_path] warnings.warn( f'`{import_path}` has been removed. We are importing from `{new_location}` instead.' 
- 'See the migration guide for more details: https://docs.pydantic.dev/latest/migration/' + 'See the migration guide for more details: https://docs.pydantic.dev/latest/migration/', + category=PydanticDeprecatedSince20, + stacklevel=2, ) return import_string(REDIRECT_TO_V1[import_path]) if import_path == 'pydantic:BaseSettings': diff --git a/blimgui/dist64/pydantic/aliases.py b/blimgui/dist64/pydantic/aliases.py index ac22737..26f5eee 100644 --- a/blimgui/dist64/pydantic/aliases.py +++ b/blimgui/dist64/pydantic/aliases.py @@ -5,7 +5,7 @@ import dataclasses from typing import Any, Callable, Literal -from pydantic_core import PydanticUndefined +from pydantic_core._pydantic_core import PydanticUndefined from ._internal import _internal_dataclass diff --git a/blimgui/dist64/pydantic/config.py b/blimgui/dist64/pydantic/config.py index 12fef10..bbf57aa 100644 --- a/blimgui/dist64/pydantic/config.py +++ b/blimgui/dist64/pydantic/config.py @@ -153,6 +153,27 @@ class Model(BaseModel): 1. The `= Field(init=False)` does not have any effect at runtime, but prevents the `__pydantic_extra__` field from being included as a parameter to the model's `__init__` method by type checkers. + + As well as specifying an `extra` configuration value on the model, you can also provide it as an argument to the validation methods. + This will override any `extra` configuration value set on the model: + ```python + from pydantic import BaseModel, ConfigDict, ValidationError + + class Model(BaseModel): + x: int + model_config = ConfigDict(extra="allow") + + try: + # Override model config and forbid extra fields just this time + Model.model_validate({"x": 1, "y": 2}, extra="forbid") + except ValidationError as exc: + print(exc) + """ + 1 validation error for Model + y + Extra inputs are not permitted [type=extra_forbidden, input_value=2, input_type=int] + """ + ``` ''' frozen: bool @@ -194,7 +215,7 @@ class Model(BaseModel): print(m) #> my_field='foo' - m = Model(my_alias='foo') # (3)! 
+ m = Model(my_field='foo') # (3)! print(m) #> my_field='foo' ``` @@ -367,7 +388,7 @@ class Model(BaseModel): If you want to use different alias generators for validation and serialization, you can use [`AliasGenerator`][pydantic.aliases.AliasGenerator] instead. - If data source field names do not match your code style (e. g. CamelCase fields), + If data source field names do not match your code style (e.g. CamelCase fields), you can automatically generate aliases using `alias_generator`. Here's an example with a basic callable: @@ -434,16 +455,16 @@ class without an annotation and has a type that is not in this tuple (or otherwi """ A `dict` of custom JSON encoders for specific types. Defaults to `None`. - !!! warning "Deprecated" - This config option is a carryover from v1. - We originally planned to remove it in v2 but didn't have a 1:1 replacement so we are keeping it for now. - It is still deprecated and will likely be removed in the future. + /// version-deprecated | v2 + This configuration option is a carryover from v1. We originally planned to remove it in v2 but didn't have a 1:1 replacement + so we are keeping it for now. It is still deprecated and will likely be removed in the future. + /// """ # new in V2 strict: bool """ - _(new in V2)_ If `True`, strict validation is applied to all fields on the model. + Whether strict validation is applied to all fields on the model. By default, Pydantic attempts to coerce values to the correct type, when possible. @@ -466,136 +487,137 @@ class Model(BaseModel): See the [Conversion Table](../concepts/conversion_table.md) for more details on how Pydantic converts data in both strict and lax modes. + + /// version-added | v2 + /// """ # whether instances of models and dataclasses (including subclass instances) should re-validate, default 'never' revalidate_instances: Literal['always', 'never', 'subclass-instances'] """ - When and how to revalidate models and dataclasses during validation. 
Accepts the string - values of `'never'`, `'always'` and `'subclass-instances'`. Defaults to `'never'`. + When and how to revalidate models and dataclasses during validation. Can be one of: - - `'never'` will not revalidate models and dataclasses during validation - - `'always'` will revalidate models and dataclasses during validation - - `'subclass-instances'` will revalidate models and dataclasses during validation if the instance is a + - `'never'`: will *not* revalidate models and dataclasses during validation + - `'always'`: will revalidate models and dataclasses during validation + - `'subclass-instances'`: will revalidate models and dataclasses during validation if the instance is a subclass of the model or dataclass - By default, model and dataclass instances are not revalidated during validation. + The default is `'never'` (no revalidation). + + This configuration only affects *the current model* it is applied on, and does *not* propagate to the models + referenced in fields. ```python from pydantic import BaseModel class User(BaseModel, revalidate_instances='never'): # (1)! - hobbies: list[str] - - class SubUser(User): - sins: list[str] + name: str class Transaction(BaseModel): user: User - my_user = User(hobbies=['reading']) + my_user = User(name='John') t = Transaction(user=my_user) - print(t) - #> user=User(hobbies=['reading']) - my_user.hobbies = [1] # (2)! + my_user.name = 1 # (2)! t = Transaction(user=my_user) # (3)! print(t) - #> user=User(hobbies=[1]) - - my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying']) - t = Transaction(user=my_sub_user) - print(t) - #> user=SubUser(hobbies=['scuba diving'], sins=['lying']) + #> user=User(name=1) ``` - 1. `revalidate_instances` is set to `'never'` by **default. - 2. The assignment is not validated, unless you set `validate_assignment` to `True` in the model's config. - 3. Since `revalidate_instances` is set to `never`, this is not revalidated. + 1. This is the default behavior. + 2.
The assignment is *not* validated, unless you set [`validate_assignment`][pydantic.ConfigDict.validate_assignment] in the configuration. + 3. Since `revalidate_instances` is set to `'never'`, the user instance is not revalidated. - If you want to revalidate instances during validation, you can set `revalidate_instances` to `'always'` - in the model's config. + Here is an example demonstrating the behavior of `'subclass-instances'`: ```python - from pydantic import BaseModel, ValidationError + from pydantic import BaseModel - class User(BaseModel, revalidate_instances='always'): # (1)! - hobbies: list[str] + class User(BaseModel, revalidate_instances='subclass-instances'): + name: str class SubUser(User): - sins: list[str] + age: int class Transaction(BaseModel): user: User - my_user = User(hobbies=['reading']) - t = Transaction(user=my_user) + my_user = User(name='John') + my_user.name = 1 # (1)! + t = Transaction(user=my_user) # (2)! print(t) - #> user=User(hobbies=['reading']) - - my_user.hobbies = [1] - try: - t = Transaction(user=my_user) # (2)! - except ValidationError as e: - print(e) - ''' - 1 validation error for Transaction - user.hobbies.0 - Input should be a valid string [type=string_type, input_value=1, input_type=int] - ''' + #> user=User(name=1) - my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying']) + my_sub_user = SubUser(name='John', age=20) t = Transaction(user=my_sub_user) print(t) # (3)! - #> user=User(hobbies=['scuba diving']) + #> user=User(name='John') ``` - 1. `revalidate_instances` is set to `'always'`. - 2. The model is revalidated, since `revalidate_instances` is set to `'always'`. - 3. Using `'never'` we would have gotten `user=SubUser(hobbies=['scuba diving'], sins=['lying'])`. + 1. The assignment is *not* validated, unless you set [`validate_assignment`][pydantic.ConfigDict.validate_assignment] in the configuration. + 2. Because `my_user` is a "direct" instance of `User`, it is *not* being revalidated. 
It would have been the case if + `revalidate_instances` was set to `'always'`. + 3. Because `my_sub_user` is an instance of a `User` subclass, it is being revalidated. In this case, Pydantic coerces `my_sub_user` to the defined + `User` class defined on `Transaction`. If one of its fields had an invalid value, a validation error would have been raised. - It's also possible to set `revalidate_instances` to `'subclass-instances'` to only revalidate instances - of subclasses of the model. + /// version-added | v2 + /// + """ - ```python - from pydantic import BaseModel + ser_json_timedelta: Literal['iso8601', 'float'] + """ + The format of JSON serialized timedeltas. Accepts the string values of `'iso8601'` and + `'float'`. Defaults to `'iso8601'`. - class User(BaseModel, revalidate_instances='subclass-instances'): # (1)! - hobbies: list[str] + - `'iso8601'` will serialize timedeltas to [ISO 8601 text format](https://en.wikipedia.org/wiki/ISO_8601#Durations). + - `'float'` will serialize timedeltas to the total number of seconds. - class SubUser(User): - sins: list[str] + /// version-changed | v2.12 + It is now recommended to use the [`ser_json_temporal`][pydantic.config.ConfigDict.ser_json_temporal] + setting. `ser_json_timedelta` will be deprecated in v3. + /// + """ - class Transaction(BaseModel): - user: User + ser_json_temporal: Literal['iso8601', 'seconds', 'milliseconds'] + """ + The format of JSON serialized temporal types from the [`datetime`][] module. This includes: - my_user = User(hobbies=['reading']) - t = Transaction(user=my_user) - print(t) - #> user=User(hobbies=['reading']) + - [`datetime.datetime`][] + - [`datetime.date`][] + - [`datetime.time`][] + - [`datetime.timedelta`][] - my_user.hobbies = [1] - t = Transaction(user=my_user) # (2)! - print(t) - #> user=User(hobbies=[1]) + Can be one of: - my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying']) - t = Transaction(user=my_sub_user) - print(t) # (3)! 
- #> user=User(hobbies=['scuba diving']) - ``` + - `'iso8601'` will serialize date-like types to [ISO 8601 text format](https://en.wikipedia.org/wiki/ISO_8601#Durations). + - `'milliseconds'` will serialize date-like types to a floating point number of milliseconds since the epoch. + - `'seconds'` will serialize date-like types to a floating point number of seconds since the epoch. + + Defaults to `'iso8601'`. - 1. `revalidate_instances` is set to `'subclass-instances'`. - 2. This is not revalidated, since `my_user` is not a subclass of `User`. - 3. Using `'never'` we would have gotten `user=SubUser(hobbies=['scuba diving'], sins=['lying'])`. + /// version-added | v2.12 + This setting replaces [`ser_json_timedelta`][pydantic.config.ConfigDict.ser_json_timedelta], + which will be deprecated in v3. `ser_json_temporal` adds more configurability for the other temporal types. + /// """ - ser_json_timedelta: Literal['iso8601', 'float'] + val_temporal_unit: Literal['seconds', 'milliseconds', 'infer'] """ - The format of JSON serialized timedeltas. Accepts the string values of `'iso8601'` and - `'float'`. Defaults to `'iso8601'`. + The unit to assume for validating numeric input for datetime-like types ([`datetime.datetime`][] and [`datetime.date`][]). Can be one of: - - `'iso8601'` will serialize timedeltas to ISO 8601 durations. - - `'float'` will serialize timedeltas to the total number of seconds. + - `'seconds'` will validate date or time numeric inputs as seconds since the [epoch]. + - `'milliseconds'` will validate date or time numeric inputs as milliseconds since the [epoch]. + - `'infer'` will infer the unit from the string numeric input on unix time as: + + * seconds since the [epoch] if $-2^{10} <= v <= 2^{10}$ + * milliseconds since the [epoch] (if $v < -2^{10}$ or $v > 2^{10}$). + + Defaults to `'infer'`. 
+ + /// version-added | v2.12 + /// + + [epoch]: https://en.wikipedia.org/wiki/Unix_time """ ser_json_bytes: Literal['utf8', 'base64', 'hex'] @@ -636,20 +658,15 @@ class Transaction(BaseModel): protected_namespaces: tuple[str | Pattern[str], ...] """ - A `tuple` of strings and/or patterns that prevent models from having fields with names that conflict with them. - For strings, we match on a prefix basis. Ex, if 'dog' is in the protected namespace, 'dog_name' will be protected. - For patterns, we match on the entire field name. Ex, if `re.compile(r'^dog$')` is in the protected namespace, 'dog' will be protected, but 'dog_name' will not be. - Defaults to `('model_validate', 'model_dump',)`. + A tuple of strings and/or regex patterns that prevent models from having fields with names that conflict with its existing members/methods. - The reason we've selected these is to prevent collisions with other validation / dumping formats - in the future - ex, `model_validate_{some_newly_supported_format}`. + Strings are matched on a prefix basis. For instance, with `'dog'`, having a field named `'dog_name'` will be disallowed. - Before v2.10, Pydantic used `('model_',)` as the default value for this setting to - prevent collisions between model attributes and `BaseModel`'s own methods. This was changed - in v2.10 given feedback that this restriction was limiting in AI and data science contexts, - where it is common to have fields with names like `model_id`, `model_input`, `model_output`, etc. + Regex patterns are matched on the entire field name. For instance, with the pattern `'^dog$'`, having a field named `'dog'` will be disallowed, + but `'dog_name'` will be accepted. - For more details, see https://github.com/pydantic/pydantic/issues/10315. + Defaults to `('model_validate', 'model_dump')`. 
This default is used to prevent collisions with the existing (and possibly future) + [validation](../concepts/models.md#validating-data) and [serialization](../concepts/serialization.md#serializing-data) methods. ```python import warnings @@ -666,9 +683,9 @@ class Model(BaseModel): except UserWarning as e: print(e) ''' - Field "model_dump_something" in Model has conflict with protected namespace "model_dump". + Field 'model_dump_something' in 'Model' conflicts with protected namespace 'model_dump'. - You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ('model_validate',)`. + You may be able to solve this by setting the 'protected_namespaces' configuration to ('model_validate',). ''' ``` @@ -699,11 +716,11 @@ class Model(BaseModel): for warning in caught_warnings: print(f'{warning.message}') ''' - Field "also_protect_field" in Model has conflict with protected namespace "also_protect_". - You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ('protect_me_', re.compile('^protect_this$'))`. + Field 'also_protect_field' in 'Model' conflicts with protected namespace 'also_protect_'. + You may be able to solve this by setting the 'protected_namespaces' configuration to ('protect_me_', re.compile('^protect_this$'))`. - Field "protect_this" in Model has conflict with protected namespace "re.compile('^protect_this$')". - You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ('protect_me_', 'also_protect_')`. + Field 'protect_this' in 'Model' conflicts with protected namespace 're.compile('^protect_this$')'. + You may be able to solve this by setting the 'protected_namespaces' configuration to ('protect_me_', 'also_protect_')`. 
''' ``` @@ -720,12 +737,17 @@ class Model(BaseModel): model_config = ConfigDict(protected_namespaces=('model_',)) - except NameError as e: + except ValueError as e: print(e) ''' - Field "model_validate" conflicts with member > of protected namespace "model_". + Field 'model_validate' conflicts with member > of protected namespace 'model_'. ''' ``` + + /// version-changed | v2.10 + The default protected namespaces was changed from `('model_',)` to `('model_validate', 'model_dump')`, to allow + for fields like `model_id`, `model_name` to be used. + /// """ hide_input_in_errors: bool @@ -780,7 +802,9 @@ class Model(BaseModel): used nested within other models, or when you want to manually define type namespace via [`Model.model_rebuild(_types_namespace=...)`][pydantic.BaseModel.model_rebuild]. - Since v2.10, this setting also applies to pydantic dataclasses and TypeAdapter instances. + /// version-changed | v2.10 + The setting also applies to [Pydantic dataclasses](../concepts/dataclasses.md) and [type adapters](../concepts/type_adapter.md). + /// """ plugin_settings: dict[str, object] | None @@ -788,12 +812,11 @@ class Model(BaseModel): schema_generator: type[_GenerateSchema] | None """ - !!! warning - `schema_generator` is deprecated in v2.10. + The `GenerateSchema` class to use during core schema generation. - Prior to v2.10, this setting was advertised as highly subject to change. - It's possible that this interface may once again become public once the internal core schema generation - API is more stable, but that will likely come after significant performance improvements have been made. + /// version-deprecated | v2.10 + The `GenerateSchema` class is private and highly subject to change. 
+ /// """ json_schema_serialization_defaults_required: bool @@ -833,6 +856,9 @@ class Model(BaseModel): } ''' ``` + + /// version-added | v2.4 + /// """ json_schema_mode_override: Literal['validation', 'serialization', None] @@ -888,6 +914,9 @@ class ForceInputModel(Model): } ''' ``` + + /// version-added | v2.4 + /// """ coerce_numbers_to_str: bool @@ -933,14 +962,13 @@ class Model(BaseModel): The regex engine to be used for pattern validation. Defaults to `'rust-regex'`. - - `rust-regex` uses the [`regex`](https://docs.rs/regex) Rust crate, + - `'rust-regex'` uses the [`regex`](https://docs.rs/regex) Rust crate, which is non-backtracking and therefore more DDoS resistant, but does not support all regex features. - - `python-re` use the [`re`](https://docs.python.org/3/library/re.html) module, - which supports all regex features, but may be slower. + - `'python-re'` use the [`re`][] module, which supports all regex features, but may be slower. !!! note - If you use a compiled regex pattern, the python-re engine will be used regardless of this setting. - This is so that flags such as `re.IGNORECASE` are respected. + If you use a compiled regex pattern, the `'python-re'` engine will be used regardless of this setting. + This is so that flags such as [`re.IGNORECASE`][] are respected. ```python from pydantic import BaseModel, ConfigDict, Field, ValidationError @@ -963,6 +991,9 @@ class Model(BaseModel): String should match pattern '^abc(?=def)' [type=string_pattern_mismatch, input_value='abxyzcdef', input_type=str] ''' ``` + + /// version-added | v2.5 + /// """ validation_error_cause: bool @@ -974,6 +1005,9 @@ class Model(BaseModel): Note: The structure of validation errors are likely to change in future Pydantic versions. Pydantic offers no guarantees about their structure. Should be used for visual traceback debugging only. 
+ + /// version-added | v2.5 + /// """ use_attribute_docstrings: bool @@ -981,8 +1015,6 @@ class Model(BaseModel): Whether docstrings of attributes (bare string literals immediately following the attribute declaration) should be used for field descriptions. Defaults to `False`. - Available in Pydantic v2.7+. - ```python from pydantic import BaseModel, ConfigDict, Field @@ -1013,7 +1045,10 @@ class Model(BaseModel): [`TypedDict`][typing.TypedDict] and stdlib dataclasses, in particular when: - inheritance is being used. - - multiple classes have the same name in the same source file. + - multiple classes have the same name in the same source file (unless Python 3.13 or greater is used). + + /// version-added | v2.7 + /// ''' cache_strings: bool | Literal['all', 'keys', 'none'] @@ -1033,16 +1068,15 @@ class Model(BaseModel): !!! tip If repeated strings are rare, it's recommended to use `'keys'` or `'none'` to reduce memory usage, as the performance difference is minimal if repeated strings are rare. + + /// version-added | v2.7 + /// """ validate_by_alias: bool """ Whether an aliased field may be populated by its alias. Defaults to `True`. - !!! note - In v2.11, `validate_by_alias` was introduced in conjunction with [`validate_by_name`][pydantic.ConfigDict.validate_by_name] - to empower users with more fine grained validation control. In http://example.com + ``` + + /// version-added | v2.12 + /// """ @@ -1179,6 +1246,14 @@ class TD(TypedDict): print(ta.validate_python({'x': 'ABC'})) #> {'x': 'abc'} ``` + + /// deprecated-removed | v2.11 v3 + Passing `config` as a keyword argument. + /// + + /// version-changed | v2.11 + Keyword arguments can be provided directly instead of a config dictionary. 
+ /// """ if config is not None and kwargs: raise ValueError('Cannot specify both `config` and keyword arguments') diff --git a/blimgui/dist64/pydantic/dataclasses.py b/blimgui/dist64/pydantic/dataclasses.py index f1ac051..cecd540 100644 --- a/blimgui/dist64/pydantic/dataclasses.py +++ b/blimgui/dist64/pydantic/dataclasses.py @@ -3,6 +3,7 @@ from __future__ import annotations as _annotations import dataclasses +import functools import sys import types from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, NoReturn, TypeVar, overload @@ -10,7 +11,7 @@ from typing_extensions import TypeGuard, dataclass_transform -from ._internal import _config, _decorators, _namespace_utils, _typing_extra +from ._internal import _config, _decorators, _mock_val_ser, _namespace_utils, _typing_extra from ._internal import _dataclasses as _pydantic_dataclasses from ._migration import getattr_migration from .config import ConfigDict @@ -149,39 +150,6 @@ def dataclass( else: kwargs = {} - def make_pydantic_fields_compatible(cls: type[Any]) -> None: - """Make sure that stdlib `dataclasses` understands `Field` kwargs like `kw_only` - To do that, we simply change - `x: int = pydantic.Field(..., kw_only=True)` - into - `x: int = dataclasses.field(default=pydantic.Field(..., kw_only=True), kw_only=True)` - """ - for annotation_cls in cls.__mro__: - annotations: dict[str, Any] = getattr(annotation_cls, '__annotations__', {}) - for field_name in annotations: - field_value = getattr(cls, field_name, None) - # Process only if this is an instance of `FieldInfo`. - if not isinstance(field_value, FieldInfo): - continue - - # Initialize arguments for the standard `dataclasses.field`. - field_args: dict = {'default': field_value} - - # Handle `kw_only` for Python 3.10+ - if sys.version_info >= (3, 10) and field_value.kw_only: - field_args['kw_only'] = True - - # Set `repr` attribute if it's explicitly specified to be not `True`. 
- if field_value.repr is not True: - field_args['repr'] = field_value.repr - - setattr(cls, field_name, dataclasses.field(**field_args)) - # In Python 3.9, when subclassing, information is pulled from cls.__dict__['__annotations__'] - # for annotations, so we must make sure it's initialized before we add to it. - if cls.__dict__.get('__annotations__') is None: - cls.__annotations__ = {} - cls.__annotations__[field_name] = annotations[field_name] - def create_dataclass(cls: type[Any]) -> type[PydanticDataclass]: """Create a Pydantic dataclass from a regular dataclass. @@ -216,15 +184,16 @@ def create_dataclass(cls: type[Any]) -> type[PydanticDataclass]: config_dict = config if config is not None else getattr(cls, '__pydantic_config__', None) config_wrapper = _config.ConfigWrapper(config_dict) decorators = _decorators.DecoratorInfos.build(cls) + decorators.update_from_config(config_wrapper) # Keep track of the original __doc__ so that we can restore it after applying the dataclasses decorator # Otherwise, classes with no __doc__ will have their signature added into the JSON schema description, # since dataclasses.dataclass will set this as the __doc__ original_doc = cls.__doc__ - if _pydantic_dataclasses.is_builtin_dataclass(cls): - # Don't preserve the docstring for vanilla dataclasses, as it may include the signature - # This matches v1 behavior, and there was an explicit test for it + if _pydantic_dataclasses.is_stdlib_dataclass(cls): + # Vanilla dataclasses include a default docstring (representing the class signature), + # which we don't want to preserve. 
original_doc = None # We don't want to add validation to the existing std lib dataclass, so we will subclass it @@ -236,8 +205,6 @@ def create_dataclass(cls: type[Any]) -> type[PydanticDataclass]: bases = bases + (generic_base,) cls = types.new_class(cls.__name__, bases) - make_pydantic_fields_compatible(cls) - # Respect frozen setting from dataclass constructor and fallback to config setting if not provided if frozen is not None: frozen_ = frozen @@ -252,26 +219,90 @@ def create_dataclass(cls: type[Any]) -> type[PydanticDataclass]: else: frozen_ = config_wrapper.frozen or False - cls = dataclasses.dataclass( # type: ignore[call-overload] - cls, - # the value of init here doesn't affect anything except that it makes it easier to generate a signature - init=True, - repr=repr, - eq=eq, - order=order, - unsafe_hash=unsafe_hash, - frozen=frozen_, - **kwargs, - ) + # Make Pydantic's `Field()` function compatible with stdlib dataclasses. As we'll decorate + # `cls` with the stdlib `@dataclass` decorator first, there are two attributes, `kw_only` and + # `repr` that need to be understood *during* the stdlib creation. We do so in two steps: + + # 1. On the decorated class, wrap `Field()` assignment with `dataclass.field()`, with the + # two attributes set (done in `as_dataclass_field()`) + cls_anns = _typing_extra.safe_get_annotations(cls) + for field_name in cls_anns: + # We should look for assignments in `__dict__` instead, but for now we follow + # the same behavior as stdlib dataclasses (see https://github.com/python/cpython/issues/88609) + field_value = getattr(cls, field_name, None) + if isinstance(field_value, FieldInfo): + setattr(cls, field_name, _pydantic_dataclasses.as_dataclass_field(field_value)) + + # 2. 
For bases of `cls` that are stdlib dataclasses, we temporarily patch their fields + # (see the docstring of the context manager): + with _pydantic_dataclasses.patch_base_fields(cls): + cls = dataclasses.dataclass( # pyright: ignore[reportCallIssue] + cls, + # the value of init here doesn't affect anything except that it makes it easier to generate a signature + init=True, + repr=repr, + eq=eq, + order=order, + unsafe_hash=unsafe_hash, + frozen=frozen_, + **kwargs, + ) + + if config_wrapper.validate_assignment: + original_setattr = cls.__setattr__ + + @functools.wraps(cls.__setattr__) + def validated_setattr(instance: PydanticDataclass, name: str, value: Any, /) -> None: + if frozen_: + return original_setattr(instance, name, value) # pyright: ignore[reportCallIssue] + inst_cls = type(instance) + attr = getattr(inst_cls, name, None) + + if isinstance(attr, property): + attr.__set__(instance, value) + elif isinstance(attr, functools.cached_property): + instance.__dict__.__setitem__(name, value) + else: + inst_cls.__pydantic_validator__.validate_assignment(instance, name, value) + + cls.__setattr__ = validated_setattr.__get__(None, cls) # type: ignore + + if slots and not hasattr(cls, '__setstate__'): + # If slots is set, `pickle` (relied on by `copy.copy()`) will use + # `__setattr__()` to reconstruct the dataclass. However, the custom + # `__setattr__()` set above relies on `validate_assignment()`, which + # in turn expects all the field values to be already present on the + # instance, resulting in attribute errors. + # As such, we make use of `object.__setattr__()` instead. + # Note that we do so only if `__setstate__()` isn't already set (this is the + # case if on top of `slots`, `frozen` is used). 
+ + # Taken from `dataclasses._dataclass_get/setstate()`: + def _dataclass_getstate(self: Any) -> list[Any]: + return [getattr(self, f.name) for f in dataclasses.fields(self)] + + def _dataclass_setstate(self: Any, state: list[Any]) -> None: + for field, value in zip(dataclasses.fields(self), state): + object.__setattr__(self, field.name, value) + + cls.__getstate__ = _dataclass_getstate # pyright: ignore[reportAttributeAccessIssue] + cls.__setstate__ = _dataclass_setstate # pyright: ignore[reportAttributeAccessIssue] # This is an undocumented attribute to distinguish stdlib/Pydantic dataclasses. # It should be set as early as possible: cls.__is_pydantic_dataclass__ = True - cls.__pydantic_decorators__ = decorators # type: ignore cls.__doc__ = original_doc + # Can be non-existent for dynamically created classes: + firstlineno = getattr(original_cls, '__firstlineno__', None) cls.__module__ = original_cls.__module__ + if sys.version_info >= (3, 13) and firstlineno is not None: + # As per https://docs.python.org/3/reference/datamodel.html#type.__firstlineno__: + # Setting the `__module__` attribute removes the `__firstlineno__` item from the type’s dictionary. + original_cls.__firstlineno__ = firstlineno + cls.__firstlineno__ = firstlineno cls.__qualname__ = original_cls.__qualname__ + cls.__pydantic_fields_complete__ = classmethod(_pydantic_fields_complete) cls.__pydantic_complete__ = False # `complete_dataclass` will set it to `True` if successful. # TODO `parent_namespace` is currently None, but we could do the same thing as Pydantic models: # fetch the parent ns using `parent_frame_namespace` (if the dataclass was defined in a function), @@ -282,6 +313,14 @@ def create_dataclass(cls: type[Any]) -> type[PydanticDataclass]: return create_dataclass if _cls is None else create_dataclass(_cls) +def _pydantic_fields_complete(cls: type[PydanticDataclass]) -> bool: + """Return whether the fields where successfully collected (i.e. type hints were successfully resolves). 
+ + This is a private property, not meant to be used outside Pydantic. + """ + return all(field_info._complete for field_info in cls.__pydantic_fields__.values()) + + __getattr__ = getattr_migration(__name__) if sys.version_info < (3, 11): @@ -328,7 +367,7 @@ def rebuild_dataclass( return None for attr in ('__pydantic_core_schema__', '__pydantic_validator__', '__pydantic_serializer__'): - if attr in cls.__dict__: + if attr in cls.__dict__ and not isinstance(getattr(cls, attr), _mock_val_ser.MockValSer): # Deleting the validator/serializer is necessary as otherwise they can get reused in # pydantic-core. Same applies for the core schema that can be reused in schema generation. delattr(cls, attr) diff --git a/blimgui/dist64/pydantic/deprecated/class_validators.py b/blimgui/dist64/pydantic/deprecated/class_validators.py index ad92864..f1a331d 100644 --- a/blimgui/dist64/pydantic/deprecated/class_validators.py +++ b/blimgui/dist64/pydantic/deprecated/class_validators.py @@ -118,7 +118,7 @@ def validator( ) if allow_reuse is True: # pragma: no cover - warn(_ALLOW_REUSE_WARNING_MESSAGE, DeprecationWarning) + warn(_ALLOW_REUSE_WARNING_MESSAGE, DeprecationWarning, stacklevel=2) fields = __field, *fields if isinstance(fields[0], FunctionType): raise PydanticUserError( @@ -234,7 +234,7 @@ def root_validator( return root_validator()(*__args) # type: ignore if allow_reuse is True: # pragma: no cover - warn(_ALLOW_REUSE_WARNING_MESSAGE, DeprecationWarning) + warn(_ALLOW_REUSE_WARNING_MESSAGE, DeprecationWarning, stacklevel=2) mode: Literal['before', 'after'] = 'before' if pre is True else 'after' if pre is False and skip_on_failure is not True: raise PydanticUserError( diff --git a/blimgui/dist64/pydantic/deprecated/copy_internals.py b/blimgui/dist64/pydantic/deprecated/copy_internals.py index 1b0dc12..0170dc0 100644 --- a/blimgui/dist64/pydantic/deprecated/copy_internals.py +++ b/blimgui/dist64/pydantic/deprecated/copy_internals.py @@ -44,7 +44,7 @@ def _iter( ) if include is 
not None: - include = _utils.ValueItems.merge({k: True for k in self.__pydantic_fields__}, include, intersect=True) + include = _utils.ValueItems.merge(dict.fromkeys(self.__pydantic_fields__, True), include, intersect=True) allowed_keys = _calculate_keys(self, include=include, exclude=exclude, exclude_unset=exclude_unset) # type: ignore if allowed_keys is None and not (to_dict or by_alias or exclude_unset or exclude_defaults or exclude_none): diff --git a/blimgui/dist64/pydantic/experimental/__init__.py b/blimgui/dist64/pydantic/experimental/__init__.py index 4aa58c6..5b5add1 100644 --- a/blimgui/dist64/pydantic/experimental/__init__.py +++ b/blimgui/dist64/pydantic/experimental/__init__.py @@ -1,10 +1 @@ """The "experimental" module of pydantic contains potential new features that are subject to change.""" - -import warnings - -from pydantic.warnings import PydanticExperimentalWarning - -warnings.warn( - 'This module is experimental, its contents are subject to change and deprecation.', - category=PydanticExperimentalWarning, -) diff --git a/blimgui/dist64/pydantic/experimental/missing_sentinel.py b/blimgui/dist64/pydantic/experimental/missing_sentinel.py new file mode 100644 index 0000000..3e7f820 --- /dev/null +++ b/blimgui/dist64/pydantic/experimental/missing_sentinel.py @@ -0,0 +1,5 @@ +"""Experimental module exposing a function a `MISSING` sentinel.""" + +from pydantic_core import MISSING + +__all__ = ('MISSING',) diff --git a/blimgui/dist64/pydantic/experimental/pipeline.py b/blimgui/dist64/pydantic/experimental/pipeline.py index bd63d98..633fb00 100644 --- a/blimgui/dist64/pydantic/experimental/pipeline.py +++ b/blimgui/dist64/pydantic/experimental/pipeline.py @@ -17,10 +17,12 @@ import annotated_types if TYPE_CHECKING: - from pydantic_core import core_schema as cs - from pydantic import GetCoreSchemaHandler +from pydantic_core import PydanticCustomError +from pydantic_core import core_schema as cs + +from pydantic import Strict from 
pydantic._internal._internal_dataclass import slots_true as _slots_true if sys.version_info < (3, 10): @@ -336,8 +338,6 @@ def then(self, other: _Pipeline[_OutT, _OtherOut]) -> _Pipeline[_InT, _OtherOut] __and__ = then def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> cs.CoreSchema: - from pydantic_core import core_schema as cs - queue = deque(self._steps) s = None @@ -361,8 +361,6 @@ def __supports_type__(self, _: _OutT) -> bool: def _check_func( func: Callable[[Any], bool], predicate_err: str | Callable[[], str], s: cs.CoreSchema | None ) -> cs.CoreSchema: - from pydantic_core import core_schema as cs - def handler(v: Any) -> Any: if func(v): return v @@ -375,8 +373,6 @@ def handler(v: Any) -> Any: def _apply_step(step: _Step, s: cs.CoreSchema | None, handler: GetCoreSchemaHandler, source_type: Any) -> cs.CoreSchema: - from pydantic_core import core_schema as cs - if isinstance(step, _ValidateAs): s = _apply_parse(s, step.tp, step.strict, handler, source_type) elif isinstance(step, _ValidateAsDefer): @@ -400,10 +396,6 @@ def _apply_parse( handler: GetCoreSchemaHandler, source_type: Any, ) -> cs.CoreSchema: - from pydantic_core import core_schema as cs - - from pydantic import Strict - if tp is _FieldTypeMarker: return cs.chain_schema([s, handler(source_type)]) if s else handler(source_type) @@ -419,8 +411,6 @@ def _apply_parse( def _apply_transform( s: cs.CoreSchema | None, func: Callable[[Any], Any], handler: GetCoreSchemaHandler ) -> cs.CoreSchema: - from pydantic_core import core_schema as cs - if s is None: return cs.no_info_plain_validator_function(func) @@ -585,24 +575,21 @@ def check_tz_naive(v: object) -> bool: assert s is not None elif isinstance(constraint, annotated_types.Predicate): func = constraint.func + # Same logic as in `_known_annotated_metadata.apply_known_metadata()`: + predicate_name = f'{func.__qualname__!r} ' if hasattr(func, '__qualname__') else '' + + def predicate_func(v: Any) -> Any: + if not 
func(v): + raise PydanticCustomError( + 'predicate_failed', + f'Predicate {predicate_name}failed', # pyright: ignore[reportArgumentType] + ) + return v - if func.__name__ == '': - # attempt to extract the source code for a lambda function - # to use as the function name in error messages - # TODO: is there a better way? should we just not do this? - import inspect - - try: - source = inspect.getsource(func).strip() - source = source.removesuffix(')') - lambda_source_code = '`' + ''.join(''.join(source.split('lambda ')[1:]).split(':')[1:]).strip() + '`' - except OSError: - # stringified annotations - lambda_source_code = 'lambda' - - s = _check_func(func, lambda_source_code, s) + if s is None: + s = cs.no_info_plain_validator_function(predicate_func) else: - s = _check_func(func, func.__name__, s) + s = cs.no_info_after_validator_function(predicate_func, s) elif isinstance(constraint, _NotEq): value = constraint.value diff --git a/blimgui/dist64/pydantic/fields.py b/blimgui/dist64/pydantic/fields.py index 3e9f9f5..b091710 100644 --- a/blimgui/dist64/pydantic/fields.py +++ b/blimgui/dist64/pydantic/fields.py @@ -4,39 +4,37 @@ import dataclasses import inspect +import re import sys -import typing -from collections.abc import Mapping +from collections.abc import Callable, Mapping from copy import copy from dataclasses import Field as DataclassField from functools import cached_property -from typing import Annotated, Any, Callable, ClassVar, Literal, TypeVar, cast, overload +from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Literal, TypeVar, cast, final, overload from warnings import warn import annotated_types import typing_extensions -from pydantic_core import PydanticUndefined -from typing_extensions import TypeAlias, Unpack, deprecated +from pydantic_core import MISSING, PydanticUndefined +from typing_extensions import Self, TypeAlias, TypedDict, Unpack, deprecated from typing_inspection import typing_objects from typing_inspection.introspection import 
UNKNOWN, AnnotationSource, ForbiddenQualifier, Qualifier, inspect_annotation from . import types from ._internal import _decorators, _fields, _generics, _internal_dataclass, _repr, _typing_extra, _utils from ._internal._namespace_utils import GlobalsNamespace, MappingNamespace -from .aliases import AliasChoices, AliasPath +from .aliases import AliasChoices, AliasGenerator, AliasPath from .config import JsonDict from .errors import PydanticForbiddenQualifier, PydanticUserError from .json_schema import PydanticJsonSchemaWarning from .warnings import PydanticDeprecatedSince20 -if typing.TYPE_CHECKING: +if TYPE_CHECKING: + from ._internal._config import ConfigWrapper from ._internal._repr import ReprArgs -else: - # See PyCharm issues https://youtrack.jetbrains.com/issue/PY-21915 - # and https://youtrack.jetbrains.com/issue/PY-51428 - DeprecationWarning = PydanticDeprecatedSince20 -__all__ = 'Field', 'PrivateAttr', 'computed_field' + +__all__ = 'Field', 'FieldInfo', 'PrivateAttr', 'computed_field' _Unset: Any = PydanticUndefined @@ -49,7 +47,7 @@ Deprecated: TypeAlias = deprecated -class _FromFieldInfoInputs(typing_extensions.TypedDict, total=False): +class _FromFieldInfoInputs(TypedDict, total=False): """This class exists solely to add type checking for the `**kwargs` in `FieldInfo.from_field`.""" # TODO PEP 747: use TypeForm: @@ -64,6 +62,7 @@ class _FromFieldInfoInputs(typing_extensions.TypedDict, total=False): description: str | None examples: list[Any] | None exclude: bool | None + exclude_if: Callable[[Any], bool] | None gt: annotated_types.SupportsGt | None ge: annotated_types.SupportsGe | None lt: annotated_types.SupportsLt | None @@ -72,7 +71,7 @@ class _FromFieldInfoInputs(typing_extensions.TypedDict, total=False): strict: bool | None min_length: int | None max_length: int | None - pattern: str | typing.Pattern[str] | None + pattern: str | re.Pattern[str] | None allow_inf_nan: bool | None max_digits: int | None decimal_places: int | None @@ -96,6 +95,14 @@ 
class _FieldInfoInputs(_FromFieldInfoInputs, total=False): default: Any +class _FieldInfoAsDict(TypedDict, closed=True): + # TODO PEP 747: use TypeForm: + annotation: Any + metadata: list[Any] + attributes: dict[str, Any] + + +@final class FieldInfo(_repr.Representation): """This class holds information about a field. @@ -103,8 +110,11 @@ class FieldInfo(_repr.Representation): function is explicitly used. !!! warning - You generally shouldn't be creating `FieldInfo` directly, you'll only need to use it when accessing - [`BaseModel`][pydantic.main.BaseModel] `.model_fields` internals. + The `FieldInfo` class is meant to expose information about a field in a Pydantic model or dataclass. + `FieldInfo` instances shouldn't be instantiated directly, nor mutated. + + If you need to derive a new model from another one and are willing to alter `FieldInfo` instances, + refer to this [dynamic model example](../examples/dynamic_models.md). Attributes: annotation: The type annotation of the field. @@ -120,6 +130,7 @@ class FieldInfo(_repr.Representation): description: The description of the field. examples: List of examples of the field. exclude: Whether to exclude the field from the model serialization. + exclude_if: A callable that determines whether to exclude a field during serialization based on its value. discriminator: Field name or Discriminator for discriminating the type in a tagged union. deprecated: A deprecation message, an instance of `warnings.deprecated` or the `typing_extensions.deprecated` backport, or a boolean. If `True`, a default deprecation message will be emitted when accessing the field. @@ -130,9 +141,14 @@ class FieldInfo(_repr.Representation): init: Whether the field should be included in the constructor of the dataclass. init_var: Whether the field should _only_ be included in the constructor of the dataclass, and not stored. kw_only: Whether the field should be a keyword-only argument in the constructor of the dataclass. 
- metadata: List of metadata constraints. + metadata: The metadata list. Contains all the data that isn't expressed as direct `FieldInfo` attributes, including: + + * Type-specific constraints, such as `gt` or `min_length` (these are converted to metadata classes such as `annotated_types.Gt`). + * Any other arbitrary object used within [`Annotated`][typing.Annotated] metadata + (e.g. [custom types handlers](../concepts/types.md#as-an-annotation) or any object not recognized by Pydantic). """ + # TODO PEP 747: use TypeForm: annotation: type[Any] | None default: Any default_factory: Callable[[], Any] | Callable[[dict[str, Any]], Any] | None @@ -145,6 +161,7 @@ class FieldInfo(_repr.Representation): description: str | None examples: list[Any] | None exclude: bool | None + exclude_if: Callable[[Any], bool] | None discriminator: str | types.Discriminator | None deprecated: Deprecated | str | bool | None json_schema_extra: JsonDict | Callable[[JsonDict], None] | None @@ -169,6 +186,7 @@ class FieldInfo(_repr.Representation): 'description', 'examples', 'exclude', + 'exclude_if', 'discriminator', 'deprecated', 'json_schema_extra', @@ -184,11 +202,12 @@ class FieldInfo(_repr.Representation): '_complete', '_original_assignment', '_original_annotation', + '_final', ) # used to convert kwargs to metadata/constraints, # None has a special meaning - these items are collected into a `PydanticGeneralMetadata` - metadata_lookup: ClassVar[dict[str, typing.Callable[[Any], Any] | None]] = { + metadata_lookup: ClassVar[dict[str, Callable[[Any], Any] | None]] = { 'strict': types.Strict, 'gt': annotated_types.Gt, 'ge': annotated_types.Ge, @@ -212,10 +231,14 @@ def __init__(self, **kwargs: Unpack[_FieldInfoInputs]) -> None: See the signature of `pydantic.fields.Field` for more details about the expected arguments. 
""" - self._attributes_set = {k: v for k, v in kwargs.items() if v is not _Unset} + # Tracking the explicitly set attributes is necessary to correctly merge `Field()` functions + # (e.g. with `Annotated[int, Field(alias='a'), Field(alias=None)]`, even though `None` is the default value, + # we need to track that `alias=None` was explicitly set): + self._attributes_set = {k: v for k, v in kwargs.items() if v is not _Unset and k not in self.metadata_lookup} kwargs = {k: _DefaultValues.get(k) if v is _Unset else v for k, v in kwargs.items()} # type: ignore self.annotation = kwargs.get('annotation') + # Note: in theory, the second `pop()` arguments are not required below, as defaults are already set from `_DefaultsValues`. default = kwargs.pop('default', PydanticUndefined) if default is Ellipsis: self.default = PydanticUndefined @@ -238,6 +261,7 @@ def __init__(self, **kwargs: Unpack[_FieldInfoInputs]) -> None: self.description = kwargs.pop('description', None) self.examples = kwargs.pop('examples', None) self.exclude = kwargs.pop('exclude', None) + self.exclude_if = kwargs.pop('exclude_if', None) self.discriminator = kwargs.pop('discriminator', None) # For compatibility with FastAPI<=0.110.0, we preserve the existing value if it is not overridden self.deprecated = kwargs.pop('deprecated', getattr(self, 'deprecated', None)) @@ -258,6 +282,10 @@ def __init__(self, **kwargs: Unpack[_FieldInfoInputs]) -> None: self._complete = True self._original_annotation: Any = PydanticUndefined self._original_assignment: Any = PydanticUndefined + # Used to track whether the `FieldInfo` instance represents the data about a field (and is exposed in `model_fields`/`__pydantic_fields__`), + # or if it is the result of the `Field()` function being used as metadata in an `Annotated` type/as an assignment + # (not an ideal pattern, see https://github.com/pydantic/pydantic/issues/11122): + self._final = False @staticmethod def from_field(default: Any = PydanticUndefined, **kwargs: 
Unpack[_FromFieldInfoInputs]) -> FieldInfo: @@ -339,30 +367,13 @@ class MyModel(pydantic.BaseModel): final = 'final' in inspected_ann.qualifiers metadata = inspected_ann.metadata - if not metadata: - # No metadata, e.g. `field: int`, or `field: Final[str]`: - field_info = FieldInfo(annotation=type_expr, frozen=final or None) - field_info._qualifiers = inspected_ann.qualifiers - return field_info - - # With metadata, e.g. `field: Annotated[int, Field(...), Gt(1)]`: - field_info_annotations = [a for a in metadata if isinstance(a, FieldInfo)] - field_info = FieldInfo.merge_field_infos(*field_info_annotations, annotation=type_expr) - - new_field_info = copy(field_info) - new_field_info.annotation = type_expr - new_field_info.frozen = final or field_info.frozen - field_metadata: list[Any] = [] - for a in metadata: - if typing_objects.is_deprecated(a): - new_field_info.deprecated = a.message - elif not isinstance(a, FieldInfo): - field_metadata.append(a) - else: - field_metadata.extend(a.metadata) - new_field_info.metadata = field_metadata - new_field_info._qualifiers = inspected_ann.qualifiers - return new_field_info + attr_overrides = {'annotation': type_expr} + if final: + attr_overrides['frozen'] = True + field_info = FieldInfo._construct(metadata, **attr_overrides) + field_info._qualifiers = inspected_ann.qualifiers + field_info._final = True + return field_info @staticmethod def from_annotated_attribute( @@ -392,7 +403,7 @@ class MyModel(pydantic.BaseModel): Returns: A field object with the passed values. """ - if annotation is default: + if annotation is not MISSING and annotation is default: raise PydanticUserError( 'Error when building FieldInfo from annotated attribute. ' "Make sure you don't have any field name clashing with a type annotation.", @@ -416,58 +427,149 @@ class MyModel(pydantic.BaseModel): final = 'final' in inspected_ann.qualifiers metadata = inspected_ann.metadata - if isinstance(default, FieldInfo): - # e.g. 
`field: int = Field(...)` - default.annotation = type_expr - default.metadata += metadata - merged_default = FieldInfo.merge_field_infos( - *[x for x in metadata if isinstance(x, FieldInfo)], - default, - annotation=default.annotation, - ) - merged_default.frozen = final or merged_default.frozen - merged_default._qualifiers = inspected_ann.qualifiers - return merged_default - - if isinstance(default, dataclasses.Field): - # `collect_dataclass_fields()` passes the dataclass Field as a default. - pydantic_field = FieldInfo._from_dataclass_field(default) - pydantic_field.annotation = type_expr - pydantic_field.metadata += metadata - pydantic_field = FieldInfo.merge_field_infos( - *[x for x in metadata if isinstance(x, FieldInfo)], - pydantic_field, - annotation=pydantic_field.annotation, - ) - pydantic_field.frozen = final or pydantic_field.frozen - pydantic_field.init_var = 'init_var' in inspected_ann.qualifiers - pydantic_field.init = getattr(default, 'init', None) - pydantic_field.kw_only = getattr(default, 'kw_only', None) - pydantic_field._qualifiers = inspected_ann.qualifiers - return pydantic_field - - if not metadata: - # No metadata, e.g. `field: int = ...`, or `field: Final[str] = ...`: - field_info = FieldInfo(annotation=type_expr, default=default, frozen=final or None) - field_info._qualifiers = inspected_ann.qualifiers + # HACK 1: the order in which the metadata is merged is inconsistent; we need to prepend + # metadata from the assignment at the beginning of the metadata. Changing this is only + # possible in v3 (at least). See https://github.com/pydantic/pydantic/issues/10507 + prepend_metadata: list[Any] | None = None + attr_overrides = {'annotation': type_expr} + if final: + attr_overrides['frozen'] = True + + # HACK 2: FastAPI is subclassing `FieldInfo` and historically expected the actual + # instance's type to be preserved when constructing new models with its subclasses as assignments. 
+ # This code is never reached by Pydantic itself, and in an ideal world this shouldn't be necessary. + if not metadata and isinstance(default, FieldInfo) and type(default) is not FieldInfo: + field_info = default._copy() + field_info._attributes_set.update(attr_overrides) + for k, v in attr_overrides.items(): + setattr(field_info, k, v) return field_info - # With metadata, e.g. `field: Annotated[int, Field(...), Gt(1)] = ...`: - field_infos = [a for a in metadata if isinstance(a, FieldInfo)] - field_info = FieldInfo.merge_field_infos(*field_infos, annotation=type_expr, default=default) - field_metadata: list[Any] = [] - for a in metadata: - if typing_objects.is_deprecated(a): - field_info.deprecated = a.message - elif not isinstance(a, FieldInfo): - field_metadata.append(a) - else: - field_metadata.extend(a.metadata) - field_info.metadata = field_metadata + if isinstance(default, FieldInfo): + default_copy = default._copy() # Copy unnecessary when we remove HACK 1. + prepend_metadata = default_copy.metadata + default_copy.metadata = [] + metadata = metadata + [default_copy] + elif isinstance(default, dataclasses.Field): + from_field = FieldInfo._from_dataclass_field(default) + prepend_metadata = from_field.metadata # Unnecessary when we remove HACK 1. 
+ from_field.metadata = [] + metadata = metadata + [from_field] + if 'init_var' in inspected_ann.qualifiers: + attr_overrides['init_var'] = True + if (init := getattr(default, 'init', None)) is not None: + attr_overrides['init'] = init + if (kw_only := getattr(default, 'kw_only', None)) is not None: + attr_overrides['kw_only'] = kw_only + else: + # `default` is the actual default value + attr_overrides['default'] = default + + field_info = FieldInfo._construct( + prepend_metadata + metadata if prepend_metadata is not None else metadata, **attr_overrides + ) field_info._qualifiers = inspected_ann.qualifiers + field_info._final = True return field_info + @classmethod + def _construct(cls, metadata: list[Any], **attr_overrides: Any) -> Self: + """Construct the final `FieldInfo` instance, by merging the possibly existing `FieldInfo` instances from the metadata. + + With the following example: + + ```python {test="skip" lint="skip"} + class Model(BaseModel): + f: Annotated[int, Gt(1), Field(description='desc', lt=2)] + ``` + + `metadata` refers to the metadata elements of the `Annotated` form. This metadata is iterated over from left to right: + + - If the element is a `Field()` function (which is itself a `FieldInfo` instance), the field attributes (such as + `description`) are saved to be set on the final `FieldInfo` instance. + On the other hand, some kwargs (such as `lt`) are stored as `metadata` (see `FieldInfo.__init__()`, calling + `FieldInfo._collect_metadata()`). In this case, the final metadata list is extended with the one from this instance. + - Else, the element is considered as a single metadata object, and is appended to the final metadata list. + + Args: + metadata: The list of metadata elements to merge together. If the `FieldInfo` instance to be constructed is for + a field with an assigned `Field()`, this `Field()` assignment should be added as the last element of the + provided metadata. 
+ **attr_overrides: Extra attributes that should be set on the final merged `FieldInfo` instance. + + Returns: + The final merged `FieldInfo` instance. + """ + merged_metadata: list[Any] = [] + merged_kwargs: dict[str, Any] = {} + + for meta in metadata: + if isinstance(meta, FieldInfo): + merged_metadata.extend(meta.metadata) + + new_js_extra: JsonDict | None = None + current_js_extra = meta.json_schema_extra + if current_js_extra is not None and 'json_schema_extra' in merged_kwargs: + # We need to merge `json_schema_extra`'s: + existing_js_extra = merged_kwargs['json_schema_extra'] + if isinstance(existing_js_extra, dict): + if isinstance(current_js_extra, dict): + new_js_extra = { + **existing_js_extra, + **current_js_extra, + } + elif callable(current_js_extra): + warn( + 'Composing `dict` and `callable` type `json_schema_extra` is not supported. ' + 'The `callable` type is being ignored. ' + "If you'd like support for this behavior, please open an issue on pydantic.", + UserWarning, + ) + elif callable(existing_js_extra) and isinstance(current_js_extra, dict): + warn( + 'Composing `dict` and `callable` type `json_schema_extra` is not supported. ' + 'The `callable` type is being ignored. ' + "If you'd like support for this behavior, please open an issue on pydantic.", + UserWarning, + ) + + # HACK: It is common for users to define "make model partial" (or similar) utilities, that + # convert all model fields to be optional (i.e. have a default value). To do so, they mutate + # each `FieldInfo` instance from `model_fields` to set a `default`, and use `create_model()` + # with `Annotated[ | None, mutated_field_info]`` as an annotation. 
However, such + # mutations (by doing simple assignments) are only accidentally working, because we also + # need to track attributes explicitly set in `_attributes_set` (relying on default values for + # each attribute is *not* enough, for instance with `Annotated[int, Field(alias='a'), Field(alias=None)]` + # the resulting `FieldInfo` should have `alias=None`). + # To mitigate this, we add a special case when a "final" `FieldInfo` instance (that is an instance coming + # from `model_fields`) is used in annotated metadata (or assignment). In this case, we assume *all* attributes + # were explicitly set, and as such we use all of them (and this will correctly pick up the mutations). + # In theory, this shouldn't really be supported, you are only supposed to use the `Field()` function, not + # a `FieldInfo` instance directly (granted, `Field()` returns a `FieldInfo`, see + # https://github.com/pydantic/pydantic/issues/11122): + if meta._final: + merged_kwargs.update({attr: getattr(meta, attr) for attr in _Attrs}) + else: + merged_kwargs.update(meta._attributes_set) + + if new_js_extra is not None: + merged_kwargs['json_schema_extra'] = new_js_extra + elif typing_objects.is_deprecated(meta): + merged_kwargs['deprecated'] = meta + else: + merged_metadata.append(meta) + + merged_kwargs.update(attr_overrides) + merged_field_info = cls(**merged_kwargs) + merged_field_info.metadata = merged_metadata + return merged_field_info + @staticmethod + @typing_extensions.deprecated( + "The 'merge_field_infos()' method is deprecated and will be removed in a future version. " + 'If you relied on this method, please open an issue in the Pydantic issue tracker.', + category=None, + ) def merge_field_infos(*field_infos: FieldInfo, **overrides: Any) -> FieldInfo: """Merge `FieldInfo` instances keeping only explicitly set attributes. 
@@ -478,7 +580,7 @@ def merge_field_infos(*field_infos: FieldInfo, **overrides: Any) -> FieldInfo: """ if len(field_infos) == 1: # No merging necessary, but we still need to make a copy and apply the overrides - field_info = copy(field_infos[0]) + field_info = field_infos[0]._copy() field_info._attributes_set.update(overrides) default_override = overrides.pop('default', PydanticUndefined) @@ -557,6 +659,8 @@ def _from_dataclass_field(dc_field: DataclassField[Any]) -> FieldInfo: # use the `Field` function so in correct kwargs raise the correct `TypeError` dc_field_metadata = {k: v for k, v in dc_field.metadata.items() if k in _FIELD_ARG_NAMES} + if sys.version_info >= (3, 14) and dc_field.doc is not None: + dc_field_metadata['description'] = dc_field.doc return Field(default=default, default_factory=default_factory, repr=dc_field.repr, **dc_field_metadata) # pyright: ignore[reportCallIssue] @staticmethod @@ -692,8 +796,40 @@ def apply_typevars_map( pydantic._internal._generics.replace_types is used for replacing the typevars with their concrete types. """ - annotation, _ = _typing_extra.try_eval_type(self.annotation, globalns, localns) - self.annotation = _generics.replace_types(annotation, typevars_map) + annotation = _generics.replace_types(self.annotation, typevars_map) + annotation, evaluated = _typing_extra.try_eval_type(annotation, globalns, localns) + self.annotation = annotation + if not evaluated: + self._complete = False + self._original_annotation = self.annotation + + def asdict(self) -> _FieldInfoAsDict: + """Return a dictionary representation of the `FieldInfo` instance. + + The returned value is a dictionary with three items: + + * `annotation`: The type annotation of the field. + * `metadata`: The metadata list. + * `attributes`: A mapping of the remaining `FieldInfo` attributes to their values (e.g. `alias`, `title`). 
+ """ + return { + 'annotation': self.annotation, + 'metadata': self.metadata, + 'attributes': {attr: getattr(self, attr) for attr in _Attrs}, + } + + def _copy(self) -> Self: + """Return a copy of the `FieldInfo` instance.""" + # Note: we can't define a custom `__copy__()`, as `FieldInfo` is being subclassed + # by some third-party libraries with extra attributes defined (and as `FieldInfo` + # is slotted, we can't make a copy of the `__dict__`). + copied = copy(self) + for attr_name in ('metadata', '_attributes_set', '_qualifiers'): + # Apply "deep-copy" behavior on collections attributes: + value = getattr(copied, attr_name).copy() + setattr(copied, attr_name, value) + + return copied def __repr_args__(self) -> ReprArgs: yield 'annotation', _repr.PlainRepr(_repr.display_as_type(self.annotation)) @@ -709,6 +845,7 @@ def __repr_args__(self) -> ReprArgs: '_complete', '_original_assignment', '_original_annotation', + '_final', ): continue elif s == 'metadata' and not self.metadata: @@ -731,11 +868,11 @@ def __repr_args__(self) -> ReprArgs: yield s, value -class _EmptyKwargs(typing_extensions.TypedDict): +class _EmptyKwargs(TypedDict): """This class exists solely to ensure that type checking warns about passing `**extra` in `Field`.""" -_DefaultValues = { +_Attrs = { 'default': ..., 'default_factory': None, 'alias': None, @@ -743,10 +880,13 @@ class _EmptyKwargs(typing_extensions.TypedDict): 'validation_alias': None, 'serialization_alias': None, 'title': None, + 'field_title_generator': None, 'description': None, 'examples': None, 'exclude': None, + 'exclude_if': None, 'discriminator': None, + 'deprecated': None, 'json_schema_extra': None, 'frozen': None, 'validate_default': None, @@ -754,6 +894,11 @@ class _EmptyKwargs(typing_extensions.TypedDict): 'init': None, 'init_var': None, 'kw_only': None, +} + +_DefaultValues = { + **_Attrs, + 'kw_only': None, 'pattern': None, 'strict': None, 'gt': None, @@ -788,6 +933,7 @@ def Field( description: str | None = _Unset, 
examples: list[Any] | None = _Unset, exclude: bool | None = _Unset, + exclude_if: Callable[[Any], bool] | None = _Unset, discriminator: str | types.Discriminator | None = _Unset, deprecated: Deprecated | str | bool | None = _Unset, json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = _Unset, @@ -797,7 +943,7 @@ def Field( init: bool | None = _Unset, init_var: bool | None = _Unset, kw_only: bool | None = _Unset, - pattern: str | typing.Pattern[str] | None = _Unset, + pattern: str | re.Pattern[str] | None = _Unset, strict: bool | None = _Unset, coerce_numbers_to_str: bool | None = _Unset, gt: annotated_types.SupportsGt | None = _Unset, @@ -814,7 +960,47 @@ def Field( fail_fast: bool | None = _Unset, **extra: Unpack[_EmptyKwargs], ) -> Any: ... -@overload # `default` argument set +@overload # `default` argument set, validate_default=True (no type checking on the default value) +def Field( + default: Any, + *, + alias: str | None = _Unset, + alias_priority: int | None = _Unset, + validation_alias: str | AliasPath | AliasChoices | None = _Unset, + serialization_alias: str | None = _Unset, + title: str | None = _Unset, + field_title_generator: Callable[[str, FieldInfo], str] | None = _Unset, + description: str | None = _Unset, + examples: list[Any] | None = _Unset, + exclude: bool | None = _Unset, + exclude_if: Callable[[Any], bool] | None = _Unset, + discriminator: str | types.Discriminator | None = _Unset, + deprecated: Deprecated | str | bool | None = _Unset, + json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = _Unset, + frozen: bool | None = _Unset, + validate_default: Literal[True], + repr: bool = _Unset, + init: bool | None = _Unset, + init_var: bool | None = _Unset, + kw_only: bool | None = _Unset, + pattern: str | re.Pattern[str] | None = _Unset, + strict: bool | None = _Unset, + coerce_numbers_to_str: bool | None = _Unset, + gt: annotated_types.SupportsGt | None = _Unset, + ge: annotated_types.SupportsGe | None = _Unset, + lt: 
annotated_types.SupportsLt | None = _Unset, + le: annotated_types.SupportsLe | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + union_mode: Literal['smart', 'left_to_right'] = _Unset, + fail_fast: bool | None = _Unset, + **extra: Unpack[_EmptyKwargs], +) -> Any: ... +@overload # `default` argument set, validate_default=False or unset def Field( default: _T, *, @@ -827,16 +1013,20 @@ def Field( description: str | None = _Unset, examples: list[Any] | None = _Unset, exclude: bool | None = _Unset, + # NOTE: to get proper type checking on `exclude_if`'s argument, we could use `_T` instead of `Any`. However, + # this requires (at least for pyright) adding an additional overload where `exclude_if` is required (otherwise + # `a: int = Field(default_factory=str)` results in a false negative). + exclude_if: Callable[[Any], bool] | None = _Unset, discriminator: str | types.Discriminator | None = _Unset, deprecated: Deprecated | str | bool | None = _Unset, json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = _Unset, frozen: bool | None = _Unset, - validate_default: bool | None = _Unset, + validate_default: Literal[False] = ..., repr: bool = _Unset, init: bool | None = _Unset, init_var: bool | None = _Unset, kw_only: bool | None = _Unset, - pattern: str | typing.Pattern[str] | None = _Unset, + pattern: str | re.Pattern[str] | None = _Unset, strict: bool | None = _Unset, coerce_numbers_to_str: bool | None = _Unset, gt: annotated_types.SupportsGt | None = _Unset, @@ -853,7 +1043,47 @@ def Field( fail_fast: bool | None = _Unset, **extra: Unpack[_EmptyKwargs], ) -> _T: ... 
-@overload # `default_factory` argument set +@overload # `default_factory` argument set, validate_default=True (no type checking on the default value) +def Field( # pyright: ignore[reportOverlappingOverload] + *, + default_factory: Callable[[], Any] | Callable[[dict[str, Any]], Any], + alias: str | None = _Unset, + alias_priority: int | None = _Unset, + validation_alias: str | AliasPath | AliasChoices | None = _Unset, + serialization_alias: str | None = _Unset, + title: str | None = _Unset, + field_title_generator: Callable[[str, FieldInfo], str] | None = _Unset, + description: str | None = _Unset, + examples: list[Any] | None = _Unset, + exclude: bool | None = _Unset, + exclude_if: Callable[[Any], bool] | None = _Unset, + discriminator: str | types.Discriminator | None = _Unset, + deprecated: Deprecated | str | bool | None = _Unset, + json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = _Unset, + frozen: bool | None = _Unset, + validate_default: Literal[True], + repr: bool = _Unset, + init: bool | None = _Unset, + init_var: bool | None = _Unset, + kw_only: bool | None = _Unset, + pattern: str | re.Pattern[str] | None = _Unset, + strict: bool | None = _Unset, + coerce_numbers_to_str: bool | None = _Unset, + gt: annotated_types.SupportsGt | None = _Unset, + ge: annotated_types.SupportsGe | None = _Unset, + lt: annotated_types.SupportsLt | None = _Unset, + le: annotated_types.SupportsLe | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + union_mode: Literal['smart', 'left_to_right'] = _Unset, + fail_fast: bool | None = _Unset, + **extra: Unpack[_EmptyKwargs], +) -> Any: ... 
+@overload # `default_factory` argument set, validate_default=False or unset def Field( *, default_factory: Callable[[], _T] | Callable[[dict[str, Any]], _T], @@ -866,16 +1096,20 @@ def Field( description: str | None = _Unset, examples: list[Any] | None = _Unset, exclude: bool | None = _Unset, + # NOTE: to get proper type checking on `exclude_if`'s argument, we could use `_T` instead of `Any`. However, + # this requires (at least for pyright) adding an additional overload where `exclude_if` is required (otherwise + # `a: int = Field(default_factory=str)` results in a false negative). + exclude_if: Callable[[Any], bool] | None = _Unset, discriminator: str | types.Discriminator | None = _Unset, deprecated: Deprecated | str | bool | None = _Unset, json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = _Unset, frozen: bool | None = _Unset, - validate_default: bool | None = _Unset, + validate_default: Literal[False] | None = _Unset, repr: bool = _Unset, init: bool | None = _Unset, init_var: bool | None = _Unset, kw_only: bool | None = _Unset, - pattern: str | typing.Pattern[str] | None = _Unset, + pattern: str | re.Pattern[str] | None = _Unset, strict: bool | None = _Unset, coerce_numbers_to_str: bool | None = _Unset, gt: annotated_types.SupportsGt | None = _Unset, @@ -904,6 +1138,7 @@ def Field( # No default set description: str | None = _Unset, examples: list[Any] | None = _Unset, exclude: bool | None = _Unset, + exclude_if: Callable[[Any], bool] | None = _Unset, discriminator: str | types.Discriminator | None = _Unset, deprecated: Deprecated | str | bool | None = _Unset, json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = _Unset, @@ -913,7 +1148,7 @@ def Field( # No default set init: bool | None = _Unset, init_var: bool | None = _Unset, kw_only: bool | None = _Unset, - pattern: str | typing.Pattern[str] | None = _Unset, + pattern: str | re.Pattern[str] | None = _Unset, strict: bool | None = _Unset, coerce_numbers_to_str: bool | None = _Unset, 
gt: annotated_types.SupportsGt | None = _Unset, @@ -943,6 +1178,7 @@ def Field( # noqa: C901 description: str | None = _Unset, examples: list[Any] | None = _Unset, exclude: bool | None = _Unset, + exclude_if: Callable[[Any], bool] | None = _Unset, discriminator: str | types.Discriminator | None = _Unset, deprecated: Deprecated | str | bool | None = _Unset, json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = _Unset, @@ -952,7 +1188,7 @@ def Field( # noqa: C901 init: bool | None = _Unset, init_var: bool | None = _Unset, kw_only: bool | None = _Unset, - pattern: str | typing.Pattern[str] | None = _Unset, + pattern: str | re.Pattern[str] | None = _Unset, strict: bool | None = _Unset, coerce_numbers_to_str: bool | None = _Unset, gt: annotated_types.SupportsGt | None = _Unset, @@ -994,6 +1230,7 @@ def Field( # noqa: C901 description: Human-readable description. examples: Example values for this field. exclude: Whether to exclude the field from the model serialization. + exclude_if: A callable that determines whether to exclude a field during serialization based on its value. discriminator: Field name or Discriminator for discriminating the type in a tagged union. deprecated: A deprecation message, an instance of `warnings.deprecated` or the `typing_extensions.deprecated` backport, or a boolean. If `True`, a default deprecation message will be emitted when accessing the field. 
@@ -1042,13 +1279,21 @@ def Field( # noqa: C901 min_items = extra.pop('min_items', None) # type: ignore if min_items is not None: - warn('`min_items` is deprecated and will be removed, use `min_length` instead', DeprecationWarning) + warn( + '`min_items` is deprecated and will be removed, use `min_length` instead', + PydanticDeprecatedSince20, + stacklevel=2, + ) if min_length in (None, _Unset): min_length = min_items # type: ignore max_items = extra.pop('max_items', None) # type: ignore if max_items is not None: - warn('`max_items` is deprecated and will be removed, use `max_length` instead', DeprecationWarning) + warn( + '`max_items` is deprecated and will be removed, use `max_length` instead', + PydanticDeprecatedSince20, + stacklevel=2, + ) if max_length in (None, _Unset): max_length = max_items # type: ignore @@ -1064,7 +1309,11 @@ def Field( # noqa: C901 allow_mutation = extra.pop('allow_mutation', None) # type: ignore if allow_mutation is not None: - warn('`allow_mutation` is deprecated and will be removed. use `frozen` instead', DeprecationWarning) + warn( + '`allow_mutation` is deprecated and will be removed. use `frozen` instead', + PydanticDeprecatedSince20, + stacklevel=2, + ) if allow_mutation is False: frozen = True @@ -1077,7 +1326,8 @@ def Field( # noqa: C901 'Using extra keyword arguments on `Field` is deprecated and will be removed.' ' Use `json_schema_extra` instead.' f' (Extra keys: {", ".join(k.__repr__() for k in extra.keys())})', - DeprecationWarning, + PydanticDeprecatedSince20, + stacklevel=2, ) if not json_schema_extra or json_schema_extra is _Unset: json_schema_extra = extra # type: ignore @@ -1097,7 +1347,11 @@ def Field( # noqa: C901 include = extra.pop('include', None) # type: ignore if include is not None: - warn('`include` is deprecated and does nothing. It will be removed, use `exclude` instead', DeprecationWarning) + warn( + '`include` is deprecated and does nothing. 
It will be removed, use `exclude` instead', + PydanticDeprecatedSince20, + stacklevel=2, + ) return FieldInfo.from_field( default, @@ -1111,6 +1365,7 @@ def Field( # noqa: C901 description=description, examples=examples, exclude=exclude, + exclude_if=exclude_if, discriminator=discriminator, deprecated=deprecated, json_schema_extra=json_schema_extra, @@ -1157,16 +1412,14 @@ class ModelPrivateAttr(_repr.Representation): __slots__ = ('default', 'default_factory') - def __init__( - self, default: Any = PydanticUndefined, *, default_factory: typing.Callable[[], Any] | None = None - ) -> None: + def __init__(self, default: Any = PydanticUndefined, *, default_factory: Callable[[], Any] | None = None) -> None: if default is Ellipsis: self.default = PydanticUndefined else: self.default = default self.default_factory = default_factory - if not typing.TYPE_CHECKING: + if not TYPE_CHECKING: # We put `__getattr__` in a non-TYPE_CHECKING block because otherwise, mypy allows arbitrary attribute access def __getattr__(self, item: str) -> Any: @@ -1288,11 +1541,11 @@ class ComputedFieldInfo: alias: str | None alias_priority: int | None title: str | None - field_title_generator: typing.Callable[[str, ComputedFieldInfo], str] | None + field_title_generator: Callable[[str, ComputedFieldInfo], str] | None description: str | None deprecated: Deprecated | str | bool | None examples: list[Any] | None - json_schema_extra: JsonDict | typing.Callable[[JsonDict], None] | None + json_schema_extra: JsonDict | Callable[[JsonDict], None] | None repr: bool @property @@ -1304,6 +1557,45 @@ def deprecation_message(self) -> str | None: return 'deprecated' if self.deprecated else None return self.deprecated if isinstance(self.deprecated, str) else self.deprecated.message + def _update_from_config(self, config_wrapper: ConfigWrapper, name: str) -> None: + """Update the instance from the configuration set on the class this computed field belongs to.""" + title_generator = self.field_title_generator or 
config_wrapper.field_title_generator + if title_generator is not None and self.title is None: + self.title = title_generator(name, self) + if config_wrapper.alias_generator is not None: + self._apply_alias_generator(config_wrapper.alias_generator, name) + + def _apply_alias_generator(self, alias_generator: Callable[[str], str] | AliasGenerator, name: str) -> None: + """Apply an alias generator to aliases if appropriate. + + Args: + alias_generator: A callable that takes a string and returns a string, or an `AliasGenerator` instance. + name: The name of the computed field from which to generate the alias. + """ + # Apply an alias_generator if + # 1. An alias is not specified + # 2. An alias is specified, but the priority is <= 1 + + if self.alias_priority is None or self.alias_priority <= 1 or self.alias is None: + alias, _, serialization_alias = None, None, None + + if isinstance(alias_generator, AliasGenerator): + alias, _, serialization_alias = alias_generator.generate_aliases(name) + elif callable(alias_generator): + alias = alias_generator(name) + + # if priority is not set, we set to 1 + # which supports the case where the alias_generator from a child class is used + # to generate an alias for a field in a parent class + if self.alias_priority is None or self.alias_priority <= 1: + self.alias_priority = 1 + + # if the priority is 1, then we set the aliases to the generated alias + # note that we use the serialization_alias with priority over alias, as computed_field + # aliases are used for serialization only (not validation) + if self.alias_priority == 1: + self.alias = _utils.get_first_not_none(serialization_alias, alias) + def _wrapped_property_is_private(property_: cached_property | property) -> bool: # type: ignore """Returns true if provided property is private, False otherwise.""" @@ -1319,27 +1611,27 @@ def _wrapped_property_is_private(property_: cached_property | property) -> bool: # this should really be `property[T], cached_property[T]` but property 
is not generic unlike cached_property # See https://github.com/python/typing/issues/985 and linked issues -PropertyT = typing.TypeVar('PropertyT') +PropertyT = TypeVar('PropertyT') -@typing.overload +@overload def computed_field(func: PropertyT, /) -> PropertyT: ... -@typing.overload +@overload def computed_field( *, alias: str | None = None, alias_priority: int | None = None, title: str | None = None, - field_title_generator: typing.Callable[[str, ComputedFieldInfo], str] | None = None, + field_title_generator: Callable[[str, ComputedFieldInfo], str] | None = None, description: str | None = None, deprecated: Deprecated | str | bool | None = None, examples: list[Any] | None = None, - json_schema_extra: JsonDict | typing.Callable[[JsonDict], None] | None = None, + json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = None, repr: bool = True, return_type: Any = PydanticUndefined, -) -> typing.Callable[[PropertyT], PropertyT]: ... +) -> Callable[[PropertyT], PropertyT]: ... def computed_field( @@ -1349,14 +1641,14 @@ def computed_field( alias: str | None = None, alias_priority: int | None = None, title: str | None = None, - field_title_generator: typing.Callable[[str, ComputedFieldInfo], str] | None = None, + field_title_generator: Callable[[str, ComputedFieldInfo], str] | None = None, description: str | None = None, deprecated: Deprecated | str | bool | None = None, examples: list[Any] | None = None, - json_schema_extra: JsonDict | typing.Callable[[JsonDict], None] | None = None, + json_schema_extra: JsonDict | Callable[[JsonDict], None] | None = None, repr: bool | None = None, return_type: Any = PydanticUndefined, -) -> PropertyT | typing.Callable[[PropertyT], PropertyT]: +) -> PropertyT | Callable[[PropertyT], PropertyT]: """!!! 
abstract "Usage Documentation" [The `computed_field` decorator](../concepts/fields.md#the-computed_field-decorator) diff --git a/blimgui/dist64/pydantic/functional_serializers.py b/blimgui/dist64/pydantic/functional_serializers.py index 4b065e4..0c1522f 100644 --- a/blimgui/dist64/pydantic/functional_serializers.py +++ b/blimgui/dist64/pydantic/functional_serializers.py @@ -231,6 +231,7 @@ def field_serializer( def field_serializer( *fields: str, mode: Literal['plain', 'wrap'] = 'plain', + # TODO PEP 747 (grep for 'return_type' on the whole code base): return_type: Any = PydanticUndefined, when_used: WhenUsed = 'always', check_fields: bool | None = None, @@ -243,16 +244,14 @@ def field_serializer( In the below example, a field of type `set` is used to mitigate duplication. A `field_serializer` is used to serialize the data as a sorted list. ```python - from typing import Set - from pydantic import BaseModel, field_serializer class StudentModel(BaseModel): name: str = 'Jane' - courses: Set[str] + courses: set[str] @field_serializer('courses', when_used='json') - def serialize_courses_in_order(self, courses: Set[str]): + def serialize_courses_in_order(self, courses: set[str]): return sorted(courses) student = StudentModel(courses={'Math', 'Chemistry', 'English'}) @@ -260,7 +259,7 @@ def serialize_courses_in_order(self, courses: Set[str]): #> {"name":"Jane","courses":["Chemistry","English","Math"]} ``` - See [Custom serializers](../concepts/serialization.md#custom-serializers) for more information. + See [the usage documentation](../concepts/serialization.md#serializers) for more information. 
Four signatures are supported: @@ -300,7 +299,7 @@ def dec(f: FieldSerializer) -> _decorators.PydanticDescriptorProxy[Any]: if TYPE_CHECKING: # The first argument in the following callables represent the `self` type: - ModelPlainSerializerWithInfo: TypeAlias = Callable[[Any, SerializationInfo], Any] + ModelPlainSerializerWithInfo: TypeAlias = Callable[[Any, SerializationInfo[Any]], Any] """A model serializer method with the `info` argument, in `plain` mode.""" ModelPlainSerializerWithoutInfo: TypeAlias = Callable[[Any], Any] @@ -309,7 +308,7 @@ def dec(f: FieldSerializer) -> _decorators.PydanticDescriptorProxy[Any]: ModelPlainSerializer: TypeAlias = 'ModelPlainSerializerWithInfo | ModelPlainSerializerWithoutInfo' """A model serializer method in `plain` mode.""" - ModelWrapSerializerWithInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler, SerializationInfo], Any] + ModelWrapSerializerWithInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler, SerializationInfo[Any]], Any] """A model serializer method with the `info` argument, in `wrap` mode.""" ModelWrapSerializerWithoutInfo: TypeAlias = Callable[[Any, SerializerFunctionWrapHandler], Any] @@ -391,7 +390,7 @@ def serialize_model(self): - `(self, nxt: SerializerFunctionWrapHandler)` - `(self, nxt: SerializerFunctionWrapHandler, info: SerializationInfo)` - See [Custom serializers](../concepts/serialization.md#custom-serializers) for more information. + See [the usage documentation](../concepts/serialization.md#serializers) for more information. Args: f: The function to be decorated. @@ -422,15 +421,19 @@ def dec(f: ModelSerializer) -> _decorators.PydanticDescriptorProxy[Any]: if TYPE_CHECKING: SerializeAsAny = Annotated[AnyType, ...] # SerializeAsAny[list[str]] will be treated by type checkers as list[str] - """Force serialization to ignore whatever is defined in the schema and instead ask the object - itself how it should be serialized. 
- In particular, this means that when model subclasses are serialized, fields present in the subclass - but not in the original schema will be included. + """Annotation used to mark a type as having duck-typing serialization behavior. + + See [usage documentation](../concepts/serialization.md#serializing-with-duck-typing) for more details. """ else: @dataclasses.dataclass(**_internal_dataclass.slots_true) - class SerializeAsAny: # noqa: D101 + class SerializeAsAny: + """Annotation used to mark a type as having duck-typing serialization behavior. + + See [usage documentation](../concepts/serialization.md#serializing-with-duck-typing) for more details. + """ + def __class_getitem__(cls, item: Any) -> Any: return Annotated[item, SerializeAsAny()] @@ -442,9 +445,7 @@ def __get_pydantic_core_schema__( while schema_to_update['type'] == 'definitions': schema_to_update = schema_to_update.copy() schema_to_update = schema_to_update['schema'] - schema_to_update['serialization'] = core_schema.wrap_serializer_function_ser_schema( - lambda x, h: h(x), schema=core_schema.any_schema() - ) + schema_to_update['serialization'] = core_schema.simple_ser_schema('any') return schema __hash__ = object.__hash__ diff --git a/blimgui/dist64/pydantic/functional_validators.py b/blimgui/dist64/pydantic/functional_validators.py index 2eed4ef..fc4bbba 100644 --- a/blimgui/dist64/pydantic/functional_validators.py +++ b/blimgui/dist64/pydantic/functional_validators.py @@ -4,17 +4,19 @@ import dataclasses import sys +import warnings from functools import partialmethod from types import FunctionType from typing import TYPE_CHECKING, Annotated, Any, Callable, Literal, TypeVar, Union, cast, overload from pydantic_core import PydanticUndefined, core_schema -from pydantic_core import core_schema as _core_schema from typing_extensions import Self, TypeAlias from ._internal import _decorators, _generics, _internal_dataclass from .annotated_handlers import GetCoreSchemaHandler from .errors import 
PydanticUserError +from .version import version_short +from .warnings import ArbitraryTypeWarning, PydanticDeprecatedSince212 if sys.version_info < (3, 11): from typing_extensions import Protocol @@ -72,10 +74,10 @@ class Model(BaseModel): def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: schema = handler(source_type) - info_arg = _inspect_validator(self.func, 'after') + info_arg = _inspect_validator(self.func, mode='after', type='field') if info_arg: func = cast(core_schema.WithInfoValidatorFunction, self.func) - return core_schema.with_info_after_validator_function(func, schema=schema, field_name=handler.field_name) + return core_schema.with_info_after_validator_function(func, schema=schema) else: func = cast(core_schema.NoInfoValidatorFunction, self.func) return core_schema.no_info_after_validator_function(func, schema=schema) @@ -94,8 +96,8 @@ class BeforeValidator: Attributes: func: The validator function. - json_schema_input_type: The input type of the function. This is only used to generate the appropriate - JSON Schema (in validation mode). + json_schema_input_type: The input type used to generate the appropriate + JSON Schema (in validation mode). The actual input type is `Any`. Example: ```python @@ -130,13 +132,12 @@ def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaH else handler.generate_schema(self.json_schema_input_type) ) - info_arg = _inspect_validator(self.func, 'before') + info_arg = _inspect_validator(self.func, mode='before', type='field') if info_arg: func = cast(core_schema.WithInfoValidatorFunction, self.func) return core_schema.with_info_before_validator_function( func, schema=schema, - field_name=handler.field_name, json_schema_input_schema=input_schema, ) else: @@ -167,8 +168,8 @@ class PlainValidator: Attributes: func: The validator function. - json_schema_input_type: The input type of the function. 
This is only used to generate the appropriate - JSON Schema (in validation mode). If not provided, will default to `Any`. + json_schema_input_type: The input type used to generate the appropriate + JSON Schema (in validation mode). The actual input type is `Any`. Example: ```python @@ -176,11 +177,15 @@ class PlainValidator: from pydantic import BaseModel, PlainValidator + def validate(v: object) -> int: + if not isinstance(v, (int, str)): + raise ValueError(f'Expected int or str, go {type(v)}') + + return int(v) + 1 + MyInt = Annotated[ int, - PlainValidator( - lambda v: int(v) + 1, json_schema_input_type=Union[str, int] # (1)! - ), + PlainValidator(validate, json_schema_input_type=Union[str, int]), # (1)! ] class Model(BaseModel): @@ -194,7 +199,7 @@ class Model(BaseModel): ``` 1. In this example, we've specified the `json_schema_input_type` as `Union[str, int]` which indicates to the JSON schema - generator that in validation mode, the input type for the `a` field can be either a `str` or an `int`. + generator that in validation mode, the input type for the `a` field can be either a [`str`][] or an [`int`][]. """ func: core_schema.NoInfoValidatorFunction | core_schema.WithInfoValidatorFunction @@ -225,12 +230,11 @@ def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaH input_schema = handler.generate_schema(self.json_schema_input_type) - info_arg = _inspect_validator(self.func, 'plain') + info_arg = _inspect_validator(self.func, mode='plain', type='field') if info_arg: func = cast(core_schema.WithInfoValidatorFunction, self.func) return core_schema.with_info_plain_validator_function( func, - field_name=handler.field_name, serialization=serialization, # pyright: ignore[reportArgumentType] json_schema_input_schema=input_schema, ) @@ -259,8 +263,8 @@ class WrapValidator: Attributes: func: The validator function. - json_schema_input_type: The input type of the function. 
This is only used to generate the appropriate - JSON Schema (in validation mode). + json_schema_input_type: The input type used to generate the appropriate + JSON Schema (in validation mode). The actual input type is `Any`. ```python from datetime import datetime @@ -301,13 +305,12 @@ def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaH else handler.generate_schema(self.json_schema_input_type) ) - info_arg = _inspect_validator(self.func, 'wrap') + info_arg = _inspect_validator(self.func, mode='wrap', type='field') if info_arg: func = cast(core_schema.WithInfoWrapValidatorFunction, self.func) return core_schema.with_info_wrap_validator_function( func, schema=schema, - field_name=handler.field_name, json_schema_input_schema=input_schema, ) else: @@ -332,33 +335,33 @@ class _OnlyValueValidatorClsMethod(Protocol): def __call__(self, cls: Any, value: Any, /) -> Any: ... class _V2ValidatorClsMethod(Protocol): - def __call__(self, cls: Any, value: Any, info: _core_schema.ValidationInfo, /) -> Any: ... + def __call__(self, cls: Any, value: Any, info: core_schema.ValidationInfo[Any], /) -> Any: ... class _OnlyValueWrapValidatorClsMethod(Protocol): - def __call__(self, cls: Any, value: Any, handler: _core_schema.ValidatorFunctionWrapHandler, /) -> Any: ... + def __call__(self, cls: Any, value: Any, handler: core_schema.ValidatorFunctionWrapHandler, /) -> Any: ... class _V2WrapValidatorClsMethod(Protocol): def __call__( self, cls: Any, value: Any, - handler: _core_schema.ValidatorFunctionWrapHandler, - info: _core_schema.ValidationInfo, + handler: core_schema.ValidatorFunctionWrapHandler, + info: core_schema.ValidationInfo[Any], /, ) -> Any: ... 
_V2Validator = Union[ _V2ValidatorClsMethod, - _core_schema.WithInfoValidatorFunction, + core_schema.WithInfoValidatorFunction, _OnlyValueValidatorClsMethod, - _core_schema.NoInfoValidatorFunction, + core_schema.NoInfoValidatorFunction, ] _V2WrapValidator = Union[ _V2WrapValidatorClsMethod, - _core_schema.WithInfoWrapValidatorFunction, + core_schema.WithInfoWrapValidatorFunction, _OnlyValueWrapValidatorClsMethod, - _core_schema.NoInfoWrapValidatorFunction, + core_schema.NoInfoWrapValidatorFunction, ] _PartialClsOrStaticMethod: TypeAlias = Union[classmethod[Any, Any, Any], staticmethod[Any, Any], partialmethod[Any]] @@ -519,7 +522,7 @@ def dec( _ModelTypeCo = TypeVar('_ModelTypeCo', covariant=True) -class ModelWrapValidatorHandler(_core_schema.ValidatorFunctionWrapHandler, Protocol[_ModelTypeCo]): +class ModelWrapValidatorHandler(core_schema.ValidatorFunctionWrapHandler, Protocol[_ModelTypeCo]): """`@model_validator` decorated function handler argument type. This is used when `mode='wrap'`.""" def __call__( # noqa: D102 @@ -559,7 +562,7 @@ def __call__( # noqa: D102 # thus validators _must_ handle all cases value: Any, handler: ModelWrapValidatorHandler[_ModelType], - info: _core_schema.ValidationInfo, + info: core_schema.ValidationInfo, /, ) -> _ModelType: ... @@ -604,7 +607,7 @@ def __call__( # noqa: D102 # or anything else that gets passed to validate_python # thus validators _must_ handle all cases value: Any, - info: _core_schema.ValidationInfo, + info: core_schema.ValidationInfo[Any], /, ) -> Any: ... @@ -619,7 +622,7 @@ def __call__( # noqa: D102 # or anything else that gets passed to validate_python # thus validators _must_ handle all cases value: Any, - info: _core_schema.ValidationInfo, + info: core_schema.ValidationInfo[Any], /, ) -> Any: ... @@ -629,7 +632,7 @@ def __call__( # noqa: D102 have info argument. 
""" -ModelAfterValidator = Callable[[_ModelType, _core_schema.ValidationInfo], _ModelType] +ModelAfterValidator = Callable[[_ModelType, core_schema.ValidationInfo[Any]], _ModelType] """A `@model_validator` decorated function signature. This is used when `mode='after'`.""" _AnyModelWrapValidator = Union[ModelWrapValidator[_ModelType], ModelWrapValidatorWithoutInfo[_ModelType]] @@ -716,8 +719,18 @@ def verify_square(self) -> Self: """ def dec(f: Any) -> _decorators.PydanticDescriptorProxy[Any]: - # auto apply the @classmethod decorator + # auto apply the @classmethod decorator. NOTE: in V3, do not apply the conversion for 'after' validators: f = _decorators.ensure_classmethod_based_on_signature(f) + if mode == 'after' and isinstance(f, classmethod): + warnings.warn( + category=PydanticDeprecatedSince212, + message=( + "Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. " + f'See the documentation at https://docs.pydantic.dev/{version_short()}/concepts/validators/#model-after-validator.' + ), + stacklevel=2, + ) + dec_info = _decorators.ModelValidatorDecoratorInfo(mode=mode) return _decorators.PydanticDescriptorProxy(f, dec_info) @@ -816,7 +829,9 @@ def __class_getitem__(cls, item: Any) -> Any: @classmethod def __get_pydantic_core_schema__(cls, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: - original_schema = handler(source) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ArbitraryTypeWarning) + original_schema = handler(source) metadata = {'pydantic_js_annotation_functions': [lambda _c, h: h(original_schema)]} return core_schema.any_schema( metadata=metadata, @@ -826,3 +841,53 @@ def __get_pydantic_core_schema__(cls, source: Any, handler: GetCoreSchemaHandler ) __hash__ = object.__hash__ + + +_FromTypeT = TypeVar('_FromTypeT') + + +class ValidateAs: + """A helper class to validate a custom type from a type that is natively supported by Pydantic. 
+ + Args: + from_type: The type natively supported by Pydantic to use to perform validation. + instantiation_hook: A callable taking the validated type as an argument, and returning + the populated custom type. + + Example: + ```python {lint="skip"} + from typing import Annotated + + from pydantic import BaseModel, TypeAdapter, ValidateAs + + class MyCls: + def __init__(self, a: int) -> None: + self.a = a + + def __repr__(self) -> str: + return f"MyCls(a={self.a})" + + class Model(BaseModel): + a: int + + + ta = TypeAdapter( + Annotated[MyCls, ValidateAs(Model, lambda v: MyCls(a=v.a))] + ) + + print(ta.validate_python({'a': 1})) + #> MyCls(a=1) + ``` + """ + + # TODO: make use of PEP 747 + def __init__(self, from_type: type[_FromTypeT], /, instantiation_hook: Callable[[_FromTypeT], Any]) -> None: + self.from_type = from_type + self.instantiation_hook = instantiation_hook + + def __get_pydantic_core_schema__(self, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + schema = handler(self.from_type) + return core_schema.no_info_after_validator_function( + self.instantiation_hook, + schema=schema, + ) diff --git a/blimgui/dist64/pydantic/json_schema.py b/blimgui/dist64/pydantic/json_schema.py index be9595c..db28a1d 100644 --- a/blimgui/dist64/pydantic/json_schema.py +++ b/blimgui/dist64/pydantic/json_schema.py @@ -36,8 +36,8 @@ ) import pydantic_core -from pydantic_core import CoreSchema, PydanticOmit, core_schema, to_jsonable_python -from pydantic_core.core_schema import ComputedField +from pydantic_core import MISSING, PydanticOmit, core_schema, to_jsonable_python +from pydantic_core.core_schema import ComputedField, CoreSchema from typing_extensions import TypeAlias, assert_never, deprecated, final from typing_inspection.introspection import get_literal_values @@ -131,6 +131,8 @@ class PydanticJsonSchemaWarning(UserWarning): CoreModeRef = tuple[CoreRef, JsonSchemaMode] JsonSchemaKeyT = TypeVar('JsonSchemaKeyT', bound=Hashable) 
+_PRIMITIVE_JSON_SCHEMA_TYPES = ('string', 'boolean', 'null', 'integer', 'number') + @dataclasses.dataclass(**_internal_dataclass.slots_true) class _DefinitionsRemapping: @@ -242,6 +244,14 @@ class GenerateJsonSchema: Args: by_alias: Whether to use field aliases in the generated schemas. ref_template: The format string to use when generating reference names. + union_format: The format to use when combining schemas from unions together. Can be one of: + + - `'any_of'`: Use the [`anyOf`](https://json-schema.org/understanding-json-schema/reference/combining#anyOf) + keyword to combine schemas (the default). + - `'primitive_type_array'`: Use the [`type`](https://json-schema.org/understanding-json-schema/reference/type) + keyword as an array of strings, containing each type of the combination. If any of the schemas is not a primitive + type (`string`, `boolean`, `null`, `integer` or `number`) or contains constraints/metadata, falls back to + `any_of`. Raises: JsonSchemaError: If the instance of the class is inadvertently reused after generating a schema. 
@@ -253,9 +263,15 @@ class GenerateJsonSchema: # this value can be modified on subclasses to easily control which warnings are emitted ignored_warning_kinds: set[JsonSchemaWarningKind] = {'skipped-choice'} - def __init__(self, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE): + def __init__( + self, + by_alias: bool = True, + ref_template: str = DEFAULT_REF_TEMPLATE, + union_format: Literal['any_of', 'primitive_type_array'] = 'any_of', + ) -> None: self.by_alias = by_alias self.ref_template = ref_template + self.union_format: Literal['any_of', 'primitive_type_array'] = union_format self.core_to_json_refs: dict[CoreModeRef, JsonRef] = {} self.core_to_defs_refs: dict[CoreModeRef, DefsRef] = {} @@ -488,14 +504,13 @@ def handler_func(schema_or_field: CoreSchemaOrField) -> JsonSchemaValue: and ser_schema.get('when_used') in ('unless-none', 'json-unless-none') and schema_or_field['type'] == 'nullable' ): - json_schema = self.get_flattened_anyof([{'type': 'null'}, json_schema]) + json_schema = self.get_union_of_schemas([{'type': 'null'}, json_schema]) if json_schema is None: if _core_utils.is_core_schema(schema_or_field) or _core_utils.is_core_schema_field(schema_or_field): generate_for_schema_type = self._schema_type_to_method[schema_or_field['type']] json_schema = generate_for_schema_type(schema_or_field) else: raise TypeError(f'Unexpected schema type: schema={schema_or_field}') - return json_schema current_handler = _schema_generation_shared.GenerateJsonSchemaHandler(self, handler_func) @@ -590,9 +605,7 @@ def _sort_recursive(self, value: Any, parent_key: str | None = None) -> Any: sorted_dict[key] = self._sort_recursive(value[key], parent_key=key) return sorted_dict elif isinstance(value, list): - sorted_list: list[JsonSchemaValue] = [] - for item in value: - sorted_list.append(self._sort_recursive(item, parent_key)) + sorted_list: list[JsonSchemaValue] = [self._sort_recursive(item, parent_key) for item in value] return sorted_list else: return value 
@@ -674,7 +687,49 @@ def decimal_schema(self, schema: core_schema.DecimalSchema) -> JsonSchemaValue: Returns: The generated JSON schema. """ - json_schema = self.str_schema(core_schema.str_schema()) + + def get_decimal_pattern(schema: core_schema.DecimalSchema) -> str: + max_digits = schema.get('max_digits') + decimal_places = schema.get('decimal_places') + + pattern = ( + r'^(?!^[-+.]*$)[+-]?0*' # check it is not empty string and not one or sequence of ".+-" characters. + ) + + # Case 1: Both max_digits and decimal_places are set + if max_digits is not None and decimal_places is not None: + integer_places = max(0, max_digits - decimal_places) + pattern += ( + rf'(?:' + rf'\d{{0,{integer_places}}}' + rf'|' + rf'(?=[\d.]{{1,{max_digits + 1}}}0*$)' + rf'\d{{0,{integer_places}}}\.\d{{0,{decimal_places}}}0*$' + rf')' + ) + + # Case 2: Only max_digits is set + elif max_digits is not None and decimal_places is None: + pattern += ( + rf'(?:' + rf'\d{{0,{max_digits}}}' + rf'|' + rf'(?=[\d.]{{1,{max_digits + 1}}}0*$)' + rf'\d*\.\d*0*$' + rf')' + ) + + # Case 3: Only decimal_places is set + elif max_digits is None and decimal_places is not None: + pattern += rf'\d*\.?\d{{0,{decimal_places}}}0*$' + + # Case 4: Both are None (no restrictions) + else: + pattern += r'\d*\.?\d*$' # look for arbitrary integer or decimal + + return pattern + + json_schema = self.str_schema(core_schema.str_schema(pattern=get_decimal_pattern(schema))) if self.mode == 'validation': multiple_of = schema.get('multiple_of') le = schema.get('le') @@ -805,6 +860,17 @@ def literal_schema(self, schema: core_schema.LiteralSchema) -> JsonSchemaValue: result['type'] = 'null' return result + def missing_sentinel_schema(self, schema: core_schema.MissingSentinelSchema) -> JsonSchemaValue: + """Generates a JSON schema that matches the `MISSING` sentinel value. + + Args: + schema: The core schema. + + Returns: + The generated JSON schema. 
+ """ + raise PydanticOmit + def enum_schema(self, schema: core_schema.EnumSchema) -> JsonSchemaValue: """Generates a JSON schema that matches an Enum value. @@ -1109,34 +1175,34 @@ def default_schema(self, schema: core_schema.WithDefaultSchema) -> JsonSchemaVal json_schema = self.generate_inner(schema['schema']) default = self.get_default_value(schema) - if default is NoDefault: + if default is NoDefault or default is MISSING: return json_schema # we reflect the application of custom plain, no-info serializers to defaults for # JSON Schemas viewed in serialization mode: # TODO: improvements along with https://github.com/pydantic/pydantic/issues/8208 - if ( - self.mode == 'serialization' - and (ser_schema := schema['schema'].get('serialization')) - and (ser_func := ser_schema.get('function')) - and ser_schema.get('type') == 'function-plain' - and not ser_schema.get('info_arg') - and not (default is None and ser_schema.get('when_used') in ('unless-none', 'json-unless-none')) - ): - try: - default = ser_func(default) # type: ignore - except Exception: - # It might be that the provided default needs to be validated (read: parsed) first - # (assuming `validate_default` is enabled). However, we can't perform - # such validation during JSON Schema generation so we don't support - # this pattern for now. - # (One example is when using `foo: ByteSize = '1MB'`, which validates and - # serializes as an int. In this case, `ser_func` is `int` and `int('1MB')` fails). 
- self.emit_warning( - 'non-serializable-default', - f'Unable to serialize value {default!r} with the plain serializer; excluding default from JSON schema', - ) - return json_schema + if self.mode == 'serialization': + # `_get_ser_schema_for_default_value()` is used to unpack potentially nested validator schemas: + ser_schema = _get_ser_schema_for_default_value(schema['schema']) + if ( + ser_schema is not None + and (ser_func := ser_schema.get('function')) + and not (default is None and ser_schema.get('when_used') in ('unless-none', 'json-unless-none')) + ): + try: + default = ser_func(default) # type: ignore + except Exception: + # It might be that the provided default needs to be validated (read: parsed) first + # (assuming `validate_default` is enabled). However, we can't perform + # such validation during JSON Schema generation so we don't support + # this pattern for now. + # (One example is when using `foo: ByteSize = '1MB'`, which validates and + # serializes as an int. In this case, `ser_func` is `int` and `int('1MB')` fails). + self.emit_warning( + 'non-serializable-default', + f'Unable to serialize value {default!r} with the plain serializer; excluding default from JSON schema', + ) + return json_schema try: encoded_default = self.encode_default(default) @@ -1181,9 +1247,7 @@ def nullable_schema(self, schema: core_schema.NullableSchema) -> JsonSchemaValue if inner_json_schema == null_schema: return null_schema else: - # Thanks to the equality check against `null_schema` above, I think 'oneOf' would also be valid here; - # I'll use 'anyOf' for now, but it could be changed it if it would work better with some external tooling - return self.get_flattened_anyof([inner_json_schema, null_schema]) + return self.get_union_of_schemas([inner_json_schema, null_schema]) def union_schema(self, schema: core_schema.UnionSchema) -> JsonSchemaValue: """Generates a JSON schema that matches a schema that allows values matching any of the given schemas. 
@@ -1208,7 +1272,43 @@ def union_schema(self, schema: core_schema.UnionSchema) -> JsonSchemaValue: self.emit_warning('skipped-choice', exc.message) if len(generated) == 1: return generated[0] - return self.get_flattened_anyof(generated) + return self.get_union_of_schemas(generated) + + def get_union_of_schemas(self, schemas: list[JsonSchemaValue]) -> JsonSchemaValue: + """Returns the JSON Schema representation for the union of the provided JSON Schemas. + + The result depends on the configured `'union_format'`. + + Args: + schemas: The list of JSON Schemas to be included in the union. + + Returns: + The JSON Schema representing the union of schemas. + """ + if self.union_format == 'primitive_type_array': + types: list[str] = [] + for schema in schemas: + schema_types: list[str] | str | None = schema.get('type') + if schema_types is None: + # No type, meaning it can be a ref or an empty schema. + break + if not isinstance(schema_types, list): + schema_types = [schema_types] + if not all(t in _PRIMITIVE_JSON_SCHEMA_TYPES for t in schema_types): + break + if len(schema) != 1: + # We only want to include types that don't have any constraints. For instance, + # if `schemas = [{'type': 'string', 'maxLength': 3}, {'type': 'string', 'minLength': 5}]`, + # we don't want to produce `{'type': 'string', 'maxLength': 3, 'minLength': 5}`. + # Same if we have some metadata (e.g. `title`) on a specific union member, we want to preserve it. 
+ break + + types.extend(schema_types) + else: + # If we got there, all the schemas where valid to be used with the `'primitive_type_array` format + return {'type': list(dict.fromkeys(types))} + + return self.get_flattened_anyof(schemas) def tagged_union_schema(self, schema: core_schema.TaggedUnionSchema) -> JsonSchemaValue: """Generates a JSON schema that matches a schema that allows values matching any of the given schemas, where @@ -1363,10 +1463,26 @@ def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaVa with self._config_wrapper_stack.push(config): json_schema = self._named_required_fields_schema(named_required_fields) + # There's some duplication between `extra_behavior` and + # the config's `extra`/core config's `extra_fields_behavior`. + # However, it is common to manually create TypedDictSchemas, + # where you don't necessarily have a class. + # At runtime, `extra_behavior` takes priority over the config + # for validation, so follow the same for the JSON Schema: + if schema.get('extra_behavior') == 'forbid': + json_schema['additionalProperties'] = False + elif schema.get('extra_behavior') == 'allow': + if 'extras_schema' in schema and schema['extras_schema'] != {'type': 'any'}: + json_schema['additionalProperties'] = self.generate_inner(schema['extras_schema']) + else: + json_schema['additionalProperties'] = True + if cls is not None: + # `_update_class_schema()` will not override + # `additionalProperties` if already present: self._update_class_schema(json_schema, cls, config) - else: - extra = config.get('extra') + elif 'additionalProperties' not in json_schema: + extra = schema.get('config', {}).get('extra_fields_behavior') if extra == 'forbid': json_schema['additionalProperties'] = False elif extra == 'allow': @@ -1525,7 +1641,7 @@ def _update_class_schema(self, json_schema: JsonSchemaValue, cls: type[Any], con json_schema.setdefault('description', root_description) extra = config.get('extra') - if 'additionalProperties' not 
in json_schema: + if 'additionalProperties' not in json_schema: # This check is particularly important for `typed_dict_schema()` if extra == 'allow': json_schema['additionalProperties'] = True elif extra == 'forbid': @@ -1640,13 +1756,19 @@ def field_is_required( Returns: `True` if the field should be marked as required in the generated JSON schema, `False` otherwise. """ - if self.mode == 'serialization' and self._config.json_schema_serialization_defaults_required: - return not field.get('serialization_exclude') + if field['type'] == 'typed-dict-field': + required = field.get('required', total) else: - if field['type'] == 'typed-dict-field': - return field.get('required', total) + required = field['schema']['type'] != 'default' + + if self.mode == 'serialization': + has_exclude_if = field.get('serialization_exclude_if') is not None + if self._config.json_schema_serialization_defaults_required: + return not has_exclude_if else: - return field['schema']['type'] != 'default' + return required and not has_exclude_if + else: + return required def dataclass_args_schema(self, schema: core_schema.DataclassArgsSchema) -> JsonSchemaValue: """Generates a JSON schema that matches a schema that defines a dataclass's constructor arguments. @@ -1675,7 +1797,7 @@ def dataclass_schema(self, schema: core_schema.DataclassSchema) -> JsonSchemaVal Returns: The generated JSON schema. 
""" - from ._internal._dataclasses import is_builtin_dataclass + from ._internal._dataclasses import is_stdlib_dataclass cls = schema['cls'] config: ConfigDict = getattr(cls, '__pydantic_config__', cast('ConfigDict', {})) @@ -1686,7 +1808,7 @@ def dataclass_schema(self, schema: core_schema.DataclassSchema) -> JsonSchemaVal self._update_class_schema(json_schema, cls, config) # Dataclass-specific handling of description - if is_builtin_dataclass(cls): + if is_stdlib_dataclass(cls): # vanilla dataclass; don't use cls.__doc__ as it will contain the class signature by default description = None else: @@ -1748,7 +1870,8 @@ def kw_arguments_schema( for argument in arguments: name = self.get_argument_name(argument) argument_schema = self.generate_inner(argument['schema']).copy() - argument_schema['title'] = self.get_title_from_name(name) + if 'title' not in argument_schema and self.field_title_should_be_set(argument['schema']): + argument_schema['title'] = self.get_title_from_name(name) properties[name] = argument_schema if argument['schema']['type'] != 'default': @@ -1787,7 +1910,8 @@ def p_arguments_schema( name = self.get_argument_name(argument) argument_schema = self.generate_inner(argument['schema']).copy() - argument_schema['title'] = self.get_title_from_name(name) + if 'title' not in argument_schema and self.field_title_should_be_set(argument['schema']): + argument_schema['title'] = self.get_title_from_name(name) prefix_items.append(argument_schema) if argument['schema']['type'] != 'default': @@ -1957,7 +2081,7 @@ def definitions_schema(self, schema: core_schema.DefinitionsSchema) -> JsonSchem for definition in schema['definitions']: try: self.generate_inner(definition) - except PydanticInvalidForJsonSchema as e: + except PydanticInvalidForJsonSchema as e: # noqa: PERF203 core_ref: CoreRef = CoreRef(definition['ref']) # type: ignore self._core_defs_invalid_for_json_schema[self.get_defs_ref((core_ref, self.mode))] = e continue @@ -2380,6 +2504,7 @@ def 
model_json_schema( cls: type[BaseModel] | type[PydanticDataclass], by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE, + union_format: Literal['any_of', 'primitive_type_array'] = 'any_of', schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, mode: JsonSchemaMode = 'validation', ) -> dict[str, Any]: @@ -2390,6 +2515,14 @@ def model_json_schema( by_alias: If `True` (the default), fields will be serialized according to their alias. If `False`, fields will be serialized according to their attribute name. ref_template: The template to use for generating JSON Schema references. + union_format: The format to use when combining schemas from unions together. Can be one of: + + - `'any_of'`: Use the [`anyOf`](https://json-schema.org/understanding-json-schema/reference/combining#anyOf) + keyword to combine schemas (the default). + - `'primitive_type_array'`: Use the [`type`](https://json-schema.org/understanding-json-schema/reference/type) + keyword as an array of strings, containing each type of the combination. If any of the schemas is not a primitive + type (`string`, `boolean`, `null`, `integer` or `number`) or contains constraints/metadata, falls back to + `any_of`. schema_generator: The class to use for generating the JSON Schema. mode: The mode to use for generating the JSON Schema. 
It can be one of the following: @@ -2401,7 +2534,9 @@ def model_json_schema( """ from .main import BaseModel - schema_generator_instance = schema_generator(by_alias=by_alias, ref_template=ref_template) + schema_generator_instance = schema_generator( + by_alias=by_alias, ref_template=ref_template, union_format=union_format + ) if isinstance(cls.__pydantic_core_schema__, _mock_val_ser.MockCoreSchema): cls.__pydantic_core_schema__.rebuild() @@ -2420,6 +2555,7 @@ def models_json_schema( title: str | None = None, description: str | None = None, ref_template: str = DEFAULT_REF_TEMPLATE, + union_format: Literal['any_of', 'primitive_type_array'] = 'any_of', schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, ) -> tuple[dict[tuple[type[BaseModel] | type[PydanticDataclass], JsonSchemaMode], JsonSchemaValue], JsonSchemaValue]: """Utility function to generate a JSON Schema for multiple models. @@ -2430,6 +2566,14 @@ def models_json_schema( title: The title of the generated JSON Schema. description: The description of the generated JSON Schema. ref_template: The reference template to use for generating JSON Schema references. + union_format: The format to use when combining schemas from unions together. Can be one of: + + - `'any_of'`: Use the [`anyOf`](https://json-schema.org/understanding-json-schema/reference/combining#anyOf) + keyword to combine schemas (the default). + - `'primitive_type_array'`: Use the [`type`](https://json-schema.org/understanding-json-schema/reference/type) + keyword as an array of strings, containing each type of the combination. If any of the schemas is not a primitive + type (`string`, `boolean`, `null`, `integer` or `number`) or contains constraints/metadata, falls back to + `any_of`. schema_generator: The schema generator to use for generating the JSON Schema. 
Returns: @@ -2444,7 +2588,7 @@ def models_json_schema( if isinstance(cls.__pydantic_core_schema__, _mock_val_ser.MockCoreSchema): cls.__pydantic_core_schema__.rebuild() - instance = schema_generator(by_alias=by_alias, ref_template=ref_template) + instance = schema_generator(by_alias=by_alias, ref_template=ref_template, union_format=union_format) inputs: list[tuple[type[BaseModel] | type[PydanticDataclass], JsonSchemaMode, CoreSchema]] = [ (m, mode, m.__pydantic_core_schema__) for m, mode in models ] @@ -2693,3 +2837,18 @@ def _get_typed_dict_config(cls: type[Any] | None) -> ConfigDict: except AttributeError: pass return {} + + +def _get_ser_schema_for_default_value(schema: CoreSchema) -> core_schema.PlainSerializerFunctionSerSchema | None: + """Get a `'function-plain'` serialization schema that can be used to serialize a default value. + + This takes into account having the serialization schema nested under validation schema(s). + """ + if ( + (ser_schema := schema.get('serialization')) + and ser_schema['type'] == 'function-plain' + and not ser_schema.get('info_arg') + ): + return ser_schema + if _core_utils.is_function_with_inner_schema(schema): + return _get_ser_schema_for_default_value(schema['schema']) diff --git a/blimgui/dist64/pydantic/main.py b/blimgui/dist64/pydantic/main.py index 820f34f..2b3148e 100644 --- a/blimgui/dist64/pydantic/main.py +++ b/blimgui/dist64/pydantic/main.py @@ -9,7 +9,6 @@ import operator import sys import types -import typing import warnings from collections.abc import Generator, Mapping from copy import copy, deepcopy @@ -20,6 +19,7 @@ Callable, ClassVar, Dict, + Generic, Literal, TypeVar, Union, @@ -49,7 +49,7 @@ from ._migration import getattr_migration from .aliases import AliasChoices, AliasPath from .annotated_handlers import GetCoreSchemaHandler, GetJsonSchemaHandler -from .config import ConfigDict +from .config import ConfigDict, ExtraValues from .errors import PydanticUndefinedAnnotation, PydanticUserError from .json_schema 
import DEFAULT_REF_TEMPLATE, GenerateJsonSchema, JsonSchemaMode, JsonSchemaValue, model_json_schema from .plugin._schema_validator import PluggableSchemaValidator @@ -64,10 +64,7 @@ from ._internal._utils import AbstractSetIntStr, MappingIntStrAny from .deprecated.parse import Protocol as DeprecatedParseProtocol from .fields import ComputedFieldInfo, FieldInfo, ModelPrivateAttr -else: - # See PyCharm issues https://youtrack.jetbrains.com/issue/PY-21915 - # and https://youtrack.jetbrains.com/issue/PY-51428 - DeprecationWarning = PydanticDeprecatedSince20 + __all__ = 'BaseModel', 'create_model' @@ -211,13 +208,13 @@ class BaseModel(metaclass=_model_construction.ModelMetaclass): __pydantic_computed_fields__: ClassVar[Dict[str, ComputedFieldInfo]] # noqa: UP006 """A dictionary of computed field names and their corresponding [`ComputedFieldInfo`][pydantic.fields.ComputedFieldInfo] objects.""" - __pydantic_extra__: dict[str, Any] | None = _model_construction.NoInitField(init=False) + __pydantic_extra__: Dict[str, Any] | None = _model_construction.NoInitField(init=False) # noqa: UP006 """A dictionary containing extra values, if [`extra`][pydantic.config.ConfigDict.extra] is set to `'allow'`.""" __pydantic_fields_set__: set[str] = _model_construction.NoInitField(init=False) """The names of fields explicitly set during instantiation.""" - __pydantic_private__: dict[str, Any] | None = _model_construction.NoInitField(init=False) + __pydantic_private__: Dict[str, Any] | None = _model_construction.NoInitField(init=False) # noqa: UP006 """Values of private attributes set on the model instance.""" if not TYPE_CHECKING: @@ -386,7 +383,7 @@ def model_construct(cls, _fields_set: set[str] | None = None, **values: Any) -> def model_copy(self, *, update: Mapping[str, Any] | None = None, deep: bool = False) -> Self: """!!! 
abstract "Usage Documentation" - [`model_copy`](../concepts/serialization.md#model_copy) + [`model_copy`](../concepts/models.md#model-copy) Returns a copy of the model. @@ -429,13 +426,14 @@ def model_dump( exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, fallback: Callable[[Any], Any] | None = None, serialize_as_any: bool = False, ) -> dict[str, Any]: """!!! abstract "Usage Documentation" - [`model_dump`](../concepts/serialization.md#modelmodel_dump) + [`model_dump`](../concepts/serialization.md#python-mode) Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. @@ -450,6 +448,9 @@ def model_dump( exclude_unset: Whether to exclude fields that have not been explicitly set. exclude_defaults: Whether to exclude fields that are set to their default value. exclude_none: Whether to exclude fields that have a value of `None`. + exclude_computed_fields: Whether to exclude computed fields. + While this can be useful for round-tripping, it is usually recommended to use the dedicated + `round_trip` parameter instead. round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T]. warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. 
@@ -470,6 +471,7 @@ def model_dump( exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, + exclude_computed_fields=exclude_computed_fields, round_trip=round_trip, warnings=warnings, fallback=fallback, @@ -480,6 +482,7 @@ def model_dump_json( self, *, indent: int | None = None, + ensure_ascii: bool = False, include: IncEx | None = None, exclude: IncEx | None = None, context: Any | None = None, @@ -487,18 +490,21 @@ def model_dump_json( exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, fallback: Callable[[Any], Any] | None = None, serialize_as_any: bool = False, ) -> str: """!!! abstract "Usage Documentation" - [`model_dump_json`](../concepts/serialization.md#modelmodel_dump_json) + [`model_dump_json`](../concepts/serialization.md#json-mode) Generates a JSON representation of the model using Pydantic's `to_json` method. Args: indent: Indentation to use in the JSON output. If None is passed, the output will be compact. + ensure_ascii: If `True`, the output is guaranteed to have all incoming non-ASCII characters escaped. + If `False` (the default), these characters will be output as-is. include: Field(s) to include in the JSON output. exclude: Field(s) to exclude from the JSON output. context: Additional context to pass to the serializer. @@ -506,6 +512,9 @@ def model_dump_json( exclude_unset: Whether to exclude fields that have not been explicitly set. exclude_defaults: Whether to exclude fields that are set to their default value. exclude_none: Whether to exclude fields that have a value of `None`. + exclude_computed_fields: Whether to exclude computed fields. + While this can be useful for round-tripping, it is usually recommended to use the dedicated + `round_trip` parameter instead. 
round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T]. warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. @@ -519,6 +528,7 @@ def model_dump_json( return self.__pydantic_serializer__.to_json( self, indent=indent, + ensure_ascii=ensure_ascii, include=include, exclude=exclude, context=context, @@ -526,6 +536,7 @@ def model_dump_json( exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, + exclude_computed_fields=exclude_computed_fields, round_trip=round_trip, warnings=warnings, fallback=fallback, @@ -539,12 +550,22 @@ def model_json_schema( ref_template: str = DEFAULT_REF_TEMPLATE, schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, mode: JsonSchemaMode = 'validation', + *, + union_format: Literal['any_of', 'primitive_type_array'] = 'any_of', ) -> dict[str, Any]: """Generates a JSON schema for a model class. Args: by_alias: Whether to use attribute aliases or not. ref_template: The reference template. + union_format: The format to use when combining schemas from unions together. Can be one of: + + - `'any_of'`: Use the [`anyOf`](https://json-schema.org/understanding-json-schema/reference/combining#anyOf) + keyword to combine schemas (the default). + - `'primitive_type_array'`: Use the [`type`](https://json-schema.org/understanding-json-schema/reference/type) + keyword as an array of strings, containing each type of the combination. If any of the schemas is not a primitive + type (`string`, `boolean`, `null`, `integer` or `number`) or contains constraints/metadata, falls back to + `any_of`. schema_generator: To override the logic used to generate the JSON schema, as a subclass of `GenerateJsonSchema` with your desired modifications mode: The mode in which to generate the schema. 
@@ -553,7 +574,12 @@ def model_json_schema( The JSON schema for the given model class. """ return model_json_schema( - cls, by_alias=by_alias, ref_template=ref_template, schema_generator=schema_generator, mode=mode + cls, + by_alias=by_alias, + ref_template=ref_template, + union_format=union_format, + schema_generator=schema_generator, + mode=mode, ) @classmethod @@ -573,7 +599,7 @@ def model_parametrized_name(cls, params: tuple[type[Any], ...]) -> str: Raises: TypeError: Raised when trying to generate concrete names for non-generic models. """ - if not issubclass(cls, typing.Generic): + if not issubclass(cls, Generic): raise TypeError('Concrete names should only be generated for generic models.') # Any strings received should represent forward references, so we handle them specially below. @@ -587,7 +613,6 @@ def model_post_init(self, context: Any, /) -> None: """Override this method to perform additional initialization after `__init__` and `model_construct`. This is useful if you want to do some validation that requires the entire model to be initialized. """ - pass @classmethod def model_rebuild( @@ -613,17 +638,20 @@ def model_rebuild( Returns `None` if the schema is already "complete" and rebuilding was not required. If rebuilding _was_ required, returns `True` if rebuilding was successful, otherwise `False`. """ - if not force and cls.__pydantic_complete__: + already_complete = cls.__pydantic_complete__ + if already_complete and not force: return None + cls.__pydantic_complete__ = False + for attr in ('__pydantic_core_schema__', '__pydantic_validator__', '__pydantic_serializer__'): - if attr in cls.__dict__: + if attr in cls.__dict__ and not isinstance(getattr(cls, attr), _mock_val_ser.MockValSer): # Deleting the validator/serializer is necessary as otherwise they can get reused in - # pydantic-core. Same applies for the core schema that can be reused in schema generation. + # pydantic-core. 
We do so only if they aren't mock instances, otherwise — as `model_rebuild()` + # isn't thread-safe — concurrent model instantiations can lead to the parent validator being used. + # Same applies for the core schema that can be reused in schema generation. delattr(cls, attr) - cls.__pydantic_complete__ = False - if _types_namespace is not None: rebuild_ns = _types_namespace elif _parent_namespace_depth > 0: @@ -637,31 +665,13 @@ def model_rebuild( parent_namespace={**rebuild_ns, **parent_ns}, ) - if not cls.__pydantic_fields_complete__: - typevars_map = _generics.get_model_typevars_map(cls) - try: - cls.__pydantic_fields__ = _fields.rebuild_model_fields( - cls, - ns_resolver=ns_resolver, - typevars_map=typevars_map, - ) - except NameError as e: - exc = PydanticUndefinedAnnotation.from_name_error(e) - _mock_val_ser.set_model_mocks(cls, f'`{exc.name}`') - if raise_errors: - raise exc from e - - if not raise_errors and not cls.__pydantic_fields_complete__: - # No need to continue with schema gen, it is guaranteed to fail - return False - - assert cls.__pydantic_fields_complete__ - return _model_construction.complete_model_class( cls, _config.ConfigWrapper(cls.model_config, check=False), + ns_resolver, raise_errors=raise_errors, - ns_resolver=ns_resolver, + # If the model was already complete, we don't need to call the hook again. + call_on_complete_hook=not already_complete, ) @classmethod @@ -670,6 +680,7 @@ def model_validate( obj: Any, *, strict: bool | None = None, + extra: ExtraValues | None = None, from_attributes: bool | None = None, context: Any | None = None, by_alias: bool | None = None, @@ -680,6 +691,8 @@ def model_validate( Args: obj: The object to validate. strict: Whether to enforce types strictly. + extra: Whether to ignore, allow, or forbid extra data during model validation. + See the [`extra` configuration value][pydantic.ConfigDict.extra] for details. from_attributes: Whether to extract data from object attributes. 
context: Additional context to pass to the validator. by_alias: Whether to use the field's alias when validating against the provided input data. @@ -701,7 +714,13 @@ def model_validate( ) return cls.__pydantic_validator__.validate_python( - obj, strict=strict, from_attributes=from_attributes, context=context, by_alias=by_alias, by_name=by_name + obj, + strict=strict, + extra=extra, + from_attributes=from_attributes, + context=context, + by_alias=by_alias, + by_name=by_name, ) @classmethod @@ -710,6 +729,7 @@ def model_validate_json( json_data: str | bytes | bytearray, *, strict: bool | None = None, + extra: ExtraValues | None = None, context: Any | None = None, by_alias: bool | None = None, by_name: bool | None = None, @@ -722,6 +742,8 @@ def model_validate_json( Args: json_data: The JSON data to validate. strict: Whether to enforce types strictly. + extra: Whether to ignore, allow, or forbid extra data during model validation. + See the [`extra` configuration value][pydantic.ConfigDict.extra] for details. context: Extra variables to pass to the validator. by_alias: Whether to use the field's alias when validating against the provided input data. by_name: Whether to use the field's name when validating against the provided input data. @@ -742,7 +764,7 @@ def model_validate_json( ) return cls.__pydantic_validator__.validate_json( - json_data, strict=strict, context=context, by_alias=by_alias, by_name=by_name + json_data, strict=strict, extra=extra, context=context, by_alias=by_alias, by_name=by_name ) @classmethod @@ -751,6 +773,7 @@ def model_validate_strings( obj: Any, *, strict: bool | None = None, + extra: ExtraValues | None = None, context: Any | None = None, by_alias: bool | None = None, by_name: bool | None = None, @@ -760,6 +783,8 @@ def model_validate_strings( Args: obj: The object containing string data to validate. strict: Whether to enforce types strictly. + extra: Whether to ignore, allow, or forbid extra data during model validation. 
+ See the [`extra` configuration value][pydantic.ConfigDict.extra] for details. context: Extra variables to pass to the validator. by_alias: Whether to use the field's alias when validating against the provided input data. by_name: Whether to use the field's name when validating against the provided input data. @@ -777,7 +802,7 @@ def model_validate_strings( ) return cls.__pydantic_validator__.validate_strings( - obj, strict=strict, context=context, by_alias=by_alias, by_name=by_name + obj, strict=strict, extra=extra, context=context, by_alias=by_alias, by_name=by_name ) @classmethod @@ -829,21 +854,36 @@ def __get_pydantic_json_schema__( @classmethod def __pydantic_init_subclass__(cls, **kwargs: Any) -> None: """This is intended to behave just like `__init_subclass__`, but is called by `ModelMetaclass` - only after the class is actually fully initialized. In particular, attributes like `model_fields` will - be present when this is called. + only after basic class initialization is complete. In particular, attributes like `model_fields` will + be present when this is called, but forward annotations are not guaranteed to be resolved yet, + meaning that creating an instance of the class may fail. This is necessary because `__init_subclass__` will always be called by `type.__new__`, and it would require a prohibitively large refactor to the `ModelMetaclass` to ensure that `type.__new__` was called in such a manner that the class would already be sufficiently initialized. This will receive the same `kwargs` that would be passed to the standard `__init_subclass__`, namely, - any kwargs passed to the class definition that aren't used internally by pydantic. + any kwargs passed to the class definition that aren't used internally by Pydantic. Args: **kwargs: Any keyword arguments passed to the class definition that aren't used internally - by pydantic. + by Pydantic. 
+ + Note: + You may want to override [`__pydantic_on_complete__()`][pydantic.main.BaseModel.__pydantic_on_complete__] + instead, which is called once the class and its fields are fully initialized and ready for validation. + """ + + @classmethod + def __pydantic_on_complete__(cls) -> None: + """This is called once the class and its fields are fully initialized and ready to be used. + + This typically happens when the class is created (just before + [`__pydantic_init_subclass__()`][pydantic.main.BaseModel.__pydantic_init_subclass__] is called on the superclass), + except when forward annotations are used that could not immediately be resolved. + In that case, it will be called later, when the model is rebuilt automatically or explicitly using + [`model_rebuild()`][pydantic.main.BaseModel.model_rebuild]. """ - pass def __class_getitem__( cls, typevar_values: type[Any] | tuple[type[Any], ...] @@ -856,7 +896,7 @@ def __class_getitem__( raise TypeError('Type parameters should be placed on typing.Generic, not BaseModel') if not hasattr(cls, '__parameters__'): raise TypeError(f'{cls} cannot be parametrized because it does not inherit from typing.Generic') - if not cls.__pydantic_generic_metadata__['parameters'] and typing.Generic not in cls.__bases__: + if not cls.__pydantic_generic_metadata__['parameters'] and Generic not in cls.__bases__: raise TypeError(f'{cls} is not a generic class') if not isinstance(typevar_values, tuple): @@ -881,7 +921,7 @@ def __class_getitem__( origin = cls.__pydantic_generic_metadata__['origin'] or cls model_name = origin.model_parametrized_name(args) params = tuple( - {param: None for param in _generics.iter_contained_typevars(typevars_map.values())} + dict.fromkeys(_generics.iter_contained_typevars(typevars_map.values())) ) # use dict as ordered set with _generics.generic_recursion_self_type(origin, args) as maybe_self_type: @@ -976,11 +1016,8 @@ def __getattr__(self, item: str) -> Any: except AttributeError: pydantic_extra = None - if 
pydantic_extra: - try: - return pydantic_extra[item] - except KeyError as exc: - raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') from exc + if pydantic_extra and item in pydantic_extra: + return pydantic_extra[item] else: if hasattr(self.__class__, item): return super().__getattribute__(item) # Raises AttributeError if appropriate @@ -1240,11 +1277,11 @@ def __str__(self) -> str: # ##### Deprecated methods from v1 ##### @property @typing_extensions.deprecated( - 'The `__fields__` attribute is deprecated, use `model_fields` instead.', category=None + 'The `__fields__` attribute is deprecated, use the `model_fields` class property instead.', category=None ) def __fields__(self) -> dict[str, FieldInfo]: warnings.warn( - 'The `__fields__` attribute is deprecated, use `model_fields` instead.', + 'The `__fields__` attribute is deprecated, use the `model_fields` class property instead.', category=PydanticDeprecatedSince20, stacklevel=2, ) @@ -1655,6 +1692,7 @@ def create_model( __module__: str = __name__, __validators__: dict[str, Callable[..., Any]] | None = None, __cls_kwargs__: dict[str, Any] | None = None, + __qualname__: str | None = None, **field_definitions: Any | tuple[str, Any], ) -> type[BaseModel]: ... @@ -1670,6 +1708,7 @@ def create_model( __module__: str = __name__, __validators__: dict[str, Callable[..., Any]] | None = None, __cls_kwargs__: dict[str, Any] | None = None, + __qualname__: str | None = None, **field_definitions: Any | tuple[str, Any], ) -> type[ModelT]: ... 
@@ -1684,6 +1723,7 @@ def create_model( # noqa: C901 __module__: str | None = None, __validators__: dict[str, Callable[..., Any]] | None = None, __cls_kwargs__: dict[str, Any] | None = None, + __qualname__: str | None = None, # TODO PEP 747: replace `Any` by the TypeForm: **field_definitions: Any | tuple[str, Any], ) -> type[ModelT]: @@ -1693,6 +1733,11 @@ def create_model( # noqa: C901 Dynamically creates and returns a new Pydantic model, in other words, `create_model` dynamically creates a subclass of [`BaseModel`][pydantic.BaseModel]. + !!! warning + This function may execute arbitrary code contained in field annotations, if string references need to be evaluated. + + See [Security implications of introspecting annotations](https://docs.python.org/3/library/annotationlib.html#annotationlib-security) for more information. + Args: model_name: The name of the newly created model. __config__: The configuration of the new model. @@ -1704,6 +1749,7 @@ def create_model( # noqa: C901 be added to the model, and the values are the validation methods themselves. You can read more about functional validators [here](https://docs.pydantic.dev/2.9/concepts/validators/#field-validators). __cls_kwargs__: A dictionary of keyword arguments for class creation, such as `metaclass`. + __qualname__: The qualified name of the newly created model. **field_definitions: Field definitions of the new model. Either: - a single element, representing the type annotation of the field. 
@@ -1746,7 +1792,9 @@ def create_model( # noqa: C901 namespace: dict[str, Any] = {'__annotations__': annotations, '__module__': __module__} if __doc__: - namespace.update({'__doc__': __doc__}) + namespace['__doc__'] = __doc__ + if __qualname__ is not None: + namespace['__qualname__'] = __qualname__ if __validators__: namespace.update(__validators__) namespace.update(fields) diff --git a/blimgui/dist64/pydantic/mypy.py b/blimgui/dist64/pydantic/mypy.py index 8fcb54d..6e8228e 100644 --- a/blimgui/dist64/pydantic/mypy.py +++ b/blimgui/dist64/pydantic/mypy.py @@ -955,15 +955,9 @@ def set_frozen(self, fields: list[PydanticModelField], api: SemanticAnalyzerPlug elif isinstance(var, PlaceholderNode) and not self._api.final_iteration: # See https://github.com/pydantic/pydantic/issues/5191 to hit this branch for test coverage self._api.defer() - else: # pragma: no cover - # I don't know whether it's possible to hit this branch, but I've added it for safety - try: - var_str = str(var) - except TypeError: - # This happens for PlaceholderNode; perhaps it will happen for other types in the future.. - var_str = repr(var) - detail = f'sym_node.node: {var_str} (of type {var.__class__})' - error_unexpected_behavior(detail, self._api, self._cls) + # `var` can also be a FuncDef or Decorator node (e.g. when overriding a field with a function or property). + # In that case, we don't want to do anything. Mypy will already raise an error that a field was not properly + # overridden. 
else: var = field.to_var(info, api, use_alias=False) var.info = info @@ -1319,9 +1313,9 @@ def add_method( arg_names.append(arg.variable.name) arg_kinds.append(arg.kind) - signature = CallableType(arg_types, arg_kinds, arg_names, return_type, function_type) - if tvar_def: - signature.variables = [tvar_def] + signature = CallableType( + arg_types, arg_kinds, arg_names, return_type, function_type, variables=[tvar_def] if tvar_def else None + ) func = FuncDef(name, args, Block([PassStmt()])) func.info = info @@ -1373,7 +1367,7 @@ def parse_toml(config_file: str) -> dict[str, Any] | None: except ImportError: # pragma: no cover import warnings - warnings.warn('No TOML parser installed, cannot read configuration from `pyproject.toml`.') + warnings.warn('No TOML parser installed, cannot read configuration from `pyproject.toml`.', stacklevel=2) return None with open(config_file, 'rb') as rf: diff --git a/blimgui/dist64/pydantic/networks.py b/blimgui/dist64/pydantic/networks.py index 2221578..04a7cac 100644 --- a/blimgui/dist64/pydantic/networks.py +++ b/blimgui/dist64/pydantic/networks.py @@ -78,6 +78,7 @@ class UrlConstraints: default_host: The default host. Defaults to `None`. default_port: The default port. Defaults to `None`. default_path: The default path. Defaults to `None`. + preserve_empty_path: Whether to preserve empty URL paths. Defaults to `None`. """ max_length: int | None = None @@ -86,6 +87,7 @@ class UrlConstraints: default_host: str | None = None default_port: int | None = None default_path: str | None = None + preserve_empty_path: bool | None = None def __hash__(self) -> int: return hash( @@ -96,6 +98,7 @@ def __hash__(self) -> int: self.default_host, self.default_port, self.default_path, + self.preserve_empty_path, ) ) @@ -831,6 +834,22 @@ class MongoDsn(_BaseMultiHostUrl): * Database name not required * Port not required * User info may be passed without user part (e.g., `mongodb://mongodb0.example.com:27017`). + + !!! 
warning + If a port isn't specified, the default MongoDB port `27017` will be used. If this behavior is + undesirable, you can use the following: + + ```python + from typing import Annotated + + from pydantic import UrlConstraints + from pydantic_core import MultiHostUrl + + MongoDsnNoDefaultPort = Annotated[ + MultiHostUrl, + UrlConstraints(allowed_schemes=['mongodb', 'mongodb+srv']), + ] + ``` """ _constraints = UrlConstraints(allowed_schemes=['mongodb', 'mongodb+srv'], default_port=27017) @@ -945,7 +964,7 @@ def import_email_validator() -> None: try: import email_validator except ImportError as e: - raise ImportError('email-validator is not installed, run `pip install pydantic[email]`') from e + raise ImportError("email-validator is not installed, run `pip install 'pydantic[email]'`") from e if not version('email-validator').partition('.')[0] == '2': raise ImportError('email-validator version >= 2.0 required, run pip install -U email-validator') diff --git a/blimgui/dist64/pydantic/plugin/__init__.py b/blimgui/dist64/pydantic/plugin/__init__.py index 3620305..707ef84 100644 --- a/blimgui/dist64/pydantic/plugin/__init__.py +++ b/blimgui/dist64/pydantic/plugin/__init__.py @@ -8,9 +8,13 @@ from typing import Any, Callable, Literal, NamedTuple -from pydantic_core import CoreConfig, CoreSchema, ValidationError +from pydantic_core._pydantic_core import ValidationError +from pydantic_core.core_schema import CoreSchema + from typing_extensions import Protocol, TypeAlias +from pydantic.config import ExtraValues + __all__ = ( 'PydanticPluginProtocol', 'BaseValidateHandlerProtocol', @@ -44,7 +48,7 @@ def new_schema_validator( schema_type: Any, schema_type_path: SchemaTypePath, schema_kind: SchemaKind, - config: CoreConfig | None, + config: dict | None, plugin_settings: dict[str, object], ) -> tuple[ ValidatePythonHandlerProtocol | None, ValidateJsonHandlerProtocol | None, ValidateStringsHandlerProtocol | None @@ -113,8 +117,9 @@ def on_enter( input: Any, *, strict: bool | 
None = None, + extra: ExtraValues | None = None, from_attributes: bool | None = None, - context: dict[str, Any] | None = None, + context: Any | None = None, self_instance: Any | None = None, by_alias: bool | None = None, by_name: bool | None = None, @@ -124,6 +129,7 @@ def on_enter( Args: input: The input to be validated. strict: Whether to validate the object in strict mode. + extra: Whether to ignore, allow, or forbid extra data during model validation. from_attributes: Whether to validate objects as inputs by extracting attributes. context: The context to use for validation, this is passed to functional validators. self_instance: An instance of a model to set attributes on from validation, this is used when running @@ -131,7 +137,6 @@ def on_enter( by_alias: Whether to use the field's alias to match the input data to an attribute. by_name: Whether to use the field's name to match the input data to an attribute. """ - pass class ValidateJsonHandlerProtocol(BaseValidateHandlerProtocol, Protocol): @@ -142,7 +147,8 @@ def on_enter( input: str | bytes | bytearray, *, strict: bool | None = None, - context: dict[str, Any] | None = None, + extra: ExtraValues | None = None, + context: Any | None = None, self_instance: Any | None = None, by_alias: bool | None = None, by_name: bool | None = None, @@ -152,13 +158,13 @@ def on_enter( Args: input: The JSON data to be validated. strict: Whether to validate the object in strict mode. + extra: Whether to ignore, allow, or forbid extra data during model validation. context: The context to use for validation, this is passed to functional validators. self_instance: An instance of a model to set attributes on from validation, this is used when running validation from the `__init__` method of a model. by_alias: Whether to use the field's alias to match the input data to an attribute. by_name: Whether to use the field's name to match the input data to an attribute. 
""" - pass StringInput: TypeAlias = 'dict[str, StringInput]' @@ -172,7 +178,8 @@ def on_enter( input: StringInput, *, strict: bool | None = None, - context: dict[str, Any] | None = None, + extra: ExtraValues | None = None, + context: Any | None = None, by_alias: bool | None = None, by_name: bool | None = None, ) -> None: @@ -181,8 +188,8 @@ def on_enter( Args: input: The string data to be validated. strict: Whether to validate the object in strict mode. + extra: Whether to ignore, allow, or forbid extra data during model validation. context: The context to use for validation, this is passed to functional validators. by_alias: Whether to use the field's alias to match the input data to an attribute. by_name: Whether to use the field's name to match the input data to an attribute. """ - pass diff --git a/blimgui/dist64/pydantic/plugin/_loader.py b/blimgui/dist64/pydantic/plugin/_loader.py index 7d1f0f2..a789092 100644 --- a/blimgui/dist64/pydantic/plugin/_loader.py +++ b/blimgui/dist64/pydantic/plugin/_loader.py @@ -49,7 +49,8 @@ def get_plugins() -> Iterable[PydanticPluginProtocol]: except (ImportError, AttributeError) as e: warnings.warn( f'{e.__class__.__name__} while loading the `{entry_point.name}` Pydantic plugin, ' - f'this plugin will not be installed.\n\n{e!r}' + f'this plugin will not be installed.\n\n{e!r}', + stacklevel=2, ) finally: _loading_plugins = False diff --git a/blimgui/dist64/pydantic/plugin/_schema_validator.py b/blimgui/dist64/pydantic/plugin/_schema_validator.py index 83f2562..57d4f36 100644 --- a/blimgui/dist64/pydantic/plugin/_schema_validator.py +++ b/blimgui/dist64/pydantic/plugin/_schema_validator.py @@ -6,7 +6,8 @@ from collections.abc import Iterable from typing import TYPE_CHECKING, Any, Callable, Literal, TypeVar -from pydantic_core import CoreConfig, CoreSchema, SchemaValidator, ValidationError +from pydantic_core._pydantic_core import SchemaValidator, ValidationError +from pydantic_core.core_schema import CoreSchema from 
typing_extensions import ParamSpec if TYPE_CHECKING: diff --git a/blimgui/dist64/pydantic/root_model.py b/blimgui/dist64/pydantic/root_model.py index 8b3ff01..80a5420 100644 --- a/blimgui/dist64/pydantic/root_model.py +++ b/blimgui/dist64/pydantic/root_model.py @@ -2,20 +2,17 @@ from __future__ import annotations as _annotations -import typing from copy import copy, deepcopy +from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar from pydantic_core import PydanticUndefined +from typing_extensions import Self, dataclass_transform from . import PydanticUserError from ._internal import _model_construction, _repr from .main import BaseModel, _object_setattr -if typing.TYPE_CHECKING: - from typing import Any, Literal - - from typing_extensions import Self, dataclass_transform - +if TYPE_CHECKING: from .fields import Field as PydanticModelField from .fields import PrivateAttr as PydanticModelPrivateAttr @@ -29,10 +26,10 @@ class _RootModelMetaclass(_model_construction.ModelMetaclass): ... __all__ = ('RootModel',) -RootModelRootType = typing.TypeVar('RootModelRootType') +RootModelRootType = TypeVar('RootModelRootType') -class RootModel(BaseModel, typing.Generic[RootModelRootType], metaclass=_RootModelMetaclass): +class RootModel(BaseModel, Generic[RootModelRootType], metaclass=_RootModelMetaclass): """!!! 
abstract "Usage Documentation" [`RootModel` and Custom Root Types](../concepts/models.md#rootmodel-and-custom-root-types) @@ -116,7 +113,7 @@ def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: _object_setattr(m, '__pydantic_fields_set__', copy(self.__pydantic_fields_set__)) return m - if typing.TYPE_CHECKING: + if TYPE_CHECKING: def model_dump( # type: ignore self, @@ -129,6 +126,7 @@ def model_dump( # type: ignore exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, serialize_as_any: bool = False, diff --git a/blimgui/dist64/pydantic/type_adapter.py b/blimgui/dist64/pydantic/type_adapter.py index a6cdaba..6f1a082 100644 --- a/blimgui/dist64/pydantic/type_adapter.py +++ b/blimgui/dist64/pydantic/type_adapter.py @@ -3,6 +3,7 @@ from __future__ import annotations as _annotations import sys +import types from collections.abc import Callable, Iterable from dataclasses import is_dataclass from types import FrameType @@ -23,7 +24,7 @@ from pydantic.main import BaseModel, IncEx from ._internal import _config, _generate_schema, _mock_val_ser, _namespace_utils, _repr, _typing_extra, _utils -from .config import ConfigDict +from .config import ConfigDict, ExtraValues from .errors import PydanticUndefinedAnnotation from .json_schema import ( DEFAULT_REF_TEMPLATE, @@ -215,19 +216,35 @@ def __init__( self.pydantic_complete = False parent_frame = self._fetch_parent_frame() - if parent_frame is not None: - globalns = parent_frame.f_globals - # Do not provide a local ns if the type adapter happens to be instantiated at the module level: - localns = parent_frame.f_locals if parent_frame.f_locals is not globalns else {} + if isinstance(type, types.FunctionType): + # Special case functions, which are *not* pushed to the `NsResolver` stack and without this special case + # would only have access to the parent 
namespace where the `TypeAdapter` was instantiated (if the function is defined + # in another module, we need to look at that module's globals). + if parent_frame is not None: + # `f_locals` is the namespace where the type adapter was instantiated (~ to `f_globals` if at the module level): + parent_ns = parent_frame.f_locals + else: # pragma: no cover + parent_ns = None + globalns, localns = _namespace_utils.ns_for_function( + type, + parent_namespace=parent_ns, + ) + parent_namespace = None else: - globalns = {} - localns = {} + if parent_frame is not None: + globalns = parent_frame.f_globals + # Do not provide a local ns if the type adapter happens to be instantiated at the module level: + localns = parent_frame.f_locals if parent_frame.f_locals is not globalns else {} + else: # pragma: no cover + globalns = {} + localns = {} + parent_namespace = localns self._module_name = module or cast(str, globalns.get('__name__', '')) self._init_core_attrs( ns_resolver=_namespace_utils.NsResolver( namespaces_tuple=_namespace_utils.NamespacesTuple(locals=localns, globals=globalns), - parent_namespace=localns, + parent_namespace=parent_namespace, ), force=False, ) @@ -384,8 +401,9 @@ def validate_python( /, *, strict: bool | None = None, + extra: ExtraValues | None = None, from_attributes: bool | None = None, - context: dict[str, Any] | None = None, + context: Any | None = None, experimental_allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False, by_alias: bool | None = None, by_name: bool | None = None, @@ -395,6 +413,8 @@ def validate_python( Args: object: The Python object to validate against the model. strict: Whether to strictly check types. + extra: Whether to ignore, allow, or forbid extra data during model validation. + See the [`extra` configuration value][pydantic.ConfigDict.extra] for details. from_attributes: Whether to extract data from object attributes. context: Additional context to pass to the validator. 
experimental_allow_partial: **Experimental** whether to enable @@ -421,6 +441,7 @@ def validate_python( return self.validator.validate_python( object, strict=strict, + extra=extra, from_attributes=from_attributes, context=context, allow_partial=experimental_allow_partial, @@ -434,7 +455,8 @@ def validate_json( /, *, strict: bool | None = None, - context: dict[str, Any] | None = None, + extra: ExtraValues | None = None, + context: Any | None = None, experimental_allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False, by_alias: bool | None = None, by_name: bool | None = None, @@ -447,6 +469,8 @@ def validate_json( Args: data: The JSON data to validate against the model. strict: Whether to strictly check types. + extra: Whether to ignore, allow, or forbid extra data during model validation. + See the [`extra` configuration value][pydantic.ConfigDict.extra] for details. context: Additional context to use during validation. experimental_allow_partial: **Experimental** whether to enable [partial validation](../concepts/experimental.md#partial-validation), e.g. to process streams. @@ -468,6 +492,7 @@ def validate_json( return self.validator.validate_json( data, strict=strict, + extra=extra, context=context, allow_partial=experimental_allow_partial, by_alias=by_alias, @@ -480,7 +505,8 @@ def validate_strings( /, *, strict: bool | None = None, - context: dict[str, Any] | None = None, + extra: ExtraValues | None = None, + context: Any | None = None, experimental_allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False, by_alias: bool | None = None, by_name: bool | None = None, @@ -490,6 +516,8 @@ def validate_strings( Args: obj: The object contains string data to validate. strict: Whether to strictly check types. + extra: Whether to ignore, allow, or forbid extra data during model validation. + See the [`extra` configuration value][pydantic.ConfigDict.extra] for details. context: Additional context to use during validation. 
experimental_allow_partial: **Experimental** whether to enable [partial validation](../concepts/experimental.md#partial-validation), e.g. to process streams. @@ -511,13 +539,14 @@ def validate_strings( return self.validator.validate_strings( obj, strict=strict, + extra=extra, context=context, allow_partial=experimental_allow_partial, by_alias=by_alias, by_name=by_name, ) - def get_default_value(self, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> Some[T] | None: + def get_default_value(self, *, strict: bool | None = None, context: Any | None = None) -> Some[T] | None: """Get the default value for the wrapped type. Args: @@ -541,11 +570,12 @@ def dump_python( exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, fallback: Callable[[Any], Any] | None = None, serialize_as_any: bool = False, - context: dict[str, Any] | None = None, + context: Any | None = None, ) -> Any: """Dump an instance of the adapted type to a Python object. @@ -558,6 +588,9 @@ def dump_python( exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with None values. + exclude_computed_fields: Whether to exclude computed fields. + While this can be useful for round-tripping, it is usually recommended to use the dedicated + `round_trip` parameter instead. round_trip: Whether to output the serialized data in a way that is compatible with deserialization. warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. 
@@ -578,6 +611,7 @@ def dump_python( exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, + exclude_computed_fields=exclude_computed_fields, round_trip=round_trip, warnings=warnings, fallback=fallback, @@ -591,17 +625,19 @@ def dump_json( /, *, indent: int | None = None, + ensure_ascii: bool = False, include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, fallback: Callable[[Any], Any] | None = None, serialize_as_any: bool = False, - context: dict[str, Any] | None = None, + context: Any | None = None, ) -> bytes: """!!! abstract "Usage Documentation" [JSON Serialization](../concepts/json.md#json-serialization) @@ -611,12 +647,17 @@ def dump_json( Args: instance: The instance to be serialized. indent: Number of spaces for JSON indentation. + ensure_ascii: If `True`, the output is guaranteed to have all incoming non-ASCII characters escaped. + If `False` (the default), these characters will be output as-is. include: Fields to include. exclude: Fields to exclude. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with a value of `None`. + exclude_computed_fields: Whether to exclude computed fields. + While this can be useful for round-tripping, it is usually recommended to use the dedicated + `round_trip` parameter instead. round_trip: Whether to serialize and deserialize the instance to ensure round-tripping. warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors, "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. 
@@ -631,12 +672,14 @@ def dump_json( return self.serializer.to_json( instance, indent=indent, + ensure_ascii=ensure_ascii, include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, + exclude_computed_fields=exclude_computed_fields, round_trip=round_trip, warnings=warnings, fallback=fallback, @@ -649,6 +692,7 @@ def json_schema( *, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE, + union_format: Literal['any_of', 'primitive_type_array'] = 'any_of', schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, mode: JsonSchemaMode = 'validation', ) -> dict[str, Any]: @@ -657,13 +701,26 @@ def json_schema( Args: by_alias: Whether to use alias names for field names. ref_template: The format string used for generating $ref strings. + union_format: The format to use when combining schemas from unions together. Can be one of: + + - `'any_of'`: Use the [`anyOf`](https://json-schema.org/understanding-json-schema/reference/combining#anyOf) + keyword to combine schemas (the default). + - `'primitive_type_array'`: Use the [`type`](https://json-schema.org/understanding-json-schema/reference/type) + keyword as an array of strings, containing each type of the combination. If any of the schemas is not a primitive + type (`string`, `boolean`, `null`, `integer` or `number`) or contains constraints/metadata, falls back to + `any_of`. + schema_generator: To override the logic used to generate the JSON schema, as a subclass of + `GenerateJsonSchema` with your desired modifications + mode: The mode in which to generate the schema. schema_generator: The generator class used for creating the schema. mode: The mode to use for schema generation. Returns: The JSON schema for the model as a dictionary. 
""" - schema_generator_instance = schema_generator(by_alias=by_alias, ref_template=ref_template) + schema_generator_instance = schema_generator( + by_alias=by_alias, ref_template=ref_template, union_format=union_format + ) if isinstance(self.core_schema, _mock_val_ser.MockCoreSchema): self.core_schema.rebuild() assert not isinstance(self.core_schema, _mock_val_ser.MockCoreSchema), 'this is a bug! please report it' @@ -678,6 +735,7 @@ def json_schemas( title: str | None = None, description: str | None = None, ref_template: str = DEFAULT_REF_TEMPLATE, + union_format: Literal['any_of', 'primitive_type_array'] = 'any_of', schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, ) -> tuple[dict[tuple[JsonSchemaKeyT, JsonSchemaMode], JsonSchemaValue], JsonSchemaValue]: """Generate a JSON schema including definitions from multiple type adapters. @@ -690,6 +748,14 @@ def json_schemas( title: The title for the schema. description: The description for the schema. ref_template: The format string used for generating $ref strings. + union_format: The format to use when combining schemas from unions together. Can be one of: + + - `'any_of'`: Use the [`anyOf`](https://json-schema.org/understanding-json-schema/reference/combining#anyOf) + keyword to combine schemas (the default). + - `'primitive_type_array'`: Use the [`type`](https://json-schema.org/understanding-json-schema/reference/type) + keyword as an array of strings, containing each type of the combination. If any of the schemas is not a primitive + type (`string`, `boolean`, `null`, `integer` or `number`) or contains constraints/metadata, falls back to + `any_of`. schema_generator: The generator class used for creating the schema. Returns: @@ -702,7 +768,9 @@ def json_schemas( element, along with the optional title and description keys. 
""" - schema_generator_instance = schema_generator(by_alias=by_alias, ref_template=ref_template) + schema_generator_instance = schema_generator( + by_alias=by_alias, ref_template=ref_template, union_format=union_format + ) inputs_ = [] for key, mode, adapter in inputs: diff --git a/blimgui/dist64/pydantic/types.py b/blimgui/dist64/pydantic/types.py index b5c4fd6..59160ab 100644 --- a/blimgui/dist64/pydantic/types.py +++ b/blimgui/dist64/pydantic/types.py @@ -693,7 +693,7 @@ def conbytes( @_dataclasses.dataclass(frozen=True) class StringConstraints(annotated_types.GroupedMetadata): """!!! abstract "Usage Documentation" - [`StringConstraints`](../concepts/fields.md#string-constraints) + [String types](./standard_library_types.md#strings) A field metadata class to apply constraints to `str` types. Use this class as an annotation via [`Annotated`](https://docs.python.org/3/library/typing.html#typing.Annotated), as seen below. @@ -1028,8 +1028,7 @@ def _serialize(v: Any) -> str: return 'sys.stdin' elif v.name == '': return 'sys.stderr' - else: - return v + return v def __repr__(self) -> str: return 'ImportString' @@ -1142,7 +1141,7 @@ class UuidVersion: Use this class as an annotation via [`Annotated`](https://docs.python.org/3/library/typing.html#typing.Annotated), as seen below. Attributes: - uuid_version: The version of the UUID. Must be one of 1, 3, 4, 5, or 7. + uuid_version: The version of the UUID. Must be one of 1, 3, 4, 5, 6, 7 or 8. 
Example: ```python @@ -1166,15 +1165,10 @@ def __get_pydantic_json_schema__( return field_schema def __get_pydantic_core_schema__(self, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: - if isinstance(self, source): - # used directly as a type - return core_schema.uuid_schema(version=self.uuid_version) - else: - # update existing schema with self.uuid_version - schema = handler(source) - _check_annotated_type(schema['type'], 'uuid', self.__class__.__name__) - schema['version'] = self.uuid_version # type: ignore - return schema + schema = handler(source) + _check_annotated_type(schema['type'], 'uuid', self.__class__.__name__) + schema['version'] = self.uuid_version # type: ignore + return schema def __hash__(self) -> int: return hash(type(self.uuid_version)) @@ -1535,7 +1529,8 @@ def __eq__(self, other: Any) -> bool: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SECRET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -SecretType = TypeVar('SecretType') +# The `Secret` class being conceptually immutable, make the type variable covariant: +SecretType = TypeVar('SecretType', covariant=True) class _SecretBase(Generic[SecretType]): @@ -3074,9 +3069,11 @@ def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaH return handler(Annotated[source_type, Field(discriminator=self.discriminator)]) else: original_schema = handler(source_type) - return self._convert_schema(original_schema) + return self._convert_schema(original_schema, handler) - def _convert_schema(self, original_schema: core_schema.CoreSchema) -> core_schema.TaggedUnionSchema: + def _convert_schema( + self, original_schema: core_schema.CoreSchema, handler: GetCoreSchemaHandler | None = None + ) -> core_schema.TaggedUnionSchema: if original_schema['type'] != 'union': # This likely indicates that the schema was a single-item union that was simplified. 
# In this case, we do the same thing we do in @@ -3093,10 +3090,23 @@ def _convert_schema(self, original_schema: core_schema.CoreSchema) -> core_schem if metadata is not None: tag = metadata.get('pydantic_internal_union_tag_key') or tag if tag is None: - raise PydanticUserError( - f'`Tag` not provided for choice {choice} used with `Discriminator`', - code='callable-discriminator-no-tag', - ) + # `handler` is None when this method is called from `apply_discriminator()` (deferred discriminators) + if handler is not None and choice['type'] == 'definition-ref': + # If choice was built from a PEP 695 type alias, try to resolve the def: + try: + choice = handler.resolve_ref_schema(choice) + except LookupError: + pass + else: + metadata = cast('CoreMetadata | None', choice.get('metadata')) + if metadata is not None: + tag = metadata.get('pydantic_internal_union_tag_key') + + if tag is None: + raise PydanticUserError( + f'`Tag` not provided for choice {choice} used with `Discriminator`', + code='callable-discriminator-no-tag', + ) tagged_union_choices[tag] = choice # Have to do these verbose checks to ensure falsy values ('' and {}) don't get ignored diff --git a/blimgui/dist64/pydantic/v1/__init__.py b/blimgui/dist64/pydantic/v1/__init__.py index 6ad3f46..4807865 100644 --- a/blimgui/dist64/pydantic/v1/__init__.py +++ b/blimgui/dist64/pydantic/v1/__init__.py @@ -1,4 +1,7 @@ # flake8: noqa +import sys +import warnings + from pydantic.v1 import dataclasses from pydantic.v1.annotated_types import create_model_from_namedtuple, create_model_from_typeddict from pydantic.v1.class_validators import root_validator, validator @@ -129,3 +132,11 @@ 'compiled', 'VERSION', ] + + +if sys.version_info >= (3, 14): + warnings.warn( + "Core Pydantic V1 functionality isn't compatible with Python 3.14 or greater.", + UserWarning, + stacklevel=2, + ) diff --git a/blimgui/dist64/pydantic/v1/mypy.py b/blimgui/dist64/pydantic/v1/mypy.py index f4e27ab..0a77569 100644 --- 
a/blimgui/dist64/pydantic/v1/mypy.py +++ b/blimgui/dist64/pydantic/v1/mypy.py @@ -209,14 +209,14 @@ def _pydantic_field_callback(self, ctx: FunctionContext) -> 'Type': default_factory_type = default_factory_type.items()[0] # type: ignore[operator] if isinstance(default_factory_type, CallableType): - ret_type = default_factory_type.ret_type - # mypy doesn't think `ret_type` has `args`, you'd think mypy should know, - # add this check in case it varies by version - args = getattr(ret_type, 'args', None) - if args: - if all(isinstance(arg, TypeVarType) for arg in args): - # Looks like the default factory is a type like `list` or `dict`, replace all args with `Any` - ret_type.args = tuple(default_any_type for _ in args) # type: ignore[attr-defined] + ret_type = get_proper_type(default_factory_type.ret_type) + if ( + isinstance(ret_type, Instance) + and ret_type.args + and all(isinstance(arg, TypeVarType) for arg in ret_type.args) + ): + # Looks like the default factory is a type like `list` or `dict`, replace all args with `Any` + ret_type = ret_type.copy_modified(args=[default_any_type] * len(ret_type.args)) return ret_type return default_any_type @@ -863,9 +863,9 @@ def add_method( arg_kinds.append(arg.kind) function_type = ctx.api.named_type(f'{BUILTINS_NAME}.function') - signature = CallableType(arg_types, arg_kinds, arg_names, return_type, function_type) - if tvar_def: - signature.variables = [tvar_def] + signature = CallableType( + arg_types, arg_kinds, arg_names, return_type, function_type, variables=[tvar_def] if tvar_def else None + ) func = FuncDef(name, args, Block([PassStmt()])) func.info = info diff --git a/blimgui/dist64/pydantic/v1/types.py b/blimgui/dist64/pydantic/v1/types.py index 0cd789a..e1840d9 100644 --- a/blimgui/dist64/pydantic/v1/types.py +++ b/blimgui/dist64/pydantic/v1/types.py @@ -829,7 +829,7 @@ class JsonWrapper: class JsonMeta(type): def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]: if t is Any: - return Json # allow Json[Any] to 
replecate plain Json + return Json # allow Json[Any] to replicate plain Json return _registered(type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t})) diff --git a/blimgui/dist64/pydantic/v1/typing.py b/blimgui/dist64/pydantic/v1/typing.py index 3038ccd..9741161 100644 --- a/blimgui/dist64/pydantic/v1/typing.py +++ b/blimgui/dist64/pydantic/v1/typing.py @@ -1,3 +1,5 @@ +import functools +import operator import sys import typing from collections.abc import Callable @@ -199,9 +201,6 @@ def convert_generics(tp: Type[Any]) -> Type[Any]: return tp else: - from typing import _UnionGenericAlias # type: ignore - - from typing_extensions import _AnnotatedAlias def convert_generics(tp: Type[Any]) -> Type[Any]: """ @@ -221,7 +220,7 @@ def convert_generics(tp: Type[Any]) -> Type[Any]: # typing.Annotated needs special treatment if origin is Annotated: - return _AnnotatedAlias(convert_generics(args[0]), args[1:]) + return Annotated[(convert_generics(args[0]), *args[1:])] # type: ignore # recursively replace `str` instances inside of `GenericAlias` with `ForwardRef(arg)` converted = tuple( @@ -235,7 +234,7 @@ def convert_generics(tp: Type[Any]) -> Type[Any]: return TypingGenericAlias(origin, converted) elif isinstance(tp, TypesUnionType): # recreate types.UnionType (PEP604, Python >= 3.10) - return _UnionGenericAlias(origin, converted) + return functools.reduce(operator.or_, converted) # type: ignore else: try: setattr(tp, '__args__', converted) diff --git a/blimgui/dist64/pydantic/version.py b/blimgui/dist64/pydantic/version.py index 0396a7c..0c32380 100644 --- a/blimgui/dist64/pydantic/version.py +++ b/blimgui/dist64/pydantic/version.py @@ -2,12 +2,24 @@ from __future__ import annotations as _annotations -from pydantic_core import __version__ as __pydantic_core_version__ +import sys + +from pydantic_core._pydantic_core import __version__ as __pydantic_core_version__ __all__ = 'VERSION', 'version_info' -VERSION = '2.11.4' -"""The version of Pydantic.""" +VERSION = '2.12.5' 
+"""The version of Pydantic. + +This version specifier is guaranteed to be compliant with the [specification], +introduced by [PEP 440]. + +[specification]: https://packaging.python.org/en/latest/specifications/version-specifiers/ +[PEP 440]: https://peps.python.org/pep-0440/ +""" + +# Keep this in sync with the version constraint in the `pyproject.toml` dependencies: +_COMPATIBLE_PYDANTIC_CORE_VERSION = '2.41.5' def version_short() -> str: @@ -22,7 +34,6 @@ def version_info() -> str: """Return complete version information for Pydantic and its dependencies.""" import importlib.metadata import platform - import sys from pathlib import Path import pydantic_core._pydantic_core as pdc @@ -53,8 +64,8 @@ def version_info() -> str: info = { 'pydantic version': VERSION, - 'pydantic-core version': pdc.__version__, - 'pydantic-core build': getattr(pdc, 'build_info', None) or pdc.build_profile, + 'pydantic-core version': __pydantic_core_version__, + 'pydantic-core build': getattr(pdc, 'build_info', None) or pdc.build_profile, # pyright: ignore[reportPrivateImportUsage] 'python version': sys.version, 'platform': platform.platform(), 'related packages': ' '.join(related_packages), @@ -65,8 +76,26 @@ def version_info() -> str: def check_pydantic_core_version() -> bool: """Check that the installed `pydantic-core` dependency is compatible.""" - # Keep this in sync with the version constraint in the `pyproject.toml` dependencies: - return __pydantic_core_version__ == '2.33.2' + return __pydantic_core_version__ == _COMPATIBLE_PYDANTIC_CORE_VERSION + + +def _ensure_pydantic_core_version() -> None: # pragma: no cover + if not check_pydantic_core_version(): + raise_error = True + # Do not raise the error if pydantic is installed in editable mode (i.e. 
in development): + if sys.version_info >= (3, 13): # origin property added in 3.13 + from importlib.metadata import distribution + + dist = distribution('pydantic') + if getattr(getattr(dist.origin, 'dir_info', None), 'editable', False): + raise_error = False + + if raise_error: + raise SystemError( + f'The installed pydantic-core version ({__pydantic_core_version__}) is incompatible ' + f'with the current pydantic version, which requires {_COMPATIBLE_PYDANTIC_CORE_VERSION}. ' + "If you encounter this error, make sure that you haven't upgraded pydantic-core manually." + ) def parse_mypy_version(version: str) -> tuple[int, int, int]: diff --git a/blimgui/dist64/pydantic/warnings.py b/blimgui/dist64/pydantic/warnings.py index 6e874dd..2e2dd83 100644 --- a/blimgui/dist64/pydantic/warnings.py +++ b/blimgui/dist64/pydantic/warnings.py @@ -10,8 +10,12 @@ 'PydanticDeprecatedSince29', 'PydanticDeprecatedSince210', 'PydanticDeprecatedSince211', + 'PydanticDeprecatedSince212', 'PydanticDeprecationWarning', 'PydanticExperimentalWarning', + 'ArbitraryTypeWarning', + 'UnsupportedFieldAttributeWarning', + 'TypedDictExtraConfigWarning', ) @@ -84,6 +88,13 @@ def __init__(self, message: str, *args: object) -> None: super().__init__(message, *args, since=(2, 11), expected_removal=(3, 0)) +class PydanticDeprecatedSince212(PydanticDeprecationWarning): + """A specific `PydanticDeprecationWarning` subclass defining functionality deprecated since Pydantic 2.12.""" + + def __init__(self, message: str, *args: object) -> None: + super().__init__(message, *args, since=(2, 12), expected_removal=(3, 0)) + + class GenericBeforeBaseModelWarning(Warning): pass @@ -91,6 +102,21 @@ class GenericBeforeBaseModelWarning(Warning): class PydanticExperimentalWarning(Warning): """A Pydantic specific experimental functionality warning. - This warning is raised when using experimental functionality in Pydantic. 
It is raised to warn users that the functionality may change or be removed in future versions of Pydantic. """ + + +class CoreSchemaGenerationWarning(UserWarning): + """A warning raised during core schema generation.""" + + +class ArbitraryTypeWarning(CoreSchemaGenerationWarning): + """A warning raised when Pydantic fails to generate a core schema for an arbitrary type.""" + + +class UnsupportedFieldAttributeWarning(CoreSchemaGenerationWarning): + """A warning raised when a `Field()` attribute isn't supported in the context it is used.""" + + +class TypedDictExtraConfigWarning(CoreSchemaGenerationWarning): + """A warning raised when the [`extra`][pydantic.ConfigDict.extra] configuration is incompatible with the `closed` or `extra_items` specification.""" diff --git a/blimgui/dist64/pydantic_core-2.33.2.dist-info/RECORD b/blimgui/dist64/pydantic_core-2.33.2.dist-info/RECORD deleted file mode 100644 index c81b96d..0000000 --- a/blimgui/dist64/pydantic_core-2.33.2.dist-info/RECORD +++ /dev/null @@ -1,12 +0,0 @@ -pydantic_core-2.33.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pydantic_core-2.33.2.dist-info/METADATA,sha256=kEKXIAFJ46fZAfTe5HqUsrpIGbiMG3DinWzxgJAo4ko,6883 -pydantic_core-2.33.2.dist-info/RECORD,, -pydantic_core-2.33.2.dist-info/WHEEL,sha256=6gg8M7kMeWmDzkhJuJzWLhBYdt7OGDF81wlSJjN85_E,96 -pydantic_core-2.33.2.dist-info/licenses/LICENSE,sha256=--f2FfGNE1wHAA5ahjJdsn9Cx3KWme8WasH3o_RYa_Q,1101 -pydantic_core/__init__.py,sha256=l_umiyEyjUrwgWeFutkc_0mA0vHVOUNhBoEVmdCAH1c,4547 -pydantic_core/__pycache__/__init__.cpython-313.pyc,, -pydantic_core/__pycache__/core_schema.cpython-313.pyc,, -pydantic_core/_pydantic_core.cp313-win_amd64.pyd,sha256=nX5Qc-UaTYjbuJArJzuZ7a82ptrC4ytT2T0wEY31oZM,5309440 -pydantic_core/_pydantic_core.pyi,sha256=L8PY7NoyRt2AEHrm6SlewSRQSS5Noqk-hS-aQBHEl88,44398 -pydantic_core/core_schema.py,sha256=Dd6XdTw7E4wePWYat9Wu2G_LifaDids4bjhpE0YmrOI,153980 
-pydantic_core/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/blimgui/dist64/pydantic_core-2.33.2.dist-info/WHEEL b/blimgui/dist64/pydantic_core-2.33.2.dist-info/WHEEL deleted file mode 100644 index de42c98..0000000 --- a/blimgui/dist64/pydantic_core-2.33.2.dist-info/WHEEL +++ /dev/null @@ -1,4 +0,0 @@ -Wheel-Version: 1.0 -Generator: maturin (1.8.3) -Root-Is-Purelib: false -Tag: cp313-cp313-win_amd64 diff --git a/blimgui/dist64/pydantic_core-2.33.2.dist-info/INSTALLER b/blimgui/dist64/pydantic_core-2.41.5.dist-info/INSTALLER similarity index 100% rename from blimgui/dist64/pydantic_core-2.33.2.dist-info/INSTALLER rename to blimgui/dist64/pydantic_core-2.41.5.dist-info/INSTALLER diff --git a/blimgui/dist64/pydantic_core-2.33.2.dist-info/METADATA b/blimgui/dist64/pydantic_core-2.41.5.dist-info/METADATA similarity index 74% rename from blimgui/dist64/pydantic_core-2.33.2.dist-info/METADATA rename to blimgui/dist64/pydantic_core-2.41.5.dist-info/METADATA index d0a426e..468d2a5 100644 --- a/blimgui/dist64/pydantic_core-2.33.2.dist-info/METADATA +++ b/blimgui/dist64/pydantic_core-2.41.5.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.4 Name: pydantic_core -Version: 2.33.2 +Version: 2.41.5 Classifier: Development Status :: 3 - Alpha Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 @@ -10,151 +10,171 @@ Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: GraalPy Classifier: Programming Language :: Rust Classifier: Framework :: Pydantic Classifier: Intended Audience :: Developers Classifier: 
Intended Audience :: Information Technology -Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: POSIX :: Linux Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: MacOS Classifier: Typing :: Typed -Requires-Dist: typing-extensions>=4.6.0,!=4.7.0 +Requires-Dist: typing-extensions>=4.14.1 License-File: LICENSE Summary: Core functionality for Pydantic validation and serialization Home-Page: https://github.com/pydantic/pydantic-core Author-email: Samuel Colvin , Adrian Garcia Badaracco <1755071+adriangb@users.noreply.github.com>, David Montague , David Hewitt , Sydney Runkle , Victorien Plot -License: MIT +License-Expression: MIT Requires-Python: >=3.9 Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM Project-URL: Homepage, https://github.com/pydantic/pydantic-core Project-URL: Funding, https://github.com/sponsors/samuelcolvin Project-URL: Source, https://github.com/pydantic/pydantic-core -# pydantic-core - -[![CI](https://github.com/pydantic/pydantic-core/workflows/ci/badge.svg?event=push)](https://github.com/pydantic/pydantic-core/actions?query=event%3Apush+branch%3Amain+workflow%3Aci) -[![Coverage](https://codecov.io/gh/pydantic/pydantic-core/branch/main/graph/badge.svg)](https://codecov.io/gh/pydantic/pydantic-core) -[![pypi](https://img.shields.io/pypi/v/pydantic-core.svg)](https://pypi.python.org/pypi/pydantic-core) -[![versions](https://img.shields.io/pypi/pyversions/pydantic-core.svg)](https://github.com/pydantic/pydantic-core) -[![license](https://img.shields.io/github/license/pydantic/pydantic-core.svg)](https://github.com/pydantic/pydantic-core/blob/main/LICENSE) - -This package provides the core functionality for [pydantic](https://docs.pydantic.dev) validation and serialization. - -Pydantic-core is currently around 17x faster than pydantic V1. -See [`tests/benchmarks/`](./tests/benchmarks/) for details. 
- -## Example of direct usage - -_NOTE: You should not need to use pydantic-core directly; instead, use pydantic, which in turn uses pydantic-core._ - -```py -from pydantic_core import SchemaValidator, ValidationError - - -v = SchemaValidator( - { - 'type': 'typed-dict', - 'fields': { - 'name': { - 'type': 'typed-dict-field', - 'schema': { - 'type': 'str', - }, - }, - 'age': { - 'type': 'typed-dict-field', - 'schema': { - 'type': 'int', - 'ge': 18, - }, - }, - 'is_developer': { - 'type': 'typed-dict-field', - 'schema': { - 'type': 'default', - 'schema': {'type': 'bool'}, - 'default': True, - }, - }, - }, - } -) - -r1 = v.validate_python({'name': 'Samuel', 'age': 35}) -assert r1 == {'name': 'Samuel', 'age': 35, 'is_developer': True} - -# pydantic-core can also validate JSON directly -r2 = v.validate_json('{"name": "Samuel", "age": 35}') -assert r1 == r2 - -try: - v.validate_python({'name': 'Samuel', 'age': 11}) -except ValidationError as e: - print(e) - """ - 1 validation error for model - age - Input should be greater than or equal to 18 - [type=greater_than_equal, context={ge: 18}, input_value=11, input_type=int] - """ -``` - -## Getting Started - -You'll need rust stable [installed](https://rustup.rs/), or rust nightly if you want to generate accurate coverage. - -With rust and python 3.9+ installed, compiling pydantic-core should be possible with roughly the following: - -```bash -# clone this repo or your fork -git clone git@github.com:pydantic/pydantic-core.git -cd pydantic-core -# create a new virtual env -python3 -m venv env -source env/bin/activate -# install dependencies and install pydantic-core -make install -``` - -That should be it, the example shown above should now run. 
- -You might find it useful to look at [`python/pydantic_core/_pydantic_core.pyi`](./python/pydantic_core/_pydantic_core.pyi) and -[`python/pydantic_core/core_schema.py`](./python/pydantic_core/core_schema.py) for more information on the python API, -beyond that, [`tests/`](./tests) provide a large number of examples of usage. - -If you want to contribute to pydantic-core, you'll want to use some other make commands: -* `make build-dev` to build the package during development -* `make build-prod` to perform an optimised build for benchmarking -* `make test` to run the tests -* `make testcov` to run the tests and generate a coverage report -* `make lint` to run the linter -* `make format` to format python and rust code -* `make` to run `format build-dev lint test` - -## Profiling - -It's possible to profile the code using the [`flamegraph` utility from `flamegraph-rs`](https://github.com/flamegraph-rs/flamegraph). (Tested on Linux.) You can install this with `cargo install flamegraph`. - -Run `make build-profiling` to install a release build with debugging symbols included (needed for profiling). - -Once that is built, you can profile pytest benchmarks with (e.g.): - -```bash -flamegraph -- pytest tests/benchmarks/test_micro_benchmarks.py -k test_list_of_ints_core_py --benchmark-enable -``` -The `flamegraph` command will produce an interactive SVG at `flamegraph.svg`. - -## Releasing - -1. Bump package version locally. Do not just edit `Cargo.toml` on Github, you need both `Cargo.toml` and `Cargo.lock` to be updated. -2. Make a PR for the version bump and merge it. -3. Go to https://github.com/pydantic/pydantic-core/releases and click "Draft a new release" -4. In the "Choose a tag" dropdown enter the new tag `v` and select "Create new tag on publish" when the option appears. -5. Enter the release title in the form "v " -6. Click Generate release notes button -7. Click Publish release -8. 
Go to https://github.com/pydantic/pydantic-core/actions and ensure that all build for release are done successfully. -9. Go to https://pypi.org/project/pydantic-core/ and ensure that the latest release is published. -10. Done 🎉 +# pydantic-core + +[![CI](https://github.com/pydantic/pydantic-core/workflows/ci/badge.svg?event=push)](https://github.com/pydantic/pydantic-core/actions?query=event%3Apush+branch%3Amain+workflow%3Aci) +[![Coverage](https://codecov.io/gh/pydantic/pydantic-core/branch/main/graph/badge.svg)](https://codecov.io/gh/pydantic/pydantic-core) +[![pypi](https://img.shields.io/pypi/v/pydantic-core.svg)](https://pypi.python.org/pypi/pydantic-core) +[![versions](https://img.shields.io/pypi/pyversions/pydantic-core.svg)](https://github.com/pydantic/pydantic-core) +[![license](https://img.shields.io/github/license/pydantic/pydantic-core.svg)](https://github.com/pydantic/pydantic-core/blob/main/LICENSE) + +This package provides the core functionality for [pydantic](https://docs.pydantic.dev) validation and serialization. + +Pydantic-core is currently around 17x faster than pydantic V1. +See [`tests/benchmarks/`](./tests/benchmarks/) for details. 
+ +## Example of direct usage + +_NOTE: You should not need to use pydantic-core directly; instead, use pydantic, which in turn uses pydantic-core._ + +```py +from pydantic_core import SchemaValidator, ValidationError + + +v = SchemaValidator( + { + 'type': 'typed-dict', + 'fields': { + 'name': { + 'type': 'typed-dict-field', + 'schema': { + 'type': 'str', + }, + }, + 'age': { + 'type': 'typed-dict-field', + 'schema': { + 'type': 'int', + 'ge': 18, + }, + }, + 'is_developer': { + 'type': 'typed-dict-field', + 'schema': { + 'type': 'default', + 'schema': {'type': 'bool'}, + 'default': True, + }, + }, + }, + } +) + +r1 = v.validate_python({'name': 'Samuel', 'age': 35}) +assert r1 == {'name': 'Samuel', 'age': 35, 'is_developer': True} + +# pydantic-core can also validate JSON directly +r2 = v.validate_json('{"name": "Samuel", "age": 35}') +assert r1 == r2 + +try: + v.validate_python({'name': 'Samuel', 'age': 11}) +except ValidationError as e: + print(e) + """ + 1 validation error for model + age + Input should be greater than or equal to 18 + [type=greater_than_equal, context={ge: 18}, input_value=11, input_type=int] + """ +``` + +## Getting Started + +### Prerequisites + +You'll need: +1. **[Rust](https://rustup.rs/)** - Rust stable (or nightly for coverage) +2. **[uv](https://docs.astral.sh/uv/getting-started/installation/)** - Fast Python package manager (will install Python 3.9+ automatically) +3. **[git](https://git-scm.com/)** - For version control +4. 
**[make](https://www.gnu.org/software/make/)** - For running development commands (or use `nmake` on Windows) + +### Quick Start + +```bash +# Clone the repository (or from your fork) +git clone git@github.com:pydantic/pydantic-core.git +cd pydantic-core + +# Install all dependencies using uv, setup pre-commit hooks, and build the development version +make install +``` + +Verify your installation by running: + +```bash +make +``` + +This runs a full development cycle: formatting, building, linting, and testing + +### Development Commands + +Run `make help` to see all available commands, or use these common ones: + +```bash +make build-dev # to build the package during development +make build-prod # to perform an optimised build for benchmarking +make test # to run the tests +make testcov # to run the tests and generate a coverage report +make lint # to run the linter +make format # to format python and rust code +make all # to run build-dev + format + lint + test +``` + +### Useful Resources + +* [`python/pydantic_core/_pydantic_core.pyi`](./python/pydantic_core/_pydantic_core.pyi) - Python API types +* [`python/pydantic_core/core_schema.py`](./python/pydantic_core/core_schema.py) - Core schema definitions +* [`tests/`](./tests) - Comprehensive usage examples + +## Profiling + +It's possible to profile the code using the [`flamegraph` utility from `flamegraph-rs`](https://github.com/flamegraph-rs/flamegraph). (Tested on Linux.) You can install this with `cargo install flamegraph`. + +Run `make build-profiling` to install a release build with debugging symbols included (needed for profiling). + +Once that is built, you can profile pytest benchmarks with (e.g.): + +```bash +flamegraph -- pytest tests/benchmarks/test_micro_benchmarks.py -k test_list_of_ints_core_py --benchmark-enable +``` +The `flamegraph` command will produce an interactive SVG at `flamegraph.svg`. + +## Releasing + +1. Bump package version locally. 
Do not just edit `Cargo.toml` on Github, you need both `Cargo.toml` and `Cargo.lock` to be updated. +2. Make a PR for the version bump and merge it. +3. Go to https://github.com/pydantic/pydantic-core/releases and click "Draft a new release" +4. In the "Choose a tag" dropdown enter the new tag `v` and select "Create new tag on publish" when the option appears. +5. Enter the release title in the form "v " +6. Click Generate release notes button +7. Click Publish release +8. Go to https://github.com/pydantic/pydantic-core/actions and ensure that all build for release are done successfully. +9. Go to https://pypi.org/project/pydantic-core/ and ensure that the latest release is published. +10. Done 🎉 diff --git a/blimgui/dist64/pydantic_core-2.41.5.dist-info/RECORD b/blimgui/dist64/pydantic_core-2.41.5.dist-info/RECORD new file mode 100644 index 0000000..d459c4d --- /dev/null +++ b/blimgui/dist64/pydantic_core-2.41.5.dist-info/RECORD @@ -0,0 +1,12 @@ +pydantic_core-2.41.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pydantic_core-2.41.5.dist-info/METADATA,sha256=Ex83HNMObeb0Be1hbVnx5CewzvIq2S18OeUn7UwBhKk,7420 +pydantic_core-2.41.5.dist-info/RECORD,, +pydantic_core-2.41.5.dist-info/WHEEL,sha256=tZ3VAZ5HuUzziFCJ2lDsDJnJO-xy4omAQIa7TJCFCZk,96 +pydantic_core-2.41.5.dist-info/licenses/LICENSE,sha256=--f2FfGNE1wHAA5ahjJdsn9Cx3KWme8WasH3o_RYa_Q,1101 +pydantic_core/__init__.py,sha256=5kQVCp6sQ3LE_4Jsj2FN8oilYeOSUOlsWORH8XgGxr8,5286 +pydantic_core/__pycache__/__init__.cpython-314.pyc,, +pydantic_core/__pycache__/core_schema.cpython-314.pyc,, +pydantic_core/_pydantic_core.cp314-win_amd64.pyd,sha256=sf9aARYkDe_ehbR5gJxRHVYYfZRcXTMT6Ica1Sud9VA,5427712 +pydantic_core/_pydantic_core.pyi,sha256=9MaxWCkerllI7ar2Spz_0LSGSZJcmcGqC02rU-9Mdl4,45973 +pydantic_core/core_schema.py,sha256=k4n4J4QNy8uFumsUTXwzR5KbPh7EyLzWGX0x7pRdzxk,159165 +pydantic_core/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git 
a/blimgui/dist64/pydantic_core-2.41.5.dist-info/WHEEL b/blimgui/dist64/pydantic_core-2.41.5.dist-info/WHEEL new file mode 100644 index 0000000..0436dba --- /dev/null +++ b/blimgui/dist64/pydantic_core-2.41.5.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: maturin (1.9.6) +Root-Is-Purelib: false +Tag: cp314-cp314-win_amd64 diff --git a/blimgui/dist64/pydantic_core-2.33.2.dist-info/licenses/LICENSE b/blimgui/dist64/pydantic_core-2.41.5.dist-info/licenses/LICENSE similarity index 98% rename from blimgui/dist64/pydantic_core-2.33.2.dist-info/licenses/LICENSE rename to blimgui/dist64/pydantic_core-2.41.5.dist-info/licenses/LICENSE index a6ad8bc..0716871 100644 --- a/blimgui/dist64/pydantic_core-2.33.2.dist-info/licenses/LICENSE +++ b/blimgui/dist64/pydantic_core-2.41.5.dist-info/licenses/LICENSE @@ -1,21 +1,21 @@ -The MIT License (MIT) - -Copyright (c) 2022 Samuel Colvin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+The MIT License (MIT) + +Copyright (c) 2022 Samuel Colvin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/blimgui/dist64/pydantic_core/__init__.py b/blimgui/dist64/pydantic_core/__init__.py index 330933d..3317cdd 100644 --- a/blimgui/dist64/pydantic_core/__init__.py +++ b/blimgui/dist64/pydantic_core/__init__.py @@ -3,6 +3,8 @@ import sys as _sys from typing import Any as _Any +from typing_extensions import Sentinel + from ._pydantic_core import ( ArgsKwargs, MultiHostUrl, @@ -25,7 +27,6 @@ from_json, to_json, to_jsonable_python, - validate_core_schema, ) from .core_schema import CoreConfig, CoreSchema, CoreSchemaType, ErrorType @@ -41,6 +42,7 @@ __all__ = [ '__version__', + 'UNSET', 'CoreConfig', 'CoreSchema', 'CoreSchemaType', @@ -66,7 +68,6 @@ 'to_json', 'from_json', 'to_jsonable_python', - 'validate_core_schema', ] @@ -116,7 +117,7 @@ class ErrorTypeInfo(_TypedDict): """ type: ErrorType - """The type of error that occurred, this should a "slug" identifier that changes rarely or never.""" + """The type of error that occurred, this should be a "slug" identifier that changes rarely or never.""" message_template_python: str """String template to render a human readable error message from using context, when the input is Python.""" example_message_python: str @@ -142,3 +143,29 @@ class MultiHostHost(_TypedDict): """The host part of this host, or `None`.""" port: int | None """The port part of this host, or `None`.""" + + +MISSING = Sentinel('MISSING') +"""A singleton indicating a field value was not provided during validation. + +This singleton can be used as a default value, as an alternative to `None` when it has +an explicit meaning. During serialization, any field with `MISSING` as a value is excluded +from the output. 
+ +Example: + ```python + from pydantic import BaseModel + + from pydantic_core import MISSING + + + class Configuration(BaseModel): + timeout: int | None | MISSING = MISSING + + + # configuration defaults, stored somewhere else: + defaults = {'timeout': 200} + + conf = Configuration.model_validate({...}) + timeout = conf.timeout if conf.timeout is not MISSING else defaults['timeout'] +""" diff --git a/blimgui/dist64/pydantic_core/_pydantic_core.pyi b/blimgui/dist64/pydantic_core/_pydantic_core.pyi index fcfeabb..5f8ea7b 100644 --- a/blimgui/dist64/pydantic_core/_pydantic_core.pyi +++ b/blimgui/dist64/pydantic_core/_pydantic_core.pyi @@ -6,7 +6,7 @@ from _typeshed import SupportsAllComparisons from typing_extensions import LiteralString, Self, TypeAlias from pydantic_core import ErrorDetails, ErrorTypeInfo, InitErrorDetails, MultiHostHost -from pydantic_core.core_schema import CoreConfig, CoreSchema, ErrorType +from pydantic_core.core_schema import CoreConfig, CoreSchema, ErrorType, ExtraBehavior __all__ = [ '__version__', @@ -34,7 +34,6 @@ __all__ = [ 'to_jsonable_python', 'list_all_errors', 'TzInfo', - 'validate_core_schema', ] __version__: str build_profile: str @@ -93,6 +92,7 @@ class SchemaValidator: input: Any, *, strict: bool | None = None, + extra: ExtraBehavior | None = None, from_attributes: bool | None = None, context: Any | None = None, self_instance: Any | None = None, @@ -107,6 +107,8 @@ class SchemaValidator: input: The Python object to validate. strict: Whether to validate the object in strict mode. If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used. + extra: Whether to ignore, allow, or forbid extra data during model validation. + If `None`, the value of [`CoreConfig.extra_fields_behavior`][pydantic_core.core_schema.CoreConfig] is used. from_attributes: Whether to validate objects as inputs to models by extracting attributes. 
If `None`, the value of [`CoreConfig.from_attributes`][pydantic_core.core_schema.CoreConfig] is used. context: The context to use for validation, this is passed to functional validators as @@ -131,6 +133,7 @@ class SchemaValidator: input: Any, *, strict: bool | None = None, + extra: ExtraBehavior | None = None, from_attributes: bool | None = None, context: Any | None = None, self_instance: Any | None = None, @@ -151,6 +154,7 @@ class SchemaValidator: input: str | bytes | bytearray, *, strict: bool | None = None, + extra: ExtraBehavior | None = None, context: Any | None = None, self_instance: Any | None = None, allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False, @@ -170,6 +174,8 @@ class SchemaValidator: input: The JSON data to validate. strict: Whether to validate the object in strict mode. If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used. + extra: Whether to ignore, allow, or forbid extra data during model validation. + If `None`, the value of [`CoreConfig.extra_fields_behavior`][pydantic_core.core_schema.CoreConfig] is used. context: The context to use for validation, this is passed to functional validators as [`info.context`][pydantic_core.core_schema.ValidationInfo.context]. self_instance: An instance of a model set attributes on from validation. @@ -191,6 +197,7 @@ class SchemaValidator: input: _StringInput, *, strict: bool | None = None, + extra: ExtraBehavior | None = None, context: Any | None = None, allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False, by_alias: bool | None = None, @@ -206,6 +213,8 @@ class SchemaValidator: input: The input as a string, or bytes/bytearray if `strict=False`. strict: Whether to validate the object in strict mode. If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used. + extra: Whether to ignore, allow, or forbid extra data during model validation. 
+ If `None`, the value of [`CoreConfig.extra_fields_behavior`][pydantic_core.core_schema.CoreConfig] is used. context: The context to use for validation, this is passed to functional validators as [`info.context`][pydantic_core.core_schema.ValidationInfo.context]. allow_partial: Whether to allow partial validation; if `True` errors in the last element of sequences @@ -228,6 +237,7 @@ class SchemaValidator: field_value: Any, *, strict: bool | None = None, + extra: ExtraBehavior | None = None, from_attributes: bool | None = None, context: Any | None = None, by_alias: bool | None = None, @@ -242,6 +252,8 @@ class SchemaValidator: field_value: The value to assign to the field. strict: Whether to validate the object in strict mode. If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used. + extra: Whether to ignore, allow, or forbid extra data during model validation. + If `None`, the value of [`CoreConfig.extra_fields_behavior`][pydantic_core.core_schema.CoreConfig] is used. from_attributes: Whether to validate objects as inputs to models by extracting attributes. If `None`, the value of [`CoreConfig.from_attributes`][pydantic_core.core_schema.CoreConfig] is used. context: The context to use for validation, this is passed to functional validators as @@ -305,6 +317,7 @@ class SchemaSerializer: exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, fallback: Callable[[Any], Any] | None = None, @@ -325,6 +338,7 @@ class SchemaSerializer: e.g. are not included in `__pydantic_fields_set__`. exclude_defaults: Whether to exclude fields that are equal to their default value. exclude_none: Whether to exclude fields that have a value of `None`. + exclude_computed_fields: Whether to exclude computed fields. round_trip: Whether to enable serialization and validation round-trip support. 
warnings: How to handle invalid fields. False/"none" ignores them, True/"warn" logs errors, "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. @@ -345,12 +359,14 @@ class SchemaSerializer: value: Any, *, indent: int | None = None, + ensure_ascii: bool = False, include: _IncEx | None = None, exclude: _IncEx | None = None, by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, + exclude_computed_fields: bool = False, round_trip: bool = False, warnings: bool | Literal['none', 'warn', 'error'] = True, fallback: Callable[[Any], Any] | None = None, @@ -363,6 +379,8 @@ class SchemaSerializer: Arguments: value: The Python object to serialize. indent: If `None`, the JSON will be compact, otherwise it will be pretty-printed with the indent provided. + ensure_ascii: If `True`, the output is guaranteed to have all incoming non-ASCII characters escaped. + If `False` (the default), these characters will be output as-is. include: A set of fields to include, if `None` all fields are included. exclude: A set of fields to exclude, if `None` no fields are excluded. by_alias: Whether to use the alias names of fields. @@ -370,6 +388,7 @@ class SchemaSerializer: e.g. are not included in `__pydantic_fields_set__`. exclude_defaults: Whether to exclude fields that are equal to their default value. exclude_none: Whether to exclude fields that have a value of `None`. + exclude_computed_fields: Whether to exclude computed fields. round_trip: Whether to enable serialization and validation round-trip support. warnings: How to handle invalid fields. False/"none" ignores them, True/"warn" logs errors, "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError]. 
@@ -390,6 +409,7 @@ def to_json( value: Any, *, indent: int | None = None, + ensure_ascii: bool = False, include: _IncEx | None = None, exclude: _IncEx | None = None, # Note: In Pydantic 2.11, the default value of `by_alias` on `SchemaSerializer` was changed from `True` to `None`, @@ -399,6 +419,7 @@ def to_json( exclude_none: bool = False, round_trip: bool = False, timedelta_mode: Literal['iso8601', 'float'] = 'iso8601', + temporal_mode: Literal['iso8601', 'seconds', 'milliseconds'] = 'iso8601', bytes_mode: Literal['utf8', 'base64', 'hex'] = 'utf8', inf_nan_mode: Literal['null', 'constants', 'strings'] = 'constants', serialize_unknown: bool = False, @@ -414,12 +435,17 @@ def to_json( Arguments: value: The Python object to serialize. indent: If `None`, the JSON will be compact, otherwise it will be pretty-printed with the indent provided. + ensure_ascii: If `True`, the output is guaranteed to have all incoming non-ASCII characters escaped. + If `False` (the default), these characters will be output as-is. include: A set of fields to include, if `None` all fields are included. exclude: A set of fields to exclude, if `None` no fields are excluded. by_alias: Whether to use the alias names of fields. exclude_none: Whether to exclude fields that have a value of `None`. round_trip: Whether to enable serialization and validation round-trip support. timedelta_mode: How to serialize `timedelta` objects, either `'iso8601'` or `'float'`. + temporal_mode: How to serialize datetime-like objects (`datetime`, `date`, `time`), either `'iso8601'`, `'seconds'`, or `'milliseconds'`. + `iso8601` returns an ISO 8601 string; `seconds` returns the Unix timestamp in seconds as a float; `milliseconds` returns the Unix timestamp in milliseconds as a float. + bytes_mode: How to serialize `bytes` objects, either `'utf8'`, `'base64'`, or `'hex'`. inf_nan_mode: How to serialize `Infinity`, `-Infinity` and `NaN` values, either `'null'`, `'constants'`, or `'strings'`. 
serialize_unknown: Attempt to serialize unknown types, `str(value)` will be used, if that fails @@ -478,6 +504,7 @@ def to_jsonable_python( exclude_none: bool = False, round_trip: bool = False, timedelta_mode: Literal['iso8601', 'float'] = 'iso8601', + temporal_mode: Literal['iso8601', 'seconds', 'milliseconds'] = 'iso8601', bytes_mode: Literal['utf8', 'base64', 'hex'] = 'utf8', inf_nan_mode: Literal['null', 'constants', 'strings'] = 'constants', serialize_unknown: bool = False, @@ -499,6 +526,9 @@ def to_jsonable_python( exclude_none: Whether to exclude fields that have a value of `None`. round_trip: Whether to enable serialization and validation round-trip support. timedelta_mode: How to serialize `timedelta` objects, either `'iso8601'` or `'float'`. + temporal_mode: How to serialize datetime-like objects (`datetime`, `date`, `time`), either `'iso8601'`, `'seconds'`, or `'milliseconds'`. + `iso8601` returns an ISO 8601 string; `seconds` returns the Unix timestamp in seconds as a float; `milliseconds` returns the Unix timestamp in milliseconds as a float. + bytes_mode: How to serialize `bytes` objects, either `'utf8'`, `'base64'`, or `'hex'`. inf_nan_mode: How to serialize `Infinity`, `-Infinity` and `NaN` values, either `'null'`, `'constants'`, or `'strings'`. serialize_unknown: Attempt to serialize unknown types, `str(value)` will be used, if that fails @@ -633,9 +663,6 @@ class ValidationError(ValueError): """ Python constructor for a Validation Error. - The API for constructing validation errors will probably change in the future, - hence the static method rather than `__init__`. 
- Arguments: title: The title of the error, as used in the heading of `str(validation_error)` line_errors: A list of [`InitErrorDetails`][pydantic_core.InitErrorDetails] which contain information @@ -714,22 +741,16 @@ class PydanticCustomError(ValueError): raise PydanticCustomError('custom_value_error', 'Value must be greater than {value}', {'value': 10, 'extra_context': 'extra_data'}) return v ``` + + Arguments: + error_type: The error type. + message_template: The message template. + context: The data to inject into the message template. """ def __init__( - self, error_type: LiteralString, message_template: LiteralString, context: dict[str, Any] | None = None - ) -> None: - """Initializes the `PydanticCustomError`. - - Arguments: - error_type: The error type. - message_template: The message template. - context: The data to inject into the message template. - """ - - def __new__( - cls, error_type: LiteralString, message_template: LiteralString, context: dict[str, Any] | None = None - ) -> Self: ... + self, error_type: LiteralString, message_template: LiteralString, context: dict[str, Any] | None = None, / + ) -> None: ... @property def context(self) -> dict[str, Any] | None: """Values which are required to render the error message, and could hence be useful in passing error data forward.""" @@ -757,20 +778,16 @@ class PydanticKnownError(ValueError): def custom_validator(v) -> None: if v <= 10: - raise PydanticKnownError(error_type='greater_than', context={'gt': 10}) + raise PydanticKnownError('greater_than', {'gt': 10}) return v ``` - """ - - def __init__(self, error_type: ErrorType, context: dict[str, Any] | None = None) -> None: - """Initializes the `PydanticKnownError`. - Arguments: - error_type: The error type. - context: The data to inject into the message template. - """ + Arguments: + error_type: The error type. + context: The data to inject into the message template. 
+ """ - def __new__(cls, error_type: ErrorType, context: dict[str, Any] | None = None) -> Self: ... + def __init__(self, error_type: ErrorType, context: dict[str, Any] | None = None, /) -> None: ... @property def context(self) -> dict[str, Any] | None: """Values which are required to render the error message, and could hence be useful in passing error data forward.""" @@ -870,16 +887,12 @@ class PydanticSerializationError(ValueError): """An error raised when an issue occurs during serialization. In custom serializers, this error can be used to indicate that serialization has failed. - """ - - def __init__(self, message: str) -> None: - """Initializes the `PydanticSerializationError`. - Arguments: - message: The message associated with the error. - """ + Arguments: + message: The message associated with the error. + """ - def __new__(cls, message: str) -> Self: ... + def __init__(self, message: str, /) -> None: ... @final class PydanticSerializationUnexpectedValue(ValueError): @@ -918,16 +931,12 @@ class PydanticSerializationUnexpectedValue(ValueError): This is often used internally in `pydantic-core` when unexpected types are encountered during serialization, but it can also be used by users in custom serializers, as seen above. - """ - def __init__(self, message: str) -> None: - """Initializes the `PydanticSerializationUnexpectedValue`. - - Arguments: - message: The message associated with the unexpected value. - """ + Arguments: + message: The message associated with the unexpected value. + """ - def __new__(cls, message: str | None = None) -> Self: ... + def __init__(self, message: str, /) -> None: ... @final class ArgsKwargs: @@ -1001,7 +1010,14 @@ def list_all_errors() -> list[ErrorTypeInfo]: class TzInfo(datetime.tzinfo): """An `pydantic-core` implementation of the abstract [`datetime.tzinfo`][] class.""" - # def __new__(cls, seconds: float) -> Self: ... + def __init__(self, seconds: float = 0.0) -> None: + """Initializes the `TzInfo`. 
+ + Arguments: + seconds: The offset from UTC in seconds. Defaults to 0.0 (UTC). + """ + + def __new__(cls, seconds: float = 0.0) -> Self: ... # Docstrings for attributes sourced from the abstract base class, [`datetime.tzinfo`](https://docs.python.org/3/library/datetime.html#datetime.tzinfo). @@ -1028,12 +1044,3 @@ class TzInfo(datetime.tzinfo): More info can be found at [`tzinfo.fromutc`][datetime.tzinfo.fromutc].""" def __deepcopy__(self, _memo: dict[Any, Any]) -> TzInfo: ... - -def validate_core_schema(schema: CoreSchema, *, strict: bool | None = None) -> CoreSchema: - """Validate a core schema. - - This currently uses lax mode for validation (i.e. will coerce strings to dates and such) - but may use strict mode in the future. - We may also remove this function altogether, do not rely on it being present if you are - using pydantic-core directly. - """ diff --git a/blimgui/dist64/pydantic_core/core_schema.py b/blimgui/dist64/pydantic_core/core_schema.py index cc22b84..ddbe7db 100644 --- a/blimgui/dist64/pydantic_core/core_schema.py +++ b/blimgui/dist64/pydantic_core/core_schema.py @@ -13,7 +13,7 @@ from re import Pattern from typing import TYPE_CHECKING, Any, Callable, Literal, Union -from typing_extensions import deprecated +from typing_extensions import TypeVar, deprecated if sys.version_info < (3, 12): from typing_extensions import TypedDict @@ -61,6 +61,10 @@ class CoreConfig(TypedDict, total=False): str_to_upper: Whether to convert string fields to uppercase. allow_inf_nan: Whether to allow infinity and NaN values for float fields. Default is `True`. ser_json_timedelta: The serialization option for `timedelta` values. Default is 'iso8601'. + Note that if ser_json_temporal is set, then this param will be ignored. + ser_json_temporal: The serialization option for datetime like values. Default is 'iso8601'. + The types this covers are datetime, date, time and timedelta. 
+ If this is set, it will take precedence over ser_json_timedelta ser_json_bytes: The serialization option for `bytes` values. Default is 'utf8'. ser_json_inf_nan: The serialization option for infinity and NaN values in float fields. Default is 'null'. @@ -75,6 +79,7 @@ class CoreConfig(TypedDict, total=False): validate_by_alias: Whether to use the field's alias when validating against the provided input data. Default is `True`. validate_by_name: Whether to use the field's name when validating against the provided input data. Default is `False`. Replacement for `populate_by_name`. serialize_by_alias: Whether to serialize by alias. Default is `False`, expected to change to `True` in V3. + url_preserve_empty_path: Whether to preserve empty URL paths when validating values for a URL type. Defaults to `False`. """ title: str @@ -102,6 +107,7 @@ class CoreConfig(TypedDict, total=False): allow_inf_nan: bool # default: True # the config options are used to customise serialization to JSON ser_json_timedelta: Literal['iso8601', 'float'] # default: 'iso8601' + ser_json_temporal: Literal['iso8601', 'seconds', 'milliseconds'] # default: 'iso8601' ser_json_bytes: Literal['utf8', 'base64', 'hex'] # default: 'utf8' ser_json_inf_nan: Literal['null', 'constants', 'strings'] # default: 'null' val_json_bytes: Literal['utf8', 'base64', 'hex'] # default: 'utf8' @@ -114,42 +120,71 @@ class CoreConfig(TypedDict, total=False): validate_by_alias: bool # default: True validate_by_name: bool # default: False serialize_by_alias: bool # default: False + url_preserve_empty_path: bool # default: False IncExCall: TypeAlias = 'set[int | str] | dict[int | str, IncExCall] | None' +ContextT = TypeVar('ContextT', covariant=True, default='Any | None') + + +class SerializationInfo(Protocol[ContextT]): + """Extra data used during serialization.""" + + @property + def include(self) -> IncExCall: + """The `include` argument set during serialization.""" + ... 
-class SerializationInfo(Protocol): @property - def include(self) -> IncExCall: ... + def exclude(self) -> IncExCall: + """The `exclude` argument set during serialization.""" + ... @property - def exclude(self) -> IncExCall: ... + def context(self) -> ContextT: + """The current serialization context.""" + ... @property - def context(self) -> Any | None: - """Current serialization context.""" + def mode(self) -> Literal['python', 'json'] | str: + """The serialization mode set during serialization.""" + ... @property - def mode(self) -> str: ... + def by_alias(self) -> bool: + """The `by_alias` argument set during serialization.""" + ... @property - def by_alias(self) -> bool: ... + def exclude_unset(self) -> bool: + """The `exclude_unset` argument set during serialization.""" + ... @property - def exclude_unset(self) -> bool: ... + def exclude_defaults(self) -> bool: + """The `exclude_defaults` argument set during serialization.""" + ... @property - def exclude_defaults(self) -> bool: ... + def exclude_none(self) -> bool: + """The `exclude_none` argument set during serialization.""" + ... @property - def exclude_none(self) -> bool: ... + def exclude_computed_fields(self) -> bool: + """The `exclude_computed_fields` argument set during serialization.""" + ... @property - def serialize_as_any(self) -> bool: ... + def serialize_as_any(self) -> bool: + """The `serialize_as_any` argument set during serialization.""" + ... @property - def round_trip(self) -> bool: ... + def round_trip(self) -> bool: + """The `round_trip` argument set during serialization.""" + ... def mode_is_json(self) -> bool: ... @@ -158,19 +193,21 @@ def __str__(self) -> str: ... def __repr__(self) -> str: ... -class FieldSerializationInfo(SerializationInfo, Protocol): +class FieldSerializationInfo(SerializationInfo[ContextT], Protocol): + """Extra data used during field serialization.""" + @property - def field_name(self) -> str: ... 
+ def field_name(self) -> str: + """The name of the current field being serialized.""" + ... -class ValidationInfo(Protocol): - """ - Argument passed to validation functions. - """ +class ValidationInfo(Protocol[ContextT]): + """Extra data used during validation.""" @property - def context(self) -> Any | None: - """Current validation context.""" + def context(self) -> ContextT: + """The current validation context.""" ... @property @@ -180,7 +217,7 @@ def config(self) -> CoreConfig | None: @property def mode(self) -> Literal['python', 'json']: - """The type of input data we are currently validating""" + """The type of input data we are currently validating.""" ... @property @@ -240,11 +277,11 @@ def simple_ser_schema(type: ExpectedSerializationTypes) -> SimpleSerSchema: # (input_value: Any, /) -> Any GeneralPlainNoInfoSerializerFunction = Callable[[Any], Any] # (input_value: Any, info: FieldSerializationInfo, /) -> Any -GeneralPlainInfoSerializerFunction = Callable[[Any, SerializationInfo], Any] +GeneralPlainInfoSerializerFunction = Callable[[Any, SerializationInfo[Any]], Any] # (model: Any, input_value: Any, /) -> Any FieldPlainNoInfoSerializerFunction = Callable[[Any, Any], Any] # (model: Any, input_value: Any, info: FieldSerializationInfo, /) -> Any -FieldPlainInfoSerializerFunction = Callable[[Any, Any, FieldSerializationInfo], Any] +FieldPlainInfoSerializerFunction = Callable[[Any, Any, FieldSerializationInfo[Any]], Any] SerializerFunction = Union[ GeneralPlainNoInfoSerializerFunction, GeneralPlainInfoSerializerFunction, @@ -311,11 +348,11 @@ def __call__(self, input_value: Any, index_key: int | str | None = None, /) -> A # (input_value: Any, serializer: SerializerFunctionWrapHandler, /) -> Any GeneralWrapNoInfoSerializerFunction = Callable[[Any, SerializerFunctionWrapHandler], Any] # (input_value: Any, serializer: SerializerFunctionWrapHandler, info: SerializationInfo, /) -> Any -GeneralWrapInfoSerializerFunction = Callable[[Any, SerializerFunctionWrapHandler, 
SerializationInfo], Any] +GeneralWrapInfoSerializerFunction = Callable[[Any, SerializerFunctionWrapHandler, SerializationInfo[Any]], Any] # (model: Any, input_value: Any, serializer: SerializerFunctionWrapHandler, /) -> Any FieldWrapNoInfoSerializerFunction = Callable[[Any, Any, SerializerFunctionWrapHandler], Any] # (model: Any, input_value: Any, serializer: SerializerFunctionWrapHandler, info: FieldSerializationInfo, /) -> Any -FieldWrapInfoSerializerFunction = Callable[[Any, Any, SerializerFunctionWrapHandler, FieldSerializationInfo], Any] +FieldWrapInfoSerializerFunction = Callable[[Any, Any, SerializerFunctionWrapHandler, FieldSerializationInfo[Any]], Any] WrapSerializerFunction = Union[ GeneralWrapNoInfoSerializerFunction, GeneralWrapInfoSerializerFunction, @@ -953,7 +990,7 @@ class DateSchema(TypedDict, total=False): gt: date now_op: Literal['past', 'future'] # defaults to current local utc offset from `time.localtime().tm_gmtoff` - # value is restricted to -86_400 < offset < 86_400 by bounds in generate_self_schema.py + # value is restricted to -86_400 < offset < 86_400: now_utc_offset: int ref: str metadata: dict[str, Any] @@ -1316,6 +1353,25 @@ class Color(Enum): ) +class MissingSentinelSchema(TypedDict, total=False): + type: Required[Literal['missing-sentinel']] + metadata: dict[str, Any] + serialization: SerSchema + + +def missing_sentinel_schema( + metadata: dict[str, Any] | None = None, + serialization: SerSchema | None = None, +) -> MissingSentinelSchema: + """Returns a schema for the `MISSING` sentinel.""" + + return _dict_not_none( + type='missing-sentinel', + metadata=metadata, + serialization=serialization, + ) + + # must match input/parse_json.rs::JsonType::try_from JsonType = Literal['null', 'bool', 'int', 'float', 'str', 'list', 'dict'] @@ -1885,6 +1941,7 @@ class DictSchema(TypedDict, total=False): values_schema: CoreSchema # default: AnySchema min_length: int max_length: int + fail_fast: bool strict: bool ref: str metadata: dict[str, Any] @@ 
-1897,6 +1954,7 @@ def dict_schema( *, min_length: int | None = None, max_length: int | None = None, + fail_fast: bool | None = None, strict: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, @@ -1920,6 +1978,7 @@ def dict_schema( values_schema: The value must be a dict with values that match this schema min_length: The value must be a dict with at least this many items max_length: The value must be a dict with at most this many items + fail_fast: Stop validation on the first error strict: Whether the keys and values should be validated with strict mode ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core @@ -1931,6 +1990,7 @@ def dict_schema( values_schema=values_schema, min_length=min_length, max_length=max_length, + fail_fast=fail_fast, strict=strict, ref=ref, metadata=metadata, @@ -1948,13 +2008,13 @@ class NoInfoValidatorFunctionSchema(TypedDict): # (input_value: Any, info: ValidationInfo, /) -> Any -WithInfoValidatorFunction = Callable[[Any, ValidationInfo], Any] +WithInfoValidatorFunction = Callable[[Any, ValidationInfo[Any]], Any] class WithInfoValidatorFunctionSchema(TypedDict, total=False): type: Required[Literal['with-info']] function: Required[WithInfoValidatorFunction] - field_name: str + field_name: str # deprecated ValidationFunction = Union[NoInfoValidatorFunctionSchema, WithInfoValidatorFunctionSchema] @@ -2042,7 +2102,7 @@ def fn(v: bytes, info: core_schema.ValidationInfo) -> str: return v.decode() + 'world' func_schema = core_schema.with_info_before_validator_function( - function=fn, schema=core_schema.str_schema(), field_name='a' + function=fn, schema=core_schema.str_schema() ) schema = core_schema.typed_dict_schema({'a': core_schema.typed_dict_field(func_schema)}) @@ -2052,13 +2112,20 @@ def fn(v: bytes, info: core_schema.ValidationInfo) -> str: Args: function: The validator 
function to call - field_name: The name of the field + field_name: The name of the field this validator is applied to, if any (deprecated) schema: The schema to validate the output of the validator function ref: optional unique identifier of the schema, used to reference the schema in other places json_schema_input_schema: The core schema to be used to generate the corresponding JSON Schema input type metadata: Any other information you want to include with the schema, not used by pydantic-core serialization: Custom serialization schema """ + if field_name is not None: + warnings.warn( + 'The `field_name` argument on `with_info_before_validator_function` is deprecated, it will be passed to the function through `ValidationState` instead.', + DeprecationWarning, + stacklevel=2, + ) + return _dict_not_none( type='function-before', function=_dict_not_none(type='with-info', function=function, field_name=field_name), @@ -2140,7 +2207,7 @@ def fn(v: str, info: core_schema.ValidationInfo) -> str: return v + 'world' func_schema = core_schema.with_info_after_validator_function( - function=fn, schema=core_schema.str_schema(), field_name='a' + function=fn, schema=core_schema.str_schema() ) schema = core_schema.typed_dict_schema({'a': core_schema.typed_dict_field(func_schema)}) @@ -2151,11 +2218,18 @@ def fn(v: str, info: core_schema.ValidationInfo) -> str: Args: function: The validator function to call after the schema is validated schema: The schema to validate before the validator function - field_name: The name of the field this validators is applied to, if any + field_name: The name of the field this validator is applied to, if any (deprecated) ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core serialization: Custom serialization schema """ + if field_name is not None: + warnings.warn( + 'The `field_name` argument on 
`with_info_after_validator_function` is deprecated, it will be passed to the function through `ValidationState` instead.', + DeprecationWarning, + stacklevel=2, + ) + return _dict_not_none( type='function-after', function=_dict_not_none(type='with-info', function=function, field_name=field_name), @@ -2181,13 +2255,13 @@ class NoInfoWrapValidatorFunctionSchema(TypedDict): # (input_value: Any, validator: ValidatorFunctionWrapHandler, info: ValidationInfo, /) -> Any -WithInfoWrapValidatorFunction = Callable[[Any, ValidatorFunctionWrapHandler, ValidationInfo], Any] +WithInfoWrapValidatorFunction = Callable[[Any, ValidatorFunctionWrapHandler, ValidationInfo[Any]], Any] class WithInfoWrapValidatorFunctionSchema(TypedDict, total=False): type: Required[Literal['with-info']] function: Required[WithInfoWrapValidatorFunction] - field_name: str + field_name: str # deprecated WrapValidatorFunction = Union[NoInfoWrapValidatorFunctionSchema, WithInfoWrapValidatorFunctionSchema] @@ -2287,12 +2361,19 @@ def fn( Args: function: The validator function to call schema: The schema to validate the output of the validator function - field_name: The name of the field this validators is applied to, if any + field_name: The name of the field this validator is applied to, if any (deprecated) json_schema_input_schema: The core schema to be used to generate the corresponding JSON Schema input type ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core serialization: Custom serialization schema """ + if field_name is not None: + warnings.warn( + 'The `field_name` argument on `with_info_wrap_validator_function` is deprecated, it will be passed to the function through `ValidationState` instead.', + DeprecationWarning, + stacklevel=2, + ) + return _dict_not_none( type='function-wrap', function=_dict_not_none(type='with-info', function=function, field_name=field_name), 
@@ -2379,12 +2460,19 @@ def fn(v: str, info: core_schema.ValidationInfo) -> str: Args: function: The validator function to call - field_name: The name of the field this validators is applied to, if any + field_name: The name of the field this validator is applied to, if any (deprecated) ref: optional unique identifier of the schema, used to reference the schema in other places json_schema_input_schema: The core schema to be used to generate the corresponding JSON Schema input type metadata: Any other information you want to include with the schema, not used by pydantic-core serialization: Custom serialization schema """ + if field_name is not None: + warnings.warn( + 'The `field_name` argument on `with_info_plain_validator_function` is deprecated, it will be passed to the function through `ValidationState` instead.', + DeprecationWarning, + stacklevel=2, + ) + return _dict_not_none( type='function-plain', function=_dict_not_none(type='with-info', function=function, field_name=field_name), @@ -2839,6 +2927,7 @@ class TypedDictField(TypedDict, total=False): serialization_alias: str serialization_exclude: bool # default: False metadata: dict[str, Any] + serialization_exclude_if: Callable[[Any], bool] # default None def typed_dict_field( @@ -2849,6 +2938,7 @@ def typed_dict_field( serialization_alias: str | None = None, serialization_exclude: bool | None = None, metadata: dict[str, Any] | None = None, + serialization_exclude_if: Callable[[Any], bool] | None = None, ) -> TypedDictField: """ Returns a schema that matches a typed dict field, e.g.: @@ -2865,6 +2955,7 @@ def typed_dict_field( validation_alias: The alias(es) to use to find the field in the validation data serialization_alias: The alias to use as a key when serializing serialization_exclude: Whether to exclude the field when serializing + serialization_exclude_if: A callable that determines whether to exclude the field when serializing based on its value. 
metadata: Any other information you want to include with the schema, not used by pydantic-core """ return _dict_not_none( @@ -2874,6 +2965,7 @@ def typed_dict_field( validation_alias=validation_alias, serialization_alias=serialization_alias, serialization_exclude=serialization_exclude, + serialization_exclude_if=serialization_exclude_if, metadata=metadata, ) @@ -2965,6 +3057,7 @@ class ModelField(TypedDict, total=False): validation_alias: Union[str, list[Union[str, int]], list[list[Union[str, int]]]] serialization_alias: str serialization_exclude: bool # default: False + serialization_exclude_if: Callable[[Any], bool] # default: None frozen: bool metadata: dict[str, Any] @@ -2975,6 +3068,7 @@ def model_field( validation_alias: str | list[str | int] | list[list[str | int]] | None = None, serialization_alias: str | None = None, serialization_exclude: bool | None = None, + serialization_exclude_if: Callable[[Any], bool] | None = None, frozen: bool | None = None, metadata: dict[str, Any] | None = None, ) -> ModelField: @@ -2992,6 +3086,7 @@ def model_field( validation_alias: The alias(es) to use to find the field in the validation data serialization_alias: The alias to use as a key when serializing serialization_exclude: Whether to exclude the field when serializing + serialization_exclude_if: A Callable that determines whether to exclude a field during serialization based on its value. 
frozen: Whether the field is frozen metadata: Any other information you want to include with the schema, not used by pydantic-core """ @@ -3001,6 +3096,7 @@ def model_field( validation_alias=validation_alias, serialization_alias=serialization_alias, serialization_exclude=serialization_exclude, + serialization_exclude_if=serialization_exclude_if, frozen=frozen, metadata=metadata, ) @@ -3193,6 +3289,7 @@ class DataclassField(TypedDict, total=False): serialization_alias: str serialization_exclude: bool # default: False metadata: dict[str, Any] + serialization_exclude_if: Callable[[Any], bool] # default: None def dataclass_field( @@ -3206,6 +3303,7 @@ def dataclass_field( serialization_alias: str | None = None, serialization_exclude: bool | None = None, metadata: dict[str, Any] | None = None, + serialization_exclude_if: Callable[[Any], bool] | None = None, frozen: bool | None = None, ) -> DataclassField: """ @@ -3231,6 +3329,7 @@ def dataclass_field( validation_alias: The alias(es) to use to find the field in the validation data serialization_alias: The alias to use as a key when serializing serialization_exclude: Whether to exclude the field when serializing + serialization_exclude_if: A callable that determines whether to exclude the field when serializing based on its value. 
metadata: Any other information you want to include with the schema, not used by pydantic-core frozen: Whether the field is frozen """ @@ -3244,6 +3343,7 @@ def dataclass_field( validation_alias=validation_alias, serialization_alias=serialization_alias, serialization_exclude=serialization_exclude, + serialization_exclude_if=serialization_exclude_if, metadata=metadata, frozen=frozen, ) @@ -3810,6 +3910,7 @@ def url_schema( default_host: str | None = None, default_port: int | None = None, default_path: str | None = None, + preserve_empty_path: bool | None = None, strict: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, @@ -3834,6 +3935,7 @@ def url_schema( default_host: The default host to use if the URL does not have a host default_port: The default port to use if the URL does not have a port default_path: The default path to use if the URL does not have a path + preserve_empty_path: Whether to preserve an empty path or convert it to '/', default False strict: Whether to use strict URL parsing ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core @@ -3847,6 +3949,7 @@ def url_schema( default_host=default_host, default_port=default_port, default_path=default_path, + preserve_empty_path=preserve_empty_path, strict=strict, ref=ref, metadata=metadata, @@ -3876,6 +3979,7 @@ def multi_host_url_schema( default_host: str | None = None, default_port: int | None = None, default_path: str | None = None, + preserve_empty_path: bool | None = None, strict: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, @@ -3900,6 +4004,7 @@ def multi_host_url_schema( default_host: The default host to use if the URL does not have a host default_port: The default port to use if the URL does not have a port default_path: The default path to use if the URL does not have a path + preserve_empty_path: 
Whether to preserve an empty path or convert it to '/', default False strict: Whether to use strict URL parsing ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core @@ -3913,6 +4018,7 @@ def multi_host_url_schema( default_host=default_host, default_port=default_port, default_path=default_path, + preserve_empty_path=preserve_empty_path, strict=strict, ref=ref, metadata=metadata, @@ -4012,6 +4118,7 @@ def definition_reference_schema( DatetimeSchema, TimedeltaSchema, LiteralSchema, + MissingSentinelSchema, EnumSchema, IsInstanceSchema, IsSubclassSchema, @@ -4070,6 +4177,7 @@ def definition_reference_schema( 'datetime', 'timedelta', 'literal', + 'missing-sentinel', 'enum', 'is-instance', 'is-subclass', @@ -4130,6 +4238,7 @@ def definition_reference_schema( 'model_attributes_type', 'dataclass_type', 'dataclass_exact_type', + 'default_factory_not_called', 'none_required', 'greater_than', 'greater_than_equal', @@ -4169,6 +4278,7 @@ def definition_reference_schema( 'value_error', 'assertion_error', 'literal_error', + 'missing_sentinel_error', 'date_type', 'date_parsing', 'date_from_datetime_parsing',