diff --git a/README.md b/README.md
index ef72102..b302836 100644
--- a/README.md
+++ b/README.md
@@ -1,38 +1,129 @@
 # meshly
-A Python library for mesh optimization and encoding/decoding.
+A cross-platform library for efficient 3D mesh serialization and transport between Python and TypeScript/JavaScript applications.
+
+## What is meshly?
+
+**meshly** enables you to:
+
+1. **Serialize 3D meshes efficiently** - Compress mesh data (vertices, indices, normals, etc.) using [meshoptimizer](https://github.com/zeux/meshoptimizer) for optimal GPU-friendly storage
+2. **Transport meshes from Python to the browser** - Create meshes in Python (NumPy/JAX) and load them in TypeScript/JavaScript for WebGL/THREE.js rendering
+3. **Extend with custom data** - Inherit from `Packable` or `Mesh` to add your own array attributes that are automatically serialized
+
+### Use Cases
+
+- **Web-based 3D visualization** - Generate meshes server-side in Python, serve compressed zip files, render in browser with THREE.js
+- **Simulation pipelines** - Store simulation results with mesh geometry and field data in a single portable format
+- **CAD/CAM workflows** - Exchange mesh data between Python tools and web-based viewers
+- **Machine learning** - Serialize mesh datasets with associated feature arrays for training pipelines
 
 ## Project Structure
 
-This repository contains two main components:
+This repository contains two libraries that work together:
+
+### Python Library (`meshly`)
 
-1. **Python Library**: The Python `meshly` package for mesh optimization and encoding/decoding.
 ```bash
 pip install meshly
 ```
 
-2. **TypeScript Library**: The TypeScript `meshly` package for decoding Python meshoptimizer zip generated from Python into THREE.js geometries.
+- Create and manipulate 3D meshes with NumPy/JAX arrays
+- Serialize meshes to compressed zip files using meshoptimizer
+- Extend with custom array attributes via Pydantic models
+- Mesh operations: triangulate, optimize, simplify, combine, extract
+
+### TypeScript Library (`meshly`)
+
 ```bash
 npm install meshly
+# or
+pnpm add meshly
 ```
 
-### Python Library
+- Decode Python-generated mesh zip files in the browser
+- Convert to THREE.js BufferGeometry for WebGL rendering
+- Full TypeScript type definitions
+
+## Quick Example
+
+**Python (server-side):**
+```python
+import numpy as np
+from meshly import Mesh
 
-The Python library is located in the `meshly` directory and provides:
+# Create a mesh
+mesh = Mesh(
+    vertices=np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32),
+    indices=np.array([0, 1, 2], dtype=np.uint32)
+)
 
-- Mesh class as a Pydantic base class for representing and optimizing 3D meshes
-- EncodedMesh class for storing encoded mesh data
-- Functions for encoding and decoding meshes
-- Utilities for compressing numpy arrays
+# Save compressed (uses meshoptimizer)
+mesh.save_to_zip("mesh.zip")
+```
+
+**TypeScript (browser):**
+```typescript
+import { Mesh } from 'meshly'
+import * as THREE from 'three'
 
-### TypeScript Library
+// Load and decode
+const response = await fetch('mesh.zip')
+const mesh = await Mesh.decode(await response.arrayBuffer())
 
-The TypeScript library is located in the `typescript` directory and provides:
+// Render with THREE.js
+const geometry = mesh.toBufferGeometry()
+const material = new THREE.MeshStandardMaterial({ color: 0x2194ce })
+scene.add(new THREE.Mesh(geometry, material))
+```
 
-- Functions to decode Python meshoptimizer zip files
-- Conversion to THREE.js BufferGeometry
-- Browser-compatible implementation
+## Documentation
+
+- [Python README](python/README.md) - Full Python API documentation
+- [TypeScript README](typescript/README.md) - Full TypeScript API documentation
+- [Python Examples](python/examples/) - Jupyter notebooks with usage examples
+
+## Architecture
+
+### Zip File Format
+
+```
+mesh.zip
+├── metadata.json          # Class info + non-array fields
+├── vertices.bin           # Meshoptimizer-encoded vertices
+├── indices.bin            # Meshoptimizer-encoded indices (optional)
+└── arrays/                # Standard compressed arrays
+    ├── normals/
+    │   ├── array.bin
+    │   └── metadata.json
+    └── ...
+```
+
+### Custom Field Encoding
+
+Both Python and TypeScript support custom field encoding via `_get_custom_fields()`:
+
+```python
+# Python
+@classmethod
+def _get_custom_fields(cls) -> Dict[str, CustomFieldConfig]:
+    return {
+        'vertices': CustomFieldConfig(
+            file_name='vertices',
+            encode=Mesh._encode_vertices,
+            decode=Mesh._decode_vertices,
+        ),
+    }
+```
+
+```typescript
+// TypeScript
+protected static override getCustomFields(): Record<string, CustomFieldConfig> {
+  return {
+    vertices: { fileName: 'vertices', decode: Mesh._decodeVertices },
+  }
+}
+```
-## Usage
+## License
-See the examples directory for usage examples of both libraries.
\ No newline at end of file
+MIT
\ No newline at end of file
diff --git a/python/README.md b/python/README.md
index a0c0f5d..ffdf52b 100644
--- a/python/README.md
+++ b/python/README.md
@@ -14,11 +14,14 @@ pip install meshly
 - **`Packable`**: Base class for automatic numpy/JAX array serialization to zip files
 - **`Mesh`**: 3D mesh representation extending Packable with meshoptimizer encoding for vertices/indices
+- **`CustomFieldConfig`**: Configuration for custom field encoding/decoding
+- **`ArrayUtils`**: Utility class for encoding/decoding individual arrays
 
 ### Key Capabilities
 
 - Automatic encoding/decoding of numpy array attributes, including nested dictionaries
 - Custom subclasses with additional array fields are automatically serialized
+- Custom field encoding via `_get_custom_fields()` override
 - Enhanced polygon support with `index_sizes` and VTK-compatible `cell_types`
 - Mesh markers for boundary conditions, material regions, and geometric features
 - Mesh operations: triangulate, optimize, simplify, combine, extract
@@ -26,6 +29,24 @@ pip install meshly
 
 ## Quick Start
 
+### Standalone Array Compression
+
+Compress individual arrays without creating a Packable:
+
+```python
+import numpy as np
+from meshly import ArrayUtils
+
+# Create an array
+data = np.random.randn(1000, 3).astype(np.float32)
+
+# Save to zip
+ArrayUtils.save_to_zip(data, "array.zip")
+
+# Load from zip
+loaded = ArrayUtils.load_from_zip("array.zip")
+```
+
 ### Basic Mesh Usage
 
 ```python
@@ -47,6 +68,10 @@ mesh.save_to_zip("mesh.zip")
 # Load from zip
 loaded = Mesh.load_from_zip("mesh.zip")
 print(f"Loaded {loaded.vertex_count} vertices")
+
+# Or use encode/decode for in-memory operations
+encoded = mesh.encode()  # Returns bytes
+decoded = Mesh.decode(encoded)
 ```
 
 ### Custom Mesh Subclasses
@@ -83,6 +108,40 @@ mesh.save_to_zip("textured.zip")
 loaded = TexturedMesh.load_from_zip("textured.zip")
 ```
 
+### Custom Field Encoding
+
+For fields that need special encoding (like meshoptimizer for vertices/indices), override `_get_custom_fields()`:
+
+```python
+from meshly import Packable, CustomFieldConfig
+from typing import Dict
+
+class CompressedData(Packable):
+    """Example with custom field encoding."""
+    data: np.ndarray
+
+    @staticmethod
+    def _encode_data(data: np.ndarray, instance: "CompressedData") -> bytes:
+        # Custom encoding logic
+        return custom_compress(data)
+
+    @staticmethod
+    def _decode_data(encoded: bytes, metadata, array_type) -> np.ndarray:
+        # Custom decoding logic
+        return custom_decompress(encoded)
+
+    @classmethod
+    def _get_custom_fields(cls) -> Dict[str, CustomFieldConfig]:
+        return {
+            'data': CustomFieldConfig(
+                file_name='data',
+                encode=cls._encode_data,
+                decode=cls._decode_data,
+                optional=False
+            ),
+        }
+```
+
 ### Dict of Pydantic BaseModel Objects
 
 You can also use dictionaries containing Pydantic `BaseModel` instances with numpy arrays:
 
@@ -127,6 +186,87 @@ loaded = SceneMesh.load_from_zip("scene.zip")
 # loaded.materials["wood"] is a MaterialProperties instance
 
+### Nested Packables
+
+Fields that are themselves `Packable` subclasses are automatically handled:
+
+```python
+class PhysicsProperties(Packable):
+    """Physics data as a nested Packable."""
+    mass: float = 1.0
+    inertia_tensor: np.ndarray  # 3x3 matrix
+
+class PhysicsMesh(Mesh):
+    """Mesh with nested Packable field."""
+    physics: Optional[PhysicsProperties] = None
+
+# Nested Packables use their own encode/decode methods
+mesh = PhysicsMesh(
+    vertices=vertices,
+    indices=indices,
+    physics=PhysicsProperties(
+        mass=2.5,
+        inertia_tensor=np.eye(3, dtype=np.float32)
+    )
+)
+
+mesh.save_to_zip("physics_mesh.zip")
+loaded = PhysicsMesh.load_from_zip("physics_mesh.zip")
+print(loaded.physics.mass)  # 2.5
+```
+
+### Caching Nested Packables
+
+For large projects with shared nested Packables, use caching to deduplicate data using SHA256 content-addressable storage:
+
+```python
+from meshly import ReadHandler, WriteHandler
+
+# Create cache functions from a directory path
+cache_saver = WriteHandler.create_cache_saver("/path/to/cache")
+cache_loader = ReadHandler.create_cache_loader("/path/to/cache")
+
+# Save with caching - nested Packables stored separately by hash
+mesh.save_to_zip("mesh.zip", cache_saver=cache_saver)
+
+# Load with caching - nested Packables loaded from cache
+loaded = PhysicsMesh.load_from_zip("mesh.zip", cache_loader=cache_loader)
+```
+
+**Deduplication example:**
+
+```python
+# Two meshes sharing identical physics properties
+shared_physics = PhysicsProperties(mass=1.0, inertia_tensor=np.eye(3))
+
+mesh1 = PhysicsMesh(vertices=v1, indices=i1, physics=shared_physics)
+mesh2 = PhysicsMesh(vertices=v2, indices=i2, physics=shared_physics)
+
+# Save both with the same cache - physics stored only once!
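+# (Illustrative note, an assumption rather than documented behavior: with the
+# SHA256 content-addressable cache described above, both saves below derive the
+# same hash for shared_physics, so the cached blob is written only once and
+# loading either mesh with cache_loader reads that same entry.)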
+mesh1.save_to_zip("mesh1.zip", cache_saver=cache_saver) +mesh2.save_to_zip("mesh2.zip", cache_saver=cache_saver) +``` + +**Custom cache functions:** + +```python +from meshly import CacheLoader, CacheSaver + +# Type signatures: +# CacheLoader = Callable[[str], Optional[bytes]] # hash -> bytes or None +# CacheSaver = Callable[[str, bytes], None] # hash, bytes -> None + +# Example: Redis-backed cache +def redis_loader(hash: str) -> Optional[bytes]: + return redis_client.get(f"packable:{hash}") + +def redis_saver(hash: str, data: bytes) -> None: + redis_client.set(f"packable:{hash}", data) + +mesh.save_to_zip("mesh.zip", cache_saver=redis_saver) +loaded = PhysicsMesh.load_from_zip("mesh.zip", cache_loader=redis_loader) +``` + ## Architecture ### Class Hierarchy @@ -146,19 +286,19 @@ PackableMetadata (base metadata) The `Packable` base class provides: - `save_to_zip()` / `load_from_zip()` - File I/O with compression -- `encode()` - In-memory serialization +- `encode()` / `decode()` - In-memory serialization to/from bytes +- `convert_to()` - Convert arrays between numpy and JAX +- `_get_custom_fields()` - Override point for custom field encoding - `load_metadata()` - Generic metadata loading with type parameter -- `_create_metadata()` - Override point for custom metadata ### Zip File Structure ``` mesh.zip ├── metadata.json # PackableMetadata or MeshMetadata -├── mesh/ # Mesh-specific (meshoptimizer encoded) -│ ├── vertices.bin -│ └── indices.bin -└── arrays/ # Additional arrays +├── vertices.bin # Meshoptimizer-encoded (custom field) +├── indices.bin # Meshoptimizer-encoded (custom field, optional) +└── arrays/ # Standard arrays (auto-compressed) ├── texture_coords/ │ ├── array.bin │ └── metadata.json @@ -288,36 +428,88 @@ mesh = Mesh( ) # Load with JAX arrays -mesh = Mesh.load_from_zip("mesh.zip", use_jax=True) +mesh = Mesh.load_from_zip("mesh.zip", array_type="jax") + +# Convert between array types +numpy_mesh = mesh.convert_to("numpy") +jax_mesh = mesh.convert_to("jax") ``` ## API Reference +### ArrayUtils + +```python +class ArrayUtils: + # Encode/decode arrays + @staticmethod + def encode_array(array: Array) -> EncodedArray + @staticmethod + def decode_array(encoded: EncodedArray) -> np.ndarray + + # File I/O for single arrays + @staticmethod + def save_to_zip(array: Array, destination: PathLike | BytesIO) -> None + @staticmethod + def load_from_zip(source: PathLike | BytesIO, array_type=None) -> Array + + # Array type utilities + @staticmethod + def is_array(obj) -> bool + @staticmethod + def detect_array_type(array: Array) -> ArrayType + @staticmethod + def convert_array(array: Array, array_type: ArrayType) -> Array +``` + +### CustomFieldConfig + +```python +@dataclass +class CustomFieldConfig(Generic[V, M]): + file_name: str # File name in zip (without .bin) + encode: Callable[[V, Any], bytes] # (value, instance) -> bytes + decode: Callable[[bytes, M, Optional[ArrayType]], V] # (bytes, metadata, array_type) -> value + optional: bool = False # Won't throw if missing +``` + ### Packable (Base Class) ```python class Packable(BaseModel): - def save_to_zip(self, destination, date_time=None) -> None + # File I/O + def save_to_zip(self, destination, cache_saver=None) -> None @classmethod - def load_from_zip(cls, source, use_jax=False) -> T + def load_from_zip(cls, source, array_type=None, cache_loader=None) -> T - @staticmethod - def load_array(source, name, use_jax=False) -> Array + # In-memory serialization + def encode(self, cache_saver=None) -> bytes + @classmethod + def decode(cls, 
buf: bytes, array_type=None, cache_loader=None) -> T + + # Array conversion + def convert_to(self, array_type: ArrayType) -> T - def encode(self) -> EncodedData + # Single array loading + @staticmethod + def load_array(source, name, array_type=None) -> Array + # Metadata @classmethod - def load_metadata(cls, zipf, metadata_cls=PackableMetadata) -> M + def load_metadata(cls, handler, metadata_cls=PackableMetadata) -> M - def _create_metadata(self, field_data) -> PackableMetadata # Override point + # Custom field encoding (override in subclasses) + @classmethod + def _get_custom_fields(cls) -> Dict[str, CustomFieldConfig] ``` ### Mesh ```python class Mesh(Packable): - vertices: Array # Required - indices: Optional[Array] # Optional + # Fields + vertices: Array # Required (meshoptimizer encoded) + indices: Optional[Array] # Optional (meshoptimizer encoded) index_sizes: Optional[Array] # Auto-inferred cell_types: Optional[Array] # Auto-inferred dim: Optional[int] # Auto-computed @@ -344,7 +536,9 @@ class Mesh(Packable): @staticmethod def combine(meshes, marker_names=None, preserve_markers=True) -> Mesh - def _create_metadata(self, field_data) -> MeshMetadata # Returns MeshMetadata + # Custom field encoding for meshoptimizer + @classmethod + def _get_custom_fields(cls) -> Dict[str, CustomFieldConfig] ``` ### Metadata Classes @@ -354,6 +548,7 @@ class PackableMetadata(BaseModel): class_name: str module_name: str field_data: Dict[str, Any] + packable_refs: Dict[str, str] # SHA256 hash refs for cached packables class MeshSizeInfo(BaseModel): vertex_count: int @@ -363,6 +558,19 @@ class MeshSizeInfo(BaseModel): class MeshMetadata(PackableMetadata): mesh_size: MeshSizeInfo + array_type: ArrayType = "numpy" # "numpy" or "jax" +``` + +### Cache Types + +```python +# Type aliases for cache callbacks +CacheLoader = Callable[[str], Optional[bytes]] # hash -> bytes or None +CacheSaver = Callable[[str, bytes], None] # hash, bytes -> None + +# Factory methods to create cache functions from paths +ReadHandler.create_cache_loader(source: PathLike) -> CacheLoader +WriteHandler.create_cache_saver(destination: PathLike) -> CacheSaver ``` ## Examples diff --git a/python/examples/array_example.ipynb b/python/examples/array_example.ipynb index 1742721..0f97d5c 100644 --- a/python/examples/array_example.ipynb +++ b/python/examples/array_example.ipynb @@ -65,7 +65,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -75,7 +75,7 @@ "Original size: 4000 bytes\n", "Encoded size: 2294 bytes\n", "Compression ratio: 0.574\n", - "Encoded metadata: shape=(1000,), dtype=float32, itemsize=4\n" + "Encoded metadata: shape=[1000], dtype=float32, itemsize=4\n" ] } ], @@ -86,12 +86,12 @@ "print(f\"Original size: {linear_array.nbytes} bytes\")\n", "print(f\"Encoded size: {len(encoded_linear.data)} bytes\")\n", "print(f\"Compression ratio: {len(encoded_linear.data) / linear_array.nbytes:.3f}\")\n", - "print(f\"Encoded metadata: shape={encoded_linear.shape}, dtype={encoded_linear.dtype}, itemsize={encoded_linear.itemsize}\")" + "print(f\"Encoded metadata: shape={encoded_linear.metadata.shape}, dtype={encoded_linear.metadata.dtype}, itemsize={encoded_linear.metadata.itemsize}\")" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -126,7 +126,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -139,13 +139,13 @@ " Decoded correctly: True\n", "\n", "Random 
values:\n", - " Original: 1,200 bytes, Encoded: 1,089 bytes\n", - " Compression ratio: 0.907\n", + " Original: 1,200 bytes, Encoded: 1,076 bytes\n", + " Compression ratio: 0.897\n", " Decoded correctly: True\n", "\n", "Integer values:\n", - " Original: 10,000 bytes, Encoded: 3,651 bytes\n", - " Compression ratio: 0.365\n", + " Original: 10,000 bytes, Encoded: 3,688 bytes\n", + " Compression ratio: 0.369\n", " Decoded correctly: True\n", "\n" ] @@ -177,7 +177,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -232,7 +232,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -253,29 +253,21 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Saved to /tmp/tmpddux5oqo.zip\n", - "Zip file size: 2409 bytes\n", - "Compression ratio vs raw data: 1.004\n", - "Cleaned up /tmp/tmpddux5oqo.zip\n" - ] - }, - { - "ename": "AttributeError", - "evalue": "'ArrayResult' object has no attribute 'shape'", - "output_type": "error", - "traceback": [ - "\u001b[31m---------------------------------------------------------------------------\u001b[39m", - "\u001b[31mAttributeError\u001b[39m Traceback (most recent call last)", - "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[8]\u001b[39m\u001b[32m, line 17\u001b[39m\n\u001b[32m 14\u001b[39m \u001b[38;5;66;03m# Load the array back\u001b[39;00m\n\u001b[32m 15\u001b[39m loaded_array = ArrayUtils.load_from_zip(temp_path)\n\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[33mLoaded array shape: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[43mloaded_array\u001b[49m\u001b[43m.\u001b[49m\u001b[43mshape\u001b[49m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 18\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mLoaded array dtype: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mloaded_array.dtype\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 19\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mArrays are equal: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnp.allclose(sample_array,\u001b[38;5;250m \u001b[39mloaded_array)\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/.local/lib/python3.12/site-packages/pydantic/main.py:991\u001b[39m, in \u001b[36mBaseModel.__getattr__\u001b[39m\u001b[34m(self, item)\u001b[39m\n\u001b[32m 988\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__getattribute__\u001b[39m(item) \u001b[38;5;66;03m# Raises AttributeError if appropriate\u001b[39;00m\n\u001b[32m 989\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 990\u001b[39m \u001b[38;5;66;03m# this is the current error\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m991\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mAttributeError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mtype\u001b[39m(\u001b[38;5;28mself\u001b[39m).\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m!r}\u001b[39;00m\u001b[33m object has no attribute \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mitem\u001b[38;5;132;01m!r}\u001b[39;00m\u001b[33m'\u001b[39m)\n", - 
"\u001b[31mAttributeError\u001b[39m: 'ArrayResult' object has no attribute 'shape'" + "Saved to /tmp/tmp7ghnmw7s.zip\n", + "Zip file size: 2487 bytes\n", + "Compression ratio vs raw data: 1.036\n", + "\n", + "Loaded array shape: (200, 3)\n", + "Loaded array dtype: float32\n", + "Arrays are equal: True\n", + "Cleaned up /tmp/tmp7ghnmw7s.zip\n" ] } ], @@ -318,7 +310,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -326,7 +318,7 @@ "output_type": "stream", "text": [ "Original array shape: (3, 3)\n", - "Buffer size: 298 bytes\n", + "Buffer size: 420 bytes\n", "Original array size: 36 bytes\n", "\n", "Loaded array shape: (3, 3)\n", @@ -381,7 +373,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -455,7 +447,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -466,10 +458,10 @@ "Test array size: 400,000 bytes (0.38 MB)\n", "\n", "ArrayUtils (compressed):\n", - " Compressed size: 342,221 bytes (0.33 MB)\n", - " Compression ratio: 0.856\n", - " Compression time: 0.010 seconds\n", - " Decompression time: 0.002 seconds\n", + " Compressed size: 345,331 bytes (0.33 MB)\n", + " Compression ratio: 0.863\n", + " Compression time: 0.001 seconds\n", + " Decompression time: 0.001 seconds\n", " Data integrity: True\n" ] } @@ -505,7 +497,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -520,9 +512,9 @@ " Data integrity: True\n", "\n", "Comparison:\n", - " Size reduction: 14.5% smaller\n", - " Compression time vs numpy save: 95.6x\n", - " Decompression time vs numpy load: 7.5x\n" + " Size reduction: 13.7% smaller\n", + " Compression time vs numpy save: 11.0x\n", + " Decompression time vs numpy load: 4.9x\n" ] } ], @@ -563,7 +555,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -580,7 +572,7 @@ " [1.5 1. 0. ]]\n", "\n", "Saved vertex data to demo_vertices.zip\n", - "File size: 313 bytes\n", + "File size: 439 bytes\n", "\n", "This zip file can be loaded in TypeScript using:\n", "const vertices = await ArrayUtils.loadFromZip(zipBuffer);\n" @@ -620,7 +612,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [ { diff --git a/python/examples/mesh_example.ipynb b/python/examples/mesh_example.ipynb index 27fd8dd..31ea71e 100644 --- a/python/examples/mesh_example.ipynb +++ b/python/examples/mesh_example.ipynb @@ -1,537 +1,765 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Pydantic-based Mesh Example\n", - "\n", - "This notebook demonstrates how to use the new Pydantic-based Mesh class in meshly. It covers:\n", - "\n", - "1. Creating custom Mesh subclasses with additional attributes\n", - "2. Working with numpy arrays in Pydantic models\n", - "3. Encoding and decoding meshes to/from zip files\n", - "4. Optimizing meshes with the built-in optimization methods" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import numpy as np\n", - "from typing import Optional, List\n", - "from pydantic import Field\n", - "\n", - "# Import the Mesh class\n", - "from meshly import Mesh, MeshUtils" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. 
Creating a Custom Mesh Subclass\n", - "\n", - "One of the key benefits of the new Pydantic-based Mesh class is the ability to create custom subclasses with additional attributes. Let's create a `TexturedMesh` class that adds texture coordinates and normals." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "from pydantic import BaseModel, ConfigDict\n", - "\n", - "class MaterialProperties(BaseModel):\n", - " \"\"\"Material properties with numpy arrays - demonstrates BaseModel in dict edge case.\"\"\"\n", - " model_config = ConfigDict(arbitrary_types_allowed=True)\n", - " \n", - " name: str = Field(..., description=\"Material name\")\n", - " diffuse: np.ndarray = Field(..., description=\"Diffuse color array\")\n", - " specular: np.ndarray = Field(..., description=\"Specular color array\")\n", - " shininess: float = Field(32.0, description=\"Shininess value\")\n", - "\n", - "\n", - "class TexturedMesh(Mesh):\n", - " \"\"\"\n", - " A mesh with texture coordinates and normals.\n", - " \n", - " This demonstrates how to create a custom Mesh subclass with additional\n", - " numpy array attributes that will be automatically encoded/decoded.\n", - " \"\"\"\n", - " # Add texture coordinates and normals as additional numpy arrays\n", - " texture_coords: np.ndarray = Field(..., description=\"Texture coordinates\")\n", - " normals: Optional[np.ndarray] = Field(None, description=\"Vertex normals\")\n", - " \n", - " # Add non-array attributes\n", - " material_name: str = Field(\"default\", description=\"Material name\")\n", - " tags: List[str] = Field(default_factory=list, description=\"Tags for the mesh\")\n", - "\n", - " # Dictionary containing nested dictionaries with arrays\n", - " material_data: dict[str, dict[str, np.ndarray]] = Field(\n", - " default_factory=dict,\n", - " description=\"Nested dictionary structure with arrays\"\n", - " )\n", - "\n", - " material_colors: dict[str, str] = Field(\n", - " default_factory=dict,\n", - " description=\"Dictionary with non-array values\"\n", - " )\n", - "\n", - " # NEW: Dictionary containing BaseModel instances with numpy arrays\n", - " # This demonstrates the edge case of dict[str, BaseModel] where BaseModel has arrays\n", - " materials: dict[str, MaterialProperties] = Field(\n", - " default_factory=dict,\n", - " description=\"Dictionary of material name to MaterialProperties (BaseModel with arrays)\"\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Creating a Mesh Instance\n", - "\n", - "Now let's create a simple cube mesh with texture coordinates and normals." 
- ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Mesh created with 8 vertices and 36 indices\n", - "Material name: cube_material\n", - "Tags: ['cube', 'example']\n", - "Materials (BaseModel dict): ['cube_material', 'secondary_material']\n" - ] - } - ], - "source": [ - "# Create vertices for a cube\n", - "vertices = np.array([\n", - " [-0.5, -0.5, -0.5], # 0: bottom-left-back\n", - " [0.5, -0.5, -0.5], # 1: bottom-right-back\n", - " [0.5, 0.5, -0.5], # 2: top-right-back\n", - " [-0.5, 0.5, -0.5], # 3: top-left-back\n", - " [-0.5, -0.5, 0.5], # 4: bottom-left-front\n", - " [0.5, -0.5, 0.5], # 5: bottom-right-front\n", - " [0.5, 0.5, 0.5], # 6: top-right-front\n", - " [-0.5, 0.5, 0.5] # 7: top-left-front\n", - "], dtype=np.float32)\n", - "\n", - "# Create indices for the cube\n", - "indices = np.array([\n", - " [0, 1, 2, 2, 3, 0], # back face\n", - " [1, 5, 6, 6, 2, 1], # right face\n", - " [5, 4, 7, 7, 6, 5], # front face\n", - " [4, 0, 3, 3, 7, 4], # left face\n", - " [3, 2, 6, 6, 7, 3], # top face\n", - " [4, 5, 1, 1, 0, 4] # bottom face\n", - "], dtype=np.uint32)\n", - "\n", - "# Create texture coordinates (one for each vertex)\n", - "texture_coords = np.array([\n", - " [0.0, 0.0], # 0\n", - " [1.0, 0.0], # 1\n", - " [1.0, 1.0], # 2\n", - " [0.0, 1.0], # 3\n", - " [0.0, 0.0], # 4\n", - " [1.0, 0.0], # 5\n", - " [1.0, 1.0], # 6\n", - " [0.0, 1.0] # 7\n", - "], dtype=np.float32)\n", - "\n", - "# Create normals (one for each vertex)\n", - "normals = np.array([\n", - " [0.0, 0.0, -1.0], # 0: back\n", - " [0.0, 0.0, -1.0], # 1: back\n", - " [0.0, 0.0, -1.0], # 2: back\n", - " [0.0, 0.0, -1.0], # 3: back\n", - " [0.0, 0.0, 1.0], # 4: front\n", - " [0.0, 0.0, 1.0], # 5: front\n", - " [0.0, 0.0, 1.0], # 6: front\n", - " [0.0, 0.0, 1.0] # 7: front\n", - "], dtype=np.float32)\n", - "\n", - "# Create MaterialProperties instances (BaseModel with numpy arrays)\n", - "cube_material = MaterialProperties(\n", - " name=\"cube_material\",\n", - " diffuse=np.array([1.0, 0.5, 0.31], dtype=np.float32),\n", - " specular=np.array([0.5, 0.5, 0.5], dtype=np.float32),\n", - " shininess=32.0\n", - ")\n", - "\n", - "secondary_material = MaterialProperties(\n", - " name=\"secondary_material\",\n", - " diffuse=np.array([0.2, 0.8, 0.2], dtype=np.float32),\n", - " specular=np.array([0.3, 0.3, 0.3], dtype=np.float32),\n", - " shininess=16.0\n", - ")\n", - "\n", - "# Create the textured mesh\n", - "mesh = TexturedMesh(\n", - " vertices=vertices,\n", - " indices=indices,\n", - " texture_coords=texture_coords,\n", - " normals=normals,\n", - " material_name=\"cube_material\",\n", - " tags=[\"cube\", \"example\"],\n", - " material_data={\n", - " \"cube_material\": {\n", - " \"diffuse\": np.array([1.0, 0.5, 0.31], dtype=np.float32),\n", - " \"specular\": np.array([0.5, 0.5, 0.5], dtype=np.float32),\n", - " \"shininess\": np.array([32.0], dtype=np.float32)\n", - " }\n", - " },\n", - " material_colors={\n", - " \"cube_material\": \"#FF7F50\"\n", - " },\n", - " # NEW: dict[str, BaseModel] with numpy arrays inside\n", - " materials={\n", - " \"cube_material\": cube_material,\n", - " \"secondary_material\": secondary_material\n", - " }\n", - ")\n", - "\n", - "print(f\"Mesh created with {mesh.vertex_count} vertices and {mesh.index_count} indices\")\n", - "print(f\"Material name: {mesh.material_name}\")\n", - "print(f\"Tags: {mesh.tags}\")\n", - "print(f\"Materials (BaseModel dict): {list(mesh.materials.keys())}\")" - ] - }, 
- { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Optimizing the Mesh\n", - "\n", - "The Mesh class provides several optimization methods that can be used to improve rendering performance." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optimized for vertex cache\n", - "Optimized for overdraw\n", - "Optimized for vertex fetch\n" - ] - } - ], - "source": [ - "# Optimize the mesh for vertex cache\n", - "vertex_cache_optimized_mesh = mesh.optimize_vertex_cache()\n", - "print(\"Optimized for vertex cache\")\n", - "\n", - "# Optimize the mesh for overdraw\n", - "overdraw_optimized_mesh = mesh.optimize_overdraw()\n", - "print(\"Optimized for overdraw\")\n", - "\n", - "# Optimize the mesh for vertex fetch\n", - "vertex_fetch_optimized = mesh.optimize_vertex_fetch()\n", - "print(\"Optimized for vertex fetch\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. Encoding and Saving the Mesh\n", - "\n", - "The Mesh class provides methods for encoding the mesh and saving it to a zip file." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Encoded mesh: 56 bytes for vertices, 95 bytes for indices\n", - "Encoded arrays: ['cell_types', 'materials.cube_material.diffuse', 'materials.cube_material.specular', 'index_sizes', 'texture_coords', 'vertices', 'materials.secondary_material.specular', 'material_data.cube_material.shininess', 'material_data.cube_material.specular', 'materials.secondary_material.diffuse', 'material_data.cube_material.diffuse', 'normals', 'indices']\n", - "Saved mesh to textured_cube.zip, file size: 5092 bytes\n" - ] - } - ], - "source": [ - "# Encode the mesh\n", - "encoded_mesh = mesh.encode()\n", - "print(f\"Encoded mesh: {len(encoded_mesh.arrays[\"vertices\"])} bytes for vertices, {len(encoded_mesh.arrays[\"indices\"])} bytes for indices\")\n", - "print(f\"Encoded arrays: {list(encoded_mesh.arrays.keys())}\")\n", - "\n", - "# Save the mesh to a zip file\n", - "zip_path = \"textured_cube.zip\"\n", - "mesh.save_to_zip(zip_path)\n", - "print(f\"Saved mesh to {zip_path}, file size: {os.path.getsize(zip_path)} bytes\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. Loading the Mesh from a Zip File\n", - "\n", - "The Mesh class provides a class method for loading a mesh from a zip file." - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loaded mesh: 8 vertices, 36 indices\n", - "Material name: cube_material\n", - "Tags: ['cube', 'example']\n", - "\n", - "Texture coordinates shape: (8, 2)\n", - "Normals shape: (8, 3)\n", - "Material data: {'cube_material': {'diffuse': array([1. , 0.5 , 0.31], dtype=float32), 'shininess': array([32.], dtype=float32), 'specular': array([0.5, 0.5, 0.5], dtype=float32)}}\n", - "Material colors: {'cube_material': '#FF7F50'}\n", - "\n", - "--- BaseModel dict edge case ---\n", - "Materials keys: ['cube_material', 'secondary_material']\n", - " cube_material:\n", - " type: MaterialProperties\n", - " diffuse: [1. 
0.5 0.31]\n", - " specular: [0.5 0.5 0.5]\n", - " shininess: 32.0\n", - " secondary_material:\n", - " type: MaterialProperties\n", - " diffuse: [0.2 0.8 0.2]\n", - " specular: [0.3 0.3 0.3]\n", - " shininess: 16.0\n" - ] - } - ], - "source": [ - "# Load the mesh from the zip file\n", - "loaded_mesh = TexturedMesh.load_from_zip(zip_path)\n", - "print(f\"Loaded mesh: {loaded_mesh.vertex_count} vertices, {loaded_mesh.index_count} indices\")\n", - "print(f\"Material name: {loaded_mesh.material_name}\")\n", - "print(f\"Tags: {loaded_mesh.tags}\")\n", - "\n", - "# Verify that the texture coordinates and normals were loaded correctly\n", - "print(f\"\\nTexture coordinates shape: {loaded_mesh.texture_coords.shape}\")\n", - "print(f\"Normals shape: {loaded_mesh.normals.shape}\")\n", - "print(f\"Material data: {loaded_mesh.material_data}\")\n", - "print(f\"Material colors: {loaded_mesh.material_colors}\")\n", - "\n", - "# Verify the dict[str, BaseModel] edge case was loaded correctly\n", - "print(f\"\\n--- BaseModel dict edge case ---\")\n", - "print(f\"Materials keys: {list(loaded_mesh.materials.keys())}\")\n", - "for mat_name, mat in loaded_mesh.materials.items():\n", - " print(f\" {mat_name}:\")\n", - " print(f\" type: {type(mat).__name__}\")\n", - " print(f\" diffuse: {mat.diffuse}\")\n", - " print(f\" specular: {mat.specular}\")\n", - " print(f\" shininess: {mat.shininess}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 6. Creating a Different Mesh Subclass\n", - "\n", - "Let's create another mesh subclass with different attributes to demonstrate the flexibility of the Pydantic-based Mesh class." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Skinned mesh created with 8 vertices and 36 indices\n", - "Skeleton name: human_skeleton\n", - "Animation names: ['walk', 'run', 'jump']\n", - "Bone weights shape: (8, 4)\n", - "Bone indices shape: (8, 4)\n" - ] - } - ], - "source": [ - "class SkinnedMesh(Mesh):\n", - " \"\"\"\n", - " A mesh with skinning information for animation.\n", - " \"\"\"\n", - " # Add bone weights and indices as additional numpy arrays\n", - " bone_weights: np.ndarray = Field(..., description=\"Bone weights for each vertex\")\n", - " bone_indices: np.ndarray = Field(..., description=\"Bone indices for each vertex\")\n", - " \n", - " # Add non-array attributes\n", - " skeleton_name: str = Field(\"default\", description=\"Skeleton name\")\n", - " animation_names: List[str] = Field(default_factory=list, description=\"Animation names\")\n", - "\n", - "# Create a simple skinned mesh\n", - "skinned_mesh = SkinnedMesh(\n", - " vertices=vertices,\n", - " indices=indices,\n", - " bone_weights=np.random.random((len(vertices), 4)).astype(np.float32), # 4 weights per vertex\n", - " bone_indices=np.random.randint(0, 4, (len(vertices), 4)).astype(np.uint8), # 4 bone indices per vertex\n", - " skeleton_name=\"human_skeleton\",\n", - " animation_names=[\"walk\", \"run\", \"jump\"]\n", - ")\n", - "\n", - "print(f\"Skinned mesh created with {skinned_mesh.vertex_count} vertices and {skinned_mesh.index_count} indices\")\n", - "print(f\"Skeleton name: {skinned_mesh.skeleton_name}\")\n", - "print(f\"Animation names: {skinned_mesh.animation_names}\")\n", - "print(f\"Bone weights shape: {skinned_mesh.bone_weights.shape}\")\n", - "print(f\"Bone indices shape: {skinned_mesh.bone_indices.shape}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - 
"source": [ - "## 7. Saving and Loading the Skinned Mesh\n", - "\n", - "Let's save and load the skinned mesh to demonstrate that all attributes are preserved." - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Saved skinned mesh to skinned_cube.zip, file size: 2083 bytes\n", - "\n", - "Loaded skinned mesh: 8 vertices, 36 indices\n", - "Skeleton name: human_skeleton\n", - "Animation names: ['walk', 'run', 'jump']\n", - "Bone weights shape: (8, 4)\n", - "Bone indices shape: (8, 4)\n" - ] - } - ], - "source": [ - "# Save the skinned mesh to a zip file\n", - "skinned_zip_path = \"skinned_cube.zip\"\n", - "skinned_mesh.save_to_zip(skinned_zip_path)\n", - "print(f\"Saved skinned mesh to {skinned_zip_path}, file size: {os.path.getsize(skinned_zip_path)} bytes\")\n", - "\n", - "# Load the skinned mesh from the zip file\n", - "loaded_skinned_mesh = SkinnedMesh.load_from_zip(skinned_zip_path)\n", - "print(f\"\\nLoaded skinned mesh: {loaded_skinned_mesh.vertex_count} vertices, {loaded_skinned_mesh.index_count} indices\")\n", - "print(f\"Skeleton name: {loaded_skinned_mesh.skeleton_name}\")\n", - "print(f\"Animation names: {loaded_skinned_mesh.animation_names}\")\n", - "print(f\"Bone weights shape: {loaded_skinned_mesh.bone_weights.shape}\")\n", - "print(f\"Bone indices shape: {loaded_skinned_mesh.bone_indices.shape}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 8. Cleaning Up\n", - "\n", - "Let's clean up the files we created." - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Removed textured_cube.zip\n", - "Removed skinned_cube.zip\n", - "\n", - "Example completed successfully!\n" - ] - } - ], - "source": [ - "# Clean up\n", - "for path in [zip_path, skinned_zip_path]:\n", - " if os.path.exists(path):\n", - " os.remove(path)\n", - " print(f\"Removed {path}\")\n", - "\n", - "print(\"\\nExample completed successfully!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Jax Conversion example" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Converted skinned mesh to JAX arrays, vertex dtype: float32\n" - ] - } - ], - "source": [ - "try:\n", - " import jax\n", - "\n", - " jax_skinned_mesh = skinned_mesh.to_jax()\n", - " print(f\"Converted skinned mesh to JAX arrays, vertex dtype: {jax_skinned_mesh.vertices.dtype}\")\n", - "except ImportError:\n", - " pass" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 4 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Pydantic-based Mesh Example\n", + "\n", + "This notebook demonstrates how to use the new Pydantic-based Mesh class in meshly. It covers:\n", + "\n", + "1. Creating custom Mesh subclasses with additional attributes\n", + "2. Working with numpy arrays in Pydantic models\n", + "3. Encoding and decoding meshes to/from zip files\n", + "4. 
Optimizing meshes with the built-in optimization methods" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "from typing import Optional, List\n", + "from pydantic import Field\n", + "\n", + "# Import the Mesh class\n", + "from meshly import Mesh" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Creating a Custom Mesh Subclass\n", + "\n", + "One of the key benefits of the new Pydantic-based Mesh class is the ability to create custom subclasses with additional attributes. Let's create a `TexturedMesh` class that adds texture coordinates and normals." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from pydantic import BaseModel, ConfigDict\n", + "from meshly import Packable\n", + "\n", + "class MaterialProperties(BaseModel):\n", + " \"\"\"Material properties with numpy arrays - demonstrates BaseModel in dict edge case.\"\"\"\n", + " model_config = ConfigDict(arbitrary_types_allowed=True)\n", + " \n", + " name: str = Field(..., description=\"Material name\")\n", + " diffuse: np.ndarray = Field(..., description=\"Diffuse color array\")\n", + " specular: np.ndarray = Field(..., description=\"Specular color array\")\n", + " shininess: float = Field(32.0, description=\"Shininess value\")\n", + "\n", + "\n", + "class PhysicsProperties(Packable):\n", + " \"\"\"Physics properties as a nested Packable - demonstrates Packable field support.\"\"\"\n", + " mass: float = Field(1.0, description=\"Object mass\")\n", + " friction: float = Field(0.5, description=\"Friction coefficient\")\n", + " # Arrays in nested Packable are encoded/decoded using the Packable's own encode/decode\n", + " inertia_tensor: np.ndarray = Field(..., description=\"3x3 inertia tensor\")\n", + " collision_points: np.ndarray = Field(..., description=\"Collision sample points\")\n", + "\n", + "\n", + "class TexturedMesh(Mesh):\n", + " \"\"\"\n", + " A mesh with texture coordinates and normals.\n", + " \n", + " This demonstrates how to create a custom Mesh subclass with additional\n", + " numpy array attributes that will be automatically encoded/decoded.\n", + " \"\"\"\n", + " # Add texture coordinates and normals as additional numpy arrays\n", + " texture_coords: np.ndarray = Field(..., description=\"Texture coordinates\")\n", + " normals: Optional[np.ndarray] = Field(None, description=\"Vertex normals\")\n", + " \n", + " # Add non-array attributes\n", + " material_name: str = Field(\"default\", description=\"Material name\")\n", + " tags: List[str] = Field(default_factory=list, description=\"Tags for the mesh\")\n", + "\n", + " # Dictionary containing nested dictionaries with arrays\n", + " material_data: dict[str, dict[str, np.ndarray]] = Field(\n", + " default_factory=dict,\n", + " description=\"Nested dictionary structure with arrays\"\n", + " )\n", + "\n", + " material_colors: dict[str, str] = Field(\n", + " default_factory=dict,\n", + " description=\"Dictionary with non-array values\"\n", + " )\n", + "\n", + " # Dictionary containing BaseModel instances with numpy arrays\n", + " # This demonstrates the edge case of dict[str, BaseModel] where BaseModel has arrays\n", + " materials: dict[str, MaterialProperties] = Field(\n", + " default_factory=dict,\n", + " description=\"Dictionary of material name to MaterialProperties (BaseModel with arrays)\"\n", + " )\n", + " \n", + " # Nested Packable field - uses its own encode/decode 
methods\n", + " # This demonstrates automatic handling of Packable fields within other Packables\n", + " physics: Optional[PhysicsProperties] = Field(\n", + " None,\n", + " description=\"Physics properties as a nested Packable\"\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Creating a Mesh Instance\n", + "\n", + "Now let's create a simple cube mesh with texture coordinates and normals." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mesh created with 8 vertices and 36 indices\n", + "Material name: cube_material\n", + "Tags: ['cube', 'example']\n", + "Materials (BaseModel dict): ['cube_material', 'secondary_material']\n", + "Physics (nested Packable): mass=2.5, friction=0.7\n" + ] + } + ], + "source": [ + "# Create vertices for a cube\n", + "vertices = np.array([\n", + " [-0.5, -0.5, -0.5], # 0: bottom-left-back\n", + " [0.5, -0.5, -0.5], # 1: bottom-right-back\n", + " [0.5, 0.5, -0.5], # 2: top-right-back\n", + " [-0.5, 0.5, -0.5], # 3: top-left-back\n", + " [-0.5, -0.5, 0.5], # 4: bottom-left-front\n", + " [0.5, -0.5, 0.5], # 5: bottom-right-front\n", + " [0.5, 0.5, 0.5], # 6: top-right-front\n", + " [-0.5, 0.5, 0.5] # 7: top-left-front\n", + "], dtype=np.float32)\n", + "\n", + "# Create indices for the cube\n", + "indices = np.array([\n", + " [0, 1, 2, 2, 3, 0], # back face\n", + " [1, 5, 6, 6, 2, 1], # right face\n", + " [5, 4, 7, 7, 6, 5], # front face\n", + " [4, 0, 3, 3, 7, 4], # left face\n", + " [3, 2, 6, 6, 7, 3], # top face\n", + " [4, 5, 1, 1, 0, 4] # bottom face\n", + "], dtype=np.uint32)\n", + "\n", + "# Create texture coordinates (one for each vertex)\n", + "texture_coords = np.array([\n", + " [0.0, 0.0], # 0\n", + " [1.0, 0.0], # 1\n", + " [1.0, 1.0], # 2\n", + " [0.0, 1.0], # 3\n", + " [0.0, 0.0], # 4\n", + " [1.0, 0.0], # 5\n", + " [1.0, 1.0], # 6\n", + " [0.0, 1.0] # 7\n", + "], dtype=np.float32)\n", + "\n", + "# Create normals (one for each vertex)\n", + "normals = np.array([\n", + " [0.0, 0.0, -1.0], # 0: back\n", + " [0.0, 0.0, -1.0], # 1: back\n", + " [0.0, 0.0, -1.0], # 2: back\n", + " [0.0, 0.0, -1.0], # 3: back\n", + " [0.0, 0.0, 1.0], # 4: front\n", + " [0.0, 0.0, 1.0], # 5: front\n", + " [0.0, 0.0, 1.0], # 6: front\n", + " [0.0, 0.0, 1.0] # 7: front\n", + "], dtype=np.float32)\n", + "\n", + "# Create MaterialProperties instances (BaseModel with numpy arrays)\n", + "cube_material = MaterialProperties(\n", + " name=\"cube_material\",\n", + " diffuse=np.array([1.0, 0.5, 0.31], dtype=np.float32),\n", + " specular=np.array([0.5, 0.5, 0.5], dtype=np.float32),\n", + " shininess=32.0\n", + ")\n", + "\n", + "secondary_material = MaterialProperties(\n", + " name=\"secondary_material\",\n", + " diffuse=np.array([0.2, 0.8, 0.2], dtype=np.float32),\n", + " specular=np.array([0.3, 0.3, 0.3], dtype=np.float32),\n", + " shininess=16.0\n", + ")\n", + "\n", + "# Create PhysicsProperties instance (nested Packable)\n", + "physics = PhysicsProperties(\n", + " mass=2.5,\n", + " friction=0.7,\n", + " inertia_tensor=np.eye(3, dtype=np.float32) * 0.1, # 3x3 identity scaled\n", + " collision_points=np.array([\n", + " [-0.5, -0.5, -0.5],\n", + " [0.5, 0.5, 0.5],\n", + " [0.0, 0.0, 0.0]\n", + " ], dtype=np.float32)\n", + ")\n", + "\n", + "# Create the textured mesh\n", + "mesh = TexturedMesh(\n", + " vertices=vertices,\n", + " indices=indices,\n", + " texture_coords=texture_coords,\n", + " normals=normals,\n", + " 
material_name=\"cube_material\",\n", + " tags=[\"cube\", \"example\"],\n", + " material_data={\n", + " \"cube_material\": {\n", + " \"diffuse\": np.array([1.0, 0.5, 0.31], dtype=np.float32),\n", + " \"specular\": np.array([0.5, 0.5, 0.5], dtype=np.float32),\n", + " \"shininess\": np.array([32.0], dtype=np.float32)\n", + " }\n", + " },\n", + " material_colors={\n", + " \"cube_material\": \"#FF7F50\"\n", + " },\n", + " # dict[str, BaseModel] with numpy arrays inside\n", + " materials={\n", + " \"cube_material\": cube_material,\n", + " \"secondary_material\": secondary_material\n", + " },\n", + " # Nested Packable field\n", + " physics=physics\n", + ")\n", + "\n", + "print(f\"Mesh created with {mesh.vertex_count} vertices and {mesh.index_count} indices\")\n", + "print(f\"Material name: {mesh.material_name}\")\n", + "print(f\"Tags: {mesh.tags}\")\n", + "print(f\"Materials (BaseModel dict): {list(mesh.materials.keys())}\")\n", + "print(f\"Physics (nested Packable): mass={mesh.physics.mass}, friction={mesh.physics.friction}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Optimizing the Mesh\n", + "\n", + "The Mesh class provides several optimization methods that can be used to improve rendering performance." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimized for vertex cache\n", + "Optimized for overdraw\n", + "Optimized for vertex fetch\n" + ] + } + ], + "source": [ + "# Optimize the mesh for vertex cache\n", + "vertex_cache_optimized_mesh = mesh.optimize_vertex_cache()\n", + "print(\"Optimized for vertex cache\")\n", + "\n", + "# Optimize the mesh for overdraw\n", + "overdraw_optimized_mesh = mesh.optimize_overdraw()\n", + "print(\"Optimized for overdraw\")\n", + "\n", + "# Optimize the mesh for vertex fetch\n", + "vertex_fetch_optimized = mesh.optimize_vertex_fetch()\n", + "print(\"Optimized for vertex fetch\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. Encoding and Saving the Mesh\n", + "\n", + "The Mesh class provides methods for encoding the mesh and saving it to a zip file." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saved mesh to textured_cube.zip, file size: 7695 bytes\n" + ] + } + ], + "source": [ + "# Save the mesh to a zip file\n", + "zip_path = \"textured_cube.zip\"\n", + "mesh.save_to_zip(zip_path)\n", + "print(f\"Saved mesh to {zip_path}, file size: {os.path.getsize(zip_path)} bytes\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Loading the Mesh from a Zip File\n", + "\n", + "The Mesh class provides a class method for loading a mesh from a zip file." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded mesh: 8 vertices, 36 indices\n", + "Material name: cube_material\n", + "Tags: ['cube', 'example']\n", + "\n", + "Texture coordinates shape: (8, 2)\n", + "Normals shape: (8, 3)\n", + "Material data: {'cube_material': {'diffuse': array([1. 
, 0.5 , 0.31], dtype=float32), 'shininess': array([32.], dtype=float32), 'specular': array([0.5, 0.5, 0.5], dtype=float32)}}\n", + "Material colors: {'cube_material': '#FF7F50'}\n", + "\n", + "--- BaseModel dict edge case ---\n", + "Materials keys: ['cube_material', 'secondary_material']\n", + " cube_material:\n", + " type: MaterialProperties\n", + " diffuse: [1. 0.5 0.31]\n", + " specular: [0.5 0.5 0.5]\n", + " shininess: 32.0\n", + " secondary_material:\n", + " type: MaterialProperties\n", + " diffuse: [0.2 0.8 0.2]\n", + " specular: [0.3 0.3 0.3]\n", + " shininess: 16.0\n", + "\n", + "--- Nested Packable edge case ---\n", + "Physics type: PhysicsProperties\n", + "Physics mass: 2.5\n", + "Physics friction: 0.7\n", + "Physics inertia_tensor:\n", + "[[0.1 0. 0. ]\n", + " [0. 0.1 0. ]\n", + " [0. 0. 0.1]]\n", + "Physics collision_points:\n", + "[[-0.5 -0.5 -0.5]\n", + " [ 0.5 0.5 0.5]\n", + " [ 0. 0. 0. ]]\n" + ] + } + ], + "source": [ + "# Load the mesh from the zip file\n", + "loaded_mesh = TexturedMesh.load_from_zip(zip_path)\n", + "print(f\"Loaded mesh: {loaded_mesh.vertex_count} vertices, {loaded_mesh.index_count} indices\")\n", + "print(f\"Material name: {loaded_mesh.material_name}\")\n", + "print(f\"Tags: {loaded_mesh.tags}\")\n", + "\n", + "# Verify that the texture coordinates and normals were loaded correctly\n", + "print(f\"\\nTexture coordinates shape: {loaded_mesh.texture_coords.shape}\")\n", + "print(f\"Normals shape: {loaded_mesh.normals.shape}\")\n", + "print(f\"Material data: {loaded_mesh.material_data}\")\n", + "print(f\"Material colors: {loaded_mesh.material_colors}\")\n", + "\n", + "# Verify the dict[str, BaseModel] edge case was loaded correctly\n", + "print(f\"\\n--- BaseModel dict edge case ---\")\n", + "print(f\"Materials keys: {list(loaded_mesh.materials.keys())}\")\n", + "for mat_name, mat in loaded_mesh.materials.items():\n", + " print(f\" {mat_name}:\")\n", + " print(f\" type: {type(mat).__name__}\")\n", + " print(f\" diffuse: {mat.diffuse}\")\n", + " print(f\" specular: {mat.specular}\")\n", + " print(f\" shininess: {mat.shininess}\")\n", + "\n", + "# Verify the nested Packable was loaded correctly\n", + "print(f\"\\n--- Nested Packable edge case ---\")\n", + "print(f\"Physics type: {type(loaded_mesh.physics).__name__}\")\n", + "print(f\"Physics mass: {loaded_mesh.physics.mass}\")\n", + "print(f\"Physics friction: {loaded_mesh.physics.friction}\")\n", + "print(f\"Physics inertia_tensor:\\n{loaded_mesh.physics.inertia_tensor}\")\n", + "print(f\"Physics collision_points:\\n{loaded_mesh.physics.collision_points}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. Creating a Different Mesh Subclass\n", + "\n", + "Let's create another mesh subclass with different attributes to demonstrate the flexibility of the Pydantic-based Mesh class." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Skinned mesh created with 8 vertices and 36 indices\n", + "Skeleton name: human_skeleton\n", + "Animation names: ['walk', 'run', 'jump']\n", + "Bone weights shape: (8, 4)\n", + "Bone indices shape: (8, 4)\n" + ] + } + ], + "source": [ + "class SkinnedMesh(Mesh):\n", + " \"\"\"\n", + " A mesh with skinning information for animation.\n", + " \"\"\"\n", + " # Add bone weights and indices as additional numpy arrays\n", + " bone_weights: np.ndarray = Field(..., description=\"Bone weights for each vertex\")\n", + " bone_indices: np.ndarray = Field(..., description=\"Bone indices for each vertex\")\n", + " \n", + " # Add non-array attributes\n", + " skeleton_name: str = Field(\"default\", description=\"Skeleton name\")\n", + " animation_names: List[str] = Field(default_factory=list, description=\"Animation names\")\n", + "\n", + "# Create a simple skinned mesh\n", + "skinned_mesh = SkinnedMesh(\n", + " vertices=vertices,\n", + " indices=indices,\n", + " bone_weights=np.random.random((len(vertices), 4)).astype(np.float32), # 4 weights per vertex\n", + " bone_indices=np.random.randint(0, 4, (len(vertices), 4)).astype(np.uint8), # 4 bone indices per vertex\n", + " skeleton_name=\"human_skeleton\",\n", + " animation_names=[\"walk\", \"run\", \"jump\"]\n", + ")\n", + "\n", + "print(f\"Skinned mesh created with {skinned_mesh.vertex_count} vertices and {skinned_mesh.index_count} indices\")\n", + "print(f\"Skeleton name: {skinned_mesh.skeleton_name}\")\n", + "print(f\"Animation names: {skinned_mesh.animation_names}\")\n", + "print(f\"Bone weights shape: {skinned_mesh.bone_weights.shape}\")\n", + "print(f\"Bone indices shape: {skinned_mesh.bone_indices.shape}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 7. Saving and Loading the Skinned Mesh\n", + "\n", + "Let's save and load the skinned mesh to demonstrate that all attributes are preserved." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saved skinned mesh to skinned_cube.zip, file size: 2562 bytes\n", + "\n", + "Loaded skinned mesh: 8 vertices, 36 indices\n", + "Skeleton name: human_skeleton\n", + "Animation names: ['walk', 'run', 'jump']\n", + "Bone weights shape: (8, 4)\n", + "Bone indices shape: (8, 4)\n" + ] + } + ], + "source": [ + "# Save the skinned mesh to a zip file\n", + "skinned_zip_path = \"skinned_cube.zip\"\n", + "skinned_mesh.save_to_zip(skinned_zip_path)\n", + "print(f\"Saved skinned mesh to {skinned_zip_path}, file size: {os.path.getsize(skinned_zip_path)} bytes\")\n", + "\n", + "# Load the skinned mesh from the zip file\n", + "loaded_skinned_mesh = SkinnedMesh.load_from_zip(skinned_zip_path)\n", + "print(f\"\\nLoaded skinned mesh: {loaded_skinned_mesh.vertex_count} vertices, {loaded_skinned_mesh.index_count} indices\")\n", + "print(f\"Skeleton name: {loaded_skinned_mesh.skeleton_name}\")\n", + "print(f\"Animation names: {loaded_skinned_mesh.animation_names}\")\n", + "print(f\"Bone weights shape: {loaded_skinned_mesh.bone_weights.shape}\")\n", + "print(f\"Bone indices shape: {loaded_skinned_mesh.bone_indices.shape}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 8. Cleaning Up (Part 1)\n", + "\n", + "Let's clean up the files we created so far before the cache examples." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Removed textured_cube.zip\n", + "Removed skinned_cube.zip\n", + "\n", + "Example completed successfully!\n" + ] + } + ], + "source": [ + "# Clean up\n", + "for path in [zip_path, skinned_zip_path]:\n", + " if os.path.exists(path):\n", + " os.remove(path)\n", + " print(f\"Removed {path}\")\n", + "\n", + "print(\"\\nExample completed successfully!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 9. Using Cache for Nested Packables\n", + "\n", + "When working with meshes that contain nested Packables (like our `TexturedMesh` with `PhysicsProperties`), you can use caching to deduplicate shared data and reduce file sizes. The cache uses SHA256 hashes for content-addressable storage." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cache directory: /tmp/tmpkflwgz8v\n", + "\n", + "Cached files (1 items):\n", + " 210dc1059e9d5af349f0dad45dbbdc8797eb82b49e7a3443528337e33ce60854.zip: 1157 bytes\n", + "\n", + "Original zip size: 0 bytes\n", + "Cached zip size: 6505 bytes\n", + "\n", + "--- Loaded from cache ---\n", + "Physics type: PhysicsProperties\n", + "Physics mass: 2.5\n", + "Physics friction: 0.7\n", + "Physics inertia_tensor:\n", + "[[0.1 0. 0. ]\n", + " [0. 0.1 0. ]\n", + " [0. 0. 0.1]]\n", + "\n", + "Removed textured_cube_cached.zip\n" + ] + } + ], + "source": [ + "import tempfile\n", + "from meshly import ReadHandler, WriteHandler\n", + "\n", + "# Create a temporary cache directory\n", + "with tempfile.TemporaryDirectory() as cache_dir:\n", + " print(f\"Cache directory: {cache_dir}\")\n", + " \n", + " # Create cache saver and loader using the handler factory methods\n", + " cache_saver = WriteHandler.create_cache_saver(cache_dir)\n", + " cache_loader = ReadHandler.create_cache_loader(cache_dir)\n", + " \n", + " # Save the mesh with caching - nested PhysicsProperties will be cached separately\n", + " cached_zip_path = \"textured_cube_cached.zip\"\n", + " mesh.save_to_zip(cached_zip_path, cache_saver=cache_saver)\n", + " \n", + " # Check what was cached\n", + " import os\n", + " cache_files = os.listdir(cache_dir)\n", + " print(f\"\\nCached files ({len(cache_files)} items):\")\n", + " for f in cache_files:\n", + " file_path = os.path.join(cache_dir, f)\n", + " print(f\" {f}: {os.path.getsize(file_path)} bytes\")\n", + " \n", + " # Compare file sizes\n", + " original_size = os.path.getsize(zip_path) if os.path.exists(zip_path) else 0\n", + " cached_size = os.path.getsize(cached_zip_path)\n", + " print(f\"\\nOriginal zip size: {original_size} bytes\")\n", + " print(f\"Cached zip size: {cached_size} bytes\")\n", + " \n", + " # Load the mesh back using the cache\n", + " loaded_cached_mesh = TexturedMesh.load_from_zip(cached_zip_path, cache_loader=cache_loader)\n", + " \n", + " # Verify the nested Packable was loaded correctly from cache\n", + " print(f\"\\n--- Loaded from cache ---\")\n", + " print(f\"Physics type: {type(loaded_cached_mesh.physics).__name__}\")\n", + " print(f\"Physics mass: {loaded_cached_mesh.physics.mass}\")\n", + " print(f\"Physics friction: {loaded_cached_mesh.physics.friction}\")\n", + " print(f\"Physics inertia_tensor:\\n{loaded_cached_mesh.physics.inertia_tensor}\")\n", + " \n", + " # Clean up\n", + " if os.path.exists(cached_zip_path):\n", + " 
os.remove(cached_zip_path)\n", + " print(f\"\\nRemoved {cached_zip_path}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Cache Deduplication Example\n", + "\n", + "When multiple meshes share the same nested Packable data, the cache automatically deduplicates them using SHA256 hashes." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Cache directory for deduplication: /workspaces/meshly/cache\n", + "Cache entries: 1 (both meshes share the same physics cache)\n", + "\n", + "Mesh1 material: mesh1, physics mass: 1.0\n", + "Mesh2 material: mesh2, physics mass: 1.0\n" + ] + } + ], + "source": [ + "# Demonstrate cache deduplication - two meshes with identical physics properties\n", + "with tempfile.TemporaryDirectory() as cache_dir:\n", + " print(f\"\\nCache directory for deduplication: {cache_dir}\")\n", + " cache_saver = WriteHandler.create_cache_saver(cache_dir)\n", + " cache_loader = ReadHandler.create_cache_loader(cache_dir)\n", + " \n", + " # Create two meshes with identical physics (will share cache entry)\n", + " shared_physics = PhysicsProperties(\n", + " mass=1.0,\n", + " friction=0.5,\n", + " inertia_tensor=np.eye(3, dtype=np.float32),\n", + " collision_points=np.array([[0, 0, 0]], dtype=np.float32)\n", + " )\n", + " \n", + " mesh1 = TexturedMesh(\n", + " vertices=vertices,\n", + " indices=indices,\n", + " texture_coords=texture_coords,\n", + " normals=normals,\n", + " material_name=\"mesh1\",\n", + " physics=shared_physics\n", + " )\n", + " \n", + " mesh2 = TexturedMesh(\n", + " vertices=vertices * 2, # Different vertices\n", + " indices=indices,\n", + " texture_coords=texture_coords,\n", + " normals=normals,\n", + " material_name=\"mesh2\",\n", + " physics=shared_physics # Same physics - will be deduplicated!\n", + " )\n", + " \n", + " # Save both meshes with the same cache\n", + " mesh1.save_to_zip(\"mesh1.zip\", cache_saver=cache_saver)\n", + " mesh2.save_to_zip(\"mesh2.zip\", cache_saver=cache_saver)\n", + " \n", + " # Check the cache - should only have 1 entry (shared physics)\n", + " cache_files = os.listdir(cache_dir)\n", + " print(f\"Cache entries: {len(cache_files)} (both meshes share the same physics cache)\")\n", + " \n", + " # Load both meshes\n", + " loaded1 = TexturedMesh.load_from_zip(\"mesh1.zip\", cache_loader=cache_loader)\n", + " loaded2 = TexturedMesh.load_from_zip(\"mesh2.zip\", cache_loader=cache_loader)\n", + " \n", + " print(f\"\\nMesh1 material: {loaded1.material_name}, physics mass: {loaded1.physics.mass}\")\n", + " print(f\"Mesh2 material: {loaded2.material_name}, physics mass: {loaded2.physics.mass}\")\n", + " \n", + " # Clean up\n", + " for f in [\"mesh1.zip\", \"mesh2.zip\"]:\n", + " if os.path.exists(f):\n", + " os.remove(f)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 10. 
Jax Conversion Example" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Converted skinned mesh to JAX arrays, vertex dtype: float32\n" + ] + } + ], + "source": [ + "try:\n", + " import jax\n", + "\n", + " jax_skinned_mesh = skinned_mesh.convert_to(\"jax\")\n", + " print(f\"Converted skinned mesh to JAX arrays, vertex dtype: {jax_skinned_mesh.vertices.dtype}\")\n", + "except ImportError:\n", + " print(\"JAX not available - skipping conversion example\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example Complete!\n", + "\n", + "This notebook demonstrated:\n", + "- Creating custom Mesh subclasses with additional numpy arrays\n", + "- Working with nested dictionaries containing arrays\n", + "- Using BaseModel instances with arrays inside dictionaries\n", + "- **Nested Packables** - fields that are themselves Packable classes\n", + "- **Cache support** - using `WriteHandler.create_cache_saver()` and `ReadHandler.create_cache_loader()` for content-addressable storage\n", + "- **Deduplication** - identical nested Packables share the same cache entry" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 } diff --git a/python/meshly/__init__.py b/python/meshly/__init__.py index d5680b6..a1efb6a 100644 --- a/python/meshly/__init__.py +++ b/python/meshly/__init__.py @@ -8,29 +8,26 @@ 2. Mesh class as a Pydantic base class for representing 3D meshes 3. MeshUtils static class for mesh optimization operations 4. ArrayUtils class for array encoding/decoding operations -5. EncodedMesh class for storing encoded mesh data -6. I/O utilities for storing and loading meshes and arrays -7. Support for custom subclasses with automatic encoding/decoding of numpy arrays -8. CellTypeUtils for VTK cell type conversions and edge topology extraction +5. I/O utilities for storing and loading meshes and arrays +6. Support for custom subclasses with automatic encoding/decoding of numpy arrays +7. CellTypeUtils for VTK cell type conversions and edge topology extraction """ from .packable import ( Packable, PackableMetadata, - EncodedData, ) from .mesh import ( Mesh, - Array, - HAS_JAX, ) from .array import ( EncodedArray, ArrayMetadata, - EncodedArrayModel, ArrayUtils, + ArrayType, + Array, ) from .cell_types import ( @@ -44,21 +41,30 @@ MeshUtils, ) +from .data_handler import ( + CacheLoader, + CacheSaver, + ReadHandler, + WriteHandler, +) + __all__ = [ # Packable base class "Packable", "PackableMetadata", - "EncodedData", + "CacheLoader", + "CacheSaver", + "ArrayType", + # Data handlers + "ReadHandler", + "WriteHandler", # Mesh classes "Mesh", # Array types and utilities "Array", - "HAS_JAX", "EncodedArray", - "EncodedArrayModel", "ArrayMetadata", - "ArrayResult", "ArrayUtils", # Cell type utilities "CellType", diff --git a/python/meshly/array.py b/python/meshly/array.py index 5f019d5..4afff0a 100644 --- a/python/meshly/array.py +++ b/python/meshly/array.py @@ -5,28 +5,29 @@ encoding functions and storing/loading them as encoded data. 
""" import ctypes -from typing import List, Tuple +from io import BytesIO +import json +from typing import Any, Dict, List, Literal, Optional, Union import numpy as np from pydantic import BaseModel, Field from meshoptimizer._loader import lib +from .data_handler import WriteHandler, ReadHandler, ZipBuffer +from .common import PathLike -class EncodedArrayModel(BaseModel): - """ - Pydantic model representing an encoded numpy array with metadata. - - This is a Pydantic version of the EncodedArray class in arrayutils.py. - """ - - data: bytes = Field(..., description="Encoded data as bytes") - shape: Tuple[int, ...] = Field(..., description="Original array shape") - dtype: str = Field(..., description="Original array data type as string") - itemsize: int = Field(..., description="Size of each item in bytes") +# Optional JAX support +try: + import jax.numpy as jnp + HAS_JAX = True + JaxArray = Union[np.ndarray, jnp.ndarray] - class Config: - """Pydantic configuration.""" +except ImportError: + jnp = None + HAS_JAX = False + JaxArray = np.ndarray - arbitrary_types_allowed = True +Array = Union[np.ndarray, JaxArray] +ArrayType = Literal["numpy", "jax"] class ArrayMetadata(BaseModel): @@ -39,6 +40,8 @@ class ArrayMetadata(BaseModel): shape: List[int] = Field(..., description="Shape of the array") dtype: str = Field(..., description="Data type of the array as string") itemsize: int = Field(..., description="Size of each item in bytes") + array_type: ArrayType = Field( + default="numpy", description="Array backend type (numpy or jax)") class EncodedArray(BaseModel): @@ -50,11 +53,10 @@ class EncodedArray(BaseModel): shape: Original array shape dtype: Original array data type itemsize: Size of each item in bytes + array_type: Array backend type (numpy or jax) """ data: bytes - shape: Tuple[int, ...] - dtype: np.dtype - itemsize: int + metadata: ArrayMetadata class Config: """Pydantic configuration.""" @@ -68,6 +70,124 @@ def __len__(self) -> int: class ArrayUtils: """Utility class for encoding and decoding numpy arrays.""" + @staticmethod + def is_array(obj) -> bool: + """Check if obj is a numpy or JAX array.""" + if isinstance(obj, np.ndarray): + return True + if HAS_JAX and jnp is not None and isinstance(obj, jnp.ndarray): + return True + return False + + @staticmethod + def detect_array_type(array: Array) -> ArrayType: + """Detect whether an array is numpy or jax.""" + if HAS_JAX and jnp is not None and isinstance(array, jnp.ndarray): + return "jax" + return "numpy" + + @staticmethod + def convert_array(array: Array, array_type: ArrayType): + """ + Convert an array to the specified array backend type. + + Args: + array: The array to convert (numpy or jax) + array_type: Target array type ("numpy" or "jax") + + Returns: + Array in the specified backend format + + Raises: + AssertionError: If JAX is requested but not available + """ + if array_type == "jax": + assert HAS_JAX, "JAX is not available. Install JAX to use JAX arrays." + if isinstance(array, jnp.ndarray): + return array # Already JAX + return jnp.array(array) + elif array_type == "numpy": # numpy + return np.asarray(array) + else: + raise ValueError(f"Unsupported array_type: {array_type}") + + @staticmethod + def convert_recursive(obj, array_type: ArrayType): + """ + Recursively convert arrays in nested structures to the specified type. 
+ + Args: + obj: Object to convert (array, dict, list, tuple, or other) + array_type: Target array type ("numpy" or "jax") + + Returns: + Object with all arrays converted to the specified type + """ + if ArrayUtils.is_array(obj): + return ArrayUtils.convert_array(obj, array_type) + elif isinstance(obj, dict): + return {key: ArrayUtils.convert_recursive(value, array_type) for key, value in obj.items()} + elif isinstance(obj, (list, tuple)): + return type(obj)(ArrayUtils.convert_recursive(item, array_type) for item in obj) + else: + return obj + + @staticmethod + def extract_nested_arrays(obj, prefix: str = "") -> Dict[str, Array]: + """Recursively extract arrays from nested dicts and BaseModel instances. + + Note: Packable instances are skipped - they handle their own encoding. + """ + from pydantic import BaseModel + from .packable import Packable + arrays = {} + if ArrayUtils.is_array(obj): + arrays[prefix] = obj + elif isinstance(obj, Packable): + # Skip Packable instances - they encode themselves + pass + elif isinstance(obj, BaseModel): + for name in type(obj).model_fields: + value = getattr(obj, name, None) + if value is not None: + key = f"{prefix}.{name}" if prefix else name + arrays.update(ArrayUtils.extract_nested_arrays(value, key)) + elif isinstance(obj, dict): + for k, v in obj.items(): + key = f"{prefix}.{k}" if prefix else k + arrays.update(ArrayUtils.extract_nested_arrays(v, key)) + return arrays + + @staticmethod + def extract_non_arrays(obj): + """Extract non-array values, preserving BaseModel type info for reconstruction. + + Note: Packable instances are skipped - they handle their own encoding. + """ + from pydantic import BaseModel + from .packable import Packable + if ArrayUtils.is_array(obj): + return None + if isinstance(obj, Packable): + # Skip Packable instances - they encode themselves + return None + if isinstance(obj, BaseModel): + result = {"__model_class__": obj.__class__.__name__, + "__model_module__": obj.__class__.__module__} + for name in type(obj).model_fields: + val = getattr(obj, name, None) + if not ArrayUtils.is_array(val) and not isinstance(val, Packable): + extracted = ArrayUtils.extract_non_arrays(val) + if extracted is not None: + result[name] = extracted + return result if len(result) > 2 else None + if isinstance(obj, dict): + result = {k: ArrayUtils.extract_non_arrays(v) for k, v in obj.items() + if not ArrayUtils.is_array(v) and not isinstance(v, Packable)} + result = {k: v for k, v in result.items() if v is not None} + return result or None + return obj + @staticmethod def encode_array(array: np.ndarray) -> EncodedArray: """ @@ -79,6 +199,10 @@ def encode_array(array: np.ndarray) -> EncodedArray: Returns: EncodedArray object containing the encoded data and metadata """ + + # Convert other arrays to numpy for encoding + array = ArrayUtils.convert_array(array, "numpy") + # Store original shape and dtype original_shape = array.shape original_dtype = array.dtype @@ -115,11 +239,18 @@ def encode_array(array: np.ndarray) -> EncodedArray: # Return only the used portion of the buffer encoded_data = bytes(buffer[:result_size]) + array_type = ArrayUtils.detect_array_type(array) + + metadata = ArrayMetadata( + shape=list(original_shape), + dtype=str(original_dtype), + itemsize=item_size, + array_type=array_type + ) + return EncodedArray( data=encoded_data, - shape=original_shape, - dtype=original_dtype, - itemsize=item_size + metadata=metadata ) @staticmethod @@ -134,7 +265,7 @@ def decode_array(encoded_array: EncodedArray) -> np.ndarray: Decoded numpy 
array """ # Calculate total number of items - total_items = np.prod(encoded_array.shape) + total_items = np.prod(encoded_array.metadata.shape) # Create buffer for encoded data buffer_array = np.frombuffer(encoded_array.data, dtype=np.uint8) @@ -147,7 +278,7 @@ def decode_array(encoded_array: EncodedArray) -> np.ndarray: result = lib.meshopt_decodeVertexBuffer( destination.ctypes.data_as(ctypes.c_void_p), total_items, - encoded_array.itemsize, + encoded_array.metadata.itemsize, buffer_array.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), len(buffer_array) ) @@ -156,10 +287,125 @@ def decode_array(encoded_array: EncodedArray) -> np.ndarray: raise RuntimeError(f"Failed to decode array: error code {result}") # Reshape the array to its original shape - reshaped = destination.reshape(encoded_array.shape) + reshaped = destination.reshape(encoded_array.metadata.shape) # Convert back to original dtype if needed - if encoded_array.dtype != np.float32: - reshaped = reshaped.astype(encoded_array.dtype) + if encoded_array.metadata.dtype != np.float32: + reshaped = reshaped.astype(encoded_array.metadata.dtype) return reshaped + + @staticmethod + def save_array( + handler: WriteHandler, + name: str, + encoded_array: EncodedArray, + ) -> None: + """ + Save a single encoded array using a write handler. + + Args: + handler: WriteHandler for writing files + name: Array name (e.g., "normals" or "markerIndices.boundary") + encoded_array: EncodedArray to save + """ + array_path = name.replace(".", "/") + handler.write_binary( + f"arrays/{array_path}/array.bin", encoded_array.data) + handler.write_text( + f"arrays/{array_path}/metadata.json", + json.dumps(encoded_array.metadata.model_dump(), + indent=2, sort_keys=True), + ) + + @staticmethod + def load_array( + handler: ReadHandler, + name: str, + array_type: Optional[ArrayType] = None + ) -> Any: + """ + Load and decode a single array using a read handler. + + Args: + handler: ReadHandler for reading files + name: Array name (e.g., "normals" or "markerIndices.boundary") + array_type: Target array backend type ("numpy" or "jax"). If None (default), uses + the array_type stored in the array's metadata. + + Returns: + Decoded array (numpy or JAX) + + Raises: + KeyError: If array not found + """ + array_path = name.replace(".", "/") + bin_path = f"arrays/{array_path}/array.bin" + meta_path = f"arrays/{array_path}/metadata.json" + + try: + metadata_text = handler.read_text(meta_path) + metadata_dict = json.loads(metadata_text) + metadata = ArrayMetadata(**metadata_dict) + + encoded_bytes = handler.read_binary(bin_path) + except (KeyError, FileNotFoundError) as e: + raise KeyError(f"Array '{name}' not found") from e + + encoded = EncodedArray( + data=encoded_bytes, + metadata=metadata + ) + + decoded = ArrayUtils.decode_array(encoded) + return ArrayUtils.convert_array(decoded, array_type or metadata.array_type) + + @staticmethod + def save_to_zip( + array: Array, + destination: Union[PathLike, BytesIO], + ) -> None: + """ + Save a single array to a zip file. 
+ + Args: + array: Array to save (numpy or JAX) + destination: Path to the output zip file or BytesIO buffer + """ + encoded = ArrayUtils.encode_array(array) + + zip_buffer = ZipBuffer() + handler = WriteHandler.create_handler(zip_buffer) + ArrayUtils.save_array(handler, "array", encoded) + handler.finalize() + + if isinstance(destination, BytesIO): + destination.write(zip_buffer.getvalue()) + else: + with open(destination, "wb") as f: + f.write(zip_buffer.getvalue()) + + @staticmethod + def load_from_zip( + source: Union[PathLike, BytesIO], + array_type: Optional[ArrayType] = None + ) -> Array: + """ + Load a single array from a zip file. + + Args: + source: Path to the input zip file or BytesIO buffer + array_type: Target array backend type ("numpy" or "jax"). If None (default), + uses the array_type stored in the array's metadata. + + Returns: + Decoded array (numpy or JAX) + """ + if isinstance(source, BytesIO): + source.seek(0) + handler = ReadHandler.create_handler(ZipBuffer(source.read())) + else: + with open(source, "rb") as f: + handler = ReadHandler.create_handler(ZipBuffer(f.read())) + + return ArrayUtils.load_array(handler, "array", array_type) diff --git a/python/meshly/data_handler.py b/python/meshly/data_handler.py new file mode 100644 index 0000000..3c2e702 --- /dev/null +++ b/python/meshly/data_handler.py @@ -0,0 +1,307 @@ +import stat +from typing import Callable, List, Optional, Union +import zipfile +from io import BytesIO +from pathlib import Path +from abc import abstractmethod +from .common import PathLike + + +ZipBuffer = BytesIO + +# Type aliases for cache callbacks +CacheLoader = Callable[[str], Optional[bytes]] +"""Load cached packable by SHA256 hash. Returns bytes or None if not found.""" + +CacheSaver = Callable[[str, bytes], None] +"""Save packable bytes to cache with SHA256 hash as key.""" + +ReadHandlerSource = Union[PathLike, ZipBuffer] +WriteHandlerDestination = Union[PathLike, ZipBuffer] + + +class DataHandler: + rel_path: str + + def resolved_path(self, subpath: PathLike) -> Path: + """Resolve the path relative to the repository.""" + if str(subpath).startswith(self.rel_path): + return Path(str(subpath)) + return Path(f"{self.rel_path}/{subpath}") + + +class ReadHandler(DataHandler): + """Protocol for reading files from various sources.""" + + def __init__(self, source: ReadHandlerSource, rel_path=""): + self.source = source + self.rel_path = rel_path + + @abstractmethod + def read_text(self, subpath: PathLike, encoding: str = "utf-8") -> str: + """Read text content from a file.""" + ... + + @abstractmethod + def read_binary(self, subpath: PathLike) -> bytes: + """Read binary content from a file.""" + ... + + @abstractmethod + def list_files(self, subpath: PathLike = "", recursive: bool = False) -> List[Path]: + """List files in the given subpath.""" + ... + + def to_path(self, rel_path: str): + """Get the original source as a PathLike if applicable.""" + return ReadHandler.create_handler(self.source, f"{self.rel_path}/{rel_path}" if self.rel_path != "" else rel_path, self) + + @staticmethod + def create_handler(source: ReadHandlerSource, rel_path="", existing_handler: Optional["ReadHandler"] = None): + """ + Create an appropriate read handler based on the source type. 
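+
+        Example (illustrative; "mesh.zip" is a hypothetical file produced by save_to_zip):
+            with open("mesh.zip", "rb") as f:
+                handler = ReadHandler.create_handler(ZipBuffer(f.read()))
+            metadata_text = handler.read_text("metadata.json")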
+ + Args: + source: Path to file/directory or BytesIO object + rel_path: Relative path prefix for file operations + existing_handler: Optional existing handler to reuse resources from + + Returns: + ReadHandler implementation + """ + if isinstance(source, ZipBuffer): + return ZipReadHandler( + source, + rel_path, + zip_file=existing_handler.zip_file if existing_handler and isinstance( + existing_handler, ZipReadHandler) else None + ) + else: + return FileReadHandler(source, rel_path) + + @staticmethod + def create_cache_loader(source: ReadHandlerSource): + """ + Create a CacheLoader function that reads from a handler source. + + Args: + source: Path to cache directory or ZipBuffer containing cached packables + + Returns: + CacheLoader function: (hash: str) -> Optional[bytes] + + Example: + cache_loader = ReadHandler.create_cache_loader("/path/to/cache") + mesh = Mesh.load_from_zip("mesh.zip", cache_loader=cache_loader) + """ + handler = ReadHandler.create_handler(source) + + def loader(hash_digest: str) -> Optional[bytes]: + try: + return handler.read_binary(f"{hash_digest}.zip") + except (FileNotFoundError, KeyError): + return None + + return loader + + +class WriteHandler(DataHandler): + """Protocol for writing files to various destinations.""" + + def __init__(self, destination: WriteHandlerDestination, rel_path=""): + self.destination = destination + self.rel_path = rel_path + + @abstractmethod + def write_text(self, subpath: PathLike, content: str, executable: bool = False) -> None: + """Write text content to a file.""" + ... + + @abstractmethod + def write_binary(self, subpath: PathLike, content: Union[bytes, BytesIO], executable: bool = False) -> None: + """Write binary content to a file.""" + ... + + def to_path(self, rel_path: str): + """Get the original source as a PathLike if applicable.""" + return WriteHandler.create_handler(self.destination, f"{self.rel_path}/{rel_path}" if self.rel_path != "" else rel_path, self) + + @staticmethod + def create_handler(destination: WriteHandlerDestination, rel_path: str = "", existing_handler: Optional["WriteHandler"] = None): + """ + Create an appropriate write handler based on the destination type. + + Args: + destination: Path to file/directory or BytesIO object + is_zip: Whether to create a zip file + + Returns: + WriteFileHandlerProtocol implementation + """ + if isinstance(destination, ZipBuffer): + return ZipWriteHandler( + destination, + rel_path, + zip_file=existing_handler.zip_file if existing_handler and isinstance( + existing_handler, ZipWriteHandler) else None + ) + else: + return FileWriteHandler(destination, rel_path) + + @staticmethod + def create_cache_saver(destination: WriteHandlerDestination): + """ + Create a CacheSaver function that writes to a handler destination. 
+ + Args: + destination: Path to cache directory or ZipBuffer for cached packables + + Returns: + CacheSaver function: (hash: str, data: bytes) -> None + + Example: + cache_saver = WriteHandler.create_cache_saver("/path/to/cache") + mesh.save_to_zip("mesh.zip", cache_saver=cache_saver) + """ + handler = WriteHandler.create_handler(destination) + written_hashes: set = set() + + def saver(hash_digest: str, data: bytes) -> None: + if hash_digest not in written_hashes: + handler.write_binary(f"{hash_digest}.zip", data) + written_hashes.add(hash_digest) + + return saver + + def finalize(self): + """Close any resources if needed.""" + pass + + +class FileReadHandler(ReadHandler): + """Handler for reading files from the regular file system.""" + + source: Path + + def __init__(self, source: PathLike, rel_path: str = ""): + super().__init__(Path(source), rel_path) + + def read_text(self, subpath: PathLike, encoding: str = "utf-8") -> str: + full_path = self.source / self.resolved_path(subpath) + return full_path.read_text(encoding) + + def read_binary(self, subpath: PathLike) -> bytes: + full_path = self.source / self.resolved_path(subpath) + return full_path.read_bytes() + + def list_files(self, subpath: PathLike = "", recursive: bool = False) -> List[Path]: + full_path = Path(self.source) / subpath + if recursive: + return [p.relative_to(self.source) for p in full_path.rglob("*")] + else: + return [p.relative_to(self.source) for p in full_path.glob("*")] + + +class ZipReadHandler(ReadHandler): + """Handler for reading files from zip archives.""" + + def __init__(self, source: Union[PathLike, BytesIO], rel_path: str = "", zip_file: Optional[zipfile.ZipFile] = None): + super().__init__(source, rel_path) + self.zip_file: zipfile.ZipFile = zip_file or zipfile.ZipFile( + source, "r") + + def read_text(self, subpath: PathLike, encoding: str = "utf-8") -> str: + return self.zip_file.read(str(self.resolved_path(subpath))).decode(encoding) + + def read_binary(self, subpath: PathLike) -> bytes: + return self.zip_file.read(str(self.resolved_path(subpath))) + + def list_files(self, subpath: PathLike = "", recursive: bool = False) -> List[Path]: + if subpath == "" or recursive: + return [ + Path(p.filename) + for p in self.zip_file.infolist() + if p.filename.startswith(str(self.resolved_path(subpath))) + ] + else: + return [ + Path(p.filename) + for p in self.zip_file.infolist() + if str(Path(p.filename).parent) == self.resolved_path(subpath) + ] + + def finalize(self): + """Close the zip file.""" + if self.zip_file: + self.zip_file.close() + + +class FileWriteHandler(WriteHandler): + """Handler for writing files to the regular file system.""" + + destination: Path + + def __init__(self, destination: PathLike, rel_path: str = ""): + super().__init__(Path(destination), rel_path) + + def write_text(self, subpath: PathLike, content: str, executable: bool = False) -> None: + full_path = self.destination / self.resolved_path(subpath) + full_path.parent.mkdir(parents=True, exist_ok=True) + full_path.write_text(content) + + if executable: + # Make file executable by owner, group, and others + current_permissions = full_path.stat().st_mode + full_path.chmod(current_permissions | stat.S_IXUSR | + stat.S_IXGRP | stat.S_IXOTH) + + def write_binary(self, subpath: PathLike, content: Union[bytes, BytesIO], executable: bool = False) -> None: + if isinstance(content, BytesIO): + content.seek(0) + content = content.read() + + full_path = self.destination / self.resolved_path(subpath) + full_path.parent.mkdir(parents=True, 
exist_ok=True) + full_path.write_bytes(content) + + if executable: + # Make file executable by owner, group, and others + current_permissions = full_path.stat().st_mode + full_path.chmod(current_permissions | stat.S_IXUSR | + stat.S_IXGRP | stat.S_IXOTH) + + +class ZipWriteHandler(WriteHandler): + """Handler for writing files to zip archives.""" + + # Fixed date_time for deterministic zip output (2020-01-01 00:00:00) + DETERMINISTIC_DATE_TIME = (2020, 1, 1, 0, 0, 0) + + def __init__(self, destination: Union[PathLike, BytesIO], rel_path: str = "", zip_file: Optional[zipfile.ZipFile] = None): + super().__init__(destination, rel_path) + self.zip_file = zip_file or zipfile.ZipFile(destination, "w") + + def write_text(self, subpath: PathLike, content: str, executable: bool = False) -> None: + zip_info = zipfile.ZipInfo(str(self.resolved_path( + subpath)), date_time=self.DETERMINISTIC_DATE_TIME) + if executable: + # Set Unix file permissions for executable files (0o755) + zip_info.external_attr = 0o755 << 16 + self.zip_file.writestr(zip_info, content) + + def write_binary(self, subpath: PathLike, content: Union[bytes, BytesIO], executable: bool = False) -> None: + if isinstance(content, BytesIO): + content.seek(0) + content = content.read() + + zip_info = zipfile.ZipInfo(str(self.resolved_path( + subpath)), date_time=self.DETERMINISTIC_DATE_TIME) + if executable: + # Set Unix file permissions for executable files (0o755) + zip_info.external_attr = 0o755 << 16 + self.zip_file.writestr(zip_info, content) + + def finalize(self): + """Close the zip file.""" + if hasattr(self, 'zip_file') and self.zip_file: + self.zip_file.close() diff --git a/python/meshly/mesh.py b/python/meshly/mesh.py index 7873088..a324126 100644 --- a/python/meshly/mesh.py +++ b/python/meshly/mesh.py @@ -12,11 +12,11 @@ - Marker support for boundary conditions and regions """ -from .utils import ElementUtils, TriangulationUtils, MeshUtils, ZipUtils, PackableUtils +from .utils import ElementUtils, TriangulationUtils, MeshUtils +from .data_handler import WriteHandler, ReadHandler from .cell_types import CellTypeUtils, VTKCellType -from .common import PathLike -from .array import EncodedArray -from .packable import Packable, PackableMetadata +from .array import ArrayUtils, ArrayType, Array +from .packable import Packable, PackableMetadata, CustomFieldConfig from meshoptimizer import ( encode_vertex_buffer, encode_index_sequence, @@ -27,43 +27,24 @@ optimize_vertex_fetch as meshopt_optimize_vertex_fetch, simplify as meshopt_simplify, ) -import json -import zipfile -from io import BytesIO from typing import ( Dict, Optional, Type, Any, TypeVar, - Union, List, Sequence, + Union, ) import numpy as np from pydantic import BaseModel, Field, model_validator -# Optional JAX support -try: - import jax.numpy as jnp - HAS_JAX = True -except ImportError: - jnp = None - HAS_JAX = False - -# Array type union - supports both numpy and JAX arrays -if HAS_JAX: - JaxArray = Union[np.ndarray, jnp.ndarray] -else: - JaxArray = np.ndarray -Array = Union[np.ndarray, JaxArray] - -# Use meshoptimizer directly - # Type variable for the Mesh class T = TypeVar("T", bound="Mesh") + class MeshSizeInfo(BaseModel): """Mesh size information for meshoptimizer encoding/decoding.""" vertex_count: int = Field(..., description="Number of vertices") @@ -77,6 +58,8 @@ class MeshMetadata(PackableMetadata): """Metadata for a Mesh saved to zip, extending PackableMetadata with mesh-specific info.""" mesh_size: MeshSizeInfo = Field(..., description="Mesh size information for 
decoding") + array_type: ArrayType = Field( + default="numpy", description="Array backend type for vertices/indices") class Mesh(Packable): @@ -90,11 +73,73 @@ class Mesh(Packable): specialized handling for vertices and indices using meshoptimizer encoding. """ - # Required fields - vertices: Array = Field(..., - description="Vertex data as a numpy or JAX array") + # ============================================================ + # Custom field encoders/decoders for meshoptimizer + # ============================================================ + + @staticmethod + def _encode_vertices(vertices: Array, mesh: "Mesh") -> bytes: + """Encode vertices using meshoptimizer.""" + return encode_vertex_buffer( + vertices, + mesh.vertex_count, + vertices.itemsize * vertices.shape[1], + ) + + @staticmethod + def _decode_vertices(encoded_bytes: bytes, metadata: MeshMetadata, array_type: Optional[ArrayType]) -> Array: + """Decode vertices using meshoptimizer.""" + mesh_size = metadata.mesh_size + effective_type = array_type or metadata.array_type + vertices = decode_vertex_buffer( + mesh_size.vertex_count, mesh_size.vertex_size, encoded_bytes + ) + return ArrayUtils.convert_array(vertices, effective_type) + + @staticmethod + def _encode_indices(indices: Array, mesh: "Mesh") -> bytes: + """Encode indices using meshoptimizer.""" + return encode_index_sequence(indices, mesh.index_count, mesh.vertex_count) + + @staticmethod + def _decode_indices(encoded_bytes: bytes, metadata: MeshMetadata, array_type: Optional[ArrayType]) -> Array: + """Decode indices using meshoptimizer.""" + mesh_size = metadata.mesh_size + effective_type = array_type or metadata.array_type + indices = decode_index_sequence( + mesh_size.index_count, mesh_size.index_size, encoded_bytes + ) + return ArrayUtils.convert_array(indices, effective_type) + + @classmethod + def _get_custom_fields(cls) -> Dict[str, CustomFieldConfig]: + """Custom field configurations for mesh-specific encoding/decoding.""" + return { + 'vertices': CustomFieldConfig( + file_name='vertices', + encode=Mesh._encode_vertices, + decode=Mesh._decode_vertices, + optional=False + ), + 'indices': CustomFieldConfig( + file_name='indices', + encode=Mesh._encode_indices, + decode=Mesh._decode_indices, + optional=True + ), + } + + # ============================================================ + # Field definitions + # ============================================================ + + vertices: Array = Field( + ..., + description="Vertex data as a numpy or JAX array", + ) indices: Optional[Union[Array, List[Any]]] = Field( - None, description="Index data as a flattened 1D numpy/JAX array or list of polygons" + None, + description="Index data as a flattened 1D numpy/JAX array or list of polygons", ) index_sizes: Optional[Union[Array, List[int]]] = None """ @@ -201,103 +246,52 @@ def validate_arrays(self) -> "Mesh": """ Validate and convert arrays to the correct types. - This method handles various input formats for indices and automatically infers - index_sizes when not explicitly provided: - - - 2D numpy arrays: Assumes uniform polygons, infers size from array shape - - List of lists: Supports mixed polygon sizes, infers from individual polygon lengths - - Flat arrays: Requires explicit index_sizes for polygon structure - - When index_sizes is explicitly provided, it validates that the structure matches - the inferred polygon sizes and that the sum equals the total number of indices. 
- - Cell types are automatically inferred from polygon sizes if not provided: - - Size 1: Vertex (1), Size 2: Line (3), Size 3: Triangle (5), Size 4: Quad (9) - - Raises: - ValueError: If explicit index_sizes doesn't match inferred structure or - if sum of index_sizes doesn't match total indices count, or - if cell_types length doesn't match index_sizes length. + Handles various input formats for indices and automatically infers + index_sizes and cell_types when not explicitly provided. """ - # Ensure vertices is a float32 array, preserving array type (numpy/JAX) + # Helper to convert arrays to numpy (needed for meshoptimizer) + def to_numpy(arr): + return ArrayUtils.convert_array(arr, "numpy") if ArrayUtils.is_array(arr) else arr + + # Ensure vertices is float32, preserving JAX type if present if self.vertices is not None: - if HAS_JAX and isinstance(self.vertices, jnp.ndarray): - # Keep as JAX array - self.vertices = self.vertices.astype(jnp.float32) - else: - # Convert to numpy array - self.vertices = np.asarray(self.vertices, dtype=np.float32) + vertex_type = ArrayUtils.detect_array_type(self.vertices) + self.vertices = ArrayUtils.convert_array( + np.asarray(self.vertices, dtype=np.float32), vertex_type) - # Handle indices - convert to flattened 1D array and extract size info using ElementUtils + # Process indices through ElementUtils if self.indices is not None: - # Convert JAX arrays to numpy first if needed - indices_to_process = self.indices - index_sizes_to_process = self.index_sizes - cell_types_to_process = self.cell_types - - if HAS_JAX and isinstance(indices_to_process, jnp.ndarray): - indices_to_process = np.asarray(indices_to_process) - if HAS_JAX and isinstance(index_sizes_to_process, jnp.ndarray): - index_sizes_to_process = np.asarray(index_sizes_to_process) - if HAS_JAX and isinstance(cell_types_to_process, jnp.ndarray): - cell_types_to_process = np.asarray(cell_types_to_process) - - try: - self.indices, self.index_sizes, self.cell_types = ElementUtils.convert_array_input( - indices_to_process, index_sizes_to_process, cell_types_to_process - ) - except ValueError as e: - raise ValueError(f"Error processing indices: {e}") + self.indices, self.index_sizes, self.cell_types = ElementUtils.convert_array_input( + to_numpy(self.indices), to_numpy( + self.index_sizes), to_numpy(self.cell_types) + ) - # Auto-compute dimension from cell types if not explicitly provided + # Auto-compute dimension from cell types if self.dim is None: - if self.cell_types is not None and len(self.cell_types) > 0: - self.dim = CellTypeUtils.get_mesh_dimension(self.cell_types) - else: - # Default to 3D if no cell types available - self.dim = 3 + self.dim = CellTypeUtils.get_mesh_dimension(self.cell_types) \ + if self.cell_types is not None and len(self.cell_types) > 0 else 3 - # Handle marker conversion - convert sequence format to flattened arrays + # Convert markers to flattened arrays if self.markers: converted_markers = {} - for marker_name, marker_data in self.markers.items(): - try: - # Handle JAX arrays - marker_data_to_process = marker_data - if HAS_JAX and isinstance(marker_data_to_process, jnp.ndarray): - marker_data_to_process = np.asarray( - marker_data_to_process) - - if isinstance(marker_data_to_process, np.ndarray): - # Already a numpy array, keep as is but validate it has corresponding sizes/types - converted_markers[marker_name] = np.asarray( - marker_data_to_process, dtype=np.uint32) - - # If marker_cell_types is defined but marker_sizes is missing, calculate it automatically - if 
marker_name in self.marker_cell_types and marker_name not in self.marker_sizes: - self.marker_sizes[marker_name] = CellTypeUtils.infer_sizes_from_vtk_cell_types( - self.marker_cell_types[marker_name]) - - # Validate that we have both sizes and types - if marker_name not in self.marker_sizes or marker_name not in self.marker_cell_types: - raise ValueError( - f"Marker '{marker_name}' provided as array but missing marker_sizes or marker_cell_types") - else: - # Convert sequence of sequences to flattened structure using ElementUtils - # This handles lists, tuples, or any sequence type - marker_list = [list(element) - for element in marker_data_to_process] - flattened_indices, sizes, cell_types = ElementUtils.convert_list_to_flattened( - marker_list) - converted_markers[marker_name] = flattened_indices - self.marker_sizes[marker_name] = sizes - self.marker_cell_types[marker_name] = cell_types - - except ValueError as e: - raise ValueError( - f"Error converting markers for '{marker_name}': {e}") - - # Update markers to be the flattened arrays + for name, data in self.markers.items(): + data = to_numpy(data) + if isinstance(data, np.ndarray): + converted_markers[name] = data.astype(np.uint32) + # Auto-calculate sizes from cell_types if missing + if name in self.marker_cell_types and name not in self.marker_sizes: + self.marker_sizes[name] = CellTypeUtils.infer_sizes_from_vtk_cell_types( + self.marker_cell_types[name]) + if name not in self.marker_sizes or name not in self.marker_cell_types: + raise ValueError( + f"Marker '{name}' missing marker_sizes or marker_cell_types") + else: + # Convert list of lists to flattened structure + indices, sizes, types = ElementUtils.convert_list_to_flattened( + [list(el) for el in data]) + converted_markers[name] = indices + self.marker_sizes[name] = sizes + self.marker_cell_types[name] = types self.markers = converted_markers return self @@ -533,7 +527,8 @@ def triangulate(self) -> "Mesh": # Check for unsupported types skip_types = {VTKCellType.VTK_VERTEX, VTKCellType.VTK_LINE} - supported_types = {VTKCellType.VTK_TRIANGLE} | polygon_types | volume_types + supported_types = { + VTKCellType.VTK_TRIANGLE} | polygon_types | volume_types all_handled = supported_types | skip_types for i, ct in enumerate(effective_types): @@ -659,88 +654,10 @@ def _create_metadata(self, field_data: Dict[str, Any]) -> MeshMetadata: module_name=self.__class__.__module__, field_data=field_data, mesh_size=mesh_size, + array_type=ArrayUtils.detect_array_type(self.vertices), ) - # Override save_to_zip for mesh-specific encoding (vertices/indices use meshoptimizer) - def save_to_zip( - self, - destination: Union[PathLike, BytesIO], - date_time: Optional[tuple] = None - ) -> None: - """Save mesh to a zip file with meshoptimizer compression for vertices/indices.""" - # Encode vertices/indices using meshoptimizer - encoded_vertices = encode_vertex_buffer( - self.vertices, - self.vertex_count, - self.vertices.itemsize * self.vertices.shape[1], - ) - - encoded_indices = None - if self.indices is not None: - encoded_indices = encode_index_sequence( - self.indices, self.index_count, self.vertex_count - ) - - # Use Packable helpers for the rest - encoded_data = self.encode() - field_data = self._extract_non_array_fields() - - # Prepare files using parent helper, excluding vertices/indices (handled specially) - files_to_write = self._prepare_zip_files( - encoded_data, field_data, - exclude_arrays={"vertices", "indices"} - ) - - # Add mesh-specific files - files_to_write.append(("mesh/vertices.bin", 
encoded_vertices)) - if encoded_indices is not None: - files_to_write.append(("mesh/indices.bin", encoded_indices)) - - with zipfile.ZipFile(destination, "w", compression=zipfile.ZIP_DEFLATED, compresslevel=6) as zipf: - ZipUtils.write_files(zipf, files_to_write, date_time) - @classmethod - def load_from_zip(cls: Type[T], source: Union[PathLike, BytesIO], use_jax: bool = False) -> T: - """Load mesh from a zip file.""" - if use_jax and not HAS_JAX: - raise ValueError( - "JAX is not available. Install JAX to use JAX arrays.") - - with zipfile.ZipFile(source, "r") as zipf: - metadata = cls.load_metadata(zipf, MeshMetadata) - - # Get mesh size info from typed metadata - mesh_size = metadata.mesh_size - - # Decode vertices using meshoptimizer - with zipf.open("mesh/vertices.bin") as f: - encoded_vertices = f.read() - vertices = decode_vertex_buffer( - mesh_size.vertex_count, mesh_size.vertex_size, encoded_vertices - ) - if use_jax: - vertices = jnp.array(vertices) - - # Decode indices using meshoptimizer - indices = None - if "mesh/indices.bin" in zipf.namelist() and mesh_size.index_count: - with zipf.open("mesh/indices.bin") as f: - encoded_indices = f.read() - indices = decode_index_sequence( - mesh_size.index_count, mesh_size.index_size, encoded_indices - ) - if use_jax: - indices = jnp.array(indices) - - # Load and decode other arrays - data = ZipUtils.load_arrays(zipf, use_jax) - - # Build mesh args - mesh_data = {"vertices": vertices, "indices": indices} - mesh_data.update(data) - - # Merge non-array fields from metadata - if metadata.field_data: - PackableUtils.merge_field_data(mesh_data, metadata.field_data) - - return cls(**mesh_data) + def load_metadata(cls, handler: ReadHandler, metadata_cls: Type[PackableMetadata] = None) -> MeshMetadata: + """Load MeshMetadata from handler.""" + return super().load_metadata(handler, MeshMetadata) diff --git a/python/meshly/packable.py b/python/meshly/packable.py index 37ded7c..5949235 100644 --- a/python/meshly/packable.py +++ b/python/meshly/packable.py @@ -8,50 +8,25 @@ results, time-series data, or any structured data with numpy arrays. 
""" +import hashlib import json -import zipfile +from dataclasses import dataclass from io import BytesIO from typing import ( + Callable, Dict, + Generic, Optional, Set, Type, Any, TypeVar, Union, - List, ) -import numpy as np from pydantic import BaseModel, Field - -from .array import ArrayUtils, ArrayMetadata, EncodedArray +from .array import ArrayUtils, ArrayType, Array from .common import PathLike -from .utils.zip_utils import ZipUtils -from .utils.packable_utils import PackableUtils - -# Optional JAX support -try: - import jax.numpy as jnp - HAS_JAX = True -except ImportError: - jnp = None - HAS_JAX = False - -# Array type union - supports both numpy and JAX arrays -if HAS_JAX: - Array = Union[np.ndarray, jnp.ndarray] -else: - Array = np.ndarray - -# Recursive type for decoded array data from zip files -# Values are arrays or nested dicts containing arrays -ArrayData = Dict[str, Union[Array, Dict[str, Any]]] - - -class EncodedData(BaseModel): - """Container for encoded array data from a Packable.""" - arrays: Dict[str, EncodedArray] = Field( - default_factory=dict, description="Encoded arrays") +from .data_handler import WriteHandler, ReadHandler, ZipBuffer, CacheLoader, CacheSaver class PackableMetadata(BaseModel): @@ -61,10 +36,29 @@ class PackableMetadata(BaseModel): description="Module containing the data class") field_data: Dict[str, Any] = Field( default_factory=dict, description="Non-array field values") + packable_refs: Dict[str, str] = Field( + default_factory=dict, + description="SHA256 hashes for cached packable fields (field_name -> hash)" + ) + +TPackableMetadata = TypeVar("TPackableMetadata", bound=PackableMetadata) +TPackable = TypeVar("TPackable", bound="Packable") +FieldValue = TypeVar("FieldValue") # Value type for custom fields -T = TypeVar("T", bound="Packable") -M = TypeVar("M", bound=PackableMetadata) + +@dataclass +class CustomFieldConfig(Generic[FieldValue, TPackableMetadata]): + """Configuration for custom field encoding/decoding.""" + file_name: str + """File name in zip (without .bin extension)""" + encode: Callable[[FieldValue, Any], bytes] + """Encoder function: (value, instance) -> bytes""" + decode: Callable[[bytes, TPackableMetadata, + Optional[ArrayType]], FieldValue] + """Decoder function: (bytes, metadata, array_type) -> value""" + optional: bool = False + """Whether the field is optional (won't throw if missing)""" class Packable(BaseModel): @@ -102,51 +96,10 @@ def array_fields(self) -> Set[str]: continue value = getattr(self, field_name, None) if value is not None: - result.update(PackableUtils.extract_nested_arrays( + result.update(ArrayUtils.extract_nested_arrays( value, field_name).keys()) return result - def encode(self) -> EncodedData: - """ - Encode this container's arrays for serialization. - - Returns: - EncodedData with all arrays encoded - """ - encoded_arrays = {} - - for field_name in self.array_fields: - # Handle nested array paths (e.g., "textures.diffuse") - if "." 
in field_name: - # Extract the nested array - parts = field_name.split(".") - obj = self - for part in parts[:-1]: - if isinstance(obj, dict): - obj = obj[part] - else: - obj = getattr(obj, part) - - # Get the final array - if isinstance(obj, dict): - array = obj[parts[-1]] - else: - array = getattr(obj, parts[-1]) - - if PackableUtils.is_array(array): - encoded_arrays[field_name] = ArrayUtils.encode_array(array) - else: - # Handle direct array fields - try: - array = getattr(self, field_name) - if PackableUtils.is_array(array): - encoded_arrays[field_name] = ArrayUtils.encode_array( - array) - except AttributeError: - pass - - return EncodedData(arrays=encoded_arrays) - def _extract_non_array_fields(self) -> Dict[str, Any]: """Extract non-array field values for metadata, preserving BaseModel type info.""" model_data = {} @@ -155,8 +108,8 @@ def _extract_non_array_fields(self) -> Dict[str, Any]: if name in self.__private_attributes__ or name in direct_arrays: continue value = getattr(self, name, None) - if value is not None and not PackableUtils.is_array(value): - extracted = PackableUtils.extract_non_arrays(value) + if value is not None and not ArrayUtils.is_array(value): + extracted = ArrayUtils.extract_non_arrays(value) if extracted is not None: model_data[name] = extracted return model_data @@ -179,137 +132,467 @@ def _create_metadata(self, field_data: Dict[str, Any]) -> PackableMetadata: field_data=field_data, ) - def _prepare_zip_files( - self, - encoded_data: EncodedData, - field_data: Dict[str, Any], - exclude_arrays: Optional[set] = None - ) -> List[tuple]: + @classmethod + def load_metadata( + cls, + handler: ReadHandler, + metadata_cls: Type[TPackableMetadata] = PackableMetadata + ) -> TPackableMetadata: """ - Prepare list of files to write to zip. + Load and validate metadata using a read handler. 
Args: - encoded_data: Encoded array data - field_data: Non-array field data for metadata - exclude_arrays: Set of array names to exclude (handled separately) + handler: ReadHandler for reading files + metadata_cls: The metadata class to use for parsing (default: PackableMetadata) Returns: - List of (filename, data) tuples + Metadata object of the specified type + + Raises: + ValueError: If class name doesn't match """ - exclude_arrays = exclude_arrays or set() - files_to_write = [] + metadata_text = handler.read_text("metadata.json") + metadata_dict = json.loads(metadata_text) + metadata = metadata_cls(**metadata_dict) - # Add array data - for name in sorted(encoded_data.arrays.keys()): - if name in exclude_arrays: - continue - encoded_array = encoded_data.arrays[name] - array_path = name.replace(".", "/") - files_to_write.append( - (f"arrays/{array_path}/array.bin", encoded_array.data)) - - array_metadata = ArrayMetadata( - shape=list(encoded_array.shape), - dtype=str(encoded_array.dtype), - itemsize=encoded_array.itemsize, + if metadata.class_name != cls.__name__ or metadata.module_name != cls.__module__: + raise ValueError( + f"Class mismatch: expected {cls.__name__} but got {metadata.class_name} from {metadata.module_name}" ) - files_to_write.append(( - f"arrays/{array_path}/metadata.json", - json.dumps(array_metadata.model_dump(), - indent=2, sort_keys=True) - )) - - # Create metadata using overridable method - metadata = self._create_metadata(field_data) - files_to_write.append(("metadata.json", json.dumps( - metadata.model_dump(), indent=2, sort_keys=True))) - - return files_to_write - return files_to_write + return metadata def save_to_zip( self, destination: Union[PathLike, BytesIO], - date_time: Optional[tuple] = None + cache_saver: Optional[CacheSaver] = None, ) -> None: """ Save this container to a zip file. Args: - destination: Path to the output zip file or BytesIO object - date_time: Optional date_time tuple for deterministic zip files + destination: Path to the output zip file or BytesIO buffer + cache_saver: Optional callback to save nested Packables to cache. + When provided, nested Packable fields are saved via + cache_saver(hash, bytes) and only hash references are + stored in the parent zip. This enables deduplication + and smaller parent files. """ - encoded_data = self.encode() - field_data = self._extract_non_array_fields() - files_to_write = self._prepare_zip_files(encoded_data, field_data) + encoded = self.encode(cache_saver=cache_saver) + if isinstance(destination, BytesIO): + destination.write(encoded) + else: + with open(destination, "wb") as f: + f.write(encoded) + + @classmethod + def load_from_zip( + cls: Type[TPackable], + source: Union[PathLike, BytesIO], + array_type: Optional[ArrayType] = None, + cache_loader: Optional[CacheLoader] = None, + ) -> TPackable: + """ + Load a Packable from a zip file. + + Args: + source: Path to the input zip file or BytesIO object + array_type: Array backend to use ("numpy" or "jax"). If None (default), + uses the array_type stored in each array's metadata, + preserving the original array types that were saved. + cache_loader: Optional callback to load nested Packables from cache. + When the zip contains hash references (packable_refs), + cache_loader(hash) is called to retrieve cached bytes. 
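+
+        Example (illustrative; the paths are hypothetical, Mesh is one Packable subclass):
+            from meshly import Mesh, ReadHandler
+            cache_loader = ReadHandler.create_cache_loader("/path/to/cache")
+            mesh = Mesh.load_from_zip("mesh.zip", cache_loader=cache_loader)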
+ + Returns: + Loaded Packable instance + """ + if isinstance(source, BytesIO): + source.seek(0) + return cls.decode(source.read(), array_type, cache_loader) + else: + with open(source, "rb") as f: + return cls.decode(f.read(), array_type, cache_loader) - with zipfile.ZipFile(destination, "w", compression=zipfile.ZIP_DEFLATED, compresslevel=6) as zipf: - ZipUtils.write_files(zipf, files_to_write, date_time) + @classmethod + def _get_custom_fields(cls) -> Dict[str, CustomFieldConfig]: + """ + Get custom field configurations for this class. + + Subclasses override this to define custom encoders/decoders. + + Returns: + Dict mapping field names to CustomFieldConfig objects + """ + return {} @classmethod - def load_metadata( + def _get_custom_field_names(cls) -> Set[str]: + """Get set of field names that have custom encoding/decoding.""" + return set(cls._get_custom_fields().keys()) + + def _get_packable_fields(self) -> Dict[str, "Packable"]: + """Get fields that are Packable instances (excluding self).""" + packable_fields = {} + for field_name in type(self).model_fields: + if field_name in self.__private_attributes__: + continue + value = getattr(self, field_name, None) + if value is not None and isinstance(value, Packable): + packable_fields[field_name] = value + return packable_fields + + def _get_packable_field_names(self) -> Set[str]: + """Get set of field names that are Packable instances.""" + return set(self._get_packable_fields().keys()) + + @classmethod + def _get_packable_field_types(cls) -> Set[str]: + """Get field names that are Packable types from type hints (for decoding).""" + import typing + hints = typing.get_type_hints(cls) + packable_fields = set() + + for field_name, field_type in hints.items(): + # Handle Optional[PackableSubclass] + origin = typing.get_origin(field_type) + if origin is Union: + args = typing.get_args(field_type) + for arg in args: + if isinstance(arg, type) and issubclass(arg, Packable): + packable_fields.add(field_name) + break + elif isinstance(field_type, type) and issubclass(field_type, Packable): + packable_fields.add(field_name) + + return packable_fields + + @classmethod + def _decode_custom_fields( + cls, + handler: ReadHandler, + metadata: PackableMetadata, + data: Dict[str, Any], + array_type: Optional[ArrayType] = None + ) -> None: + """Decode fields with custom decoders.""" + for field_name, config in cls._get_custom_fields().items(): + try: + encoded_bytes = handler.read_binary(f"{config.file_name}.bin") + data[field_name] = config.decode( + encoded_bytes, metadata, array_type) + except (KeyError, FileNotFoundError): + if not config.optional: + raise ValueError( + f"Required custom field '{field_name}' ({config.file_name}.bin) not found in zip") + + @classmethod + def _load_standard_arrays( cls, - zipf: zipfile.ZipFile, - metadata_cls: Type[M] = PackableMetadata - ) -> M: + handler: ReadHandler, + data: Dict[str, Any], + skip_fields: Set[str], + array_type: Optional[ArrayType] = None + ) -> None: + """Load standard arrays from arrays/ folder, skipping custom fields.""" + try: + all_files = handler.list_files("arrays", recursive=True) + except (KeyError, FileNotFoundError): + return + + for file_path in all_files: + file_str = str(file_path) + if not file_str.endswith("/array.bin"): + continue + + # Extract array name: "arrays/markerIndices/boundary/array.bin" -> "markerIndices.boundary" + array_path = file_str[7:-10] # Remove "arrays/" and "/array.bin" + name = array_path.replace("/", ".") + + # Skip custom fields + base_field = 
name.split(".")[0] + if base_field in skip_fields: + continue + + decoded = ArrayUtils.load_array(handler, name, array_type) + + if "." in name: + # Nested array - build nested structure + parts = name.split(".") + current = data + for part in parts[:-1]: + if part not in current: + current[part] = {} + current = current[part] + current[parts[-1]] = decoded + else: + # Flat array + data[name] = decoded + + def _encode_standard_arrays(self, skip_fields: Set[str]) -> Dict[str, bytes]: + """Encode standard arrays, skipping custom fields.""" + encoded_arrays = {} + + for field_name in self.array_fields: + # Skip fields with custom encoding + if field_name in skip_fields: + continue + + # Handle nested array paths (e.g., "textures.diffuse") + if "." in field_name: + parts = field_name.split(".") + obj = self + for part in parts[:-1]: + if isinstance(obj, dict): + obj = obj[part] + else: + obj = getattr(obj, part) + + if isinstance(obj, dict): + array = obj[parts[-1]] + else: + array = getattr(obj, parts[-1]) + + if ArrayUtils.is_array(array): + encoded_arrays[field_name] = ArrayUtils.encode_array(array) + else: + # Handle direct array fields + try: + array = getattr(self, field_name) + if ArrayUtils.is_array(array): + encoded_arrays[field_name] = ArrayUtils.encode_array( + array) + except AttributeError: + pass + + return encoded_arrays + + def _encode_custom_fields(self, handler: WriteHandler) -> None: + """Encode fields with custom encoders.""" + for field_name, config in self._get_custom_fields().items(): + value = getattr(self, field_name) + if value is not None: + encoded_bytes = config.encode(value, self) + handler.write_binary(f"{config.file_name}.bin", encoded_bytes) + + def _encode_packable_fields( + self, + handler: WriteHandler, + cache_saver: Optional[CacheSaver] = None + ) -> Dict[str, str]: + """Encode fields that are Packable instances. + + Args: + handler: WriteHandler for the parent zip (used when no cache) + cache_saver: Optional callback to save to cache. When provided, + packables are saved via cache_saver(hash, bytes) and + only hash refs are returned. + + Returns: + Dict mapping field names to SHA256 hashes (only when cache_saver provided) + """ + packable_refs: Dict[str, str] = {} + + for field_name, packable in self._get_packable_fields().items(): + # Recursively use cache for nested packables too + encoded_bytes = packable.encode(cache_saver=cache_saver) + + if cache_saver is not None: + # Compute SHA256 hash of the encoded bytes + hash_digest = hashlib.sha256(encoded_bytes).hexdigest() + packable_refs[field_name] = hash_digest + + # Save to cache + cache_saver(hash_digest, encoded_bytes) + else: + # Embed in parent zip as before + handler.write_binary(f"packables/{field_name}.zip", encoded_bytes) + + return packable_refs + + def encode(self, cache_saver: Optional[CacheSaver] = None) -> bytes: """ - Load and validate metadata from an open zip file. + Serialize this Packable to bytes. Args: - zipf: Open ZipFile object - metadata_cls: The metadata class to use for parsing (default: PackableMetadata) + cache_saver: Optional callback to save nested Packables to cache. + When provided, nested Packable fields are saved via + cache_saver(hash, bytes) instead of embedding in the zip. 
Returns: - Metadata object of the specified type + Bytes containing the zip-encoded data + """ + custom_field_names = self._get_custom_field_names() + packable_field_names = self._get_packable_field_names() + skip_fields = custom_field_names | packable_field_names - Raises: - ValueError: If class name doesn't match + # Encode standard arrays + encoded_arrays = self._encode_standard_arrays(skip_fields) + + # Create metadata + field_data = self._extract_non_array_fields() + metadata = self._create_metadata(field_data) + + # Write to zip + destination = ZipBuffer() + handler = WriteHandler.create_handler(destination) + + # Save standard arrays + for name in sorted(encoded_arrays.keys()): + ArrayUtils.save_array(handler, name, encoded_arrays[name]) + + # Save custom encoded fields + self._encode_custom_fields(handler) + + # Save packable fields (with optional caching) + packable_refs = self._encode_packable_fields(handler, cache_saver) + + # Store packable refs in metadata if using cache + if packable_refs: + metadata.packable_refs = packable_refs + + # Save metadata + handler.write_text( + "metadata.json", + json.dumps(metadata.model_dump(), indent=2, sort_keys=True), + ) + + handler.finalize() + return destination.getvalue() + + @classmethod + def _decode_packable_fields( + cls, + handler: ReadHandler, + metadata: PackableMetadata, + data: Dict[str, Any], + array_type: Optional[ArrayType] = None, + cache_loader: Optional[CacheLoader] = None + ) -> None: + """Decode fields that are Packable instances. + + Supports both embedded packables (in packables/ folder) and cached + packables (referenced by SHA256 hash in metadata.packable_refs). + + Args: + handler: ReadHandler for the parent zip + metadata: Loaded metadata containing packable_refs + data: Dict to populate with decoded packables + array_type: Optional array backend to use + cache_loader: Optional callback to load cached packables by hash """ - with zipf.open("metadata.json") as f: - metadata_dict = json.loads(f.read().decode("utf-8")) - metadata = metadata_cls(**metadata_dict) + # Get field type hints to know the Packable subclass for each field + import typing + hints = typing.get_type_hints(cls) + + # Helper to decode a packable field given its bytes + def decode_field(field_name: str, encoded_bytes: bytes) -> None: + field_type = hints.get(field_name) + if field_type is None: + return + + # Handle Optional[PackableSubclass] + origin = typing.get_origin(field_type) + if origin is Union: + args = typing.get_args(field_type) + for arg in args: + if isinstance(arg, type) and issubclass(arg, Packable): + field_type = arg + break + + if not isinstance(field_type, type) or not issubclass(field_type, Packable): + return + + data[field_name] = field_type.decode(encoded_bytes, array_type, cache_loader) + + # First, try to load from cache using hash refs + if cache_loader and metadata.packable_refs: + for field_name, hash_digest in metadata.packable_refs.items(): + cached_bytes = cache_loader(hash_digest) + if cached_bytes is not None: + decode_field(field_name, cached_bytes) + + # Then load any embedded packables (for backward compatibility or no-cache case) + try: + packable_files = handler.list_files("packables", recursive=True) + except (KeyError, FileNotFoundError): + return + + for file_path in packable_files: + file_str = str(file_path) + if not file_str.endswith(".zip"): + continue - if metadata.class_name != cls.__name__ or metadata.module_name != cls.__module__: - raise ValueError( - f"Class mismatch: expected {cls.__name__} but got 
{metadata.class_name} from {metadata.module_name}" - ) + # Extract field name: "packables/inner_mesh.zip" -> "inner_mesh" + field_name = file_str[10:-4] # Remove "packables/" and ".zip" - return metadata + # Skip if already loaded from cache + if field_name in data: + continue + + encoded_bytes = handler.read_binary(file_str) + decode_field(field_name, encoded_bytes) @classmethod - def load_from_zip(cls: Type[T], source: Union[PathLike, BytesIO], use_jax: bool = False) -> T: + def decode( + cls: Type[TPackable], + buf: bytes, + array_type: Optional[ArrayType] = None, + cache_loader: Optional[CacheLoader] = None + ) -> TPackable: """ - Load a Packable from a zip file. + Deserialize a Packable from bytes. Args: - source: Path to the input zip file or BytesIO object - use_jax: If True and JAX is available, decode arrays as JAX arrays + buf: Bytes containing the zip-encoded data + array_type: Array backend to use. If None (default), uses the + array_type stored in each array's metadata. + cache_loader: Optional callback to load nested Packables from cache. + When metadata contains hash references, cache_loader(hash) + is called to retrieve cached bytes. Returns: Loaded Packable instance """ - if use_jax and not HAS_JAX: - raise ValueError( - "JAX is not available. Install JAX to use JAX arrays.") + handler = ReadHandler.create_handler(ZipBuffer(buf)) + metadata = cls.load_metadata(handler) + + # Fields to skip when loading standard arrays + skip_fields = cls._get_custom_field_names() | cls._get_packable_field_types() + + data: Dict[str, Any] = {} + + # Decode custom fields first + cls._decode_custom_fields(handler, metadata, data, array_type) + + # Load standard arrays + cls._load_standard_arrays(handler, data, skip_fields, array_type) - with zipfile.ZipFile(source, "r") as zipf: - metadata = cls.load_metadata(zipf) + # Decode packable fields + cls._decode_packable_fields(handler, metadata, data, array_type, cache_loader) - # Load and decode all arrays (handles both flat and nested) - data = ZipUtils.load_arrays(zipf, use_jax) + # Merge non-array fields from metadata + if metadata.field_data: + Packable._merge_field_data(data, metadata.field_data) - # Merge non-array fields from metadata - if metadata.field_data: - PackableUtils.merge_field_data(data, metadata.field_data) + return cls(**data) - return cls(**data) + def __reduce__(self): + """ + Support for pickle serialization. + + Array types are preserved automatically via the per-array metadata. + """ + return ( + self.__class__.decode, + (self.encode(),), + ) @staticmethod def load_array( source: Union[PathLike, BytesIO], name: str, - use_jax: bool = False + array_type: Optional[ArrayType] = None ) -> Array: """ Load a single array from a zip file without loading the entire object. @@ -317,9 +600,10 @@ def load_array( Useful for large files where you only need one array. Args: - source: Path to the zip file or BytesIO object + source: Path to the zip file or BytesIO buffer name: Array name (e.g., "normals" or "markerIndices.boundary") - use_jax: If True, decode as JAX array + array_type: Array backend to use ("numpy" or "jax"). If None (default), + uses the array_type stored in the array's metadata. 
Returns: Decoded array (numpy or JAX) @@ -330,79 +614,82 @@ def load_array( Example: normals = Mesh.load_array("mesh.zip", "normals") """ - with zipfile.ZipFile(source, "r") as zipf: - return ZipUtils.load_array(zipf, name, use_jax) - - def to_numpy(self: T) -> T: + if isinstance(source, BytesIO): + source.seek(0) + handler = ReadHandler.create_handler(ZipBuffer(source.read())) + else: + with open(source, "rb") as f: + handler = ReadHandler.create_handler(ZipBuffer(f.read())) + return ArrayUtils.load_array(handler, name, array_type) + + def convert_to(self: TPackable, array_type: ArrayType) -> TPackable: """ - Create a new Packable with all arrays converted to NumPy arrays. + Create a new Packable with all arrays converted to the specified type. - Returns: - A new Packable with all arrays as NumPy arrays - """ - if not HAS_JAX: - return self.model_copy(deep=True) - - data_copy = self.model_copy(deep=True) - - def convert_to_numpy(obj: Any) -> Any: - if isinstance(obj, jnp.ndarray): - return np.array(obj) - elif isinstance(obj, np.ndarray): - return obj - elif isinstance(obj, dict): - return {key: convert_to_numpy(value) for key, value in obj.items()} - elif isinstance(obj, (list, tuple)): - return type(obj)(convert_to_numpy(item) for item in obj) - else: - return obj - - for field_name in data_copy.model_fields_set: - try: - value = getattr(data_copy, field_name) - if value is not None: - converted = convert_to_numpy(value) - setattr(data_copy, field_name, converted) - except AttributeError: - pass - - return data_copy - - def to_jax(self: T) -> T: - """ - Create a new Packable with all arrays converted to JAX arrays. + Args: + array_type: Target array backend ("numpy" or "jax") Returns: - A new Packable with all arrays as JAX arrays + A new Packable with all arrays converted Raises: - ValueError: If JAX is not available + AssertionError: If JAX is requested but not available """ - if not HAS_JAX: - raise ValueError( - "JAX is not available. 
Install JAX to convert to JAX arrays.") - data_copy = self.model_copy(deep=True) - def convert_to_jax(obj: Any) -> Any: - if isinstance(obj, np.ndarray): - return jnp.array(obj) - elif HAS_JAX and isinstance(obj, jnp.ndarray): - return obj - elif isinstance(obj, dict): - return {key: convert_to_jax(value) for key, value in obj.items()} - elif isinstance(obj, (list, tuple)): - return type(obj)(convert_to_jax(item) for item in obj) - else: - return obj - for field_name in data_copy.model_fields_set: try: value = getattr(data_copy, field_name) if value is not None: - converted = convert_to_jax(value) + converted = ArrayUtils.convert_recursive(value, array_type) setattr(data_copy, field_name, converted) except AttributeError: pass return data_copy + + @staticmethod + def _reconstruct_model(data: Dict[str, Any]) -> Any: + """Reconstruct BaseModel from serialized dict with __model_class__/__model_module__.""" + if not isinstance(data, dict): + return data + + # Recursively process nested dicts first + processed = {k: Packable._reconstruct_model(v) if isinstance(v, dict) else v + for k, v in data.items() if k not in ("__model_class__", "__model_module__")} + + if "__model_class__" not in data: + return processed + + try: + import importlib + module = importlib.import_module(data["__model_module__"]) + model_class = getattr(module, data["__model_class__"]) + return model_class(**processed) + except (ImportError, AttributeError): + return processed + + @staticmethod + def _merge_field_data(data: Dict[str, Any], field_data: Dict[str, Any]) -> None: + """Merge metadata fields into data, reconstructing BaseModel instances.""" + for key, value in field_data.items(): + existing = data.get(key) + if not isinstance(value, dict): + data[key] = value + elif "__model_class__" in value: + # Single BaseModel: merge arrays then reconstruct + merged = {**value, ** + (existing if isinstance(existing, dict) else {})} + data[key] = Packable._reconstruct_model(merged) + elif isinstance(existing, dict): + # Check if dict of BaseModels + for subkey, subval in value.items(): + if isinstance(subval, dict) and "__model_class__" in subval: + merged = {**subval, **existing.get(subkey, {})} + existing[subkey] = Packable._reconstruct_model(merged) + elif isinstance(subval, dict) and isinstance(existing.get(subkey), dict): + Packable._merge_field_data(existing[subkey], subval) + else: + existing[subkey] = subval + else: + data[key] = Packable._reconstruct_model(value) diff --git a/python/meshly/utils/__init__.py b/python/meshly/utils/__init__.py index 84b443a..8c5d72a 100644 --- a/python/meshly/utils/__init__.py +++ b/python/meshly/utils/__init__.py @@ -2,18 +2,14 @@ Utility modules for meshly. This package contains utility functions for mesh operations, element handling, -triangulation, and zip file operations. +and triangulation. 
""" from .element_utils import ElementUtils, TriangulationUtils from .mesh_utils import MeshUtils -from .packable_utils import PackableUtils -from .zip_utils import ZipUtils __all__ = [ "ElementUtils", "TriangulationUtils", "MeshUtils", - "PackableUtils", - "ZipUtils", ] diff --git a/python/meshly/utils/packable_utils.py b/python/meshly/utils/packable_utils.py deleted file mode 100644 index 9aa17aa..0000000 --- a/python/meshly/utils/packable_utils.py +++ /dev/null @@ -1,116 +0,0 @@ -"""Utilities for extracting arrays and reconstructing BaseModel instances.""" - -from typing import Any, Dict, Set, Union -import numpy as np -from pydantic import BaseModel - -# Optional JAX support -try: - import jax.numpy as jnp - HAS_JAX = True -except ImportError: - jnp = None - HAS_JAX = False - -if HAS_JAX: - Array = Union[np.ndarray, jnp.ndarray] -else: - Array = np.ndarray - - -class PackableUtils: - """Static utilities for array extraction and BaseModel reconstruction.""" - - @staticmethod - def is_array(obj: Any) -> bool: - """Check if obj is a numpy or JAX array.""" - return isinstance(obj, np.ndarray) or (HAS_JAX and isinstance(obj, jnp.ndarray)) - - @staticmethod - def extract_nested_arrays(obj: Any, prefix: str = "") -> Dict[str, Array]: - """Recursively extract arrays from nested dicts and BaseModel instances.""" - arrays = {} - if PackableUtils.is_array(obj): - arrays[prefix] = obj - elif isinstance(obj, BaseModel): - for name in type(obj).model_fields: - value = getattr(obj, name, None) - if value is not None: - key = f"{prefix}.{name}" if prefix else name - arrays.update( - PackableUtils.extract_nested_arrays(value, key)) - elif isinstance(obj, dict): - for k, v in obj.items(): - key = f"{prefix}.{k}" if prefix else k - arrays.update(PackableUtils.extract_nested_arrays(v, key)) - return arrays - - @staticmethod - def extract_non_arrays(obj: Any) -> Any: - """Extract non-array values, preserving BaseModel type info for reconstruction.""" - if PackableUtils.is_array(obj): - return None - if isinstance(obj, BaseModel): - result = {"__model_class__": obj.__class__.__name__, - "__model_module__": obj.__class__.__module__} - for name in type(obj).model_fields: - val = getattr(obj, name, None) - if not PackableUtils.is_array(val): - extracted = PackableUtils.extract_non_arrays(val) - if extracted is not None: - result[name] = extracted - return result if len(result) > 2 else None - if isinstance(obj, dict): - result = {k: PackableUtils.extract_non_arrays(v) for k, v in obj.items() - if not PackableUtils.is_array(v)} - result = {k: v for k, v in result.items() if v is not None} - return result or None - return obj - - @staticmethod - def reconstruct_model(data: Dict[str, Any]) -> Any: - """Reconstruct BaseModel from serialized dict with __model_class__/__model_module__.""" - if not isinstance(data, dict): - return data - - # Recursively process nested dicts first - processed = {k: PackableUtils.reconstruct_model(v) if isinstance(v, dict) else v - for k, v in data.items() if k not in ("__model_class__", "__model_module__")} - - if "__model_class__" not in data: - return processed - - try: - import importlib - module = importlib.import_module(data["__model_module__"]) - model_class = getattr(module, data["__model_class__"]) - return model_class(**processed) - except (ImportError, AttributeError): - return processed - - @staticmethod - def merge_field_data(data: Dict[str, Any], field_data: Dict[str, Any]) -> None: - """Merge metadata fields into data, reconstructing BaseModel instances.""" - for key, 
value in field_data.items(): - existing = data.get(key) - if not isinstance(value, dict): - data[key] = value - elif "__model_class__" in value: - # Single BaseModel: merge arrays then reconstruct - merged = {**value, ** - (existing if isinstance(existing, dict) else {})} - data[key] = PackableUtils.reconstruct_model(merged) - elif isinstance(existing, dict): - # Check if dict of BaseModels - for subkey, subval in value.items(): - if isinstance(subval, dict) and "__model_class__" in subval: - merged = {**subval, **existing.get(subkey, {})} - existing[subkey] = PackableUtils.reconstruct_model( - merged) - elif isinstance(subval, dict) and isinstance(existing.get(subkey), dict): - PackableUtils.merge_field_data( - existing[subkey], subval) - else: - existing[subkey] = subval - else: - data[key] = PackableUtils.reconstruct_model(value) diff --git a/python/meshly/utils/zip_utils.py b/python/meshly/utils/zip_utils.py deleted file mode 100644 index 8dcbea0..0000000 --- a/python/meshly/utils/zip_utils.py +++ /dev/null @@ -1,139 +0,0 @@ -""" -Utility functions for zip file operations used by Packable and Mesh. -""" - -import json -import zipfile -from typing import Dict, List, Optional, Any - -import numpy as np - -from ..array import ArrayUtils, ArrayMetadata, EncodedArray - -# Optional JAX support -try: - import jax.numpy as jnp - HAS_JAX = True -except ImportError: - jnp = None - HAS_JAX = False - - -class ZipUtils: - """Static utility methods for zip file operations.""" - - @staticmethod - def write_files( - zipf: zipfile.ZipFile, - files_to_write: List[tuple], - date_time: Optional[tuple] = None - ) -> None: - """ - Write files to an open zip file. - - Args: - zipf: Open ZipFile object - files_to_write: List of (filename, data) tuples - date_time: Optional date_time tuple for deterministic output - """ - for filename, data_bytes in sorted(files_to_write): - if date_time is not None: - info = zipfile.ZipInfo(filename=filename, date_time=date_time) - else: - info = zipfile.ZipInfo(filename=filename) - info.compress_type = zipfile.ZIP_DEFLATED - info.external_attr = 0o644 << 16 - if isinstance(data_bytes, str): - data_bytes = data_bytes.encode('utf-8') - zipf.writestr(info, data_bytes) - - @staticmethod - def load_array( - zipf: zipfile.ZipFile, - name: str, - use_jax: bool = False - ) -> Any: - """ - Load and decode a single array from a zip file. 
- - Args: - zipf: Open ZipFile object - name: Array name (e.g., "normals" or "markerIndices.boundary") - use_jax: If True, decode as JAX array - - Returns: - Decoded array (numpy or JAX) - - Raises: - KeyError: If array not found in zip - """ - # Convert dotted name to path - array_path = name.replace(".", "/") - bin_path = f"arrays/{array_path}/array.bin" - meta_path = f"arrays/{array_path}/metadata.json" - - if bin_path not in zipf.namelist(): - raise KeyError(f"Array '{name}' not found in zip file") - - # Load metadata and data - with zipf.open(meta_path) as f: - metadata_dict = json.loads(f.read().decode("utf-8")) - metadata = ArrayMetadata(**metadata_dict) - - with zipf.open(bin_path) as f: - encoded_bytes = f.read() - - encoded = EncodedArray( - data=encoded_bytes, - shape=tuple(metadata.shape), - dtype=np.dtype(metadata.dtype), - itemsize=metadata.itemsize, - ) - - decoded = ArrayUtils.decode_array(encoded) - if use_jax and HAS_JAX: - decoded = jnp.array(decoded) - - return decoded - - @staticmethod - def load_arrays(zipf: zipfile.ZipFile, use_jax: bool = False) -> Dict[str, Any]: - """ - Load and decode all arrays from a zip file's arrays/ folder. - - Handles both flat arrays (e.g., "vertices") and nested arrays - (e.g., "markerIndices/boundary" becomes {"markerIndices": {"boundary": ...}}) - - Args: - zipf: Open ZipFile object - use_jax: If True, decode as JAX arrays - - Returns: - Decoded arrays organized into proper structure - """ - result: Dict[str, Any] = {} - - for array_file in zipf.namelist(): - if not (array_file.startswith("arrays/") and array_file.endswith("/array.bin")): - continue - - # Extract array name: "arrays/markerIndices/boundary/array.bin" -> "markerIndices.boundary" - array_path = array_file[7:-10] # Remove "arrays/" and "/array.bin" - name = array_path.replace("/", ".") - - decoded = ZipUtils.load_array(zipf, name, use_jax) - - if "." in name: - # Nested array - build nested structure - parts = name.split(".") - current = result - for part in parts[:-1]: - if part not in current: - current[part] = {} - current = current[part] - current[parts[-1]] = decoded - else: - # Flat array - result[name] = decoded - - return result diff --git a/python/tests/test_conversion.py b/python/tests/test_conversion.py index d683953..006ec14 100644 --- a/python/tests/test_conversion.py +++ b/python/tests/test_conversion.py @@ -1,10 +1,11 @@ """ -Tests for array type conversion functions (to_numpy, to_jax). +Tests for array type conversion functions (convert_to). 
""" import unittest import numpy as np -from meshly import Mesh, Array, HAS_JAX +from meshly import Mesh, Array +from meshly.array import HAS_JAX class TestConversion(unittest.TestCase): @@ -12,44 +13,48 @@ class TestConversion(unittest.TestCase): def setUp(self): """Set up test data.""" - self.vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32) + self.vertices = np.array( + [[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32) self.indices = np.array([0, 1, 2], dtype=np.uint32) - def test_to_numpy_conversion(self): + def test_convert_to_numpy(self): """Test converting mesh to NumPy arrays.""" # Start with NumPy mesh mesh = Mesh(vertices=self.vertices, indices=self.indices) - + # Convert to NumPy (should be no-op but create new instance) - numpy_mesh = mesh.to_numpy() - + numpy_mesh = mesh.convert_to("numpy") + self.assertIsInstance(numpy_mesh.vertices, np.ndarray) self.assertIsInstance(numpy_mesh.indices, np.ndarray) np.testing.assert_array_equal(numpy_mesh.vertices, self.vertices) np.testing.assert_array_equal(numpy_mesh.indices, self.indices) - + # Verify it's a different instance self.assertIsNot(numpy_mesh, mesh) @unittest.skipUnless(HAS_JAX, "JAX not available") - def test_to_jax_conversion(self): + def test_convert_to_jax(self): """Test converting mesh to JAX arrays.""" import jax.numpy as jnp - + # Start with NumPy mesh mesh = Mesh(vertices=self.vertices, indices=self.indices) - + # Convert to JAX - jax_mesh = mesh.to_jax() - - self.assertTrue(hasattr(jax_mesh.vertices, 'device'), "Vertices should be JAX arrays") - self.assertTrue(hasattr(jax_mesh.indices, 'device'), "Indices should be JAX arrays") - np.testing.assert_array_equal(np.array(jax_mesh.vertices), self.vertices) + jax_mesh = mesh.convert_to("jax") + + self.assertTrue(hasattr(jax_mesh.vertices, 'device'), + "Vertices should be JAX arrays") + self.assertTrue(hasattr(jax_mesh.indices, 'device'), + "Indices should be JAX arrays") + np.testing.assert_array_equal( + np.array(jax_mesh.vertices), self.vertices) np.testing.assert_array_equal(np.array(jax_mesh.indices), self.indices) - + # Verify it's a different instance self.assertIsNot(jax_mesh, mesh) - + # Original mesh should still have NumPy arrays self.assertIsInstance(mesh.vertices, np.ndarray) @@ -57,56 +62,59 @@ def test_to_jax_conversion(self): def test_bidirectional_conversion(self): """Test converting between NumPy and JAX arrays.""" import jax.numpy as jnp - + # Start with NumPy mesh numpy_mesh = Mesh(vertices=self.vertices, indices=self.indices) - + # Convert to JAX - jax_mesh = numpy_mesh.to_jax() + jax_mesh = numpy_mesh.convert_to("jax") self.assertTrue(hasattr(jax_mesh.vertices, 'device')) - + # Convert back to NumPy - numpy_mesh2 = jax_mesh.to_numpy() + numpy_mesh2 = jax_mesh.convert_to("numpy") self.assertIsInstance(numpy_mesh2.vertices, np.ndarray) self.assertIsInstance(numpy_mesh2.indices, np.ndarray) - + # Data should be preserved np.testing.assert_array_equal(numpy_mesh2.vertices, self.vertices) np.testing.assert_array_equal(numpy_mesh2.indices, self.indices) @unittest.skipUnless(HAS_JAX, "JAX not available") - def test_to_jax_with_custom_fields(self): + def test_convert_to_jax_with_custom_fields(self): """Test converting custom mesh class to JAX.""" import jax.numpy as jnp from pydantic import Field from typing import Optional - + class CustomMesh(Mesh): - normals: Optional[Array] = Field(None, description="Normal vectors") - + normals: Optional[Array] = Field( + None, description="Normal vectors") + normals = np.array([[0, 0, 1], [0, 
0, 1], [0, 0, 1]], dtype=np.float32) - mesh = CustomMesh(vertices=self.vertices, indices=self.indices, normals=normals) - + mesh = CustomMesh(vertices=self.vertices, + indices=self.indices, normals=normals) + # Convert to JAX - jax_mesh = mesh.to_jax() - + jax_mesh = mesh.convert_to("jax") + # Verify all arrays are converted self.assertTrue(hasattr(jax_mesh.vertices, 'device')) self.assertTrue(hasattr(jax_mesh.normals, 'device')) - + # Verify data is preserved np.testing.assert_array_equal(np.array(jax_mesh.normals), normals) @unittest.skipUnless(HAS_JAX, "JAX not available") - def test_to_numpy_with_nested_arrays(self): + def test_convert_to_numpy_with_nested_arrays(self): """Test converting mesh with nested dictionary arrays to NumPy.""" import jax.numpy as jnp from pydantic import Field from typing import Dict, Any, Optional - + class CustomMesh(Mesh): - materials: Optional[Dict[str, Any]] = Field(None, description="Material properties") - + materials: Optional[Dict[str, Any]] = Field( + None, description="Material properties") + # Create with JAX arrays in nested structure materials = { 'diffuse': jnp.array([1.0, 0.0, 0.0], dtype=jnp.float32), @@ -114,30 +122,31 @@ class CustomMesh(Mesh): 'roughness': jnp.array([0.5], dtype=jnp.float32), } } - + jax_mesh = CustomMesh( vertices=jnp.array(self.vertices), indices=jnp.array(self.indices), materials=materials ) - + # Convert to NumPy - numpy_mesh = jax_mesh.to_numpy() - + numpy_mesh = jax_mesh.convert_to("numpy") + # Verify nested arrays are converted self.assertIsInstance(numpy_mesh.materials['diffuse'], np.ndarray) - self.assertIsInstance(numpy_mesh.materials['properties']['roughness'], np.ndarray) + self.assertIsInstance( + numpy_mesh.materials['properties']['roughness'], np.ndarray) - def test_to_jax_without_jax_raises_error(self): - """Test that to_jax raises error when JAX is unavailable.""" + def test_convert_to_jax_without_jax_raises_error(self): + """Test that convert_to("jax") raises error when JAX is unavailable.""" if HAS_JAX: self.skipTest("JAX is available, cannot test unavailable scenario") - + mesh = Mesh(vertices=self.vertices, indices=self.indices) - - with self.assertRaises(ValueError) as context: - mesh.to_jax() - + + with self.assertRaises(AssertionError) as context: + mesh.convert_to("jax") + self.assertIn("JAX is not available", str(context.exception)) diff --git a/python/tests/test_dict_arrays.py b/python/tests/test_dict_arrays.py index 184281f..542aa13 100644 --- a/python/tests/test_dict_arrays.py +++ b/python/tests/test_dict_arrays.py @@ -115,18 +115,10 @@ def test_dict_array_encoding_decoding(self): material_name="test_material" ) - # Encode the mesh + # Encode the mesh - returns bytes encoded_mesh = mesh.encode() - - # Check that all arrays are encoded (including vertices and indices) - expected_array_names = { - "vertices", "indices", "index_sizes", "cell_types", - "textures.diffuse", "textures.normal", "textures.specular", - "material_data.surface.roughness", "material_data.surface.metallic", - "material_data.lighting.emission" - } - - self.assertEqual(set(encoded_mesh.arrays.keys()), expected_array_names) + self.assertIsInstance(encoded_mesh, bytes) + self.assertGreater(len(encoded_mesh), 0) # Decode the mesh via zip round-trip (proper way to encode/decode) buffer = BytesIO() diff --git a/python/tests/test_encoded_mesh.py b/python/tests/test_encoded_mesh.py deleted file mode 100644 index d3ab45c..0000000 --- a/python/tests/test_encoded_mesh.py +++ /dev/null @@ -1,200 +0,0 @@ -""" -Tests for the EncodedMesh 
functionality. - -This file contains tests to verify that the EncodedMesh class and related -functions work correctly. -""" -import os -import tempfile -import numpy as np -import unittest -from typing import Optional, List -from pydantic import Field - -from meshly import Mesh -from meshly.packable import EncodedData - - -class TestEncodedMesh(unittest.TestCase): - """Test EncodedMesh functionality.""" - - def setUp(self): - """Set up test data.""" - # Create a simple mesh (a cube) - self.vertices = np.array([ - # positions - [-0.5, -0.5, -0.5], - [0.5, -0.5, -0.5], - [0.5, 0.5, -0.5], - [-0.5, 0.5, -0.5], - [-0.5, -0.5, 0.5], - [0.5, -0.5, 0.5], - [0.5, 0.5, 0.5], - [-0.5, 0.5, 0.5] - ], dtype=np.float32) - - self.indices = np.array([ - 0, 1, 2, 2, 3, 0, # front - 1, 5, 6, 6, 2, 1, # right - 5, 4, 7, 7, 6, 5, # back - 4, 0, 3, 3, 7, 4, # left - 3, 2, 6, 6, 7, 3, # top - 4, 5, 1, 1, 0, 4 # bottom - ], dtype=np.uint32) - - self.mesh = Mesh(vertices=self.vertices, indices=self.indices) - - def get_triangles_set(self, vertices, indices): - """ - Get a set of triangles from vertices and indices. - Each triangle is represented as a frozenset of tuples of vertex coordinates. - This makes the comparison invariant to vertex order within triangles. - """ - triangles = set() - for i in range(0, len(indices), 3): - # Get the three vertices of the triangle - v1 = tuple(vertices[indices[i]]) - v2 = tuple(vertices[indices[i+1]]) - v3 = tuple(vertices[indices[i+2]]) - # Create a frozenset of the vertices (order-invariant) - triangle = frozenset([v1, v2, v3]) - triangles.add(triangle) - return triangles - - def test_mesh_encode_decode(self): - """Test that the mesh.encode and Mesh.load_from_zip methods work.""" - from io import BytesIO - - # Encode the mesh using the instance method - encoded_data = self.mesh.encode() - - # Check that the encoded_data is an instance of EncodedData - self.assertIsInstance(encoded_data, EncodedData) - - # Decode the mesh via zip round-trip - buffer = BytesIO() - self.mesh.save_to_zip(buffer) - buffer.seek(0) - decoded_mesh = Mesh.load_from_zip(buffer) - - # Check that the decoded vertices match the original - np.testing.assert_array_almost_equal( - self.mesh.vertices, decoded_mesh.vertices) - - # Check that the triangles match - original_triangles = self.get_triangles_set( - self.mesh.vertices, self.mesh.indices) - decoded_triangles = self.get_triangles_set( - decoded_mesh.vertices, decoded_mesh.indices) - - self.assertEqual(original_triangles, decoded_triangles) - - -class CustomMesh(Mesh): - """A custom mesh class for testing.""" - normals: np.ndarray = Field(..., description="Vertex normals") - colors: Optional[np.ndarray] = Field(None, description="Vertex colors") - material_name: str = Field("default", description="Material name") - tags: List[str] = Field(default_factory=list, - description="Tags for the mesh") - - -class TestCustomMesh(unittest.TestCase): - """Test custom Mesh subclass functionality.""" - - def setUp(self): - """Set up test data.""" - # Create a simple mesh (a cube) - self.vertices = np.array([ - [-0.5, -0.5, -0.5], - [0.5, -0.5, -0.5], - [0.5, 0.5, -0.5], - [-0.5, 0.5, -0.5], - [-0.5, -0.5, 0.5], - [0.5, -0.5, 0.5], - [0.5, 0.5, 0.5], - [-0.5, 0.5, 0.5] - ], dtype=np.float32) - - self.indices = np.array([ - 0, 1, 2, 2, 3, 0, # front - 1, 5, 6, 6, 2, 1, # right - 5, 4, 7, 7, 6, 5, # back - 4, 0, 3, 3, 7, 4, # left - 3, 2, 6, 6, 7, 3, # top - 4, 5, 1, 1, 0, 4 # bottom - ], dtype=np.uint32) - - self.normals = np.array([ - [0.0, 0.0, -1.0], - [0.0, 
0.0, -1.0], - [0.0, 0.0, -1.0], - [0.0, 0.0, -1.0], - [0.0, 0.0, 1.0], - [0.0, 0.0, 1.0], - [0.0, 0.0, 1.0], - [0.0, 0.0, 1.0] - ], dtype=np.float32) - - self.colors = np.array([ - [1.0, 0.0, 0.0, 1.0], - [0.0, 1.0, 0.0, 1.0], - [0.0, 0.0, 1.0, 1.0], - [1.0, 1.0, 0.0, 1.0], - [1.0, 0.0, 1.0, 1.0], - [0.0, 1.0, 1.0, 1.0], - [0.5, 0.5, 0.5, 1.0], - [1.0, 1.0, 1.0, 1.0] - ], dtype=np.float32) - - self.mesh = CustomMesh( - vertices=self.vertices, - indices=self.indices, - normals=self.normals, - colors=self.colors, - material_name="test_material", - tags=["test", "cube"] - ) - - def test_custom_mesh_attributes(self): - """Test that the custom mesh attributes are set correctly.""" - self.assertEqual(self.mesh.vertex_count, len(self.vertices)) - self.assertEqual(self.mesh.index_count, len(self.indices)) - np.testing.assert_array_equal(self.mesh.normals, self.normals) - np.testing.assert_array_equal(self.mesh.colors, self.colors) - self.assertEqual(self.mesh.material_name, "test_material") - self.assertEqual(self.mesh.tags, ["test", "cube"]) - - def test_custom_mesh_encode_decode(self): - """Test that the custom mesh can be encoded and decoded.""" - # Create a temporary file for testing - with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as temp_file: - temp_path = temp_file.name - - try: - # Save the mesh to a zip file - self.mesh.save_to_zip(temp_path) - - # Load the mesh from the zip file - loaded_mesh = CustomMesh.load_from_zip(temp_path) - - # Check that the loaded mesh has the correct attributes - self.assertEqual(loaded_mesh.vertex_count, self.mesh.vertex_count) - self.assertEqual(loaded_mesh.index_count, self.mesh.index_count) - np.testing.assert_array_almost_equal( - loaded_mesh.vertices, self.mesh.vertices) - np.testing.assert_array_almost_equal( - loaded_mesh.normals, self.mesh.normals) - np.testing.assert_array_almost_equal( - loaded_mesh.colors, self.mesh.colors) - self.assertEqual(loaded_mesh.material_name, - self.mesh.material_name) - self.assertEqual(loaded_mesh.tags, self.mesh.tags) - finally: - # Clean up - if os.path.exists(temp_path): - os.remove(temp_path) - - -if __name__ == '__main__': - unittest.main() diff --git a/python/tests/test_index_sizes.py b/python/tests/test_index_sizes.py index e0495e5..14e975f 100644 --- a/python/tests/test_index_sizes.py +++ b/python/tests/test_index_sizes.py @@ -151,11 +151,10 @@ def test_index_sizes_encoding_decoding(self): mesh = Mesh(vertices=self.vertices, indices=indices) original_index_sizes = mesh.index_sizes.copy() - # Encode the mesh + # Encode the mesh - returns bytes encoded_mesh = mesh.encode() - - # Verify index_sizes is in the encoded arrays - self.assertIn("index_sizes", encoded_mesh.arrays) + self.assertIsInstance(encoded_mesh, bytes) + self.assertGreater(len(encoded_mesh), 0) # Decode via zip round-trip buffer = BytesIO() @@ -433,11 +432,10 @@ def test_cell_types_encoding_decoding(self): cell_types=explicit_cell_types ) - # Encode the mesh + # Encode the mesh - returns bytes encoded_mesh = mesh.encode() - - # Verify cell_types is in the encoded arrays - self.assertIn("cell_types", encoded_mesh.arrays) + self.assertIsInstance(encoded_mesh, bytes) + self.assertGreater(len(encoded_mesh), 0) # Decode via zip round-trip from io import BytesIO diff --git a/python/tests/test_jax_support.py b/python/tests/test_jax_support.py index fd261dc..fc733d3 100644 --- a/python/tests/test_jax_support.py +++ b/python/tests/test_jax_support.py @@ -5,7 +5,8 @@ import unittest import numpy as np from io import BytesIO -from meshly import 
Mesh, Array, HAS_JAX +from meshly import Mesh, Array +from meshly.array import HAS_JAX class TestJAXSupport(unittest.TestCase): @@ -46,7 +47,7 @@ def test_numpy_functionality_preserved(self): buffer = BytesIO() mesh.save_to_zip(buffer) buffer.seek(0) - decoded = Mesh.load_from_zip(buffer, use_jax=False) + decoded = Mesh.load_from_zip(buffer, array_type="numpy") self.assertIsInstance(decoded.vertices, np.ndarray) self.assertIsInstance(decoded.indices, np.ndarray) @@ -65,7 +66,7 @@ def test_jax_functionality(self): buffer = BytesIO() mesh.save_to_zip(buffer) buffer.seek(0) - decoded_jax = Mesh.load_from_zip(buffer, use_jax=True) + decoded_jax = Mesh.load_from_zip(buffer, array_type="jax") # Verify arrays are JAX arrays self.assertTrue(hasattr(decoded_jax.vertices, 'device'), @@ -111,8 +112,8 @@ def test_jax_unavailable_error(self): buffer.seek(0) # Should raise error when JAX is requested but not available - with self.assertRaises(ValueError) as context: - Mesh.load_from_zip(buffer, use_jax=True) + with self.assertRaises(AssertionError) as context: + Mesh.load_from_zip(buffer, array_type="jax") self.assertIn("JAX is not available", str(context.exception)) @@ -158,7 +159,7 @@ class CustomMesh(Mesh): buffer = BytesIO() mesh.save_to_zip(buffer) buffer.seek(0) - decoded_jax = CustomMesh.load_from_zip(buffer, use_jax=True) + decoded_jax = CustomMesh.load_from_zip(buffer, array_type="jax") # Verify all arrays are JAX arrays self.assertTrue(hasattr(decoded_jax.vertices, 'device'), diff --git a/python/tests/test_packable.py b/python/tests/test_packable.py index 030c0eb..205f2ff 100644 --- a/python/tests/test_packable.py +++ b/python/tests/test_packable.py @@ -8,7 +8,7 @@ import numpy as np from pydantic import BaseModel, Field, ConfigDict -from meshly.packable import Packable, EncodedData +from meshly.packable import Packable class SimpleData(Packable): @@ -75,9 +75,10 @@ def test_encode_decode(self): values=np.array([1.0, 2.0, 3.0], dtype=np.float32) ) - # Test that encode produces arrays + # Test that encode produces bytes encoded = original.encode() - self.assertIn("values", encoded.arrays) + self.assertIsInstance(encoded, bytes) + self.assertGreater(len(encoded), 0) # Test full round-trip via zip buffer = BytesIO() @@ -155,22 +156,18 @@ def test_nested_dict_arrays(self): loaded.fields["density"], data.fields["density"] ) - def test_deterministic_zip(self): - """Test deterministic zip output with date_time parameter.""" + def test_deterministic_encode(self): + """Test that encode produces consistent output.""" data = SimpleData( name="deterministic", values=np.array([1.0, 2.0, 3.0], dtype=np.float32) ) - date_time = (2020, 1, 1, 0, 0, 0) + # Multiple encodes should produce the same bytes + encoded1 = data.encode() + encoded2 = data.encode() - buffer1 = BytesIO() - data.save_to_zip(buffer1, date_time=date_time) - - buffer2 = BytesIO() - data.save_to_zip(buffer2, date_time=date_time) - - self.assertEqual(buffer1.getvalue(), buffer2.getvalue()) + self.assertEqual(encoded1, encoded2) def test_class_mismatch_error(self): """Test error when loading with wrong class.""" @@ -271,5 +268,135 @@ def test_dict_of_basemodel_with_optional_none_field(self): ) +class InnerPackable(Packable): + """Inner packable for testing nested support.""" + label: str = Field(..., description="Label") + data: np.ndarray = Field(..., description="Data array") + + +class OuterPackable(Packable): + """Outer packable containing a nested packable.""" + name: str = Field(..., description="Name") + inner: Optional[InnerPackable] = 
Field(None, description="Nested packable") + + +class TestNestedPackableCache(unittest.TestCase): + """Test nested Packable with cache support.""" + + def test_nested_packable_without_cache(self): + """Test nested packable save/load without cache.""" + inner = InnerPackable( + label="inner", + data=np.array([1.0, 2.0, 3.0], dtype=np.float32) + ) + outer = OuterPackable(name="outer", inner=inner) + + buffer = BytesIO() + outer.save_to_zip(buffer) + + buffer.seek(0) + loaded = OuterPackable.load_from_zip(buffer) + + self.assertEqual(loaded.name, "outer") + self.assertIsNotNone(loaded.inner) + self.assertEqual(loaded.inner.label, "inner") + np.testing.assert_array_almost_equal(loaded.inner.data, inner.data) + + def test_nested_packable_with_cache(self): + """Test nested packable save/load with cache.""" + from meshly.data_handler import ReadHandler, WriteHandler + + inner = InnerPackable( + label="cached_inner", + data=np.array([4.0, 5.0, 6.0], dtype=np.float32) + ) + outer = OuterPackable(name="cached_outer", inner=inner) + + with tempfile.TemporaryDirectory() as tmpdir: + cache_path = os.path.join(tmpdir, "cache") + zip_path = os.path.join(tmpdir, "outer.zip") + + # Create cache saver and save with cache + cache_saver = WriteHandler.create_cache_saver(cache_path) + outer.save_to_zip(zip_path, cache_saver=cache_saver) + + # Verify cache file was created + cache_files = os.listdir(cache_path) + self.assertEqual(len(cache_files), 1) + self.assertTrue(cache_files[0].endswith(".zip")) + + # Create cache loader and load with cache + cache_loader = ReadHandler.create_cache_loader(cache_path) + loaded = OuterPackable.load_from_zip(zip_path, cache_loader=cache_loader) + + self.assertEqual(loaded.name, "cached_outer") + self.assertIsNotNone(loaded.inner) + self.assertEqual(loaded.inner.label, "cached_inner") + np.testing.assert_array_almost_equal(loaded.inner.data, inner.data) + + def test_cache_deduplication(self): + """Test that identical nested packables share the same cache file.""" + from meshly.data_handler import ReadHandler, WriteHandler + + # Create identical inner packables + inner1 = InnerPackable( + label="same", + data=np.array([1.0, 2.0], dtype=np.float32) + ) + inner2 = InnerPackable( + label="same", + data=np.array([1.0, 2.0], dtype=np.float32) + ) + outer1 = OuterPackable(name="outer1", inner=inner1) + outer2 = OuterPackable(name="outer2", inner=inner2) + + with tempfile.TemporaryDirectory() as tmpdir: + cache_path = os.path.join(tmpdir, "cache") + zip1_path = os.path.join(tmpdir, "outer1.zip") + zip2_path = os.path.join(tmpdir, "outer2.zip") + + # Save both with same cache + cache_saver = WriteHandler.create_cache_saver(cache_path) + outer1.save_to_zip(zip1_path, cache_saver=cache_saver) + outer2.save_to_zip(zip2_path, cache_saver=cache_saver) + + # Both should use the same cache file (SHA256 deduplication) + cache_files = os.listdir(cache_path) + self.assertEqual(len(cache_files), 1) + + # Both should load correctly + cache_loader = ReadHandler.create_cache_loader(cache_path) + loaded1 = OuterPackable.load_from_zip(zip1_path, cache_loader=cache_loader) + loaded2 = OuterPackable.load_from_zip(zip2_path, cache_loader=cache_loader) + + self.assertEqual(loaded1.inner.label, "same") + self.assertEqual(loaded2.inner.label, "same") + + def test_cache_missing_falls_back_to_embedded(self): + """Test loading works when cache file is missing but data is embedded.""" + from meshly.data_handler import ReadHandler + + inner = InnerPackable( + label="fallback", + data=np.array([7.0, 8.0], 
dtype=np.float32) + ) + outer = OuterPackable(name="fallback_outer", inner=inner) + + # Save without cache (embedded) + buffer = BytesIO() + outer.save_to_zip(buffer) + + # Load with a cache loader that won't find anything (should still work from embedded data) + with tempfile.TemporaryDirectory() as tmpdir: + cache_path = os.path.join(tmpdir, "cache") + os.makedirs(cache_path) # Create empty cache dir + cache_loader = ReadHandler.create_cache_loader(cache_path) + buffer.seek(0) + loaded = OuterPackable.load_from_zip(buffer, cache_loader=cache_loader) + + self.assertEqual(loaded.name, "fallback_outer") + self.assertEqual(loaded.inner.label, "fallback") + + if __name__ == "__main__": unittest.main() diff --git a/typescript/README.md b/typescript/README.md index 0de8d0c..59a2a23 100644 --- a/typescript/README.md +++ b/typescript/README.md @@ -1,6 +1,6 @@ # meshly -A TypeScript library for loading Python meshoptimizer zip files and converting to THREE.js geometries. +A TypeScript library for decoding Python meshly zip files and converting to THREE.js geometries. ## Installation @@ -12,25 +12,26 @@ pnpm add meshly ## Features -- Load meshes from Python meshly zip files +- Decode meshes from Python meshly zip files - Decode meshoptimizer-compressed vertex and index buffers - Convert to THREE.js BufferGeometry - Support for polygon meshes with automatic triangulation - Marker extraction for boundary conditions and regions +- Custom field decoding via `getCustomFields()` override - Full TypeScript type definitions ## Quick Start -### Load Mesh from Zip +### Decode Mesh from Zip ```typescript import { Mesh } from 'meshly' import * as THREE from 'three' -// Load mesh from zip file +// Fetch and decode mesh const response = await fetch('mesh.zip') const zipData = await response.arrayBuffer() -const mesh = await Mesh.loadFromZip(zipData) +const mesh = await Mesh.decode(zipData) // Convert to THREE.js geometry const geometry = mesh.toBufferGeometry() @@ -43,7 +44,7 @@ const threeMesh = new THREE.Mesh(geometry, material) ```typescript import { Mesh } from 'meshly' -const mesh = await Mesh.loadFromZip(zipData) +const mesh = await Mesh.decode(zipData) // Access mesh properties console.log(`Vertices: ${mesh.vertices.length / 3}`) @@ -65,6 +66,27 @@ Packable (base class) └── Mesh (3D mesh with meshoptimizer decoding) ``` +### Custom Field Decoding + +Subclasses can override `getCustomFields()` to define custom decoders: + +```typescript +protected static override getCustomFields(): Record { + return { + vertices: { + fileName: 'vertices', + decode: (data, metadata) => Mesh._decodeVertices(data, metadata), + optional: false + }, + indices: { + fileName: 'indices', + decode: (data, metadata) => Mesh._decodeIndices(data, metadata), + optional: true + } + } +} +``` + ### Metadata Interfaces ```typescript @@ -93,10 +115,9 @@ interface MeshSize { ``` mesh.zip ├── metadata.json # MeshMetadata (extends PackableMetadata) -├── mesh/ # Meshoptimizer encoded -│ ├── vertices.bin -│ └── indices.bin -└── arrays/ # Additional arrays +├── vertices.bin # Meshoptimizer-encoded (custom field) +├── indices.bin # Meshoptimizer-encoded (custom field, optional) +└── arrays/ # Standard arrays ├── indexSizes/ │ ├── array.bin │ └── metadata.json @@ -110,7 +131,7 @@ mesh.zip Meshly handles various polygon types with automatic triangulation for THREE.js: ```typescript -const mesh = await Mesh.loadFromZip(zipData) +const mesh = await Mesh.decode(zipData) // Check polygon structure console.log(`Polygon count: ${mesh.getPolygonCount()}`) 
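// Illustrative sketch (editor addition, not part of the changeset above): continues the
// polygon example using the helpers listed in the API reference further down
// (isUniformPolygons, getPolygonIndices); the mapping of return shapes is an assumption.
if (mesh.isUniformPolygons()) {
  // Assumed: uniform meshes return a single flat index array
  const flat = mesh.getPolygonIndices() as Uint32Array
  console.log(`Uniform polygons, ${flat.length} indices total`)
} else {
  // Assumed: mixed meshes return one index array per polygon
  const perPolygon = mesh.getPolygonIndices() as Uint32Array[]
  console.log(`Mixed polygons, ${perPolygon.length} polygons`)
}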
@@ -143,7 +164,7 @@ const geometry = mesh.toBufferGeometry() Extract submeshes by marker name: ```typescript -const mesh = await Mesh.loadFromZip(zipData) +const mesh = await Mesh.decode(zipData) // Check available markers console.log('Markers:', Object.keys(mesh.markerIndices || {})) @@ -158,7 +179,7 @@ const geometry = mesh.extractMarkerAsBufferGeometry('inlet') ## Loading Individual Arrays -Load a single array without loading the entire mesh (useful for large files): +Load a single array without decoding the entire mesh (useful for large files): ```typescript // Load just the normals array @@ -168,28 +189,24 @@ const normals = await Mesh.loadArray(zipData, 'normals') const inletIndices = await Mesh.loadArray(zipData, 'markerIndices.inlet') ``` -## Decoding Encoded Meshes +## API Reference -Decode meshes from the EncodedMesh format: +### CustomFieldConfig ```typescript -import { Mesh, EncodedMesh } from 'meshly' - -// Decode an encoded mesh -const encodedMesh: EncodedMesh = { - vertices: encodedVertexBuffer, - indices: encodedIndexBuffer, - vertex_count: 100, - vertex_size: 12, // 3 floats × 4 bytes - index_count: 300, - index_size: 4, - arrays: { /* additional encoded arrays */ } -} +// Cache loader function type for nested packables +type CacheLoader = (hash: string) => Promise -const mesh = Mesh.decode(encodedMesh) -``` +// Custom decoder function type +type CustomDecoder = (data: Uint8Array, metadata: M) => T -## API Reference +// Custom field configuration +interface CustomFieldConfig { + fileName: string // File name in zip (without .bin) + decode: CustomDecoder // Decoder function + optional?: boolean // Won't throw if missing (default: false) +} +``` ### Packable (Base Class) @@ -197,9 +214,23 @@ const mesh = Mesh.decode(encodedMesh) class Packable { constructor(data: TData) - static async loadMetadata(zip: JSZip): Promise - static async loadFromZip(zipData: ArrayBuffer | Uint8Array): Promise> + // Decode from zip data (with optional cache loader for nested packables) + static async decode( + zipData: ArrayBuffer | Uint8Array, + cacheLoader?: CacheLoader + ): Promise> + + // Load single array static async loadArray(zipData: ArrayBuffer | Uint8Array, name: string): Promise + + // Load metadata + static async loadMetadata(zip: JSZip): Promise + + // Custom field configuration (override in subclasses) + protected static getCustomFields(): Record + + // Packable field types for nested packable decoding (override in subclasses) + protected static getPackableFieldTypes(): Record } ``` @@ -222,9 +253,8 @@ class Mesh extends Packable { isUniformPolygons(): boolean getPolygonIndices(): Uint32Array[] | Uint32Array - // Decoding - static decode(encodedMesh: EncodedMesh): Mesh - static async loadFromZip(zipData: ArrayBuffer | Uint8Array): Promise + // Decoding (with optional cache loader for nested packables) + static async decode(zipData: ArrayBuffer | Uint8Array, cacheLoader?: CacheLoader): Promise // Marker extraction extractByMarker(markerName: string): Mesh @@ -232,6 +262,9 @@ class Mesh extends Packable { // THREE.js integration toBufferGeometry(): THREE.BufferGeometry extractMarkerAsBufferGeometry(markerName: string): THREE.BufferGeometry + + // Custom field configuration for meshoptimizer decoding + protected static override getCustomFields(): Record> } ``` @@ -259,11 +292,13 @@ interface PackableMetadata { class_name: string module_name: string field_data?: Record + packable_refs?: Record // SHA256 hash refs for cached packables } // Mesh-specific metadata extending base interface 
MeshMetadata extends PackableMetadata { mesh_size: MeshSize + array_type?: "numpy" | "jax" // For Python compatibility } interface MeshSize { @@ -274,30 +309,61 @@ interface MeshSize { } ``` +### Cache Support + +When loading meshes with nested Packables that were saved with caching (using Python's `cache_saver`), provide a `CacheLoader` function: + +```typescript +import { Mesh, CacheLoader } from 'meshly' + +// CacheLoader type: (hash: string) => Promise + +// Example: Fetch from server cache +const cacheLoader: CacheLoader = async (hash) => { + const response = await fetch(`/cache/${hash}.zip`) + return response.ok ? response.arrayBuffer() : undefined +} + +// Decode with cache support +const mesh = await Mesh.decode(zipData, cacheLoader) +``` + +**Cache loader examples:** + +```typescript +// From IndexedDB +const idbLoader: CacheLoader = async (hash) => { + const db = await openDB('meshly-cache') + return db.get('packables', hash) +} + +// From Map (in-memory) +const memoryCache = new Map() +const memoryLoader: CacheLoader = async (hash) => memoryCache.get(hash) +``` +``` + ### Utility Classes ```typescript // Array encoding/decoding class ArrayUtils { static decodeArray(data: Uint8Array, metadata: ArrayMetadata): Float32Array | Uint32Array -} - -// Zip file utilities -class ZipUtils { - static async loadArrays(zip: JSZip): Promise> static async loadArray(zip: JSZip, name: string): Promise } -// Packable field merging utilities -class PackableUtils { - static mergeFieldData(data: Record, fieldData: Record): void - static stripModelMetadata(obj: Record): Record +// Array metadata +interface ArrayMetadata { + shape: number[] + dtype: string + itemsize: number + array_type?: "numpy" | "jax" // For Python compatibility } ``` ## Python Compatibility -This library is designed to load meshes created by the Python meshly library: +This library is designed to decode meshes created by the Python meshly library: ```python # Python: Save mesh @@ -307,9 +373,9 @@ mesh.save_to_zip("mesh.zip") ``` ```typescript -// TypeScript: Load mesh +// TypeScript: Decode mesh import { Mesh } from 'meshly' -const mesh = await Mesh.loadFromZip(zipData) +const mesh = await Mesh.decode(zipData) const geometry = mesh.toBufferGeometry() ``` diff --git a/typescript/src/array.ts b/typescript/src/array.ts index bcea9a1..ce1b78d 100644 --- a/typescript/src/array.ts +++ b/typescript/src/array.ts @@ -1,3 +1,4 @@ +import JSZip from "jszip" import { MeshoptDecoder } from "meshoptimizer" /** @@ -5,28 +6,47 @@ import { MeshoptDecoder } from "meshoptimizer" */ export type TypedArray = Float32Array | Float64Array | Int8Array | Int16Array | Int32Array | Uint8Array | Uint16Array | Uint32Array +/** + * Array backend type - matches Python's ArrayType for compatibility. + * TypeScript always uses TypedArrays, but this field is stored in metadata + * for cross-language compatibility with Python's numpy/jax arrays. 
+ */ +export type ArrayType = "numpy" | "jax" /** - * Metadata for an array + * Metadata for an array - matches Python's ArrayMetadata */ export interface ArrayMetadata { shape: number[] dtype: string itemsize: number + /** Array backend type (for Python compatibility) - defaults to "numpy" */ + array_type?: ArrayType +} + +/** + * Encoded array with data and metadata - matches Python's EncodedArray + */ +export interface EncodedArray { + /** Encoded data as Uint8Array (bytes in Python) */ + data: Uint8Array + /** Array metadata */ + metadata: ArrayMetadata } /** - * Utility class for decoding arrays + * Utility class for encoding and decoding arrays */ export class ArrayUtils { /** * Decodes an encoded array using the meshoptimizer algorithm * - * @param data Encoded array data - * @param metadata Array metadata - * @returns Decoded array as a Float32Array or Uint32Array + * @param encodedArray EncodedArray containing data and metadata + * @returns Decoded array as a TypedArray */ - static decodeArray(data: Uint8Array, metadata: ArrayMetadata): TypedArray { + static decodeArray(encodedArray: EncodedArray): TypedArray { + const { data, metadata } = encodedArray + // Calculate the total number of items const totalItems = metadata.shape.reduce((acc, dim) => acc * dim, 1) @@ -47,4 +67,39 @@ export class ArrayUtils { return new Float32Array(destUint8Array.buffer) } } + + /** + * Load and decode a single array from a zip file. + * + * @param zip - JSZip instance to read from + * @param name - Array name (e.g., "normals" or "markerIndices.boundary") + * @returns Decoded typed array + * @throws Error if array not found in zip + */ + static async loadArray( + zip: JSZip, + name: string + ): Promise { + // Convert dotted name to path + const arrayPath = name.replace(/\./g, "/") + const arraysFolder = zip.folder("arrays") + + if (!arraysFolder) { + throw new Error(`Array '${name}' not found in zip file`) + } + + const metadataFile = arraysFolder.file(`${arrayPath}/metadata.json`) + const arrayFile = arraysFolder.file(`${arrayPath}/array.bin`) + + if (!metadataFile || !arrayFile) { + throw new Error(`Array '${name}' not found in zip file`) + } + + const metadataText = await metadataFile.async("text") + const metadata: ArrayMetadata = JSON.parse(metadataText) + const data = await arrayFile.async("uint8array") + + const encodedArray: EncodedArray = { data, metadata } + return ArrayUtils.decodeArray(encodedArray) + } } \ No newline at end of file diff --git a/typescript/src/index.ts b/typescript/src/index.ts index 123ca16..8545d94 100644 --- a/typescript/src/index.ts +++ b/typescript/src/index.ts @@ -7,12 +7,15 @@ // Export from packable module export { + CacheLoader, + CustomDecoder, + CustomFieldConfig, Packable, PackableMetadata } from './packable' // Export from array module -export { ArrayMetadata, ArrayUtils } from './array' +export { ArrayMetadata, ArrayType, ArrayUtils, EncodedArray } from './array' // Export from mesh module export { @@ -22,6 +25,3 @@ export { MeshSize } from './mesh' -// Export from utils module -export { ZipUtils } from './utils' - diff --git a/typescript/src/mesh.ts b/typescript/src/mesh.ts index 7fafd86..b8e8827 100644 --- a/typescript/src/mesh.ts +++ b/typescript/src/mesh.ts @@ -1,8 +1,8 @@ import JSZip from 'jszip' import { MeshoptDecoder } from "meshoptimizer" import * as THREE from 'three' -import { Packable, PackableMetadata } from './packable' -import { PackableUtils, ZipUtils } from './utils' +import { ArrayType } from './array' +import { CustomFieldConfig, Packable, 
diff --git a/typescript/src/index.ts b/typescript/src/index.ts
index 123ca16..8545d94 100644
--- a/typescript/src/index.ts
+++ b/typescript/src/index.ts
@@ -7,12 +7,15 @@
 
 // Export from packable module
 export {
+  CacheLoader,
+  CustomDecoder,
+  CustomFieldConfig,
   Packable,
   PackableMetadata
 } from './packable'
 
 // Export from array module
-export { ArrayMetadata, ArrayUtils } from './array'
+export { ArrayMetadata, ArrayType, ArrayUtils, EncodedArray } from './array'
 
 // Export from mesh module
 export {
@@ -22,6 +25,3 @@ export {
   MeshSize
 } from './mesh'
 
-// Export from utils module
-export { ZipUtils } from './utils'
-
diff --git a/typescript/src/mesh.ts b/typescript/src/mesh.ts
index 7fafd86..b8e8827 100644
--- a/typescript/src/mesh.ts
+++ b/typescript/src/mesh.ts
@@ -1,8 +1,8 @@
 import JSZip from 'jszip'
 import { MeshoptDecoder } from "meshoptimizer"
 import * as THREE from 'three'
-import { Packable, PackableMetadata } from './packable'
-import { PackableUtils, ZipUtils } from './utils'
+import { ArrayType } from './array'
+import { CustomFieldConfig, Packable, PackableMetadata } from './packable'
 
 /**
@@ -21,6 +21,8 @@
  */
 export interface MeshMetadata extends PackableMetadata {
   mesh_size: MeshSize
+  /** Array backend type for vertices/indices (for Python compatibility) */
+  array_type?: ArrayType
 }
 
 /**
@@ -83,8 +85,8 @@ export interface MeshData {
  *
  * @example
  * ```typescript
- * // Load from zip
- * const mesh = await Mesh.loadFromZip(zipData)
+ * // Decode from zip data
+ * const mesh = await Mesh.decode(zipData)
  *
  * // Convert to Three.js BufferGeometry
  * const geometry = mesh.toBufferGeometry()
@@ -102,6 +104,86 @@ export class Mesh extends Packable {
   declare markerTypes?: Record
   declare markers?: Record
 
+  // ============================================================
+  // Custom field decoders for meshoptimizer
+  // ============================================================
+
+  /**
+   * Decode vertices using meshoptimizer
+   */
+  private static _decodeVertices(data: Uint8Array, metadata: MeshMetadata): Float32Array {
+    const meshSize = metadata.mesh_size
+    const verticesUint8 = new Uint8Array(meshSize.vertex_count * meshSize.vertex_size)
+    MeshoptDecoder.decodeVertexBuffer(
+      verticesUint8,
+      meshSize.vertex_count,
+      meshSize.vertex_size,
+      data
+    )
+    return new Float32Array(verticesUint8.buffer)
+  }
+
+  /**
+   * Decode indices using meshoptimizer
+   */
+  private static _decodeIndices(data: Uint8Array, metadata: MeshMetadata): Uint32Array {
+    const meshSize = metadata.mesh_size
+    const indicesUint8 = new Uint8Array(meshSize.index_count! * meshSize.index_size)
+    MeshoptDecoder.decodeIndexSequence(
+      indicesUint8,
+      meshSize.index_count!,
+      meshSize.index_size,
+      data
+    )
+    return new Uint32Array(indicesUint8.buffer)
+  }
+
+  /**
+   * Custom field configurations for mesh-specific decoding
+   */
+  protected static override getCustomFields(): Record<string, CustomFieldConfig<MeshMetadata>> {
+    return {
+      vertices: {
+        fileName: 'vertices',
+        decode: (data, metadata) => Mesh._decodeVertices(data, metadata),
+        optional: false
+      },
+      indices: {
+        fileName: 'indices',
+        decode: (data, metadata) => Mesh._decodeIndices(data, metadata),
+        optional: true
+      }
+    }
+  }
+
+  // ============================================================
+  // Override decode to use MeshMetadata
+  // ============================================================
+
+  /**
+   * Decode a Mesh from zip data
+   */
+  static override async decode(zipData: ArrayBuffer | Uint8Array): Promise<Mesh> {
+    const zip = await JSZip.loadAsync(zipData)
+    const metadata = await Packable.loadMetadata(zip)
+    const customFieldNames = Mesh.getCustomFieldNames()
+
+    const data: Record<string, unknown> = {}
+
+    // Decode custom fields (vertices, indices)
+    await Mesh.decodeCustomFields(zip, metadata, data)
+
+    // Load standard arrays
+    await Mesh.loadStandardArrays(zip, data, customFieldNames)
+
+    // Merge non-array fields from metadata
+    if (metadata.field_data) {
+      Mesh._mergeFieldData(data, metadata.field_data)
+    }
+
+    return new Mesh(data as unknown as MeshData)
+  }
+
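The custom-field hook is not limited to vertices and indices. A hedged sketch of a hypothetical subclass that registers one extra field (`colors.bin`, read here as raw little-endian float32 bytes rather than a meshoptimizer stream); the `ColoredMesh`/`colors` names are illustrative, and `MeshMetadata` is assumed to be exported alongside `Mesh`:

```typescript
import { CustomFieldConfig, Mesh, MeshMetadata } from 'meshly'

class ColoredMesh extends Mesh {
  declare colors?: Float32Array

  protected static override getCustomFields(): Record<string, CustomFieldConfig<MeshMetadata>> {
    return {
      ...super.getCustomFields(), // keep the vertices/indices configuration
      colors: {
        fileName: 'colors',
        // Assumes colors.bin holds plain float32 values, not meshopt-encoded data
        decode: (data) => new Float32Array(data.buffer, data.byteOffset, data.byteLength / 4),
        optional: true
      }
    }
  }
}
```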
   // ============================================================
   // Mesh-specific utility methods
   // ============================================================
 
@@ -160,64 +242,6 @@ export class Mesh extends Packable {
   }
 
-  // ============================================================
-  // Zip file loading
-  // ============================================================
-
-  /**
-   * Load a mesh from a zip file
-   */
-  static override async loadFromZip(zipData: ArrayBuffer | Uint8Array): Promise<Mesh> {
-    const zip = await JSZip.loadAsync(zipData)
-
-    // Load metadata using Packable's generic loadMetadata
-    const metadata = await Packable.loadMetadata(zip)
-    const meshSize = metadata.mesh_size
-
-    // Mesh-specific: decode vertices with meshoptimizer
-    const vertexData = await zip.file('mesh/vertices.bin')?.async('uint8array')
-    if (!vertexData) {
-      throw new Error('Vertex data not found in zip file')
-    }
-    const verticesUint8 = new Uint8Array(meshSize.vertex_count * meshSize.vertex_size)
-    MeshoptDecoder.decodeVertexBuffer(
-      verticesUint8,
-      meshSize.vertex_count,
-      meshSize.vertex_size,
-      vertexData
-    )
-    const vertices = new Float32Array(verticesUint8.buffer)
-
-    // Mesh-specific: decode indices with meshoptimizer (if present)
-    let indices: Uint32Array | undefined
-    if (meshSize.index_count !== null) {
-      const indexData = await zip.file('mesh/indices.bin')?.async('uint8array')
-      if (indexData) {
-        const indicesUint8 = new Uint8Array(meshSize.index_count * meshSize.index_size)
-        MeshoptDecoder.decodeIndexSequence(
-          indicesUint8,
-          meshSize.index_count,
-          meshSize.index_size,
-          indexData
-        )
-        indices = new Uint32Array(indicesUint8.buffer)
-      }
-    }
-
-    // Reuse shared utility for loading additional arrays
-    const data = await ZipUtils.loadArrays(zip)
-    const meshData = data as unknown as MeshData
-    meshData.vertices = vertices
-    if (indices) meshData.indices = indices
-
-    // Merge non-array fields from metadata
-    if (metadata.field_data) {
-      PackableUtils.mergeFieldData(meshData as unknown as Record<string, unknown>, metadata.field_data)
-    }
-
-    return new Mesh(meshData)
-  }
-
   // ============================================================
   // Marker extraction
   // ============================================================
diff --git a/typescript/src/packable.ts b/typescript/src/packable.ts
index b858598..cd7412e 100644
--- a/typescript/src/packable.ts
+++ b/typescript/src/packable.ts
@@ -7,17 +7,9 @@
  */
 
 import JSZip from "jszip"
-import { TypedArray } from "./array"
-import { PackableUtils } from "./utils/packableUtils"
-import { ZipUtils } from "./utils/zipUtils"
+import { ArrayUtils, TypedArray } from "./array"
 
-/**
- * Recursive type for decoded array data from zip files.
- * Values are typed arrays or nested objects containing arrays.
- */
-export type ArrayData = Record<string, TypedArray | ArrayData>
-
 /**
  * Base metadata interface for Packable zip files.
  * Uses snake_case to match Python serialization format.
@@ -29,8 +21,36 @@ export interface PackableMetadata {
   module_name: string
   /** Non-array field values */
   field_data?: Record<string, unknown>
+  /** SHA256 hash references for cached packable fields (field_name -> hash) */
+  packable_refs?: Record<string, string>
 }
 
+/**
+ * Cache loader function type.
+ * Given a SHA256 hash, returns the cached packable bytes (or undefined if not found).
+ */
+export type CacheLoader = (hash: string) => Promise<Uint8Array | undefined>
+
+/**
+ * Custom decoder function type.
+ * Takes encoded bytes and metadata, returns decoded value.
+ */
+export type CustomDecoder<T = unknown, M extends PackableMetadata = PackableMetadata> = (
+  data: Uint8Array,
+  metadata: M
+) => T
+
+/**
+ * Custom field configuration for decoding
+ */
+export interface CustomFieldConfig<M extends PackableMetadata = PackableMetadata> {
+  /** File name in zip (without .bin extension) */
+  fileName: string
+  /** Custom decoder function */
+  decode: CustomDecoder<unknown, M>
+  /** Whether the field is optional (won't throw if missing) */
+  optional?: boolean
+}
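A `CacheLoader` is just a lookup from content hash to bytes; where those bytes come from is up to the application. A minimal sketch backed by an in-memory `Map`, with a hypothetical `/packable-cache/<hash>` endpoint as fallback:

```typescript
import { CacheLoader, Packable } from 'meshly'

const memory = new Map<string, Uint8Array>()

const cacheLoader: CacheLoader = async (hash) => {
  const hit = memory.get(hash)
  if (hit) return hit
  const res = await fetch(`/packable-cache/${hash}`) // hypothetical cache endpoint
  if (!res.ok) return undefined
  const bytes = new Uint8Array(await res.arrayBuffer())
  memory.set(hash, bytes)
  return bytes
}

// Nested packables referenced in metadata.packable_refs are then resolved via the loader.
const zipBytes = await (await fetch('scene.zip')).arrayBuffer() // 'scene.zip' is an assumption
const obj = await Packable.decode(zipBytes, cacheLoader)
```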
 
 /**
@@ -39,6 +59,8 @@ export interface PackableMetadata {
  * Subclasses can define typed array attributes which will be automatically
  * detected and loaded from zip files. Non-array fields are preserved
  * in metadata.
+ *
+ * Custom field decoding is supported via `getCustomFields()` override.
  */
 export class Packable {
   /**
@@ -60,48 +82,286 @@
     return JSON.parse(metadataText) as T
   }
 
+  // ============================================================
+  // Custom field handling
+  // ============================================================
+
+  /**
+   * Get custom field configurations for this class.
+   * Subclasses override this to define custom decoders.
+   */
+  protected static getCustomFields(): Record<string, CustomFieldConfig> {
+    return {}
+  }
+
+  /**
+   * Get the set of custom field names
+   */
+  protected static getCustomFieldNames(): Set<string> {
+    return new Set(Object.keys(this.getCustomFields()))
+  }
+
+  /**
+   * Decode custom fields from the zip
+   */
+  protected static async decodeCustomFields<M extends PackableMetadata>(
+    zip: JSZip,
+    metadata: M,
+    data: Record<string, unknown>
+  ): Promise<void> {
+    const customFields = this.getCustomFields()
+
+    for (const [fieldName, config] of Object.entries(customFields)) {
+      const file = zip.file(`${config.fileName}.bin`)
+      if (file) {
+        const encoded = await file.async('uint8array')
+        data[fieldName] = config.decode(encoded, metadata)
+      } else if (!config.optional) {
+        throw new Error(`Required custom field '${fieldName}' (${config.fileName}.bin) not found in zip`)
+      }
+    }
+  }
+
+  // ============================================================
+  // Packable field handling
+  // ============================================================
+
+  /**
+   * Get packable field types for this class.
+   * Subclasses override this to declare nested Packable fields.
+   * Returns a map of field names to their Packable subclass constructors.
+   */
+  protected static getPackableFieldTypes(): Record<string, typeof Packable> {
+    return {}
+  }
+
+  /**
+   * Get the set of packable field names
+   */
+  protected static getPackableFieldNames(): Set<string> {
+    return new Set(Object.keys(this.getPackableFieldTypes()))
+  }
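How a subclass might declare a nested packable field; `Scene` and `inner_mesh` are illustrative names (the `inner_mesh.zip` convention appears in the loader below), and the field resolves either from `packables/inner_mesh.zip` or, with a cache loader, from `metadata.packable_refs`:

```typescript
import { Mesh, Packable } from 'meshly'

class Scene extends Packable {
  declare inner_mesh?: Mesh

  protected static override getPackableFieldTypes(): Record<string, typeof Packable> {
    return { inner_mesh: Mesh }
  }
}

// const scene = await Scene.decode(zipBytes, cacheLoader) // cacheLoader is optional
```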
+
+  /**
+   * Decode packable fields from the zip or cache.
+   *
+   * Supports both embedded packables (in packables/ folder) and cached
+   * packables (referenced by SHA256 hash in metadata.packable_refs).
+   */
+  protected static async decodePackableFields(
+    zip: JSZip,
+    metadata: PackableMetadata,
+    data: Record<string, unknown>,
+    cacheLoader?: CacheLoader
+  ): Promise<void> {
+    const packableFieldTypes = this.getPackableFieldTypes()
+    const loadedFields = new Set<string>()
+
+    // First, try to load from cache using hash refs
+    if (cacheLoader && metadata.packable_refs) {
+      for (const [fieldName, hash] of Object.entries(metadata.packable_refs)) {
+        const PackableClass = packableFieldTypes[fieldName]
+        if (!PackableClass) continue
+
+        const cachedData = await cacheLoader(hash)
+        if (cachedData) {
+          // Use the specific subclass's decode method with cache support
+          data[fieldName] = await PackableClass.decode(cachedData, cacheLoader)
+          loadedFields.add(fieldName)
+        }
+      }
+    }
+
+    // Then load any embedded packables (for backward compatibility or no-cache case)
+    const packablesFolder = zip.folder("packables")
+    if (!packablesFolder) return
+
+    const packableFiles: string[] = []
+    packablesFolder.forEach((relativePath, file) => {
+      if (relativePath.endsWith(".zip") && !file.dir) {
+        packableFiles.push(relativePath)
+      }
+    })
+
+    for (const relativePath of packableFiles) {
+      // Extract field name: "inner_mesh.zip" -> "inner_mesh"
+      const fieldName = relativePath.slice(0, -4)
+
+      // Skip if already loaded from cache
+      if (loadedFields.has(fieldName)) continue
+
+      const PackableClass = packableFieldTypes[fieldName]
+      if (!PackableClass) continue
+
+      const file = packablesFolder.file(relativePath)
+      if (file) {
+        const encodedBytes = await file.async('arraybuffer')
+        data[fieldName] = await PackableClass.decode(encodedBytes, cacheLoader)
+      }
+    }
+  }
+
+  // ============================================================
+  // Standard array loading
+  // ============================================================
+
+  /**
+   * Load standard arrays from arrays/ folder
+   */
+  protected static async loadStandardArrays(
+    zip: JSZip,
+    data: Record<string, unknown>,
+    skipFields: Set<string>
+  ): Promise<void> {
+    const arraysFolder = zip.folder("arrays")
+    if (!arraysFolder) return
+
+    // Find all array directories
+    const arrayPaths = new Set<string>()
+    arraysFolder.forEach((relativePath) => {
+      const parts = relativePath.split("/")
+      if (parts.length >= 2) {
+        const dirPath = parts.slice(0, -1).join("/")
+        if (dirPath) {
+          arrayPaths.add(dirPath)
+        }
+      }
+    })
+
+    // Load and decode each array
+    for (const arrayPath of arrayPaths) {
+      const name = arrayPath.replace(/\//g, ".")
+
+      // Skip custom fields
+      const baseFieldName = name.split(".")[0]
+      if (skipFields.has(baseFieldName)) continue
+
+      const decoded = await ArrayUtils.loadArray(zip, name)
+
+      if (name.includes(".")) {
+        // Nested array - build nested structure
+        const parts = name.split(".")
+        let current = data
+        for (let i = 0; i < parts.length - 1; i++) {
+          const part = parts[i]
+          if (!current[part]) {
+            current[part] = {}
+          }
+          current = current[part] as Record<string, unknown>
+        }
+        current[parts[parts.length - 1]] = decoded
+      } else {
+        // Flat array
+        data[name] = decoded
+      }
+    }
+  }
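The dotted-name convention means the on-disk layout maps one-to-one onto nested properties of the decoded object; roughly, with illustrative array names:

```typescript
import { Packable } from 'meshly'

const zipBytes = await (await fetch('data.zip')).arrayBuffer() // 'data.zip' is an assumption
const decoded = await Packable.decode(zipBytes)
// arrays/normals/*                -> decoded.normals                 (flat)
// arrays/markerIndices/boundary/* -> decoded.markerIndices.boundary  (nested object)
```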
 
   /**
-   * Load a Packable from a zip file
+   * Decode a Packable from zip data.
+   *
+   * @param zipData - Zip file bytes
+   * @param cacheLoader - Optional function to load cached packables by SHA256 hash.
+   *                      When provided and metadata contains packable_refs,
+   *                      nested packables are loaded from cache.
+   *
+   * Subclasses can override this to handle custom field decoding.
    */
-  static async loadFromZip(
-    zipData: ArrayBuffer | Uint8Array
+  static async decode(
+    zipData: ArrayBuffer | Uint8Array,
+    cacheLoader?: CacheLoader
   ): Promise<Packable<TData>> {
     const zip = await JSZip.loadAsync(zipData)
     const metadata = await Packable.loadMetadata(zip)
+    const customFieldNames = this.getCustomFieldNames()
+    const packableFieldNames = this.getPackableFieldNames()
+    const skipFields = new Set([...customFieldNames, ...packableFieldNames])
+
+    const data: Record<string, unknown> = {}
+
+    // Decode custom fields first
+    await this.decodeCustomFields(zip, metadata, data)
 
-    // Load and decode all arrays (handles both flat and nested)
-    const data = await ZipUtils.loadArrays(zip)
+    // Load standard arrays
+    await this.loadStandardArrays(zip, data, skipFields)
+
+    // Decode packable fields
+    await this.decodePackableFields(zip, metadata, data, cacheLoader)
 
     // Merge non-array fields from metadata
     if (metadata.field_data) {
-      PackableUtils.mergeFieldData(data as Record<string, unknown>, metadata.field_data)
+      Packable._mergeFieldData(data, metadata.field_data)
     }
 
     return new Packable(data as TData)
   }
 
+  // ============================================================
+  // Private static helper methods
+  // ============================================================
+
+  /**
+   * Merge non-array field values into data object (in place).
+   */
+  protected static _mergeFieldData(
+    data: Record<string, unknown>,
+    fieldData: Record<string, unknown>
+  ): void {
+    for (const [key, value] of Object.entries(fieldData)) {
+      // Skip Python BaseModel reconstruction metadata
+      if (key === "__model_class__" || key === "__model_module__") {
+        continue
+      }
+
+      const existing = data[key]
+
+      if (
+        existing &&
+        typeof existing === "object" &&
+        typeof value === "object" &&
+        !ArrayBuffer.isView(existing) &&
+        !ArrayBuffer.isView(value)
+      ) {
+        // Both are objects - merge recursively
+        Packable._mergeFieldData(
+          existing as Record<string, unknown>,
+          value as Record<string, unknown>
+        )
+      } else if (typeof value === "object" && value !== null && !ArrayBuffer.isView(value)) {
+        // Value is an object that might contain Python metadata - clean it
+        data[key] = Packable._stripModelMetadata(value as Record<string, unknown>)
+      } else {
+        data[key] = value
+      }
+    }
+  }
+
+  /**
+   * Recursively strip Python BaseModel metadata keys from an object.
+   */
+  private static _stripModelMetadata(obj: Record<string, unknown>): Record<string, unknown> {
+    const result: Record<string, unknown> = {}
+    for (const [key, value] of Object.entries(obj)) {
+      if (key === "__model_class__" || key === "__model_module__") {
+        continue
+      }
+      if (typeof value === "object" && value !== null && !ArrayBuffer.isView(value)) {
+        result[key] = Packable._stripModelMetadata(value as Record<string, unknown>)
+      } else {
+        result[key] = value
+      }
+    }
+    return result
+  }
+
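For large files where only one array is needed, `loadArray` avoids decoding the rest of the object; the names below come from the JSDoc examples and must exist in the zip:

```typescript
import { Mesh } from 'meshly'

const buf = await (await fetch('mesh.zip')).arrayBuffer()
const normals = await Mesh.loadArray(buf, 'normals')       // flat array
const inlet = await Mesh.loadArray(buf, 'markers.inlet')   // nested array
```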
   /**
    * Load a single array from a zip file without loading the entire object.
-   *
-   * Useful for large files where you only need one array.
-   *
-   * @param zipData - Zip file as ArrayBuffer or Uint8Array
-   * @param name - Array name (e.g., "normals" or "markers.inlet")
-   * @returns Decoded typed array
-   * @throws Error if array not found in zip
-   *
-   * @example
-   * const normals = await Mesh.loadArray(zipData, "normals")
    */
   static async loadArray(
     zipData: ArrayBuffer | Uint8Array,
     name: string
   ): Promise<TypedArray> {
     const zip = await JSZip.loadAsync(zipData)
-    return ZipUtils.loadArray(zip, name)
+    return ArrayUtils.loadArray(zip, name)
   }
 }
diff --git a/typescript/src/utils/index.ts b/typescript/src/utils/index.ts
deleted file mode 100644
index 8484829..0000000
--- a/typescript/src/utils/index.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-/**
- * Meshly Utilities
- */
-
-export { PackableUtils } from './packableUtils'
-export { ZipUtils } from './zipUtils'
diff --git a/typescript/src/utils/packableUtils.ts b/typescript/src/utils/packableUtils.ts
deleted file mode 100644
index cc9abb0..0000000
--- a/typescript/src/utils/packableUtils.ts
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Utilities for Packable serialization and field data handling.
- */
-
-/**
- * Static utilities for Packable field data merging.
- */
-export class PackableUtils {
-  /**
-   * Merge non-array field values into data object (in place).
-   *
-   * Values like `dim: 2` from metadata.fieldData get merged in.
-   * Existing object structures are merged recursively.
-   * Python BaseModel metadata keys (__model_class__, __model_module__) are stripped.
-   *
-   * @param data - Target object to merge into (modified in place)
-   * @param fieldData - Field values from metadata
-   */
-  static mergeFieldData(
-    data: Record<string, unknown>,
-    fieldData: Record<string, unknown>
-  ): void {
-    for (const [key, value] of Object.entries(fieldData)) {
-      // Skip Python BaseModel reconstruction metadata
-      if (key === "__model_class__" || key === "__model_module__") {
-        continue
-      }
-
-      const existing = data[key]
-
-      if (
-        existing &&
-        typeof existing === "object" &&
-        typeof value === "object" &&
-        !ArrayBuffer.isView(existing) &&
-        !ArrayBuffer.isView(value)
-      ) {
-        // Both are objects - merge recursively
-        PackableUtils.mergeFieldData(
-          existing as Record<string, unknown>,
-          value as Record<string, unknown>
-        )
-      } else if (typeof value === "object" && value !== null && !ArrayBuffer.isView(value)) {
-        // Value is an object that might contain Python metadata - clean it
-        data[key] = PackableUtils.stripModelMetadata(value as Record<string, unknown>)
-      } else {
-        data[key] = value
-      }
-    }
-  }
-
-  /**
-   * Recursively strip Python BaseModel metadata keys from an object.
-   */
-  static stripModelMetadata(obj: Record<string, unknown>): Record<string, unknown> {
-    const result: Record<string, unknown> = {}
-    for (const [key, value] of Object.entries(obj)) {
-      if (key === "__model_class__" || key === "__model_module__") {
-        continue
-      }
-      if (typeof value === "object" && value !== null && !ArrayBuffer.isView(value)) {
-        result[key] = PackableUtils.stripModelMetadata(value as Record<string, unknown>)
-      } else {
-        result[key] = value
-      }
-    }
-    return result
-  }
-}
diff --git a/typescript/src/utils/zipUtils.ts b/typescript/src/utils/zipUtils.ts
deleted file mode 100644
index 2974629..0000000
--- a/typescript/src/utils/zipUtils.ts
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Utility class for zip file operations used by Packable and Mesh.
- */
-
-import JSZip from "jszip"
-import { ArrayMetadata, ArrayUtils, TypedArray } from "../array"
-
-/**
- * Static utility methods for zip file operations.
- */
-export class ZipUtils {
-  /**
-   * Load and decode a single array from a zip file.
-   *
-   * @param zip - JSZip instance to read from
-   * @param name - Array name (e.g., "normals" or "markerIndices.boundary")
-   * @returns Decoded typed array
-   * @throws Error if array not found in zip
-   */
-  static async loadArray(
-    zip: JSZip,
-    name: string
-  ): Promise<TypedArray> {
-    // Convert dotted name to path
-    const arrayPath = name.replace(/\./g, "/")
-    const arraysFolder = zip.folder("arrays")
-
-    if (!arraysFolder) {
-      throw new Error(`Array '${name}' not found in zip file`)
-    }
-
-    const metadataFile = arraysFolder.file(`${arrayPath}/metadata.json`)
-    const arrayFile = arraysFolder.file(`${arrayPath}/array.bin`)
-
-    if (!metadataFile || !arrayFile) {
-      throw new Error(`Array '${name}' not found in zip file`)
-    }
-
-    const metadataText = await metadataFile.async("text")
-    const metadata: ArrayMetadata = JSON.parse(metadataText)
-    const data = await arrayFile.async("uint8array")
-
-    return ArrayUtils.decodeArray(data, metadata)
-  }
-
-  /**
-   * Load and decode all arrays from a zip file's arrays/ folder.
-   *
-   * Handles both flat arrays (e.g., "vertices") and nested arrays
-   * (e.g., "markerIndices.boundary" becomes { markerIndices: { boundary: ... } })
-   *
-   * @param zip - JSZip instance to read from
-   * @returns Decoded arrays organized into proper structure
-   */
-  static async loadArrays(zip: JSZip): Promise<Record<string, unknown>> {
-    const result: Record<string, unknown> = {}
-    const arraysFolder = zip.folder("arrays")
-
-    if (!arraysFolder) {
-      return result
-    }
-
-    // Find all array directories
-    const arrayPaths = new Set<string>()
-    arraysFolder.forEach((relativePath) => {
-      const parts = relativePath.split("/")
-      if (parts.length >= 2) {
-        const dirPath = parts.slice(0, -1).join("/")
-        if (dirPath) {
-          arrayPaths.add(dirPath)
-        }
-      }
-    })
-
-    // Load and decode each array
-    for (const arrayPath of arrayPaths) {
-      // Convert path to dotted name (e.g., "markerIndices/boundary" -> "markerIndices.boundary")
-      const name = arrayPath.replace(/\//g, ".")
-
-      const decoded = await ZipUtils.loadArray(zip, name)
-
-      if (name.includes(".")) {
-        // Nested array - build nested structure
-        const parts = name.split(".")
-        let current = result
-
-        for (let i = 0; i < parts.length - 1; i++) {
-          const part = parts[i]
-          if (!current[part]) {
-            current[part] = {}
-          }
-          current = current[part] as Record<string, unknown>
-        }
-        current[parts[parts.length - 1]] = decoded
-      } else {
-        // Flat array
-        result[name] = decoded
-      }
-    }
-
-    return result
-  }
-}
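With `ZipUtils` and `PackableUtils` removed from the public surface, existing callers move to the static methods on `Packable`/`Mesh`. A rough migration sketch:

```typescript
import { Mesh } from 'meshly'

// Before                                    After
// await Mesh.loadFromZip(zipData)           await Mesh.decode(zipData)
// await ZipUtils.loadArray(zip, 'normals')  await Mesh.loadArray(zipData, 'normals')
// await ZipUtils.loadArrays(zip)            handled internally by Packable.decode()
// PackableUtils.mergeFieldData(...)         internal (protected Packable._mergeFieldData)
const zipData = await (await fetch('mesh.zip')).arrayBuffer()
const mesh = await Mesh.decode(zipData)
```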