From 14b0ae4c5f3d4bd8fdf505728b3d63047c775047 Mon Sep 17 00:00:00 2001
From: Scott Roy <161522778+metascroy@users.noreply.github.com>
Date: Fri, 22 Aug 2025 15:06:54 -0700
Subject: [PATCH 1/5] Update coremltools

---
 backends/apple/coreml/scripts/install_requirements.sh | 2 +-
 pyproject.toml                                         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/backends/apple/coreml/scripts/install_requirements.sh b/backends/apple/coreml/scripts/install_requirements.sh
index e9f73105bcd..5ec1ea6a1de 100755
--- a/backends/apple/coreml/scripts/install_requirements.sh
+++ b/backends/apple/coreml/scripts/install_requirements.sh
@@ -12,7 +12,7 @@ SCRIPT_DIR_PATH="$(
 
 # TODO(jathu): remove the need to fetch coremltools to build deps for coreml_executor_runner.
 # Keep this version in sync with: pyproject.toml
-COREMLTOOLS_VERSION="8.3"
+COREMLTOOLS_VERSION="9.0b1"
 
 red=`tput setaf 1`
 green=`tput setaf 2`
diff --git a/pyproject.toml b/pyproject.toml
index 61448a849cf..1ab7ddcc150 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -70,7 +70,7 @@ dependencies=[
   # See also third-party/TARGETS for buck's typing-extensions version.
   "typing-extensions>=4.10.0",
   # Keep this version in sync with: ./backends/apple/coreml/scripts/install_requirements.sh
-  "coremltools==8.3; platform_system == 'Darwin' or platform_system == 'Linux'",
+  "coremltools==9.0b1; platform_system == 'Darwin' or platform_system == 'Linux'",
   # scikit-learn is used to support palettization in the coreml backend
   "scikit-learn==1.7.1",
   "hydra-core>=1.3.0",

From dd9a51276a1fc3925ab4bb4f853d7c84fe39e562 Mon Sep 17 00:00:00 2001
From: Scott Roy <161522778+metascroy@users.noreply.github.com>
Date: Fri, 22 Aug 2025 15:21:52 -0700
Subject: [PATCH 2/5] up

---
 backends/apple/coreml/compiler/torch_ops.py | 23 ---------------------
 1 file changed, 23 deletions(-)

diff --git a/backends/apple/coreml/compiler/torch_ops.py b/backends/apple/coreml/compiler/torch_ops.py
index 81306c9a2fd..41b263e2f03 100644
--- a/backends/apple/coreml/compiler/torch_ops.py
+++ b/backends/apple/coreml/compiler/torch_ops.py
@@ -14,11 +14,9 @@
 from coremltools.converters.mil.frontend import _utils
 from coremltools.converters.mil.frontend.torch.ops import (
     _get_inputs,
-    _get_kwinputs,
     NUM_TO_NUMPY_DTYPE,
     NUM_TO_TORCH_DTYPE,
     split,
-    to,
     transpose,
     unbind,
 )
@@ -26,7 +24,6 @@
     register_torch_op,
 )
 from coremltools.converters.mil.mil import types
-from executorch.exir.dim_order_utils import get_memory_format
 
 
 # https://github.com/apple/coremltools/pull/2556
@@ -47,26 +44,6 @@ def split_copy(context, node):
     split(context, node)
 
 
-@register_torch_op(
-    torch_alias=[
-        "dim_order_ops::_to_dim_order_copy",
-        "dim_order_ops._to_dim_order_copy",
-    ],
-    override=False,
-)
-def _to_dim_order_copy(context, node):
-    dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
-    node.kwinputs.pop("dim_order")
-
-    # In CoreML, dim_order.val will be an ndarray, so we convert it to a list
-    dim_order = [int(d) for d in dim_order.val]
-    memory_format = get_memory_format(dim_order)
-    assert (
-        memory_format == _torch.contiguous_format
-    ), "Only contiguous memory format is supported in CoreML"
-    to(context, node)
-
-
 # https://github.com/apple/coremltools/pull/2558
 @register_torch_op(
     torch_alias=["torchao::dequantize_affine", "torchao.dequantize_affine"],
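
A note on patch 2: the local _to_dim_order_copy converter is deleted here because coremltools 9.0 ships its own dim_order_ops translations (patch 5 below restores it as an fbcode-only fallback). The fallback's contiguity check rests on ExecuTorch's dim-order helper; the following is a minimal illustration of that helper, assuming an ExecuTorch install (the channels_last case is my own example, not part of the patch):

    import torch
    from executorch.exir.dim_order_utils import get_memory_format

    # The identity permutation denotes a contiguous layout; the NHWC-style
    # permutation [0, 2, 3, 1] denotes channels_last.
    print(get_memory_format([0, 1, 2, 3]) == torch.contiguous_format)  # True
    print(get_memory_format([0, 2, 3, 1]) == torch.channels_last)      # True

The deleted converter asserts the contiguous case and rejects everything else, matching its error message that only the contiguous memory format is supported in Core ML.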
From 0b68c70ad98ae964315a3da1e7f85f7ba41762b5 Mon Sep 17 00:00:00 2001
From: Scott Roy <161522778+metascroy@users.noreply.github.com>
Date: Sat, 23 Aug 2025 11:26:37 -0700
Subject: [PATCH 3/5] up

---
 backends/apple/coreml/compiler/torch_ops.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/backends/apple/coreml/compiler/torch_ops.py b/backends/apple/coreml/compiler/torch_ops.py
index 41b263e2f03..800a92f5d17 100644
--- a/backends/apple/coreml/compiler/torch_ops.py
+++ b/backends/apple/coreml/compiler/torch_ops.py
@@ -12,6 +12,10 @@
 import torch as _torch
 from coremltools import _logger
 from coremltools.converters.mil.frontend import _utils
+from coremltools.converters.mil.frontend.torch.dim_order_ops import (
+    _empty_dim_order,
+    _to_dim_order_copy,
+)
 from coremltools.converters.mil.frontend.torch.ops import (
     _get_inputs,
     NUM_TO_NUMPY_DTYPE,
@@ -44,6 +48,20 @@ def split_copy(context, node):
     split(context, node)
 
 
+# This is a temporary hack to register the alias "dim_order_ops._to_dim_order_copy",
+# which was missed by coremltools
+@register_torch_op(torch_alias=["dim_order_ops._to_dim_order_copy"], override=False)
+def _to_dim_order_copy_TMP_EXECUTORCH_ALIAS_HACK(context, node):
+    return _to_dim_order_copy(context, node)
+
+
+# This is a temporary hack to register the alias "dim_order_ops._empty_dim_order",
+# which was missed by coremltools
+@register_torch_op(torch_alias=["dim_order_ops._empty_dim_order"], override=False)
+def _empty_dim_order_TMP_EXECUTORCH_ALIAS_HACK(context, node):
+    return _empty_dim_order(context, node)
+
+
 # https://github.com/apple/coremltools/pull/2558
 @register_torch_op(
     torch_alias=["torchao::dequantize_affine", "torchao.dequantize_affine"],

From 82991b59d215a1324677860f48efd300dad8345b Mon Sep 17 00:00:00 2001
From: Scott Roy <161522778+metascroy@users.noreply.github.com>
Date: Sat, 23 Aug 2025 11:27:14 -0700
Subject: [PATCH 4/5] up

---
 backends/apple/coreml/compiler/torch_ops.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/backends/apple/coreml/compiler/torch_ops.py b/backends/apple/coreml/compiler/torch_ops.py
index 800a92f5d17..b4842c2cadb 100644
--- a/backends/apple/coreml/compiler/torch_ops.py
+++ b/backends/apple/coreml/compiler/torch_ops.py
@@ -52,14 +52,14 @@ def split_copy(context, node):
 # which was missed by coremltools
 @register_torch_op(torch_alias=["dim_order_ops._to_dim_order_copy"], override=False)
 def _to_dim_order_copy_TMP_EXECUTORCH_ALIAS_HACK(context, node):
-    return _to_dim_order_copy(context, node)
+    _to_dim_order_copy(context, node)
 
 
 # This is a temporary hack to register the alias "dim_order_ops._empty_dim_order",
 # which was missed by coremltools
 @register_torch_op(torch_alias=["dim_order_ops._empty_dim_order"], override=False)
 def _empty_dim_order_TMP_EXECUTORCH_ALIAS_HACK(context, node):
-    return _empty_dim_order(context, node)
+    _empty_dim_order(context, node)
 
 
 # https://github.com/apple/coremltools/pull/2558
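
A note on patches 3 and 4: the wrappers register the dotted alias spelling "dim_order_ops._to_dim_order_copy" (and likewise "dim_order_ops._empty_dim_order"), which the coremltools 9.0b1 registrations apparently missed, and simply delegate to the library's own translators. These ops originate in ExecuTorch's dim-order rewrite of the edge dialect; a small repro sketch, assuming an ExecuTorch install where dim-order mode is on by default (the module M is illustrative):

    import torch
    from torch.export import export
    from executorch.exir import to_edge

    class M(torch.nn.Module):
        def forward(self, x):
            # A dtype-converting .to() is rewritten from aten._to_copy into
            # dim_order_ops._to_dim_order_copy when lowering to the edge dialect.
            return x.to(dtype=torch.float16)

    edge = to_edge(export(M(), (torch.randn(2, 3),)))
    print(edge.exported_program().graph)  # expect a dim_order_ops._to_dim_order_copy node

Patch 4 then drops the return statements: coremltools translator functions record their outputs on the translation context, so the wrappers are called purely for that side effect.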
From 440b14e113e894701ee9e4ca259b3e0fb6f1a5c7 Mon Sep 17 00:00:00 2001
From: Scott Roy <161522778+metascroy@users.noreply.github.com>
Date: Sat, 23 Aug 2025 15:15:41 -0700
Subject: [PATCH 5/5] up

---
 backends/apple/coreml/compiler/torch_ops.py | 57 ++++++++++++++++-----
 1 file changed, 43 insertions(+), 14 deletions(-)

diff --git a/backends/apple/coreml/compiler/torch_ops.py b/backends/apple/coreml/compiler/torch_ops.py
index b4842c2cadb..e53670951e0 100644
--- a/backends/apple/coreml/compiler/torch_ops.py
+++ b/backends/apple/coreml/compiler/torch_ops.py
@@ -12,15 +12,13 @@
 import torch as _torch
 from coremltools import _logger
 from coremltools.converters.mil.frontend import _utils
-from coremltools.converters.mil.frontend.torch.dim_order_ops import (
-    _empty_dim_order,
-    _to_dim_order_copy,
-)
 from coremltools.converters.mil.frontend.torch.ops import (
     _get_inputs,
+    _get_kwinputs,
     NUM_TO_NUMPY_DTYPE,
     NUM_TO_TORCH_DTYPE,
     split,
+    to,
     transpose,
     unbind,
 )
@@ -28,6 +26,7 @@
     register_torch_op,
 )
 from coremltools.converters.mil.mil import types
+from executorch.exir.dim_order_utils import get_memory_format
 
 
 # https://github.com/apple/coremltools/pull/2556
@@ -48,18 +47,48 @@ def split_copy(context, node):
     split(context, node)
 
 
-# This is a temporary hack to register the alias "dim_order_ops._to_dim_order_copy",
-# which was missed by coremltools
-@register_torch_op(torch_alias=["dim_order_ops._to_dim_order_copy"], override=False)
-def _to_dim_order_copy_TMP_EXECUTORCH_ALIAS_HACK(context, node):
-    _to_dim_order_copy(context, node)
+def is_fbcode():
+    return not hasattr(_torch.version, "git_version")
+
+if not is_fbcode():
+    from coremltools.converters.mil.frontend.torch.dim_order_ops import (
+        _empty_dim_order,
+        _to_dim_order_copy,
+    )
 
 
-# This is a temporary hack to register the alias "dim_order_ops._empty_dim_order",
-# which was missed by coremltools
-@register_torch_op(torch_alias=["dim_order_ops._empty_dim_order"], override=False)
-def _empty_dim_order_TMP_EXECUTORCH_ALIAS_HACK(context, node):
-    _empty_dim_order(context, node)
+    # This is a temporary hack to register the alias "dim_order_ops._to_dim_order_copy",
+    # which was missed by coremltools
+    @register_torch_op(torch_alias=["dim_order_ops._to_dim_order_copy"], override=False)
+    def _to_dim_order_copy_TMP_EXECUTORCH_ALIAS_HACK(context, node):
+        _to_dim_order_copy(context, node)
+
+    # This is a temporary hack to register the alias "dim_order_ops._empty_dim_order",
+    # which was missed by coremltools
+    @register_torch_op(torch_alias=["dim_order_ops._empty_dim_order"], override=False)
+    def _empty_dim_order_TMP_EXECUTORCH_ALIAS_HACK(context, node):
+        _empty_dim_order(context, node)
+
+else:
+    # TODO: remove this case when fbcode updates to coremltools 9.0
+    @register_torch_op(
+        torch_alias=[
+            "dim_order_ops::_to_dim_order_copy",
+            "dim_order_ops._to_dim_order_copy",
+        ],
+        override=False,
+    )
+    def _to_dim_order_copy(context, node):
+        dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
+        node.kwinputs.pop("dim_order")
+
+        # In CoreML, dim_order.val will be an ndarray, so we convert it to a list
+        dim_order = [int(d) for d in dim_order.val]
+        memory_format = get_memory_format(dim_order)
+        assert (
+            memory_format == _torch.contiguous_format
+        ), "Only contiguous memory format is supported in CoreML"
+        to(context, node)
 
 
 # https://github.com/apple/coremltools/pull/2558
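
A note on patch 5: the old converter returns behind a build check so that fbcode, which per the TODO has not yet moved to coremltools 9.0 (whose torch.dim_order_ops module the conditional import requires), keeps working; OSS builds instead delegate to the coremltools translators via the alias wrappers. A standalone sketch of the detection heuristic, which treats the absence of the git_version attribute as a signal of a Meta-internal build:

    import torch

    def is_fbcode() -> bool:
        # OSS PyTorch wheels carry torch.version.git_version; fbcode builds
        # do not, so its absence is used as an fbcode signal.
        return not hasattr(torch.version, "git_version")

    print(is_fbcode())  # expected: False on a standard pip-installed torch

Gating at import time is what makes the OSS-only import safe: evaluating it unconditionally would raise ImportError on the older coremltools that fbcode still pins.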