14 changes: 14 additions & 0 deletions backends/cadence/aot/quantizer/quantizer.py
@@ -372,3 +372,17 @@ def __init__(self, quantizers: Optional[list[Quantizer]] = None) -> None:
        # Add 16-bit quantizers for LinearPattern
        quantizers.append(CadenceAtenQuantizer(LinearPattern(), qconfig_A16))
        super().__init__(quantizers)


class CadenceWith16BitConvActivationsQuantizer(CadenceQuantizer):
    """
    Quantizer that applies A16 (16-bit activation) quantization to conv patterns.
    """

    def __init__(self, quantizers: Optional[list[Quantizer]] = None) -> None:
        if quantizers is None:
            quantizers = []
        # Add 16-bit quantizers for Conv patterns
        quantizers.append(CadenceAtenQuantizer(Conv1dPattern(), qconfig_A16))
        quantizers.append(CadenceAtenQuantizer(Conv2dPattern(), qconfig_A16))
        super().__init__(quantizers)
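Note (not part of this diff): a minimal usage sketch of the new CadenceWith16BitConvActivationsQuantizer, assuming the standard pt2e prepare/convert flow. The torch.export.export_for_training capture step, the toy Conv2d model, and the prepare_pt2e/convert_pt2e import path (which varies across releases) are assumptions; the Cadence-specific lowering that follows convert_pt2e is omitted.

# Illustrative sketch only; not part of this change.
import torch
from executorch.backends.cadence.aot.quantizer.quantizer import (
    CadenceWith16BitConvActivationsQuantizer,
)
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

model = torch.nn.Conv2d(3, 8, kernel_size=3).eval()
inputs = (torch.randn(1, 3, 32, 32),)

# Capture the model to an ATen graph (the capture API differs across PyTorch versions).
gm = torch.export.export_for_training(model, inputs).module()

# Conv activations are annotated as 16-bit, weights stay 8-bit (W8A16).
quantizer = CadenceWith16BitConvActivationsQuantizer()
prepared = prepare_pt2e(gm, quantizer)
prepared(*inputs)  # calibration pass
converted = convert_pt2e(prepared)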
49 changes: 49 additions & 0 deletions backends/cadence/hifi/operators/op_quantized_conv2d_nchw_out.cpp
@@ -9,6 +9,7 @@
#include <executorch/backends/cadence/hifi/kernels/kernels.h>
#include <executorch/backends/cadence/hifi/operators/operators.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_conv2d.h>

#define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1)))

@@ -532,6 +533,30 @@ void quantized_conv2d_nchw_out(
__ET_UNUSED const Tensor& out_multiplier,
__ET_UNUSED const Tensor& out_shift,
Tensor& out) {
// Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
input.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_conv2d_nchw_out(
ctx,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
in_zero_point,
weight_zero_point,
bias_scale,
output_scale,
output_zero_point,
out_multiplier,
out_shift,
out);
return;
}

const float bias_scale_float = bias_scale.const_data_ptr<float>()[0];
const int32_t weight_zero_point_int =
weight_zero_point.const_data_ptr<int32_t>()[0];
@@ -596,6 +621,30 @@ void quantized_conv2d_nchw_per_tensor_out(
__ET_UNUSED int64_t out_multiplier,
__ET_UNUSED int64_t out_shift,
Tensor& out) {
// Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
input.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_conv2d_nchw_per_tensor_out(
ctx,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
in_zero_point,
weight_zero_point,
bias_scale,
output_scale,
output_zero_point,
out_multiplier,
out_shift,
out);
return;
}

bool optimized = 0;

if ((input.scalar_type() == ScalarType::Char) ||
53 changes: 50 additions & 3 deletions backends/cadence/hifi/operators/op_quantized_conv2d_nhwc_out.cpp
@@ -9,6 +9,7 @@
#include <executorch/backends/cadence/hifi/kernels/kernels.h>
#include <executorch/backends/cadence/hifi/operators/operators.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_conv2d.h>

#define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1)))

@@ -435,9 +436,32 @@ void quantized_conv2d_nhwc_out(
const Tensor& bias_scale,
double output_scale,
int64_t output_zero_point,
__ET_UNUSED const Tensor& out_multiplier,
__ET_UNUSED const Tensor& out_shift,
const Tensor& out_multiplier,
const Tensor& out_shift,
Tensor& out) {
// Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
input.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_conv2d_nhwc_out(
ctx,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
in_zero_point,
weight_zero_point,
bias_scale,
output_scale,
output_zero_point,
out_multiplier,
out_shift,
out);
return;
}
const float bias_scale_float = bias_scale.const_data_ptr<float>()[0];
const int32_t weight_zero_point_int =
weight_zero_point.const_data_ptr<int32_t>()[0];
@@ -502,8 +526,31 @@ void quantized_conv2d_nhwc_per_tensor_out(
__ET_UNUSED int64_t out_multiplier,
__ET_UNUSED int64_t out_shift,
Tensor& out) {
bool optimized = 0;
// Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
input.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_conv2d_nhwc_per_tensor_out(
ctx,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
in_zero_point,
weight_zero_point,
bias_scale,
output_scale,
output_zero_point,
out_multiplier,
out_shift,
out);
return;
}

bool optimized = 0;
if ((input.scalar_type() == ScalarType::Char) ||
(input.scalar_type() == ScalarType::Byte))
optimized = 1;
6 changes: 4 additions & 2 deletions backends/cadence/hifi/operators/targets.bzl
@@ -65,7 +65,6 @@ OPERATORS = [
"ne",
"permute_copy",
"pow",
"quantized_conv2d_nchw_out",
"quantized_conv2d_nchw_asym8sxsym8s_asym8s_per_tensor_out",
"quantized_conv2d_nchw_asym8uxsym8u_asym8u_per_tensor_out",
"quantized_conv1d_ncl_asym8sxsym8s_asym8s_per_tensor_out",
@@ -74,7 +73,6 @@ OPERATORS = [
"quantized_conv2d_nchw_depthwise_asym8uxsym8u_asym8u_per_tensor_out",
"quantized_conv2d_nchw_dilated_asym8sxsym8s_asym8s_per_tensor_out",
"quantized_conv2d_nchw_dilated_asym8uxsym8u_asym8u_per_tensor_out",
"quantized_conv2d_nhwc_out",
"quantized_conv2d_nhwc_asym8sxsym8s_asym8s_per_tensor_out",
"quantized_conv2d_nhwc_asym8uxsym8u_asym8u_per_tensor_out",
"quantized_conv1d_nlc_asym8sxsym8s_asym8s_per_tensor_out",
@@ -125,3 +123,7 @@ def define_common_targets():
# quantized_linear_out and quantized_linear_per_tensor_out need an additional dependency for int16 support
define_operator("quantized_linear_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_linear"])
define_operator("quantized_linear_per_tensor_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_linear"])

# quantized_conv2d_nchw_out and quantized_conv2d_nhwc_out need an additional dependency for int16 support
define_operator("quantized_conv2d_nchw_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_conv2d"])
define_operator("quantized_conv2d_nhwc_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_conv2d"])