From 7d5c12f6e5c9758ec2abda6556bba89e15288d3a Mon Sep 17 00:00:00 2001
From: Olia
Date: Wed, 20 Aug 2025 17:16:12 +0200
Subject: [PATCH 1/2] comparison operator update

---
 tmva/sofie/inc/TMVA/ROperator_Comparision.hxx | 161 +++++-------------
 1 file changed, 47 insertions(+), 114 deletions(-)

diff --git a/tmva/sofie/inc/TMVA/ROperator_Comparision.hxx b/tmva/sofie/inc/TMVA/ROperator_Comparision.hxx
index 0d365ae517de5..5a1e1729b3362 100644
--- a/tmva/sofie/inc/TMVA/ROperator_Comparision.hxx
+++ b/tmva/sofie/inc/TMVA/ROperator_Comparision.hxx
@@ -77,7 +77,7 @@ public:
    ROperator_Comparision(const std::string & nameX1, const std::string & nameX2, const std::string & nameY):
       fNX1(UTILITY::Clean_name(nameX1)), fNX2(UTILITY::Clean_name(nameX2)), fNY(UTILITY::Clean_name(nameY)){
          fInputTensorNames = { fNX1, fNX2 };
-
+         // output will be a boolean vector so should not be considered for memory optimized pool
          fOutputTensorNames = { fNY };
       }
 
@@ -113,6 +113,8 @@ public:
          fShapeX2 = model.GetTensorShape(fNX2);
          fDimShapeX2 = ConvertShapeToDim(fShapeX2);
       }
+      fShapeX1 = model.GetTensorShape(fNX1);
+      fShapeX2 = model.GetTensorShape(fNX2);
       fTensorType1 = model.GetTensorType(fNX1);
       fTensorType2 = model.GetTensorType(fNX2);
       bool broadcast = !UTILITY::AreSameShape(fShapeX1, fShapeX2);
@@ -131,10 +133,6 @@ public:
                // Update the data and the shape of A
               model.UpdateInitializedTensor(fNX1, model.GetTensorType(fNX1), fShapeY, broadcastedData);
               fShapeX1 = fShapeY;
-            } else {
-               // Add an intermediate tensor for broadcasting A
-               fNBroadcastedX1 = "Broadcasted" + fNX1;
-               model.AddIntermediateTensor(fNBroadcastedX1, model.GetTensorType(fNX1), fShapeY);
            }
         }
         // Broadcast B to Y
@@ -147,105 +145,28 @@ public:
               // Update the data and the shape of B
               model.UpdateInitializedTensor(fNX2, model.GetTensorType(fNX2), fShapeY, broadcastedData);
               fShapeX2 = fShapeY;
-            } else {
-               // Add an intermediate tensor for broadcasting B
-               fNBroadcastedX2 = "Broadcasted" + fNX2;
-               model.AddIntermediateTensor(fNBroadcastedX2, model.GetTensorType(fNX2), fShapeY);
            }
         }
      } else {
         fShapeY = fShapeX1;
      }
      // case of constant tensors
-      T * data1 = nullptr;
-      T * data2 = nullptr;
-      std::vector<Dim> shapeData1;
-      std::vector<Dim> shapeData2;
-      size_t length = ConvertShapeToLength(fShapeY);
-      bool * outData = new bool[length];
-      if (model.IsInitializedTensor(fNX1)) {
-         data1 = static_cast<T*>(model.GetInitializedTensorData(fNX1).get());
-      } else if (model.IsShapeTensor(fNX1)) {
-         shapeData1 = model.GetShapeTensorValues(fNX1);
-      }
-      if (model.IsInitializedTensor(fNX2)) {
-         data2 = static_cast<T*>(model.GetInitializedTensorData(fNX2).get());
-      } else if (model.IsShapeTensor(fNX2)) {
-         shapeData2 = model.GetShapeTensorValues(fNX2);
-      }
-      if (data1 && data2) {
+      if (model.IsInitializedTensor(fNX1) && model.IsInitializedTensor(fNX2) ) {
         fIsOutputConstant = true;
+         auto data1 = static_cast<T*>(model.GetInitializedTensorData(fNX1).get());
+         auto data2 = static_cast<T*>(model.GetInitializedTensorData(fNX2).get());
+         size_t length = ConvertShapeToLength(fShapeY);
+         bool * outData = new bool[length];
         for (size_t i = 0; i < length; i++)
            outData[i] = ComparisionTrait<T>::Result(data1[i], data2[i]);
         model.AddConstantTensor(fNY, fShapeY, outData);
         if (model.Verbose())
            std::cout << ComparisionTrait<T>::Name() << " op ---> " << fNY << " " << ConvertShapeToString(fShapeY) << " : "
                      << ConvertValuesToString(length,outData) << std::endl;
-      } else if ((data1 || !shapeData1.empty()) && (data2 || !shapeData2.empty())) {
-         fIsOutputConstant = true;
-         if (data1 && !data2) {
-            // data 1 is constant and data2 is shape
-            for (size_t i = 0; i < length; i++) {
-               if (shapeData2[i].isParam) {
-                  if (shapeData2[i].dim == size_t(-1) || data1[i] > 0) {
-                     fIsOutputConstant = false;
-                     break;
-                  } else {
-                     // assume a comparison is done with .dim = 0
-                     shapeData2[i].dim = 0;
-                  }
-               }
-               outData[i] = ComparisionTrait<T>::Result(data1[i], static_cast<T>(shapeData2[i].dim));
-            }
-         } else if (!data1 && data2) {
-            // data 1 is shape and dat2 is constant
-            for (size_t i = 0; i < length; i++) {
-               if (shapeData1[i].isParam) {
-                  if (shapeData1[i].dim == size_t(-1) || data2[i] > 0) {
-                     fIsOutputConstant = false;
-                     break;
-                  } else {
-                     // assume a comparison is done with .dim = 0
-                     shapeData1[i].dim = 0;
-                  }
-               }
-               outData[i] = ComparisionTrait<T>::Result(static_cast<T>(shapeData1[i].dim), data2[i]);
-            }
-         } else if (!shapeData1.empty() && !shapeData2.empty() ) {
-            // both data1 and data2 are shape tensors
-            for (size_t i = 0; i < length; i++) {
-               if (!shapeData1[i].isParam && !shapeData2[i].isParam) {
-                  outData[i] = ComparisionTrait<T>::Result(shapeData1[i].dim, shapeData2[i].dim);
-               }
-               else if (shapeData1[i].isParam && shapeData2[i].isParam) {
-                  if (shapeData1[i].param == shapeData2[i].param)
-                     outData[i] = ComparisionTrait<T>::Result(1,1); // comparison of two equal value
-                  else {
-                     fIsOutputConstant = false;
-                     break;
-                  }
-               }
-               else {
-                  fIsOutputConstant = false;
-                  break;
-               }
-            }
-         }
-         if (fIsOutputConstant) {
-            model.AddConstantTensor(fNY, fShapeY, outData);
-            if (model.Verbose())
-               std::cout << ComparisionTrait<T>::Name() << " op ---> " << fNY << " " << ConvertShapeToString(fShapeY) << " : "
-                         << ConvertValuesToString(length,outData) << " (constant) " << std::endl;
-
-         }
-      }
-      delete [] outData;
-      if (!fIsOutputConstant) {
+         delete [] outData;
+      } else {
         model.AddIntermediateTensor(fNY, ETensorType::BOOL , fShapeY);
-         if (model.Verbose())
-            std::cout << ComparisionTrait<T>::Name() << " op ---> " << fNY << " " << ConvertShapeToString(fShapeY) << std::endl;
      }
-
      // check if this is not output operators to add a specific line for definining the tensor_xxx variable
      const auto & outputTensorNames = model.GetOutputTensorNames();
      fIsModelOutput = false;
@@ -257,39 +178,51 @@ public:
      if (fIsOutputConstant) return "";
      opName = "op_" + opName;
-     if (fShapeY.empty()) {
+      if (fShapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Comparision Op called to Generate without being initialized first");
      }
      std::stringstream out;
      out << SP << "\n//------ " << ComparisionTrait<T>::Name() << " " << opName << " --> " << ConvertShapeToString(fShapeY) << "\n";
      size_t length = ConvertShapeToLength(fShapeY);
-      // Broadcast A if it's uninitialized
-      if (!fNBroadcastedX1.empty()) {
-         std::string type1 = ConvertTypeToString(fTensorType1);
-         out << SP << "// Broadcasting uninitialized tensor " << fNX1 << "\n";
-         out << SP << "{\n";
-         out << SP << SP << type1 << "* data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << type1 << ">(tensor_" << fNX1 << ", " << ConvertShapeToString(fShapeX1) << ", " << ConvertShapeToString(fShapeY) << ");\n";
-         out << SP << SP << "std::copy(data, data + " << length << ", tensor_" << fNBroadcastedX1 << ");\n";
-         out << SP << SP << "delete[] data;\n";
-         out << SP << "}\n";
+
+      auto stridesX1 = UTILITY::ComputeStrideFromShape(fShapeX1);
+      auto stridesX2 = UTILITY::ComputeStrideFromShape(fShapeX2);
+      auto stridesY = UTILITY::ComputeStrideFromShape(fShapeY);
+
+      std::string compute_idx_X1, compute_idx_X2, compute_idx_Y;
+      if (std::all_of(fShapeX1.begin(), fShapeX1.end(), [](size_t x) { return x == 1; })){
+         compute_idx_X1 = "0";
+      } else {
+         for(size_t i = 0; i
-         out << SP << SP << type2 << "* data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << type2 << ">(tensor_" << fNX2 << ", " << ConvertShapeToString(fShapeX2) << ", " << ConvertShapeToString(fShapeY) << ");\n";
-         out << SP << SP << "std::copy(data, data + " << length << ", tensor_" << fNBroadcastedX2 << ");\n";
-         out << SP << SP << "delete[] data;\n";
-         out << SP << "}\n";
+      if (std::all_of(fShapeX2.begin(), fShapeX2.end(), [](size_t x) { return x == 1; })){
+         compute_idx_X2 = "0";
+      } else {
+         for(size_t i = 0; i & tensor_" << fNY << " = fTensor_" << fNY << ";\n";

From b6041d845d3d852e56ae6be4669f08c3e95fb04c Mon Sep 17 00:00:00 2001
From: Olia
Date: Mon, 25 Aug 2025 14:51:31 +0200
Subject: [PATCH 2/2] comparison operator tests

---
 tmva/sofie/test/TestCustomModelsFromONNX.cxx   | 86 +++++++++++++++++++
 .../input_models/Comparison_broadcast.onnx     | 32 +++++++
 .../input_models/Comparison_broadcast_3d.onnx  | 36 ++++++++
 3 files changed, 154 insertions(+)
 create mode 100644 tmva/sofie/test/input_models/Comparison_broadcast.onnx
 create mode 100644 tmva/sofie/test/input_models/Comparison_broadcast_3d.onnx

diff --git a/tmva/sofie/test/TestCustomModelsFromONNX.cxx b/tmva/sofie/test/TestCustomModelsFromONNX.cxx
index 5b77caf2aed1d..9b384c2b801f5 100644
--- a/tmva/sofie/test/TestCustomModelsFromONNX.cxx
+++ b/tmva/sofie/test/TestCustomModelsFromONNX.cxx
@@ -41,6 +41,10 @@
 #include "Shape_FromONNX.hxx"
 #include "input_models/references/Shape.ref.hxx"
 
+#include "Comparison_broadcast_FromONNX.hxx"
+
+#include "Comparison_broadcast_3d_FromONNX.hxx"
+
 #include "Constant_FromONNX.hxx"
 #include "input_models/references/Constant.ref.hxx"
 
@@ -3214,3 +3218,85 @@ TEST(ONNX, ScatterElements)
       EXPECT_LE(std::abs(output[i] - correct_output[i]), DEFAULT_TOLERANCE);
    }
 }
+
+TEST(ONNX, ComparisonBroadcast)
+{
+   // A shape [1, 4]
+   std::vector<float> input_A = {
+      0.0f, 1.0f, 2.0f, 3.0f
+   };
+
+   // B shape [4]
+   std::vector<float> input_B = { 4.0f, 4.0f, 2.0f, 2.0f };
+
+   // (A < B)
+   std::vector<bool> expected_output_less = { 1, 1, 0, 0 };
+
+   TMVA_SOFIE_Comparison_broadcast::Session s("Comparison_broadcast_FromONNX.dat");
+
+   std::vector<std::vector<bool>> all_outputs = s.infer(input_A.data(), input_B.data());
+   const std::vector<bool>& output_less = all_outputs[2];
+
+   EXPECT_EQ(output_less.size(), expected_output_less.size());
+
+   for (size_t i = 0; i < output_less.size(); ++i) {
+      EXPECT_EQ(output_less[i], expected_output_less[i]);
+   }
+}
+
+TEST(ONNX, ComparisonBroadcast3d)
+{
+
+   TMVA_SOFIE_Comparison_broadcast_3d::Session s("Comparison_broadcast_3d_FromONNX.dat");
+
+   std::vector<float> input_A = {
+      1.0f, 6.0f, 2.0f, 9.0f,
+      0.0f, 5.0f, 3.0f, 1.0f,
+      2.0f, 4.0f, 4.0f, 2.0f,
+      1.0f, 7.0f, 0.0f, 3.0f
+   };
+
+   std::vector<float> input_B = { 1.0f, 5.0f, 3.0f, 2.0f };
+
+   // (A > B)
+   // [
+   //   [[F, T, F, T], [F, F, F, F]],   -> {0,1,0,1, 0,0,0,0}
+   //   [[T, F, T, F], [F, T, F, T]]    -> {1,0,1,0, 0,1,0,1}
+   // ]
+   std::vector<bool> expected_greater = {
+      0, 1, 0, 1, 0, 0, 0, 0,
+      1, 0, 1, 0, 0, 1, 0, 1
+   };
+
+   // (A == B)
+   // [
+   //   [[T, F, F, F], [F, T, T, F]],   -> {1,0,0,0, 0,1,1,0}
+   //   [[F, F, F, T], [T, F, F, F]]    -> {0,0,0,1, 1,0,0,0}
+   // ]
+   std::vector<bool> expected_equal = {
+      1, 0, 0, 0, 0, 1, 1, 0,
+      0, 0, 0, 1, 1, 0, 0, 0
+   };
+
+   // (A < B)
+   // [
+   //   [[F, F, T, F], [T, F, F, T]],   -> {0,0,1,0, 1,0,0,1}
+   //   [[F, T, F, F], [F, F, T, F]]    -> {0,1,0,0, 0,0,1,0}
+   // ]
+   std::vector<bool> expected_less = {
+      0, 0, 1, 0, 1, 0, 0, 1,
+      0, 1, 0, 0, 0, 0, 1, 0
+   };
+
+   std::vector<std::vector<bool>> all_outputs = s.infer(input_A.data(), input_B.data());
+
+   ASSERT_EQ(all_outputs.size(), 3);
+
+   const std::vector<bool>& output_greater = all_outputs[0];
+   const std::vector<bool>& output_equal = all_outputs[1];
+   const std::vector<bool>& output_less = all_outputs[2];
+
+   ASSERT_EQ(output_greater, expected_greater);
+   ASSERT_EQ(output_equal, expected_equal);
+   ASSERT_EQ(output_less, expected_less);
+}
diff --git a/tmva/sofie/test/input_models/Comparison_broadcast.onnx b/tmva/sofie/test/input_models/Comparison_broadcast.onnx
new file mode 100644
index 0000000000000..4c08d5715ee1a
--- /dev/null
+++ b/tmva/sofie/test/input_models/Comparison_broadcast.onnx
@@ -0,0 +1,32 @@
+[32 lines of binary ONNX protobuf payload, not reproducible as text. The readable fragments show producer string "comparison_broadcast_demo" and graph "ComparisonOpsWithBroadcast" with float inputs A and B feeding three nodes (Greater, Equal and Less) whose boolean outputs are OutGreater, OutEqual and OutLess.]
\ No newline at end of file
diff --git a/tmva/sofie/test/input_models/Comparison_broadcast_3d.onnx b/tmva/sofie/test/input_models/Comparison_broadcast_3d.onnx
new file mode 100644
index 0000000000000..e75e4173a9cd1
--- /dev/null
+++ b/tmva/sofie/test/input_models/Comparison_broadcast_3d.onnx
@@ -0,0 +1,36 @@
+[36 lines of binary ONNX protobuf payload, not reproducible as text. The readable fragments show producer string "comparison_broadcast_demo" and graph "ComparisonOpsBroadcast" with float inputs A and B feeding the same three nodes (Greater, Equal and Less) with boolean outputs OutGreater, OutEqual and OutLess.]
\ No newline at end of file
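Note on the change in the first patch: the generated comparison kernel no longer materializes broadcast copies of its inputs through UTILITY::UnidirectionalBroadcast; for every output element it computes the offsets into the two (possibly smaller) input tensors directly from their strides. The stand-alone program below is only a sketch of that stride-based indexing idea, run on the data of the ComparisonBroadcast3d test; it is not the SOFIE-generated code, and the helper names ComputeStrides and CompareBroadcast are invented for the illustration.

#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

// Row-major strides for a shape, e.g. {2, 2, 4} -> {8, 4, 1}.
std::vector<size_t> ComputeStrides(const std::vector<size_t> &shape)
{
   std::vector<size_t> strides(shape.size(), 1);
   for (int i = static_cast<int>(shape.size()) - 2; i >= 0; --i)
      strides[i] = strides[i + 1] * shape[i + 1];
   return strides;
}

// Elementwise comparison with ONNX-style broadcasting. Both input shapes must
// already be padded on the left to the rank of shapeY (e.g. B [4] -> [1,1,4]).
std::vector<bool> CompareBroadcast(const std::vector<float> &a, const std::vector<size_t> &shapeA,
                                   const std::vector<float> &b, const std::vector<size_t> &shapeB,
                                   const std::vector<size_t> &shapeY,
                                   const std::function<bool(float, float)> &cmp)
{
   const auto stridesA = ComputeStrides(shapeA);
   const auto stridesB = ComputeStrides(shapeB);
   const auto stridesY = ComputeStrides(shapeY);

   size_t length = 1;
   for (size_t d : shapeY)
      length *= d;

   std::vector<bool> out(length);
   for (size_t id = 0; id < length; ++id) {
      size_t idxA = 0, idxB = 0;
      for (size_t dim = 0; dim < shapeY.size(); ++dim) {
         // Coordinate of this output element along dimension `dim`.
         size_t coord = (id / stridesY[dim]) % shapeY[dim];
         // Size-1 (broadcast) dimensions contribute nothing to the input offset.
         if (shapeA[dim] != 1)
            idxA += coord * stridesA[dim];
         if (shapeB[dim] != 1)
            idxB += coord * stridesB[dim];
      }
      out[id] = cmp(a[idxA], b[idxB]);
   }
   return out;
}

int main()
{
   // Same data as the ComparisonBroadcast3d test: A is [2,2,4], B is [4], padded to [1,1,4].
   std::vector<float> A = {1, 6, 2, 9, 0, 5, 3, 1, 2, 4, 4, 2, 1, 7, 0, 3};
   std::vector<float> B = {1, 5, 3, 2};

   auto greater = CompareBroadcast(A, {2, 2, 4}, B, {1, 1, 4}, {2, 2, 4},
                                   [](float x, float y) { return x > y; });
   for (bool v : greater)
      std::cout << v << ' '; // prints: 1 1 0 0 ... matching expected_greater above
   std::cout << '\n';
   return 0;
}

Because dimensions of size 1 add nothing to the input offset, the [4] tensor B behaves as if it had been tiled to [2,2,4] without ever being copied; running the sketch reproduces the expected_greater values hard-coded in the test, which is a quick way to cross-check the hand-written expectations.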