189 changes: 146 additions & 43 deletions tmva/sofie/inc/TMVA/ROperator_BasicNary.hxx
@@ -23,10 +23,11 @@ struct NaryOperatorTraits<T, EBasicNaryOperator::Max> {
static const std::string Name() {return "Max";}
static std::string Op(const std::string& res, std::vector<std::string>& inputs) {
std::stringstream out;
out << "\t" << "\t" << res << " = " << inputs[0] << ";\n";
out << res << " = std::max({ " << inputs[0];
for (size_t i = 1; i < inputs.size(); i++) {
out << "\t" << "\t" << res << " = std::max(" << res << ", " << inputs[i] << ");\n";
out << ", " << inputs[i];
}
out << "});\n";
return out.str();
}
};
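For reference, a minimal sketch of the element-wise statement this trait now emits, next to the previous chained form (a, b, c and nary_max are placeholder names; the generator substitutes real tensor accesses):

#include <algorithm>

// One element-wise step of the generated inference code (placeholder names).
float nary_max(float a, float b, float c) {
    float res;
    // old emission: res = a; res = std::max(res, b); res = std::max(res, c);
    // new emission: a single call to the initializer-list overload of std::max
    res = std::max({ a, b, c });
    return res;
}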
@@ -36,10 +37,11 @@ struct NaryOperatorTraits<T, EBasicNaryOperator::Min> {
static const std::string Name() {return "Min";}
static std::string Op(const std::string& res, std::vector<std::string>& inputs) {
std::stringstream out;
out << "\t" << "\t" << res << " = " << inputs[0] << ";\n";
out << res << " = std::min({ " << inputs[0];
for (size_t i = 1; i < inputs.size(); i++) {
out << "\t" << "\t" << res << " = std::min(" << res << ", " << inputs[i] << ");\n";
out << ", " << inputs[i];
}
out << "});\n";
return out.str();
}
};
@@ -52,7 +54,7 @@ struct NaryOperatorTraits<float, EBasicNaryOperator::Mean> {
static const std::string Name() {return "Mean";}
static std::string Op(const std::string& res, std::vector<std::string>& inputs) {
std::stringstream out;
out << "\t" << "\t" << res << " = (" << inputs[0];
out << res << " = (" << inputs[0];
for (size_t i = 1; i < inputs.size(); i++) {
out << " + " << inputs[i];
}
@@ -66,7 +68,7 @@ struct NaryOperatorTraits<T, EBasicNaryOperator::Sum> {
static const std::string Name() {return "Sum";}
static std::string Op(const std::string& res, std::vector<std::string>& inputs) {
std::stringstream out;
out << "\t" << "\t" << res << " = " << inputs[0];
out << res << " = " << inputs[0];
for (size_t i = 1; i < inputs.size(); i++) {
out << " + " << inputs[i];
}
@@ -83,10 +85,11 @@ private:

std::vector<std::string> fNInputs;
std::string fNY;
std::vector<std::vector<size_t>> fShapeInputs;
std::vector<std::vector<Dim>> fShapeInputs;

std::vector<std::string> fNBroadcastedInputs;
std::vector<size_t> fShapeY;
std::vector<Dim> fDimShapeY;

bool fBroadcast = false;
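For readers outside SOFIE: the members above switch from concrete size_t shapes to symbolic Dim shapes. A minimal sketch of such a type, mirroring only the fields this diff uses (isParam, param, dim, GetVal()); the real class lives in SOFIE's common headers and is richer:

#include <cstddef>
#include <string>

struct Dim {
    bool isParam = false;   // true if the size is a named runtime parameter
    std::string param;      // parameter name, e.g. "bs" for a batch size
    size_t dim = 0;         // concrete value when known
    std::string GetVal() const { return isParam ? param : std::to_string(dim); }
};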

@@ -119,64 +122,164 @@ public:
}

void Initialize(RModel& model) override {
std::vector<std::vector<size_t>> inputShapes;
for (auto &it : fNInputs) {
if (!model.CheckIfTensorAlreadyExist(it)) {
throw std::runtime_error("TMVA SOFIE BasicNary Op Input Tensor " + it + " is not found in model");
}
fShapeInputs.push_back(model.GetTensorShape(it));
fShapeInputs.push_back(model.GetDimTensorShape(it));
if (fNInputs.size() > 2) {
if (model.IsDimInputTensor(it))
throw std::runtime_error("TMVA SOFIE BasicNary : supports only 2 inputs for dynamic tensors");
else
inputShapes.push_back(model.GetTensorShape(it));
}
}
// Find the common shape of the input tensors
fShapeY = UTILITY::MultidirectionalBroadcastShape(fShapeInputs);
model.AddIntermediateTensor(fNY, model.GetTensorType(fNInputs[0]), fShapeY);
// Broadcasting
size_t N = fNInputs.size();
fNBroadcastedInputs.reserve(N);
for (size_t i = 0; i < N; i++) {
if (!UTILITY::AreSameShape(model.GetTensorShape(fNInputs[i]), fShapeY)) {
fBroadcast = true;
std::string name = "Broadcasted" + fNInputs[i];
model.AddIntermediateTensor(name, model.GetTensorType(fNInputs[0]), fShapeY);
fNBroadcastedInputs.emplace_back("tensor_" + name);
} else {
fNBroadcastedInputs.emplace_back("tensor_" + fNInputs[i]);
if (fShapeInputs.size() > 2) {
// dynamic tensors are currently supported only for input lists of size 2, so static shapes are used here
auto shapeY = UTILITY::MultidirectionalBroadcastShape(inputShapes);
fDimShapeY = ConvertShapeToDim(shapeY);
} else if (fShapeInputs.size() == 2) {
auto ret = UTILITY::MultidirectionalBroadcastShape(fShapeInputs[0], fShapeInputs[1]);
// use the same code as in the binary operator (needs to be extended to input sizes > 2)
fBroadcast = ret.first;
fDimShapeY = ret.second;
// case where both shapes are parametric and MultidirectionalBroadcastShape returns the max of the two;
// this must be done before declaring the output tensor shape and the broadcasted ones
if (ret.first & 4) {
// check whether one of the parameters is an input dimension;
// define a helper function to find this
auto IsInputDimParam = [&](const std::string &p) {
auto inputNames = model.GetInputTensorNames();
for (auto &input : inputNames) {
for (auto &i_s : model.GetDimTensorShape(input)) {
if (i_s.isParam && i_s.param == p)
return true;
}
}
return false;
};
auto & shapeA = fShapeInputs[0];
auto & shapeB = fShapeInputs[1];
for (size_t i = 0; i < fDimShapeY.size(); i++) {
auto &s = fDimShapeY[i];
if (s.isParam && s.param.find("std::max") != std::string::npos) {
if (IsInputDimParam(shapeA[i].param)) {
// when the stored dim is 1, the input parameter is taken to be equal to 1, so use the other shape
if (shapeA[i].dim != 1)
s = shapeA[i];
else
s = shapeB[i];
} else if (IsInputDimParam(shapeB[i].param)) {
if (shapeB[i].dim != 1)
s = shapeB[i];
else
s = shapeA[i];
}
}
}
}
} else if (fShapeInputs.size() == 1) {
fDimShapeY = fShapeInputs[0];
}
if (!fShapeY.empty())
model.AddIntermediateTensor(fNY, model.GetTensorType(fNInputs[0]), fShapeY);
else
model.AddIntermediateTensor(fNY, model.GetTensorType(fNInputs[0]), fDimShapeY);


fType = ConvertTypeToString(model.GetTensorType(fNInputs[0]));

if (model.Verbose()) {
std::cout << NaryOperatorTraits<T, Op>::Name() << " : ";
if (fNInputs.size() == 2)
std::cout << ConvertShapeToString(fShapeInputs[0]) << " , "
<< ConvertShapeToString(fShapeInputs[1]);
std::cout << " --> " << ConvertShapeToString(fDimShapeY) << std::endl;
}
}
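For the fully concrete case, the common shape computed above follows the standard ONNX/NumPy multidirectional-broadcast rule. A standalone sketch under that assumption (hypothetical helper, not SOFIE's actual API):

#include <algorithm>
#include <cstddef>
#include <stdexcept>
#include <vector>

// Right-align all shapes and take the per-axis maximum; a mismatch where
// neither size is 1 is not broadcastable.
std::vector<size_t> BroadcastShape(const std::vector<std::vector<size_t>>& shapes) {
    size_t rank = 0;
    for (const auto& s : shapes) rank = std::max(rank, s.size());
    std::vector<size_t> out(rank, 1);
    for (const auto& s : shapes) {
        size_t offset = rank - s.size();
        for (size_t i = 0; i < s.size(); i++) {
            if (s[i] != 1 && out[offset + i] != 1 && s[i] != out[offset + i])
                throw std::runtime_error("shapes are not broadcastable");
            out[offset + i] = std::max(out[offset + i], s[i]);
        }
    }
    return out;  // e.g. {2,1,3} and {3} broadcast to {2,1,3}
}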

std::string Generate(std::string OpName) override {
OpName = "op_" + OpName;
if (fShapeY.empty()) {
if (fDimShapeY.empty()) {
throw std::runtime_error("TMVA SOFIE BasicNary called to Generate without being initialized first");
}
std::stringstream out;
size_t length = ConvertShapeToLength(fShapeY);
auto length = ConvertDimShapeToLength(fDimShapeY);
out << SP << "\n//------ BasicNary operator\n";
if (fBroadcast) {
for (size_t i = 0; i < fNInputs.size(); i++) {
if (fNBroadcastedInputs[i] != fNInputs[i]) {
out << SP << SP << "// Broadcasting " << fNInputs[i] << " to " << ConvertShapeToString(fShapeY) << "\n";
out << SP << SP << "{\n";
out << SP << SP << SP << fType << "* data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<" << fType << ">(tensor_" + fNInputs[i] << ", " << ConvertShapeToString(fShapeInputs[i]);
out << ", " << ConvertShapeToString(fShapeY) << ");\n";
out << SP << SP << SP << "std::copy(data, data + " << length << ", " << fNBroadcastedInputs[i] << ");\n";
out << SP << SP << SP << "delete[] data;\n";
out << SP << SP << "}\n";
}
}
}

if (fNInputs.size() == 1) {
int nInputs = fNInputs.size();

if (nInputs == 1) {
out << SP << "std::copy(tensor_" << fNInputs[0] << ", tensor_" << fNInputs[0] << " + ";
out << length << ", tensor_" << fNY << ");\n";
} else {
std::vector<std::string> inputs(fNBroadcastedInputs.size());
for (size_t i = 0; i < fNBroadcastedInputs.size(); i++) {
inputs[i] = fNBroadcastedInputs[i] + "[id]";

// implement the operator without broadcasting, using loops over all output indices
std::vector<std::vector<Dim>> inputStrides(nInputs);
for (int i = 0; i < nInputs; i++)
inputStrides[i] = UTILITY::ComputeStrideFromShape(fShapeInputs[i]);

auto stridesY = UTILITY::ComputeStrideFromShape(fDimShapeY);

// generate the loops over the output indices
std::string compute_idx_Y;
int nloop = 0;
if (fDimShapeY.empty() ||
std::all_of(fDimShapeY.begin(), fDimShapeY.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
compute_idx_Y = "0";
} else {
for (size_t i = 0; i < fDimShapeY.size(); ++i) {
if (fDimShapeY[i].dim != 1 && fDimShapeY[i].GetVal() != "1") {
nloop++;
for (int j = 0; j < nloop; j++) out << SP;
out << "for (size_t idx_" << i << " = 0; idx_" << i << " < " << fDimShapeY[i]
<< "; ++idx_" << i << "){\n";
compute_idx_Y += "idx_" + std::to_string(i);
if (stridesY[i].GetVal() != "1")
compute_idx_Y += " * " + stridesY[i].GetVal();
compute_idx_Y += " + ";
}
}
// remove last 3 characters " + "
for (int j = 0; j < 3; j++)
compute_idx_Y.pop_back();
}
// find indices for input tensors
std::vector<std::string> inputs(nInputs);
for (int ipt = 0; ipt < nInputs; ipt++ ) {
std::string compute_idx_X;
auto & shape = fShapeInputs[ipt];
auto & stride = inputStrides[ipt];
if (shape.empty() ||
std::all_of(shape.begin(), shape.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
compute_idx_X = "0";
} else {
for (size_t i = 0; i < shape.size(); ++i) {
if (shape[i].dim == 1 || shape[i].GetVal() == "1")
continue;
compute_idx_X += "idx_" + std::to_string(i + (fDimShapeY.size() - shape.size()));
if (stride[i].GetVal() != "1")
compute_idx_X += " * " + stride[i].GetVal();
compute_idx_X += " + ";
}
// remove last 3 characters " + "
for (int j = 0; j < 3; j++)
compute_idx_X.pop_back();
}
inputs[ipt] = "tensor_" + fNInputs[ipt] + "[" + compute_idx_X + "]";
}

// perform the operation
for (int j = 0; j < nloop + 1; j++) out << SP;
std::string output = "tensor_" + fNY + "[" + compute_idx_Y + "]";
out << NaryOperatorTraits<T,Op>::Op(output, inputs);

for (int i = nloop; i > 0; i--) {
for (int j = 0; j < i; j++) out << SP;
out << "}\n";
}
out << SP << "for (size_t id = 0; id < " << length << "; id++) {\n";
out << NaryOperatorTraits<T,Op>::Op("tensor_" + fNY + "[id]", inputs);
out << SP << "}\n";
}
return out.str();
}
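To make the new emission concrete: for Max over two hypothetical inputs tensor_A of shape {2, 1, 3} and tensor_B of shape {3} (output tensor_Y of shape {2, 1, 3}), the code generated by the logic above looks like the following sketch. The size-1 axis produces no loop, and each input is addressed through its own strides rather than through a materialized broadcast copy:

#include <algorithm>
#include <cstddef>

// Wrapped in a function here so the sketch is self-contained.
void emitted_max(const float* tensor_A, const float* tensor_B, float* tensor_Y) {
    //------ BasicNary operator
    for (size_t idx_0 = 0; idx_0 < 2; ++idx_0){
        for (size_t idx_2 = 0; idx_2 < 3; ++idx_2){
            tensor_Y[idx_0 * 3 + idx_2] = std::max({ tensor_A[idx_0 * 3 + idx_2], tensor_B[idx_2]});
        }
    }
}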
6 changes: 3 additions & 3 deletions tmva/sofie/inc/TMVA/ROperator_BatchNormalization.hxx
@@ -141,8 +141,8 @@ public:
}
}

std::string Generate(std::string OpName) override {
OpName = "op_" + OpName;
std::string Generate(std::string opName) override {
opName = "op_" + opName;
if (fShapeX.empty()){
throw std::runtime_error("TMVA SOFIE Batch Normalization called to Generate without being initialized first");
}
@@ -158,7 +158,7 @@ public:
spatial_dim = ConvertDimShapeToLength( spatialShape);
}

out << "\n\n//---- BatchNorm" << (fActivation == EActivationType::RELU ? " + ReLU" : "") << "\n";
out << "\n\n//---- BatchNorm" << (fActivation == EActivationType::RELU ? " + ReLU " : " ") << opName << "\n";
out << SP << "{\n";
out << SP << " size_t i = 0;\n";
out << SP << " for (size_t n = 0; n < " << batchSize << "; ++n) {\n";
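The visible change here only affects the emitted banner: for a ReLU-fused node whose generated name is op_bn1 (a hypothetical name), the comment line in the generated code changes roughly as follows, making it easier to match generated blocks to their operators:

//---- BatchNorm + ReLU           <- before
//---- BatchNorm + ReLU op_bn1    <- after (operator name now included)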
21 changes: 11 additions & 10 deletions tmva/sofie/inc/TMVA/ROperator_Cast.hxx
@@ -46,7 +46,7 @@ public:
throw std::runtime_error("TMVA SOFIE Cast Op Input Tensor is not found in model");
}
fShape = model.GetDimTensorShape(fNX);
// shoud we add a check if the same type
// should we add a check if the same type
auto inputType = model.GetTensorType(fNX);
if (model.IsInitializedTensor(fNX)) {
fIsOutputConstant = true;
@@ -57,29 +57,30 @@ }
}
else
fIsOutputConstant = false;
} else if (model.IsShapeTensor(fNX) && ConvertStringToType(fAttrType) == ETensorType::INT64) {
auto shapeData = model.GetShapeTensorValues(fNX);
model.AddShapeTensor(fNY, shapeData, fShape.size() == 0);
fIsOutputConstant = true;
}
if (!fIsOutputConstant)
model.AddIntermediateTensor(fNY, ConvertStringToType(fAttrType), fShape);
if (model.Verbose()) {
std::cout << "Cast : " << ConvertTypeToString(inputType) << " " << fNX << " -> " << fAttrType << " for " << fNY;
std::cout << "Cast : " << ConvertTypeToString(inputType) << " " << fNX << " -> " << fAttrType << " for " << fNY
<< " shape " << ConvertDimShapeToString(fShape);
if (fIsOutputConstant) std::cout << " (constant) ";
std::cout << std::endl;
}
}


std::string Generate(std::string OpName) override {
if (fIsOutputConstant) return "";
std::string Generate(std::string opName) override {

// output shape can be empty if it is a scalar

OpName = "op_" + OpName;
if (fShape.empty()) {
throw std::runtime_error("TMVA SOFIE Cast called to Generate without being initialized first");
}
std::stringstream out;
auto length = ConvertDimShapeToLength(fShape);

// out << SP << ETensorType << " " << OpName << "_attr = " << fattr << ";\n";
out << "\n//------ CAST\n";
out << "\n//------ CAST " << opName << " ---> " << fNY << " " << ConvertDimShapeToString(fShape) << "\n";
// no generated code for constant outputs
if (fIsOutputConstant) return out.str();

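The remainder of Cast::Generate sits below the visible excerpt; presumably it emits a simple element-wise conversion loop along these lines (a sketch only — the function name and the float-to-int64 pairing are assumptions for illustration):

#include <cstddef>
#include <cstdint>

void emitted_cast(const float* tensor_X, int64_t* tensor_Y, size_t length) {
    //------ CAST op_cast0 ---> Y { length }
    for (size_t id = 0; id < length; id++)
        tensor_Y[id] = static_cast<int64_t>(tensor_X[id]);  // assumed conversion
}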