Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion tmva/sofie/inc/TMVA/RModel.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ private:
MemoryPoolInfo fIntermediateMemoryInfo; ///<! intermediate memory info (transient)
std::unordered_map<std::string_view, size_t> fIntermediateTensorFrequencyLookup; ///<! lookup table for intermediate tensor frequency (transient)

std::string fExtraCodeForDimShapes; // extra code needed for initialization of dynamic parameters (e.g. the number of non-zero elements in the NonZero operator)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can't add new non-transient class members without updating the class version

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Now version 0 is used to flag that RModel is not made persistent


public:
/**
Default constructor. Needed to allow serialization of ROOT objects. See
Expand Down Expand Up @@ -108,6 +110,7 @@ public:

void AddShapeTensor(const std::string & name, const std::vector<Dim> & shapeValues, bool scalar = false);

void AddExtraCodeForDimShapes(const std::string & code) { fExtraCodeForDimShapes += code; }

// add and initialize subgraph to the model
void InitializeSubGraph(std::shared_ptr<RModel> graph);
Expand Down Expand Up @@ -239,7 +242,8 @@ public:
bool UseVDT() const { return fUseVDT;}

// Use the ClassDef macro to allow definition of custom streaming
ClassDefNV(RModel, 3);
// Use Version 0 since, for the time being, we don't support ROOT I/O streaming of RModel objects
ClassDefNV(RModel, 4);
};

// need to implement here templated member functions and its specialization
Expand Down
11 changes: 8 additions & 3 deletions tmva/sofie/inc/TMVA/ROperator_Cast.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -66,8 +66,9 @@ public:
if (!fIsOutputConstant)
model.AddIntermediateTensor(fNY, fType, fShape);
if (model.Verbose()) {
std::cout << "Cast : " << ConvertTypeToString(inputType) << " " << fNX << " -> " << ConvertTypeToString(fType) << " for " << fNY
<< " shape " << ConvertDimShapeToString(fShape);
std::cout << "Cast : " << ConvertTypeToString(inputType) << " " << fNX << " -> " << ConvertTypeToString(fType);
if (fType == ETensorType::BOOL) std::cout << " (converted from BOOL) ";
std::cout << " for " << fNY << " shape " << ConvertDimShapeToString(fShape);
if (fIsOutputConstant) std::cout << " (constant) ";
std::cout << std::endl;
}
Expand All @@ -87,7 +88,11 @@ public:

out << SP << "for (int id = 0; id < " << length << " ; id++){\n";

out << SP << SP << "tensor_" << fNY << "[id] = static_cast<"<< ConvertTypeToString(fType) << ">(tensor_" << fNX << "[id]);\n";
// need to handle the bool case separately since casting to uint8 will not give the right result
if (fType == ETensorType::BOOL)
out << SP << SP << "tensor_" << fNY << "[id] = (tensor_" << fNX << "[id] != 0) ? 1 : 0;\n";
else
out << SP << SP << "tensor_" << fNY << "[id] = static_cast<"<< ConvertTypeToString(fType) << ">(tensor_" << fNX << "[id]);\n";

out << SP << "}\n";
return out.str();
Expand Down
20 changes: 14 additions & 6 deletions tmva/sofie/inc/TMVA/ROperator_NonZero.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ private:

std::string fNX;
std::string fNY;
std::string fNonZeroParam; // name of the parameter used to store the number of non zero elements when output is not constant
std::vector<Dim> fShapeX;
std::vector<Dim> fShapeY;

Expand Down Expand Up @@ -93,7 +94,15 @@ public:
fShapeY[0] = fShapeX.size();

// identify as -1 since we will declare maximum as size of input
fShapeY[1] = Dim{std::string("v_NonZero_") + fNX, static_cast<size_t>(-1)};
// we will compute at run time the actual number of non zero and rearrange the output vector accordingly
fNonZeroParam = "v_NonZero_" + fNX;
fShapeY[1] = Dim{fNonZeroParam, static_cast<size_t>(-1)};

// declare the parameter for number of non zero elements, used when output is not constant
auto inputLength = ConvertDimShapeToLength(fShapeX);
std::string codeDecl = SP + "size_t " + fNonZeroParam + " = " + inputLength + ";\n";
codeDecl += SP + "fV_NonZero_" + fNX + " = " + fNonZeroParam + ";\n";
model.AddExtraCodeForDimShapes(codeDecl);

model.AddIntermediateTensor(fNY, ETensorType::INT64, fShapeY);
if (model.Verbose()) {
Expand All @@ -104,13 +113,12 @@ public:

std::string GenerateSessionMembersCode(std::string /*opName*/) override {
if (fIsOutputConstant) return "";
// define output value used as max non zero with max size = input shape * N
auto inputLength = ConvertDimShapeToLength(fShapeX);
std::stringstream out;
out << SP << "size_t fV_NonZero_" << fNX << " = " << inputLength << ";\n";
out << SP << "size_t fV_NonZero_" << fNX << " = 0;\n";
return out.str();
}


std::string Generate(std::string opName) override {
if (fIsOutputConstant) {
return "";
Expand All @@ -127,9 +135,9 @@ public:
inputLength = ConvertShapeToLength(intShapeX);

size_t dims = fShapeX.size();
out << "\n//------ NonZero\n";
out << "\n//------ NonZero -> " << ConvertDimShapeToString(fShapeY) << "\n";

std::string vnonzero = "v_NonZero_" + fNX;
std::string vnonzero = fNonZeroParam;

// loop on input indices
out << SP << "size_t offset_" << opName << " = 0;\n";
Expand Down
4 changes: 3 additions & 1 deletion tmva/sofie/inc/TMVA/ROperator_Reshape.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -332,7 +332,7 @@ public:
}

std::string Generate(std::string opName) override {
if (fIsOutputConstant) return ""; //no op for constant tensors


std::stringstream out;
std::string opType = "Reshape";
Expand All @@ -345,6 +345,8 @@ public:

out << SP << "///--------" << opType << " operator " << opName << " --> " << ConvertDimShapeToString(fShapeOutput) << "\n";

if (fIsOutputConstant) return out.str(); //no op for constant tensors

// in case of dynamic output shape we need to set the shape value from input shape tensor
// and take case of the zero values
if (fDynamicShape) {
Expand Down
7 changes: 4 additions & 3 deletions tmva/sofie/inc/TMVA/ROperator_Slice.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -339,7 +339,7 @@ public:
}

model.AddIntermediateTensor(fNOutput, model.GetTensorType(fNData), fShapeOutput);
if (fIdentitySlice) model.AddAliasTensor(fNOutput, fNData);
//if (fIdentitySlice) model.AddAliasTensor(fNOutput, fNData);

if (model.Verbose()) {
std::cout << "Slice " << fNData << " " << ConvertDimShapeToString(fShapeInput)
Expand All @@ -366,8 +366,9 @@ public:
size_t ndim = fShapeInput.size();

if (fIdentitySlice) {
out << "/// Slice is just an identity (copy pointers) \n";
out << SP << "tensor_" << fNOutput << " = tensor_" << fNData << ";\n";
out << "/// Slice is just an identity (copy) \n";
//out << SP << "tensor_" << fNOutput << " = const_cast<" << ConvertTypeToString(fOutputType) << " *>(tensor_" << fNData << ");\n";
out << SP << "std::copy(tensor_" << fNData << ", tensor_" << fNData << " + " << ConvertDimShapeToLength(fShapeInput) << ", tensor_" << fNOutput << ");\n";
return out.str();
}

Expand Down
14 changes: 9 additions & 5 deletions tmva/sofie/inc/TMVA/ROperator_Softmax.hxx
Original file line number Diff line number Diff line change
Expand Up @@ -62,12 +62,14 @@ public:
}
}

std::string Generate(std::string OpName) override {
OpName = "op_" + OpName;
std::string Generate(std::string opName) override {
opName = "op_" + opName;
if (fShape.empty()) {
throw std::runtime_error("TMVA SOFIE Operator Softmax called to Generate without being initialized first");
}
std::stringstream out;
out << "///------- Softmax " << opName << " ---> " // << fNY << " "
<< ConvertDimShapeToString(fShape) << "\n" << std::endl;
size_t size = fShape.size();
auto length_str = ConvertDimShapeToLength(fShape);
size_t axis = fAttrAxis < 0 ? size + fAttrAxis : fAttrAxis;
Expand All @@ -85,7 +87,7 @@ public:
num_rows = "(" + length_str + ") / (" + axis_size + ")";
}

out << "\n" << SP << "//------ SOFTMAX - " << size << " " << length_str << " " << axis << "\n";
out << SP << "//----- softmax axis is last one - " << axis << "\n";
out << SP << "for (int i = 0; i < " << num_rows << "; ++i) {\n";
out << SP << SP << "size_t offset = i * " << axis_size << ";\n";
out << SP << SP << fType << " const * x_ptr = &tensor_" << fNX << "[offset];\n";
Expand All @@ -111,14 +113,15 @@ public:
out << SP << "}\n";

} else {
// generic case for any axis
auto stride = UTILITY::ComputeStrideFromShape(fShape);
size_t k = 0;
std::vector<std::string> l(size);
for (size_t i = 0; i < size; i++) {
if (i != axis) {
for (size_t j = 0; j < k; j++) out << SP;
l[i] = std::string("i") + std::to_string(i);
out << "for (int " << l[i] << " = 0; " << l[i] << " < " << fShape[i] << "; " << l[i] << "++) {\n";
out << SP << "for (int " << l[i] << " = 0; " << l[i] << " < " << fShape[i] << "; " << l[i] << "++) {\n";
k++;
}
}
Expand Down Expand Up @@ -167,7 +170,8 @@ public:
out << "for (int i = 0; i < " << fShape[axis] << "; i++) {\n";
for (size_t j = 0; j < size; j++) out << SP;
out << "size_t id = index + i";
if (stride[axis].GetVal() != "1") out << "*(" << stride[axis] << ");\n";
if (stride[axis].GetVal() != "1") out << "*(" << stride[axis] << ")";
out << ";\n";
for (size_t j = 0; j < size; j++) out << SP;
out << "tensor_" << fNY << "[id] /= sum;\n";
if (fLogSoftmax) {
Expand Down
Loading
Loading