formatting

bili2002 committed May 30, 2024
1 parent 091df95 commit 6eb5dcc
Showing 4 changed files with 91 additions and 99 deletions.
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/cpu/ml/ml_common.h
@@ -28,7 +28,7 @@ enum NODE_MODE : uint8_t {
BRANCH_GT = 8,
BRANCH_EQ = 10,
BRANCH_NEQ = 12,
BRANCH_SM = 14, // Set membership
BRANCH_SM = 14, // Set membership
};

static inline NODE_MODE MakeTreeNodeMode(const std::string& input) {
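
The new BRANCH_SM mode lets one node stand in for a whole chain of equality splits on the same categorical feature. A minimal standalone sketch of the underlying bitmask idea, using illustrative names rather than ONNX Runtime's internals: category value v (with 1 <= v <= 64) is recorded as bit v - 1 of a 64-bit mask, and membership becomes a single AND.

#include <cstdint>
#include <initializer_list>
#include <iostream>

// Fold a set of small positive integer categories into one 64-bit mask.
uint64_t BuildMask(std::initializer_list<int64_t> categories) {
  uint64_t mask = 0;
  for (int64_t v : categories) {
    mask |= (1ull << (v - 1));  // bit v - 1 marks category v
  }
  return mask;
}

// True when v is one of the categories encoded in the mask.
bool IsMember(uint64_t mask, int64_t v) {
  return v >= 1 && v <= 64 && ((1ull << (v - 1)) & mask) != 0;
}

int main() {
  const uint64_t mask = BuildMask({3, 7, 11});
  std::cout << IsMember(mask, 7) << " " << IsMember(mask, 8) << "\n";  // prints "1 0"
  return 0;
}

The commit stores such a mask in the node's existing value_or_unique_weight field by reusing the threshold's bit pattern, so no extra per-node storage is needed.
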
46 changes: 19 additions & 27 deletions onnxruntime/core/providers/cpu/ml/tree_ensemble_common.h
@@ -90,10 +90,10 @@ class TreeEnsembleCommon : public TreeEnsembleCommonAttributes {

private:
bool CheckIfSubtreesAreEqual(const size_t left_id, const size_t right_id, const int64_t tree_id, const InlinedVector<NODE_MODE>& cmodes,
const InlinedVector<size_t>& truenode_ids, const InlinedVector<size_t>& falsenode_ids, const std::vector<int64_t>& nodes_featureids,
const std::vector<ThresholdType>& nodes_values_as_tensor, const std::vector<float>& node_values,
const std::vector<float>& target_class_weights, const std::vector<ThresholdType>& target_class_weights_as_tensor,
const InlinedVector<TreeNodeElementId>& node_tree_ids, InlinedVector<std::pair<TreeNodeElementId, uint32_t>> indices);
const InlinedVector<size_t>& truenode_ids, const InlinedVector<size_t>& falsenode_ids, const std::vector<int64_t>& nodes_featureids,
const std::vector<ThresholdType>& nodes_values_as_tensor, const std::vector<float>& node_values,
const std::vector<float>& target_class_weights, const std::vector<ThresholdType>& target_class_weights_as_tensor,
const InlinedVector<TreeNodeElementId>& node_tree_ids, InlinedVector<std::pair<TreeNodeElementId, uint32_t>> indices);
size_t AddNodes(const size_t i, const InlinedVector<NODE_MODE>& cmodes, const InlinedVector<size_t>& truenode_ids,
const InlinedVector<size_t>& falsenode_ids, const std::vector<int64_t>& nodes_featureids,
const std::vector<ThresholdType>& nodes_values_as_tensor, const std::vector<float>& node_values,
@@ -354,10 +354,7 @@ bool TreeEnsembleCommon<InputType, ThresholdType, OutputType>::CheckIfSubtreesAr
const std::vector<float>& target_class_weights, const std::vector<ThresholdType>& target_class_weights_as_tensor,
const InlinedVector<TreeNodeElementId>& node_tree_ids, InlinedVector<std::pair<TreeNodeElementId, uint32_t>> indices) {
// Leaves have values set at 0
if (cmodes[left_id] != cmodes[right_id]
|| nodes_featureids[left_id] != nodes_featureids[right_id]
|| (!nodes_values_as_tensor.empty() && nodes_values_as_tensor[left_id] != nodes_values_as_tensor[right_id])
|| (nodes_values_as_tensor.empty() && node_values[left_id] != node_values[right_id])) {
if (cmodes[left_id] != cmodes[right_id] || nodes_featureids[left_id] != nodes_featureids[right_id] || (!nodes_values_as_tensor.empty() && nodes_values_as_tensor[left_id] != nodes_values_as_tensor[right_id]) || (nodes_values_as_tensor.empty() && node_values[left_id] != node_values[right_id])) {
return false;
}

@@ -370,16 +367,15 @@ bool TreeEnsembleCommon<InputType, ThresholdType, OutputType>::CheckIfSubtreesAr

if (target_class_weights_as_tensor.empty()) {
return target_class_weights[left_target_node] == target_class_weights[right_target_node];
}
else {
} else {
return target_class_weights_as_tensor[left_target_node] == target_class_weights_as_tensor[right_target_node];
}
}

return CheckIfSubtreesAreEqual(falsenode_ids[left_id], falsenode_ids[right_id], tree_id, cmodes, truenode_ids, falsenode_ids, nodes_featureids,
nodes_values_as_tensor, node_values, target_class_weights, target_class_weights_as_tensor, node_tree_ids, indices)
&& CheckIfSubtreesAreEqual(truenode_ids[left_id], truenode_ids[right_id], tree_id, cmodes, truenode_ids, falsenode_ids, nodes_featureids,
nodes_values_as_tensor, node_values, target_class_weights, target_class_weights_as_tensor, node_tree_ids, indices);
nodes_values_as_tensor, node_values, target_class_weights, target_class_weights_as_tensor, node_tree_ids, indices) &&
CheckIfSubtreesAreEqual(truenode_ids[left_id], truenode_ids[right_id], tree_id, cmodes, truenode_ids, falsenode_ids, nodes_featureids,
nodes_values_as_tensor, node_values, target_class_weights, target_class_weights_as_tensor, node_tree_ids, indices);
}
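
CheckIfSubtreesAreEqual decides whether two nodes root equivalent subtrees, which is the precondition for folding a false-branch chain into a single set-membership node. A simplified sketch of the same recursive shape, assuming a hypothetical ToyNode struct rather than the layout used by TreeEnsembleCommon:

#include <cstdint>

// Hypothetical toy node: leaves carry a weight, internal nodes a feature id and a threshold.
struct ToyNode {
  bool is_leaf;
  int64_t feature_id;
  double threshold;
  double weight;
  const ToyNode* true_child;
  const ToyNode* false_child;
};

// Two subtrees are equal when every decision (feature id, threshold) and every
// leaf weight match; the recursion mirrors the member function above, which
// additionally compares node modes and handles tensor-valued thresholds and weights.
bool SubtreesEqual(const ToyNode* a, const ToyNode* b) {
  if (a->is_leaf != b->is_leaf) return false;
  if (a->is_leaf) return a->weight == b->weight;
  if (a->feature_id != b->feature_id || a->threshold != b->threshold) return false;
  return SubtreesEqual(a->false_child, b->false_child) &&
         SubtreesEqual(a->true_child, b->true_child);
}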

inline void UpdateThreshold(double val, double& mask) {
@@ -430,8 +426,7 @@ size_t TreeEnsembleCommon<InputType, ThresholdType, OutputType>::AddNodes(
if (node.flags == NODE_MODE::BRANCH_EQ && CANMASK(node_threshold, ThresholdType)) {
UpdateThreshold(node_threshold, node.value_or_unique_weight);
node.flags = NODE_MODE::BRANCH_SM;
}
else {
} else {
node.value_or_unique_weight = node_threshold;
}
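
The conversion just above turns a maskable BRANCH_EQ node into a BRANCH_SM node, and the loop in the next hunk then keeps OR-ing further equality thresholds into that node as long as the false chain tests the same feature, the threshold passes CANMASK (defined elsewhere; presumably a check that the value fits within the mask width of ThresholdType), and the true subtrees are equivalent. A rough sketch of that folding on plain integers, with illustrative names:

#include <cstdint>
#include <vector>

// chain holds the categories tested by consecutive equality nodes on one feature.
// Rather than emitting one branch per category, accumulate a single mask; the
// resulting set-membership node keeps the shared true subtree, and its false
// branch is whatever followed the last node of the chain.
uint64_t FoldEqualityChain(const std::vector<int64_t>& chain) {
  uint64_t mask = 0;
  for (int64_t category : chain) {
    mask |= (1ull << (category - 1));  // in spirit, the update UpdateThreshold applies to the stored bit pattern
  }
  return mask;
}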

@@ -445,10 +440,9 @@ size_t TreeEnsembleCommon<InputType, ThresholdType, OutputType>::AddNodes(
auto falsenode_threshold = nodes_values_as_tensor.empty() ? static_cast<ThresholdType>(node_values[falsenode_id]) : nodes_values_as_tensor[falsenode_id];

while (cmodes[falsenode_id] == NODE_MODE::BRANCH_EQ && nodes_[node_pos].feature_id == nodes_featureids[falsenode_id] &&
CANMASK(falsenode_threshold, ThresholdType) &&
CheckIfSubtreesAreEqual(truenode_ids[i], truenode_ids[falsenode_id], tree_id, cmodes, truenode_ids, falsenode_ids,
nodes_featureids, nodes_values_as_tensor, node_values, target_class_weights, target_class_weights_as_tensor, node_tree_ids, indices)) {

CANMASK(falsenode_threshold, ThresholdType) &&
CheckIfSubtreesAreEqual(truenode_ids[i], truenode_ids[falsenode_id], tree_id, cmodes, truenode_ids, falsenode_ids,
nodes_featureids, nodes_values_as_tensor, node_values, target_class_weights, target_class_weights_as_tensor, node_tree_ids, indices)) {
UpdateThreshold(falsenode_threshold, nodes_[node_pos].value_or_unique_weight);
falsenode_id = falsenode_ids[falsenode_id];
falsenode_threshold = nodes_values_as_tensor.empty() ? static_cast<ThresholdType>(node_values[falsenode_id]) : nodes_values_as_tensor[falsenode_id];
@@ -767,14 +761,12 @@ void TreeEnsembleCommon<InputType, ThresholdType, OutputType>::ComputeAgg(concur

inline bool SetMembershipCheck(double val, double mask) {
auto val_as_int = static_cast<int64_t>(val);
return CANMASK(val_as_int, double)
&& (((1ll << (val_as_int - 1)) & *reinterpret_cast<uint64_t*>(&mask)) != 0);
return CANMASK(val_as_int, double) && (((1ll << (val_as_int - 1)) & *reinterpret_cast<uint64_t*>(&mask)) != 0);
}

inline bool SetMembershipCheck(float val, float mask) {
auto val_as_int = static_cast<int64_t>(val);
return CANMASK(val_as_int, float)
&& (((1ll << (val_as_int - 1)) & *reinterpret_cast<uint32_t*>(&mask)) != 0);
return CANMASK(val_as_int, float) && (((1ll << (val_as_int - 1)) & *reinterpret_cast<uint32_t*>(&mask)) != 0);
}

inline bool _isnan_(float x) { return std::isnan(x); }
@@ -824,8 +816,8 @@ TreeEnsembleCommon<InputType, ThresholdType, OutputType>::ProcessTreeNodeLeave(
while (root->is_not_leaf()) {
val = x_data[root->feature_id];
root = (SetMembershipCheck(val, root->value_or_unique_weight) || (root->is_missing_track_true() && _isnan_(val)))
? root->truenode_or_weight.ptr
: root + 1;
? root->truenode_or_weight.ptr
: root + 1;
}
} else {
while (root->is_not_leaf()) {
@@ -868,8 +860,8 @@ TreeEnsembleCommon<InputType, ThresholdType, OutputType>::ProcessTreeNodeLeave(
break;
case NODE_MODE::BRANCH_SM:
root = (SetMembershipCheck(val, root->value_or_unique_weight) || (root->is_missing_track_true() && _isnan_(val)))
? root->truenode_or_weight.ptr
: root + 1;
? root->truenode_or_weight.ptr
: root + 1;
break;
case NODE_MODE::LEAF:
return root;
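
At inference time, SetMembershipCheck reads the mask back out of the node's value_or_unique_weight field by reinterpreting the stored float or double as an integer and testing bit v - 1, and the BRANCH_SM cases above route to the true child on a hit (or when the value is NaN and the missing-value track points true) and fall through to the adjacent node otherwise. A small self-contained sketch of the same round trip, using memcpy for the type pun instead of reinterpret_cast; the names are illustrative:

#include <cstdint>
#include <cstring>
#include <iostream>

// Store a 32-bit category mask in a float's bit pattern, as the node reuses its
// threshold field for the mask.
float MaskToFloat(uint32_t mask) {
  float stored;
  std::memcpy(&stored, &mask, sizeof(stored));
  return stored;
}

// Recover the mask and test whether category v is in the set.
bool Member(float stored, int64_t v) {
  uint32_t mask;
  std::memcpy(&mask, &stored, sizeof(mask));
  return v >= 1 && v <= 32 && ((1u << (v - 1)) & mask) != 0;
}

int main() {
  const float stored = MaskToFloat((1u << 2) | (1u << 6));  // categories 3 and 7
  std::cout << Member(stored, 3) << Member(stored, 5) << Member(stored, 7) << "\n";  // prints "101"
  return 0;
}
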
141 changes: 71 additions & 70 deletions onnxruntime/test/framework/inference_session_test.cc
@@ -2956,120 +2956,121 @@ TEST(InferenceSessionTests, InterThreadPoolWithDenormalAsZero) {
VerifyThreadPoolWithDenormalAsZero(session2.GetInterOpThreadPoolToUse(), false);
}


float calculateSD(std::vector<float>::iterator beg, std::vector<float>::iterator end) {
float sum = 0.0, mean, standardDeviation = 0.0;
int n = 0;
float sum = 0.0, mean, standardDeviation = 0.0;
int n = 0;

for (auto curr = beg; curr != end; curr++) {
sum += *curr;
n++;
}
for (auto curr = beg; curr != end; curr++) {
sum += *curr;
n++;
}

mean = sum / n;
mean = sum / n;

for (auto curr = beg; curr != end; curr++) {
standardDeviation += (*curr - mean) * (*curr - mean);
}
for (auto curr = beg; curr != end; curr++) {
standardDeviation += (*curr - mean) * (*curr - mean);
}

return sqrt(standardDeviation / n);
return sqrt(standardDeviation / n);
}
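
calculateSD returns the population standard deviation of the run times (it divides by n, not n - 1). A quick check against a hand-computed value, calling the helper defined above:

std::vector<float> times = {2, 4, 4, 4, 5, 5, 7, 9};
// mean = 5, mean squared deviation = 32 / 8 = 4, so the result is 2
std::cout << calculateSD(times.begin(), times.end()) << std::endl;  // prints 2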

void benchmarkModel(size_t tests, int number_cnt, std::string model, std::string input) {
// Initialize logging manager
auto logging_manager = std::make_unique<logging::LoggingManager>(
std::unique_ptr<ISink>(new CLogSink()), logging::Severity::kVERBOSE, false,
LoggingManager::InstanceType::Temporal);

// Create environment
std::unique_ptr<Environment> env;
ASSERT_TRUE(Environment::Create(std::move(logging_manager), env).IsOK());
// Initialize logging manager
auto logging_manager = std::make_unique<logging::LoggingManager>(
std::unique_ptr<ISink>(new CLogSink()), logging::Severity::kVERBOSE, false,
LoggingManager::InstanceType::Temporal);

// Configure session options
SessionOptions so;
so.execution_mode = ExecutionMode::ORT_SEQUENTIAL;
so.graph_optimization_level = TransformerLevel::Level2;
so.intra_op_param.thread_pool_size = 1;
// Create environment
std::unique_ptr<Environment> env;
ASSERT_TRUE(Environment::Create(std::move(logging_manager), env).IsOK());

// Initialize and load the InferenceSession
InferenceSession session{so, *env};
// Configure session options
SessionOptions so;
so.execution_mode = ExecutionMode::ORT_SEQUENTIAL;
so.graph_optimization_level = TransformerLevel::Level2;
so.intra_op_param.thread_pool_size = 1;

ASSERT_STATUS_OK(session.Load(model.c_str()));
ASSERT_STATUS_OK(session.Initialize());
// Initialize and load the InferenceSession
InferenceSession session{so, *env};

// Input numpy array
std::fstream input_file(input.c_str());
std::vector<float> values = {};
for(float number; input_file >> number;) { values.push_back(number); }
ASSERT_STATUS_OK(session.Load(model.c_str()));
ASSERT_STATUS_OK(session.Initialize());

std::vector<int64_t> dims = {static_cast<long long>(values.size()) / number_cnt, number_cnt};
// Input numpy array
std::fstream input_file(input.c_str());
std::vector<float> values = {};
for (float number; input_file >> number;) {
values.push_back(number);
}

std::cout << "Loaded: " << values.size() << std::endl;
std::vector<int64_t> dims = {static_cast<long long>(values.size()) / number_cnt, number_cnt};

OrtValue ml_value;
CreateMLValue<float>(TestCPUExecutionProvider()->CreatePreferredAllocators()[0], dims, values, &ml_value);
NameMLValMap feeds;
feeds.insert(std::make_pair("float_input", ml_value));
std::cout << "Loaded: " << values.size() << std::endl;

// Configure output
std::vector<std::string> output_names;
output_names.push_back("variable");
std::vector<OrtValue> fetches;
OrtValue ml_value;
CreateMLValue<float>(TestCPUExecutionProvider()->CreatePreferredAllocators()[0], dims, values, &ml_value);
NameMLValMap feeds;
feeds.insert(std::make_pair("float_input", ml_value));

// Configure RunOptions
RunOptions run_options;
// Configure output
std::vector<std::string> output_names;
output_names.push_back("variable");
std::vector<OrtValue> fetches;

const size_t MAX_ITER = tests;
std::vector<float> times = {};
// Configure RunOptions
RunOptions run_options;

for(size_t ITER = 0; ITER < MAX_ITER; ITER ++) {
const auto begin = clock();
const size_t MAX_ITER = tests;
std::vector<float> times = {};

{
common::Status st = session.Run(run_options, feeds, output_names, &fetches);
if (!st.IsOK()) {
std::cout << "Run returned status: " << st.ErrorMessage() << std::endl;
}
ASSERT_TRUE(st.IsOK());
}
for (size_t ITER = 0; ITER < MAX_ITER; ITER++) {
const auto begin = clock();

const auto end = clock();
const auto time_in_seconds = (double)(end - begin) / CLOCKS_PER_SEC;
std::cout << "Total time in predict " << time_in_seconds << std::endl;
times.push_back(time_in_seconds);
{
common::Status st = session.Run(run_options, feeds, output_names, &fetches);
if (!st.IsOK()) {
std::cout << "Run returned status: " << st.ErrorMessage() << std::endl;
}
ASSERT_TRUE(st.IsOK());
}

const auto average = std::accumulate(times.begin(), times.end(), 0.0) / times.size();
const auto SD = calculateSD(times.begin(), times.end());
std::cout << "Same Mode - Average " << average << "; Standard Deviation " << SD << std::endl;
const auto end = clock();
const auto time_in_seconds = (double)(end - begin) / CLOCKS_PER_SEC;
std::cout << "Total time in predict " << time_in_seconds << std::endl;
times.push_back(time_in_seconds);
}

const auto average = std::accumulate(times.begin(), times.end(), 0.0) / times.size();
const auto SD = calculateSD(times.begin(), times.end());
std::cout << "Same Mode - Average " << average << "; Standard Deviation " << SD << std::endl;
}
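
benchmarkModel times each Session::Run call with clock(), which measures processor time; with intra_op_param.thread_pool_size set to 1 that is close to wall time, but a std::chrono::steady_clock timer is the more direct way to report elapsed time. A generic sketch of that alternative (not part of this change):

#include <chrono>

// Wall-clock timing of a single callable, as an alternative to clock().
template <typename Fn>
double SecondsElapsed(Fn&& fn) {
  const auto begin = std::chrono::steady_clock::now();
  fn();
  const auto end = std::chrono::steady_clock::now();
  return std::chrono::duration<double>(end - begin).count();
}

// Usage inside the benchmark loop, with session, run_options, feeds,
// output_names and fetches as above:
//   times.push_back(static_cast<float>(SecondsElapsed([&] {
//     ASSERT_STATUS_OK(session.Run(run_options, feeds, output_names, &fetches));
//   })));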

TEST(InferenceSessionTests, BenchSameModeOne) {
benchmarkModel(1, 5, "model.onnx", "input.txt");
benchmarkModel(1, 5, "model.onnx", "input.txt");
}

TEST(InferenceSessionTests, BenchNotSameModeOne) {
benchmarkModel(1, 15, "model3.onnx", "input3.txt");
benchmarkModel(1, 15, "model3.onnx", "input3.txt");
}

TEST(InferenceSessionTests, BenchSameMode1) {
benchmarkModel(10, 5, "model_single_tree.onnx", "input_single_tree.txt");
benchmarkModel(10, 5, "model_single_tree.onnx", "input_single_tree.txt");
}

TEST(InferenceSessionTests, BenchSameMode10) {
benchmarkModel(10, 5, "model_10_trees.onnx", "input.txt");
benchmarkModel(10, 5, "model_10_trees.onnx", "input.txt");
}

TEST(InferenceSessionTests, BenchSameMode100) {
benchmarkModel(10, 5, "model.onnx", "input.txt");
benchmarkModel(10, 5, "model.onnx", "input.txt");
}

TEST(InferenceSessionTests, BenchSameMode1000) {
benchmarkModel(10, 5, "model_1000_trees.onnx", "input_1000_trees.txt");
benchmarkModel(10, 5, "model_1000_trees.onnx", "input_1000_trees.txt");
}

TEST(InferenceSessionTests, BenchNotSameMode100) {
benchmarkModel(10, 15, "model3.onnx", "input3.txt");
benchmarkModel(10, 15, "model3.onnx", "input3.txt");
}

} // namespace test
1 change: 0 additions & 1 deletion onnxruntime/test/providers/cpu/ml/treeregressor_test.cc
@@ -679,7 +679,6 @@ TEST(MLOpTest, TreeRegressorSingleTargetSum_as_tensor_precision) {
GenTreeAndRunTest1_as_tensor_precision(3);
}


TEST(MLOpTest, TreeRegressorCategoricals) {
OpTester test("TreeEnsembleRegressor", 3, onnxruntime::kMLDomain);
