
Commit 23dc20e

code generation for softmax and minor code cleanup
1 parent cd636c5 commit 23dc20e

Showing 1 changed file with 4 additions and 18 deletions.

src/onnx_gdf_parser.cpp

Lines changed: 4 additions & 18 deletions
@@ -44,7 +44,6 @@ int calculateTensorDims
     std::string layer_type = layer_details.find("type")->second;
     std::string layer_input = layer_details.find("input")->second;
     std::string layer_output = layer_details.find("output")->second;
-    std::cout << "Input output type is " << layer_input << " " << layer_output << " " << layer_type << std::endl;
     int in_w, in_h, in_c, in_n;
     int out_w, out_h, out_c, out_n;
     std::vector<int> output_dims;
@@ -83,30 +82,25 @@ int calculateTensorDims
 
     if(layer_details.size() > 5) {
         in_out_map[layer_bias] = bias_dims;
-        std::cout << "Bias dims aer " << layer_bias << " " << bias_dims[0] << std::endl;
     }
-    std::cout << "Conv" << std::endl;
 }
 else if(layer_type == "Relu") {
     out_w = in_w;
     out_h = in_h;
     out_c = in_c;
     out_n = in_n;
-    std::cout << "Relu" << std::endl;
 }
 else if(layer_type == "LRN") {
     out_w = in_w;
     out_h = in_h;
     out_c = in_c;
     out_n = in_n;
-    std::cout << "LRN" << std::endl;
 }
 else if(layer_type == "Dropout") {
     out_w = in_w;
     out_h = in_h;
     out_c = in_c;
     out_n = in_n;
-    std::cout << "Dropout" << std::endl;
 }
 else if(layer_type == "MaxPool") {
     std::string params = layer_details.find("params")->second;
@@ -121,7 +115,6 @@ int calculateTensorDims
 
     out_c = in_c;
     out_n = in_n;
-    std::cout << "MaxPool" << std::endl;
 }
 else if(layer_type == "Gemm") {
 
@@ -155,20 +148,15 @@ int calculateTensorDims
     if(layer_details.size() > 5) {
         in_out_map[layer_bias] = bias_dims;
     }
-    std::cout << "Gemm" << std::endl;
-
 }
 
 output_dims.push_back(out_n);
 output_dims.push_back(out_c);
 output_dims.push_back(out_h);
 output_dims.push_back(out_w);
 input_tensor_dim_map[layer_output] = output_dims;
-std::cout << "Added output : " << layer_output << std::endl;
 in_out_map[layer_output] = output_dims;
 
-std::cout << "Out dims: " << out_w << " " << out_h << " " << out_c << " " << out_n << std::endl;
-
 tensorDims[i] = in_out_map;
 }
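For context, the tail of this hunk is the common exit path of calculateTensorDims: whichever branch ran, the computed dimensions are pushed in NCHW order and stored under the output tensor's name. A minimal standalone sketch of that bookkeeping (the map and variable names come from the diff; the Relu-style example values are invented):

    // Minimal sketch, assuming in_out_map maps tensor names to NCHW dims.
    #include <map>
    #include <string>
    #include <vector>

    int main() {
        std::map<std::string, std::vector<int>> in_out_map;
        int out_n = 1, out_c = 64, out_h = 56, out_w = 56; // hypothetical Relu output

        std::vector<int> output_dims;
        output_dims.push_back(out_n); // N: batch size
        output_dims.push_back(out_c); // C: channels
        output_dims.push_back(out_h); // H: height
        output_dims.push_back(out_w); // W: width

        in_out_map["relu1_out"] = output_dims; // stored as {1, 64, 56, 56}
        return 0;
    }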

@@ -341,6 +329,9 @@ int writeGDF
 else if(layer_type == "Dropout") {
     ofsGDF << "node org.khronos.openvx.copy " << layer_input << " " << layer_output << std::endl;
 }
+else if(layer_type == "Softmax") {
+    ofsGDF << "node org.khronos.nn_extension.softmax_layer " << layer_input << " " << layer_output << std::endl;
+}
 
 if(i == net.size() - 1) {
     ofsGDF << "write " << layer_output << " output.f32" << std::endl;
@@ -389,7 +380,7 @@ int dumpOnnxModel(const onnx::GraphProto& graph_proto)
     }
 
     fclose(fs);
-    std::cout << "INFO: Weights dumped for: " << tensor_proto.name() << std::endl;
+    //std::cout << "INFO: Weights dumped for: " << tensor_proto.name() << std::endl;
 }
 else {
     std::cout <<"ERROR: Unsupported data types will be supported in future." << std::endl;
@@ -438,7 +429,6 @@ int getLayerParams(const onnx::NodeProto& node_proto, std::string& params)
     + " " + std::to_string(dilation_w)
     + " " + std::to_string(dilation_h);
 
-    std::cout << "INFO: The parameters are : " << pad_h << " " << pad_w << " " << stride_w << " " << stride_h << " " << kernel_w << " " << kernel_h << std::endl;
 }
 else if(layer_type == "MaxPool") {
 
@@ -472,7 +462,6 @@ int getLayerParams(const onnx::NodeProto& node_proto, std::string& params)
     + " " + std::to_string(pad_w)
     + " " + std::to_string(pad_h);
 
-    std::cout << "INFO: The parameters are: " << pad_h << " " << pad_w << " " << stride_w << " " << stride_h << " " << kernel_w << " " << kernel_h << std::endl;
 }
 else if(layer_type == "LRN") {
 
@@ -502,7 +491,6 @@ int getLayerParams(const onnx::NodeProto& node_proto, std::string& params)
     + " " + std::to_string(beta)
     + " " + std::to_string(bias);
 
-    std::cout << "INFO: The parameters: " << lrn_local_size << " " << alpha << " " << beta << " " << bias << std::endl;
 }
 
 return 0;
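The visible tails of these getLayerParams branches suggest each one builds params as a single space-separated string via std::to_string. A hedged sketch for the LRN case, reconstructing the leading terms from the deleted debug print (which listed lrn_local_size, alpha, beta, bias in that order); only the final beta and bias terms are actually shown in this hunk:

    // Sketch only: the first two terms are inferred, not confirmed by the diff.
    params = std::to_string(lrn_local_size)
           + " " + std::to_string(alpha)
           + " " + std::to_string(beta)
           + " " + std::to_string(bias);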
@@ -529,7 +517,6 @@ int parseOnnxGraph(
 
 for(int i=0; i < graph_proto.node_size(); i++) {
     const onnx::NodeProto node_proto = graph_proto.node(i);
-    std::cout << "INFO: Layer is : " << node_proto.op_type() << std::endl;
     std::string params;
     getLayerParams(node_proto, params);
 
@@ -543,7 +530,6 @@ int parseOnnxGraph(
     layer_details["output"] = layer_output;
     layer_details["params"] = params;
 
-    std::cout << "Input size is : " << node_proto.input_size() << std::endl;
     if(node_proto.input_size() > 1) {
         std::string layer_weights = node_proto.input(1);
         layer_details["weights"] = layer_weights;
