@@ -44,7 +44,6 @@ int calculateTensorDims
         std::string layer_type = layer_details.find("type")->second;
         std::string layer_input = layer_details.find("input")->second;
         std::string layer_output = layer_details.find("output")->second;
-        std::cout << "Input output type is " << layer_input << " " << layer_output << " " << layer_type << std::endl;
         int in_w, in_h, in_c, in_n;
         int out_w, out_h, out_c, out_n;
         std::vector<int> output_dims;
@@ -83,30 +82,25 @@ int calculateTensorDims
 
             if (layer_details.size() > 5) {
                 in_out_map[layer_bias] = bias_dims;
-                std::cout << "Bias dims aer " << layer_bias << " " << bias_dims[0] << std::endl;
             }
-            std::cout << "Conv" << std::endl;
         }
         else if (layer_type == "Relu") {
             out_w = in_w;
             out_h = in_h;
             out_c = in_c;
             out_n = in_n;
-            std::cout << "Relu" << std::endl;
         }
         else if (layer_type == "LRN") {
             out_w = in_w;
             out_h = in_h;
             out_c = in_c;
             out_n = in_n;
-            std::cout << "LRN" << std::endl;
         }
         else if (layer_type == "Dropout") {
             out_w = in_w;
             out_h = in_h;
             out_c = in_c;
             out_n = in_n;
-            std::cout << "Dropout" << std::endl;
         }
         else if (layer_type == "MaxPool") {
             std::string params = layer_details.find("params")->second;
@@ -121,7 +115,6 @@ int calculateTensorDims
 
             out_c = in_c;
             out_n = in_n;
-            std::cout << "MaxPool" << std::endl;
         }
         else if (layer_type == "Gemm") {
 
@@ -155,20 +148,15 @@ int calculateTensorDims
             if (layer_details.size() > 5) {
                 in_out_map[layer_bias] = bias_dims;
             }
-            std::cout << "Gemm" << std::endl;
-
         }
 
         output_dims.push_back(out_n);
         output_dims.push_back(out_c);
         output_dims.push_back(out_h);
         output_dims.push_back(out_w);
         input_tensor_dim_map[layer_output] = output_dims;
-        std::cout << "Added output : " << layer_output << std::endl;
         in_out_map[layer_output] = output_dims;
 
-        std::cout << "Out dims: " << out_w << " " << out_h << " " << out_c << " " << out_n << std::endl;
-
         tensorDims[i] = in_out_map;
     }
 
@@ -341,6 +329,9 @@ int writeGDF
         else if (layer_type == "Dropout") {
             ofsGDF << "node org.khronos.openvx.copy " << layer_input << " " << layer_output << std::endl;
         }
+        else if (layer_type == "Softmax") {
+            ofsGDF << "node org.khronos.nn_extension.softmax_layer " << layer_input << " " << layer_output << std::endl;
+        }
 
         if (i == net.size() - 1) {
             ofsGDF << "write " << layer_output << " output.f32" << std::endl;
@@ -389,7 +380,7 @@ int dumpOnnxModel(const onnx::GraphProto& graph_proto)
             }
 
             fclose(fs);
-            std::cout << "INFO: Weights dumped for: " << tensor_proto.name() << std::endl;
+            // std::cout << "INFO: Weights dumped for: " << tensor_proto.name() << std::endl;
         }
         else {
             std::cout << "ERROR: Unsupported data types will be supported in future." << std::endl;
@@ -438,7 +429,6 @@ int getLayerParams(const onnx::NodeProto& node_proto, std::string& params)
                     + " " + std::to_string(dilation_w)
                     + " " + std::to_string(dilation_h);
 
-        std::cout << "INFO: The parameters are : " << pad_h << " " << pad_w << " " << stride_w << " " << stride_h << " " << kernel_w << " " << kernel_h << std::endl;
     }
     else if (layer_type == "MaxPool") {
 
@@ -472,7 +462,6 @@ int getLayerParams(const onnx::NodeProto& node_proto, std::string& params)
                     + " " + std::to_string(pad_w)
                     + " " + std::to_string(pad_h);
 
-        std::cout << "INFO: The parameters are: " << pad_h << " " << pad_w << " " << stride_w << " " << stride_h << " " << kernel_w << " " << kernel_h << std::endl;
     }
     else if (layer_type == "LRN") {
 
@@ -502,7 +491,6 @@ int getLayerParams(const onnx::NodeProto& node_proto, std::string& params)
                     + " " + std::to_string(beta)
                     + " " + std::to_string(bias);
 
-        std::cout << "INFO: The parameters: " << lrn_local_size << " " << alpha << " " << beta << " " << bias << std::endl;
     }
 
     return 0;
@@ -529,7 +517,6 @@ int parseOnnxGraph(
 
     for (int i=0; i < graph_proto.node_size(); i++) {
         const onnx::NodeProto node_proto = graph_proto.node(i);
-        std::cout << "INFO: Layer is : " << node_proto.op_type() << std::endl;
         std::string params;
         getLayerParams(node_proto, params);
 
@@ -543,7 +530,6 @@ int parseOnnxGraph(
         layer_details["output"] = layer_output;
         layer_details["params"] = params;
 
-        std::cout << "Input size is : " << node_proto.input_size() << std::endl;
         if (node_proto.input_size() > 1) {
             std::string layer_weights = node_proto.input(1);
             layer_details["weights"] = layer_weights;