num_bits = 8
accuracy = 1
output_dir = 'model-artifacts/onnx/east'

compile_options = {
    'tidl_tools_path': os.environ['TIDL_TOOLS_PATH'],
    'artifacts_folder': output_dir,
    'tensor_bits': num_bits,
    'accuracy_level': accuracy,
    'advanced_options:calibration_frames': len(calib_images),
    'advanced_options:calibration_iterations': 3,  # used if accuracy_level = 1
    'debug_level': 3,
    # Comma separated string of operator types as defined by ONNX runtime, ex "MaxPool, Concat"
    'deny_list': "Resize"
}
These are the compilation options we are using.
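calib_images is the list of calibration frames referenced above; in this run it holds two images (hence num_frames_calibration = 2 in the log below). A minimal sketch of how we populate it, with the directory and extension as placeholders rather than our exact paths:

import glob

# Placeholder directory/extension; in our script this points at two
# representative frames used for quantization calibration.
calib_images = sorted(glob.glob('calibration_images/*.jpg'))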
The following function is called from __main__:
def test_onnx(onnx_path, test_image_path):
    print("CHECK - 1")
    try:
        onnx.shape_inference.infer_shapes_path(onnx_path, onnx_path)
    except Exception as e:
        print(e)
        # return

    so = onnxruntime.SessionOptions()
    # so.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
    EP_list = ['TIDLCompilationProvider', 'CPUExecutionProvider']
    try:
        ort_session = onnxruntime.InferenceSession(
            onnx_path,
            providers=EP_list,
            provider_options=[compile_options, {}],
            sess_options=so)
    except Exception as e:
        print("FAILED to load ort_session")
        print(e)
        return
    print("CHECK - 2")

    img = Image.open(test_image_path)
    # You may need the resize_img function from your original code
    img, ratio_h, ratio_w = resize_img(img)
    img_tensor = load_pil(img)
    print("IMAGE SHAPE TENSOR", np.shape(img_tensor))

    # Perform inference
    ort_inputs = {ort_session.get_inputs()[0].name: img_tensor.numpy()}
    for i in range(len(calib_images)):
        ort_outputs = ort_session.run(None, ort_inputs)

    # Post-process the inference results
    score = ort_outputs[0]
    geo = ort_outputs[1]
    boxes = get_boxes(score.squeeze(0), geo.squeeze(0))
    adjusted_boxes = adjust_ratio(boxes, ratio_w, ratio_h)

    # Visualize and save the results
    result_img = plot_boxes(img, adjusted_boxes)
    result_img.show()
    result_img.save("result.jpg")
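For completeness, a minimal sketch of how test_onnx is invoked from __main__ in our script (the model path shown here is a placeholder; img_1.jpg is the test frame printed in the log below):

if __name__ == "__main__":
    # Placeholder model path; the image is the frame visible in the log output.
    onnx_path = "east.onnx"
    test_image_path = "img_1.jpg"
    test_onnx(onnx_path, test_image_path)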
Output logs (the output includes the structure of the model we are trying to compile):
opset version of the model: 13
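We checked the opset with the onnx package; a quick sketch of the check (the model path is a placeholder):

import onnx

# Placeholder path for the exported EAST model.
model = onnx.load("east.onnx")
for opset in model.opset_import:
    if opset.domain in ("", "ai.onnx"):
        print("opset:", opset.version)  # prints 13 for our export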
Link to the original repository: https://github.com/SakuraRiven/EAST
The compilation is performed in a Docker container built with the edgeai-tidl-tools image (https://github.com/TexasInstruments/edgeai-tidl-tools/blob/master/docs/advanced_setup.md#docker-based-setup-for-x86_pc).
root@904c189d1f61:/home/root# python3 onnx-detect.py graph torch_jit ( %input[FLOAT, 1x3x704x1280] ) initializers ( %output.conv1.weight[FLOAT, 1x32x1x1] %output.conv1.bias[FLOAT, 1] %output.conv2.weight[FLOAT, 4x32x1x1] %output.conv2.bias[FLOAT, 4] %output.conv3.weight[FLOAT, 1x32x1x1] %output.conv3.bias[FLOAT, 1] %onnx::Conv_244[FLOAT, 64x3x3x3] %onnx::Conv_245[FLOAT, 64] %onnx::Conv_247[FLOAT, 64x64x3x3] %onnx::Conv_248[FLOAT, 64] %onnx::Conv_250[FLOAT, 128x64x3x3] %onnx::Conv_251[FLOAT, 128] %onnx::Conv_253[FLOAT, 128x128x3x3] %onnx::Conv_254[FLOAT, 128] %onnx::Conv_256[FLOAT, 256x128x3x3] %onnx::Conv_257[FLOAT, 256] %onnx::Conv_259[FLOAT, 256x256x3x3] %onnx::Conv_260[FLOAT, 256] %onnx::Conv_262[FLOAT, 256x256x3x3] %onnx::Conv_263[FLOAT, 256] %onnx::Conv_265[FLOAT, 512x256x3x3] %onnx::Conv_266[FLOAT, 512] %onnx::Conv_268[FLOAT, 512x512x3x3] %onnx::Conv_269[FLOAT, 512] %onnx::Conv_271[FLOAT, 512x512x3x3] %onnx::Conv_272[FLOAT, 512] %onnx::Conv_274[FLOAT, 512x512x3x3] %onnx::Conv_275[FLOAT, 512] %onnx::Conv_277[FLOAT, 512x512x3x3] %onnx::Conv_278[FLOAT, 512] %onnx::Conv_280[FLOAT, 512x512x3x3] %onnx::Conv_281[FLOAT, 512] %onnx::Conv_283[FLOAT, 128x1024x1x1] %onnx::Conv_284[FLOAT, 128] %onnx::Conv_286[FLOAT, 128x128x3x3] %onnx::Conv_287[FLOAT, 128] %onnx::Conv_289[FLOAT, 64x384x1x1] %onnx::Conv_290[FLOAT, 64] %onnx::Conv_292[FLOAT, 64x64x3x3] %onnx::Conv_293[FLOAT, 64] %onnx::Conv_295[FLOAT, 32x192x1x1] %onnx::Conv_296[FLOAT, 32] %onnx::Conv_298[FLOAT, 32x32x3x3] %onnx::Conv_299[FLOAT, 32] %onnx::Conv_301[FLOAT, 32x32x3x3] %onnx::Conv_302[FLOAT, 32] %onnx::Resize_303[FLOAT, 4] ) { %onnx::Resize_305 = Identity(%onnx::Resize_303) %onnx::Resize_304 = Identity(%onnx::Resize_303) %input.4 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input, %onnx::Conv_244, %onnx::Conv_245) %onnx::Conv_149 = Relu(%input.4) %input.12 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_149, %onnx::Conv_247, %onnx::Conv_248) %onnx::MaxPool_152 = Relu(%input.12) %input.16 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_152) %input.24 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.16, %onnx::Conv_250, %onnx::Conv_251) %onnx::Conv_156 = Relu(%input.24) %input.32 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_156, %onnx::Conv_253, %onnx::Conv_254) %onnx::MaxPool_159 = Relu(%input.32) %input.36 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_159) %input.44 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.36, %onnx::Conv_256, %onnx::Conv_257) %onnx::Conv_163 = Relu(%input.44) %input.52 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_163, %onnx::Conv_259, %onnx::Conv_260) %onnx::Conv_166 = Relu(%input.52) %input.60 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_166, %onnx::Conv_262, %onnx::Conv_263) %onnx::MaxPool_169 = Relu(%input.60) %input.64 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_169) %input.72 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.64, %onnx::Conv_265, 
%onnx::Conv_266) %onnx::Conv_173 = Relu(%input.72) %input.80 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_173, %onnx::Conv_268, %onnx::Conv_269) %onnx::Conv_176 = Relu(%input.80) %input.88 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_176, %onnx::Conv_271, %onnx::Conv_272) %onnx::MaxPool_179 = Relu(%input.88) %input.92 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_179) %input.100 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.92, %onnx::Conv_274, %onnx::Conv_275) %onnx::Conv_183 = Relu(%input.100) %input.108 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_183, %onnx::Conv_277, %onnx::Conv_278) %onnx::Conv_186 = Relu(%input.108) %input.116 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_186, %onnx::Conv_280, %onnx::Conv_281) %onnx::MaxPool_189 = Relu(%input.116) %onnx::Resize_190 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_189) %onnx::Concat_195 = Resize[coordinate_transformation_mode = 'align_corners', cubic_coeff_a = -0.75, mode = 'linear', nearest_mode = 'floor'](%onnx::Resize_190, %, %onnx::Resize_303) %input.120 = Concat[axis = 1](%onnx::Concat_195, %input.92) %input.128 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%input.120, %onnx::Conv_283, %onnx::Conv_284) %input.132 = Relu(%input.128) %input.140 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.132, %onnx::Conv_286, %onnx::Conv_287) %y = Relu(%input.140) %onnx::Concat_207 = Resize[coordinate_transformation_mode = 'align_corners', cubic_coeff_a = -0.75, mode = 'linear', nearest_mode = 'floor'](%y, %, %onnx::Resize_304) %input.144 = Concat[axis = 1](%onnx::Concat_207, %input.64) %input.152 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%input.144, %onnx::Conv_289, %onnx::Conv_290) %input.156 = Relu(%input.152) %input.164 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.156, %onnx::Conv_292, %onnx::Conv_293) %y.3 = Relu(%input.164) %onnx::Concat_219 = Resize[coordinate_transformation_mode = 'align_corners', cubic_coeff_a = -0.75, mode = 'linear', nearest_mode = 'floor'](%y.3, %, %onnx::Resize_305) %input.168 = Concat[axis = 1](%onnx::Concat_219, %input.36) %input.176 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%input.168, %onnx::Conv_295, %onnx::Conv_296) %input.180 = Relu(%input.176) %input.188 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.180, %onnx::Conv_298, %onnx::Conv_299) %input.192 = Relu(%input.188) %input.200 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.192, %onnx::Conv_301, %onnx::Conv_302) %input.204 = Relu(%input.200) %onnx::Sigmoid_230 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%input.204, %output.conv1.weight, %output.conv1.bias) %output = Sigmoid(%onnx::Sigmoid_230) %onnx::Sigmoid_232 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 
1], pads = [0, 0, 0, 0], strides = [1, 1]](%input.204, %output.conv2.weight, %output.conv2.bias) %onnx::Mul_233 = Sigmoid(%onnx::Sigmoid_232) %onnx::Mul_234 = Constant[value = <Scalar Tensor []>]() %onnx::Concat_235 = Mul(%onnx::Mul_233, %onnx::Mul_234) %onnx::Sigmoid_236 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%input.204, %output.conv3.weight, %output.conv3.bias) %onnx::Sub_237 = Sigmoid(%onnx::Sigmoid_236) %onnx::Sub_238 = Constant[value = <Scalar Tensor []>]() %onnx::Mul_239 = Sub(%onnx::Sub_237, %onnx::Sub_238) %onnx::Mul_240 = Constant[value = <Scalar Tensor []>]() %onnx::Concat_241 = Mul(%onnx::Mul_239, %onnx::Mul_240) %242 = Concat[axis = 1](%onnx::Concat_235, %onnx::Concat_241) return %output, %242 } img_1.jpg CHECK - 1 tidl_tools_path = /home/root/tidl_tools artifacts_folder = model-artifacts/onnx/east tidl_tensor_bits = 8 debug_level = 3 num_tidl_subgraphs = 16 tidl_denylist = Resize tidl_denylist_layer_name = tidl_denylist_layer_type = tidl_allowlist_layer_name = model_type = tidl_calibration_accuracy_level = 7 tidl_calibration_options:num_frames_calibration = 2 tidl_calibration_options:bias_calibration_iterations = 3 mixed_precision_factor = -1.000000 model_group_id = 0 power_of_2_quantization = 2 enable_high_resolution_optimization = 0 pre_batchnorm_fold = 1 add_data_convert_ops = 0 output_feature_16bit_names_list = m_params_16bit_names_list = reserved_compile_constraints_flag = 1601 ti_internal_reserved_1 = ****** WARNING : Network not identified as Object Detection network : (1) Ignore if network is not Object Detection network (2) If network is Object Detection network, please specify "model_type":"OD" as part of OSRT compilation options****** Supported TIDL layer type --- Conv -- Conv_2 Supported TIDL layer type --- Relu -- Relu_3 Supported TIDL layer type --- Conv -- Conv_4 Supported TIDL layer type --- Relu -- Relu_5 Supported TIDL layer type --- MaxPool -- MaxPool_6 Supported TIDL layer type --- Conv -- Conv_7 Supported TIDL layer type --- Relu -- Relu_8 Supported TIDL layer type --- Conv -- Conv_9 Supported TIDL layer type --- Relu -- Relu_10 Supported TIDL layer type --- MaxPool -- MaxPool_11 Supported TIDL layer type --- Conv -- Conv_12 Supported TIDL layer type --- Relu -- Relu_13 Supported TIDL layer type --- Conv -- Conv_14 Supported TIDL layer type --- Relu -- Relu_15 Supported TIDL layer type --- Conv -- Conv_16 Supported TIDL layer type --- Relu -- Relu_17 Supported TIDL layer type --- MaxPool -- MaxPool_18 Supported TIDL layer type --- Conv -- Conv_19 Supported TIDL layer type --- Relu -- Relu_20 Supported TIDL layer type --- Conv -- Conv_21 Supported TIDL layer type --- Relu -- Relu_22 Supported TIDL layer type --- Conv -- Conv_23 Supported TIDL layer type --- Relu -- Relu_24 Supported TIDL layer type --- MaxPool -- MaxPool_25 Supported TIDL layer type --- Conv -- Conv_26 Supported TIDL layer type --- Relu -- Relu_27 Supported TIDL layer type --- Conv -- Conv_28 Supported TIDL layer type --- Relu -- Relu_29 Supported TIDL layer type --- Conv -- Conv_30 Supported TIDL layer type --- Relu -- Relu_31 Supported TIDL layer type --- MaxPool -- MaxPool_32 Op type 'Resize' added to unsupported nodes as specified in deny list Supported TIDL layer type --- Concat -- Concat_34 Supported TIDL layer type --- Conv -- Conv_35 Supported TIDL layer type --- Relu -- Relu_36 Supported TIDL layer type --- Conv -- Conv_37 Supported TIDL layer type --- Relu -- Relu_38 Op type 'Resize' added to unsupported nodes as 
specified in deny list Supported TIDL layer type --- Concat -- Concat_40 Supported TIDL layer type --- Conv -- Conv_41 Supported TIDL layer type --- Relu -- Relu_42 Supported TIDL layer type --- Conv -- Conv_43 Supported TIDL layer type --- Relu -- Relu_44 Op type 'Resize' added to unsupported nodes as specified in deny list Supported TIDL layer type --- Concat -- Concat_46 Supported TIDL layer type --- Conv -- Conv_47 Supported TIDL layer type --- Relu -- Relu_48 Supported TIDL layer type --- Conv -- Conv_49 Supported TIDL layer type --- Relu -- Relu_50 Supported TIDL layer type --- Conv -- Conv_51 Supported TIDL layer type --- Relu -- Relu_52 Supported TIDL layer type --- Conv -- Conv_59 Supported TIDL layer type --- Sigmoid -- Sigmoid_60 Supported TIDL layer type --- Sub -- Sub_62 Supported TIDL layer type --- Mul -- Mul_64 Supported TIDL layer type --- Conv -- Conv_55 Supported TIDL layer type --- Sigmoid -- Sigmoid_56 Supported TIDL layer type --- Mul -- Mul_58 Supported TIDL layer type --- Concat -- Concat_65 Supported TIDL layer type --- Conv -- Conv_53 Supported TIDL layer type --- Sigmoid -- Sigmoid_54 Preliminary subgraphs created = 4 Final number of subgraphs created are : 4, - Offloaded Nodes - 58, Total Nodes - 61 Node in deny list...delegated to ARM --- layer type - Resize, Node name - Resize_33 Node in deny list...delegated to ARM --- layer type - Resize, Node name - Resize_39 Node in deny list...delegated to ARM --- layer type - Resize, Node name - Resize_45 /home/root/tidl_tools/tidl_graphVisualiser_runtimes.out: error while loading shared libraries: libcgraph.so.6: cannot open shared object file: No such file or directory Running runtimes graphviz - /home/root/tidl_tools/tidl_graphVisualiser_runtimes.out model-artifacts/onnx/east/allowedNode.txt model-artifacts/onnx/east/tempDir/graphvizInfo.txt model-artifacts/onnx/east/tempDir/runtimes_visualization.svg *** In TIDL_createStateImportFunc *** Compute on node : TIDLExecutionProvider_TIDL_0_0 0, Conv, 3, 1, input, input.4 1, Relu, 1, 1, input.4, onnx::Conv_149 2, Conv, 3, 1, onnx::Conv_149, input.12 3, Relu, 1, 1, input.12, onnx::MaxPool_152 4, MaxPool, 1, 1, onnx::MaxPool_152, input.16 5, Conv, 3, 1, input.16, input.24 6, Relu, 1, 1, input.24, onnx::Conv_156 7, Conv, 3, 1, onnx::Conv_156, input.32 8, Relu, 1, 1, input.32, onnx::MaxPool_159 9, MaxPool, 1, 1, onnx::MaxPool_159, input.36 10, Conv, 3, 1, input.36, input.44 11, Relu, 1, 1, input.44, onnx::Conv_163 12, Conv, 3, 1, onnx::Conv_163, input.52 13, Relu, 1, 1, input.52, onnx::Conv_166 14, Conv, 3, 1, onnx::Conv_166, input.60 15, Relu, 1, 1, input.60, onnx::MaxPool_169 16, MaxPool, 1, 1, onnx::MaxPool_169, input.64 17, Conv, 3, 1, input.64, input.72 18, Relu, 1, 1, input.72, onnx::Conv_173 19, Conv, 3, 1, onnx::Conv_173, input.80 20, Relu, 1, 1, input.80, onnx::Conv_176 21, Conv, 3, 1, onnx::Conv_176, input.88 22, Relu, 1, 1, input.88, onnx::MaxPool_179 23, MaxPool, 1, 1, onnx::MaxPool_179, input.92 24, Conv, 3, 1, input.92, input.100 25, Relu, 1, 1, input.100, onnx::Conv_183 26, Conv, 3, 1, onnx::Conv_183, input.108 27, Relu, 1, 1, input.108, onnx::Conv_186 28, Conv, 3, 1, onnx::Conv_186, input.116 29, Relu, 1, 1, input.116, onnx::MaxPool_189 30, MaxPool, 1, 1, onnx::MaxPool_189, onnx::Resize_190 Input tensor name - input Output tensor name - input.36 Output tensor name - input.64 Output tensor name - input.92 Output tensor name - onnx::Resize_190 *** In TIDL_createStateImportFunc *** Compute on node : TIDLExecutionProvider_TIDL_1_1 0, Concat, 2, 1, onnx::Concat_195, 
input.120 1, Conv, 3, 1, input.120, input.128 2, Relu, 1, 1, input.128, input.132 3, Conv, 3, 1, input.132, input.140 4, Relu, 1, 1, input.140, y Input tensor name - onnx::Concat_195 Input tensor name - input.92 Output tensor name - y *** In TIDL_createStateImportFunc *** Compute on node : TIDLExecutionProvider_TIDL_2_2 0, Concat, 2, 1, onnx::Concat_207, input.144 1, Conv, 3, 1, input.144, input.152 2, Relu, 1, 1, input.152, input.156 3, Conv, 3, 1, input.156, input.164 4, Relu, 1, 1, input.164, y.3 Input tensor name - onnx::Concat_207 Input tensor name - input.64 Output tensor name - y.3 *** In TIDL_createStateImportFunc *** Compute on node : TIDLExecutionProvider_TIDL_3_3 0, Concat, 2, 1, onnx::Concat_219, input.168 1, Conv, 3, 1, input.168, input.176 2, Relu, 1, 1, input.176, input.180 3, Conv, 3, 1, input.180, input.188 4, Relu, 1, 1, input.188, input.192 5, Conv, 3, 1, input.192, input.200 6, Relu, 1, 1, input.200, input.204 7, Conv, 3, 1, input.204, onnx::Sigmoid_230 8, Sigmoid, 1, 1, onnx::Sigmoid_230, output 9, Conv, 3, 1, input.204, onnx::Sigmoid_232 10, Sigmoid, 1, 1, onnx::Sigmoid_232, onnx::Mul_233 11, Mul, 2, 1, onnx::Mul_233, onnx::Concat_235 12, Conv, 3, 1, input.204, onnx::Sigmoid_236 13, Sigmoid, 1, 1, onnx::Sigmoid_236, onnx::Sub_237 14, Sub, 2, 1, onnx::Sub_237, onnx::Mul_239 15, Mul, 2, 1, onnx::Mul_239, onnx::Concat_241 16, Concat, 2, 1, onnx::Concat_235, 242 Input tensor name - onnx::Concat_219 Input tensor name - input.36 Output tensor name - 242 Output tensor name - output CHECK - 2 IMAGE SHAPE TENSOR torch.Size([1, 3, 704, 1280]) In TIDL_onnxRtImportInit subgraph_name=onnx::Resize_190 Layer 0, subgraph id onnx::Resize_190, name=input.36 Layer 1, subgraph id onnx::Resize_190, name=input.64 Layer 2, subgraph id onnx::Resize_190, name=input.92 Layer 3, subgraph id onnx::Resize_190, name=onnx::Resize_190 Layer 4, subgraph id onnx::Resize_190, name=input In TIDL_runtimesOptimizeNet: LayerIndex = 36, dataIndex = 32 ************** Frame index 1 : Running float import ************* In TIDL_runtimesPostProcessNet /home/root/tidl_tools/tidl_graphVisualiser.out: error while loading shared libraries: libcgraph.so.6: cannot open shared object file: No such file or directory **************************************************** ** ALL MODEL CHECK PASSED ** **************************************************** ************ in TIDL_subgraphRtCreate ************ The soft limit is 2048 The hard limit is 2048 MEM: Init ... !!! MEM: Init ... Done !!! 0.0s: VX_ZONE_INIT:Enabled 0.8s: VX_ZONE_ERROR:Enabled 0.10s: VX_ZONE_WARNING:Enabled 0.1596s: VX_ZONE_INIT:[tivxInit:184] Initialization Done !!! 
************ TIDL_subgraphRtCreate done ************ ******* In TIDL_subgraphRtInvoke ******** 0 1.00000 -1.00000 1.00000 6 1 1.00000 0.00000 10.97984 6 2 1.00000 0.00000 8.12576 6 3 1.00000 0.00000 8.12576 6 4 1.00000 0.00000 7.14118 6 5 1.00000 0.00000 3.65851 6 6 1.00000 0.00000 3.65851 6 7 1.00000 0.00000 3.80828 6 8 1.00000 0.00000 2.07062 6 9 1.00000 0.00000 2.23841 6 10 1.00000 0.00000 2.23841 6 11 1.00000 0.00000 3.03889 6 12 1.00000 0.00000 2.26227 6 13 1.00000 0.00000 3.44511 6 14 1.00000 0.00000 3.44511 6 15 1.00000 0.00000 2.78007 6 16 1.00000 0.00000 1.93240 6 17 1.00000 0.00000 3.68336 6 18 1.00000 0.00000 3.68336 6 Layer, Layer Cycles,kernelOnlyCycles, coreLoopCycles,LayerSetupCycles,dmaPipeupCycles, dmaPipeDownCycles, PrefetchCycles,copyKerCoeffCycles,LayerDeinitCycles,LastBlockCycles, paddingTrigger, paddingWait,LayerWithoutPad,LayerHandleCopy, BackupCycles, RestoreCycles, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Sum of Layer Cycles 0 Segmentation fault (core dumped)