TDA4VM: Import and inference issue with a Caffe model in TIDL

Part Number: TDA4VM

Dear TIDL experts,

I am importing a customized Caffe model for an automotive application. The model prototxt is shown below:

layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 128
      dim: 128
    }
  }
}
layer {
  name: "ConvNd_1"
  type: "Convolution"
  bottom: "data"
  top: "BatchNorm_1"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    dilation: 1
  }
}
layer {
  name: "Threshold_1"
  type: "ReLU"
  bottom: "BatchNorm_1"
  top: "BatchNorm_1"
}
layer {
  name: "MaxPool2D_1"
  type: "Pooling"
  bottom: "BatchNorm_1"
  top: "MaxPool2D_1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 0
  }
}
layer {
  name: "ConvNd_2"
  type: "Convolution"
  bottom: "MaxPool2D_1"
  top: "BatchNorm_2"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Threshold_2"
  type: "ReLU"
  bottom: "BatchNorm_2"
  top: "BatchNorm_2"
}
layer {
  name: "ConvNd_3"
  type: "Convolution"
  bottom: "BatchNorm_2"
  top: "BatchNorm_3"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "ConvNd_4"
  type: "Convolution"
  bottom: "BatchNorm_2"
  top: "BatchNorm_4"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Cat_1"
  type: "Concat"
  bottom: "BatchNorm_3"
  bottom: "BatchNorm_4"
  top: "Cat_1"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Threshold_3"
  type: "ReLU"
  bottom: "Cat_1"
  top: "Cat_1"
}
layer {
  name: "ConvNd_5"
  type: "Convolution"
  bottom: "Cat_1"
  top: "BatchNorm_5"
  convolution_param {
    num_output: 16
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Threshold_4"
  type: "ReLU"
  bottom: "BatchNorm_5"
  top: "BatchNorm_5"
}
layer {
  name: "ConvNd_6"
  type: "Convolution"
  bottom: "BatchNorm_5"
  top: "BatchNorm_6"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "ConvNd_7"
  type: "Convolution"
  bottom: "BatchNorm_5"
  top: "BatchNorm_7"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Cat_2"
  type: "Concat"
  bottom: "BatchNorm_6"
  bottom: "BatchNorm_7"
  top: "Cat_2"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Threshold_5"
  type: "ReLU"
  bottom: "Cat_2"
  top: "Cat_2"
}
layer {
  name: "MaxPool2D_2"
  type: "Pooling"
  bottom: "Cat_2"
  top: "MaxPool2D_2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 0
  }
}
layer {
  name: "ConvNd_8"
  type: "Convolution"
  bottom: "MaxPool2D_2"
  top: "BatchNorm_8"
  convolution_param {
    num_output: 32
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Threshold_6"
  type: "ReLU"
  bottom: "BatchNorm_8"
  top: "BatchNorm_8"
}
layer {
  name: "ConvNd_9"
  type: "Convolution"
  bottom: "BatchNorm_8"
  top: "BatchNorm_9"
  convolution_param {
    num_output: 128
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "ConvNd_10"
  type: "Convolution"
  bottom: "BatchNorm_8"
  top: "BatchNorm_10"
  convolution_param {
    num_output: 128
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Cat_3"
  type: "Concat"
  bottom: "BatchNorm_9"
  bottom: "BatchNorm_10"
  top: "Cat_3"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Threshold_7"
  type: "ReLU"
  bottom: "Cat_3"
  top: "Cat_3"
}
layer {
  name: "ConvNd_11"
  type: "Convolution"
  bottom: "Cat_3"
  top: "BatchNorm_11"
  convolution_param {
    num_output: 32
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Threshold_8"
  type: "ReLU"
  bottom: "BatchNorm_11"
  top: "BatchNorm_11"
}
layer {
  name: "ConvNd_12"
  type: "Convolution"
  bottom: "BatchNorm_11"
  top: "BatchNorm_12"
  convolution_param {
    num_output: 128
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "ConvNd_13"
  type: "Convolution"
  bottom: "BatchNorm_11"
  top: "BatchNorm_13"
  convolution_param {
    num_output: 128
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Cat_4"
  type: "Concat"
  bottom: "BatchNorm_12"
  bottom: "BatchNorm_13"
  top: "Cat_4"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Threshold_9"
  type: "ReLU"
  bottom: "Cat_4"
  top: "Cat_4"
}
layer {
  name: "MaxPool2D_3"
  type: "Pooling"
  bottom: "Cat_4"
  top: "MaxPool2D_3"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 0
  }
}
layer {
  name: "ConvNd_14"
  type: "Convolution"
  bottom: "MaxPool2D_3"
  top: "BatchNorm_14"
  convolution_param {
    num_output: 48
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Threshold_10"
  type: "ReLU"
  bottom: "BatchNorm_14"
  top: "BatchNorm_14"
}
layer {
  name: "ConvNd_15"
  type: "Convolution"
  bottom: "BatchNorm_14"
  top: "BatchNorm_15"
  convolution_param {
    num_output: 192
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "ConvNd_16"
  type: "Convolution"
  bottom: "BatchNorm_14"
  top: "BatchNorm_16"
  convolution_param {
    num_output: 192
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Cat_5"
  type: "Concat"
  bottom: "BatchNorm_15"
  bottom: "BatchNorm_16"
  top: "Cat_5"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Threshold_11"
  type: "ReLU"
  bottom: "Cat_5"
  top: "Cat_5"
}
layer {
  name: "ConvNd_17"
  type: "Convolution"
  bottom: "Cat_5"
  top: "BatchNorm_17"
  convolution_param {
    num_output: 48
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Threshold_12"
  type: "ReLU"
  bottom: "BatchNorm_17"
  top: "BatchNorm_17"
}
layer {
  name: "ConvNd_18"
  type: "Convolution"
  bottom: "BatchNorm_17"
  top: "BatchNorm_18"
  convolution_param {
    num_output: 192
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "ConvNd_19"
  type: "Convolution"
  bottom: "BatchNorm_17"
  top: "BatchNorm_19"
  convolution_param {
    num_output: 192
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Cat_6"
  type: "Concat"
  bottom: "BatchNorm_18"
  bottom: "BatchNorm_19"
  top: "Cat_6"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Threshold_13"
  type: "ReLU"
  bottom: "Cat_6"
  top: "Cat_6"
}
layer {
  name: "ConvNd_20"
  type: "Convolution"
  bottom: "Cat_6"
  top: "BatchNorm_20"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Threshold_14"
  type: "ReLU"
  bottom: "BatchNorm_20"
  top: "BatchNorm_20"
}
layer {
  name: "ConvNd_21"
  type: "Convolution"
  bottom: "BatchNorm_20"
  top: "BatchNorm_21"
  convolution_param {
    num_output: 256
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "ConvNd_22"
  type: "Convolution"
  bottom: "BatchNorm_20"
  top: "BatchNorm_22"
  convolution_param {
    num_output: 256
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Cat_7"
  type: "Concat"
  bottom: "BatchNorm_21"
  bottom: "BatchNorm_22"
  top: "Cat_7"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Threshold_15"
  type: "ReLU"
  bottom: "Cat_7"
  top: "Cat_7"
}
layer {
  name: "ConvNd_23"
  type: "Convolution"
  bottom: "Cat_7"
  top: "BatchNorm_23"
  convolution_param {
    num_output: 64
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Threshold_16"
  type: "ReLU"
  bottom: "BatchNorm_23"
  top: "BatchNorm_23"
}
layer {
  name: "ConvNd_24"
  type: "Convolution"
  bottom: "BatchNorm_23"
  top: "BatchNorm_24"
  convolution_param {
    num_output: 256
    bias_term: true
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "ConvNd_25"
  type: "Convolution"
  bottom: "BatchNorm_23"
  top: "BatchNorm_25"
  convolution_param {
    num_output: 256
    bias_term: true
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    dilation: 1
  }
}
layer {
  name: "Cat_8"
  type: "Concat"
  bottom: "BatchNorm_24"
  bottom: "BatchNorm_25"
  top: "Cat_8"
  concat_param {
    axis: 1
  }
}
layer {
  name: "Threshold_17"
  type: "ReLU"
  bottom: "Cat_8"
  top: "Cat_8"
}
layer {
  name: "AdaptiveAvgPool2d_1"
  type: "Pooling"
  bottom: "Cat_8"
  top: "AdaptiveAvgPool2d_1_"
  pooling_param {
    pool: AVE
    global_pooling: true
  }
}
layer {
  name: "flatten"
  type: "Flatten"
  bottom: "AdaptiveAvgPool2d_1_"
  top: "AdaptiveAvgPool2d_1"
  flatten_param {
    axis: 1
  }
}
layer {
  name: "Addmm_1"
  type: "InnerProduct"
  bottom: "AdaptiveAvgPool2d_1"
  top: "Addmm_1"
  inner_product_param {
    num_output: 144
    bias_term: true
  }
}
layer {
  name: "Addmm_2"
  type: "InnerProduct"
  bottom: "AdaptiveAvgPool2d_1"
  top: "Addmm_2"
  inner_product_param {
    num_output: 4
    bias_term: true
  }
}
layer {
  name: "Addmm_3"
  type: "InnerProduct"
  bottom: "AdaptiveAvgPool2d_1"
  top: "occ_prob"
  inner_product_param {
    num_output: 2
    bias_term: true
  }
}

The SDK is PROCESSOR-SDK-RTOS-DRA8X-TDA4X (V07.00.00.11).

The import utility runs normally, but when the imported model is executed on the target, C7x_1 appears to halt without any warning or error.
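
For reference, my import configuration looks roughly like the following. This is only a sketch, not my exact file: the file paths, calibration list, and normalization values are placeholders (modelType = 0 selects Caffe):

modelType          = 0
numParamBits       = 8
inputNetFile       = "model.prototxt"
inputParamsFile    = "model.caffemodel"
outputNetFile      = "tidl_net_model.bin"
outputParamsFile   = "tidl_io_model_"
inDataNorm         = 1
inMean             = 128 128 128
inScale            = 0.0078125 0.0078125 0.0078125
inData             = "calib_list.txt"
postProcType       = 0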

As another experiment, I removed the following layer from the model:

layer {
  name: "Addmm_1"
  type: "InnerProduct"
  bottom: "AdaptiveAvgPool2d_1"
  top: "Addmm_1"
  inner_product_param {
    num_output: 144
    bias_term: true
  }
}

With that layer removed, inference seems to run, even though the results may then be incorrect.
It also runs if I keep the layer but set "num_output" to a value other than 144.
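
For illustration, this is the kind of modified layer that does run; the value 128 below is just an arbitrary example, not the only one I tried:

layer {
  name: "Addmm_1"
  type: "InnerProduct"
  bottom: "AdaptiveAvgPool2d_1"
  top: "Addmm_1"
  inner_product_param {
    num_output: 128   # example value; the values I tried other than 144 all ran
    bias_term: true
  }
}
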
Please help me figure this out. Thanks!