TDA4VMXEVM: Segmentation Fault while importing a Pointnet Caffe model using TIDL importer

Part Number: TDA4VMXEVM

Hello Friends,

   I tried importing a PointNet-based Caffe model using the TIDL importer, and the import failed with the following output:

Name of the Network : pointnet_cls_basic
WARNING: Reshape layer only supports following with avg-pooling/inner product/ssd context.
WARNING: Reshape layer only supports following with avg-pooling/inner product/ssd context.
WARNING: Reshape layer only supports following with avg-pooling/inner product/ssd context.
WARNING: Reshape layer only supports following with avg-pooling/inner product/ssd context.
WARNING: Reshape layer only supports following with avg-pooling/inner product/ssd context.
WARNING: Reshape layer only supports following with avg-pooling/inner product/ssd context.
Segmentation fault

I have attached the corresponding prototxt file for your reference (I renamed the extension, since the tool does not allow uploading files with a .prototxt extension). I would appreciate your help in resolving this issue.
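For context, the import is driven by a plain-text configuration file passed to the TIDL import tool. Below is a minimal sketch of such a config, assuming the standard tidl_model_import.out utility from the Processor SDK; the file names and input dimensions are placeholders rather than my actual values:

modelType        = 0                               # 0 selects the Caffe front end
numParamBits     = 8                               # 8-bit parameter quantization
inputNetFile     = "pointnet_cls_basic.prototxt"   # network definition (attached below)
inputParamsFile  = "pointnet_cls_basic.caffemodel" # trained weights
outputNetFile    = "tidl_net_pointnet.bin"         # imported network model
outputParamsFile = "tidl_io_pointnet_"             # prefix for the imported I/O descriptors
inWidth          = 3                               # placeholder input dimensions
inHeight         = 2048
inNumChannels    = 1

The importer is then invoked as ./tidl_model_import.out <config file>, which is where the segmentation fault occurs.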

Best Regards,

Vijay

name: "pointnet_cls_basic"

# ---------------------- data layer ---------------
#layer {
#  name: "data"
#  type: "HDF5Data"
#  top: "data"
#  top: "label"
#  hdf5_data_param {
#    source: "data/modelnet40_ply_hdf5_2048/train_files.txt"
#    batch_size: 32
#  }
#  include: { phase: TRAIN }
#}
#layer {
 # name: "data"
 # type: "HDF5Data"
 # top: "data"
 # top: "label"
 # hdf5_data_param {
 #   source: "data/modelnet40_ply_hdf5_2048/test_files.txt"
 #   batch_size: 32
 # }
 # include: { phase: TEST }
#}
layer {
    name: "reshape"
    type: "Reshape"
    bottom: "data"
    top: "data_reshape"
    reshape_param {
      shape {
        dim: 0  # copy the dimension from below
        dim: 1
        dim: -1 # infer it from the other dimensions
        dim: 3
      }
    }
}
# -------------------- input transform net ---------------------
layer {
  name: "conv1_tnet1"
  type: "Convolution"
  bottom: "data_reshape"
  top: "conv1_tnet1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_w: 3
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn1_tnet1"
  type: "BatchNorm"
  bottom: "conv1_tnet1"
  top: "conv1_tnet1"
}
layer {
    bottom: "conv1_tnet1"
    top: "conv1_tnet1"
    name: "scale1_tnet1"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu1_tnet1"
  type: "ReLU"
  bottom: "conv1_tnet1"
  top: "conv1_tnet1"
}

layer {
  name: "conv2_tnet1"
  type: "Convolution"
  bottom: "conv1_tnet1"
  top: "conv2_tnet1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 128
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn2_tnet1"
  type: "BatchNorm"
  bottom: "conv2_tnet1"
  top: "conv2_tnet1"
}
layer {
    bottom: "conv2_tnet1"
    top: "conv2_tnet1"
    name: "scale2_tnet1"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu2_tnet1"
  type: "ReLU"
  bottom: "conv2_tnet1"
  top: "conv2_tnet1"
}

layer {
  name: "conv3_tnet1"
  type: "Convolution"
  bottom: "conv2_tnet1"
  top: "conv3_tnet1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 1024
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn3_tnet1"
  type: "BatchNorm"
  bottom: "conv3_tnet1"
  top: "conv3_tnet1"
}
layer {
    bottom: "conv3_tnet1"
    top: "conv3_tnet1"
    name: "scale3_tnet1"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu3_tnet1"
  type: "ReLU"
  bottom: "conv3_tnet1"
  top: "conv3_tnet1"
}
layer {
  name: "pool_tnet1"
  type: "Pooling"
  bottom: "conv3_tnet1"
  top: "global_feat_tnet1"
  pooling_param {
    pool: MAX
    pad: 0
    kernel_h: 2048
    kernel_w: 1
    stride: 1
  }
}
layer {
  name: "fc1_tnet1"
  type: "InnerProduct"
  bottom: "global_feat_tnet1"
  top: "fc1_tnet1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 512
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn6_tnet1"
  type: "BatchNorm"
  bottom: "fc1_tnet1"
  top: "fc1_tnet1"
}
layer {
    bottom: "fc1_tnet1"
    top: "fc1_tnet1"
    name: "scale6_tnet1"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu6_tnet1"
  type: "ReLU"
  bottom: "fc1_tnet1"
  top: "fc1_tnet1"
}

layer {
  name: "fc2_tnet1"
  type: "InnerProduct"
  bottom: "fc1_tnet1"
  top: "fc2_tnet1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 256
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn7_tnet1"
  type: "BatchNorm"
  bottom: "fc2_tnet1"
  top: "fc2_tnet1"
}
layer {
    bottom: "fc2_tnet1"
    top: "fc2_tnet1"
    name: "scale7_tnet1"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu7_tnet1"
  type: "ReLU"
  bottom: "fc2_tnet1"
  top: "fc2_tnet1"
}
layer {
  name: "fc3_tnet1"
  type: "InnerProduct"
  bottom: "fc2_tnet1"
  top: "fc3_tnet1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 9
    weight_filler {
      type: "constant"
      value: 0
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
    name: "reshape_tnet1"
    type: "Reshape"
    bottom: "fc3_tnet1"
    top: "fc3_tnet1_reshape"
    reshape_param {
      shape {
        dim: -1
        dim: 3
        dim: 3
      }
    }
}
#layer {
#  name: "eye_tnet1"
#  type: "Python"
#  bottom: "fc3_tnet1"
#  top: "eye_tnet1"
#  python_param {
 #   module: "eye_matrix_layer"
 #   layer: "EyeMatrixLayer"
 #   param_str: "{\'K\': 3}"
 # }
#}
layer {
  name: "eltwise_sum_tnet1"
  type: "Eltwise"
  bottom: "fc3_tnet1_reshape"
 # bottom: "eye_tnet1"
  top: "transform1"
  eltwise_param { operation: SUM }
}
# -------------------- input transform ---------------------
#layer {
#  name: "matmul_input"
#  type: "MatrixMultiplication"
#  bottom: "data" # BxNx3
#  bottom: "transform1" # 3x3
#  top: "data_transform1"  # 10x1x1 # BxNx3
#}
layer {
    name: "reshape_input_data"
    type: "Reshape"
    bottom: "data_transform1"
    top: "data_transform1_reshape"
    reshape_param {
      shape {
        dim: 0  # copy the dimension from below
        dim: 1
        dim: -1 # infer it from the other dimensions
        dim: 3
      }
    }
}
# -------------------- mlp ---------------------
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data_transform1_reshape"
  top: "conv1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_w: 3
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
}
layer {
    bottom: "conv1"
    top: "conv1"
    name: "scale1"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}

layer {
  name: "conv2"
  type: "Convolution"
  bottom: "conv1"
  top: "conv2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "conv2"
}
layer {
    bottom: "conv2"
    top: "conv2"
    name: "scale2"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}


# -------------------- feat transform net ---------------------
layer {
  name: "conv1_tnet2"
  type: "Convolution"
  bottom: "conv2"
  top: "conv1_tnet2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn1_tnet2"
  type: "BatchNorm"
  bottom: "conv1_tnet2"
  top: "conv1_tnet2"
}
layer {
    bottom: "conv1_tnet2"
    top: "conv1_tnet2"
    name: "scale1_tnet2"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu1_tnet2"
  type: "ReLU"
  bottom: "conv1_tnet2"
  top: "conv1_tnet2"
}

layer {
  name: "conv2_tnet2"
  type: "Convolution"
  bottom: "conv1_tnet2"
  top: "conv2_tnet2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 128
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn2_tnet2"
  type: "BatchNorm"
  bottom: "conv2_tnet2"
  top: "conv2_tnet2"
}
layer {
    bottom: "conv2_tnet2"
    top: "conv2_tnet2"
    name: "scale2_tnet2"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu2_tnet2"
  type: "ReLU"
  bottom: "conv2_tnet2"
  top: "conv2_tnet2"
}

layer {
  name: "conv3_tnet2"
  type: "Convolution"
  bottom: "conv2_tnet2"
  top: "conv3_tnet2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 1024
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn3_tnet2"
  type: "BatchNorm"
  bottom: "conv3_tnet2"
  top: "conv3_tnet2"
}
layer {
    bottom: "conv3_tnet2"
    top: "conv3_tnet2"
    name: "scale3_tnet2"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu3_tnet2"
  type: "ReLU"
  bottom: "conv3_tnet2"
  top: "conv3_tnet2"
}
layer {
  name: "pool_tnet2"
  type: "Pooling"
  bottom: "conv3_tnet2"
  top: "global_feat_tnet2"
  pooling_param {
    pool: MAX
    pad: 0
    kernel_h: 2048
    kernel_w: 1
    stride: 1
  }
}
layer {
  name: "fc1_tnet2"
  type: "InnerProduct"
  bottom: "global_feat_tnet2"
  top: "fc1_tnet2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 512
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn6_tnet2"
  type: "BatchNorm"
  bottom: "fc1_tnet2"
  top: "fc1_tnet2"
}
layer {
    bottom: "fc1_tnet2"
    top: "fc1_tnet2"
    name: "scale6_tnet2"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu6_tnet2"
  type: "ReLU"
  bottom: "fc1_tnet2"
  top: "fc1_tnet2"
}

layer {
  name: "fc2_tnet2"
  type: "InnerProduct"
  bottom: "fc1_tnet2"
  top: "fc2_tnet2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 256
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn7_tnet2"
  type: "BatchNorm"
  bottom: "fc2_tnet2"
  top: "fc2_tnet2"
}
layer {
    bottom: "fc2_tnet2"
    top: "fc2_tnet2"
    name: "scale7_tnet2"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu7_tnet2"
  type: "ReLU"
  bottom: "fc2_tnet2"
  top: "fc2_tnet2"
}
layer {
  name: "fc3_tnet2"
  type: "InnerProduct"
  bottom: "fc2_tnet2"
  top: "fc3_tnet2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 4096 # 64*64
    weight_filler {
      type: "constant"
      value: 0
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
    name: "reshape_tnet2"
    type: "Reshape"
    bottom: "fc3_tnet2"
    top: "fc3_tnet2_reshape"
    reshape_param {
      shape {
        dim: -1
        dim: 64
        dim: 64
      }
    }
}
#layer {
#  name: "eye_tnet2"
#  type: "Python"
#  bottom: "fc3_tnet2"
#  top: "eye_tnet2"
#  python_param {
#    module: "eye_matrix_layer"
#    layer: "EyeMatrixLayer"
#	param_str: "{\'K\': 64}"
#  }
#}
layer {
  name: "eltwise_sum_tnet2"
  type: "Eltwise"
  bottom: "fc3_tnet2_reshape"
 # bottom: "eye_tnet2"
  top: "transform2"
  eltwise_param { operation: SUM }
}
# -------------------- feat transform ---------------------
layer {
  name: "perm_scale2"
  type: "Permute"
  bottom: "conv2" # Bx64xNx1
  top: "scale2_permute" # BxNx64x1
  permute_param {
    order: 0
    order: 2
    order: 1
    order: 3
  }
}
layer {
    name: "reshape_scale2"
    type: "Reshape"
    bottom: "scale2_permute"  # BxNx64x1
    top: "scale2_reshape"  # BxNx64
    reshape_param {
      shape {
        dim: 0  # copy the dimension from below
        dim: -1 # infer it from the other dimensions
        dim: 64
      }
    }
}
#layer {
#  name: "matmul_feat"
#  type: "MatrixMultiplication"
#  bottom: "scale2_reshape" # BxNx64
#  bottom: "transform2" # 64x64
#  top: "scale2_transform2" # BxNx64
#}
layer {
  name: "perm_scale2_transform2"
  type: "Permute"
  bottom: "scale2_transform2" # BxNx64
  top: "scale2_permute_transform2" # Bx64xN
  permute_param {
    order: 0
    order: 2
    order: 1
  }
}
layer {
    name: "reshape_scale2_transform2"
    type: "Reshape"
    bottom: "scale2_permute_transform2" # Bx64xN
    top: "scale2_transform2_reshape" # Bx64xNx1
    reshape_param {
      shape {
        dim: 0  # copy the dimension from below
        dim: 0
        dim: 0
        dim: 1 
      }
    }
}

# -------------------- mlp ---------------------
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "scale2_transform2_reshape"
  top: "conv3"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn3"
  type: "BatchNorm"
  bottom: "conv3"
  top: "conv3"
}
layer {
    bottom: "conv3"
    top: "conv3"
    name: "scale3"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}

layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 128
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn4"
  type: "BatchNorm"
  bottom: "conv4"
  top: "conv4"
}
layer {
    bottom: "conv4"
    top: "conv4"
    name: "scale4"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}

layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 1024
    pad: 0
    kernel_w: 1
    kernel_h: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn5"
  type: "BatchNorm"
  bottom: "conv5"
  top: "conv5"
}
layer {
    bottom: "conv5"
    top: "conv5"
    name: "scale5"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}

# -------------------- max pool ---------------------
layer {
  name: "pool"
  type: "Pooling"
  bottom: "conv5"
  top: "global_feat"
  pooling_param {
    pool: MAX
    pad: 0
    kernel_h: 2048
    kernel_w: 1
    stride: 1
  }
}

# -------------------- fc ---------------------
layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "global_feat"
  top: "fc1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 512
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn6"
  type: "BatchNorm"
  bottom: "fc1"
  top: "fc1"
}
layer {
    bottom: "fc1"
    top: "fc1"
    name: "scale6"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc1"
  top: "fc1"
}

layer {
  name: "fc2"
  type: "InnerProduct"
  bottom: "fc1"
  top: "fc2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 256
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "bn7"
  type: "BatchNorm"
  bottom: "fc2"
  top: "fc2"
}
layer {
    bottom: "fc2"
    top: "fc2"
    name: "scale7"
    type: "Scale"
    scale_param {
        bias_term: true
    }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc2"
  top: "fc2"
}

layer {
  name: "drop1"
  type: "Dropout"
  bottom: "fc2"
  top: "drop1"
  dropout_param {
    dropout_ratio: 0.3
  }
}
layer {
  name: "fc3"
  type: "InnerProduct"
  bottom: "drop1"
  top: "fc3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 1
    decay_mult: 1
  }
  inner_product_param {
    num_output: 40
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
#layer {
 # name: "accuracy"
 # type: "Accuracy"
 # bottom: "fc3"
 # bottom: "label"
 # top: "accuracy"
 # include {
 #   phase: TEST
 # }
#}
layer {
  name: "loss"
  #type: "SoftmaxWithLoss"
  type: "Softmax"
  bottom: "fc3"
  #bottom: "label" # label comes from the commented-out data layers; Softmax takes a single bottom
  top: "loss"
}

# Enforce the transformation as orthogonal matrix
layer {
  name: "perm_loss_transform2"
  type: "Permute"
  bottom: "transform2" # Bx64x64
  top: "transform2_transpose" # Bx64x64
  permute_param {
    order: 0
    order: 2
    order: 1
  }
}
#layer {
 # name: "matmul_feat_loss"
 # type: "MatrixMultiplication"
 # bottom: "transform2"
 # bottom: "transform2_transpose"
 # top: "transform2_mul" # Bx64x64
#}
#layer {
#  name: "loss_feat"
#  type: "EuclideanLoss"
#  bottom: "transform2_mul"
#  bottom: "eye_tnet2"
#  top: "loss_feat"
#  loss_weight: 0.001
#}