This thread has been locked.

If you have a related question, please click the "Ask a related question" button in the top right corner. The newly created question will be automatically linked to this question.

TDA2EXEVM: TIDL layerType issues (Host emulation mode)

Part Number: TDA2EXEVM

This is the model I trained, and I ran into some problems on the PC.

name: "cifar_mod_BSM_deploy"
input: "data"
input_shape {
dim: 1
dim: 3
dim: 320
dim: 180
}
layer {
name: "data/bias"
type: "Bias"
bottom: "data"
top: "data/bias"
param {
lr_mult: 0
decay_mult: 0
}
bias_param {
filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data/bias"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 24
bias_term: true
pad: 1
kernel_size: 5
group: 1
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
dilation: 1
}
}
layer {
name: "conv1/bn"
type: "BatchNorm"
bottom: "conv1"
top: "conv1"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.0001
}
}
layer {
name: "conv1/relu"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 18
bias_term: true
pad: 1
kernel_size: 5
group: 1
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
dilation: 1
}
}
layer {
name: "conv2/bn"
type: "BatchNorm"
bottom: "conv2"
top: "conv2"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.0001
}
}
layer {
name: "conv2/relu"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 24
bias_term: true
pad: 1
kernel_size: 5
group: 1
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
dilation: 1
}
}
layer {
name: "conv3/bn"
type: "BatchNorm"
bottom: "conv3"
top: "conv3"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.0001
}
}
layer {
name: "conv3/relu"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv4"
type: "Convolution"
bottom: "pool3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
bias_term: true
pad: 1
kernel_size: 3
group: 1
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
dilation: 1
}
}
layer {
name: "conv4/bn"
type: "BatchNorm"
bottom: "conv4"
top: "conv4"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.0001
}
}
layer {
name: "conv4/relu"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 24
bias_term: true
pad: 1
kernel_size: 5
group: 1
stride: 1
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
dilation: 1
}
}
layer {
name: "conv5/bn"
type: "BatchNorm"
bottom: "conv5"
top: "conv5"
batch_norm_param {
moving_average_fraction: 0.99
eps: 0.0001
}
}
layer {
name: "conv5/relu"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}

layer {
name: "ip5"
type: "InnerProduct"
bottom: "pool5"
top: "ip5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "prob"
type: "Softmax"
bottom: "ip5"
top: "prob"
}

cd "C:\PROCESSOR_SDK_VISION_03_02_00_00\ti_components\algorithms\REL.TIDL.01.00.00.00\modules\ti_dl\utils\tidlModelImport"

>tidl_model_import.out.exe ..\..\test\testvecs\config\import\tidl_import_jseg21.txt

The contents of the file "tidl_import_jseg21.txt" are:

# Default - 0
randParams = 0

# 0: Caffe, 1: TensorFlow, Default - 0
modelType = 0

# Convolution kernel type: 0 = sparse, 1 = dense
conv2dKernelType = 0

# 0: Fixed quantization by training framework, 1: Dynamic quantization by TIDL, Default - 1
quantizationStyle = 1

# quantRoundAdd/100 will be added while rounding to integer, Default - 50
#quantRoundAdd = 25
quantRoundAdd = 25


#numParamBits = 8
numParamBits = 8

# 0 : 8bit Unsigned, 1 : 8bit Signed Default - 1
inElementType = 0


inputNetFile = "C:\PROCESSOR_SDK_VISION_03_02_00_00\ti_components\algorithms\REL.TIDL.01.00.00.00\modules\ti_dl\utils\tidlModelImport\cifar_mod_BSM_deploy.prototxt"
inputParamsFile = "C:\PROCESSOR_SDK_VISION_03_02_00_00\ti_components\algorithms\REL.TIDL.01.00.00.00\modules\ti_dl\utils\tidlModelImport\cifar_mod_BSM_iter_584.caffemodel"


outputNetFile = "..\..\test\testvecs\config\tidl_models\tidl_net_jsegnet21v2.bin"
outputParamsFile = "..\..\test\testvecs\config\tidl_models\tidl_param_jsegnet21v2.bin"
#preProcType = 3
rawSampleInData = 1
numSampleInData = 9


sampleInData = "..\..\test\testvecs\input\images_9.y"

#sampleInData = "..\..\test\testvecs\input\bird8.jpg"
tidlStatsTool = "..\quantStatsTool\eve_test_dl_algo.out.exe"
inWidth = 180
inHeight = 320

The output is:

199 190 254

201 192 254

201 192 254

201 192 254

199 190 254

199 190 254

199 190 254

199 190 254

199 190 254

I fed in nine different pictures, but the results are basically the same.

Why is that?