Hi,
I am trying to use the DSP_fft16x16 function and running into an issue where the output from this FFT kernel does not match the reference output computed in Octave, and I am not sure why.
I generated the weight vector using the gen_twiddle_fft16x16 function in the library, and my inputs are in the range +8 to -8, stored in a signed char. My input and weight vectors are double-word aligned and are of type "short". It's a bit confusing in the corresponding DSPLIB documentation whether the input needs to be scaled; I tried scaling up by 4 bits and also no scaling, but the results don't match in either case. I am not sure if I am breaking any other assumptions of this library function.
Here is my code below:
/* 2-D FFT working buffers.
 *
 * DSP_fft16x16 requires its input, output, and twiddle buffers to be
 * double-word (8-byte) aligned. In C, the TI compiler's DATA_ALIGN
 * pragma takes the symbol name as its first argument:
 *     #pragma DATA_ALIGN(symbol, constant)
 * The original one-argument form (#pragma DATA_ALIGN(8)) is the C++
 * syntax and does not align these symbols in a C file, so the kernel
 * was being fed unaligned data — a likely cause of garbage output. */
#define N 128
char block[N][N];      /* input samples, stated range -8..+8 */
char out[N][N];
#pragma DATA_ALIGN(inp_scale, 8)
short inp_scale[N];    /* NOTE(review): unused in the code shown */
#pragma DATA_ALIGN(inp_16, 8)
short inp_16[2*N];     /* interleaved {re, im} FFT input  */
#pragma DATA_ALIGN(out_16, 8)
short out_16[2*N];     /* interleaved {re, im} FFT output */
#pragma DATA_ALIGN(wt_16, 8)
short wt_16[2*N];      /* twiddle factors from gen_twiddle_fft16x16 */
short fft_real[N][N];  /* real parts between row pass and column pass */
short fft_imag[N][N];  /* imaginary parts between the two passes */
// Compute a 2-D FFT of the N x N input 'block' as two 1-D passes:
// an N-point FFT across every row, then an N-point FFT down every
// column of the intermediate result. Real and imaginary parts are
// held in fft_real/fft_imag between the passes and hold the final
// result on return.
//
// NOTE(review): DSP_fft16x16 applies fixed-point scaling (right
// shifts) inside its butterfly stages, so its output differs from an
// unscaled reference FFT (e.g. Octave's fft()) by a power-of-two
// factor -- confirm the exact scale factor in the DSPLIB docs before
// comparing results, otherwise a correct run still "doesn't match".
//
// NOTE(review): DSP_fft16x16 requires wt_16, inp_16 and out_16 to be
// double-word (8-byte) aligned -- verify the DATA_ALIGN pragmas on
// these symbols actually take effect.
Void TskFcnTwoDFft()
{
// Build the Q15 twiddle-factor table for an N-point 16x16 FFT.
gen_twiddle_fft16x16(wt_16,N);
// Pass 1: FFT of each row.
for(int row = 0; row < N; row++)
{
// Pack the row as interleaved complex {re, im} with im = 0.
// The char samples (-8..+8) are used as-is; presumably they should
// be scaled up toward Q15 for precision -- TODO confirm against the
// DSPLIB input-format requirements.
for(int col = 0; col < N; col++)
{
inp_16[2*col] = block[row][col];
inp_16[2*col+1] = 0;
}
DSP_fft16x16(wt_16,N,inp_16,out_16);
// De-interleave the complex result into the intermediate arrays.
for (int col = 0; col < N; col++)
{
fft_real[row][col] = out_16[2*col];
fft_imag[row][col] = out_16[2*col+1];
}
}
// Pass 2: FFT of each column of the pass-1 results.
for (int col = 0; col < N; col++)
{
for (int row = 0; row < N; row++)
{
// Pre-shift by 1 bit, presumably to guard against overflow in the
// second pass -- TODO confirm this matches the headroom the kernel
// actually needs; the shift also changes the overall scale factor
// relative to the reference FFT.
inp_16[2*row] = fft_real[row][col]>>1;
inp_16[2*row+1] = fft_imag[row][col]>>1;
}
DSP_fft16x16(wt_16,N,inp_16,out_16);
// Store the column result back in place over the pass-1 values.
for (int row = 0; row < N; row++)
{
fft_real[row][col] = out_16[2*row];
fft_imag[row][col] = out_16[2*row+1];
}
}
}