TDA4VM: Multi-Camera Application with Shared Memory and Scaler Node

Part Number: TDA4VM

Tool/software:

Hello Team,  

We are using the following SDK versions:

RTOS: ti-processor-sdk-rtos-j721e-evm-08_01_00_13

Linux: ti-processor-sdk-linux-j7-evm-08_01_00_07-Linux

We are developing a multi-camera application using two separate processes:

  • App 1 (Producer): Captures video frames, processes them using VISS and LDC (Lens Distortion Correction), and writes the LDC output frames to a shared memory buffer.

  • App 2 (Consumer): Reads the LDC output frames from shared memory and feeds them into an OpenVX Scaler node for compositing (a simplified sketch of the shared-memory layout and semaphore handshake follows this list).
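
For context, here is a simplified sketch of the shared-memory contract between the two apps. The actual SharedImageBuffer definition and the SHM_NAME / SHM_TOTAL_SIZE constants live in a common header shared by both processes; only the fields referenced by the code later in this post are shown, and the layout here is illustrative only.

/* Simplified sketch of the shared-memory contract between App 1 (Producer)
 * and App 2 (Consumer). The real SharedImageBuffer definition and the
 * SHM_NAME / SHM_TOTAL_SIZE constants are in a common header. */
#include <stdint.h>

typedef struct {
    uint32_t num_images;   /* number of LDC output frames written by App 1   */
    uint8_t  data[];       /* NV12 frame payload, frames stored back to back */
} SharedImageBuffer;

/* Handshake with POSIX named semaphores:
 *   App 1: write the frames into data[], then sem_post() "/sem_data_ready"
 *   App 2: sem_wait() "/sem_data_ready", rebuild the vx_image(s) from data[],
 *          then sem_post() "/sem_data_consumed" as an acknowledgement        */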

Observations:

  • If we bypass the Scaler node and send the shared-memory frames directly to GStreamer, the video streams correctly, indicating that the shared-memory content is valid.

    • We also dumped the shared-memory output buffer to a file (YUV format), and it looks fine.

  • However, if we feed the same shared-memory frames into the input of the Scaler node, the Scaler output is a pinkish screen.

Questions:

  • What could be causing the Scaler node to output a pink screen, even though the shared-memory frames are valid?

  • Could there be any specific format, alignment, or memory-ownership requirements for images passed into the Scaler node that differ from those for GStreamer?

  • Can you help us debug or validate the correct usage of vx_image creation and population when the input comes from shared memory? (For reference, the per-plane copy pattern we understand NV12 requires is sketched just after this list.)
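
To make the last question concrete, here is the per-plane copy pattern we understand a two-plane NV12 vx_image requires when it is filled from a host buffer, modelled on how we understand the SDK's readScalerInput helper works. This is only an illustrative sketch: copy_nv12_to_vx_image, src_nv12, width and height are hypothetical names, not the actual contents of our rebuild_ldc_output_from_shm().

/* Illustrative sketch only (hypothetical helper): populate a two-plane NV12
 * vx_image from a contiguous NV12 frame in host memory, copying each plane
 * row by row so that the vx_image row pitch (addr.stride_y) is respected. */
#include <string.h>
#include <VX/vx.h>

static vx_status copy_nv12_to_vx_image(vx_image image, const vx_uint8 *src_nv12,
                                        vx_uint32 width, vx_uint32 height)
{
    vx_rectangle_t rect = { 0, 0, width, height };
    const vx_uint8 *src = src_nv12;
    vx_status status = VX_SUCCESS;
    vx_uint32 plane;

    for (plane = 0; plane < 2; plane++)   /* plane 0 = Y, plane 1 = interleaved UV */
    {
        vx_imagepatch_addressing_t addr;
        vx_map_id map_id;
        void *ptr = NULL;
        vx_uint32 row;

        status = vxMapImagePatch(image, &rect, plane, &map_id, &addr, &ptr,
                                 VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST, VX_NOGAP_X);
        if (status != VX_SUCCESS)
        {
            break;
        }

        /* For NV12: plane 0 has height rows of width bytes; plane 1 has
         * height/2 rows of width bytes (width/2 UV pairs * 2 bytes each). */
        vx_uint32 num_rows      = addr.dim_y / addr.step_y;
        vx_uint32 bytes_per_row = (addr.dim_x / addr.step_x) * addr.stride_x;

        for (row = 0; row < num_rows; row++)
        {
            memcpy((vx_uint8 *)ptr + (row * addr.stride_y),
                   src + (row * bytes_per_row),
                   bytes_per_row);
        }

        src += (num_rows * bytes_per_row);
        vxUnmapImagePatch(image, map_id);
    }

    return status;
}

In particular, we would like to confirm whether the UV plane must be filled separately like this, and whether the Scaler node expects the vx_image row pitch (addr.stride_y) to be handled this way when it differs from the packed row size.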

For reading the frames, we are using the code snippets below in App 2 (Consumer):

static vx_status app_init(AppObj *obj) {
  vx_status status = VX_SUCCESS;
    printf("%s %d status = %u \n", __func__, __LINE__, status);

  if (status == VX_SUCCESS) {
    /* Create OpenVx Context */
    obj->context = vxCreateContext();
    status = vxGetStatus((vx_reference)obj->context);
    printf("Creating context done!\n");
  }

  if (status == VX_SUCCESS) {
    tivxHwaLoadKernels(obj->context);
    tivxImagingLoadKernels(obj->context);
    tivxFileIOLoadKernels(obj->context);
    printf("Kernel loading done!\n");
  }

  obj->input.width = WIDTH;
  obj->input.height = HEIGHT;

  printf("obj->input.width = %u obj->input.height = %u \n", obj->input.width, obj->input.height);

  /* Create input OpenVX object */
  vx_image input = vxCreateImage(obj->context, obj->input.width, obj->input.height, VX_DF_IMAGE_NV12);
  status = vxGetStatus((vx_reference)input);
  printf("%s %d status = %u \n", __func__, __LINE__, status);
  if (status == VX_SUCCESS) {
    vx_int32 q;
    for (q = 0; q < APP_BUFFER_Q_DEPTH; q++) {
      obj->input.arr[q]    = vxCreateObjectArray(obj->context, (vx_reference)input, 1);
      obj->input_images[q] = (vx_image)vxGetObjectArrayItem((vx_object_array)obj->input.arr[q], 0);
    }
    vxReleaseImage(&input);
    printf("%s %d status = %u \n", __func__, __LINE__, status);
  }

  obj->scalerObj.output[0].width = 1280;
  obj->scalerObj.output[0].height = 720;
  obj->scalerObj.color_format = VX_DF_IMAGE_NV12;

  /* Initialize modules */
  if (status == VX_SUCCESS) {
    printf("obj->scalerObj = %d %d %d  NUM_CH, = %d \n", obj->scalerObj.color_format, VX_DF_IMAGE_U8, VX_DF_IMAGE_NV12, NUM_CH);
    status = app_init_scaler(obj->context, &obj->scalerObj, "scaler_obj", NUM_CH, 1);
    APP_PRINTF("Scaler Init Done! \n");
    printf("Scaler Init Done! \n");
  }

  if ((obj->enable_mosaic == 1) && (status == VX_SUCCESS)) {
    status = app_init_img_mosaic(obj->context, &obj->imgMosaicObj,
                                 "img_mosaic_obj", APP_BUFFER_Q_DEPTH);
    printf("Img Mosaic init done!\n");
  }

  appPerfPointSetName(&obj->total_perf, "TOTAL");
  appPerfPointSetName(&obj->fileio_perf, "FILEIO");
  return status;
}

static vx_status app_create_graph(AppObj *obj) {
  vx_status status = VX_SUCCESS;
  vx_int32 idx = 0;

  vx_graph_parameter_queue_params_t graph_parameters_queue_params_list[2] = {0};
  vx_int32 graph_parameter_index;

  obj->graph = vxCreateGraph(obj->context);
  status = vxGetStatus((vx_reference)obj->graph);
  if (status == VX_SUCCESS) {
    status =
        vxSetReferenceName((vx_reference)obj->graph, "multi_cam_app_2_graph");
    printf("Graph create done!\n");
  }


  if(status == VX_SUCCESS)
  {
      status = app_create_graph_scaler(obj->context, obj->graph, &obj->scalerObj, obj->input.arr[0]);
  }

  obj->imgMosaicObj.input_arr[idx++] = obj->scalerObj.output[0].arr;
  
  printf("obj->enable_mosaic = %d \n", obj->enable_mosaic);
  if (obj->enable_mosaic == 1) {
    obj->imgMosaicObj.num_inputs = idx;

    if (status == VX_SUCCESS) {
      status =
          app_create_graph_img_mosaic(obj->graph, &obj->imgMosaicObj, NULL);
      printf("Img Mosaic graph done!\n");
    }
  }

  if (status == VX_SUCCESS) {
    graph_parameter_index = 0;


    /* Scaler node - input is at index 0 */
    if(status == VX_SUCCESS)
    {
        graph_parameter_index = 0;
        add_graph_parameter_by_node_index(obj->graph, obj->scalerObj.node, 0);
        obj->scalerObj.graph_parameter_index = graph_parameter_index;
        graph_parameters_queue_params_list[graph_parameter_index].graph_parameter_index = graph_parameter_index;
        graph_parameters_queue_params_list[graph_parameter_index].refs_list_size = APP_BUFFER_Q_DEPTH;
        graph_parameters_queue_params_list[graph_parameter_index].refs_list = (vx_reference*)&obj->input_images[0];
        graph_parameter_index++;
    }

    printf("obj->en_out_img_write = %d obj->test_mode = %d \n", obj->en_out_img_write, obj->test_mode);
    if ((obj->en_out_img_write == 1) || (obj->test_mode == 1)) {
      add_graph_parameter_by_node_index(obj->graph, obj->imgMosaicObj.node, 1);
      obj->imgMosaicObj.graph_parameter_index = graph_parameter_index;
      graph_parameters_queue_params_list[graph_parameter_index].graph_parameter_index = graph_parameter_index;
      graph_parameters_queue_params_list[graph_parameter_index].refs_list_size = APP_BUFFER_Q_DEPTH;
      graph_parameters_queue_params_list[graph_parameter_index].refs_list = (vx_reference *)&obj->imgMosaicObj.output_image[0];
      graph_parameter_index++;
    }

    status = vxSetGraphScheduleConfig(
        obj->graph, VX_GRAPH_SCHEDULE_MODE_QUEUE_AUTO, graph_parameter_index,
        graph_parameters_queue_params_list);

    if (status == VX_SUCCESS) {
      status = tivxSetGraphPipelineDepth(obj->graph, APP_PIPELINE_DEPTH);
    }

    tivxSetNodeParameterNumBufByIndex(obj->scalerObj.node, 1, APP_BUFFER_Q_DEPTH);

    if ((obj->enable_mosaic == 1) && (status == VX_SUCCESS)) {
      if (!((obj->en_out_img_write == 1) || (obj->test_mode == 1))) {
        status = tivxSetNodeParameterNumBufByIndex(obj->imgMosaicObj.node, 1, APP_BUFFER_Q_DEPTH);
        printf("Pipeline params setup done!\n");
      }
    }
  }

  printf("Create Graph done\n");
  return status;
}


static vx_status app_run_graph_for_one_frame_pipeline(AppObj *obj, vx_int32 frame_id) {

  vx_status status = VX_SUCCESS;

#ifdef SHM_INPUT  
  sem_t* sem_data_ready = sem_open("/sem_data_ready", O_CREAT, 0666, 0);
  sem_t* sem_data_consumed = sem_open("/sem_data_consumed", O_CREAT, 0666, 0);
#else
  vx_char input_file_name[APP_MAX_FILE_PATH];
  vx_int32 obj_array_idx = -1;
  const char *input_path = "tidl_demo_images";
#endif
  
  ScalerObj    *scalerObj    = &obj->scalerObj;
  ImgMosaicObj *imgMosaicObj = &obj->imgMosaicObj;

#ifndef SHM_INPUT 
  if(frame_id % 20 == 0)
	  frame_id = 0;
  else
	  frame_id = frame_id % 20;

  //snprintf(input_file_name, APP_MAX_FILE_PATH, "%s/%010d.yuv", obj->input_file_path, frame_id+100);
  snprintf(input_file_name, APP_MAX_FILE_PATH, "%s/%010d.yuv", input_path, frame_id+100);

  printf("Input file: %s\n", input_file_name);
#endif

  appPerfPointBegin(&obj->total_perf);
  /* checksum_actual is the checksum determined by the realtime test;
     checksum_expected is the checksum that the pipeline output is expected
     to match */
  uint32_t checksum_actual = 0;

  /* This is the number of frames required for the pipeline AWB and AE
     algorithms to stabilize (note that 15 is only required for the 6-8 camera
     use cases - others converge quicker) */
  uint8_t stability_frame = 15;

  if (obj->pipeline < 0) {

    /* Enqueue outputs */
    if ((obj->en_out_img_write == 1) || (obj->test_mode == 1)) {
      printf("%s %d \n", __func__, __LINE__);
      status = vxGraphParameterEnqueueReadyRef(
          obj->graph, imgMosaicObj->graph_parameter_index,
          (vx_reference *)&imgMosaicObj->output_image[obj->enqueueCnt], 1);
    }

#ifdef SHM_INPUT   
    printf("%s %d \n", __func__, __LINE__);
    printf("Consumer waiting for data...\n");

    // Block here until producer signals
    sem_wait(sem_data_ready); // Wait for data to be ready
    printf("Consumer received signal!\n");

    obj->input.arr[obj->enqueueCnt] = rebuild_ldc_output_from_shm(obj->context);
 
    sem_post(sem_data_consumed);    // Signal that data was consumed
    printf("Consumer sending ACK back to producer !\n");

#else
        appPerfPointBegin(&obj->fileio_perf);
        /* Read input */
        if(status == VX_SUCCESS)
        {
            status = readScalerInput(input_file_name, obj->input.arr[obj->enqueueCnt], APP_MODULES_READ_FILE, 0);
        }

        appPerfPointEnd(&obj->fileio_perf);

        APP_PRINTF("App Reading Input Done!\n");
#endif

    /* Enqueue input - start execution */
        if(status == VX_SUCCESS)
        {
            status = vxGraphParameterEnqueueReadyRef(obj->graph, scalerObj->graph_parameter_index, (vx_reference*)&obj->input_images[obj->enqueueCnt], 1);
        }

    obj->enqueueCnt++;
    obj->enqueueCnt =
        (obj->enqueueCnt >= APP_BUFFER_Q_DEPTH) ? 0 : obj->enqueueCnt;
    obj->pipeline++;
  }

  if ((obj->pipeline == 0) && (status == VX_SUCCESS)) {

    if ((obj->en_out_img_write == 1) || (obj->test_mode == 1)) {
      printf("%s %d \n", __func__, __LINE__);
      status = vxGraphParameterEnqueueReadyRef(
          obj->graph, imgMosaicObj->graph_parameter_index,
          (vx_reference *)&imgMosaicObj->output_image[obj->enqueueCnt], 1);
    }

#ifdef SHM_INPUT    
      printf("%s %d \n", __func__, __LINE__);
    printf("Consumer waiting for data...\n");
    sem_wait(sem_data_ready);  // Block here until producer signals
    printf("Consumer received signal!\n");

    obj->input.arr[obj->enqueueCnt] = rebuild_ldc_output_from_shm(obj->context);

    sem_post(sem_data_consumed);    // Signal that data was consumed
    printf("Consumer sending ACK back to producer !\n");

#else
        appPerfPointBegin(&obj->fileio_perf);
        /* Read input */
        if(status == VX_SUCCESS)
        {
            status = readScalerInput(input_file_name, obj->input.arr[obj->enqueueCnt], APP_MODULES_READ_FILE, 0);
        }

        appPerfPointEnd(&obj->fileio_perf);

        APP_PRINTF("App Reading Input Done!\n");
#endif

   /* Enqueue input - start execution */
        if(status == VX_SUCCESS)
        {
            status = vxGraphParameterEnqueueReadyRef(obj->graph, scalerObj->graph_parameter_index, (vx_reference*)&obj->input_images[obj->enqueueCnt], 1);
        } 
    
	
    printf("%s %d \n", __func__, __LINE__);
    obj->enqueueCnt++;
    obj->enqueueCnt = (obj->enqueueCnt >= APP_BUFFER_Q_DEPTH) ? 0 : obj->enqueueCnt;
    obj->pipeline++;
  }

  if ((obj->pipeline > 0) && (status == VX_SUCCESS)) {
    //vx_object_array capture_input_arr;
    vx_image scaler_input_image;
    vx_image mosaic_output_image;
    //vx_image mosaic_input_image;
    uint32_t num_refs;

    printf("%s %d \n", __func__, __LINE__);
    /* Dequeue input */
        if(status == VX_SUCCESS)
        {
            status = vxGraphParameterDequeueDoneRef(obj->graph, scalerObj->graph_parameter_index, (vx_reference*)&scaler_input_image, 1, &num_refs);
        }
    printf("%s %d \n", __func__, __LINE__);

#ifdef ENABLE_GSTREAMER
    obj->gst_stream_enable = true;
    printf("111 %s %d \n", __func__, __LINE__);
    //writeGstOutput(obj, obj->imgMosaicObj.output_image[0]);
    //writeGstarrOutput(obj, obj->input.arr[obj->enqueueCnt]);
    writeGstarrOutput(obj, obj->scalerObj.output[0].arr);
#endif

#ifdef SHM_INPUT 
  printf("Consumer waiting for data...\n");
  sem_wait(sem_data_ready);  // Block here until producer signals
  printf("Consumer received signal!\n");
       
  if (obj->input.arr[obj->enqueueCnt] != NULL)
  {
    vxReleaseObjectArray(&obj->input.arr[obj->enqueueCnt]);
  }
    
  obj->input.arr[obj->enqueueCnt] = rebuild_ldc_output_from_shm(obj->context);
    
    sem_post(sem_data_consumed);    // Signal that data was consumed
  printf("Consumer sending ACK back to producer !\n");
#else
        appPerfPointBegin(&obj->fileio_perf);

        if(status == VX_SUCCESS)
        {
            app_find_object_array_index(obj->input.arr, (vx_reference)scaler_input_image, APP_BUFFER_Q_DEPTH, &obj_array_idx);
        }
        if((obj_array_idx != -1) && (status == VX_SUCCESS))
        {
            status = readScalerInput(input_file_name, obj->input.arr[obj_array_idx], APP_MODULES_READ_FILE, 0);
        }

        appPerfPointEnd(&obj->fileio_perf);

        APP_PRINTF("App Reading Input Done!\n");
#endif

        /* Enqueue input - start execution */
        if(status == VX_SUCCESS)
        {
            status = vxGraphParameterEnqueueReadyRef(obj->graph, scalerObj->graph_parameter_index, (vx_reference*)&scaler_input_image, 1);
        }
    
    obj->enqueueCnt++;
    obj->dequeueCnt++;

    obj->enqueueCnt = (obj->enqueueCnt >= APP_BUFFER_Q_DEPTH) ? 0 : obj->enqueueCnt;
    obj->dequeueCnt = (obj->dequeueCnt >= APP_BUFFER_Q_DEPTH) ? 0 : obj->dequeueCnt;
  }
  
  appPerfPointEnd(&obj->total_perf);

  return status;
}


On the producer side (App 1), the LDC output is written into shared memory as follows:

void write_ldc_output_to_shm(vx_object_array output_arr) {

    int shm_fd = shm_open(SHM_NAME, O_CREAT | O_RDWR, 0666);
    ftruncate(shm_fd, SHM_TOTAL_SIZE);
    SharedImageBuffer *shm_ptr = mmap(NULL, SHM_TOTAL_SIZE, PROT_WRITE, MAP_SHARED, shm_fd, 0);

    vx_size num_items = 0;
    vxQueryObjectArray(output_arr, VX_OBJECT_ARRAY_NUMITEMS, &num_items, sizeof(num_items));
    shm_ptr->num_images = num_items;

    printf("num_items = %ld \n", num_items);
    for (vx_size i = 0; i < num_items; i++) {
        vx_image image = (vx_image)vxGetObjectArrayItem(output_arr, i);
        vx_rectangle_t rect = {0, 0, WIDTH, HEIGHT};
        vx_imagepatch_addressing_t addr;
        void *base_ptr = NULL;
        vx_map_id map_id;

        vxMapImagePatch(image, &rect, 0, &map_id, &addr, &base_ptr,
                        VX_READ_ONLY, VX_MEMORY_TYPE_HOST, 0);
        memcpy(&shm_ptr->data[i * SHM_TOTAL_SIZE], base_ptr, SHM_TOTAL_SIZE);
        vxUnmapImagePatch(image, map_id);
        vxReleaseImage(&image);
    }

    munmap(shm_ptr, SHM_TOTAL_SIZE);
    close(shm_fd);
    //shm_unlink(SHM_NAME);
}

We took the reference code from "ti-processor-sdk-rtos-j721e-evm-08_01_00_13/vision_apps/apps/dl_demos/app_tidl_od".

We chose this reference because it reads input from a file and streams it to the display.

Please guide us on what is going wrong in our case.

Thanks in advance!

Regards,

Kishore