This thread has been locked.

If you have a related question, please click the "Ask a related question" button in the top right corner. The newly created question will be automatically linked to this question.

dm8148 codec engine graphics

Other Parts Discussed in Thread: SYSBIOS

Dear all:

     On page 7 of DM814x_EZ_Software_Developers_Guide.gdf, I learned that the codec engine examples cannot run together with graphics, so I executed "/etc/init.d/load-hd-firmware.sh stop", after which the codec engine application can run on the DM8148 EVM board. If I want to add an HMI (graphical front end) to the codec engine application, how do I do that? I have the following questions:

Q1, http://processors.wiki.ti.com/index.php/EZSDK_Memory_Map shows the EZSDK's memory allocation

 DSP_CODE SIZE = 0M. If I run the codec on the C674 DSP core, how do I specify the DSP_CODE memory segment?

Q2, DM814x_EZ_Software_Developers_Guide.gdf tells us that the codec engine examples cannot run together with graphics. Can the codec engine examples and the video M3 core run at the same time?

Q3, http://processors.wiki.ti.com/index.php/EZSDK_Memory_Map — I changed the memory map for a 512 MB DM8148 board. My DSP configuration files are below:

  config.bld:

  

/*
 *  ======== config.bld ========
 *  This script is run prior to all build scripts.  It initializes the
 *  rootDir configuration parameter of all supported targets.
 *
 *  There are more details about config.bld here:
 *  http://rtsc.eclipse.org/docs-tip/Glossary#config.bld
 */

var Build = xdc.useModule('xdc.bld.BuildEnvironment');
var Pkg = xdc.useModule('xdc.bld.PackageContents');

/*
 *  Initialize local vars with those set in xdcpaths.mak (via XDCARGS).
 *  cgRootDir must be file-global: it is consumed below when setting
 *  C674.rootDir.  Declared explicitly (the original relied on implicit
 *  globals for both cgRootDir and the loop index x).
 */
var cgRootDir;
for (var x = 0; x < arguments.length; x++) {
    if (arguments[x].match(/^CODEGEN_INSTALL_DIR=/)) {
        cgRootDir = arguments[x].split("=")[1];
    }
}

/*
 *  External (DDR3) memory map handed to the evmDM8148 platform below.
 *  Segment layout implied by each base/len pair (end = base + len):
 *
 *    DDR3_HOST   0x80000000..0x8B000000  (176 MB, A8/Linux)
 *    DDRALGHEAP  0x98000000..0x99400000  ( 20 MB, alg heap)
 *    DDR3_SR0    0x99400000..0x99500000  (  1 MB, SysLink SR0)
 *    DDR3_data   0x99500000..0x99D00000  (  8 MB, C674 data)
 *    DDR3        0x99D00000..0x9A100000  (  4 MB, C674 code/stack)
 *    DDR3_SR1    0x9A100000..0x9A200000  (  1 MB, IPC SR1)
 *
 *  NOTE(review): the segments from 0x98000000 upward are contiguous and
 *  non-overlapping, but this must agree with the A8-side (Linux) memory
 *  map / carveouts on a 512 MB board — confirm against the EZSDK memory
 *  map documentation.
 */
var mem_ext = [
["DDR3_HOST", {   /* 176 MB for the ARM Cortex-A8 (Linux) */
    comment:    "DDR3 Memory reserved for use by the A8",
    name:       "DDR3_HOST",
    base:       0x80000000,
    len:        0x0B000000,
    }
],
["DDR3_data", {   /* 8 MB — platform dataMemory (see platformTable below) */
    comment:    "DDR3 Memory reserved for use by the C674",
    name:       "DDR3_data",
    base:       0x99500000,
    len:        0x00800000,
    }
],
["DDR3", {        /* 4 MB — platform codeMemory and stackMemory */
    comment:    "DDR3 Memory reserved for use by the C674",
    name:       "DDR3",
    base:       0x99D00000,
    len:        0x00400000,
    }
],
["DDRALGHEAP", {  /* 20 MB — consumed whole by EXTALG_HEAP in server.cfg */
    comment:    "DDRALGHEAP: off-chip memory for dynamic allocation",
    name:       "DDRALGHEAP",
    base:       0x98000000,
    len:        0x01400000,
    }
],
["DDR3_SR1", {    /* 1 MB — IPC SharedRegion 1 (heap, cached) */
    comment:    "DDR3 Memory reserved for use by SharedRegion 1",
    name:       "DDR3_SR1",
    base:       0x9A100000,
    len:        0x00100000,
    }
],
["DDR3_SR0", {    /* 1 MB — SysLink HOST<->DSP SharedRegion 0 */
    comment: "DDR3 Memory reserved for use by SharedRegion 0",
    name: "DDR3_SR0",
    base: 0x99400000,
    len:  0x00100000    /* 1 MB */
    }
],
];

/* platform instances used by this package */
/*
 *  Bind the mem_ext map above to the evmDM8148 platform and pick the
 *  C674 cache configuration (L1D/L1P 32k, L2 128k).  The code/data/stack
 *  placement names must match segment names defined in mem_ext.
 */
Build.platformTable["ti.platforms.evmDM8148"] = {
    externalMemoryMap: mem_ext,
    l1DMode:"32k",
    l1PMode:"32k",
    l2Mode:"128k",
    codeMemory: "DDR3",
    dataMemory: "DDR3_data",
    stackMemory: "DDR3"
};

/* should test here that cgRootDir is set! */

/*
 *  Register the ELF C674 target, rooted at the codegen install dir
 *  parsed from XDCARGS, and restrict it to the evmDM8148 platform.
 */
var C674 = xdc.useModule('ti.targets.elf.C674');
C674.rootDir = cgRootDir;

C674.platforms = [
    "ti.platforms.evmDM8148"
];

$trace("Adding ti.targets.elf.C674 to Build.targets", 1, ['genserver']);
Build.targets.$add(C674);

/* Trim build time: drop the profiles this package never ships. */
var droppedProfiles = ["coverage", "profile", "whole_program", "whole_program_debug"];
for (var i = 0; i < droppedProfiles.length; i++) {
    delete C674.profiles[droppedProfiles[i]];
}

/* Create a .zip file for redistribution.  Remove this line if you prefer .tar */
Pkg.attrs.archiver = 'zip';

server.cfg (the DSP-side codec server configuration):

/*
 *  ======== server.cfg ========
 *
  */

/* scratch groups */
var MAXGROUPS = 20;   // DSKT2 scratch group IDs range 0..19 (see DSKT2 notes below);
                      // appears unused in this config — kept for reference
var GROUP_2 = 2;      // scratch group used by the codec(s) served below

/* Emit the resolved platform name into the genserver trace stream. */
$trace("platformName = '" + Program.platformName + "'", 1, ['genserver']);

/*
 *  createHeapMem(size, sectionName, heapName)
 *
 *  Create a ti.sysbios.heaps.HeapMem instance of the requested size,
 *  bind its backing store to the given output section, and place that
 *  section into the named memory segment via Program.sectMap.
 *  Returns the HeapMem instance handle.
 */
function createHeapMem(size, sectionName, heapName) {
    var HeapMemMod = xdc.useModule('ti.sysbios.heaps.HeapMem');
    var params = new HeapMemMod.Params();
    params.size = size;
    params.sectionName = sectionName;
    Program.sectMap[sectionName] = heapName;
    return HeapMemMod.create(params);
}

/* heap config */
var internalMemoryName = 'IRAM';   // internal memory segment — presumably on-chip RAM; confirm against platform
var internalHeapSize = 0xc000;     //  48 kB
var externalMemoryName = 'DDR3';
var externalHeapSize = 0x20000;    // 128 kB

/*
 *  INT_HEAP backs DSKT2's internal (DARAM/SARAM/IPROG) requests below;
 *  EXT_HEAP becomes the system default heap; EXTALG_HEAP spans the whole
 *  DDRALGHEAP segment and backs algorithm external persistent/scratch
 *  allocations (DSKT2.ESDATA/EPROG, DMAN3.heapExternal).
 */
Program.global.EXT_HEAP =
    createHeapMem(externalHeapSize, ".EXT_HEAP", externalMemoryName);
Program.global.INT_HEAP =
    createHeapMem(internalHeapSize, ".INT_HEAP", internalMemoryName);

/* Size EXTALG_HEAP to the full DDRALGHEAP segment from the platform map. */
var DDRALGMemoryName = "DDRALGHEAP";
var DDRALGHeapSize = Program.platform.externalMemoryMap[DDRALGMemoryName].len;
Program.global.EXTALG_HEAP = createHeapMem(DDRALGHeapSize, ".EXTALG_HEAP", DDRALGMemoryName);

/* Place code */
Program.sectMap[".text"]      = externalMemoryName;

/* Set the default heap to the external heap */
var Memory = xdc.useModule('xdc.runtime.Memory');
Memory.defaultHeapInstance = Program.global.EXT_HEAP;
/* end heap config */

/* Setup xdcruntime proxys */
xdc.useModule('ti.sysbios.xdcruntime.Settings');

/*
 *  Configure CE's OSAL.  This codec server only builds for the BIOS-side of
 *  a heterogeneous system, so use the "DSPLINK_BIOS" configuration.
 */
var osalGlobal = xdc.useModule('ti.sdo.ce.osal.Global');
osalGlobal.runtimeEnv = osalGlobal.DSPLINK_BIOS;

var Timer = xdc.useModule('ti.sysbios.timers.dmtimer.Timer');
Timer.intFreq.hi = 0;
Timer.intFreq.lo = 20000000;


/* IPC-related config */
xdc.useModule('ti.sdo.ce.ipc.dsplink.dsp.Settings');
var MultiProc = xdc.useModule('ti.sdo.utils.MultiProc');
var settings = xdc.useModule('ti.sdo.ipc.family.Settings');
var procNames = settings.getDeviceProcNames();

MultiProc.setConfig("DSP", procNames);

/*
 *  Two shared regions: index 0 carries the SysLink HOST<->DSP transport
 *  (segment DDR3_SR0), index 1 carries the IPC heap (segment DDR3_SR1).
 *  Both regions are owned by the HOST processor.
 */
var SharedRegion_map = {};
SharedRegion_map["SysLink: HOST<--->DSP"] = 0;
SharedRegion_map["Ipc"] = 1;
var SharedRegion = xdc.useModule('ti.sdo.ipc.SharedRegion');

/*
 *  addSharedRegion(index, memName, regionName, createHeap, cacheEnable)
 *
 *  Register one SharedRegion entry whose base/len come from the named
 *  segment in this CPU's memory map.  createHeap/cacheEnable are only
 *  assigned when requested, so unspecified entries keep module defaults
 *  (matching the original per-entry configuration).
 */
function addSharedRegion(index, memName, regionName, createHeap, cacheEnable) {
    var seg = Program.cpu.memoryMap[memName];
    var regionEntry = new SharedRegion.Entry();
    regionEntry.base = seg.base;
    regionEntry.len = seg.len;
    regionEntry.ownerProcId = MultiProc.getIdMeta("HOST");
    regionEntry.isValid = true;
    regionEntry.name = regionName;
    if (createHeap) {
        regionEntry.createHeap = true;
    }
    if (cacheEnable) {
        regionEntry.cacheEnable = true;
    }
    SharedRegion.setEntryMeta(index, regionEntry);
}

addSharedRegion(SharedRegion_map["SysLink: HOST<--->DSP"], "DDR3_SR0", "SYSLINK", false, false);
addSharedRegion(SharedRegion_map["Ipc"], "DDR3_SR1", "SR1", true, true);


/*
 *  ======== Server Configuration ========
 */
var Server = xdc.useModule('ti.sdo.ce.Server');
/* The server's stackSize.  More than we need... but safe. */
Server.threadAttrs.stackSize = 16384;

/* The servers execution priority */
Server.threadAttrs.priority = Server.MINPRI;
/* Pull in logging/trace settings kept in a separate capsule file. */
xdc.loadCapsule('server_log.cfg');
/*
 * The optional stack pad to add to non-configured stacks.  This is well
 * beyond most codec needs, but follows the approach of "start big and
 * safe, then optimize when things are working."
 */
Server.stackSizePad = 9000;

/*
 *  "Use" the various codec modules; i.e., implementation of codecs.
 *  All these "xdc.useModule" commands provide a handle to the codecs,
 *  which we'll use to initialize config params and add the codecs to
 *  the Server.algs array.
 */
var DARKLIGHT = xdc.useModule('adas.darklight.DARKLIGHT');
DARKLIGHT.serverFxns = "UNIVERSAL_SKEL";
DARKLIGHT.stubFxns = "UNIVERSAL_STUBS";

/*
 *  Let Codec Engine manage cache coherency on all 16 possible buffers
 *  in each direction (in / in-out / out).
 */
var cacheAll = [];
for (var bufIdx = 0; bufIdx < 16; bufIdx++) {
    cacheAll[bufIdx] = true;
}
DARKLIGHT.manageInBufsCache = cacheAll.slice(0);
DARKLIGHT.manageInOutBufsCache = cacheAll.slice(0);
DARKLIGHT.manageOutBufsCache = cacheAll.slice(0);

 

/*
 * The array of algorithms this server can serve up.  This array also
 * configures details about the threads which will be created to run the
 * algorithms (e.g. stack sizes, priorities, etc.).
 */
Server.algs = [
    /*
     * darklight runs one notch above the server's own priority, with its
     * stack drawn from memory segment 0.  groupId ties the alg to the
     * DSKT2/DMAN3 scratch-group resources configured below; the shared
     * GROUP_2 constant is used (instead of a literal 2) so the three
     * configurations cannot drift apart.
     */
    {name: "darklight", mod: DARKLIGHT , threadAttrs: {
        stackMemId: 0, priority: Server.MINPRI + 1},
        groupId : GROUP_2,
    },

];

/* to link in debug/trace FC libs, uncomment one of these */
// xdc.useModule('ti.sdo.fc.global.Settings').profile = "debug");
// xdc.useModule('ti.sdo.fc.global.Settings').profile = "debug_trace");
// xdc.useModule('ti.sdo.fc.global.Settings').profile = "trace");

/*
 *  ======== DSKT2 (XDAIS Alg. memory allocation) configuration ========
 *
 *  DSKT2 is the memory manager for all algorithms running in the system,
 *  granting them persistent and temporary ("scratch") internal and external
 *  memory. We configure it here to define its memory allocation policy.
 *
 *  DSKT2 settings are critical for algorithm performance.
 *
 *  First we assign various types of algorithm internal memory (DARAM0..2,
 *  SARAM0..2, IPROG, which are all the same on a C64+ DSP) to "INT_HEAP",
 *  the internal-memory heap created above. (For instance, if an algorithm
 *  asks for 5K of DARAM1 memory, DSKT2 will allocate 5K from INT_HEAP, if
 *  available, and give it to the algorithm; if the 5K is not available in
 *  INT_HEAP, that algorithm's creation will fail.)
 *
 *  The remaining segments we point to "EXTALG_HEAP" (the heap spanning the
 *  DDRALGHEAP external segment) except for DSKT2_HEAP, which stores DSKT2's
 *  internal dynamically allocated objects. Those must be preserved even if
 *  no codec instances are running, so we place them in "EXT_HEAP" with the
 *  rest of the system code and static data.
 *
 *  (The comment previously referenced "L1DHEAP"/".tcf"/"DDR2" from an older
 *  DSP/BIOS 5 project; this config defines its heaps above in this file.)
 */
var DSKT2 = xdc.useModule('ti.sdo.fc.dskt2.DSKT2');
DSKT2.DARAM0     = "INT_HEAP";
DSKT2.DARAM1     = "INT_HEAP";
DSKT2.DARAM2     = "INT_HEAP";
DSKT2.SARAM0     = "INT_HEAP";
DSKT2.SARAM1     = "INT_HEAP";
DSKT2.SARAM2     = "INT_HEAP";
DSKT2.ESDATA     = "EXTALG_HEAP";
DSKT2.IPROG      = "INT_HEAP";
DSKT2.EPROG      = "EXTALG_HEAP";  /* fixed: statement was missing its ';' */
DSKT2.DSKT2_HEAP = "EXT_HEAP";

/*
 *  Next we define how to fulfill algorithms' requests for fast ("scratch")
 *  internal memory allocation; "scratch" is an area an algorithm writes to
 *  while it processes a frame of data and is discarded afterwards.
 *
 *  First we turn off the switch that allows the DSKT2 algorithm memory manager
 *  to give to an algorithm external memory for scratch if the system has run
 *  out of internal memory. In that case, if an algorithm fails to get its
 *  requested scratch memory, it will fail at creation rather than proceed to
 *  run at poor performance. (If your algorithms fail to create, you may try
 *  changing this value to "true" just to get it running and optimize other
 *  scratch settings later.)
 *
 *  Setting "algorithm scratch sizes", is a scheme we use to minimize internal
 *  memory resources for algorithms' scratch memory allocation. Algorithms that
 *  belong to the same "scratch group ID" -- field "groupId" in the algorithm's
 *  Server.algs entry above, reflecting the priority of the task running the
 *  algorithm -- don't run at the same time and thus can share the same
 *  scratch area. When creating the first algorithm in a given "scratch group"
 *  (between 0 and 19), a shared scratch area for that groupId is created with
 *  a size equal to SARAM_SCRATCH_SIZES[<alg's groupId>] below -- unless the
 *  algorithm requests more than that number, in which case the size will be
 *  what the algorithm asks for. So SARAM_SCRATCH_SIZES[<alg's groupId>] size is
 *  more of a groupId size guideline -- if the algorithm needs more it will get
 *  it, but getting these size guidelines right is important for optimal use of
 *  internal memory. The reason for this is that if an algorithm comes along
 *  that needs more scratch memory than its groupId scratch area's size, it
 *  will get that memory allocated separately, without sharing.
 *
 *  This DSKT2.SARAM_SCRATCH_SIZES[<groupId>] does not mean it is a scratch size
 *  that will be automatically allocated for the group <groupId> at system
 *  startup, but only that is a preferred minimum scratch size to use for the
 *  first algorithm that gets created in the <groupId> group, if any.
 *
 *  (An example: if algorithms A and B with the same groupId = 0 require 10K and
 *  20K of scratch, and if SARAM_SCRATCH_SIZES[0] is 0, if A gets created first
 *  DSKT2 allocates a shared scratch area for group 0 of size 10K, as A needs.
 *  If then B gets to be created, the 20K scratch area it gets will not be
 *  shared with A's -- or anyone else's; the total internal memory use will be
 *  30K. By contrast, if B gets created first, a 20K shared scratch will be
 *  allocated, and when A comes along, it will get its 10K from the existing
 *  group 0's 20K area. To eliminate such surprises, we set
 *  SARAM_SCRATCH_SIZES[0] to 20K and always spend exactly 20K on A and B's
 *  shared needs -- independent of their creation order. Not only do we save 10K
 *  of precious internal memory, but we avoid the possibility that B can't be
 *  created because less than 20K was available in the DSKT2 internal heaps.)
 *
 *  Finally, note that if the codecs correctly implement the
 *  ti.sdo.ce.ICodec.getDaramScratchSize() and .getSaramScratchSize() methods,
 *  this scratch size configuration can be autogenerated by
 *  configuring Server.autoGenScratchSizeArrays = true.
 */
/* Fail alg creation rather than silently fall back to external scratch. */
DSKT2.ALLOW_EXTERNAL_SCRATCH = false;
/* Group-2 scratch guidelines: no DARAM scratch, 32 KB of SARAM scratch. */
DSKT2.DARAM_SCRATCH_SIZES[GROUP_2] = 0;
DSKT2.SARAM_SCRATCH_SIZES[GROUP_2] = 0x8000;

/*
 *  ======== DMAN3 (DMA manager) configuration ========
 */

/*  First we configure how DMAN3 handles memory allocations:
 *
 *  Essentially the configuration below should work for most codec combinations.
 *  If it doesn't work for yours -- meaning an algorithm fails to create due
 *  to insufficient internal memory -- try the alternative (commented out
 *  line that assigns "DDRALGHEAP" to DMAN3.heapInternal).
 *
 *  What follows is an FYI -- an explanation for what the alternative would do:
 *
 *  When we use an external memory segment (DDRALGHEAP) for DMAN3 internal
 *  segment, we force algorithms to use external memory for what they think is
 *  internal memory -- we do this in a memory-constrained environment
 *  where all internal memory is used by cache and/or algorithm scratch
 *  memory, pessimistically assuming that if DMAN3 uses any internal memory,
 *  other components (algorithms) will not get the internal memory they need.
 *
 *  This setting would affect performance very lightly.
 *
 *  By setting DMAN3.heapInternal = <external-heap>  DMAN3 *may not* supply
 *  ACPY3_PROTOCOL IDMA3 channels the protocol required internal memory for
 *  IDMA3 channel 'env' memory. To deal with this catch-22 situation we
 *  configure DMAN3 with hook-functions to obtain internal-scratch memory
 *  from the shared scratch pool for the associated algorithm's
 *  scratch-group (i.e. it first tries to get the internal scratch memory
 *  from DSKT2 shared allocation pool, hoping there is enough extra memory
 *  in the shared pool, if that doesn't work it will try persistent
 *  allocation from DMAN3.internalHeap).
 */
var DMAN3 = xdc.useModule('ti.sdo.fc.dman3.DMAN3');
DMAN3.heapInternal    = "INT_HEAP";       /* INT_HEAP is an internal segment */
DMAN3.heapExternal    = "EXTALG_HEAP";
DMAN3.idma3Internal   = false;
DMAN3.scratchAllocFxn = "DSKT2_allocScratch";
DMAN3.scratchFreeFxn  = "DSKT2_freeScratch";
/*  Next, we configure all the physical resources that DMAN3 is granted
 *  exclusively. These settings are optimized for the DSP on DM6446 (DaVinci).
 *
 *  We assume PaRams 0..79 are taken by the Arm drivers, so we reserve
 *  all the rest, up to 127 (there are 128 PaRam sets on DM6446).
 *  DMAN3 takes TCC's 32 through 63 (hence the High TCC mask is 0xFFFFFFFF
 *  and the Low TCC mask is 0). Of the 48 PaRams we reserved, we assign
 *  all of them to scratch group 0; similarly, of the 32 TCCs we reserved,
 *  we assign all of them to scratch group 0.
 *
 *  If we had more scratch groups with algorithms that require EDMA, we would
 *  split those 48 PaRams and 32 TCCs appropriately. For example, if we had
 *  a video encoder alg. in group 0 and video decoder alg. in group 1, and they
 *  both needed a number of EDMA channels, we could assing 24 PaRams and 16
 *  TCCs to Groups [0] and [1] each. (Assuming both algorithms needed no more
 *  than 24 channels to run properly.)
 */
DMAN3.qdmaPaRamBase = 0x09004000;
DMAN3.paRamBaseIndex     = 80;  // 1st EDMA3 PaRAM set available for DMAN3
DMAN3.numQdmaChannels    = 8;   // number of device's QDMA channels to use
DMAN3.qdmaChannels       = [0,1,2,3,4,5,6,7]; // choice of QDMA channels
                                                 //to use
DMAN3.numPaRamEntries    = 48;  // number of PaRAM sets exclusively used
                                    //by DMAN
DMAN3.numPaRamGroup[GROUP_2]   = 48;  //number of PaRAM sets for scratch group 0
DMAN3.numTccGroup[GROUP_2]     = 32;  //number of TCCs assigned to scratch group 0
DMAN3.tccAllocationMaskL = 0;   // bit mask indicating which TCCs 0..31
                                    //to use
DMAN3.tccAllocationMaskH = 0xffffffff; // assign all TCCs 32..63 for DMAN

//DMAN3.paRamBaseIndex     = 80;  // 1st EDMA3 PaRAM set available for DMAN3
//DMAN3.numQdmaChannels    = 8;   // number of device's QDMA channels to use
//DMAN3.qdmaChannels       = [0,1,2,3,4,5,6,7]; // choice of QDMA channels to use
//DMAN3.numPaRamEntries    = 48;  // number of PaRAM sets exclusively used by DMAN
//DMAN3.numPaRamGroup[GROUP_2] = 0; // number of PaRAM sets for scratch group 2
//DMAN3.numTccGroup[GROUP_2] = 0; // number of TCCs assigned to scratch group 2
//DMAN3.tccAllocationMaskL = 0;          // assign no TCCs 0..31 for DMAN3
//DMAN3.tccAllocationMaskH = 0xffffffff; // assign all TCCs 32..63 for DMAN3

/*  The remaining DMAN3 configuration settings are as defined in ti.sdo.fc.DMAN3
 *  defaults. You may need to override them to add more QDMA channels and
 *  configure per-scratch-group resource sub-allocations.
 */

/*
 *  ======== RMAN (IRES Resource manager) configuration ========
 */
var RMAN = xdc.useModule('ti.sdo.fc.rman.RMAN');
RMAN.useDSKT2 = true;    // delegate RMAN's memory allocation to DSKT2 (configured above)
RMAN.tableSize = 10;     // size of RMAN's internal resource table — confirm against RMAN cdoc
/* The lock/unlock/set/getContext functions will default to DSKT2 */

 

With these files as a starting point, how should the memory map be modified so that a codec engine application can run together with graphics?

  • I was actually interested to know that, too. Although there are some sources, already named above, which can be investigated, I still hope that TI will provide a short and working how-to for all those changes: which addresses and sizes can be changed where, and which components have to be re-built after which changes. Thanks in advance! Right now it feels like a nightmare to find all the places where the different software parts have to be configured with respect to memory. I can spend only a small fraction of my time on programming itself; most of the time I am searching for the right configuration values. That state of affairs should change, urgently.

    Regards,
    Joern.