1 Modifying the encodedecode demo to test the codec
Next, we integrate the modified videnc_copy into the encodedecode demo so that it captures live video, draws a rectangle on each frame with EMCV, and then sends the result to the display.
1.1 Modifying the demo
(1) Modify encodedecode.cfg
To use videnc_copy, point the demo at the codec server that contains it by changing the arguments of Engine.createFromServer in encodedecode.cfg, as shown below.
var demoEngine = Engine.createFromServer(
"encodedecode",
"bin/ti_platforms_evmDM6467/all.x64P",
"ti.sdo.ce.examples.servers.all_codecs"
);
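On the C application side nothing else has to change for this step: the name passed to Engine_open() simply has to match the engine name configured above. A minimal sketch of that call (an illustration only, assuming the ERR/cleanup macros used elsewhere in the demo):

Engine_Error ec;
Engine_Handle hEngine;

/* "encodedecode" must match the first argument of Engine.createFromServer */
hEngine = Engine_open("encodedecode", NULL, &ec);
if (hEngine == NULL) {
    ERR("Failed to open codec engine (error %d)\n", (Int)ec);
    cleanup(THREAD_FAILURE);
}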
(2) Modify codecs.c
Since we no longer H.264-encode the video, the related parameter definitions in codecs.c need to be commented out.
#if 0
/* Use extended dynamic parameters to allow tweaking of the QP value */
static IH264VENC_DynamicParams extDynParams = {
{
sizeof(IVIDENC1_DynamicParams), /* size */
576, /* inputHeight */
720, /* inputWidth */
25000, /* refFrameRate */
25000, /* targetFrameRate */
2000000, /* targetBitRate (override in app) */
30, /* intraFrameInterval */
XDM_DECODE_AU, /* generateHeader */
0, /* captureWidth */
IVIDEO_NA_FRAME, /* forceFrame */
1, /* interFrameInterval */
0 /* mbDataFlag */
},
18, /* QPISlice */
20, /* QPSlice */
51, /* RateCtrlQpMax */
0, /* RateCtrlQpMin */
0, /* NumRowsInSlice */
0, /* LfDisableIdc */
0, /* LFAlphaC0Offset */
0, /* LFBetaOffset */
0, /* ChromaQPOffset */
0, /* SecChromaQPOffset */
};
#endif
(3) Modify capture.c
By default, encodedecode converts the captured YUV422 stream to YUV420 before handing it to the video thread. Our videnc_copy works directly on YUV422 and needs no conversion, so parts of capture.c have to be changed.
#if 0
/* Configure color conversion job */
if (Ccv_config(hCcv, hInBuf, hDstBuf) < 0) {
ERR("Failed to configure 422 to 420 color conversion job\n");
cleanup(THREAD_FAILURE);
}
else
printf("Succeed to configure 422 to 420 color conversion job.\n");
#endif
frameSkip = TRUE;
/* Signal that initialization is done and wait for other threads */
Rendezvous_meet(envp->hRendezvousInit);
while (!gblGetQuit()) {
if (frameSkip) {
/* Capture an extra frame to halve the frame rate */
if (Capture_get(hCapture, &hCapBuf) < 0) {
ERR("Failed to get capture buffer\n");
cleanup(THREAD_FAILURE);
}
if (Capture_put(hCapture, hCapBuf) < 0) {
ERR("Failed to put capture buffer\n");
cleanup(THREAD_FAILURE);
}
}
/* Get a buffer from the capture driver to encode */
if (Capture_get(hCapture, &hCapBuf) < 0) {
ERR("Failed to get capture buffer\n");
cleanup(THREAD_FAILURE);
}
if (resize) {
/* Resize buffer from 720P to a smaller resolution */
if (Resize_execute(hRsz, hCapBuf, hIntBuf) < 0) {
ERR("Failed to execute resize job\n");
cleanup(THREAD_FAILURE);
}
hInBuf = hIntBuf;
}
else {
hInBuf = hCapBuf;
}
//if (Ccv_execute(hCcv, hInBuf, hDstBuf) < 0) {
//
//
//}
/* Send the captured (YUV422) buffer to the video thread for encoding */
if (Fifo_put(envp->hOutFifo, hInBuf) < 0) {   /* capture-to-video fifo */
ERR("Failed to send buffer to display thread\n");
cleanup(THREAD_FAILURE);
}
/* Return a buffer to the capture driver */
if (Capture_put(hCapture, hCapBuf) < 0) {
ERR("Failed to put capture buffer\n");
cleanup(THREAD_FAILURE);
}
/* Get a buffer from the video thread */
fifoRet = Fifo_get(envp->hInFifo, &hInBuf);   /* video-to-capture fifo */
if (fifoRet < 0) {
ERR("Failed to get buffer from video thread\n");
cleanup(THREAD_FAILURE);
}
/* Did the video thread flush the fifo? */
if (fifoRet == Dmai_EFLUSH) {
cleanup(THREAD_SUCCESS);
}
}
(4) Modify display.c
Likewise, because videnc_copy outputs a YUV422 stream that can be handed to the display driver without conversion, parts of display.c have to be changed as well.
static Int config(Ccv_Handle hCcv, Blend_Handle hBlend, UI_Handle hUI,
Buffer_Handle hSrcBuf, Buffer_Handle hDstBuf, UInt8 trans)
{
Blend_Config_Params bConfigParams = Blend_Config_Params_DEFAULT;
Buffer_Handle hBmpBuf;
Int i;
#if 0
/* Configure the 420->422 color conversion job */
if (Ccv_config(hCcv, hSrcBuf, hDstBuf) < 0) {
ERR("Failed to configure color conversion job\n");
return FAILURE;
}
#endif
……
/*
* Color convert the 420Psemi decoded buffer from
* the video thread to the 422Psemi display.
*/
//if (Ccv_execute(hCcv, hSrcBuf, hDstBuf) < 0) {
//
//
//}
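With the 420-to-422 conversion gone, the 422Psemi buffer coming from the video thread still has to end up in the buffer handed to the display driver. A plain copy is one possible stand-in, sketched below; this is only an illustration under the assumption that hSrcBuf and hDstBuf keep the roles they have in the function above, not necessarily how the demo was finally wired:

/* The data is already 422Psemi, so copy it straight into the display buffer */
memcpy(Buffer_getUserPtr(hDstBuf), Buffer_getUserPtr(hSrcBuf),
       Buffer_getNumBytesUsed(hSrcBuf));
Buffer_setNumBytesUsed(hDstBuf, Buffer_getNumBytesUsed(hSrcBuf));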
(5) Modify video.c
video.c needs the most changes; only the main parts are covered here.
First, comment out the variables and parameters related to the H.264 encoder/decoder and add the variables needed by videnc_copy.
VIDENC_Handle enc = NULL;
String encoderName = "videnc_copy";
VIDENC_InArgs encInArgs;
VIDENC_OutArgs encOutArgs;
VIDENC_DynamicParams encDynParams;
VIDENC_Status encStatus;
XDAS_Int8 *inBuf;
XDAS_Int8 *encodedBuf;
XDM_BufDesc inBufDesc;
XDAS_Int8 *src[XDM_MAX_IO_BUFFERS];
XDAS_Int32 inBufSizes[XDM_MAX_IO_BUFFERS];
XDM_BufDesc encodedBufDesc;
XDAS_Int8 *encoded[XDM_MAX_IO_BUFFERS];
XDAS_Int32 encBufSizes[XDM_MAX_IO_BUFFERS];
Memory_AllocParams allocParams;
encInArgs.size = sizeof(encInArgs);
encOutArgs.size = sizeof(encOutArgs);
encDynParams.size = sizeof(encDynParams);
encStatus.size = sizeof(encStatus);
allocParams.type = Memory_CONTIGPOOL;
allocParams.flags = Memory_NONCACHED;
allocParams.align = BUFALIGN;
allocParams.seg = 0;
inBuf = (XDAS_Int8 *)Memory_alloc(IFRAMESIZE, &allocParams);
encodedBuf = (XDAS_Int8 *)Memory_alloc(EFRAMESIZE, &allocParams);
/* clear and initialize the buffer descriptors */
memset(src, 0, sizeof(src[0]) * XDM_MAX_IO_BUFFERS);
memset(encoded, 0, sizeof(encoded[0]) * XDM_MAX_IO_BUFFERS);
src[0] = inBuf;
encoded[0] = encodedBuf;
inBufDesc.numBufs = encodedBufDesc.numBufs = 1;
inBufDesc.bufSizes = inBufSizes;
encodedBufDesc.bufSizes = encBufSizes;
inBufSizes[0] = IFRAMESIZE;
encBufSizes[0] = EFRAMESIZE;
inBufDesc.bufs = src;
encodedBufDesc.bufs = encoded;
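For completeness, the copy codec instance itself can be created the same way the Codec Engine video_copy example does it, with NULL creation parameters, since videnc_copy ignores them. A sketch, assuming hEngine is the Engine_Handle the thread already opens and ERR/cleanup behave as elsewhere in video.c:

/* Create the copy "encoder"; default creation params are fine for videnc_copy */
enc = VIDENC_create(hEngine, encoderName, NULL);
if (enc == NULL) {
    ERR("Failed to create %s\n", encoderName);
    cleanup(THREAD_FAILURE);
}

The encDynParams and encStatus structures declared above would only come into play if the codec were reconfigured at run time through VIDENC_control().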
Then change the color space definition from the original YUV420PSEMI to YUV422PSEMI.
gfxAttrs.colorSpace = ColorSpace_YUV422PSEMI;
gfxAttrs.dim.width = envp->imageWidth;
gfxAttrs.dim.height = envp->imageHeight;
gfxAttrs.dim.lineLength = BufferGfx_calcLineLength(gfxAttrs.dim.width,
gfxAttrs.colorSpace);
Next, the buffer handling has to be adjusted slightly before the frames are passed to videnc_copy for processing.
if (!envp->passThrough) {
fifoRet = Fifo_get(envp->hDisplayOutFifo, &hDispBuf);
memcpy(inBuf, Buffer_getUserPtr(hVidBuf), IFRAMESIZE);
#ifdef CACHE_ENABLED
#ifdef xdc_target__isaCompatible_64P
Memory_cacheWbInv(inBuf, IFRAMESIZE);
#else
#error Unvalidated config - add appropriate fread-related cache maintenance
#endif
Memory_cacheInv(encodedBuf, EFRAMESIZE);
#endif
//memcpy(encodedBuf, inBuf, EFRAMESIZE);
ret = VIDENC_process(enc, &inBufDesc, &encodedBufDesc, &encInArgs, &encOutArgs);
if (ret < 0) {
cleanup(THREAD_FAILURE);
}
#ifdef CACHE_ENABLED
Memory_cacheWb(encodedBuf, EFRAMESIZE);
#endif
memcpy(Buffer_getUserPtr(hDispBuf), encodedBuf, EFRAMESIZE);
Buffer_setNumBytesUsed(hDispBuf, EFRAMESIZE);
/* Update global data for user interface */
gblIncVideoBytesProcessed(EFRAMESIZE);
if (Fifo_put(envp->hDisplayInFifo, hDispBuf) < 0) {
ERR("Failed to send buffer to display thread\n");
cleanup(THREAD_FAILURE);
}
/* and return input video frame to capture thread */
if (Fifo_put(envp->hCaptureInFifo, hVidBuf) < 0) {
ERR("Failed to send buffer to capture thread\n");
cleanup(THREAD_FAILURE);
}
}
else {
/* Send the buffer through to the display thread unmodified */
if (Fifo_put(envp->hDisplayInFifo, hVidBuf) < 0) {
ERR("Failed to send buffer to display thread\n");
cleanup(THREAD_FAILURE);
}
}
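Because inBuf and encodedBuf are allocated with Memory_alloc(), the thread's cleanup path needs the matching teardown as well. A sketch of what would go into the existing cleanup section of video.c:

/* Delete the codec instance and free the contiguous working buffers */
if (enc) {
    VIDENC_delete(enc);
}
if (inBuf) {
    Memory_free(inBuf, IFRAMESIZE, &allocParams);
}
if (encodedBuf) {
    Memory_free(encodedBuf, EFRAMESIZE, &allocParams);
}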
Finally, note that the code above uses Fifo_put(envp->hDisplayInFifo, hDispBuf) to hand the buffer to the display thread. The code below must therefore be commented out; otherwise the video thread blocks at run time, because it can no longer get a buffer from the display thread's fifo (those buffers have all been handed to the capture thread already).
#if 0
/* Get a buffer from the display thread to send to the capture thread */
if ((ret != Dmai_EFIRSTFIELD) && (Fifo_getNumEntries(envp->hDisplayOutFifo))) {
do {
fifoRet = Fifo_get(envp->hDisplayOutFifo, &hDispBuf);
if (fifoRet < 0) {
ERR("Failed to get buffer from video thread\n");
cleanup(THREAD_FAILURE);
}
/* Did the display thread flush the fifo? */
if (fifoRet == Dmai_EFLUSH) {
cleanup(THREAD_SUCCESS);
}
/* The display thread is no longer using the buffer */
Buffer_freeUseMask(BufTab_getBuf(hBufTab, Buffer_getId(hDispBuf)),
/*Buffer_getUseMask(BufTab_getBuf(hBufTab, Buffer_getId(hDispBuf)))*/DISPLAY_FREE);
/* Get a free buffer */
hDstBuf = BufTab_getFreeBuf(hBufTab);
if (hDstBuf == NULL) {
ERR("Failed to get free buffer from BufTab\n");
BufTab_print(hBufTab);
cleanup(THREAD_FAILURE);
}
/*
* Reset the dimensions of the buffer in case the decoder has
* changed them due to padding.
*/
BufferGfx_resetDimensions(hDstBuf);
/* Send a buffer to the capture thread */
if (Fifo_put(envp->hCaptureInFifo, hDstBuf) < 0) {
ERR("Failed to send buffer to capture thread\n");
cleanup(THREAD_FAILURE);
}
} while (Fifo_getNumEntries(envp->hDisplayOutFifo) > 0);
}
#endif
1.2 Program run results
After making these changes, verify that the project builds, then download it to the board and run it. The serial debug terminal output is shown in the screenshot below.

[Screenshot: serial terminal output]

The result on the monitor is shown below, confirming that the program runs correctly.

[Screenshot: display output]
1.3 Remaining issues
The serial terminal output shows that the DSP stays at full load, and the processing frame rate is still below 1 fps: each frame takes roughly 1.5 s. The algorithm is therefore very inefficient and will need substantial optimization later. The main idea is to convert all floating-point operations to fixed point, because the DM6467's DSP is a fixed-point C64x+ core and floating-point arithmetic on it is very slow.
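To make the fixed-point idea concrete, here is a small self-contained sketch of Q15 arithmetic in plain C; the real port would rely on the TI libraries mentioned below rather than hand-rolled code:

#include <stdint.h>
#include <stdio.h>

/* Q15: signed 16-bit fixed point with 15 fractional bits */
#define Q15(x) ((int16_t)((x) * 32768.0f))

static inline int16_t q15_mul(int16_t a, int16_t b)
{
    /* widen to 32 bits, multiply, then shift back into Q15 */
    return (int16_t)(((int32_t)a * (int32_t)b) >> 15);
}

int main(void)
{
    int16_t a = Q15(0.25f);
    int16_t b = Q15(0.5f);

    /* 0.25 * 0.5 = 0.125, i.e. roughly Q15(0.125) = 4096 */
    printf("q15 product: %d (expected ~4096)\n", q15_mul(a, b));
    return 0;
}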
In addition, the TI libraries DSPLIB, FastRTS, IQmath, IMGLIB and VLIB could be used: replace parts of EMCV with VLIB and IMGLIB, do the color-space conversion with VLIB, and use FastRTS, IQmath and DSPLIB for the basic numerical work such as floating-point operations, matrix operations, and multiplication/division.
All of these libraries are already integrated into C6Accel, so the next step is to learn how to use C6Accel and integrate OpenCV with it.