Video capture here means producing frames in a fixed format that can then be handed to an encoder.
The capture stage has a few key parameters:
1. resolution
2. frame rate
3. capture (pixel) format
The Linux kernel exposes cameras through the V4L2 framework, so all we have to do is deliver a video stream at the requested resolution and frame rate.
My platform is Android, whose kernel is also Linux; the only addition is the JNI wrapper on top so the code can be called from Java. If you don't need that, simply ignore the JNI parts.
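As an overview, here is the call flow the functions below implement (a sketch; the names match the code that follows):

/* V4L2 capture flow:
 *   open("/dev/videoN")                            -> open_camera()
 *   VIDIOC_QUERYCAP / VIDIOC_S_FMT / VIDIOC_S_PARM -> init_camera()
 *   VIDIOC_REQBUFS + mmap()                        -> initmmap()
 *   VIDIOC_QBUF (each buffer) + VIDIOC_STREAMON    -> startcapturing()
 *   select() -> VIDIOC_DQBUF -> convert -> VIDIOC_QBUF -> readframeonce()/readframe()
 *   VIDIOC_STREAMOFF / munmap() / close()          -> stopcapturing()/uninitdevice()/closedevice()
 */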
#ifndef LINUX
#include <jni.h>
#include <android/log.h>
#include <android/bitmap.h>
#endif
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <fcntl.h> /* low-level i/o */
#include <unistd.h>
#include <errno.h>
#include <malloc.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <asm/types.h> /* for videodev2.h */
#include <linux/videodev2.h>
#include <linux/usbdevice_fs.h>
#define LOG_TAG "TEST"
#ifndef LINUX
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__)
#else
#define LOGI printf
#define LOGE printf
#define LOGD printf
#endif
#define CLEAR(x) memset (&(x), 0, sizeof (x))
//#define IMG_WIDTH 640
//#define IMG_HEIGHT 480
#define ERROR_DEVICE_NOT_EXIST -1
#define ERROR_DEVICE_TYPE_ERROR -2
#define ERROR_DEVICE_OPEN_FAILED -3
#define ERROR_DEVICE_CAP_NOT_SUPPORT -4
#define ERROR_DEVICE_CAP_ERROR -5
#define ERROR_VIDIOC_QUERYBUF -6
#define ERROR_VIDIOC_QBUF -7
#define ERROR_VIDIOC_DQBUF -70
#define ERROR_REQBUFS -71
#define ERROR_MMAP_FAILED -8
#define ERROR_UNMMAP_FAILED -88
#define ERROR_LOCAL -9
#define ERROR_VIDIOC_STREAMON -10
#define ERROR_VIDIOC_STREAMOFF -11
#define ERROR_SELECT -12
#define SUCCESSED 0
struct buffer {
void * start;
size_t length;
};
typedef struct camera {
char device_name[32]; // fixed array instead of a pointer, so the name can be filled in place
int fd;
int width;
int height;
int display_depth;
int image_size;
int frame_number;
int framerate;
struct v4l2_capability v4l2_cap;
struct v4l2_cropcap v4l2_cropcap;
struct v4l2_format v4l2_fmt;
struct v4l2_crop crop;
struct buffer *buffers;
}Camera;
int xioctl(int fd, int request, void *arg);
int open_camera(Camera *cam);
int init_camera(Camera *cam);
int initmmap(Camera *cam);
int startcapturing(Camera *cam);
int readframeonce(Camera *cam,char *buffer,int *size);
int readframe(Camera *cam,char *buffer,int *size);
void processimage (const void *p);
int stopcapturing(Camera *cam);
int uninitdevice(Camera *cam);
int closedevice(Camera *cam);
#ifndef LINUX
extern "C"
{
jint Java_com_ist_Camera_prepareCamera( JNIEnv* env,jclass thiz, jint videoid ,jint width,jint height,jint framerate);
void Java_com_ist_Camera_processCamera( JNIEnv* env,jclass thiz,jbyteArray buf);
void Java_com_ist_Camera_stopCamera(JNIEnv* env,jclass thiz);
}
#endif
Here I define a Camera struct plus a set of functions that wrap the camera operations used for capture.
The JNI wrapper shows the lifecycle most clearly: prepare, process, stop.
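From the function names alone the Java side can be inferred. A sketch of the implied declarations (assumption: the actual Java class is not shown here; this is only what the Java_com_ist_Camera_* naming convention and the jclass parameter imply):

/* Implied Java counterpart (hypothetical sketch):
 *   package com.ist;
 *   public class Camera {
 *       public static native int  prepareCamera(int videoid, int width, int height, int framerate);
 *       public static native void processCamera(byte[] buf);
 *       public static native void stopCamera();
 *   }
 */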
#include "ImageProc.h"
#include <string>
using namespace std;
// Global state: the active Camera and the intermediate frame buffers
Camera *camera=NULL;
int n_buffers=0;
char * g_buffer=NULL;
char * g_buffer2=NULL;
// Helper: write an int back through a java.lang.Integer object by poking its
// private "value" field (a by-reference return hack; unused in the JNI below).
int setInteger(JNIEnv *env, jobject thiz, jobject p1, int value)
{
    jclass c = env->FindClass("java/lang/Integer");
    if (c == NULL) {
        LOGD("FindClass failed");
        return -1;
    }
    jfieldID id = env->GetFieldID(c, "value", "I");
    if (id == NULL) {
        LOGD("GetFieldID failed");
        return -1;
    }
    env->SetIntField(p1, id, value);
    return 0;
}
// Convert one packed YUYV (4:2:2) frame into planar YUV420 (I420).
// YUYV stores 2 pixels in 4 bytes: Y0 U Y1 V. I420 stores a full-size Y plane
// followed by quarter-size U and V planes; chroma is kept only on even rows
// to get the 2x vertical subsampling.
void yuyv_2_yuv420(int inWidth, int inHeight, unsigned char *psrc, unsigned char *pDest)
{
    int i, j;
    unsigned char *Y, *U, *V;
    unsigned char y1, y2, u, v;
    Y = pDest;
    U = pDest + inHeight * inWidth;     // U plane starts after the Y plane
    V = U + inHeight * inWidth / 4;     // V plane after the quarter-size U plane
    for (i = 0; i < inHeight; i++) {
        for (j = 0; j < inWidth / 2; j++) {  // one iteration = one 4-byte YUYV pair
            y1 = *(psrc + (i * inWidth / 2 + j) * 4);
            u  = *(psrc + (i * inWidth / 2 + j) * 4 + 1);
            y2 = *(psrc + (i * inWidth / 2 + j) * 4 + 2);
            v  = *(psrc + (i * inWidth / 2 + j) * 4 + 3);
            *Y++ = y1;
            *Y++ = y2;
            if (i % 2 == 0) {           // keep chroma from even rows only
                *U++ = u;
                *V++ = v;
            }
        }
    }
}
// ioctl wrapper that retries when the call is interrupted by a signal (EINTR)
int xioctl(int fd, int request, void *arg)
{
    int r;
    do {
        r = ioctl(fd, request, arg);
    } while (-1 == r && EINTR == errno);
    return r;
}
// Open the video device node
int open_camera(Camera *cam)
{
    struct stat st;
    // stat() the node and make sure it is a character device
    if (-1 == stat(cam->device_name, &st)) {
        LOGE("Cannot identify '%s': %d, %s", cam->device_name, errno, strerror(errno));
        return ERROR_DEVICE_NOT_EXIST;   // device node does not exist
    }
    if (!S_ISCHR(st.st_mode)) {
        LOGE("%s is not a character device", cam->device_name);
        return ERROR_DEVICE_TYPE_ERROR;  // wrong device type
    }
    // non-blocking so a stalled camera cannot hang reads; select() does the waiting
    cam->fd = open(cam->device_name, O_RDWR | O_NONBLOCK, 0);
    if (-1 == cam->fd) {
        LOGE("Cannot open '%s': %d, %s", cam->device_name, errno, strerror(errno));
        return ERROR_DEVICE_OPEN_FAILED; // open failed
    }
    LOGD("open '%s' ok", cam->device_name);
    return SUCCESSED;
}
// Initialize the device: query capabilities, set the format and frame rate
int init_camera(Camera *cam)
{
    struct v4l2_capability *cap = &(cam->v4l2_cap);
    struct v4l2_cropcap *cropcap = &(cam->v4l2_cropcap);
    struct v4l2_crop *crop = &(cam->crop);
    struct v4l2_format *fmt = &(cam->v4l2_fmt);
    unsigned int min;
    int ret = 0;
    struct v4l2_fmtdesc fmtdesc;
    // VIDIOC_QUERYCAP: read the device's capabilities
    if (-1 == xioctl(cam->fd, VIDIOC_QUERYCAP, cap)) {
        if (EINVAL == errno) {
            LOGE("%s is no V4L2 device\n", cam->device_name);
            return ERROR_DEVICE_CAP_ERROR;
        } else {
            LOGE("%s error %d, %s\n", "VIDIOC_QUERYCAP", errno, strerror(errno));
            return ERROR_DEVICE_CAP_ERROR;
        }
    }
    LOGI("\nVIDIOC_QUERYCAP\n");
    LOGI("the camera driver is %s\n", cap->driver);
    LOGI("the camera card is %s\n", cap->card);      // e.g. UVC Camera (046d:081b)
    LOGI("the camera bus info is %s\n", cap->bus_info);
    LOGI("the version is %d\n", cap->version);       // e.g. 199168
    if (!(cap->capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        LOGE("%s is no video capture device\n", cam->device_name);
        return ERROR_DEVICE_CAP_ERROR;
    }
    // Enumerate the pixel formats the driver supports
    CLEAR(fmtdesc);
    fmtdesc.index = 0;
    fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    LOGE("Support format:\n");
    while (xioctl(cam->fd, VIDIOC_ENUM_FMT, &fmtdesc) != -1) {
        LOGE("\t%d.%s\n", fmtdesc.index + 1, fmtdesc.description);
        fmtdesc.index++;
    }
    if (!(cap->capabilities & V4L2_CAP_STREAMING)) {
        LOGE("%s does not support streaming i/o\n", cam->device_name);
        return ERROR_DEVICE_CAP_ERROR;
    }
    // Prepare cropping/scaling info. Note: the crop rectangle is filled in here
    // but VIDIOC_S_CROP is never issued, so the driver keeps its default (full frame).
    CLEAR(*cropcap);
    cropcap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    crop->c.width = cam->width;
    crop->c.height = cam->height;
    crop->c.left = 0;
    crop->c.top = 0;
    crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    // Set the capture format: resolution and pixel format
    CLEAR(*fmt);
    LOGI("%s setfmt go...!\n", cam->device_name);
    fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt->fmt.pix.width = cam->width;
    fmt->fmt.pix.height = cam->height;
    //fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_H264; // compressed H.264 stream
    fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;   // packed YUV 4:2:2
    fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;     // interlaced scan
    // Apply the format; the driver may adjust the fields to what it supports
    if (-1 == xioctl(cam->fd, VIDIOC_S_FMT, fmt)) {
        LOGE("%s error %d, %s\n", "VIDIOC_S_FMT", errno, strerror(errno));
        return ERROR_DEVICE_CAP_ERROR;
    }
    // Sanity-fix bytesperline/sizeimage in case the driver left them too small
    min = fmt->fmt.pix.width * 2;   // bytes per line for YUYV (2 bytes per pixel)
    if (fmt->fmt.pix.bytesperline < min)
        fmt->fmt.pix.bytesperline = min;
    min = fmt->fmt.pix.bytesperline * fmt->fmt.pix.height;
    if (fmt->fmt.pix.sizeimage < min)
        fmt->fmt.pix.sizeimage = min;
    LOGI("NEW yuyv buffer length = %d \n", cam->width * cam->height * 2);
    LOGI("NEW yuv420 buffer length = %d \n", cam->width * cam->height / 2 * 3);
    g_buffer = new char[cam->width * cam->height * 2];      // raw YUYV frame
    g_buffer2 = new char[cam->width * cam->height / 2 * 3]; // converted YUV420 frame
    // Set the frame rate (frames per second = denominator / numerator)
    struct v4l2_streamparm Stream_Parm;
    memset(&Stream_Parm, 0, sizeof(struct v4l2_streamparm));
    Stream_Parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    Stream_Parm.parm.capture.timeperframe.denominator = cam->framerate;
    Stream_Parm.parm.capture.timeperframe.numerator = 1;
    ret = xioctl(cam->fd, VIDIOC_S_PARM, &Stream_Parm);
    LOGI("VIDIOC_S_PARM= %d \n", ret);
return initmmap (cam);
}
// Select the I/O method: request kernel buffers and mmap() them into user space
int initmmap(Camera *cam)
{
    struct v4l2_requestbuffers req;
    CLEAR(req);
    LOGI("initmmap\n");
    req.count = 4;                       // ask the driver for 4 buffers
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (-1 == xioctl(cam->fd, VIDIOC_REQBUFS, &req)) {
        if (EINVAL == errno) {
            LOGE("%s does not support memory mapping", cam->device_name);
            return ERROR_LOCAL;
        } else {
            return ERROR_REQBUFS;
        }
    }
    if (req.count < 2) {                 // the driver may grant fewer than requested
        LOGE("Insufficient buffer memory on %s", cam->device_name);
        return ERROR_LOCAL;
    }
    cam->buffers = (struct buffer *) calloc(req.count, sizeof(*(cam->buffers)));
    if (!cam->buffers) {
        LOGE("Out of memory");
        return ERROR_LOCAL;
    }
    // Query each buffer's offset/length and map it into this process
    for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
        struct v4l2_buffer buf;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = n_buffers;
        if (-1 == xioctl(cam->fd, VIDIOC_QUERYBUF, &buf))
            return ERROR_VIDIOC_QUERYBUF;
        LOGI("MMAP_SIZE=%d\n", buf.length);
        cam->buffers[n_buffers].length = buf.length;
        cam->buffers[n_buffers].start =
            mmap(NULL,
                 buf.length,
                 PROT_READ | PROT_WRITE,
                 MAP_SHARED,
                 cam->fd, buf.m.offset);
        if (MAP_FAILED == cam->buffers[n_buffers].start)
            return ERROR_MMAP_FAILED;
    }
    return SUCCESSED;
}
// Queue all buffers and start streaming
int startcapturing(Camera *cam)
{
    unsigned int i;
    enum v4l2_buf_type type;
    for (i = 0; i < n_buffers; ++i) {    // enqueue every mapped buffer
        struct v4l2_buffer buf;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (-1 == xioctl(cam->fd, VIDIOC_QBUF, &buf))
            return ERROR_VIDIOC_QBUF;
    }
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == xioctl(cam->fd, VIDIOC_STREAMON, &type))
        return ERROR_VIDIOC_STREAMON;
    return SUCCESSED;
}
// Wait (with a 2 s timeout) until the fd is readable, then grab one frame
int readframeonce(Camera *cam, char *buf, int *size)
{
    for (;;) {
        fd_set fds;
        struct timeval tv;
        int r;
        FD_ZERO(&fds);
        FD_SET(cam->fd, &fds);
        tv.tv_sec = 2;
        tv.tv_usec = 0;
        r = select(cam->fd + 1, &fds, NULL, NULL, &tv);
        if (-1 == r) {
            if (EINTR == errno)
                continue;                // interrupted by a signal: retry
            return ERROR_SELECT;
        }
        if (0 == r) {
            LOGE("select timeout");
            return ERROR_LOCAL;
        }
        if (readframe(cam, buf, size) == 1)  // 0 means EAGAIN: try again
            break;
    }
    return SUCCESSED;
}
// Debug helper: dump a buffer as hex
void cat_data(unsigned char *buf, int size)
{
    int i;
    string ss("");
    char cc[10];
    for (i = 0; i < size; i++) {
        sprintf(cc, "%02x ", buf[i]);
        ss.append(cc);
    }
    LOGI("%s", ss.c_str());  // "%s" so the data is never treated as a format string
    LOGI("\n");
}
// Dequeue one filled buffer, convert YUYV -> YUV420, re-queue the buffer.
// NOTE: the 'buffer' parameter is currently unused; the converted frame is
// left in the global g_buffer2 (the JNI wrapper passes g_buffer2 here anyway).
int readframe(Camera *cam, char *buffer, int *size)
{
    struct v4l2_buffer buf;
    CLEAR(buf);
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    //buf.memory = V4L2_MEMORY_USERPTR;
    if (-1 == xioctl(cam->fd, VIDIOC_DQBUF, &buf)) {
        switch (errno) {
        case EAGAIN:
            return 0;                    // no frame ready yet
        case EIO:
        default:
            return ERROR_VIDIOC_DQBUF;
        }
    }
    assert(buf.index < n_buffers);
    memcpy(g_buffer, cam->buffers[buf.index].start, cam->buffers[buf.index].length);
    //*size = cam->buffers[buf.index].length;
    yuyv_2_yuv420(cam->width, cam->height, (unsigned char *)g_buffer, (unsigned char *)g_buffer2);
    *size = cam->buffers[buf.index].length / 4 * 3;  // YUYV (2 B/px) -> YUV420 (1.5 B/px)
    LOGI("get yuv420 SIZE = %d", *size);
    if (-1 == xioctl(cam->fd, VIDIOC_QBUF, &buf))    // hand the buffer back to the driver
        return ERROR_VIDIOC_QBUF;
    return 1;
}
// Stop streaming
int stopcapturing(Camera *cam)
{
    enum v4l2_buf_type type;
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == xioctl(cam->fd, VIDIOC_STREAMOFF, &type))
        return ERROR_VIDIOC_STREAMOFF;
    return SUCCESSED;
}
// Unmap the buffers and release the helper allocations
int uninitdevice(Camera *cam)
{
    unsigned int i;
    for (i = 0; i < n_buffers; ++i)
        if (-1 == munmap(cam->buffers[i].start, cam->buffers[i].length))
            return ERROR_UNMMAP_FAILED;
    free(cam->buffers);  // allocated with calloc()
    delete[] g_buffer;   // allocated with new[] in init_camera, so delete[] (not free)
    delete[] g_buffer2;
    return SUCCESSED;
}
// Close the device node
int closedevice(Camera *cam)
{
    if (-1 == close(cam->fd)) {
        cam->fd = -1;
        return ERROR_LOCAL;
    }
    cam->fd = -1;
    return SUCCESSED;
}
#ifndef LINUX
// JNI: open + configure + start the camera; returns 0 on success
jint Java_com_ist_Camera_prepareCamera(JNIEnv* env, jclass thiz, jint videoid, jint width, jint height, jint framerate)
{
    int ret;
    camera = (Camera *) malloc(sizeof(Camera));
    sprintf(camera->device_name, "/dev/video%d", videoid);
    camera->buffers = NULL;
    camera->width = width;
    camera->height = height;
    camera->display_depth = 5;  /* RGB24 */
    camera->framerate = framerate;
    ret = open_camera(camera);
    if (ret != 0) {
        return ret;              // open failed
    }
    ret = init_camera(camera);
    LOGE("init_camera return %d\n", ret);
    if (ret == SUCCESSED) {
        ret = startcapturing(camera);
        LOGE("startcapturing return %d\n", ret);
    }
    if (ret != SUCCESSED) {
        stopcapturing(camera);
        uninitdevice(camera);
        closedevice(camera);
        LOGE("device closed\n");
    }
    return ret;                  // propagate any failure to the Java caller
}
// JNI: grab one converted YUV420 frame and copy it into the Java byte array
void Java_com_ist_Camera_processCamera(JNIEnv* env, jclass thiz, jbyteArray buf)
{
    int size = 0;
    readframeonce(camera, g_buffer2, &size);
    env->SetByteArrayRegion(buf, 0, size, (jbyte*)g_buffer2);
}
// JNI: stop streaming and release everything
void Java_com_ist_Camera_stopCamera(JNIEnv* env, jclass thiz)
{
    stopcapturing(camera);
    uninitdevice(camera);
    closedevice(camera);
    free(camera);
    camera = NULL;
}
#else
// Standalone Linux test: grab a few frames from /dev/video<argv[1]>
int main(int argc, char *argv[])
{
    if (argc < 2) {
        perror("argc <2\n");
        exit(-1);
    }
    int ret;
    camera = (Camera *) malloc(sizeof(Camera));
    sprintf(camera->device_name, "/dev/video%d", atoi(argv[1]));
    camera->buffers = NULL;
    camera->width = 176;
    camera->height = 144;
    camera->framerate = 15;      // must be set: VIDIOC_S_PARM reads this field (15 is an arbitrary test rate)
    //camera->display_depth = 5; /* RGB24 */
    ret = open_camera(camera);
    if (ret != 0) {
        return ret;              // open failed
    }
    ret = init_camera(camera);
    LOGE("init_camera return %d\n", ret);
    if (ret == SUCCESSED) {
        ret = startcapturing(camera);
        LOGE("startcapturing return %d\n", ret);
    }
    if (ret != SUCCESSED) {
        stopcapturing(camera);
        uninitdevice(camera);
        closedevice(camera);
        LOGE("device CLOSED!\n");
        return ret;              // bail out instead of reading from a dead device
    }
    // Grab a few frames; the converted data is left in g_buffer2 by readframe()
    char *buf = g_buffer2;
    int size = 0;
    ret = readframeonce(camera, buf, &size);
    readframeonce(camera, buf, &size);
    readframeonce(camera, buf, &size);
    readframeonce(camera, buf, &size);
    readframeonce(camera, buf, &size);
    stopcapturing(camera);
    sleep(1);
    uninitdevice(camera);
    sleep(1);
    closedevice(camera);
    free(camera);
    return SUCCESSED;
}
#endif
The LINUX macro distinguishes the JNI build from a plain Linux build, which makes it easy to test the capture path on a desktop machine first.
The resolution and frame rate are set inside init_camera, via the capture format (VIDIOC_S_FMT) and the stream parameters (VIDIOC_S_PARM).
Note that you must check in advance which resolutions and frame rates the camera actually supports; setting unsupported values can leave you with no image at all.
For example:
v4l2-ctl --list-formats-ext -d /dev/video0
        Pixel Format: 'YUYV'
        Name        : YUV 4:2:2 (YUYV)
                Size: Discrete 640x480
                        Interval: Discrete 0.033 s (30.000 fps)
                        Interval: Discrete 0.040 s (25.000 fps)
                        Interval: Discrete 0.050 s (20.000 fps)
                        Interval: Discrete 0.067 s (15.000 fps)
                        Interval: Discrete 0.100 s (10.000 fps)
                        Interval: Discrete 0.200 s (5.000 fps)
        ..........
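The same information can be queried from code. A minimal sketch (assuming an already-open fd to a YUYV-capable device; VIDIOC_ENUM_FRAMESIZES and VIDIOC_ENUM_FRAMEINTERVALS are standard V4L2 ioctls, and enum_yuyv_modes is a hypothetical helper added here for illustration):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// Print every discrete YUYV frame size and frame interval the driver reports
void enum_yuyv_modes(int fd)
{
    struct v4l2_frmsizeenum fsize;
    memset(&fsize, 0, sizeof(fsize));
    fsize.pixel_format = V4L2_PIX_FMT_YUYV;
    for (fsize.index = 0; ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0; fsize.index++) {
        if (fsize.type != V4L2_FRMSIZE_TYPE_DISCRETE)
            continue;
        struct v4l2_frmivalenum fival;
        memset(&fival, 0, sizeof(fival));
        fival.pixel_format = V4L2_PIX_FMT_YUYV;
        fival.width = fsize.discrete.width;
        fival.height = fsize.discrete.height;
        for (fival.index = 0; ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &fival) == 0; fival.index++) {
            if (fival.type == V4L2_FRMIVAL_TYPE_DISCRETE)
                printf("%ux%u @ %u/%u s per frame\n",
                       fsize.discrete.width, fsize.discrete.height,
                       fival.discrete.numerator, fival.discrete.denominator);
        }
    }
}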
We capture in YUYV, a format virtually every UVC camera supports.
Initialization ends with a call to initmmap: frames are large, so memory-mapped buffers shared with the driver are considerably more efficient than copying each frame through the kernel.
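For contrast, the simplest V4L2 I/O method just read()s whole frames, paying one full copy per frame (a sketch, not used in this code; it requires a driver that reports V4L2_CAP_READWRITE, and with O_NONBLOCK it must also tolerate EAGAIN):

/* read() I/O sketch: one copy per frame, no buffer management */
ssize_t n = read(cam->fd, g_buffer, cam->width * cam->height * 2);
if (n == -1 && errno != EAGAIN)
    LOGE("read error %d, %s\n", errno, strerror(errno));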
After that, startcapturing and stopcapturing start and stop streaming, readframe (via readframeonce) fetches a single frame, uninitdevice releases the buffers, and closedevice closes the device node.
The main function above shows the exact call order.
Because I feed the captured frames to Android's built-in H.264 encoder, each frame is first converted with yuyv_2_yuv420.
The many YUV packing formats will not be covered here.
The key point is that different YUV formats differ in both storage size and byte layout, so allocating buffers of the right size matters.
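As a quick sanity check of the allocations made in init_camera, using the 640x480 default from the commented-out macros above:

/* Buffer sizes for one 640x480 frame:
 *   YUYV (packed 4:2:2): 640*480*2   = 614400 bytes  -> g_buffer
 *   I420 (planar 4:2:0): 640*480*3/2 = 460800 bytes  -> g_buffer2
 *                        (Y 307200 + U 76800 + V 76800)
 */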
That covers obtaining a video stream with a given resolution and frame rate at the V4L2 layer.