USB Camera video节点方式关键代码分析

发布时间:2024年01月12日

原文网址(转载请注明出处):
http://t.csdnimg.cn/Dwpnw

源码基于:Android Q

1.CameraPreview 代码分析

这是一个自定义控件

	// Index of the V4L2 node created for the USB camera, e.g. /dev/video4 -> 4.
	private int cameraId=4;
	// Base offset added to cameraId by the native side: /dev/video(cameraBase + cameraId).
	private int cameraBase=0;

这两个值需要根据插入的USB Camera 生成的节点来进行赋值,例如 usb 插入后生成的节点是 /dev/video4 ,那么 cameraId 赋值为 4


    // Native (JNI) entry points implemented in libImageProc.so (see ImageProc.c below).
    public native int prepareCamera(int videoid);                         // open /dev/video<videoid>
    public native int prepareCameraWithBase(int videoid, int camerabase); // open /dev/video(camerabase+videoid)
    public native void processCamera();                                   // grab one frame into the native buffer
    public native void stopCamera();                                      // stop streaming, release the device
    public native void pixeltobmp(Bitmap bitmap);                         // copy native pixels into the Java Bitmap

这些是 调用的 jni 方法


    static {
        // Load libImageProc.so, which implements the native methods above.
        System.loadLibrary("ImageProc");
    }

System.loadLibrary这个函数会在如下路径搜索libxxx.so文件:

  • /system/lib
  • /data/data/<应用包名>/lib

	public CameraPreview(Context context) {
		super(context);
		this.context = context;
		if(DEBUG) Log.d(TAG,"CameraPreview constructed");
		setFocusable(true);
		
		holder = getHolder();
		holder.addCallback(this);
		holder.setType(SurfaceHolder.SURFACE_TYPE_NORMAL);	
	}

	public CameraPreview(Context context, AttributeSet attrs) {
		super(context, attrs);
		this.context = context;
		if(DEBUG) Log.d(TAG,"CameraPreview constructed");
		setFocusable(true);
		
		holder = getHolder();
		holder.addCallback(this);
		holder.setType(SurfaceHolder.SURFACE_TYPE_NORMAL);	
	}

包含有两个构造方法,根据不同的需要进行调用参考

  • 在代码中直接new一个Custom View实例的时候,会调用第一个构造函数
  • 在xml布局文件中调用CustomView的时候,会调用第二个构造函数

surfaceCreated
	@Override
	public void surfaceCreated(SurfaceHolder holder) {
		// NOTE(review): the original article had a stray fragment of the native
		// select()-timeout code pasted into this method body; it has been removed.
		if(DEBUG) Log.d(TAG, "surfaceCreated");
		// Lazily create the ARGB_8888 bitmap that receives the decoded frames.
		if(bmp==null){
			bmp = Bitmap.createBitmap(IMG_WIDTH, IMG_HEIGHT, Bitmap.Config.ARGB_8888);
		}
		// /dev/videoX (X = cameraId + cameraBase) is opened by the native side.
		int ret = prepareCameraWithBase(cameraId, cameraBase);

		if(ret!=-1) cameraExists = true;

		// Start the frame-pump loop (run() below) on a dedicated thread.
        mainLoop = new Thread(this);
        mainLoop.start();
	}

因为 CameraPreview extends SurfaceView implements SurfaceHolder.Callback, Runnable ,所以,需要重写 SurfaceView 的几个方法。在 surfaceCreated 中,创建了 Bitmap ,并调用 prepareCameraWithBase 准备相机,这是个 jni 方法,稍后和 run() 方法里面调用的 jni 方法一起分析。
CameraPreview 还 实现了 SurfaceHolder.Callback, Runnable,所以开启线程后会进入run() 方法

    @Override
    public void run() {
        // Frame pump: grab a frame from the native side, convert it into the
        // shared bitmap, and draw it scaled into a 4:3 area of the surface.
        // Exits immediately if the camera was never opened (cameraExists false).
        while (cameraExists) { // was "true && cameraExists" — redundant "true &&" removed
            // Compute the destination rectangle once, preserving a 4:3 aspect
            // ratio and centering it inside the surface.
            if (winWidth == 0) {
                winWidth = this.getWidth();
                winHeight = this.getHeight();

                if (winWidth * 3 / 4 <= winHeight) {
                    // Surface is taller than 4:3 — letterbox top/bottom.
                    dw = 0;
                    dh = (winHeight - winWidth * 3 / 4) / 2;
                    rate = ((float) winWidth) / IMG_WIDTH;
                    rect = new Rect(dw, dh, dw + winWidth - 1, dh + winWidth * 3 / 4 - 1);
                } else {
                    // Surface is wider than 4:3 — pillarbox left/right.
                    dw = (winWidth - winHeight * 4 / 3) / 2;
                    dh = 0;
                    rate = ((float) winHeight) / IMG_HEIGHT;
                    rect = new Rect(dw, dh, dw + winHeight * 4 / 3 - 1, dh + winHeight - 1);
                }
            }

            // Obtain one camera frame (pixel data is stored in a native buffer).
            processCamera();
            // Copy the native pixel buffer into the Java bitmap.
            pixeltobmp(bmp);

            Canvas canvas = getHolder().lockCanvas();
            if (canvas != null) {
                // Draw the camera bitmap scaled into the destination rect.
                canvas.drawBitmap(bmp, null, rect, null);

                getHolder().unlockCanvasAndPost(canvas);
            }

            // stopPreview/surfaceDestroyed sets shouldStop to end this loop.
            if (shouldStop) {
                shouldStop = false;
                break;
            }
        }
    }

首先根据 surfaceCreated 返回的相机数据结果,如果相机存在,就绘制一个显示区域。使用 processCamera 获取相机图像,再使用 pixeltobmp 将获取的相机图像转化成 Bitmap,再获取一个 Canvas 对转化的 Bitmap 进行绘制。

2.ImageProc.h

/* JNI prototypes in the name-mangled form Java_<package>_<class>_<method>;
 * the VM resolves these by name at library load time, so no explicit
 * RegisterNatives table is needed. */
jint Java_com_camera_simplewebcam_CameraPreview_prepareCamera( JNIEnv* env,jobject thiz, jint videoid);
jint Java_com_camera_simplewebcam_CameraPreview_prepareCameraWithBase( JNIEnv* env,jobject thiz, jint videoid, jint videobase);
void Java_com_camera_simplewebcam_CameraPreview_processCamera( JNIEnv* env,jobject thiz);
void Java_com_camera_simplewebcam_CameraPreview_stopCamera(JNIEnv* env,jobject thiz);
void Java_com_camera_simplewebcam_CameraPreview_pixeltobmp( JNIEnv* env,jobject thiz,jobject bitmap); 

如果没有在 .h 文件中声明,就需要在

/* Excerpt (from Android's camera JNI glue): the explicit registration table
 * used when natives are NOT declared with the name-mangled convention above.
 * Each entry maps Java name + signature to a native function pointer.
 * (Array is truncated here for illustration.) */
static const JNINativeMethod camMethods[] = {
  { "_getNumberOfCameras",
    "()I",
    (void *)android_hardware_Camera_getNumberOfCameras },
  { "_getCameraInfo",
    "(ILandroid/hardware/Camera$CameraInfo;)V",
    (void*)android_hardware_Camera_getCameraInfo },

进行方法映射。

3.ImageProc.c

3.1.prepareCameraWithBase [native]

/*
 * JNI entry: remember the device-node base index, then open the camera.
 * The node actually opened is /dev/video(videobase + videoid).
 * Returns what prepareCamera returns (SUCCESS_LOCAL / ERROR_LOCAL).
 */
jint 
Java_com_camera_simplewebcam_CameraPreview_prepareCameraWithBase( JNIEnv* env,jobject thiz, jint videoid, jint videobase){

		/* unused local "int ret" removed */
		camerabase = videobase;

		return Java_com_camera_simplewebcam_CameraPreview_prepareCamera(env,thiz,videoid);

}

----------------------------------
/*
 * JNI entry: open /dev/video(camerabase + videoid), configure it via V4L2,
 * start streaming, and allocate the RGB conversion buffers.
 * Returns SUCCESS_LOCAL on success, ERROR_LOCAL on any failure.
 */
jint 
Java_com_camera_simplewebcam_CameraPreview_prepareCamera( JNIEnv* env,jobject thiz, jint videoid){

	int ret;

	if(camerabase<0){
		camerabase = checkCamerabase();
	}
	/* Open the V4L2 device node. */
	ret = opendevice(camerabase + videoid);

	if(ret != ERROR_LOCAL){
		/* Query driver capabilities and set the capture format. */
		ret = initdevice();
	}
	if(ret != ERROR_LOCAL){
		/* Queue the frame buffers and start video capture.
		 * (This comment lacked "//" in the original — a syntax error.) */
		ret = startcapturing();

		if(ret != SUCCESS_LOCAL){
			/* Capture failed to start: tear the device back down. */
			stopcapturing();
			uninitdevice ();
			closedevice ();
			LOGE("device resetted");	
		}

	}

	if(ret != ERROR_LOCAL){
		/* Allocate the ABGR output buffer and the Y working buffer. */
		rgb = (int *)malloc(sizeof(int) * (IMG_WIDTH*IMG_HEIGHT));
		ybuf = (int *)malloc(sizeof(int) * (IMG_WIDTH*IMG_HEIGHT));
		if(rgb == NULL || ybuf == NULL){
			/* Original never checked malloc; fail cleanly here instead of
			 * crashing later in pixeltobmp(). free(NULL) is a no-op. */
			free(rgb);  rgb = NULL;
			free(ybuf); ybuf = NULL;
			stopcapturing();
			uninitdevice ();
			closedevice ();
			LOGE("out of memory");
			ret = ERROR_LOCAL;
		}
	}
	return ret;
}	

3.1.1.opendevice

/*
 * Open the V4L2 device node /dev/video<i>.
 * On success the global fd holds the open descriptor and SUCCESS_LOCAL is
 * returned; otherwise ERROR_LOCAL (node missing, not a char device, or
 * open() failed — typically a permission problem).
 */
int opendevice(int i)
{
	struct stat node_stat;

	/* Build the node path into the global dev_name buffer. */
	sprintf(dev_name,"/dev/video%d",i);

	/* The node must exist ... */
	if (stat (dev_name, &node_stat) == -1) {
		LOGE("Cannot identify '%s': %d, %s", dev_name, errno, strerror (errno));
		return ERROR_LOCAL;
	}

	/* ... and be a character device. */
	if (!S_ISCHR (node_stat.st_mode)) {
		LOGE("%s is no device", dev_name);
		return ERROR_LOCAL;
	}

	/* Non-blocking open so later reads/selects never hang the JNI thread. */
	fd = open (dev_name, O_RDWR | O_NONBLOCK, 0);
	if (fd == -1) {
		LOGE("Cannot open '%s': %d, %s", dev_name, errno, strerror (errno));
		return ERROR_LOCAL;
	}

	return SUCCESS_LOCAL;
}

3.1.2.initdevice

/*
 * Configure the opened V4L2 device: verify it is a streaming video-capture
 * device, reset cropping to the driver default (best effort), and request
 * an interlaced YUYV frame of IMG_WIDTH x IMG_HEIGHT. Ends by mmap'ing the
 * frame buffers. Returns SUCCESS_LOCAL / ERROR_LOCAL.
 */
int initdevice(void) 
{
	struct v4l2_capability cap;
	struct v4l2_cropcap cropcap;
	struct v4l2_crop crop;
	struct v4l2_format fmt;
	unsigned int min;

	/* Query device capabilities; EINVAL means it is not a V4L2 device. */
	if (xioctl (fd, VIDIOC_QUERYCAP, &cap) == -1) {
		if (errno == EINVAL) {
			LOGE("%s is no V4L2 device", dev_name);
			return ERROR_LOCAL;
		}
		return errnoexit ("VIDIOC_QUERYCAP");
	}

	/* Must support video capture ... */
	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
		LOGE("%s is no video capture device", dev_name);
		return ERROR_LOCAL;
	}
	/* ... and streaming (mmap) I/O. */
	if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
		LOGE("%s does not support streaming i/o", dev_name);
		return ERROR_LOCAL;
	}

	/* Best effort: reset cropping to the driver's default rectangle.
	 * All failures here (including EINVAL from drivers without cropping
	 * support) are deliberately ignored, as in the original. */
	CLEAR (cropcap);
	cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (xioctl (fd, VIDIOC_CROPCAP, &cropcap) == 0) {
		crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		crop.c = cropcap.defrect;
		(void) xioctl (fd, VIDIOC_S_CROP, &crop);
	}

	/* Request the capture format: interlaced YUYV at the fixed size. */
	CLEAR (fmt);
	fmt.type                = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width       = IMG_WIDTH; 
	fmt.fmt.pix.height      = IMG_HEIGHT;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;

	if (xioctl (fd, VIDIOC_S_FMT, &fmt) == -1)
		return errnoexit ("VIDIOC_S_FMT");

	/* Guard against buggy drivers that report too-small line/image sizes
	 * (YUYV is 2 bytes per pixel). */
	min = fmt.fmt.pix.width * 2;
	if (fmt.fmt.pix.bytesperline < min)
		fmt.fmt.pix.bytesperline = min;
	min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
	if (fmt.fmt.pix.sizeimage < min)
		fmt.fmt.pix.sizeimage = min;

	return initmmap ();
}

3.1.3.startcapturing

将帧缓存加入缓存队列并启动视频采集

/*
 * Hand every mmap'ed frame buffer to the driver's incoming queue, then
 * switch the device into streaming mode.
 * Returns SUCCESS_LOCAL / ERROR_LOCAL.
 */
int startcapturing(void)
{
	enum v4l2_buf_type type;
	unsigned int idx;

	/* Enqueue all n_buffers frame buffers. */
	for (idx = 0; idx < n_buffers; ++idx) {
		struct v4l2_buffer buf;

		CLEAR (buf);
		buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index  = idx;

		if (xioctl (fd, VIDIOC_QBUF, &buf) == -1)
			return errnoexit ("VIDIOC_QBUF");
	}

	/* Start the capture stream. */
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (xioctl (fd, VIDIOC_STREAMON, &type) == -1)
		return errnoexit ("VIDIOC_STREAMON");

	return SUCCESS_LOCAL;
}

3.2.processCamera() [native]

3.2.1.readframeonce

/* JNI entry: block until one frame has been grabbed from the capture queue
 * and converted into the native pixel buffer (see readframeonce below). */
void 
Java_com_camera_simplewebcam_CameraPreview_processCamera( JNIEnv* env,
										jobject thiz){
	readframeonce();
}

// 从缓存队列中取出一帧
int readframeonce(void)
{
	for (;;) {// 死循环
		fd_set fds;
		struct timeval tv;
		int r;

		FD_ZERO (&fds);
		FD_SET (fd, &fds);

		tv.tv_sec = 2;
		tv.tv_usec = 0;

		r = select (fd + 1, &fds, NULL, NULL, &tv);

		if (-1 == r) {
			if (EINTR == errno)
				continue;

			return errnoexit ("select");
		}

		if (0 == r) {
			LOGE("select timeout");
			return ERROR_LOCAL;

		}
		if(FD_ISSET(fd, &fds))
		{
			if (readframe ()==1)
			break;
		}
	}

	return SUCCESS_LOCAL;

}

3.2.2.readframe

/*
 * Dequeue one filled buffer from the driver's outgoing queue, convert it to
 * RGB, then requeue the buffer. Returns 1 on success, 0 if no frame was
 * ready yet (EAGAIN on the non-blocking fd), or errnoexit()'s result on a
 * real error.
 */
int readframe(void)
{
	struct v4l2_buffer buf;

	/* unused local "unsigned int i" removed */
	CLEAR (buf);

	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;

	/* Take a filled frame buffer off the driver's outgoing queue. */
	if (-1 == xioctl (fd, VIDIOC_DQBUF, &buf)) {
		switch (errno) {
			case EAGAIN:
				/* Non-blocking fd: no frame is ready yet. */
				return 0;
			case EIO:
				/* EIO could arguably be retried; treated as fatal here. */
				/* fallthrough */
			default:
				return errnoexit ("VIDIOC_DQBUF");
		}
	}

	assert (buf.index < n_buffers);

	/* Convert the YUYV payload into the global ABGR pixel buffer. */
	processimage (buffers[buf.index].start);

	/* Give the buffer back to the driver's incoming queue. */
	if (-1 == xioctl (fd, VIDIOC_QBUF, &buf))
		return errnoexit ("VIDIOC_QBUF");

	return 1;
}

/* Convert one YUYV (YUV 4:2:2) frame into the native ABGR pixel buffer. */
void processimage (const void *p)
{
	yuyv422toABGRY((unsigned char *)p);
}

3.3.pixeltobmp(bmp) [native]

/*
 * JNI entry: copy the native ABGR pixel buffer (filled by processimage)
 * into the Java Bitmap. The bitmap must be in RGBA_8888 format; the call
 * is a no-op until prepareCamera has allocated the native buffers.
 */
void 
Java_com_camera_simplewebcam_CameraPreview_pixeltobmp( JNIEnv* env,jobject thiz,jobject bitmap){

	/* unused local "jboolean bo" removed */
	AndroidBitmapInfo  info;
	void*              pixels;
	int                ret;
	int i;
	int *colors;

	int width=0;
	int height=0;
	int npixels;

	/* Query bitmap geometry and format. */
	if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
		LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
		return;
	}

	width = info.width;
	height = info.height;

	/* Native buffers not allocated yet — nothing to copy. */
	if(!rgb || !ybuf) return;

	if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
		LOGE("Bitmap format is not RGBA_8888 !");
		return;
	}

	/* Lock the bitmap's pixel buffer; the address stays valid (and the
	 * pixels unmoved) until unlockPixels. */
	if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
		LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
		return; /* BUG FIX: original fell through and used the invalid pointer */
	}

	/* Never read past the native buffer if the bitmap happens to be larger
	 * than IMG_WIDTH x IMG_HEIGHT. */
	npixels = width * height;
	if (npixels > IMG_WIDTH * IMG_HEIGHT)
		npixels = IMG_WIDTH * IMG_HEIGHT;

	colors = (int*)pixels;
	int *lrgb = &rgb[0];

	for(i=0 ; i<npixels ; i++){
		*colors++ = *lrgb++;
	}

	AndroidBitmap_unlockPixels(env, bitmap);
}

4.绘制

            // (Excerpt from run() above.) Lock the surface's canvas, draw the
            // converted camera bitmap into the precomputed rect, then post it.
            Canvas canvas = getHolder().lockCanvas();
            if (canvas != null)
            {
            	// draw camera bmp on canvas
            	canvas.drawBitmap(bmp,null,rect,null);

            	getHolder().unlockCanvasAndPost(canvas);
            }
文章来源:https://blog.csdn.net/meng1100/article/details/135526647
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。