In the previous chapters we analyzed UVCCamera's initialization and the preparation work related to preview; this chapter walks through the whole startPreview flow. As usual, we start with a rough sketch of the call sequence: continuing from the preview-start path covered earlier, the call reaches handleStartPreview in AbstractUVCCameraHandler.CameraThread, which in turn calls startPreview on UVCCamera, and that call finally lands in the native-layer UVCPreview::startPreview method.
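Before reading the native code, it helps to see how the Java call crosses into C++. The snippet below is only a simplified, hypothetical sketch of such a JNI bridge (the function name, package and parameter names are illustrative, not the project's actual JNI entry points): the Java side keeps the native object as a long handle and passes it back down, where it is cast back to a pointer.

```cpp
#include <jni.h>
#include "UVCPreview.h"   // project header declaring UVCPreview

// Hypothetical JNI bridge, for illustration only -- not the project's actual entry point.
extern "C" JNIEXPORT jint JNICALL
Java_com_example_UVCCamera_nativeStartPreview(JNIEnv *env, jobject thiz, jlong id_preview) {
    // The Java layer stores the native object as an opaque jlong handle.
    UVCPreview *preview = reinterpret_cast<UVCPreview *>(id_preview);
    if (!preview) {
        return -1;   // invalid handle
    }
    // Delegate to the native implementation analyzed below.
    return preview->startPreview();
}
```

With that bridge in mind, here is UVCPreview::startPreview itself: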
```cpp
int UVCPreview::startPreview() {
    ENTER();

    int result = EXIT_FAILURE;
    if (!isRunning()) {
        mIsRunning = true;
        pthread_mutex_lock(&preview_mutex);
        {
            if (LIKELY(mPreviewWindow)) {
                result = pthread_create(&preview_thread, NULL, preview_thread_func, (void *)this);
            }
        }
        pthread_mutex_unlock(&preview_mutex);
        if (UNLIKELY(result != EXIT_SUCCESS)) {
            LOGW("UVCCamera::window does not exist/already running/could not create thread etc.");
            mIsRunning = false;
            pthread_mutex_lock(&preview_mutex);
            {
                pthread_cond_signal(&preview_sync);
            }
            pthread_mutex_unlock(&preview_mutex);
        }
    }
    RETURN(result, int);
}
```
In startPreview, a worker thread is spawned via pthread_create; what that thread executes lives in preview_thread_func, so let's continue with preview_thread_func:
```cpp
void *UVCPreview::preview_thread_func(void *vptr_args) {
    int result;

    ENTER();
    UVCPreview *preview = reinterpret_cast<UVCPreview *>(vptr_args);
    if (LIKELY(preview)) {
        uvc_stream_ctrl_t ctrl;
        result = preview->prepare_preview(&ctrl);
        if (LIKELY(!result)) {
            preview->do_preview(&ctrl);
        }
    }
    PRE_EXIT();
    pthread_exit(NULL);
}
```
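preview_thread_func is the classic pthread "trampoline": pthread_create only accepts a plain function taking a void *, so the object pointer is smuggled through that argument and cast back inside the static function, which then calls into the object. A minimal, self-contained sketch of the same idiom (illustrative names, not UVCCamera code):

```cpp
#include <pthread.h>
#include <cstdio>

class Worker {
public:
    // Spawn a thread that ends up running this->run() -- the same pattern as
    // UVCPreview::startPreview() / preview_thread_func above.
    int start() {
        return pthread_create(&thread_, nullptr, &Worker::trampoline, this);
    }
    void join() { pthread_join(thread_, nullptr); }

private:
    // pthread_create needs a C-style entry point: static, taking and returning void *.
    static void *trampoline(void *arg) {
        Worker *self = static_cast<Worker *>(arg);  // recover the object pointer
        self->run();
        return nullptr;
    }
    void run() { std::printf("worker thread running\n"); }

    pthread_t thread_{};
};

int main() {
    Worker w;
    if (w.start() == 0) w.join();
    return 0;
}
```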
Here we only care about two methods: prepare_preview and do_preview. Let's look at prepare_preview first:
```cpp
int UVCPreview::prepare_preview(uvc_stream_ctrl_t *ctrl) {
    uvc_error_t result;

    ENTER();
    result = uvc_get_stream_ctrl_format_size_fps(mDeviceHandle, ctrl,
        !requestMode ? UVC_FRAME_FORMAT_YUYV : UVC_FRAME_FORMAT_MJPEG,
        requestWidth, requestHeight, requestMinFps, requestMaxFps);
    if (LIKELY(!result)) {
#if LOCAL_DEBUG
        uvc_print_stream_ctrl(ctrl, stderr);
#endif
        uvc_frame_desc_t *frame_desc;
        result = uvc_get_frame_desc(mDeviceHandle, ctrl, &frame_desc);
        if (LIKELY(!result)) {
            frameWidth = frame_desc->wWidth;
            frameHeight = frame_desc->wHeight;
            LOGI("frameSize=(%d,%d)@%s", frameWidth, frameHeight, (!requestMode ? "YUYV" : "MJPEG"));
            pthread_mutex_lock(&preview_mutex);
            if (LIKELY(mPreviewWindow)) {
                ANativeWindow_setBuffersGeometry(mPreviewWindow,
                    frameWidth, frameHeight, previewFormat);
            }
            pthread_mutex_unlock(&preview_mutex);
        } else {
            frameWidth = requestWidth;
            frameHeight = requestHeight;
        }
        frameMode = requestMode;
        frameBytes = frameWidth * frameHeight * (!requestMode ? 2 : 4);
        previewBytes = frameWidth * frameHeight * PREVIEW_PIXEL_BYTES;
    } else {
        LOGE("could not negotiate with camera:err=%d", result);
    }
    RETURN(result, int);
}
```
This method mainly handles the preview parameter setup: negotiating the stream format with the camera, recording the frame width and height, and computing the buffer sizes needed for the chosen color mode. It also calls ANativeWindow_setBuffersGeometry to update the native window's buffer parameters. A small sketch of this bookkeeping follows, after which we move on to do_preview.
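The sketch below mirrors the size bookkeeping and the NDK call made in prepare_preview; it is illustrative only (it assumes an RGBX_8888 preview surface and free-standing parameters, not the project's member variables).

```cpp
#include <android/native_window.h>

static const int PREVIEW_PIXEL_BYTES = 4;          // RGBX_8888 -> 4 bytes per pixel

// Illustrative: mirrors prepare_preview's bookkeeping for a negotiated stream size.
void configure_window(ANativeWindow *window, int frameWidth, int frameHeight, bool mjpegMode) {
    // 2 bytes/pixel for YUYV; the MJPEG path reserves 4 (same arithmetic as prepare_preview).
    int frameBytes   = frameWidth * frameHeight * (mjpegMode ? 4 : 2);
    int previewBytes = frameWidth * frameHeight * PREVIEW_PIXEL_BYTES;
    (void)frameBytes; (void)previewBytes;          // the real code stores these for later allocation

    // Tell the window which buffer size/format ANativeWindow_lock() should hand back.
    if (window) {
        ANativeWindow_setBuffersGeometry(window, frameWidth, frameHeight,
                                         WINDOW_FORMAT_RGBX_8888);
    }
}
```

With the window geometry configured, do_preview (below) runs the actual streaming loop.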
```cpp
void UVCPreview::do_preview(uvc_stream_ctrl_t *ctrl) {
    ENTER();

    uvc_frame_t *frame = NULL;
    uvc_frame_t *frame_mjpeg = NULL;
    uvc_error_t result = uvc_start_streaming_bandwidth(
        mDeviceHandle, ctrl, uvc_preview_frame_callback, (void *)this, requestBandwidth, 0);

    if (LIKELY(!result)) {
        clearPreviewFrame();
        pthread_create(&capture_thread, NULL, capture_thread_func, (void *)this);

#if LOCAL_DEBUG
        LOGI("Streaming...");
#endif
        if (frameMode) {
            for ( ; LIKELY(isRunning()) ; ) {
                frame_mjpeg = waitPreviewFrame();
                if (LIKELY(frame_mjpeg)) {
                    frame = get_frame(frame_mjpeg->width * frame_mjpeg->height * 2);
                    result = uvc_mjpeg2yuyv(frame_mjpeg, frame);
                    recycle_frame(frame_mjpeg);
                    if (LIKELY(!result)) {
                        frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
                        addCaptureFrame(frame);
                    } else {
                        recycle_frame(frame);
                    }
                }
            }
        } else {
            for ( ; LIKELY(isRunning()) ; ) {
                frame = waitPreviewFrame();
                if (LIKELY(frame)) {
                    frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
                    addCaptureFrame(frame);
                }
            }
        }
        pthread_cond_signal(&capture_sync);
#if LOCAL_DEBUG
        LOGI("preview_thread_func:wait for all callbacks complete");
#endif
        uvc_stop_streaming(mDeviceHandle);
#if LOCAL_DEBUG
        LOGI("Streaming finished");
#endif
    } else {
        uvc_perror(result, "failed start_streaming");
    }

    EXIT();
}
```
In this method we can see a call to uvc_start_streaming_bandwidth, a function that lives in libuvc's stream.c:
```c
uvc_error_t uvc_start_streaming_bandwidth(uvc_device_handle_t *devh,
        uvc_stream_ctrl_t *ctrl, uvc_frame_callback_t *cb, void *user_ptr,
        float bandwidth_factor, uint8_t flags) {

    uvc_error_t ret;
    uvc_stream_handle_t *strmh;

    ret = uvc_stream_open_ctrl(devh, &strmh, ctrl);
    if (UNLIKELY(ret != UVC_SUCCESS))
        return ret;
    ret = uvc_stream_start_bandwidth(strmh, cb, user_ptr, bandwidth_factor, flags);
    if (UNLIKELY(ret != UVC_SUCCESS)) {
        uvc_stream_close(strmh);
        return ret;
    }

    return UVC_SUCCESS;
}
```
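The _bandwidth variant shown above belongs to the modified libuvc bundled with UVCCamera; stock libuvc offers the same flow through uvc_start_streaming, without the bandwidth_factor argument. For readers who want to see the streaming API in isolation, here is a minimal desktop-style client sketched against the standard libuvc API (device selection and timing are placeholders):

```cpp
#include <cstdio>
#include <unistd.h>
#include "libuvc/libuvc.h"

// Frame callback: libuvc invokes this from its own streaming thread for every frame.
static void on_frame(uvc_frame_t *frame, void *user_ptr) {
    (void)user_ptr;
    std::printf("frame %u: %ux%u, %zu bytes\n",
                frame->sequence, frame->width, frame->height, frame->data_bytes);
}

int main() {
    uvc_context_t *ctx = nullptr;
    uvc_device_t *dev = nullptr;
    uvc_device_handle_t *devh = nullptr;
    uvc_stream_ctrl_t ctrl;

    if (uvc_init(&ctx, nullptr) < 0) return 1;
    if (uvc_find_device(ctx, &dev, 0, 0, nullptr) == UVC_SUCCESS &&   // first UVC camera found
        uvc_open(dev, &devh) == UVC_SUCCESS) {
        // Negotiate 640x480 YUYV at 30 fps, then start delivering frames to on_frame().
        if (uvc_get_stream_ctrl_format_size(devh, &ctrl, UVC_FRAME_FORMAT_YUYV,
                                            640, 480, 30) == UVC_SUCCESS &&
            uvc_start_streaming(devh, &ctrl, on_frame, nullptr, 0) == UVC_SUCCESS) {
            sleep(5);                    // stream for a few seconds
            uvc_stop_streaming(devh);
        }
        uvc_close(devh);
    }
    if (dev) uvc_unref_device(dev);
    uvc_exit(ctx);
    return 0;
}
```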
From its documentation we can tell that this function's job is to hand the data captured by the camera over to the callback. So next, let's look at the callback that was passed in, uvc_preview_frame_callback:
```cpp
void UVCPreview::uvc_preview_frame_callback(uvc_frame_t *frame, void *vptr_args) {
    UVCPreview *preview = reinterpret_cast<UVCPreview *>(vptr_args);
    if UNLIKELY(!preview->isRunning() || !frame || !frame->frame_format || !frame->data || !frame->data_bytes) return;
    if (UNLIKELY(
        ((frame->frame_format != UVC_FRAME_FORMAT_MJPEG) && (frame->actual_bytes < preview->frameBytes))
        || (frame->width != preview->frameWidth) || (frame->height != preview->frameHeight))) {

#if LOCAL_DEBUG
        LOGD("broken frame!:format=%d,actual_bytes=%d/%d(%d,%d/%d,%d)",
            frame->frame_format, frame->actual_bytes, preview->frameBytes,
            frame->width, frame->height, preview->frameWidth, preview->frameHeight);
#endif
        return;
    }
    if (LIKELY(preview->isRunning())) {
        uvc_frame_t *copy = preview->get_frame(frame->data_bytes);
        if (UNLIKELY(!copy)) {
#if LOCAL_DEBUG
            LOGE("uvc_callback:unable to allocate duplicate frame!");
#endif
            return;
        }
        uvc_error_t ret = uvc_duplicate_frame(frame, copy);
        if (UNLIKELY(ret)) {
            preview->recycle_frame(copy);
            return;
        }
        preview->addPreviewFrame(copy);
    }
}
```
The sanity checks at the top of this callback can be ignored for now; what we care about is how a frame gets handled. Note the line uvc_frame_t *copy = preview->get_frame(frame->data_bytes); let's look at get_frame:
```cpp
uvc_frame_t *UVCPreview::get_frame(size_t data_bytes) {
    uvc_frame_t *frame = NULL;
    pthread_mutex_lock(&pool_mutex);
    {
        if (!mFramePool.isEmpty()) {
            frame = mFramePool.last();
        }
    }
    pthread_mutex_unlock(&pool_mutex);
    if UNLIKELY(!frame) {
        LOGW("allocate new frame");
        frame = uvc_allocate_frame(data_bytes);
    }
    return frame;
}
```
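get_frame pops a recycled buffer from mFramePool and only falls back to uvc_allocate_frame when the pool is empty; its counterpart recycle_frame (not quoted in this article) pushes frames back. A rough sketch of that pool pattern, using a simple std::vector instead of the project's ObjectArray:

```cpp
#include <pthread.h>
#include <vector>
#include "libuvc/libuvc.h"

// Illustrative frame pool -- not the project's ObjectArray-based implementation.
class FramePool {
public:
    FramePool() { pthread_mutex_init(&mutex_, nullptr); }
    ~FramePool() {
        for (uvc_frame_t *f : pool_) uvc_free_frame(f);
        pthread_mutex_destroy(&mutex_);
    }

    // Reuse a pooled frame if one is available, otherwise allocate a fresh one.
    uvc_frame_t *get(size_t data_bytes) {
        uvc_frame_t *frame = nullptr;
        pthread_mutex_lock(&mutex_);
        if (!pool_.empty()) {
            frame = pool_.back();
            pool_.pop_back();
        }
        pthread_mutex_unlock(&mutex_);
        return frame ? frame : uvc_allocate_frame(data_bytes);
    }

    // Hand a frame back so the USB callback can reuse it instead of allocating again.
    void recycle(uvc_frame_t *frame) {
        pthread_mutex_lock(&mutex_);
        const bool pooled = pool_.size() < kMaxPooled;
        if (pooled) pool_.push_back(frame);
        pthread_mutex_unlock(&mutex_);
        if (!pooled) uvc_free_frame(frame);   // pool is full: really release it
    }

private:
    static const size_t kMaxPooled = 8;
    pthread_mutex_t mutex_;
    std::vector<uvc_frame_t *> pool_;
};
```

The point of the pool is to avoid a malloc/free pair on every single camera frame while the callback runs at frame rate.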
So a frame is first taken out of the global mFramePool, and then uvc_duplicate_frame (from libuvc's frame.c) is called to copy the frame data received from the camera into that pooled frame:
```c
uvc_error_t uvc_duplicate_frame(uvc_frame_t *in, uvc_frame_t *out) {
    if (UNLIKELY(uvc_ensure_frame_size(out, in->data_bytes) < 0))
        return UVC_ERROR_NO_MEM;

    out->width = in->width;
    out->height = in->height;
    out->frame_format = in->frame_format;
    if (out->library_owns_data)
        out->step = in->step;
    out->sequence = in->sequence;
    out->capture_time = in->capture_time;
    out->source = in->source;
    out->actual_bytes = in->actual_bytes;

#if USE_STRIDE
    if (in->step && out->step) {
        const int istep = in->step;
        const int ostep = out->step;
        const int hh = in->height < out->height ? in->height : out->height;
        const int rowbytes = istep < ostep ? istep : ostep;
        register void *ip = in->data;
        register void *op = out->data;
        int h;
        for (h = 0; h < hh; h += 4) {
            memcpy(op, ip, rowbytes);
            ip += istep; op += ostep;
            memcpy(op, ip, rowbytes);
            ip += istep; op += ostep;
            memcpy(op, ip, rowbytes);
            ip += istep; op += ostep;
            memcpy(op, ip, rowbytes);
            ip += istep; op += ostep;
        }
    } else {
        memcpy(out->data, in->data, in->actual_bytes);
    }
#else
    memcpy(out->data, in->data, in->actual_bytes);
#endif
    return UVC_SUCCESS;
}
```
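The first thing uvc_duplicate_frame does is make sure the destination buffer is large enough: uvc_ensure_frame_size grows the frame's data buffer on demand, but only when the library owns that allocation. Roughly, its behavior looks like the following sketch (not the library's verbatim code):

```cpp
#include <cstdlib>
#include "libuvc/libuvc.h"

// Rough sketch of what uvc_ensure_frame_size does in libuvc (illustrative, not verbatim).
static uvc_error_t ensure_frame_size(uvc_frame_t *frame, size_t need_bytes) {
    if (frame->library_owns_data) {
        if (!frame->data || frame->data_bytes < need_bytes) {
            void *grown = std::realloc(frame->data, need_bytes);   // grow the buffer on demand
            if (!grown)
                return UVC_ERROR_NO_MEM;
            frame->data = grown;
            frame->data_bytes = need_bytes;
        }
    } else {
        // A caller-owned buffer is never reallocated; it simply has to be big enough already.
        if (!frame->data || frame->data_bytes < need_bytes)
            return UVC_ERROR_NO_MEM;
    }
    return UVC_SUCCESS;
}
```

This is why copying into a recycled pool frame of a different size still works: the destination is resized before the memcpy.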
Finally, the copied frame is pushed into previewFrames via UVCPreview's addPreviewFrame method:
```cpp
void UVCPreview::addPreviewFrame(uvc_frame_t *frame) {
    pthread_mutex_lock(&preview_mutex);
    if (isRunning() && (previewFrames.size() < MAX_FRAME)) {
        previewFrames.put(frame);
        frame = NULL;
        pthread_cond_signal(&preview_sync);
    }
    pthread_mutex_unlock(&preview_mutex);
    if (frame) {
        recycle_frame(frame);
    }
}
```
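addPreviewFrame and waitPreviewFrame form a classic bounded producer/consumer pair built on preview_mutex and the preview_sync condition variable: the UVC callback thread produces frames, the preview thread consumes them, and anything beyond MAX_FRAME is dropped back into the pool. waitPreviewFrame is not quoted in this article; a minimal sketch of the pattern (illustrative, not the project's exact code) looks like this:

```cpp
#include <pthread.h>
#include <deque>
#include "libuvc/libuvc.h"

// Illustrative bounded queue mirroring the addPreviewFrame/waitPreviewFrame pairing.
static const size_t kMaxFrames = 4;

static pthread_mutex_t preview_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  preview_sync  = PTHREAD_COND_INITIALIZER;
static std::deque<uvc_frame_t *> previewFrames;

// Producer side (UVC callback thread): drop the frame if the consumer is falling behind.
bool add_preview_frame(uvc_frame_t *frame) {
    bool queued = false;
    pthread_mutex_lock(&preview_mutex);
    if (previewFrames.size() < kMaxFrames) {
        previewFrames.push_back(frame);
        queued = true;
        pthread_cond_signal(&preview_sync);   // wake the preview thread
    }
    pthread_mutex_unlock(&preview_mutex);
    return queued;                            // caller recycles the frame when false
}

// Consumer side (preview thread): block until a frame arrives.
uvc_frame_t *wait_preview_frame() {
    pthread_mutex_lock(&preview_mutex);
    while (previewFrames.empty()) {
        pthread_cond_wait(&preview_sync, &preview_mutex);
    }
    uvc_frame_t *frame = previewFrames.front();
    previewFrames.pop_front();
    pthread_mutex_unlock(&preview_mutex);
    return frame;
}
```

A real implementation also needs an exit path for shutdown (in UVCPreview this is tied to isRunning(), and preview_sync is also signaled on failure in startPreview); the sketch keeps only the queue mechanics.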
That covers the preview-callback handling used by UVCPreview's do_preview. Now let's return to the rest of do_preview; the core is this piece of code:
```cpp
if (frameMode) {
    for ( ; LIKELY(isRunning()) ; ) {
        frame_mjpeg = waitPreviewFrame();
        if (LIKELY(frame_mjpeg)) {
            frame = get_frame(frame_mjpeg->width * frame_mjpeg->height * 2);
            result = uvc_mjpeg2yuyv(frame_mjpeg, frame);
            recycle_frame(frame_mjpeg);
            if (LIKELY(!result)) {
                frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
                addCaptureFrame(frame);
            } else {
                recycle_frame(frame);
            }
        }
    }
} else {
    for ( ; LIKELY(isRunning()) ; ) {
        frame = waitPreviewFrame();
        if (LIKELY(frame)) {
            frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
            addCaptureFrame(frame);
        }
    }
}
```
Roughly speaking, frames are processed according to the configured mode. In MJPEG mode there is just one extra conversion step compared with YUYV: a call to uvc_mjpeg2yuyv in libuvc's frame-mjpeg.c, which we won't expand on here; interested readers can check that function's source. Back in do_preview, whichever mode is used, the frame eventually goes through draw_preview_one. As the name suggests, this is where the captured data is drawn onto the native window:
```cpp
uvc_frame_t *UVCPreview::draw_preview_one(uvc_frame_t *frame, ANativeWindow **window,
        convFunc_t convert_func, int pixcelBytes) {

    int b = 0;
    pthread_mutex_lock(&preview_mutex);
    {
        b = *window != NULL;
    }
    pthread_mutex_unlock(&preview_mutex);
    if (LIKELY(b)) {
        uvc_frame_t *converted;
        if (convert_func) {
            converted = get_frame(frame->width * frame->height * pixcelBytes);
            if LIKELY(converted) {
                b = convert_func(frame, converted);
                if (!b) {
                    pthread_mutex_lock(&preview_mutex);
                    copyToSurface(converted, window);
                    pthread_mutex_unlock(&preview_mutex);
                } else {
                    LOGE("failed converting");
                }
                recycle_frame(converted);
            }
        } else {
            pthread_mutex_lock(&preview_mutex);
            copyToSurface(frame, window);
            pthread_mutex_unlock(&preview_mutex);
        }
    }
    return frame;
}
```
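The convert_func passed in from do_preview is uvc_any2rgbx: each YUYV pixel pair (4 bytes for 2 pixels) is expanded to RGBX8888 (4 bytes per pixel) before being copied to the surface. Conceptually the conversion does something like the sketch below, using the common BT.601 integer approximation; this is an illustration, not the bundled library's implementation:

```cpp
#include <cstdint>
#include <cstddef>

static inline uint8_t clamp_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

// YUYV (Y0 U Y1 V per two pixels) -> RGBX8888. Illustrative only; uvc_any2rgbx in the
// bundled libuvc is the real (and optimized) implementation.
void yuyv_to_rgbx(const uint8_t *src, uint8_t *dst, int width, int height) {
    const size_t pixel_pairs = (size_t)width * height / 2;   // 4 source bytes per 2 pixels
    for (size_t i = 0; i < pixel_pairs; i++, src += 4, dst += 8) {
        const int y0 = src[0], u = src[1], y1 = src[2], v = src[3];
        const int d = u - 128, e = v - 128;
        for (int p = 0; p < 2; p++) {
            const int c = (p ? y1 : y0) - 16;
            dst[p * 4 + 0] = clamp_u8((298 * c + 409 * e + 128) >> 8);            // R
            dst[p * 4 + 1] = clamp_u8((298 * c - 100 * d - 208 * e + 128) >> 8);  // G
            dst[p * 4 + 2] = clamp_u8((298 * c + 516 * d + 128) >> 8);            // B
            dst[p * 4 + 3] = 0xff;                                                // X (unused)
        }
    }
}
```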
As the source shows, the core step is drawing the prepared frame onto the ANativeWindow by calling copyToSurface, which locks the window's buffer and hands the row-by-row copy off to copyFrame (both quoted below):
```cpp
int copyToSurface(uvc_frame_t *frame, ANativeWindow **window) {
    int result = 0;
    if (LIKELY(*window)) {
        ANativeWindow_Buffer buffer;
        if (LIKELY(ANativeWindow_lock(*window, &buffer, NULL) == 0)) {
            const uint8_t *src = (uint8_t *)frame->data;
            const int src_w = frame->width * PREVIEW_PIXEL_BYTES;
            const int src_step = frame->width * PREVIEW_PIXEL_BYTES;
            uint8_t *dest = (uint8_t *)buffer.bits;
            const int dest_w = buffer.width * PREVIEW_PIXEL_BYTES;
            const int dest_step = buffer.stride * PREVIEW_PIXEL_BYTES;
            const int w = src_w < dest_w ? src_w : dest_w;
            const int h = frame->height < buffer.height ? frame->height : buffer.height;
            copyFrame(src, dest, w, h, src_step, dest_step);
            ANativeWindow_unlockAndPost(*window);
        } else {
            result = -1;
        }
    } else {
        result = -1;
    }
    return result;
}
```
```cpp
static void copyFrame(const uint8_t *src, uint8_t *dest, const int width, int height,
        const int stride_src, const int stride_dest) {

    const int h8 = height % 8;
    for (int i = 0; i < h8; i++) {
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
    }
    for (int i = 0; i < height; i += 8) {
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
    }
}
```
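copyFrame copies one row at a time because the destination stride (buffer.stride, in pixels) can be larger than the visible width; the body is simply an 8x unrolled row loop. Putting the pieces together, a minimal stride-aware lock/copy/post cycle on an ANativeWindow looks roughly like this (illustrative, not the project's code):

```cpp
#include <android/native_window.h>
#include <cstring>
#include <cstdint>

// Illustrative: push one RGBX8888 frame (4 bytes/pixel) onto an ANativeWindow.
// Assumes ANativeWindow_setBuffersGeometry() was already called with the frame size.
bool post_rgbx_frame(ANativeWindow *window, const uint8_t *src, int width, int height) {
    ANativeWindow_Buffer buffer;
    if (!window || ANativeWindow_lock(window, &buffer, nullptr) != 0)
        return false;

    const int bpp = 4;                                    // RGBX_8888
    const int copy_w = (width < buffer.width ? width : buffer.width) * bpp;
    const int copy_h = height < buffer.height ? height : buffer.height;
    const int src_stride = width * bpp;                   // tightly packed source rows
    const int dst_stride = buffer.stride * bpp;           // stride is in pixels, may exceed width

    uint8_t *dst = static_cast<uint8_t *>(buffer.bits);
    for (int y = 0; y < copy_h; y++) {                    // row-by-row, exactly like copyFrame
        std::memcpy(dst, src, copy_w);
        src += src_stride;
        dst += dst_stride;
    }
    ANativeWindow_unlockAndPost(window);                  // hand the buffer to the compositor
    return true;
}
```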
Summary

That wraps up this rough analysis of UVCCamera's preview feature. The flow itself is fairly clear, even if my explanation may not be; I'll keep polishing this write-up over time, so please bear with me.
Reference: https://www.jianshu.com/p/313e6e4ca418