WebRTC ships with two built-in video source capturers: DesktopCapturer and VCMCapturer, i.e. a desktop capturer and a camera capturer.
DesktopCapturer can currently be used multiple times without apparent problems (though running many instances will of course hit a CPU bottleneck).
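For reference, the built-in desktop capturer is created like this; a minimal sketch using the modules/desktop_capture headers included later in this post. Each call returns an independent instance, which is why several can coexist (actually starting it requires a DesktopCapturer::Callback, omitted here):

```cpp
#include <memory>
#include "modules/desktop_capture/desktop_capture_options.h"
#include "modules/desktop_capture/desktop_capturer.h"

// Each CreateScreenCapturer call yields an independent capturer instance.
std::unique_ptr<webrtc::DesktopCapturer> capturer =
    webrtc::DesktopCapturer::CreateScreenCapturer(
        webrtc::DesktopCaptureOptions::CreateDefault());
```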
VCMCapturer is more troublesome. For example (the sketch after this list shows a simple up-front check):
- If the machine has no camera, it errors out because CreateDevice fails.
- A given camera can only be used once; creating it a second time fails the same way.
- In some environments USB read/write access is controlled, so access to the camera is restricted and CreateDevice fails.
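As a guard against the first failure mode, here is a minimal sketch (assuming the modules/video_capture headers used later in this post) that probes for a usable camera before ever calling CreateDevice:

```cpp
#include <memory>
#include "modules/video_capture/video_capture_factory.h"

// Returns true only if at least one camera can be enumerated, so callers
// can skip VCMCapturer creation entirely on camera-less machines.
bool HasUsableCamera() {
  std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
      webrtc::VideoCaptureFactory::CreateDeviceInfo());
  return info && info->NumberOfDevices() > 0;
}
```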
In most cases, however, we need neither VCMCapturer nor DesktopCapturer: our data source is custom, and we want the behavior to be custom too. That is where a custom video source capturer comes in.
The technical climate at home is frankly poor these days: I searched for a long time without finding any directly usable code for this, so I am publishing mine here in full. There is nothing worth keeping secret; technology only has value once it is shared.
The main design ideas are as follows:
# | Main content | Notes
---|---|---
1 | Create a custom video capturer class that inherits from rtc::AdaptedVideoTrackSource. The base class lives in media/base/adapted_video_track_source.h in the WebRTC source tree. | 
2 | Use Visual Studio's refactoring tool to generate implementations of all the base class's virtual functions in one step. This is standard boilerplate and is required. | Roles of the main functions: AddRef and Release handle reference counting on the object; state can generally just return webrtc::MediaSourceInterface::kLive; remote returns false; is_screencast returns true to enable adaptive bitrate/resolution adjustment (false also works); needs_denoising tells the encoder whether to denoise the video before encoding, usually false, depending on your needs.
3 | Provide StartCapture() and StopCapture() functions to control when capturing starts and stops. | 
4 | Pass the FPS in at initialization to control the capture period. | 
5 | In StartCapture(), run a thread that loops at the given FPS, prepares the video frame data, and then calls OnFrame() to hand each frame into the WebRTC pipeline. | 
6 | The actual data-processing function is CaptureOurImage(); you control its behavior, invocation timing, and parameters. In the complete code below it reads a set of BMP files from a local folder. | 
7 | Use AddTrack() or a similar method to add this video source capturer to the PeerConnection. | rtc::scoped_refptr<FakeVideoCapturer> myDevice = new rtc::RefCountedObject<FakeVideoCapturer>(60); rtc::scoped_refptr<webrtc::VideoTrackInterface> myVideoTrack(mPeerConnectionFactory->CreateVideoTrack("AA", myDevice)); mPeerConnection->AddTrack(myVideoTrack, {"BB"}); myDevice->StartCapture(); (a compilable version with error handling follows the table)
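Here is step 7's snippet expanded into a compilable form; a sketch assuming mPeerConnectionFactory and mPeerConnection already exist, keeping the track label "AA" and stream id "BB" from the table, with error handling added for illustration:

```cpp
// Wire the custom capturer into an existing PeerConnection.
rtc::scoped_refptr<QL::FakeVideoCapturer> myDevice(
    new rtc::RefCountedObject<QL::FakeVideoCapturer>(60));  // 60 FPS source
rtc::scoped_refptr<webrtc::VideoTrackInterface> myVideoTrack(
    mPeerConnectionFactory->CreateVideoTrack("AA", myDevice.get()));
auto result = mPeerConnection->AddTrack(myVideoTrack, {"BB"});
if (!result.ok()) {
  RTC_LOG(LS_ERROR) << "AddTrack failed: " << result.error().message();
}
myDevice->StartCapture();
```

Note that the constructor is protected, so the object must be created through rtc::RefCountedObject, which is the standard WebRTC pattern for ref-counted types.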
The complete header file and implementation file are given below.
#pragma once
#include <string>
#include <thread>
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "api/audio/audio_mixer.h"
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "api/audio_codecs/builtin_audio_encoder_factory.h"
#include "api/audio_options.h"
#include "api/create_peerconnection_factory.h"
#include "api/rtp_sender_interface.h"
#include "api/video_codecs/builtin_video_decoder_factory.h"
#include "api/video_codecs/builtin_video_encoder_factory.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_factory.h"
#include "p2p/base/port_allocator.h"
#include "pc/video_track_source.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/ref_counted_object.h"
#include "rtc_base/rtc_certificate_generator.h"
#include "rtc_base/strings/json.h"
#include "modules/desktop_capture/desktop_capturer.h"
#include "modules/desktop_capture/desktop_frame.h"
#include "modules/desktop_capture/desktop_capture_options.h"
#include "modules/desktop_capture/cropping_window_capturer.h"
#include "media/base/adapted_video_track_source.h"
#include "api/video/i420_buffer.h"
namespace QL
{
  class FakeVideoCapturer : public rtc::AdaptedVideoTrackSource
  {
  public:
    void CaptureOurImage();
    // Control the start and stop of the capture behavior
    void StartCapture();
    void StopCapture();
    // Inherited via AdaptedVideoTrackSource
    virtual webrtc::MediaSourceInterface::SourceState state() const override;
    virtual bool remote() const override;
    virtual bool is_screencast() const override;
    virtual absl::optional<bool> needs_denoising() const override;
    virtual void AddRef() const override;
    virtual rtc::RefCountReleaseStatus Release() const override;

  protected:
    explicit FakeVideoCapturer(size_t fps)
        : mFPS(fps)
    {
    }

  private:
    mutable volatile int ref_count_ = 0;
    // Thread that performs the capture work
    std::unique_ptr<std::thread> mCaptureThread;
    // Flag indicating whether capture has been started
    std::atomic_bool mIsStarted{false};
    // FPS of the video we output
    size_t mFPS;
    rtc::scoped_refptr<webrtc::I420Buffer> mI420YUVBbuffer;
    unsigned int mImageIndex = 0;
  };
}
C++ implementation
#include "FakeVideoCapturer.h"
#include <rtc_base/atomic_ops.h>
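// Note: rtc::AtomicOps is available in the WebRTC revision this post targets;
// newer trees remove rtc_base/atomic_ops.h in favor of std::atomic, so adjust
// this include and the AddRef/Release bodies to your checkout.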
#include "../CommonHelper.h"
#include <libyuv.h>
#define TEST_IMAGE_COUNT 639
namespace QL
{
  void FakeVideoCapturer::AddRef() const
  {
    rtc::AtomicOps::Increment(&ref_count_);
  }

  rtc::RefCountReleaseStatus FakeVideoCapturer::Release() const
  {
    const int count = rtc::AtomicOps::Decrement(&ref_count_);
    if (count == 0) {
      return rtc::RefCountReleaseStatus::kDroppedLastRef;
    }
    return rtc::RefCountReleaseStatus::kOtherRefsRemained;
  }
  webrtc::MediaSourceInterface::SourceState FakeVideoCapturer::state() const
  {
    return webrtc::MediaSourceInterface::kLive;
  }

  bool FakeVideoCapturer::remote() const
  {
    return false;
  }

  bool FakeVideoCapturer::is_screencast() const
  {
    return true;
  }

  absl::optional<bool> FakeVideoCapturer::needs_denoising() const
  {
    return false;
  }
  void FakeVideoCapturer::CaptureOurImage()
  {
    // Fetch the local bitmap image data
    uint8_t* myImageData = nullptr;
    unsigned int imageWidth;
    unsigned int imageHeight;
    unsigned int imagePixelFormat;
    char imgFileName[MAX_PATH] = { 0 };
    sprintf_s(imgFileName, "%s%d.bmp", "testimages/img_display_out", mImageIndex);
    char targetImageFullPath[MAX_PATH] = { 0 };
    CommonHelper::GetCurrentPath(targetImageFullPath, imgFileName);
    CommonHelper::ReadBitmapImage(targetImageFullPath, &myImageData, &imageWidth, &imageHeight, &imagePixelFormat);
    //CommonHelper::SaveBitmapToFile(myImageData, imageWidth, imageHeight, 32, 0, "d:\\b.bmp");
    int width = imageWidth;
    int height = imageHeight;
    // (Re)allocate the I420 buffer when the incoming image no longer fits
    if (!mI420YUVBbuffer.get() || mI420YUVBbuffer->width() * mI420YUVBbuffer->height() < width * height)
    {
      mI420YUVBbuffer = webrtc::I420Buffer::Create(width, height);
    }
    int stride = width;
    uint8_t* yplane = mI420YUVBbuffer->MutableDataY();
    uint8_t* uplane = mI420YUVBbuffer->MutableDataU();
    uint8_t* vplane = mI420YUVBbuffer->MutableDataV();
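    // ConvertToI420 below treats the BMP data as 32-bit ARGB (FOURCC_ARGB);
    // the chroma planes use the standard I420 stride of (width + 1) / 2. The
    // sample_size argument of 0 is tolerated because libyuv only needs it
    // for compressed sources such as MJPEG.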
    libyuv::ConvertToI420(myImageData, 0, yplane, stride, uplane,
                          (stride + 1) / 2, vplane, (stride + 1) / 2, 0, 0,
                          width, height, width, height, libyuv::kRotate0,
                          libyuv::FOURCC_ARGB);
    // Wrap the buffer in a VideoFrame; the frame holds a reference to the
    // I420 buffer, so no extra pixel copy happens here.
    webrtc::VideoFrame myYUVFrame = webrtc::VideoFrame(mI420YUVBbuffer, 0, 0, webrtc::kVideoRotation_0);
    mImageIndex++;
    if (mImageIndex > TEST_IMAGE_COUNT)
    {
      mImageIndex = 0;
    }
    this->OnFrame(myYUVFrame);
    // Release the image memory allocated by ReadBitmapImage
    delete[] myImageData;
    myImageData = nullptr;
  }
  /// <summary>
  /// Start the capture loop on a dedicated thread.
  /// </summary>
  void FakeVideoCapturer::StartCapture()
  {
    if (mIsStarted)
    {
      return;
    }
    mIsStarted = true;
    // Start a new thread that captures at roughly mFPS frames per second
    mCaptureThread.reset(new std::thread([this]()
    {
      mImageIndex = 0;
      while (mIsStarted)
      {
        CaptureOurImage();
        std::this_thread::sleep_for(std::chrono::milliseconds(1000 / mFPS));
      }
    }));
  }
  /// <summary>
  /// Stop the capture loop and join the capture thread.
  /// </summary>
  void FakeVideoCapturer::StopCapture()
  {
    mIsStarted = false;
    if (mCaptureThread && mCaptureThread->joinable())
    {
      mCaptureThread->join();
    }
  }
}
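One caveat worth noting (my own addition, not part of the original design): sleep_for(1000 / mFPS) ignores the time CaptureOurImage() itself takes, so the real frame rate drifts below mFPS. Here is a sketch of a drift-free variant of the thread body in StartCapture(), pacing against absolute deadlines with std::chrono:

```cpp
// Drop-in replacement for the lambda in StartCapture(): schedule each frame
// against an absolute deadline so per-frame processing time does not
// accumulate as drift.
mCaptureThread.reset(new std::thread([this]()
{
  mImageIndex = 0;
  const auto frameInterval = std::chrono::microseconds(1000000 / mFPS);
  auto nextFrameTime = std::chrono::steady_clock::now();
  while (mIsStarted)
  {
    CaptureOurImage();
    nextFrameTime += frameInterval;
    std::this_thread::sleep_until(nextFrameTime);
  }
}));
```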
The local images and the file-reading helpers are specific to my code; replace them with your own.
This article was contributed by its author and the copyright belongs to the original author. For reprints, please credit the source: https://www.nxrte.com/jishu/webrtc/25918.html