1) I would like to use my own AVCaptureSession because I show the preview to the user before starting to record or connecting to the WebRTC server. With the stock solution I have to wait until the connection with the server has been established.
2) If I rewrite the video capturer, the class rtc_video_capture_ios_objc.mm will be unused, right? Because I will pass the buffers/frames from my current app.
3) I can't do that, for the same reason as in 1).
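Here is what I changed in capturerWithDeviceName: to register my custom capturer factory: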
+ (RTCVideoCapturer*)capturerWithDeviceName:(NSString*)deviceName {
  const std::string& device_name = std::string([deviceName UTF8String]);
  rtc::scoped_ptr<cricket::DeviceManagerInterface> dev_manager(
      cricket::DeviceManagerFactory::Create());
  bool initialized = dev_manager->Init();
  NSAssert(initialized, @"DeviceManager::Init() failed");

  // HERE: register my custom VideoCapturer factory so that
  // CreateVideoCapturer() below returns my capturer instead of the
  // built-in AVFoundation one.
  cricket::DeviceManager* device_manager =
      static_cast<cricket::DeviceManager*>(dev_manager.get());
  device_manager->SetVideoDeviceCapturerFactory(
      new cricket::VideoCapturerFactoryCustom());
  // End of my custom code.

  cricket::Device device;
  if (!dev_manager->GetVideoCaptureDevice(device_name, &device)) {
    LOG(LS_ERROR) << "GetVideoCaptureDevice failed";
    return nil;
  }
  rtc::scoped_ptr<cricket::VideoCapturer> capturer(
      dev_manager->CreateVideoCapturer(device));
  RTCVideoCapturer* rtcCapturer =
      [[RTCVideoCapturer alloc] initWithCapturer:capturer.release()];
  return rtcCapturer;
}
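For reference, the factory itself can be a thin class that just returns my own capturer. A minimal sketch, assuming the old talk/ tree (where the base class is cricket::VideoDeviceCapturerFactory; the include path may differ by revision) and a hypothetical custom capturer class AVFoundationVideoCapturerCustom:

// videocapturerfactorycustom.h -- a sketch; AVFoundationVideoCapturerCustom
// is an assumption, not part of the stock tree.
#include "talk/media/base/videocapturerfactory.h"

namespace cricket {

class VideoCapturerFactoryCustom : public VideoDeviceCapturerFactory {
 public:
  VideoCapturerFactoryCustom() {}
  virtual ~VideoCapturerFactoryCustom() {}

  virtual VideoCapturer* Create(const Device& device) {
    // AVFoundationVideoCapturerCustom (hypothetical) is a
    // cricket::VideoCapturer subclass that does not open the camera
    // itself; it is fed frames from the app's existing AVCaptureSession.
    return new AVFoundationVideoCapturerCustom(device);
  }
};

}  // namespace cricket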
This is the AVCaptureVideoDataOutputSampleBufferDelegate callback where my own session already delivers the frames:

- (void)captureOutput:(AVCaptureOutput*)captureOutput
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection*)connection;
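In that callback, each sample buffer would get forwarded into the custom capturer. A rough sketch, assuming _customCapturer points at the hypothetical capturer above and exposes an OnFrameCaptured(CVPixelBufferRef) hook of my own:

- (void)captureOutput:(AVCaptureOutput*)captureOutput
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection*)connection {
  CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
  if (pixelBuffer == NULL) {
    return;
  }
  // Hand the pixel buffer over; the custom capturer wraps it in a
  // cricket::CapturedFrame and signals it to WebRTC on its capture thread.
  _customCapturer->OnFrameCaptured(pixelBuffer);
}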