Again: I think I have a solution. I have a working solution based on the camera as input; I just want to replace it with a screen-capture stream.
/// Creates the local media senders: builds a camera-backed video track and
/// adds it to the peer connection.
private func createMediaSenders() {
    let source = self.factory.videoSource()
    // The camera capturer delivers frames into `source` via the delegate callback.
    self.videoCapturer = MyRTCCameraVideoCapturer(delegate: source)
    let track = self.factory.videoTrack(with: source, trackId: "track0")
    self.peerConnection.add(track, streamIds: ["stream0"])
    // Keep a handle so the local preview / later teardown can reach the track.
    self.localVideoTrack = track
}
I'm using a slightly modified RTCCameraVideoCapturer class because I had orientation issues with the original class — but that is not the issue here.
Regarding 3)
/// Starts capturing local video from the camera at 640x480 / 30 fps.
///
/// Picks the front or back camera per `AppSettings.useFrontCamera`, selects
/// the first supported 640x480 format, and starts the capturer with it.
func startCaptureLocalVideo() {
    guard let capturer = self.videoCapturer as? MyRTCCameraVideoCapturer else {
        return
    }
    guard let camera = MyRTCCameraVideoCapturer.captureDevices()
        .first(where: { $0.position == (AppSettings.useFrontCamera ? .front : .back) }) else {
        // TODO: surface a user-facing alert — no matching camera was found.
        return
    }
    // Stick to 640x480 and 30 fps. Compute the dimensions once per format
    // (the original queried CMVideoFormatDescriptionGetDimensions twice).
    guard let format = MyRTCCameraVideoCapturer.supportedFormats(for: camera)
        .first(where: {
            let dims = CMVideoFormatDescriptionGetDimensions($0.formatDescription)
            return dims.width == 640 && dims.height == 480
        }) else {
        // No 640x480 format available on this device — nothing to start.
        return
    }
    capturer.startCapture(with: camera,
                          format: format,
                          fps: 30)
}
As said, this works. It is now replaced by the following:
Regarding 1)
/// Creates the local media senders backed by a generic RTCVideoCapturer,
/// into which frames are pushed manually (e.g. from a ReplayKit capture).
private func createMediaSenders() {
    let source = self.factory.videoSource()
    self.videoCapturer = RTCVideoCapturer(delegate: source)
    // Ask WebRTC to scale/drop incoming frames down to 640x480 @ 30 fps.
    source.adaptOutputFormat(toWidth: 640, height: 480, fps: 30)
    let track = self.factory.videoTrack(with: source, trackId: "track0")
    self.peerConnection.add(track, streamIds: ["stream0"])
    // Retain the track so captured frames can later be routed to its source.
    self.localVideoTrack = track
}
Regarding 3)
/// Kicks off screen-capture-based local video once the capturer exists.
func startCaptureLocalVideo() {
    // Presence check only — no binding needed, so avoid `guard let _ =`.
    guard self.videoCapturer != nil else {
        return
    }
    self.startRecording()
}
with
/// Starts a ReplayKit in-app screen capture and forwards each video sample
/// into the WebRTC video source that backs `localVideoTrack`.
///
/// NOTE(review): the original handler called `self.factory.videoSource()`
/// on EVERY frame, which creates a brand-new, unattached RTCVideoSource each
/// time. Frames therefore never reached the source bound to the track added
/// in `createMediaSenders()` — which is why no video was sent and why
/// `adaptOutputFormat` appeared to have no effect. Fix: reuse the one source
/// that feeds `localVideoTrack`.
func startRecording() {
    guard recorder.isAvailable else {
        print("recording is not available at this time.")
        return
    }
    recorder.isMicrophoneEnabled = false
    guard #available(iOS 11.0, *) else {
        // ReplayKit in-app capture (startCapture) requires iOS 11+.
        return
    }
    // Resolve the source and capturer ONCE, outside the per-frame handler.
    guard let source = self.localVideoTrack?.source,
          let capturer = self.videoCapturer else {
        print("local video track / capturer not set up")
        return
    }
    // Capturing `source`/`capturer` directly avoids retaining `self` in a
    // handler that ReplayKit holds for the lifetime of the capture session.
    recorder.startCapture(handler: { (sampleBuffer, bufferType, error) in
        guard error == nil, bufferType == .video else { return }
        guard CMSampleBufferIsValid(sampleBuffer),
              CMSampleBufferDataIsReady(sampleBuffer),
              CMSampleBufferGetNumSamples(sampleBuffer) == 1,
              let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            print("invalid sampleBuffer")
            return
        }
        let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: pixelBuffer)
        // BUG FIX: `timeStampNs` expects nanoseconds; the original multiplied
        // seconds by 1e6, yielding microseconds. Use the buffer's own
        // presentation timestamp, converted to nanoseconds.
        let pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        let timeStampNs = Int64(CMTimeGetSeconds(pts) * 1_000_000_000)
        let videoFrame = RTCVideoFrame(buffer: rtcPixelBuffer,
                                       rotation: RTCVideoRotation._0,
                                       timeStampNs: timeStampNs)
        // Deliver the frame to the track's OWN source — not a fresh one.
        source.capturer(capturer, didCapture: videoFrame)
    })
}
What I can see is:
1) The screen capturer is asking for my OK, once in the beginning and after 8 minutes of inactivity (as documented)
2) I'm getting sampleBuffers (which pass my validity checks) at 60 fps (measuring code not shown here)
3) The width and height of the final video frame is 1920 886 (iPhone XS, iOS 12.4, landscape), so my attempt to adjust frame rate and w/h seem to have no impact
4) The sequence of events is kept as shown in the initial statement
5) My SDP offer is the same in both cases and contains H.264 elements as well as VP8
But: There is no video sent...
Anybody able to see, what I'm missing?