MyCapturer::MyCapturer(rtc::Thread *thread,cricket::WebRtcVideoEncoderFactory *video_encoder,cricket::WebRtcVideoDecoderFactory *video_decoder)
{
// Capturer starts idle; frames arrive later through setFrame().
// NOTE(review): video_encoder / video_decoder are accepted but never
// stored here — kept only so existing callers keep compiling.
thread_ = thread;
format_ = nullptr;           // lazily created on the first setFrame() call
running_ = 0;
image_ = webrtc::EncodedImage();
time_ = rtc::Time();         // capture start time; setFrame() derives elapsed ms from it
capturerId_ = "video_frames";
SetId(capturerId_);
}
MyCapturer::~MyCapturer()
{
// Stop capture first so the base class drops its reference to the format.
Stop();
// `delete` on a null pointer is a no-op, so the previous explicit
// NULL check was redundant.
delete format_;
format_ = nullptr;
}
// This capturer imposes no constraints: any desired format is accepted
// verbatim as the best format.
// Returns false only when the caller passes a null output pointer.
bool MyCapturer::GetBestCaptureFormat(const cricket::VideoFormat &desired,
cricket::VideoFormat *bestFormat)
{
if (bestFormat == nullptr)
{
return false;
}
*bestFormat = desired;
return true;   // was `return 1` — use a proper boolean literal
}
// Begins "capturing": this capturer has no device to open, so starting is
// just recording the requested format and flagging ourselves as running.
// Always succeeds and reports CS_RUNNING to the framework.
cricket::CaptureState MyCapturer::Start(const cricket::VideoFormat &captureFormat)
{
#ifdef DEBUG
Log() << "MyCapturer: Started.\n";
#endif
SetCaptureFormat(&captureFormat);
// NOTE(review): removed the unguarded "___IN_CAPTURER_START()__" banner —
// it was debug leftover, inconsistent with the DEBUG-guarded log above.
running_ = 1;
return cricket::CS_RUNNING;
}
// Halts capture: marks the capturer idle, clears the negotiated format,
// and tells the framework we are stopped.
void MyCapturer::Stop()
{
running_ = 0;
SetCaptureFormat(nullptr);
SetCaptureState(cricket::CS_STOPPED);
}
// True while Start() has been called and Stop() has not.
bool MyCapturer::IsRunning()
{
return running_ != 0;
}
// Reports the pixel formats this capturer can deliver. We advertise
// FOURCC_ANY since frames arrive pre-encoded via setFrame().
// Returns false only when given a null output pointer.
bool MyCapturer::GetPreferredFourccs(std::vector<uint32_t> *fourccs)
{
if (fourccs == nullptr)
{
return false;
}
fourccs->clear();
fourccs->push_back(cricket::FOURCC_ANY);
return true;   // was `return 1` — use a proper boolean literal
}
/*
FIXME: Verify that this capturer really delivers screencast content.
It may be better to adopt webrtc's non-screencast behaviour instead.
*/
// Reports screencast semantics to webrtc (affects frame-rate/scaling policy).
bool MyCapturer::IsScreencast() const
{
return true;   // was `return 1` — use a proper boolean literal
}
// Intended delivery point for a pre-encoded frame.
// Currently a no-op: the SignalEncodedImage path previously used here is
// deprecated, and no replacement has been wired up yet.
// TODO(review): find the supported way to hand a webrtc::EncodedImage to
// the pipeline in the current webrtc API.
void MyCapturer::onEncodedImage(webrtc::EncodedImage image)
{
// SignalEncodedImage(this, image);
//Here I always used this method but now it is deprecated
// How to send webrtc::EncodedImage
}
void MyCapturer::setFrame (unsigned char *data, int size, int width, int height, int frametype, int flags, int method)
{
unsigned int currentTimestamp = rtc::Time();
insigned int elapsedTime = currentTimestamp - time_;
image_.capture_time_ms_ = elapsedTime;
image_._timeStamp = currentTimestamp;
image_.ntp_time_ms_ = 0;
image_._encodedWidth = width;
image_._encodedHeight = height;
image_._length = size;
image_._size = size;
image_._buffer = data;
image_.adapt_reason_.bw_resolutions_disabled = -1;
image_._completeFrame = true;
image_.rotation_= webrtc::VideoRotation::kVideoRotation_0;
frame->set_ntp_time_ms(0);
frame->set_rotation(webrtc::VideoRotation::kVideoRotation_90);
frame->set_timestamp(currentTimestamp);
image_._frameType = frametype == DisplayFrameKey ? webrtc::FrameType::kVideoFrameKey :
(frametype == DisplayFrameInter ? webrtc::FrameType::kVideoFrameDelta :
webrtc::FrameType::kEmptyFrame);
rtp_frag_header = NULL;
if (format_ != NULL)
{
format_ -> setData(data);
format_ -> setSize(size);
format_ -> setFlags(flags);
}
else
{
format_ = WebRTCFormat::CreateFormat(data, size, flags, method);
}
if (format_ != NULL)
{
format_ -> prepare();
rtp_frag_header = format_ -> getHeader();
if (rtp_frag_header == NULL)
{
#ifdef ERROR
Log() << "MyCapturer: ERROR! Could not get fragmentation units.\n";
#endif
return ;
}
}
rtc::Location location("setFrame()","MyCapturer.cpp");
rtc::MessageHandler *mhandler;
rtc::MessageData *mdata;
rtc::Message *msg;
Log()<<"After setters\n";
onEncodedImage(image_);
thread_->Post(RTC_FROM_HERE,mhandler,0,mdata,false);
}
// rtc::MessageHandler callback: runs on thread_ for messages posted from
// setFrame(), re-delivering the most recent frame stored in image_.
// The message payload is ignored; all state travels through image_.
void MyCapturer::OnMessage(rtc::Message *msg)
{
onEncodedImage(image_);
}