SignalEncodedImage deprecation


Andriy Buchynskyy

Sep 12, 2018, 9:50:32 AM
to EasyRTC
I used the SignalEncodedImage method to send desktop images to the browser, but in the new WebRTC code there is no longer any way to call it.
There is only the OnFrame method, which takes a webrtc::VideoFrame, whereas I need to send a webrtc::EncodedImage. Can anybody tell me how I can do that?
Is there perhaps a way to convert a webrtc::EncodedImage into a webrtc::VideoFrame? I have attached my old cricket::VideoCapturer class so you can see
the implementation I used before.

Here is my code:

MyCapturer.h
class MyCapturer : public cricket::VideoCapturer

{

//
// Constructors and destructors.
//

public:

MyCapturer(rtc::Thread *thread, cricket::WebRtcVideoEncoderFactory *video_encoder, cricket::WebRtcVideoDecoderFactory *video_decoder);

virtual ~MyCapturer();

//
// Setting frame method called from DisplayEncoder.
//

void setFrame(unsigned char *data, int size, int width, int height, int frametype, int flags, int method);

//
// Derived from rtc message handler.
//

virtual void OnMessage(rtc::Message* msg);

//
// Handling VideoCapturer.
//

virtual bool GetBestCaptureFormat(const cricket::VideoFormat &desired,
cricket::VideoFormat *bestFormat);

virtual cricket::CaptureState Start(const cricket::VideoFormat &captureFormat);

virtual void Stop();

virtual bool IsRunning();

virtual bool IsScreencast() const;

virtual bool GetPreferredFourccs(std::vector<uint32_t> *fourccs);

protected:

//
// Method connected to call VideoCapturer processing.
//

void onEncodedImage(webrtc::EncodedImage image);

private:

//
// Capture id.
//

const char *capturerId_;

//
// EncodedImage object.
//

webrtc::EncodedImage image_;

webrtc::RTPFragmentationHeader *rtp_frag_header;


//
// Capturer State for virtual method.
//

int running_;

//
// Create time.
//

unsigned int time_;

//
// Fragmentation format.
//

WebRTCFormat *format_;


//
// Rtc thread.
//

rtc::Thread *thread_;
}; 


MyCapturer.cpp
MyCapturer::MyCapturer(rtc::Thread *thread, cricket::WebRtcVideoEncoderFactory *video_encoder, cricket::WebRtcVideoDecoderFactory *video_decoder)
{
capturerId_ = "video_frames";
running_ = 0;
format_ = NULL;
rtp_frag_header = NULL;
thread_ = thread;
image_ = webrtc::EncodedImage();
time_ = rtc::Time();
SetId(capturerId_);
}

MyCapturer::~MyCapturer()
{
Stop();

if (format_ != NULL)
{
delete format_;

format_ = NULL;
}
}

bool MyCapturer::GetBestCaptureFormat(const cricket::VideoFormat &desired,
cricket::VideoFormat *bestFormat)
{
*bestFormat = desired;

return true;
}

cricket::CaptureState MyCapturer::Start(const cricket::VideoFormat &captureFormat)
{
#ifdef DEBUG
Log() << "MyCapturer: Started.\n";
#endif

SetCaptureFormat(&captureFormat);
running_ = 1;
return cricket::CS_RUNNING;
}

void MyCapturer::Stop()
{
SetCaptureFormat(NULL);

running_ = 0;

SetCaptureState(cricket::CS_STOPPED);
}

bool MyCapturer::IsRunning()
{
return running_;
}

bool MyCapturer::GetPreferredFourccs(std::vector<uint32_t> *fourccs)
{
fourccs->clear();

fourccs->push_back(cricket::FOURCC_ANY);

return true;
}

/*
FIXME: Check whether we really are screencasting here.
It may be better to take the non-screencast behaviour
from webrtc.
*/

bool MyCapturer::IsScreencast() const
{
return true;
}

void MyCapturer::onEncodedImage(webrtc::EncodedImage image)
{
// SignalEncodedImage(this, image);
// This is the call I always used here, but it is gone now.
// How do I send the webrtc::EncodedImage instead?
}

void MyCapturer::setFrame(unsigned char *data, int size, int width, int height, int frametype, int flags, int method)
{
unsigned int currentTimestamp = rtc::Time();
unsigned int elapsedTime = currentTimestamp - time_;


image_.capture_time_ms_ = elapsedTime;
image_._timeStamp = currentTimestamp;
image_.ntp_time_ms_ = 0;
image_._encodedWidth = width;
image_._encodedHeight = height;
image_._length = size;
image_._size = size;
image_._buffer = data;
image_.adapt_reason_.bw_resolutions_disabled = -1;
image_._completeFrame = true;
image_.rotation_= webrtc::VideoRotation::kVideoRotation_0;

image_._frameType = frametype == DisplayFrameKey ? webrtc::FrameType::kVideoFrameKey :
(frametype == DisplayFrameInter ? webrtc::FrameType::kVideoFrameDelta :
webrtc::FrameType::kEmptyFrame);
rtp_frag_header = NULL;

if (format_ != NULL)
{
format_->setData(data);
format_->setSize(size);
format_->setFlags(flags);
}
else
{
format_ = WebRTCFormat::CreateFormat(data, size, flags, method);
}

if (format_ != NULL)
{
format_->prepare();
rtp_frag_header = format_->getHeader();

if (rtp_frag_header == NULL)
{
#ifdef ERROR
Log() << "MyCapturer: ERROR! Could not get fragmentation units.\n";
#endif

return;
}
}
// Post to ourselves (this class is the rtc::MessageHandler), so that
// OnMessage() delivers image_ via onEncodedImage() on the rtc thread.
thread_->Post(RTC_FROM_HERE, this, 0, NULL, false);

}

void MyCapturer::OnMessage(rtc::Message *msg)
{
onEncodedImage(image_);
}
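
In case it helps the discussion, here is the direction I have been experimenting with: instead of signalling from the capturer, implement a pass-through webrtc::VideoEncoder that ignores the raw VideoFrame it is given and emits the already-encoded image through its registered EncodedImageCallback. This is only an untested sketch: PassthroughEncoder and setImage() are my own names, and the include paths and virtual signatures are taken from the WebRTC revision I am reading, so they may differ in yours.

#include <vector>

// Include paths vary between WebRTC checkouts.
#include "api/video_codecs/video_encoder.h"
#include "modules/include/module_common_types.h"

//
// Untested sketch: an "encoder" that drops the raw frame it is given and
// forwards a pre-encoded image instead. setImage() would be called from
// setFrame() with the same data MyCapturer currently puts into image_.
//
class PassthroughEncoder : public webrtc::VideoEncoder
{

public:

PassthroughEncoder() : callback_(NULL), fragmentation_(NULL) {}

int32_t InitEncode(const webrtc::VideoCodec *codec_settings,
int32_t number_of_cores,
size_t max_payload_size) override
{
return WEBRTC_VIDEO_CODEC_OK;
}

int32_t RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback *callback) override
{
callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}

int32_t Release() override
{
return WEBRTC_VIDEO_CODEC_OK;
}

//
// Ignore the captured frame; hand the stored EncodedImage to the callback.
//
int32_t Encode(const webrtc::VideoFrame &frame,
const webrtc::CodecSpecificInfo *codec_specific_info,
const std::vector<webrtc::FrameType> *frame_types) override
{
if (callback_ != NULL)
{
callback_->OnEncodedImage(image_, NULL, fragmentation_);
}
return WEBRTC_VIDEO_CODEC_OK;
}

int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override
{
return WEBRTC_VIDEO_CODEC_OK;
}

//
// Called from setFrame() with the already-encoded desktop image.
// Note: this is a shallow copy, so the buffer image._buffer points to
// must stay alive until the frame has been sent.
//
void setImage(const webrtc::EncodedImage &image, webrtc::RTPFragmentationHeader *fragmentation)
{
image_ = image;
fragmentation_ = fragmentation;
}

private:

webrtc::EncodedImageCallback *callback_;

webrtc::EncodedImage image_;

webrtc::RTPFragmentationHeader *fragmentation_;
};

The missing piece would be creating this encoder from the cricket::WebRtcVideoEncoderFactory that I already pass into MyCapturer's constructor, so the send pipeline picks it up instead of a real codec. I have not verified this end to end, so corrections are welcome.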


If you have any ideas on how to send that image, or any other solution for my case, please share them. Thank you!
Attachment: videocapturer.h