Hi all,
I am using FFmpeg for video encoding and decoding (H.264). After decoding, my frame height is reduced but the width is fine.
Here is my code:
int FFMPEGDecode::initDecoder(int codec, int width, int height)
{
    if (codec == CODEC_H263)
    {
        decoder = avcodec_find_decoder(AV_CODEC_ID_H263);
    }
    else if (codec == CODEC_H263P)
    {
        decoder = avcodec_find_decoder(AV_CODEC_ID_H263P);
    }
    else if (codec == CODEC_H264)
    {
        decoder = avcodec_find_decoder(AV_CODEC_ID_H264);
    }
    else
    {
        LOGE("Codec not recognised!! Please select a valid codec.");
        return -1;
    }
    if (decoder == NULL)
    {
        LOGE("decoder not found");
        return -3;
    }
    decoderContext = avcodec_alloc_context3(decoder);
    // Set properties for the decoder. The H.264 packets already carry this
    // information (it was set up in the initencoder function), so the decoder
    // does not strictly need it specified again.
    decoderContext->pix_fmt = AV_PIX_FMT_YUV420P;
    decoderContext->width = width;
    decoderContext->height = height;
    if (decoder->capabilities & CODEC_CAP_TRUNCATED)
        decoderContext->flags |= CODEC_FLAG_TRUNCATED;
    decoderContext->flags2 |= CODEC_FLAG2_FAST;
    decoderScaleContext = sws_getCachedContext(decoderScaleContext, width,
            height, AV_PIX_FMT_YUV420P, width, height, AV_PIX_FMT_RGB32,
            SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (avcodec_open2(decoderContext, decoder, NULL) < 0)
    {
        LOGE("decoder not opened");
        return -4;
    }
    return 1;
}
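For reference, the two pixel formats used above have very different per-pixel sizes, which matters for every buffer in the decode path. This is only a minimal sketch of the arithmetic, assuming the same AV_PIX_FMT_YUV420P source and AV_PIX_FMT_RGB32 target as in the code and even width/height:

// One decoded YUV420P frame: a full-size Y plane plus quarter-size U and V
// planes, i.e. roughly 1.5 bytes per pixel.
int yuvBytes = avpicture_get_size(AV_PIX_FMT_YUV420P, width, height); // ~ width * height * 3 / 2
// One converted RGB32 frame: 4 bytes per pixel, matching
// Bitmap.Config.ARGB_8888 on the Java side.
int rgbBytes = avpicture_get_size(AV_PIX_FMT_RGB32, width, height);   // = width * height * 4

So the RGB32 destination holds four times as many bytes as there are pixels in the frame.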
int FFMPEGDecode::decode(byte* encodedData, int encodedDataSize,
        int* decodedData)
{
    static verylong st, et;
    st = getCurrentSystemTimeMicro();
    int decode;
    int got_frame;
    byte *rgb_buffer;
    AVFrame *dframe;
    AVFrame *dframergb;
    AVPacket dc_packet;
    dframe = av_frame_alloc(); // this frame receives the decoded output
    dframe->format = decoderContext->pix_fmt; // pixel format is yuv420p
    dframe->width = decoderContext->width;
    dframe->height = decoderContext->height;
    dframergb = av_frame_alloc(); // destination frame for the YUV-to-RGB conversion done by sws_scale()
    dframergb->format = AV_PIX_FMT_RGB32; // pixel format rgb32; the Java side creates the bitmap with
    // Bitmap.Config.ARGB_8888, which stores each pixel in 4 bytes, so rgb32 is used here
    dframergb->width = decoderContext->width;
    dframergb->height = decoderContext->height;
    int num_bytes = avpicture_get_size(AV_PIX_FMT_RGB32, decoderContext->width,
            decoderContext->height);
    rgb_buffer = (byte *) av_malloc(num_bytes);
    avpicture_fill((AVPicture*) dframergb, rgb_buffer, PIX_FMT_RGB32,
            decoderContext->width, decoderContext->height); // point the frame's data planes at rgb_buffer
    av_init_packet(&dc_packet); // initialize the packet, then attach the encoded data and its size
    dc_packet.data = (byte*) encodedData;
    dc_packet.size = encodedDataSize;
    decode = avcodec_decode_video2(decoderContext, dframe, &got_frame,
            &dc_packet);
    if (got_frame > 0)
    {
        int h = sws_scale(decoderScaleContext, (const uint8_t * const *) dframe->data,
                dframe->linesize, 0, decoderContext->height, dframergb->data,
                dframergb->linesize); // frame conversion YUV to RGB
        memcpy(decodedData, dframergb->data[0],
                decoderContext->width * decoderContext->height);
        if (h != dframe->height) {
            LOGD("Decoded NALU successfully HEIGHT NOT MATCH.....");
        } else {
            LOGD("Decoded NALU successfully");
        }
    }
    else
    {
        LOGE("Could not decode frame.. :(");
    }
    av_free_packet(&dc_packet);
    av_free(rgb_buffer);
    av_free(dframergb);
    av_free(dframe);
    et = getCurrentSystemTimeMicro();
    LOGD("Decoding time: %llu", (et - st));
    if (got_frame)
    {
        return decoderContext->height * decoderContext->width;
    }
    else
    {
        return 0;
    }
}
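When the output looks vertically truncated like this, one thing worth logging right after the sws_scale() call is what the decoder and scaler actually report. This is only a debugging sketch; dframe, dframergb, decoderContext and h are the variables from decode() above, and LOGD is assumed to take printf-style arguments as it does elsewhere in the code:

// Compare what the decoder actually produced with what was configured in initDecoder.
LOGD("decoded frame: %dx%d, linesize[0]=%d; context: %dx%d",
        dframe->width, dframe->height, dframe->linesize[0],
        decoderContext->width, decoderContext->height);
// sws_scale returns the height of the output slice it wrote; for an RGB32
// destination the row stride should be width * 4 bytes.
LOGD("sws_scale height=%d, rgb linesize[0]=%d (expected %d)",
        h, dframergb->linesize[0], decoderContext->width * 4);

If those values all match the configured width and height, the mismatch is presumably happening after the conversion rather than in the decoder itself.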