st/va: count number of slices
diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
index a4eb26b406b065d2cf1cfae8893f3dac777849f1..da9ca5aa6c9617dd34a9539c85f06f4291eef41f 100644
--- a/src/gallium/state_trackers/va/picture.c
+++ b/src/gallium/state_trackers/va/picture.c
@@ -32,6 +32,7 @@
 #include "util/u_video.h"
 
 #include "vl/vl_vlc.h"
+#include "vl/vl_winsys.h"
 
 #include "va_private.h"
 
@@ -58,14 +59,28 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
       return VA_STATUS_ERROR_INVALID_SURFACE;
 
    context->target = surf->buffer;
-   context->decoder->begin_frame(context->decoder, context->target, NULL);
+
+   if (!context->decoder) {
+      /* VPP: only progressive RGB(A/X) render targets are supported */
+      if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
+         ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM  &&
+           context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM  &&
+           context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM  &&
+           context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM) ||
+           context->target->interlaced))
+         return VA_STATUS_ERROR_UNIMPLEMENTED;
+
+      return VA_STATUS_SUCCESS;
+   }
+
+   context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
 
    return VA_STATUS_SUCCESS;
 }
 
-static void
-getReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
-                  struct pipe_video_buffer **ref_frame)
+void
+vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
+                      struct pipe_video_buffer **ref_frame)
 {
    vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
    if (surf)
@@ -74,249 +89,80 @@ getReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
       *ref_frame = NULL;
 }
 
-static void
+static VAStatus
 handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
 {
-   VAPictureParameterBufferMPEG2 *mpeg2;
-   VAPictureParameterBufferH264 *h264;
-   VAPictureParameterBufferVC1 * vc1;
-   VAPictureParameterBufferMPEG4 *mpeg4;
-   vlVaSurface *surf_forward;
-   vlVaSurface *surf_backward;
-   unsigned int i;
-   static const uint8_t default_intra_quant_matrix[64] = { 0 };
-   static const uint8_t default_non_intra_quant_matrix[64] = { 0 };
-
-   switch (u_reduce_video_profile(context->decoder->profile)) {
+   VAStatus vaStatus = VA_STATUS_SUCCESS;
+
+   switch (u_reduce_video_profile(context->templat.profile)) {
    case PIPE_VIDEO_FORMAT_MPEG12:
-      assert(buf->size >= sizeof(VAPictureParameterBufferMPEG2) && buf->num_elements == 1);
-      mpeg2 = buf->data;
-      /*horizontal_size;*/
-      /*vertical_size;*/
-      getReferenceFrame(drv, mpeg2->forward_reference_picture, &context->desc.mpeg12.ref[0]);
-      getReferenceFrame(drv, mpeg2->backward_reference_picture, &context->desc.mpeg12.ref[1]);
-      context->desc.mpeg12.picture_coding_type = mpeg2->picture_coding_type;
-      context->desc.mpeg12.f_code[0][0] = ((mpeg2->f_code >> 12) & 0xf) - 1;
-      context->desc.mpeg12.f_code[0][1] = ((mpeg2->f_code >> 8) & 0xf) - 1;
-      context->desc.mpeg12.f_code[1][0] = ((mpeg2->f_code >> 4) & 0xf) - 1;
-      context->desc.mpeg12.f_code[1][1] = (mpeg2->f_code & 0xf) - 1;
-      context->desc.mpeg12.intra_dc_precision =
-         mpeg2->picture_coding_extension.bits.intra_dc_precision;
-      context->desc.mpeg12.picture_structure =
-         mpeg2->picture_coding_extension.bits.picture_structure;
-      context->desc.mpeg12.top_field_first =
-         mpeg2->picture_coding_extension.bits.top_field_first;
-      context->desc.mpeg12.frame_pred_frame_dct =
-         mpeg2->picture_coding_extension.bits.frame_pred_frame_dct;
-      context->desc.mpeg12.concealment_motion_vectors =
-         mpeg2->picture_coding_extension.bits.concealment_motion_vectors;
-      context->desc.mpeg12.q_scale_type =
-         mpeg2->picture_coding_extension.bits.q_scale_type;
-      context->desc.mpeg12.intra_vlc_format =
-         mpeg2->picture_coding_extension.bits.intra_vlc_format;
-      context->desc.mpeg12.alternate_scan =
-         mpeg2->picture_coding_extension.bits.alternate_scan;
-      /*repeat_first_field*/
-      /*progressive_frame*/
-      /*is_first_field*/
+      vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
       break;
 
    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
-      assert(buf->size >= sizeof(VAPictureParameterBufferH264) && buf->num_elements == 1);
-      h264 = buf->data;
-      /*CurrPic*/
-      context->desc.h264.field_order_cnt[0] = h264->CurrPic.TopFieldOrderCnt;
-      context->desc.h264.field_order_cnt[1] = h264->CurrPic.BottomFieldOrderCnt;
-      /*ReferenceFrames[16]*/
-      /*picture_width_in_mbs_minus1*/
-      /*picture_height_in_mbs_minus1*/
-      /*bit_depth_luma_minus8*/
-      /*bit_depth_chroma_minus8*/
-      context->desc.h264.num_ref_frames = h264->num_ref_frames;
-      /*chroma_format_idc*/
-      /*residual_colour_transform_flag*/
-      /*gaps_in_frame_num_value_allowed_flag*/
-      context->desc.h264.pps->sps->frame_mbs_only_flag =
-         h264->seq_fields.bits.frame_mbs_only_flag;
-      context->desc.h264.pps->sps->mb_adaptive_frame_field_flag =
-         h264->seq_fields.bits.mb_adaptive_frame_field_flag;
-      context->desc.h264.pps->sps->direct_8x8_inference_flag =
-         h264->seq_fields.bits.direct_8x8_inference_flag;
-      /*MinLumaBiPredSize8x8*/
-      context->desc.h264.pps->sps->log2_max_frame_num_minus4 =
-         h264->seq_fields.bits.log2_max_frame_num_minus4;
-      context->desc.h264.pps->sps->pic_order_cnt_type =
-         h264->seq_fields.bits.pic_order_cnt_type;
-      context->desc.h264.pps->sps->log2_max_pic_order_cnt_lsb_minus4 =
-         h264->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4;
-      context->desc.h264.pps->sps->delta_pic_order_always_zero_flag =
-         h264->seq_fields.bits.delta_pic_order_always_zero_flag;
-      /*num_slice_groups_minus1*/
-      /*slice_group_map_type*/
-      /*slice_group_change_rate_minus1*/
-      context->desc.h264.pps->pic_init_qp_minus26 =
-         h264->pic_init_qp_minus26;
-      /*pic_init_qs_minus26*/
-      context->desc.h264.pps->chroma_qp_index_offset =
-         h264->chroma_qp_index_offset;
-      context->desc.h264.pps->second_chroma_qp_index_offset =
-         h264->second_chroma_qp_index_offset;
-      context->desc.h264.pps->entropy_coding_mode_flag =
-         h264->pic_fields.bits.entropy_coding_mode_flag;
-      context->desc.h264.pps->weighted_pred_flag =
-         h264->pic_fields.bits.weighted_pred_flag;
-      context->desc.h264.pps->weighted_bipred_idc =
-         h264->pic_fields.bits.weighted_bipred_idc;
-      context->desc.h264.pps->transform_8x8_mode_flag =
-         h264->pic_fields.bits.transform_8x8_mode_flag;
-      context->desc.h264.field_pic_flag =
-         h264->pic_fields.bits.field_pic_flag;
-      context->desc.h264.pps->constrained_intra_pred_flag =
-         h264->pic_fields.bits.constrained_intra_pred_flag;
-      context->desc.h264.pps->bottom_field_pic_order_in_frame_present_flag =
-         h264->pic_fields.bits.pic_order_present_flag;
-      context->desc.h264.pps->deblocking_filter_control_present_flag =
-         h264->pic_fields.bits.deblocking_filter_control_present_flag;
-      context->desc.h264.pps->redundant_pic_cnt_present_flag =
-         h264->pic_fields.bits.redundant_pic_cnt_present_flag;
-      /*reference_pic_flag*/
-      context->desc.h264.frame_num = h264->frame_num;
+      vlVaHandlePictureParameterBufferH264(drv, context, buf);
       break;
 
    case PIPE_VIDEO_FORMAT_VC1:
-      assert(buf->size >= sizeof(VAPictureParameterBufferVC1) && buf->num_elements == 1);
-      vc1 = buf->data;
-      getReferenceFrame(drv, vc1->forward_reference_picture, &context->desc.vc1.ref[0]);
-      getReferenceFrame(drv, vc1->backward_reference_picture, &context->desc.vc1.ref[1]);
-      context->desc.vc1.picture_type = vc1->picture_fields.bits.picture_type;
-      context->desc.vc1.frame_coding_mode = vc1->picture_fields.bits.frame_coding_mode;
-      context->desc.vc1.postprocflag = vc1->post_processing != 0;
-      context->desc.vc1.pulldown = vc1->sequence_fields.bits.pulldown;
-      context->desc.vc1.interlace = vc1->sequence_fields.bits.interlace;
-      context->desc.vc1.tfcntrflag = vc1->sequence_fields.bits.tfcntrflag;
-      context->desc.vc1.finterpflag = vc1->sequence_fields.bits.finterpflag;
-      context->desc.vc1.psf = vc1->sequence_fields.bits.psf;
-      context->desc.vc1.dquant = vc1->pic_quantizer_fields.bits.dquant;
-      context->desc.vc1.panscan_flag = vc1->entrypoint_fields.bits.panscan_flag;
-      context->desc.vc1.refdist_flag =
-         vc1->reference_fields.bits.reference_distance_flag;
-      context->desc.vc1.quantizer = vc1->pic_quantizer_fields.bits.quantizer;
-      context->desc.vc1.extended_mv = vc1->mv_fields.bits.extended_mv_flag;
-      context->desc.vc1.extended_dmv = vc1->mv_fields.bits.extended_dmv_flag;
-      context->desc.vc1.overlap = vc1->sequence_fields.bits.overlap;
-      context->desc.vc1.vstransform =
-         vc1->transform_fields.bits.variable_sized_transform_flag;
-      context->desc.vc1.loopfilter = vc1->entrypoint_fields.bits.loopfilter;
-      context->desc.vc1.fastuvmc = vc1->fast_uvmc_flag;
-      context->desc.vc1.range_mapy_flag = vc1->range_mapping_fields.bits.luma_flag;
-      context->desc.vc1.range_mapy = vc1->range_mapping_fields.bits.luma;
-      context->desc.vc1.range_mapuv_flag = vc1->range_mapping_fields.bits.chroma_flag;
-      context->desc.vc1.range_mapuv = vc1->range_mapping_fields.bits.chroma;
-      context->desc.vc1.multires = vc1->sequence_fields.bits.multires;
-      context->desc.vc1.syncmarker = vc1->sequence_fields.bits.syncmarker;
-      context->desc.vc1.rangered = vc1->sequence_fields.bits.rangered;
-      context->desc.vc1.maxbframes = vc1->sequence_fields.bits.max_b_frames;
-      context->desc.vc1.deblockEnable = vc1->post_processing != 0;
-      context->desc.vc1.pquant = vc1->pic_quantizer_fields.bits.pic_quantizer_scale;
+      vlVaHandlePictureParameterBufferVC1(drv, context, buf);
       break;
 
    case PIPE_VIDEO_FORMAT_MPEG4:
-      assert(buf->size >= sizeof(VAPictureParameterBufferMPEG4) && buf->num_elements == 1);
-      mpeg4 = buf->data;
-
-      context->mpeg4.pps = *mpeg4;
-
-      /* vop_width */
-      /* vop_height */
-      /* forward_reference_picture */
-      /* backward_reference_picture */
-      context->desc.mpeg4.short_video_header =
-            mpeg4->vol_fields.bits.short_video_header;
-      /* chroma_format */
-      context->desc.mpeg4.interlaced = mpeg4->vol_fields.bits.interlaced;
-      /* obmc_disable */
-      /* sprite_enable */
-      /* sprite_warping_accuracy */
-      context->desc.mpeg4.quant_type = mpeg4->vol_fields.bits.quant_type;
-      context->desc.mpeg4.quarter_sample = mpeg4->vol_fields.bits.quarter_sample;
-      /* data_partitioned */
-      /* reversible_vlc */
-      context->desc.mpeg4.resync_marker_disable =
-            mpeg4->vol_fields.bits.resync_marker_disable;
-      /* no_of_sprite_warping_points */
-      /* sprite_trajectory_du */
-      /* sprite_trajectory_dv */
-      /* quant_precision */
-      context->desc.mpeg4.vop_coding_type = mpeg4->vop_fields.bits.vop_coding_type;
-      /* backward_reference_vop_coding_type */
-      /* vop_rounding_type */
-      /* intra_dc_vlc_thr */
-      context->desc.mpeg4.top_field_first =
-            mpeg4->vop_fields.bits.top_field_first;
-      context->desc.mpeg4.alternate_vertical_scan_flag =
-            mpeg4->vop_fields.bits.alternate_vertical_scan_flag;
-      context->desc.mpeg4.vop_fcode_forward = mpeg4->vop_fcode_forward;
-      context->desc.mpeg4.vop_fcode_backward = mpeg4->vop_fcode_backward;
-      context->desc.mpeg4.vop_time_increment_resolution =
-            mpeg4->vop_time_increment_resolution;
-      /* num_gobs_in_vop */
-      /* num_macroblocks_in_gob */
-      context->desc.mpeg4.trb[0] = mpeg4->TRB;
-      context->desc.mpeg4.trb[1] = mpeg4->TRB;
-      context->desc.mpeg4.trd[0] = mpeg4->TRD;
-      context->desc.mpeg4.trd[1] = mpeg4->TRD;
-
-      /* default [non-]intra quant matrix because mpv does not set these
-         matrices */
-      if (!context->desc.mpeg4.intra_matrix)
-         context->desc.mpeg4.intra_matrix = default_intra_quant_matrix;
-      if (!context->desc.mpeg4.non_intra_matrix)
-         context->desc.mpeg4.non_intra_matrix = default_non_intra_quant_matrix;
-
-      surf_forward = handle_table_get(drv->htab, mpeg4->forward_reference_picture);
-      if (surf_forward)
-         context->desc.mpeg4.ref[0] = surf_forward->buffer;
-      surf_backward = handle_table_get(drv->htab, mpeg4->backward_reference_picture);
-      if (surf_backward)
-         context->desc.mpeg4.ref[1] = surf_backward->buffer;
-
-      context->mpeg4.vti_bits = 0;
-      for (i = context->desc.mpeg4.vop_time_increment_resolution; i > 0; i /= 2)
-         ++context->mpeg4.vti_bits;
+      vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
+      break;
 
+   case PIPE_VIDEO_FORMAT_HEVC:
+      vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
       break;
 
    default:
       break;
    }
+
+   /* Create the decoder once max_references is known. */
+   if (!context->decoder) {
+      if (!context->target)
+         return VA_STATUS_ERROR_INVALID_CONTEXT;
+
+      if (context->templat.max_references == 0)
+         return VA_STATUS_ERROR_INVALID_BUFFER;
+
+      if (u_reduce_video_profile(context->templat.profile) ==
+          PIPE_VIDEO_FORMAT_MPEG4_AVC)
+         context->templat.level = u_get_h264_level(context->templat.width,
+            context->templat.height, &context->templat.max_references);
+
+      context->decoder = drv->pipe->create_video_codec(drv->pipe,
+         &context->templat);
+
+      if (!context->decoder)
+         return VA_STATUS_ERROR_ALLOCATION_FAILED;
+
+      context->decoder->begin_frame(context->decoder, context->target,
+         &context->desc.base);
+   }
+
+   return vaStatus;
 }
 
 static void
 handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
 {
-   VAIQMatrixBufferMPEG2 *mpeg2;
-   VAIQMatrixBufferH264 *h264;
-
-   switch (u_reduce_video_profile(context->decoder->profile)) {
+   switch (u_reduce_video_profile(context->templat.profile)) {
    case PIPE_VIDEO_FORMAT_MPEG12:
-      assert(buf->size >= sizeof(VAIQMatrixBufferMPEG2) && buf->num_elements == 1);
-      mpeg2 = buf->data;
-      if (mpeg2->load_intra_quantiser_matrix)
-         context->desc.mpeg12.intra_matrix = mpeg2->intra_quantiser_matrix;
-      else
-         context->desc.mpeg12.intra_matrix = NULL;
-
-      if (mpeg2->load_non_intra_quantiser_matrix)
-         context->desc.mpeg12.non_intra_matrix = mpeg2->non_intra_quantiser_matrix;
-      else
-         context->desc.mpeg12.non_intra_matrix = NULL;
+      vlVaHandleIQMatrixBufferMPEG12(context, buf);
       break;
 
    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
-      assert(buf->size >= sizeof(VAIQMatrixBufferH264) && buf->num_elements == 1);
-      h264 = buf->data;
-      memcpy(&context->desc.h264.pps->ScalingList4x4, h264->ScalingList4x4, 6 * 16);
-      memcpy(&context->desc.h264.pps->ScalingList8x8, h264->ScalingList8x8, 2 * 64);
+      vlVaHandleIQMatrixBufferH264(context, buf);
+      break;
+
+   case PIPE_VIDEO_FORMAT_MPEG4:
+      vlVaHandleIQMatrixBufferMPEG4(context, buf);
+      break;
+
+   case PIPE_VIDEO_FORMAT_HEVC:
+      vlVaHandleIQMatrixBufferHEVC(context, buf);
       break;
 
    default:
@@ -327,16 +173,25 @@ handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
 static void
 handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
 {
-   VASliceParameterBufferH264 *h264;
+   switch (u_reduce_video_profile(context->templat.profile)) {
+   case PIPE_VIDEO_FORMAT_MPEG12:
+      vlVaHandleSliceParameterBufferMPEG12(context, buf);
+      break;
+
+   case PIPE_VIDEO_FORMAT_VC1:
+      vlVaHandleSliceParameterBufferVC1(context, buf);
+      break;
 
-   switch (u_reduce_video_profile(context->decoder->profile)) {
    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
-      assert(buf->size >= sizeof(VASliceParameterBufferH264) && buf->num_elements == 1);
-      h264 = buf->data;
-      context->desc.h264.num_ref_idx_l0_active_minus1 =
-         h264->num_ref_idx_l0_active_minus1;
-      context->desc.h264.num_ref_idx_l1_active_minus1 =
-         h264->num_ref_idx_l1_active_minus1;
+      vlVaHandleSliceParameterBufferH264(context, buf);
+      break;
+
+   case PIPE_VIDEO_FORMAT_MPEG4:
+      vlVaHandleSliceParameterBufferMPEG4(context, buf);
+      break;
+
+   case PIPE_VIDEO_FORMAT_HEVC:
+      vlVaHandleSliceParameterBufferHEVC(context, buf);
       break;
 
    default:
@@ -370,16 +225,24 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
    void * const *buffers[2];
    unsigned sizes[2];
    static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
+   static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
    static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
 
-   format = u_reduce_video_profile(context->decoder->profile);
+   format = u_reduce_video_profile(context->templat.profile);
    switch (format) {
    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
-         if (bufHasStartcode(buf, 0x000001, 24))
-            break;
+      if (bufHasStartcode(buf, 0x000001, 24))
+         break;
+
+      buffers[num_buffers] = (void *const)&start_code_h264;
+      sizes[num_buffers++] = sizeof(start_code_h264);
+      break;
+   case PIPE_VIDEO_FORMAT_HEVC:
+      if (bufHasStartcode(buf, 0x000001, 24))
+         break;
 
-         buffers[num_buffers] = (void *const)&start_code_h264;
-         sizes[num_buffers++] = sizeof(start_code_h264);
+      buffers[num_buffers] = (void *const)&start_code_h265;
+      sizes[num_buffers++] = sizeof(start_code_h265);
       break;
    case PIPE_VIDEO_FORMAT_VC1:
       if (bufHasStartcode(buf, 0x0000010d, 32) ||
@@ -387,9 +250,18 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
           bufHasStartcode(buf, 0x0000010b, 32))
          break;
 
+      if (context->decoder->profile == PIPE_VIDEO_PROFILE_VC1_ADVANCED) {
          buffers[num_buffers] = (void *const)&start_code_vc1;
          sizes[num_buffers++] = sizeof(start_code_vc1);
+      }
       break;
+   case PIPE_VIDEO_FORMAT_MPEG4:
+      if (bufHasStartcode(buf, 0x000001, 24))
+         break;
+
+      vlVaDecoderFixMPEG4Startcode(context);
+      buffers[num_buffers] = (void *)context->mpeg4.start_code;
+      sizes[num_buffers++] = context->mpeg4.start_code_size;
    default:
       break;
    }
@@ -397,7 +269,7 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
    buffers[num_buffers] = buf->data;
    sizes[num_buffers] = buf->size;
    ++num_buffers;
-   context->decoder->decode_bitstream(context->decoder, context->target, NULL,
+   context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
       num_buffers, (const void * const*)buffers, sizes);
 }
 
@@ -406,6 +278,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
 {
    vlVaDriver *drv;
    vlVaContext *context;
+   VAStatus vaStatus = VA_STATUS_SUCCESS;
 
    unsigned i;
 
@@ -427,7 +300,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
 
       switch (buf->type) {
       case VAPictureParameterBufferType:
-         handlePictureParameterBuffer(drv, context, buf);
+         vaStatus = handlePictureParameterBuffer(drv, context, buf);
          break;
 
       case VAIQMatrixBufferType:
@@ -441,13 +314,16 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
       case VASliceDataBufferType:
          handleVASliceDataBufferType(context, buf);
          break;
+      case VAProcPipelineParameterBufferType:
+         vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
+         break;
 
       default:
          break;
       }
    }
 
-   return VA_STATUS_SUCCESS;
+   return vaStatus;
 }
 
 VAStatus
@@ -467,6 +343,15 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
    if (!context)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
+   if (!context->decoder) {
+      if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
+         return VA_STATUS_ERROR_INVALID_CONTEXT;
+
+      /* VPP */
+      return VA_STATUS_SUCCESS;
+   }
+
+   context->mpeg4.frame_num++;
    context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
 
    return VA_STATUS_SUCCESS;
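
For reference, a minimal client-side sketch (not part of this patch) of the libva call
sequence that drives the entry points above: one frame is submitted through
vaBeginPicture -> vaRenderPicture -> vaEndPicture. The display, context, surface and the
H.264 parameter structs are assumed to be set up elsewhere (vaCreateConfig /
vaCreateContext / vaCreateSurfaces); buffer cleanup and most error handling are omitted.

#include <va/va.h>

/* Hypothetical helper: submit one H.264 frame through the VA-API entry points
 * implemented by vlVaBeginPicture/vlVaRenderPicture/vlVaEndPicture.
 * Return values of vaCreateBuffer are ignored for brevity. */
static VAStatus decode_one_frame(VADisplay dpy, VAContextID context,
                                 VASurfaceID surface,
                                 VAPictureParameterBufferH264 *pic,
                                 VAIQMatrixBufferH264 *iq,
                                 VASliceParameterBufferH264 *slice,
                                 void *bitstream, unsigned bitstream_size)
{
   VABufferID bufs[4];
   VAStatus status;

   /* One buffer per VABufferType handled in vlVaRenderPicture(). */
   vaCreateBuffer(dpy, context, VAPictureParameterBufferType,
                  sizeof(*pic), 1, pic, &bufs[0]);
   vaCreateBuffer(dpy, context, VAIQMatrixBufferType,
                  sizeof(*iq), 1, iq, &bufs[1]);
   vaCreateBuffer(dpy, context, VASliceParameterBufferType,
                  sizeof(*slice), 1, slice, &bufs[2]);
   vaCreateBuffer(dpy, context, VASliceDataBufferType,
                  bitstream_size, 1, bitstream, &bufs[3]);

   /* vlVaBeginPicture() binds the render target (or just validates it for VPP). */
   status = vaBeginPicture(dpy, context, surface);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* vlVaRenderPicture() dispatches each buffer to its handle*() helper. */
   status = vaRenderPicture(dpy, context, bufs, 4);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* vlVaEndPicture() calls the gallium decoder's end_frame(). */
   status = vaEndPicture(dpy, context);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* Wait for the decode to finish before mapping or presenting the surface. */
   return vaSyncSurface(dpy, surface);
}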