X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fstate_trackers%2Fvdpau%2Fdecode.c;h=56a171fcd8748f604951aeb1f51c3b5c8586b923;hb=d5fc3746fe3a337bccba0d644543633b728b9444;hp=3bf05bea21fe06f333bef294e1e2f58da92cecce;hpb=1d1d038c85ebb37f1da4540f092563e8ecab7dfb;p=mesa.git diff --git a/src/gallium/state_trackers/vdpau/decode.c b/src/gallium/state_trackers/vdpau/decode.c index 3bf05bea21f..56a171fcd87 100644 --- a/src/gallium/state_trackers/vdpau/decode.c +++ b/src/gallium/state_trackers/vdpau/decode.c @@ -25,12 +25,16 @@ * **************************************************************************/ -#include -#include -#include +#include "util/u_memory.h" +#include "util/u_math.h" +#include "util/u_debug.h" +#include "util/u_video.h" #include "vdpau_private.h" +/** + * Create a VdpDecoder. + */ VdpStatus vlVdpDecoderCreate(VdpDevice device, VdpDecoderProfile profile, @@ -40,15 +44,15 @@ vlVdpDecoderCreate(VdpDevice device, { enum pipe_video_profile p_profile; struct pipe_context *pipe; + struct pipe_screen *screen; vlVdpDevice *dev; vlVdpDecoder *vldecoder; VdpStatus ret; - unsigned i; - - VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Creating decoder\n"); + bool supported; if (!decoder) return VDP_STATUS_INVALID_POINTER; + *decoder = 0; if (!(width && height)) return VDP_STATUS_INVALID_VALUE; @@ -61,84 +65,86 @@ vlVdpDecoderCreate(VdpDevice device, if (!dev) return VDP_STATUS_INVALID_HANDLE; - pipe = dev->context->pipe; + pipe = dev->context; + screen = dev->vscreen->pscreen; + + pipe_mutex_lock(dev->mutex); + + supported = screen->get_video_param + ( + screen, + p_profile, + PIPE_VIDEO_CAP_SUPPORTED + ); + if (!supported) { + pipe_mutex_unlock(dev->mutex); + return VDP_STATUS_INVALID_DECODER_PROFILE; + } vldecoder = CALLOC(1,sizeof(vlVdpDecoder)); - if (!vldecoder) + if (!vldecoder) { + pipe_mutex_unlock(dev->mutex); return VDP_STATUS_RESOURCES; + } vldecoder->device = dev; - // TODO: Define max_references. Used mainly for H264 vldecoder->decoder = pipe->create_video_decoder ( pipe, p_profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CHROMA_FORMAT_420, - width, height + width, height, max_references, + false ); + if (!vldecoder->decoder) { ret = VDP_STATUS_ERROR; goto error_decoder; } - vldecoder->cur_buffer = 0; - - for (i = 0; i < VL_NUM_DECODE_BUFFERS; ++i) { - vldecoder->buffer[i] = vldecoder->decoder->create_buffer(vldecoder->decoder); - if (!vldecoder->buffer[i]) { - ret = VDP_STATUS_ERROR; - goto error_buffer; - } - } - *decoder = vlAddDataHTAB(vldecoder); if (*decoder == 0) { ret = VDP_STATUS_ERROR; goto error_handle; } - - VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoder created succesfully\n"); + pipe_mutex_unlock(dev->mutex); return VDP_STATUS_OK; error_handle: -error_buffer: - - for (i = 0; i < VL_NUM_DECODE_BUFFERS; ++i) - if (vldecoder->buffer[i]) - vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffer[i]); - vldecoder->decoder->destroy(vldecoder->decoder); error_decoder: + pipe_mutex_unlock(dev->mutex); FREE(vldecoder); return ret; } +/** + * Destroy a VdpDecoder. 
+ */ VdpStatus vlVdpDecoderDestroy(VdpDecoder decoder) { vlVdpDecoder *vldecoder; - unsigned i; - - VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Destroying decoder\n"); vldecoder = (vlVdpDecoder *)vlGetDataHTAB(decoder); if (!vldecoder) return VDP_STATUS_INVALID_HANDLE; - for (i = 0; i < VL_NUM_DECODE_BUFFERS; ++i) - if (vldecoder->buffer[i]) - vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffer[i]); - + pipe_mutex_lock(vldecoder->device->mutex); vldecoder->decoder->destroy(vldecoder->decoder); + pipe_mutex_unlock(vldecoder->device->mutex); FREE(vldecoder); return VDP_STATUS_OK; } +/** + * Retrieve the parameters used to create a VdpBitmapSurface. + */ VdpStatus vlVdpDecoderGetParameters(VdpDecoder decoder, VdpDecoderProfile *profile, @@ -147,83 +153,234 @@ vlVdpDecoderGetParameters(VdpDecoder decoder, { vlVdpDecoder *vldecoder; - VDPAU_MSG(VDPAU_TRACE, "[VDPAU] decoder get parameters called\n"); - vldecoder = (vlVdpDecoder *)vlGetDataHTAB(decoder); if (!vldecoder) return VDP_STATUS_INVALID_HANDLE; - + *profile = PipeToProfile(vldecoder->decoder->profile); *width = vldecoder->decoder->width; *height = vldecoder->decoder->height; - + return VDP_STATUS_OK; } static VdpStatus -vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder, - VdpPictureInfoMPEG1Or2 *picture_info, - uint32_t bitstream_buffer_count, - VdpBitstreamBuffer const *bitstream_buffers) +vlVdpGetReferenceFrame(VdpVideoSurface handle, struct pipe_video_buffer **ref_frame) { - struct pipe_mpeg12_picture_desc picture; - struct pipe_video_buffer *ref_frames[2]; - uint8_t intra_quantizer_matrix[64]; - unsigned num_ycbcr_blocks[3] = { 0, 0, 0 }; - unsigned i; - - VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG2\n"); - - i = 0; + vlVdpSurface *surface; /* if surfaces equals VDP_STATUS_INVALID_HANDLE, they are not used */ - if (picture_info->forward_reference != VDP_INVALID_HANDLE) { - ref_frames[i] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer; - if (!ref_frames[i]) - return VDP_STATUS_INVALID_HANDLE; - ++i; + if (handle == VDP_INVALID_HANDLE) { + *ref_frame = NULL; + return VDP_STATUS_OK; } - if (picture_info->backward_reference != VDP_INVALID_HANDLE) { - ref_frames[i] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer; - if (!ref_frames[i]) + surface = vlGetDataHTAB(handle); + if (!surface) + return VDP_STATUS_INVALID_HANDLE; + + *ref_frame = surface->video_buffer; + if (!*ref_frame) return VDP_STATUS_INVALID_HANDLE; - ++i; - } - decoder->set_reference_frames(decoder, ref_frames, i); + return VDP_STATUS_OK; +} - memset(&picture, 0, sizeof(picture)); - picture.base.profile = decoder->profile; - picture.picture_coding_type = picture_info->picture_coding_type; - picture.picture_structure = picture_info->picture_structure; - picture.frame_pred_frame_dct = picture_info->frame_pred_frame_dct; - picture.q_scale_type = picture_info->q_scale_type; - picture.alternate_scan = picture_info->alternate_scan; - picture.intra_vlc_format = picture_info->intra_vlc_format; - picture.concealment_motion_vectors = picture_info->concealment_motion_vectors; - picture.f_code[0][0] = picture_info->f_code[0][0] - 1; - picture.f_code[0][1] = picture_info->f_code[0][1] - 1; - picture.f_code[1][0] = picture_info->f_code[1][0] - 1; - picture.f_code[1][1] = picture_info->f_code[1][1] - 1; +/** + * Decode a mpeg 1/2 video. 
+ */ +static VdpStatus +vlVdpDecoderRenderMpeg12(struct pipe_mpeg12_picture_desc *picture, + VdpPictureInfoMPEG1Or2 *picture_info) +{ + VdpStatus r; + + VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG12\n"); + + r = vlVdpGetReferenceFrame(picture_info->forward_reference, &picture->ref[0]); + if (r != VDP_STATUS_OK) + return r; + + r = vlVdpGetReferenceFrame(picture_info->backward_reference, &picture->ref[1]); + if (r != VDP_STATUS_OK) + return r; + + picture->picture_coding_type = picture_info->picture_coding_type; + picture->picture_structure = picture_info->picture_structure; + picture->frame_pred_frame_dct = picture_info->frame_pred_frame_dct; + picture->q_scale_type = picture_info->q_scale_type; + picture->alternate_scan = picture_info->alternate_scan; + picture->intra_vlc_format = picture_info->intra_vlc_format; + picture->concealment_motion_vectors = picture_info->concealment_motion_vectors; + picture->intra_dc_precision = picture_info->intra_dc_precision; + picture->f_code[0][0] = picture_info->f_code[0][0] - 1; + picture->f_code[0][1] = picture_info->f_code[0][1] - 1; + picture->f_code[1][0] = picture_info->f_code[1][0] - 1; + picture->f_code[1][1] = picture_info->f_code[1][1] - 1; + picture->num_slices = picture_info->slice_count; + picture->top_field_first = picture_info->top_field_first; + picture->full_pel_forward_vector = picture_info->full_pel_forward_vector; + picture->full_pel_backward_vector = picture_info->full_pel_backward_vector; + picture->intra_matrix = picture_info->intra_quantizer_matrix; + picture->non_intra_matrix = picture_info->non_intra_quantizer_matrix; - decoder->set_picture_parameters(decoder, &picture.base); + return VDP_STATUS_OK; +} - memcpy(intra_quantizer_matrix, picture_info->intra_quantizer_matrix, sizeof(intra_quantizer_matrix)); - intra_quantizer_matrix[0] = 1 << (7 - picture_info->intra_dc_precision); - decoder->set_quant_matrix(decoder, intra_quantizer_matrix, picture_info->non_intra_quantizer_matrix); +/** + * Decode a mpeg 4 video. 
+ */ +static VdpStatus +vlVdpDecoderRenderMpeg4(struct pipe_mpeg4_picture_desc *picture, + VdpPictureInfoMPEG4Part2 *picture_info) +{ + VdpStatus r; + unsigned i; + + VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG4\n"); + + r = vlVdpGetReferenceFrame(picture_info->forward_reference, &picture->ref[0]); + if (r != VDP_STATUS_OK) + return r; - decoder->begin_frame(decoder); + r = vlVdpGetReferenceFrame(picture_info->backward_reference, &picture->ref[1]); + if (r != VDP_STATUS_OK) + return r; + + for (i = 0; i < 2; ++i) { + picture->trd[i] = picture_info->trd[i]; + picture->trb[i] = picture_info->trb[i]; + } + picture->vop_time_increment_resolution = picture_info->vop_time_increment_resolution; + picture->vop_coding_type = picture_info->vop_coding_type; + picture->vop_fcode_forward = picture_info->vop_fcode_forward; + picture->vop_fcode_backward = picture_info->vop_fcode_backward; + picture->resync_marker_disable = picture_info->resync_marker_disable; + picture->interlaced = picture_info->interlaced; + picture->quant_type = picture_info->quant_type; + picture->quarter_sample = picture_info->quarter_sample; + picture->short_video_header = picture_info->short_video_header; + picture->rounding_control = picture_info->rounding_control; + picture->alternate_vertical_scan_flag = picture_info->alternate_vertical_scan_flag; + picture->top_field_first = picture_info->top_field_first; + picture->intra_matrix = picture_info->intra_quantizer_matrix; + picture->non_intra_matrix = picture_info->non_intra_quantizer_matrix; - for (i = 0; i < bitstream_buffer_count; ++i) - decoder->decode_bitstream(decoder, bitstream_buffers[i].bitstream_bytes, - bitstream_buffers[i].bitstream, num_ycbcr_blocks); + return VDP_STATUS_OK; +} - decoder->end_frame(decoder, num_ycbcr_blocks); +static VdpStatus +vlVdpDecoderRenderVC1(struct pipe_vc1_picture_desc *picture, + VdpPictureInfoVC1 *picture_info) +{ + VdpStatus r; + + VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding VC-1\n"); + + r = vlVdpGetReferenceFrame(picture_info->forward_reference, &picture->ref[0]); + if (r != VDP_STATUS_OK) + return r; + + r = vlVdpGetReferenceFrame(picture_info->backward_reference, &picture->ref[1]); + if (r != VDP_STATUS_OK) + return r; + + picture->slice_count = picture_info->slice_count; + picture->picture_type = picture_info->picture_type; + picture->frame_coding_mode = picture_info->frame_coding_mode; + picture->postprocflag = picture_info->postprocflag; + picture->pulldown = picture_info->pulldown; + picture->interlace = picture_info->interlace; + picture->tfcntrflag = picture_info->tfcntrflag; + picture->finterpflag = picture_info->finterpflag; + picture->psf = picture_info->psf; + picture->dquant = picture_info->dquant; + picture->panscan_flag = picture_info->panscan_flag; + picture->refdist_flag = picture_info->refdist_flag; + picture->quantizer = picture_info->quantizer; + picture->extended_mv = picture_info->extended_mv; + picture->extended_dmv = picture_info->extended_dmv; + picture->overlap = picture_info->overlap; + picture->vstransform = picture_info->vstransform; + picture->loopfilter = picture_info->loopfilter; + picture->fastuvmc = picture_info->fastuvmc; + picture->range_mapy_flag = picture_info->range_mapy_flag; + picture->range_mapy = picture_info->range_mapy; + picture->range_mapuv_flag = picture_info->range_mapuv_flag; + picture->range_mapuv = picture_info->range_mapuv; + picture->multires = picture_info->multires; + picture->syncmarker = picture_info->syncmarker; + picture->rangered = picture_info->rangered; + picture->maxbframes = 
picture_info->maxbframes; + picture->deblockEnable = picture_info->deblockEnable; + picture->pquant = picture_info->pquant; return VDP_STATUS_OK; } +static VdpStatus +vlVdpDecoderRenderH264(struct pipe_h264_picture_desc *picture, + VdpPictureInfoH264 *picture_info) +{ + unsigned i; + + VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding H264\n"); + + picture->slice_count = picture_info->slice_count; + picture->field_order_cnt[0] = picture_info->field_order_cnt[0]; + picture->field_order_cnt[1] = picture_info->field_order_cnt[1]; + picture->is_reference = picture_info->is_reference; + picture->frame_num = picture_info->frame_num; + picture->field_pic_flag = picture_info->field_pic_flag; + picture->bottom_field_flag = picture_info->bottom_field_flag; + picture->num_ref_frames = picture_info->num_ref_frames; + picture->mb_adaptive_frame_field_flag = picture_info->mb_adaptive_frame_field_flag; + picture->constrained_intra_pred_flag = picture_info->constrained_intra_pred_flag; + picture->weighted_pred_flag = picture_info->weighted_pred_flag; + picture->weighted_bipred_idc = picture_info->weighted_bipred_idc; + picture->frame_mbs_only_flag = picture_info->frame_mbs_only_flag; + picture->transform_8x8_mode_flag = picture_info->transform_8x8_mode_flag; + picture->chroma_qp_index_offset = picture_info->chroma_qp_index_offset; + picture->second_chroma_qp_index_offset = picture_info->second_chroma_qp_index_offset; + picture->pic_init_qp_minus26 = picture_info->pic_init_qp_minus26; + picture->num_ref_idx_l0_active_minus1 = picture_info->num_ref_idx_l0_active_minus1; + picture->num_ref_idx_l1_active_minus1 = picture_info->num_ref_idx_l1_active_minus1; + picture->log2_max_frame_num_minus4 = picture_info->log2_max_frame_num_minus4; + picture->pic_order_cnt_type = picture_info->pic_order_cnt_type; + picture->log2_max_pic_order_cnt_lsb_minus4 = picture_info->log2_max_pic_order_cnt_lsb_minus4; + picture->delta_pic_order_always_zero_flag = picture_info->delta_pic_order_always_zero_flag; + picture->direct_8x8_inference_flag = picture_info->direct_8x8_inference_flag; + picture->entropy_coding_mode_flag = picture_info->entropy_coding_mode_flag; + picture->pic_order_present_flag = picture_info->pic_order_present_flag; + picture->deblocking_filter_control_present_flag = picture_info->deblocking_filter_control_present_flag; + picture->redundant_pic_cnt_present_flag = picture_info->redundant_pic_cnt_present_flag; + + memcpy(picture->scaling_lists_4x4, picture_info->scaling_lists_4x4, 6*16); + memcpy(picture->scaling_lists_8x8, picture_info->scaling_lists_8x8, 2*64); + + for (i = 0; i < 16; ++i) { + VdpStatus ret = vlVdpGetReferenceFrame + ( + picture_info->referenceFrames[i].surface, + &picture->ref[i] + ); + if (ret != VDP_STATUS_OK) + return ret; + + picture->is_long_term[i] = picture_info->referenceFrames[i].is_long_term; + picture->top_is_reference[i] = picture_info->referenceFrames[i].top_is_reference; + picture->bottom_is_reference[i] = picture_info->referenceFrames[i].bottom_is_reference; + picture->field_order_cnt_list[i][0] = picture_info->referenceFrames[i].field_order_cnt[0]; + picture->field_order_cnt_list[i][1] = picture_info->referenceFrames[i].field_order_cnt[1]; + picture->frame_num_list[i] = picture_info->referenceFrames[i].frame_idx; + } + + return VDP_STATUS_OK; +} + +/** + * Decode a compressed field/frame and render the result into a VdpVideoSurface. 
+ */ VdpStatus vlVdpDecoderRender(VdpDecoder decoder, VdpVideoSurface target, @@ -231,10 +388,22 @@ vlVdpDecoderRender(VdpDecoder decoder, uint32_t bitstream_buffer_count, VdpBitstreamBuffer const *bitstream_buffers) { + const void * buffers[bitstream_buffer_count]; + unsigned sizes[bitstream_buffer_count]; vlVdpDecoder *vldecoder; vlVdpSurface *vlsurf; - - VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding\n"); + VdpStatus ret; + struct pipe_screen *screen; + struct pipe_video_decoder *dec; + bool buffer_support[2]; + unsigned i; + union { + struct pipe_picture_desc base; + struct pipe_mpeg12_picture_desc mpeg12; + struct pipe_mpeg4_picture_desc mpeg4; + struct pipe_vc1_picture_desc vc1; + struct pipe_h264_picture_desc h264; + } desc; if (!(picture_info && bitstream_buffers)) return VDP_STATUS_INVALID_POINTER; @@ -242,6 +411,8 @@ vlVdpDecoderRender(VdpDecoder decoder, vldecoder = (vlVdpDecoder *)vlGetDataHTAB(decoder); if (!vldecoder) return VDP_STATUS_INVALID_HANDLE; + dec = vldecoder->decoder; + screen = dec->context->screen; vlsurf = (vlVdpSurface *)vlGetDataHTAB(target); if (!vlsurf) @@ -250,26 +421,71 @@ vlVdpDecoderRender(VdpDecoder decoder, if (vlsurf->device != vldecoder->device) return VDP_STATUS_HANDLE_DEVICE_MISMATCH; - if (vlsurf->video_buffer->chroma_format != vldecoder->decoder->chroma_format) + if (vlsurf->video_buffer != NULL && vlsurf->video_buffer->chroma_format != dec->chroma_format) // TODO: Recreate decoder with correct chroma return VDP_STATUS_INVALID_CHROMA_TYPE; - // TODO: Right now only mpeg 1 & 2 is supported. - switch (vldecoder->decoder->profile) { - case PIPE_VIDEO_PROFILE_MPEG1: - case PIPE_VIDEO_PROFILE_MPEG2_SIMPLE: - case PIPE_VIDEO_PROFILE_MPEG2_MAIN: - ++vldecoder->cur_buffer; - vldecoder->cur_buffer %= VL_NUM_DECODE_BUFFERS; + pipe_mutex_lock(vlsurf->device->mutex); - vldecoder->decoder->set_decode_buffer(vldecoder->decoder, vldecoder->buffer[vldecoder->cur_buffer]); - vldecoder->decoder->set_decode_target(vldecoder->decoder, vlsurf->video_buffer); + buffer_support[0] = screen->get_video_param(screen, dec->profile, PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE); + buffer_support[1] = screen->get_video_param(screen, dec->profile, PIPE_VIDEO_CAP_SUPPORTS_INTERLACED); - return vlVdpDecoderRenderMpeg12(vldecoder->decoder, (VdpPictureInfoMPEG1Or2 *)picture_info, - bitstream_buffer_count, bitstream_buffers); - break; + if (vlsurf->video_buffer == NULL || + !screen->is_video_format_supported(screen, vlsurf->video_buffer->buffer_format, dec->profile) || + buffer_support[vlsurf->video_buffer->interlaced]) { + + /* destroy the old one */ + if (vlsurf->video_buffer) + vlsurf->video_buffer->destroy(vlsurf->video_buffer); + + /* set the buffer format to the prefered one */ + vlsurf->templat.buffer_format = screen->get_video_param(screen, dec->profile, PIPE_VIDEO_CAP_PREFERED_FORMAT); + + /* also set interlacing to decoders preferences */ + vlsurf->templat.interlaced = screen->get_video_param(screen, dec->profile, PIPE_VIDEO_CAP_PREFERS_INTERLACED); + + /* and recreate the video buffer */ + vlsurf->video_buffer = dec->context->create_video_buffer(dec->context, &vlsurf->templat); + + /* still no luck? get me out of here... 
*/ + if (!vlsurf->video_buffer) { + pipe_mutex_unlock(vlsurf->device->mutex); + return VDP_STATUS_NO_IMPLEMENTATION; + } + } + memset(&desc, 0, sizeof(desc)); + desc.base.profile = dec->profile; + switch (u_reduce_video_profile(dec->profile)) { + case PIPE_VIDEO_CODEC_MPEG12: + ret = vlVdpDecoderRenderMpeg12(&desc.mpeg12, (VdpPictureInfoMPEG1Or2 *)picture_info); + break; + case PIPE_VIDEO_CODEC_MPEG4: + ret = vlVdpDecoderRenderMpeg4(&desc.mpeg4, (VdpPictureInfoMPEG4Part2 *)picture_info); + break; + case PIPE_VIDEO_CODEC_VC1: + ret = vlVdpDecoderRenderVC1(&desc.vc1, (VdpPictureInfoVC1 *)picture_info); + break; + case PIPE_VIDEO_CODEC_MPEG4_AVC: + ret = vlVdpDecoderRenderH264(&desc.h264, (VdpPictureInfoH264 *)picture_info); + break; default: + pipe_mutex_unlock(vlsurf->device->mutex); return VDP_STATUS_INVALID_DECODER_PROFILE; } + if (ret != VDP_STATUS_OK) { + pipe_mutex_unlock(vlsurf->device->mutex); + return ret; + } + + for (i = 0; i < bitstream_buffer_count; ++i) { + buffers[i] = bitstream_buffers[i].bitstream; + sizes[i] = bitstream_buffers[i].bitstream_bytes; + } + + dec->begin_frame(dec, vlsurf->video_buffer, &desc.base); + dec->decode_bitstream(dec, vlsurf->video_buffer, &desc.base, bitstream_buffer_count, buffers, sizes); + dec->end_frame(dec, vlsurf->video_buffer, &desc.base); + pipe_mutex_unlock(vlsurf->device->mutex); + return ret; }
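
For reference, here is a minimal sketch of how a VDPAU client would exercise the entry points touched by this change (VdpDecoderCreate with the max_references argument that is now forwarded to the driver, VdpDecoderRender, VdpDecoderDestroy). It is an illustration only, not part of the patch: it assumes a VdpDevice already exists and that the vdp_* function pointers were fetched through VdpGetProcAddress, and the resolution, slice count and bitstream data are placeholders.

/* Hypothetical client-side sketch; error handling and surface cleanup are
 * trimmed, and the MPEG-2 picture parameters are placeholders that a real
 * player would fill from the parsed stream headers. */
#include <string.h>
#include <vdpau/vdpau.h>

static VdpStatus
decode_one_mpeg2_picture(VdpDevice dev,
                         VdpDecoderCreate *vdp_decoder_create,
                         VdpDecoderRender *vdp_decoder_render,
                         VdpDecoderDestroy *vdp_decoder_destroy,
                         VdpVideoSurfaceCreate *vdp_video_surface_create,
                         const void *data, uint32_t size)
{
   VdpDecoder decoder;
   VdpVideoSurface target;
   VdpPictureInfoMPEG1Or2 info;
   VdpBitstreamBuffer buf;
   VdpStatus st;

   /* max_references = 1 is enough for MPEG-2 P-pictures; the state tracker
    * now passes this straight through to create_video_decoder. */
   st = vdp_decoder_create(dev, VDP_DECODER_PROFILE_MPEG2_MAIN,
                           1920, 1080, 1, &decoder);
   if (st != VDP_STATUS_OK)
      return st;

   st = vdp_video_surface_create(dev, VDP_CHROMA_TYPE_420, 1920, 1080, &target);
   if (st != VDP_STATUS_OK) {
      vdp_decoder_destroy(decoder);
      return st;
   }

   memset(&info, 0, sizeof(info));
   info.forward_reference  = VDP_INVALID_HANDLE;  /* I-picture: no references */
   info.backward_reference = VDP_INVALID_HANDLE;
   info.picture_coding_type = 1;                  /* I-picture */
   info.picture_structure   = 3;                  /* frame picture */
   info.slice_count          = 68;                /* placeholder */
   /* ...remaining picture fields and quantizer matrices go here... */

   buf.struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
   buf.bitstream       = data;
   buf.bitstream_bytes = size;

   st = vdp_decoder_render(decoder, target, &info, 1, &buf);

   /* In a real player the target surface now holds the decoded frame and is
    * handed to the video mixer / presentation queue before being destroyed. */
   vdp_decoder_destroy(decoder);
   return st;
}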