From b4fb6d76161d86c67697cf28a221d7913b8d084d Mon Sep 17 00:00:00 2001
From: Julien Isorce
Date: Thu, 26 Nov 2015 08:29:54 +0000
Subject: [PATCH] st/va: delay decoder creation until max_references is known
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

In general max_references cannot be based on num_render_targets.
This patch allows buffers to be allocated with an accurate size,
i.e. no more than necessary. For codecs other than H.264 and HEVC
it is a fixed value of 2. This matches the behaviour of the
vaapi/vdpau-driver.

For now the HEVC case defaults to num_render_targets as before, but
it could also benefit from this change by setting a more accurate
max_references value in handlePictureParameterBuffer.

Signed-off-by: Julien Isorce
Reviewed-by: Christian König
Reviewed-by: Emil Velikov
---
 src/gallium/state_trackers/va/context.c      | 48 ++++++++++----------
 src/gallium/state_trackers/va/picture.c      | 48 ++++++++++++++++----
 src/gallium/state_trackers/va/picture_h264.c |  4 ++
 src/gallium/state_trackers/va/va_private.h   |  2 +-
 4 files changed, 68 insertions(+), 34 deletions(-)

diff --git a/src/gallium/state_trackers/va/context.c b/src/gallium/state_trackers/va/context.c
index f0051e5f6a5..192794fefaa 100644
--- a/src/gallium/state_trackers/va/context.c
+++ b/src/gallium/state_trackers/va/context.c
@@ -187,7 +187,6 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
                   int picture_height, int flag, VASurfaceID *render_targets,
                   int num_render_targets, VAContextID *context_id)
 {
-   struct pipe_video_codec templat = {};
    vlVaDriver *drv;
    vlVaContext *context;
    int is_vpp;
@@ -213,27 +212,22 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
          return VA_STATUS_ERROR_INVALID_CONTEXT;
       }
    } else {
-      templat.profile = config_id;
-      templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
-      templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
-      templat.width = picture_width;
-      templat.height = picture_height;
-      templat.max_references = num_render_targets;
-      templat.expect_chunked_decode = true;
-
-      if (u_reduce_video_profile(templat.profile) ==
-          PIPE_VIDEO_FORMAT_MPEG4_AVC)
-         templat.level = u_get_h264_level(templat.width, templat.height,
-                            &templat.max_references);
-
-      context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);
-      if (!context->decoder) {
-         FREE(context);
-         return VA_STATUS_ERROR_ALLOCATION_FAILED;
-      }
-
-      if (u_reduce_video_profile(context->decoder->profile) ==
-          PIPE_VIDEO_FORMAT_MPEG4_AVC) {
+      context->templat.profile = config_id;
+      context->templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
+      context->templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
+      context->templat.width = picture_width;
+      context->templat.height = picture_height;
+      context->templat.expect_chunked_decode = true;
+
+      switch (u_reduce_video_profile(context->templat.profile)) {
+      case PIPE_VIDEO_FORMAT_MPEG12:
+      case PIPE_VIDEO_FORMAT_VC1:
+      case PIPE_VIDEO_FORMAT_MPEG4:
+         context->templat.max_references = 2;
+         break;
+
+      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
+         context->templat.max_references = 0;
          context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
          if (!context->desc.h264.pps) {
             FREE(context);
@@ -245,10 +239,10 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
             FREE(context);
             return VA_STATUS_ERROR_ALLOCATION_FAILED;
          }
-      }
+         break;
 
-      if (u_reduce_video_profile(context->decoder->profile) ==
-          PIPE_VIDEO_FORMAT_HEVC) {
+      case PIPE_VIDEO_FORMAT_HEVC:
+         context->templat.max_references = num_render_targets;
          context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);
          if (!context->desc.h265.pps) {
             FREE(context);
@@ -260,6 +254,10 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
             FREE(context);
             return VA_STATUS_ERROR_ALLOCATION_FAILED;
          }
+         break;
+
+      default:
+         break;
       }
    }
 
diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
index 34e7d553d1b..c7c377ad43e 100644
--- a/src/gallium/state_trackers/va/picture.c
+++ b/src/gallium/state_trackers/va/picture.c
@@ -59,14 +59,17 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
       return VA_STATUS_ERROR_INVALID_SURFACE;
 
    context->target = surf->buffer;
+
    if (!context->decoder) {
       /* VPP */
-      if ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
+      if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
+          ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
           context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
           context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM &&
           context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM) ||
-          context->target->interlaced)
+          context->target->interlaced))
          return VA_STATUS_ERROR_UNIMPLEMENTED;
+
       return VA_STATUS_SUCCESS;
    }
 
@@ -86,13 +89,14 @@ vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
       *ref_frame = NULL;
 }
 
-static void
+static VAStatus
 handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
 {
    VAPictureParameterBufferHEVC *hevc;
    unsigned int i;
+   VAStatus vaStatus = VA_STATUS_SUCCESS;
 
-   switch (u_reduce_video_profile(context->decoder->profile)) {
+   switch (u_reduce_video_profile(context->templat.profile)) {
    case PIPE_VIDEO_FORMAT_MPEG12:
       vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
       break;
@@ -263,6 +267,31 @@ handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *
    default:
       break;
    }
+
+   /* Create the decoder once max_references is known. */
+   if (!context->decoder) {
+      if (!context->target)
+         return VA_STATUS_ERROR_INVALID_CONTEXT;
+
+      if (context->templat.max_references == 0)
+         return VA_STATUS_ERROR_INVALID_BUFFER;
+
+      if (u_reduce_video_profile(context->templat.profile) ==
+          PIPE_VIDEO_FORMAT_MPEG4_AVC)
+         context->templat.level = u_get_h264_level(context->templat.width,
+            context->templat.height, &context->templat.max_references);
+
+      context->decoder = drv->pipe->create_video_codec(drv->pipe,
+         &context->templat);
+
+      if (!context->decoder)
+         return VA_STATUS_ERROR_ALLOCATION_FAILED;
+
+      context->decoder->begin_frame(context->decoder, context->target,
+         &context->desc.base);
+   }
+
+   return vaStatus;
 }
 
 static void
@@ -270,7 +299,7 @@ handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
 {
    VAIQMatrixBufferHEVC *h265;
 
-   switch (u_reduce_video_profile(context->decoder->profile)) {
+   switch (u_reduce_video_profile(context->templat.profile)) {
    case PIPE_VIDEO_FORMAT_MPEG12:
       vlVaHandleIQMatrixBufferMPEG12(context, buf);
       break;
@@ -304,7 +333,7 @@ handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
 {
    VASliceParameterBufferHEVC *h265;
 
-   switch (u_reduce_video_profile(context->decoder->profile)) {
+   switch (u_reduce_video_profile(context->templat.profile)) {
    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
       vlVaHandleSliceParameterBufferH264(context, buf);
       break;
@@ -356,7 +385,7 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
    static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
    static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
 
-   format = u_reduce_video_profile(context->decoder->profile);
+   format = u_reduce_video_profile(context->templat.profile);
    switch (format) {
    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
       if (bufHasStartcode(buf, 0x000001, 24))
@@ -428,7 +457,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
 
       switch (buf->type) {
       case VAPictureParameterBufferType:
-         handlePictureParameterBuffer(drv, context, buf);
+         vaStatus = handlePictureParameterBuffer(drv, context, buf);
          break;
 
       case VAIQMatrixBufferType:
@@ -472,6 +501,9 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    if (!context->decoder) {
+      if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
+         return VA_STATUS_ERROR_INVALID_CONTEXT;
+
       /* VPP */
       return VA_STATUS_SUCCESS;
    }
diff --git a/src/gallium/state_trackers/va/picture_h264.c b/src/gallium/state_trackers/va/picture_h264.c
index bd6c8a0f127..f6e5b70ebcc 100644
--- a/src/gallium/state_trackers/va/picture_h264.c
+++ b/src/gallium/state_trackers/va/picture_h264.c
@@ -26,6 +26,7 @@
  *
  **************************************************************************/
 
+#include "util/u_video.h"
 #include "va_private.h"
 
 void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
@@ -90,6 +91,9 @@ void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context,
       h264->pic_fields.bits.redundant_pic_cnt_present_flag;
    /*reference_pic_flag*/
    context->desc.h264.frame_num = h264->frame_num;
+
+   if (!context->decoder && context->desc.h264.num_ref_frames > 0)
+      context->templat.max_references = MIN2(context->desc.h264.num_ref_frames, 16);
 }
 
 void vlVaHandleIQMatrixBufferH264(vlVaContext *context, vlVaBuffer *buf)
diff --git a/src/gallium/state_trackers/va/va_private.h b/src/gallium/state_trackers/va/va_private.h
index 98ca66af98c..da2e593d469 100644
--- a/src/gallium/state_trackers/va/va_private.h
+++ b/src/gallium/state_trackers/va/va_private.h
@@ -215,7 +215,7 @@ typedef struct {
 } vlVaSubpicture;
 
 typedef struct {
-   struct pipe_video_codec *decoder;
+   struct pipe_video_codec templat, *decoder;
    struct pipe_video_buffer *target;
    union {
       struct pipe_picture_desc base;
-- 
2.30.2
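
For context, the sketch below shows the client-side call sequence that exercises this change: with the patch, the gallium decoder is no longer created in vaCreateContext but during the first vaRenderPicture that carries a VAPictureParameterBufferType buffer, once max_references is known. This is only an illustrative sketch against the public VA-API, not part of the patch; the display, surfaces and the filled VAPictureParameterBufferH264 (dpy, surfaces, pic_param) are assumed to exist.

/* Illustrative sketch only (not part of the patch): decode-path call order
 * seen from a VA-API client. Assumes dpy, surfaces[], num_surfaces, width,
 * height and a filled VAPictureParameterBufferH264 *pic_param exist. */
#include <va/va.h>

static VAStatus decode_setup_first_frame(VADisplay dpy, VASurfaceID *surfaces,
                                         int num_surfaces, int width, int height,
                                         VAPictureParameterBufferH264 *pic_param)
{
   VAConfigID config;
   VAContextID context;
   VABufferID pic_buf;
   VAStatus status;

   status = vaCreateConfig(dpy, VAProfileH264Main, VAEntrypointVLD,
                           NULL, 0, &config);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* With this patch the pipe_video_codec is no longer created here;
    * vlVaCreateContext only fills context->templat. */
   status = vaCreateContext(dpy, config, width, height, VA_PROGRESSIVE,
                            surfaces, num_surfaces, &context);
   if (status != VA_STATUS_SUCCESS)
      return status;

   status = vaBeginPicture(dpy, context, surfaces[0]);
   if (status != VA_STATUS_SUCCESS)
      return status;

   status = vaCreateBuffer(dpy, context, VAPictureParameterBufferType,
                           sizeof(*pic_param), 1, pic_param, &pic_buf);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* The picture parameter buffer carries num_ref_frames, so the decoder is
    * created during this call with an accurate max_references. */
   status = vaRenderPicture(dpy, context, &pic_buf, 1);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* Slice parameter and slice data buffers would be rendered here as well
    * before finishing the frame. */
   return vaEndPicture(dpy, context);
}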